Mirror of https://github.com/Dispatcharr/Dispatcharr.git (synced 2026-01-23 10:45:27 +00:00)
Compare commits

921 commits (8521df94ad … 7e5be6094f)
325 changed files with 68728 additions and 10765 deletions
@@ -31,3 +31,4 @@
LICENSE
README.md
data/
docker/data/
214 .github/workflows/base-image.yml vendored

@@ -2,42 +2,37 @@ name: Base Image Build
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main, dev ]
|
||||
branches: [main, dev]
|
||||
paths:
|
||||
- 'docker/DispatcharrBase'
|
||||
- '.github/workflows/base-image.yml'
|
||||
- 'requirements.txt'
|
||||
pull_request:
|
||||
branches: [ main, dev ]
|
||||
branches: [main, dev]
|
||||
paths:
|
||||
- 'docker/DispatcharrBase'
|
||||
- '.github/workflows/base-image.yml'
|
||||
- 'requirements.txt'
|
||||
workflow_dispatch: # Allow manual triggering
|
||||
workflow_dispatch: # Allow manual triggering
|
||||
|
||||
permissions:
|
||||
contents: write # For managing releases and pushing tags
|
||||
packages: write # For publishing to GitHub Container Registry
|
||||
contents: write # For managing releases and pushing tags
|
||||
packages: write # For publishing to GitHub Container Registry
|
||||
|
||||
jobs:
|
||||
build-base-image:
|
||||
runs-on: ubuntu-latest
|
||||
prepare:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
repo_owner: ${{ steps.meta.outputs.repo_owner }}
|
||||
repo_name: ${{ steps.meta.outputs.repo_name }}
|
||||
branch_tag: ${{ steps.meta.outputs.branch_tag }}
|
||||
timestamp: ${{ steps.timestamp.outputs.timestamp }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Generate timestamp for build
|
||||
id: timestamp
|
||||
run: |
|
||||
|
|
@@ -66,13 +61,190 @@ jobs:
|
|||
echo "branch_tag=base-${BRANCH}" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
docker:
|
||||
needs: [prepare]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: [amd64, arm64]
|
||||
include:
|
||||
- platform: amd64
|
||||
runner: ubuntu-24.04
|
||||
- platform: arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
runs-on: ${{ matrix.runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "actions@github.com"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
|
||||
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
|
||||
org.opencontainers.image.url=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
|
||||
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.licenses=See repository
|
||||
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
|
||||
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
|
||||
org.opencontainers.image.authors=${{ github.actor }}
|
||||
maintainer=${{ github.actor }}
|
||||
build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
- name: Build and push Docker base image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/DispatcharrBase
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
platforms: linux/${{ matrix.platform }}
|
||||
tags: |
|
||||
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base
|
||||
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:base-${{ steps.timestamp.outputs.timestamp }}
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
|
||||
BRANCH=${{ github.ref_name }}
|
||||
REPO_URL=https://github.com/${{ github.repository }}
|
||||
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
create-manifest:
|
||||
needs: [prepare, docker]
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Create multi-arch manifest tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO=${{ needs.prepare.outputs.repo_name }}
|
||||
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
|
||||
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
|
||||
|
||||
# GitHub Container Registry manifests
|
||||
# branch tag (e.g. base or base-dev)
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
|
||||
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
|
||||
|
||||
# branch + timestamp tag
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
|
||||
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
|
||||
|
||||
# Docker Hub manifests
|
||||
# branch tag (e.g. base or base-dev)
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
|
||||
|
||||
# branch + timestamp tag
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64
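Once each architecture's image has been pushed under its own tag, the create-manifest job stitches them into a single manifest list per registry. A quick way to confirm the result from any machine with Buildx is sketched below; the ghcr.io/dispatcharr/dispatcharr path and the `base` tag are assumptions for illustration (the workflow derives them from the repository name and branch).

```bash
# Inspect the combined manifest list produced by the create-manifest job.
# Both linux/amd64 and linux/arm64 entries should appear under the one tag.
# The image reference is illustrative; substitute the actual owner/repo and tag.
docker buildx imagetools inspect ghcr.io/dispatcharr/dispatcharr:base
```

The timestamped tag (base-&lt;TIMESTAMP&gt;) can be inspected the same way.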
277 .github/workflows/ci.yml vendored

@@ -2,19 +2,86 @@ name: CI Pipeline
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ dev ]
|
||||
branches: [dev]
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
pull_request:
|
||||
branches: [ dev ]
|
||||
workflow_dispatch: # Allow manual triggering
|
||||
branches: [dev]
|
||||
workflow_dispatch:
|
||||
|
||||
# Add explicit permissions for the workflow
|
||||
permissions:
|
||||
contents: write # For managing releases and pushing tags
|
||||
packages: write # For publishing to GitHub Container Registry
|
||||
contents: write
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
prepare:
|
||||
runs-on: ubuntu-24.04
|
||||
# compute a single timestamp, version, and repo metadata for the entire workflow
|
||||
outputs:
|
||||
repo_owner: ${{ steps.meta.outputs.repo_owner }}
|
||||
repo_name: ${{ steps.meta.outputs.repo_name }}
|
||||
branch_tag: ${{ steps.meta.outputs.branch_tag }}
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
timestamp: ${{ steps.timestamp.outputs.timestamp }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Generate timestamp for build
|
||||
id: timestamp
|
||||
run: |
|
||||
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
|
||||
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Extract version info
|
||||
id: version
|
||||
run: |
|
||||
VERSION=$(python -c "import version; print(version.__version__)")
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set repository and image metadata
|
||||
id: meta
|
||||
run: |
|
||||
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
|
||||
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
|
||||
|
||||
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
|
||||
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
echo "branch_tag=latest" >> $GITHUB_OUTPUT
|
||||
echo "is_main=true" >> $GITHUB_OUTPUT
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
|
||||
echo "branch_tag=dev" >> $GITHUB_OUTPUT
|
||||
echo "is_main=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
|
||||
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
|
||||
echo "is_main=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
|
||||
echo "is_fork=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "is_fork=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
docker:
|
||||
needs: [prepare]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: [amd64, arm64]
|
||||
include:
|
||||
- platform: amd64
|
||||
runner: ubuntu-24.04
|
||||
- platform: arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
runs-on: ${{ matrix.runner }}
|
||||
# no per-job outputs here; shared metadata comes from the `prepare` job
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
|
@@ -45,66 +112,162 @@ jobs:
|
|||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Generate timestamp for build
|
||||
id: timestamp
|
||||
run: |
|
||||
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
|
||||
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Extract version info
|
||||
id: version
|
||||
run: |
|
||||
VERSION=$(python -c "import version; print(version.__version__)")
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set repository and image metadata
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
run: |
|
||||
# Get lowercase repository owner
|
||||
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
|
||||
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Get repository name
|
||||
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
|
||||
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Determine branch name
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
echo "branch_tag=latest" >> $GITHUB_OUTPUT
|
||||
echo "is_main=true" >> $GITHUB_OUTPUT
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/dev" ]]; then
|
||||
echo "branch_tag=dev" >> $GITHUB_OUTPUT
|
||||
echo "is_main=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
# For other branches, use the branch name
|
||||
BRANCH=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///' | sed 's/[^a-zA-Z0-9]/-/g')
|
||||
echo "branch_tag=${BRANCH}" >> $GITHUB_OUTPUT
|
||||
echo "is_main=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# Determine if this is from a fork
|
||||
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
|
||||
echo "is_fork=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "is_fork=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
|
||||
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
|
||||
org.opencontainers.image.url=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
|
||||
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.licenses=See repository
|
||||
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
|
||||
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
|
||||
org.opencontainers.image.authors=${{ github.actor }}
|
||||
maintainer=${{ github.actor }}
|
||||
build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
# Build only the platform for this matrix job to avoid running amd64
|
||||
# stages under qemu on an arm64 runner (and vice-versa). This makes
|
||||
# the matrix runner's platform the one built by buildx.
|
||||
platforms: linux/${{ matrix.platform }}
|
||||
# push arch-specific tags from each matrix job (they will be combined
|
||||
# into a multi-arch manifest in a follow-up job)
|
||||
tags: |
|
||||
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.meta.outputs.branch_tag }}
|
||||
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.version }}-${{ steps.timestamp.outputs.timestamp }}
|
||||
ghcr.io/${{ steps.meta.outputs.repo_owner }}/${{ steps.meta.outputs.repo_name }}:${{ steps.version.outputs.sha_short }}
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
REPO_OWNER=${{ steps.meta.outputs.repo_owner }}
|
||||
REPO_NAME=${{ steps.meta.outputs.repo_name }}
|
||||
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
|
||||
BASE_TAG=base
|
||||
BRANCH=${{ github.ref_name }}
|
||||
REPO_URL=https://github.com/${{ github.repository }}
|
||||
TIMESTAMP=${{ steps.timestamp.outputs.timestamp }}
|
||||
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
|
||||
file: ./docker/Dockerfile
|
||||
|
||||
create-manifest:
|
||||
# wait for prepare and all matrix builds to finish
|
||||
needs: [prepare, docker]
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Create multi-arch manifest tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO=${{ needs.prepare.outputs.repo_name }}
|
||||
BRANCH_TAG=${{ needs.prepare.outputs.branch_tag }}
|
||||
VERSION=${{ needs.prepare.outputs.version }}
|
||||
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
|
||||
|
||||
# branch tag (e.g. latest or dev)
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \
|
||||
ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64
|
||||
|
||||
# version + timestamp tag
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \
|
||||
ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
|
||||
|
||||
# also create Docker Hub manifests using the same username
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${BRANCH_TAG}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64
|
||||
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64
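For reference, the pieces the prepare job computes (lowercased owner and repo, the version from version.py, and a UTC timestamp) can be reproduced locally to predict the arch-specific tags the matrix jobs will push. This is a rough sketch under stated assumptions: the repository slug is hard-coded here, and version.py must be importable from the current directory.

```bash
#!/usr/bin/env bash
set -euo pipefail

# Assumed repository slug; in the workflow this comes from github.repository.
REPO_SLUG="Dispatcharr/Dispatcharr"

REPO_OWNER=$(echo "${REPO_SLUG%%/*}" | tr '[:upper:]' '[:lower:]')  # lowercased owner
REPO_NAME=$(echo "${REPO_SLUG##*/}" | tr '[:upper:]' '[:lower:]')   # lowercased repo name
VERSION=$(python -c "import version; print(version.__version__)")   # requires version.py in the cwd
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')                                 # same UTC format the workflow uses

# Example of one arch-specific tag a matrix job would push:
echo "ghcr.io/${REPO_OWNER}/${REPO_NAME}:${VERSION}-${TIMESTAMP}-amd64"
```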
41 .github/workflows/frontend-tests.yml vendored (new file)

@@ -0,0 +1,41 @@
name: Frontend Tests

on:
  push:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'
  pull_request:
    branches: [main, dev]
    paths:
      - 'frontend/**'
      - '.github/workflows/frontend-tests.yml'

jobs:
  test:
    runs-on: ubuntu-latest

    defaults:
      run:
        working-directory: ./frontend

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24'
          cache: 'npm'
          cache-dependency-path: './frontend/package-lock.json'

      - name: Install dependencies
        run: npm ci

      # - name: Run linter
      #   run: npm run lint

      - name: Run tests
        run: npm test
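The same checks the workflow performs can be run locally before pushing. The sketch below assumes Node.js 24 is installed and is run from the repository root:

```bash
# Mirror the Frontend Tests workflow locally.
cd frontend
npm ci     # clean install from package-lock.json, as the workflow does
npm test   # the same test command the workflow runs
# npm run lint   # lint step is still commented out in the workflow
```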
195 .github/workflows/release.yml vendored

@@ -15,16 +15,22 @@ on:
|
|||
|
||||
# Add explicit permissions for the workflow
|
||||
permissions:
|
||||
contents: write # For managing releases and pushing tags
|
||||
packages: write # For publishing to GitHub Container Registry
|
||||
contents: write # For managing releases and pushing tags
|
||||
packages: write # For publishing to GitHub Container Registry
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
prepare:
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
new_version: ${{ steps.update_version.outputs.new_version }}
|
||||
repo_owner: ${{ steps.meta.outputs.repo_owner }}
|
||||
repo_name: ${{ steps.meta.outputs.repo_name }}
|
||||
timestamp: ${{ steps.timestamp.outputs.timestamp }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
|
|
@@ -38,14 +44,55 @@ jobs:
|
|||
NEW_VERSION=$(python -c "import version; print(f'{version.__version__}')")
|
||||
echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set lowercase repo owner
|
||||
id: repo_owner
|
||||
- name: Update Changelog
|
||||
run: |
|
||||
python scripts/update_changelog.py ${{ steps.update_version.outputs.new_version }}
|
||||
|
||||
- name: Set repository metadata
|
||||
id: meta
|
||||
run: |
|
||||
REPO_OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
|
||||
echo "lowercase=${REPO_OWNER}" >> $GITHUB_OUTPUT
|
||||
echo "repo_owner=${REPO_OWNER}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]')
|
||||
echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Generate timestamp for build
|
||||
id: timestamp
|
||||
run: |
|
||||
TIMESTAMP=$(date -u +'%Y%m%d%H%M%S')
|
||||
echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Commit and Tag
|
||||
run: |
|
||||
git add version.py CHANGELOG.md
|
||||
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
|
||||
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
|
||||
git push origin main --tags
|
||||
|
||||
docker:
|
||||
needs: [prepare]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: [amd64, arm64]
|
||||
include:
|
||||
- platform: amd64
|
||||
runner: ubuntu-24.04
|
||||
- platform: arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
runs-on: ${{ matrix.runner }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
ref: main
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "GitHub Actions"
|
||||
git config user.email "actions@github.com"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
|
@@ -57,36 +104,134 @@ jobs:
|
|||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Commit and Tag
|
||||
run: |
|
||||
git add version.py
|
||||
git commit -m "Release v${{ steps.update_version.outputs.new_version }}"
|
||||
git tag -a "v${{ steps.update_version.outputs.new_version }}" -m "Release v${{ steps.update_version.outputs.new_version }}"
|
||||
git push origin main --tags
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build and Push Release Image
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}
|
||||
org.opencontainers.image.description=Your ultimate IPTV & stream Management companion.
|
||||
org.opencontainers.image.url=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }}
|
||||
org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.licenses=See repository
|
||||
org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/
|
||||
org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }}
|
||||
org.opencontainers.image.authors=${{ github.actor }}
|
||||
maintainer=${{ github.actor }}
|
||||
build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64, #linux/arm/v7 # Multi-arch support for releases
|
||||
platforms: linux/${{ matrix.platform }}
|
||||
tags: |
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-amd64
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:latest-arm64
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-amd64
|
||||
ghcr.io/${{ steps.repo_owner.outputs.lowercase }}/dispatcharr:${{ steps.update_version.outputs.new_version }}-arm64
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
|
||||
ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }}
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
REPO_OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO_NAME=${{ needs.prepare.outputs.repo_name }}
|
||||
BRANCH=${{ github.ref_name }}
|
||||
REPO_URL=https://github.com/${{ github.repository }}
|
||||
file: ./docker/Dockerfile
|
||||
|
||||
create-manifest:
|
||||
needs: [prepare, docker]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Create multi-arch manifest tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
OWNER=${{ needs.prepare.outputs.repo_owner }}
|
||||
REPO=${{ needs.prepare.outputs.repo_name }}
|
||||
VERSION=${{ needs.prepare.outputs.new_version }}
|
||||
TIMESTAMP=${{ needs.prepare.outputs.timestamp }}
|
||||
|
||||
echo "Creating multi-arch manifest for ${OWNER}/${REPO}"
|
||||
|
||||
# GitHub Container Registry manifests
|
||||
# Create one manifest with both latest and version tags
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${VERSION}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:latest \
|
||||
--tag ghcr.io/${OWNER}/${REPO}:${VERSION} \
|
||||
ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64
|
||||
|
||||
# Docker Hub manifests
|
||||
# Create one manifest with both latest and version tags
|
||||
docker buildx imagetools create \
|
||||
--annotation "index:org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \
|
||||
--annotation "index:org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \
|
||||
--annotation "index:org.opencontainers.image.url=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.source=https://github.com/${{ github.repository }}" \
|
||||
--annotation "index:org.opencontainers.image.version=${VERSION}" \
|
||||
--annotation "index:org.opencontainers.image.created=${TIMESTAMP}" \
|
||||
--annotation "index:org.opencontainers.image.revision=${{ github.sha }}" \
|
||||
--annotation "index:org.opencontainers.image.licenses=See repository" \
|
||||
--annotation "index:org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \
|
||||
--annotation "index:org.opencontainers.image.vendor=${OWNER}" \
|
||||
--annotation "index:org.opencontainers.image.authors=${{ github.actor }}" \
|
||||
--annotation "index:maintainer=${{ github.actor }}" \
|
||||
--annotation "index:build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \
|
||||
--tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \
|
||||
docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64
|
||||
|
||||
create-release:
|
||||
needs: [prepare, create-manifest]
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
tag_name: v${{ steps.update_version.outputs.new_version }}
|
||||
name: Release v${{ steps.update_version.outputs.new_version }}
|
||||
tag_name: v${{ needs.prepare.outputs.new_version }}
|
||||
name: Release v${{ needs.prepare.outputs.new_version }}
|
||||
draft: false
|
||||
prerelease: false
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
|
|
|||
3 .gitignore vendored
|
|
@ -18,4 +18,5 @@ dump.rdb
|
|||
debugpy*
|
||||
uwsgi.sock
|
||||
package-lock.json
|
||||
models
|
||||
models
|
||||
.idea
|
||||
1014 CHANGELOG.md Normal file
File diff suppressed because it is too large
286 Plugins.md Normal file
|
|
@ -0,0 +1,286 @@
|
|||
# Dispatcharr Plugins
|
||||
|
||||
This document explains how to build, install, and use Python plugins in Dispatcharr. It covers discovery, the plugin interface, settings, actions, how to access application APIs, and examples.
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
1) Create a folder under `/app/data/plugins/my_plugin/` (host path `data/plugins/my_plugin/` in the repo).
|
||||
|
||||
2) Add a `plugin.py` file exporting a `Plugin` class:
|
||||
|
||||
```
|
||||
# /app/data/plugins/my_plugin/plugin.py
|
||||
class Plugin:
|
||||
name = "My Plugin"
|
||||
version = "0.1.0"
|
||||
description = "Does something useful"
|
||||
|
||||
# Settings fields rendered by the UI and persisted by the backend
|
||||
fields = [
|
||||
{"id": "enabled", "label": "Enabled", "type": "boolean", "default": True},
|
||||
{"id": "limit", "label": "Item limit", "type": "number", "default": 5},
|
||||
{"id": "mode", "label": "Mode", "type": "select", "default": "safe",
|
||||
"options": [
|
||||
{"value": "safe", "label": "Safe"},
|
||||
{"value": "fast", "label": "Fast"},
|
||||
]},
|
||||
{"id": "note", "label": "Note", "type": "string", "default": ""},
|
||||
]
|
||||
|
||||
# Actions appear as buttons. Clicking one calls run(action, params, context)
|
||||
actions = [
|
||||
{"id": "do_work", "label": "Do Work", "description": "Process items"},
|
||||
]
|
||||
|
||||
def run(self, action: str, params: dict, context: dict):
|
||||
settings = context.get("settings", {})
|
||||
logger = context.get("logger")
|
||||
|
||||
if action == "do_work":
|
||||
limit = int(settings.get("limit", 5))
|
||||
mode = settings.get("mode", "safe")
|
||||
logger.info(f"My Plugin running with limit={limit}, mode={mode}")
|
||||
# Do a small amount of work here. Schedule Celery tasks for heavy work.
|
||||
return {"status": "ok", "processed": limit, "mode": mode}
|
||||
|
||||
return {"status": "error", "message": f"Unknown action {action}"}
|
||||
```
|
||||
|
||||
3) Open the Plugins page in the UI, click the refresh icon to reload discovery, then configure and run your plugin.
|
||||
|
||||
---
|
||||
|
||||
## Where Plugins Live
|
||||
|
||||
- Default directory: `/app/data/plugins` inside the container.
|
||||
- Override with env var: `DISPATCHARR_PLUGINS_DIR`.
|
||||
- Each plugin is a directory containing either:
|
||||
- `plugin.py` exporting a `Plugin` class, or
|
||||
- a Python package (`__init__.py`) exporting a `Plugin` class.
|
||||
|
||||
The directory name (lowercased, with spaces replaced by `_`) is used as the registry key and as the module import path (e.g. `my_plugin.plugin`).
|
||||
|
||||
---
|
||||
|
||||
## Discovery & Lifecycle
|
||||
|
||||
- Discovery runs at server startup and on-demand when:
|
||||
- Fetching the plugins list from the UI
|
||||
- Hitting `POST /api/plugins/plugins/reload/`
|
||||
- The loader imports each plugin module and instantiates `Plugin()` (see the conceptual sketch below).
|
||||
- Metadata (name, version, description) and a per-plugin settings JSON are stored in the DB.
|
||||
|
||||
Backend code:
|
||||
- Loader: `apps/plugins/loader.py`
|
||||
- API Views: `apps/plugins/api_views.py`
|
||||
- API URLs: `apps/plugins/api_urls.py`
|
||||
- Model: `apps/plugins/models.py` (stores `enabled` flag and `settings` per plugin)
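
To make the flow concrete, here is a rough conceptual sketch of what discovery does for a single plugin directory. This is illustrative only; the real implementation lives in `apps/plugins/loader.py` and differs in detail:

```
# Conceptual sketch only -- not the actual loader code.
import importlib

def discover_one(key: str) -> dict:
    # key is the lowercased directory name, e.g. "my_plugin"
    module = importlib.import_module(f"{key}.plugin")
    plugin = module.Plugin()
    return {
        "name": plugin.name,
        "version": plugin.version,
        "description": plugin.description,
    }
```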
|
||||
|
||||
---
|
||||
|
||||
## Plugin Interface
|
||||
|
||||
Export a `Plugin` class. Supported attributes and behavior:
|
||||
|
||||
- `name` (str): Human-readable name.
|
||||
- `version` (str): Semantic version string.
|
||||
- `description` (str): Short description.
|
||||
- `fields` (list): Settings schema used by the UI to render controls.
|
||||
- `actions` (list): Available actions; the UI renders a Run button for each.
|
||||
- `run(action, params, context)` (callable): Invoked when a user clicks an action.
|
||||
|
||||
### Settings Schema
|
||||
Supported field `type`s:
|
||||
- `boolean`
|
||||
- `number`
|
||||
- `string`
|
||||
- `select` (requires `options`: `[{"value": ..., "label": ...}, ...]`)
|
||||
|
||||
Common field keys:
|
||||
- `id` (str): Settings key.
|
||||
- `label` (str): Label shown in the UI.
|
||||
- `type` (str): One of the types listed above.
|
||||
- `default` (any): Default value used until saved.
|
||||
- `help_text` (str, optional): Shown under the control.
|
||||
- `options` (list, for select): List of `{value, label}`.
|
||||
|
||||
The UI automatically renders settings and persists them. The backend stores settings in `PluginConfig.settings`.
|
||||
|
||||
Read settings in `run` via `context["settings"]`.
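
For example, a minimal sketch of reading typed values inside `run`, reusing the field ids from the Quick Start plugin above (the coercion choices are illustrative):

```
def run(self, action: str, params: dict, context: dict):
    settings = context.get("settings", {}) or {}

    enabled = bool(settings.get("enabled", True))
    limit = int(settings.get("limit", 5))
    mode = settings.get("mode", "safe")

    if not enabled:
        return {"status": "skipped", "message": "Disabled via settings"}
    return {"status": "ok", "limit": limit, "mode": mode}
```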
|
||||
|
||||
### Actions
|
||||
Each action is a dict:
|
||||
- `id` (str): Unique action id.
|
||||
- `label` (str): Button label.
|
||||
- `description` (str, optional): Helper text.
|
||||
|
||||
Clicking an action calls your plugin’s `run(action, params, context)` and shows a notification with the result or error.
|
||||
|
||||
### Action Confirmation (Modal)
|
||||
Developers can request a confirmation modal per action using the `confirm` key on the action. Options:
|
||||
|
||||
- Boolean: `confirm: true` will show a default confirmation modal.
|
||||
- Object: `confirm: { required: true, title: '...', message: '...' }` to customize the modal title and message.
|
||||
|
||||
Example:
|
||||
```
|
||||
actions = [
|
||||
{
|
||||
"id": "danger_run",
|
||||
"label": "Do Something Risky",
|
||||
"description": "Runs a job that affects many records.",
|
||||
"confirm": { "required": true, "title": "Proceed?", "message": "This will modify many records." },
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Accessing Dispatcharr APIs from Plugins
|
||||
|
||||
Plugins are server-side Python code running within the Django application. You can:
|
||||
|
||||
- Import models and run queries/updates:
|
||||
```
|
||||
from apps.m3u.models import M3UAccount
|
||||
from apps.epg.models import EPGSource
|
||||
from apps.channels.models import Channel
|
||||
from core.models import CoreSettings
|
||||
```
|
||||
|
||||
- Dispatch Celery tasks for heavy work (recommended):
|
||||
```
|
||||
from apps.m3u.tasks import refresh_m3u_accounts # apps/m3u/tasks.py
|
||||
from apps.epg.tasks import refresh_all_epg_data # apps/epg/tasks.py
|
||||
|
||||
refresh_m3u_accounts.delay()
|
||||
refresh_all_epg_data.delay()
|
||||
```
|
||||
|
||||
- Send WebSocket updates:
|
||||
```
|
||||
from core.utils import send_websocket_update
|
||||
send_websocket_update('updates', 'update', {"type": "plugin", "plugin": "my_plugin", "message": "Done"})
|
||||
```
|
||||
|
||||
- Use transactions:
|
||||
```
|
||||
from django.db import transaction
|
||||
with transaction.atomic():
|
||||
# bulk updates here
|
||||
...
|
||||
```
|
||||
|
||||
- Log via provided context or standard logging:
|
||||
```
|
||||
def run(self, action, params, context):
|
||||
logger = context.get("logger") # already configured
|
||||
logger.info("running action %s", action)
|
||||
```
|
||||
|
||||
Prefer Celery tasks (`.delay()`) to keep `run` fast and non-blocking.
|
||||
|
||||
---
|
||||
|
||||
## REST Endpoints (for UI and tooling)
|
||||
|
||||
- List plugins: `GET /api/plugins/plugins/`
|
||||
- Response: `{ "plugins": [{ key, name, version, description, enabled, fields, settings, actions }, ...] }`
|
||||
- Reload discovery: `POST /api/plugins/plugins/reload/`
|
||||
- Import plugin: `POST /api/plugins/plugins/import/` with form-data file field `file`
|
||||
- Update settings: `POST /api/plugins/plugins/<key>/settings/` with `{"settings": {...}}`
|
||||
- Run action: `POST /api/plugins/plugins/<key>/run/` with `{"action": "id", "params": {...}}`
|
||||
- Enable/disable: `POST /api/plugins/plugins/<key>/enabled/` with `{"enabled": true|false}`
|
||||
|
||||
Notes:
|
||||
- When a plugin is disabled, its actions cannot be run; the backend returns HTTP 403 (see the example below).
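
As an illustration, the endpoints can also be exercised from a script. The sketch below uses the `requests` library; the base URL, the plugin key `my_plugin`, and bearer-token auth are assumptions made for the example:

```
import requests

BASE = "http://localhost:9191"                          # example base URL
HEADERS = {"Authorization": "Bearer <access-token>"}    # assumes JWT bearer auth

# Enable the plugin, then run one of its actions
requests.post(f"{BASE}/api/plugins/plugins/my_plugin/enabled/",
              json={"enabled": True}, headers=HEADERS)

resp = requests.post(f"{BASE}/api/plugins/plugins/my_plugin/run/",
                     json={"action": "do_work", "params": {}}, headers=HEADERS)
print(resp.status_code, resp.json())
```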
|
||||
|
||||
---
|
||||
|
||||
## Importing Plugins
|
||||
|
||||
- In the UI, click the Import button on the Plugins page and upload a `.zip` containing a plugin folder.
|
||||
- The archive should contain either `plugin.py` or a Python package (`__init__.py`); see the packaging sketch below.
|
||||
- On success, the UI shows the plugin name/description and lets you enable it immediately (plugins are disabled by default).
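
For reference, a plugin folder can be packaged into an importable archive with a few lines of Python (the paths are illustrative):

```
import zipfile
from pathlib import Path

src = Path("my_plugin")  # folder containing plugin.py (or __init__.py)
with zipfile.ZipFile("my_plugin.zip", "w", zipfile.ZIP_DEFLATED) as zf:
    for path in src.rglob("*"):
        if path.is_file():
            zf.write(path, path.relative_to(src.parent))
```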
|
||||
|
||||
---
|
||||
|
||||
## Enabling / Disabling Plugins
|
||||
|
||||
- Each plugin has a persisted `enabled` flag (default: disabled) and `ever_enabled` flag in the DB (`apps/plugins/models.py`).
|
||||
- New plugins are disabled by default and require an explicit enable.
|
||||
- The first time a plugin is enabled, the UI shows a trust warning modal explaining that plugins can run arbitrary server-side code.
|
||||
- The Plugins page shows a toggle in the card header. Turning it off dims the card and disables the Run button.
|
||||
- Backend enforcement: Attempts to run an action for a disabled plugin return HTTP 403.
|
||||
|
||||
---
|
||||
|
||||
## Example: Refresh All Sources Plugin
|
||||
|
||||
Path: `data/plugins/refresh_all/plugin.py`
|
||||
|
||||
```
|
||||
class Plugin:
|
||||
name = "Refresh All Sources"
|
||||
version = "1.0.0"
|
||||
description = "Force refresh all M3U accounts and EPG sources."
|
||||
|
||||
fields = [
|
||||
{"id": "confirm", "label": "Require confirmation", "type": "boolean", "default": True,
|
||||
"help_text": "If enabled, the UI should ask before running."}
|
||||
]
|
||||
|
||||
actions = [
|
||||
{"id": "refresh_all", "label": "Refresh All M3Us and EPGs",
|
||||
"description": "Queues background refresh for all active M3U accounts and EPG sources."}
|
||||
]
|
||||
|
||||
def run(self, action: str, params: dict, context: dict):
|
||||
if action == "refresh_all":
|
||||
from apps.m3u.tasks import refresh_m3u_accounts
|
||||
from apps.epg.tasks import refresh_all_epg_data
|
||||
refresh_m3u_accounts.delay()
|
||||
refresh_all_epg_data.delay()
|
||||
return {"status": "queued", "message": "Refresh jobs queued"}
|
||||
return {"status": "error", "message": f"Unknown action: {action}"}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Keep `run` short and schedule heavy operations via Celery tasks.
|
||||
- Validate and sanitize `params` received from the UI (see the sketch after this list).
|
||||
- Use database transactions for bulk or related updates.
|
||||
- Log actionable messages for troubleshooting.
|
||||
- Only write files under `/data` or `/app/data` paths.
|
||||
- Treat plugins as trusted code: they run with full app permissions.
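
Putting the first few points together, a hedged sketch of an action handler that validates its input and defers the heavy lifting to Celery (the `dry_run` parameter is hypothetical; the task import is borrowed from the examples above):

```
def run(self, action: str, params: dict, context: dict):
    logger = context.get("logger")

    if action == "refresh_all":
        # Validate params before acting on them
        dry_run = bool(params.get("dry_run", False))
        if dry_run:
            return {"status": "ok", "message": "Dry run only, nothing queued"}

        # Keep run() fast: queue the heavy work instead of doing it inline
        from apps.m3u.tasks import refresh_m3u_accounts
        refresh_m3u_accounts.delay()
        logger.info("Queued M3U refresh")
        return {"status": "queued"}

    return {"status": "error", "message": f"Unknown action: {action}"}
```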
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Plugin not listed: ensure the folder exists and contains `plugin.py` with a `Plugin` class.
|
||||
- Import errors: the folder name is the import name; avoid spaces or exotic characters.
|
||||
- No confirmation prompt: include a boolean field with `id: "confirm"` and make sure its value (or its default) is true.
|
||||
- HTTP 403 on run: the plugin is disabled; enable it from the toggle or via the `enabled/` endpoint.
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
- Keep dependencies minimal. Vendoring small helpers into the plugin folder is acceptable.
|
||||
- Use the existing task and model APIs where possible; propose extensions if you need new capabilities.
|
||||
|
||||
---
|
||||
|
||||
## Internals Reference
|
||||
|
||||
- Loader: `apps/plugins/loader.py`
|
||||
- API Views: `apps/plugins/api_views.py`
|
||||
- API URLs: `apps/plugins/api_urls.py`
|
||||
- Model: `apps/plugins/models.py`
|
||||
- Frontend page: `frontend/src/pages/Plugins.jsx`
|
||||
- Sidebar entry: `frontend/src/components/Sidebar.jsx`
|
||||
|
|
@ -22,6 +22,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
|
|||
📊 **Real-Time Stats Dashboard** — Live insights into stream health and client activity\
|
||||
🧠 **EPG Auto-Match** — Match program data to channels automatically\
|
||||
⚙️ **Streamlink + FFmpeg Support** — Flexible backend options for streaming and recording\
|
||||
🎬 **VOD Management** — Full Video on Demand support with movies and TV series\
|
||||
🧼 **UI & UX Enhancements** — Smoother, faster, more responsive interface\
|
||||
🛁 **Output Compatibility** — HDHomeRun, M3U, and XMLTV EPG support for Plex, Jellyfin, and more
|
||||
|
||||
|
|
@ -31,6 +32,7 @@ Dispatcharr has officially entered **BETA**, bringing powerful new features and
|
|||
|
||||
✅ **Full IPTV Control** — Import, organize, proxy, and monitor IPTV streams on your own terms\
|
||||
✅ **Smart Playlist Handling** — M3U import, filtering, grouping, and failover support\
|
||||
✅ **VOD Content Management** — Organize movies and TV series with metadata and streaming\
|
||||
✅ **Reliable EPG Integration** — Match and manage TV guide data with ease\
|
||||
✅ **Clean & Responsive Interface** — Modern design that gets out of your way\
|
||||
✅ **Fully Self-Hosted** — Total control, zero reliance on third-party services
|
||||
|
|
|
|||
|
|
@ -20,30 +20,88 @@ class TokenObtainPairView(TokenObtainPairView):
|
|||
def post(self, request, *args, **kwargs):
|
||||
# Custom logic here
|
||||
if not network_access_allowed(request, "UI"):
|
||||
# Log blocked login attempt due to network restrictions
|
||||
from core.utils import log_system_event
|
||||
username = request.data.get("username", 'unknown')
|
||||
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
|
||||
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
|
||||
log_system_event(
|
||||
event_type='login_failed',
|
||||
user=username,
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
reason='Network access denied',
|
||||
)
|
||||
return Response({"error": "Forbidden"}, status=status.HTTP_403_FORBIDDEN)
|
||||
|
||||
# Get the response from the parent class first
|
||||
response = super().post(request, *args, **kwargs)
|
||||
username = request.data.get("username")
|
||||
|
||||
# If login was successful, update last_login
|
||||
if response.status_code == 200:
|
||||
username = request.data.get("username")
|
||||
if username:
|
||||
from django.utils import timezone
|
||||
try:
|
||||
user = User.objects.get(username=username)
|
||||
user.last_login = timezone.now()
|
||||
user.save(update_fields=['last_login'])
|
||||
except User.DoesNotExist:
|
||||
pass # User doesn't exist, but login somehow succeeded
|
||||
# Log login attempt
|
||||
from core.utils import log_system_event
|
||||
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
|
||||
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
|
||||
|
||||
return response
|
||||
try:
|
||||
response = super().post(request, *args, **kwargs)
|
||||
|
||||
# If login was successful, update last_login and log success
|
||||
if response.status_code == 200:
|
||||
if username:
|
||||
from django.utils import timezone
|
||||
try:
|
||||
user = User.objects.get(username=username)
|
||||
user.last_login = timezone.now()
|
||||
user.save(update_fields=['last_login'])
|
||||
|
||||
# Log successful login
|
||||
log_system_event(
|
||||
event_type='login_success',
|
||||
user=username,
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
)
|
||||
except User.DoesNotExist:
|
||||
pass # User doesn't exist, but login somehow succeeded
|
||||
else:
|
||||
# Log failed login attempt
|
||||
log_system_event(
|
||||
event_type='login_failed',
|
||||
user=username or 'unknown',
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
reason='Invalid credentials',
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
# If parent class raises an exception (e.g., validation error), log failed attempt
|
||||
log_system_event(
|
||||
event_type='login_failed',
|
||||
user=username or 'unknown',
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
reason=f'Authentication error: {str(e)[:100]}',
|
||||
)
|
||||
raise # Re-raise the exception to maintain normal error flow
|
||||
|
||||
|
||||
class TokenRefreshView(TokenRefreshView):
|
||||
def post(self, request, *args, **kwargs):
|
||||
# Custom logic here
|
||||
if not network_access_allowed(request, "UI"):
|
||||
# Log blocked token refresh attempt due to network restrictions
|
||||
from core.utils import log_system_event
|
||||
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
|
||||
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
|
||||
log_system_event(
|
||||
event_type='login_failed',
|
||||
user='token_refresh',
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
reason='Network access denied (token refresh)',
|
||||
)
|
||||
return Response({"error": "Unauthorized"}, status=status.HTTP_403_FORBIDDEN)
|
||||
|
||||
return super().post(request, *args, **kwargs)
|
||||
|
|
@ -80,6 +138,15 @@ def initialize_superuser(request):
|
|||
class AuthViewSet(viewsets.ViewSet):
|
||||
"""Handles user login and logout"""
|
||||
|
||||
def get_permissions(self):
|
||||
"""
|
||||
Login doesn't require auth, but logout does
|
||||
"""
|
||||
if self.action == 'logout':
|
||||
from rest_framework.permissions import IsAuthenticated
|
||||
return [IsAuthenticated()]
|
||||
return []
|
||||
|
||||
@swagger_auto_schema(
|
||||
operation_description="Authenticate and log in a user",
|
||||
request_body=openapi.Schema(
|
||||
|
|
@ -100,6 +167,11 @@ class AuthViewSet(viewsets.ViewSet):
|
|||
password = request.data.get("password")
|
||||
user = authenticate(request, username=username, password=password)
|
||||
|
||||
# Get client info for logging
|
||||
from core.utils import log_system_event
|
||||
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
|
||||
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
|
||||
|
||||
if user:
|
||||
login(request, user)
|
||||
# Update last_login timestamp
|
||||
|
|
@ -107,6 +179,14 @@ class AuthViewSet(viewsets.ViewSet):
|
|||
user.last_login = timezone.now()
|
||||
user.save(update_fields=['last_login'])
|
||||
|
||||
# Log successful login
|
||||
log_system_event(
|
||||
event_type='login_success',
|
||||
user=username,
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
)
|
||||
|
||||
return Response(
|
||||
{
|
||||
"message": "Login successful",
|
||||
|
|
@ -118,6 +198,15 @@ class AuthViewSet(viewsets.ViewSet):
|
|||
},
|
||||
}
|
||||
)
|
||||
|
||||
# Log failed login attempt
|
||||
log_system_event(
|
||||
event_type='login_failed',
|
||||
user=username or 'unknown',
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
reason='Invalid credentials',
|
||||
)
|
||||
return Response({"error": "Invalid credentials"}, status=400)
|
||||
|
||||
@swagger_auto_schema(
|
||||
|
|
@ -126,6 +215,19 @@ class AuthViewSet(viewsets.ViewSet):
|
|||
)
|
||||
def logout(self, request):
|
||||
"""Logs out the authenticated user"""
|
||||
# Log logout event before actually logging out
|
||||
from core.utils import log_system_event
|
||||
username = request.user.username if request.user and request.user.is_authenticated else 'unknown'
|
||||
client_ip = request.META.get('REMOTE_ADDR', 'unknown')
|
||||
user_agent = request.META.get('HTTP_USER_AGENT', 'unknown')
|
||||
|
||||
log_system_event(
|
||||
event_type='logout',
|
||||
user=username,
|
||||
client_ip=client_ip,
|
||||
user_agent=user_agent,
|
||||
)
|
||||
|
||||
logout(request)
|
||||
return Response({"message": "Logout successful"})
|
||||
|
||||
|
|
@ -134,7 +236,7 @@ class AuthViewSet(viewsets.ViewSet):
|
|||
class UserViewSet(viewsets.ModelViewSet):
|
||||
"""Handles CRUD operations for Users"""
|
||||
|
||||
queryset = User.objects.all()
|
||||
queryset = User.objects.all().prefetch_related('channel_profiles')
|
||||
serializer_class = UserSerializer
|
||||
|
||||
def get_permissions(self):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 5.2.4 on 2025-09-02 14:30
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('accounts', '0002_remove_user_channel_groups_user_channel_profiles_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='user',
|
||||
name='custom_properties',
|
||||
field=models.JSONField(blank=True, default=dict, null=True),
|
||||
),
|
||||
]
|
||||
|
|
@ -21,7 +21,7 @@ class User(AbstractUser):
|
|||
related_name="users",
|
||||
)
|
||||
user_level = models.IntegerField(default=UserLevel.STREAMER)
|
||||
custom_properties = models.TextField(null=True, blank=True)
|
||||
custom_properties = models.JSONField(default=dict, blank=True, null=True)
|
||||
|
||||
def __str__(self):
|
||||
return self.username
|
||||
|
|
|
|||
|
|
@ -1,11 +1,10 @@
|
|||
from django.urls import path, include
|
||||
from django.urls import path, include, re_path
|
||||
from drf_yasg.views import get_schema_view
|
||||
from drf_yasg import openapi
|
||||
from rest_framework.permissions import AllowAny
|
||||
|
||||
app_name = 'api'
|
||||
|
||||
# Configure Swagger Schema
|
||||
schema_view = get_schema_view(
|
||||
openapi.Info(
|
||||
title="Dispatcharr API",
|
||||
|
|
@ -26,6 +25,9 @@ urlpatterns = [
|
|||
path('hdhr/', include(('apps.hdhr.api_urls', 'hdhr'), namespace='hdhr')),
|
||||
path('m3u/', include(('apps.m3u.api_urls', 'm3u'), namespace='m3u')),
|
||||
path('core/', include(('core.api_urls', 'core'), namespace='core')),
|
||||
path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')),
|
||||
path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')),
|
||||
path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')),
|
||||
# path('output/', include(('apps.output.api_urls', 'output'), namespace='output')),
|
||||
#path('player/', include(('apps.player.api_urls', 'player'), namespace='player')),
|
||||
#path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')),
|
||||
|
|
@ -34,7 +36,7 @@ urlpatterns = [
|
|||
|
||||
|
||||
# Swagger Documentation api_urls
|
||||
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
|
||||
re_path(r'^swagger/?$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
|
||||
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
|
||||
path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'),
|
||||
]
|
||||
|
|
|
|||
0 apps/backups/__init__.py Normal file
18 apps/backups/api_urls.py Normal file
|
|
@ -0,0 +1,18 @@
|
|||
from django.urls import path
|
||||
|
||||
from . import api_views
|
||||
|
||||
app_name = "backups"
|
||||
|
||||
urlpatterns = [
|
||||
path("", api_views.list_backups, name="backup-list"),
|
||||
path("create/", api_views.create_backup, name="backup-create"),
|
||||
path("upload/", api_views.upload_backup, name="backup-upload"),
|
||||
path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
|
||||
path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
|
||||
path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
|
||||
path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
|
||||
path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
|
||||
path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
|
||||
path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
|
||||
]
|
||||
364 apps/backups/api_views.py Normal file
|
|
@ -0,0 +1,364 @@
|
|||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from celery.result import AsyncResult
|
||||
from django.conf import settings
|
||||
from django.http import HttpResponse, StreamingHttpResponse, Http404
|
||||
from rest_framework import status
|
||||
from rest_framework.decorators import api_view, permission_classes, parser_classes
|
||||
from rest_framework.permissions import IsAdminUser, AllowAny
|
||||
from rest_framework.parsers import MultiPartParser, FormParser
|
||||
from rest_framework.response import Response
|
||||
|
||||
from . import services
|
||||
from .tasks import create_backup_task, restore_backup_task
|
||||
from .scheduler import get_schedule_settings, update_schedule_settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _generate_task_token(task_id: str) -> str:
|
||||
"""Generate a signed token for task status access without auth."""
|
||||
secret = settings.SECRET_KEY.encode()
|
||||
return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
|
||||
|
||||
|
||||
def _verify_task_token(task_id: str, token: str) -> bool:
|
||||
"""Verify a task token is valid."""
|
||||
expected = _generate_task_token(task_id)
|
||||
return hmac.compare_digest(expected, token)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def list_backups(request):
|
||||
"""List all available backup files."""
|
||||
try:
|
||||
backups = services.list_backups()
|
||||
return Response(backups, status=status.HTTP_200_OK)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to list backups: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def create_backup(request):
|
||||
"""Create a new backup (async via Celery)."""
|
||||
try:
|
||||
task = create_backup_task.delay()
|
||||
return Response(
|
||||
{
|
||||
"detail": "Backup started",
|
||||
"task_id": task.id,
|
||||
"task_token": _generate_task_token(task.id),
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to start backup: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def backup_status(request, task_id):
|
||||
"""Check the status of a backup/restore task.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid task_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (for restore when session is invalidated)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(task_id, token):
|
||||
return Response(
|
||||
{"detail": "Invalid task token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
result = AsyncResult(task_id)
|
||||
|
||||
if result.ready():
|
||||
task_result = result.get()
|
||||
if task_result.get("status") == "completed":
|
||||
return Response({
|
||||
"state": "completed",
|
||||
"result": task_result,
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": task_result.get("error", "Unknown error"),
|
||||
})
|
||||
elif result.failed():
|
||||
return Response({
|
||||
"state": "failed",
|
||||
"error": str(result.result),
|
||||
})
|
||||
else:
|
||||
return Response({
|
||||
"state": result.state.lower(),
|
||||
})
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to get task status: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def get_download_token(request, filename):
|
||||
"""Get a signed token for downloading a backup file."""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
token = _generate_task_token(filename)
|
||||
return Response({"token": token})
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to generate token: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([AllowAny])
|
||||
def download_backup(request, filename):
|
||||
"""Download a backup file.
|
||||
|
||||
Requires either:
|
||||
- Valid admin authentication, OR
|
||||
- Valid download_token query parameter
|
||||
"""
|
||||
# Check for token-based auth (avoids CORS preflight issues)
|
||||
token = request.query_params.get("token")
|
||||
if token:
|
||||
if not _verify_task_token(filename, token):
|
||||
return Response(
|
||||
{"detail": "Invalid download token"},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
else:
|
||||
# Fall back to admin auth check
|
||||
if not request.user.is_authenticated or not request.user.is_staff:
|
||||
return Response(
|
||||
{"detail": "Authentication required"},
|
||||
status=status.HTTP_401_UNAUTHORIZED,
|
||||
)
|
||||
|
||||
try:
|
||||
# Security: prevent path traversal by checking for suspicious characters
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = (backup_dir / filename).resolve()
|
||||
|
||||
# Security: ensure the resolved path is still within backup_dir
|
||||
if not str(backup_file).startswith(str(backup_dir.resolve())):
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
if not backup_file.exists() or not backup_file.is_file():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
file_size = backup_file.stat().st_size
|
||||
|
||||
# Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly
|
||||
# Fall back to streaming for non-nginx deployments
|
||||
use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true"
|
||||
logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}")
|
||||
|
||||
if use_nginx_accel:
|
||||
# X-Accel-Redirect: Django returns immediately, nginx serves file
|
||||
logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}")
|
||||
response = HttpResponse()
|
||||
response["X-Accel-Redirect"] = f"/protected-backups/{filename}"
|
||||
response["Content-Type"] = "application/zip"
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
|
||||
else:
|
||||
# Streaming fallback for non-nginx deployments
|
||||
logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)")
|
||||
def file_iterator(file_path, chunk_size=2 * 1024 * 1024):
|
||||
with open(file_path, "rb") as f:
|
||||
while chunk := f.read(chunk_size):
|
||||
yield chunk
|
||||
|
||||
response = StreamingHttpResponse(
|
||||
file_iterator(backup_file),
|
||||
content_type="application/zip",
|
||||
)
|
||||
response["Content-Length"] = file_size
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Download failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["DELETE"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def delete_backup(request, filename):
|
||||
"""Delete a backup file."""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
services.delete_backup(filename)
|
||||
return Response(
|
||||
{"detail": "Backup deleted successfully"},
|
||||
status=status.HTTP_204_NO_CONTENT,
|
||||
)
|
||||
except FileNotFoundError:
|
||||
raise Http404("Backup file not found")
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Delete failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
@parser_classes([MultiPartParser, FormParser])
|
||||
def upload_backup(request):
|
||||
"""Upload a backup file for restoration."""
|
||||
uploaded = request.FILES.get("file")
|
||||
if not uploaded:
|
||||
return Response(
|
||||
{"detail": "No file uploaded"},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
try:
|
||||
backup_dir = services.get_backup_dir()
|
||||
filename = uploaded.name or "uploaded-backup.zip"
|
||||
|
||||
# Ensure unique filename
|
||||
backup_file = backup_dir / filename
|
||||
counter = 1
|
||||
while backup_file.exists():
|
||||
name_parts = filename.rsplit(".", 1)
|
||||
if len(name_parts) == 2:
|
||||
backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}"
|
||||
else:
|
||||
backup_file = backup_dir / f"{filename}-{counter}"
|
||||
counter += 1
|
||||
|
||||
# Save uploaded file
|
||||
with backup_file.open("wb") as f:
|
||||
for chunk in uploaded.chunks():
|
||||
f.write(chunk)
|
||||
|
||||
return Response(
|
||||
{
|
||||
"detail": "Backup uploaded successfully",
|
||||
"filename": backup_file.name,
|
||||
},
|
||||
status=status.HTTP_201_CREATED,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Upload failed: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["POST"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def restore_backup(request, filename):
|
||||
"""Restore from a backup file (async via Celery). WARNING: This will flush the database!"""
|
||||
try:
|
||||
# Security: prevent path traversal
|
||||
if ".." in filename or "/" in filename or "\\" in filename:
|
||||
raise Http404("Invalid filename")
|
||||
|
||||
backup_dir = services.get_backup_dir()
|
||||
backup_file = backup_dir / filename
|
||||
|
||||
if not backup_file.exists():
|
||||
raise Http404("Backup file not found")
|
||||
|
||||
task = restore_backup_task.delay(filename)
|
||||
return Response(
|
||||
{
|
||||
"detail": "Restore started",
|
||||
"task_id": task.id,
|
||||
"task_token": _generate_task_token(task.id),
|
||||
},
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
)
|
||||
except Http404:
|
||||
raise
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to start restore: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["GET"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def get_schedule(request):
|
||||
"""Get backup schedule settings."""
|
||||
try:
|
||||
settings = get_schedule_settings()
|
||||
return Response(settings)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to get schedule: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@api_view(["PUT"])
|
||||
@permission_classes([IsAdminUser])
|
||||
def update_schedule(request):
|
||||
"""Update backup schedule settings."""
|
||||
try:
|
||||
settings = update_schedule_settings(request.data)
|
||||
return Response(settings)
|
||||
except ValueError as e:
|
||||
return Response(
|
||||
{"detail": str(e)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
except Exception as e:
|
||||
return Response(
|
||||
{"detail": f"Failed to update schedule: {str(e)}"},
|
||||
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
)
|
||||
7 apps/backups/apps.py Normal file
|
|
@ -0,0 +1,7 @@
|
|||
from django.apps import AppConfig
|
||||
|
||||
|
||||
class BackupsConfig(AppConfig):
|
||||
default_auto_field = "django.db.models.BigAutoField"
|
||||
name = "apps.backups"
|
||||
verbose_name = "Backups"
|
||||
0 apps/backups/migrations/__init__.py Normal file
0 apps/backups/models.py Normal file
202 apps/backups/scheduler.py Normal file
|
|
@ -0,0 +1,202 @@
|
|||
import json
|
||||
import logging
|
||||
|
||||
from django_celery_beat.models import PeriodicTask, CrontabSchedule
|
||||
|
||||
from core.models import CoreSettings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task"
|
||||
|
||||
DEFAULTS = {
|
||||
"schedule_enabled": True,
|
||||
"schedule_frequency": "daily",
|
||||
"schedule_time": "03:00",
|
||||
"schedule_day_of_week": 0, # Sunday
|
||||
"retention_count": 3,
|
||||
"schedule_cron_expression": "",
|
||||
}
|
||||
|
||||
|
||||
def _get_backup_settings():
|
||||
"""Get all backup settings from CoreSettings grouped JSON."""
|
||||
try:
|
||||
settings_obj = CoreSettings.objects.get(key="backup_settings")
|
||||
return settings_obj.value if isinstance(settings_obj.value, dict) else DEFAULTS.copy()
|
||||
except CoreSettings.DoesNotExist:
|
||||
return DEFAULTS.copy()
|
||||
|
||||
|
||||
def _update_backup_settings(updates: dict) -> None:
|
||||
"""Update backup settings in the grouped JSON."""
|
||||
obj, created = CoreSettings.objects.get_or_create(
|
||||
key="backup_settings",
|
||||
defaults={"name": "Backup Settings", "value": DEFAULTS.copy()}
|
||||
)
|
||||
current = obj.value if isinstance(obj.value, dict) else {}
|
||||
current.update(updates)
|
||||
obj.value = current
|
||||
obj.save()
|
||||
|
||||
|
||||
def get_schedule_settings() -> dict:
|
||||
"""Get all backup schedule settings."""
|
||||
settings = _get_backup_settings()
|
||||
return {
|
||||
"enabled": bool(settings.get("schedule_enabled", DEFAULTS["schedule_enabled"])),
|
||||
"frequency": str(settings.get("schedule_frequency", DEFAULTS["schedule_frequency"])),
|
||||
"time": str(settings.get("schedule_time", DEFAULTS["schedule_time"])),
|
||||
"day_of_week": int(settings.get("schedule_day_of_week", DEFAULTS["schedule_day_of_week"])),
|
||||
"retention_count": int(settings.get("retention_count", DEFAULTS["retention_count"])),
|
||||
"cron_expression": str(settings.get("schedule_cron_expression", DEFAULTS["schedule_cron_expression"])),
|
||||
}
|
||||
|
||||
|
||||
def update_schedule_settings(data: dict) -> dict:
|
||||
"""Update backup schedule settings and sync the PeriodicTask."""
|
||||
# Validate
|
||||
if "frequency" in data and data["frequency"] not in ("daily", "weekly"):
|
||||
raise ValueError("frequency must be 'daily' or 'weekly'")
|
||||
|
||||
if "time" in data:
|
||||
try:
|
||||
hour, minute = data["time"].split(":")
|
||||
int(hour)
|
||||
int(minute)
|
||||
except (ValueError, AttributeError):
|
||||
raise ValueError("time must be in HH:MM format")
|
||||
|
||||
if "day_of_week" in data:
|
||||
day = int(data["day_of_week"])
|
||||
if day < 0 or day > 6:
|
||||
raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)")
|
||||
|
||||
if "retention_count" in data:
|
||||
count = int(data["retention_count"])
|
||||
if count < 0:
|
||||
raise ValueError("retention_count must be >= 0")
|
||||
|
||||
# Update settings with proper key names
|
||||
updates = {}
|
||||
if "enabled" in data:
|
||||
updates["schedule_enabled"] = bool(data["enabled"])
|
||||
if "frequency" in data:
|
||||
updates["schedule_frequency"] = str(data["frequency"])
|
||||
if "time" in data:
|
||||
updates["schedule_time"] = str(data["time"])
|
||||
if "day_of_week" in data:
|
||||
updates["schedule_day_of_week"] = int(data["day_of_week"])
|
||||
if "retention_count" in data:
|
||||
updates["retention_count"] = int(data["retention_count"])
|
||||
if "cron_expression" in data:
|
||||
updates["schedule_cron_expression"] = str(data["cron_expression"])
|
||||
|
||||
_update_backup_settings(updates)
|
||||
|
||||
# Sync the periodic task
|
||||
_sync_periodic_task()
|
||||
|
||||
return get_schedule_settings()
|
||||
|
||||
|
||||
def _sync_periodic_task() -> None:
|
||||
"""Create, update, or delete the scheduled backup task based on settings."""
|
||||
settings = get_schedule_settings()
|
||||
|
||||
if not settings["enabled"]:
|
||||
# Delete the task if it exists
|
||||
task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first()
|
||||
if task:
|
||||
old_crontab = task.crontab
|
||||
task.delete()
|
||||
_cleanup_orphaned_crontab(old_crontab)
|
||||
logger.info("Backup schedule disabled, removed periodic task")
|
||||
return
|
||||
|
||||
# Get old crontab before creating new one
|
||||
old_crontab = None
|
||||
try:
|
||||
old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME)
|
||||
old_crontab = old_task.crontab
|
||||
except PeriodicTask.DoesNotExist:
|
||||
pass
|
||||
|
||||
# Check if using cron expression (advanced mode)
|
||||
if settings["cron_expression"]:
|
||||
# Parse cron expression: "minute hour day month weekday"
|
||||
try:
|
||||
parts = settings["cron_expression"].split()
|
||||
if len(parts) != 5:
|
||||
raise ValueError("Cron expression must have 5 parts: minute hour day month weekday")
|
||||
|
||||
minute, hour, day_of_month, month_of_year, day_of_week = parts
|
||||
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week=day_of_week,
|
||||
day_of_month=day_of_month,
|
||||
month_of_year=month_of_year,
|
||||
timezone=CoreSettings.get_system_time_zone(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}")
|
||||
raise ValueError(f"Invalid cron expression: {e}")
|
||||
else:
|
||||
# Use simple frequency-based scheduling
|
||||
# Parse time
|
||||
hour, minute = settings["time"].split(":")
|
||||
|
||||
# Build crontab based on frequency
|
||||
system_tz = CoreSettings.get_system_time_zone()
|
||||
if settings["frequency"] == "daily":
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week="*",
|
||||
day_of_month="*",
|
||||
month_of_year="*",
|
||||
timezone=system_tz,
|
||||
)
|
||||
else: # weekly
|
||||
crontab, _ = CrontabSchedule.objects.get_or_create(
|
||||
minute=minute,
|
||||
hour=hour,
|
||||
day_of_week=str(settings["day_of_week"]),
|
||||
day_of_month="*",
|
||||
month_of_year="*",
|
||||
timezone=system_tz,
|
||||
)
|
||||
|
||||
# Create or update the periodic task
|
||||
task, created = PeriodicTask.objects.update_or_create(
|
||||
name=BACKUP_SCHEDULE_TASK_NAME,
|
||||
defaults={
|
||||
"task": "apps.backups.tasks.scheduled_backup_task",
|
||||
"crontab": crontab,
|
||||
"enabled": True,
|
||||
"kwargs": json.dumps({"retention_count": settings["retention_count"]}),
|
||||
},
|
||||
)
|
||||
|
||||
# Clean up old crontab if it changed and is orphaned
|
||||
if old_crontab and old_crontab.id != crontab.id:
|
||||
_cleanup_orphaned_crontab(old_crontab)
|
||||
|
||||
action = "Created" if created else "Updated"
|
||||
logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}")
|
||||
|
||||
|
||||
def _cleanup_orphaned_crontab(crontab_schedule):
|
||||
"""Delete old CrontabSchedule if no other tasks are using it."""
|
||||
if crontab_schedule is None:
|
||||
return
|
||||
|
||||
# Check if any other tasks are using this crontab
|
||||
if PeriodicTask.objects.filter(crontab=crontab_schedule).exists():
|
||||
logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting")
|
||||
return
|
||||
|
||||
logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}")
|
||||
crontab_schedule.delete()
|
||||
350 apps/backups/services.py Normal file
|
|
@ -0,0 +1,350 @@
|
|||
import datetime
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from zipfile import ZipFile, ZIP_DEFLATED
|
||||
import logging
|
||||
import pytz
|
||||
|
||||
from django.conf import settings
|
||||
from core.models import CoreSettings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_backup_dir() -> Path:
|
||||
"""Get the backup directory, creating it if necessary."""
|
||||
backup_dir = Path(settings.BACKUP_ROOT)
|
||||
backup_dir.mkdir(parents=True, exist_ok=True)
|
||||
return backup_dir
|
||||
|
||||
|
||||
def _is_postgresql() -> bool:
|
||||
"""Check if we're using PostgreSQL."""
|
||||
return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql"
|
||||
|
||||
|
||||
def _get_pg_env() -> dict:
|
||||
"""Get environment variables for PostgreSQL commands."""
|
||||
db_config = settings.DATABASES["default"]
|
||||
env = os.environ.copy()
|
||||
env["PGPASSWORD"] = db_config.get("PASSWORD", "")
|
||||
return env
|
||||
|
||||
|
||||
def _get_pg_args() -> list[str]:
|
||||
"""Get common PostgreSQL command arguments."""
|
||||
db_config = settings.DATABASES["default"]
|
||||
return [
|
||||
"-h", db_config.get("HOST", "localhost"),
|
||||
"-p", str(db_config.get("PORT", 5432)),
|
||||
"-U", db_config.get("USER", "postgres"),
|
||||
"-d", db_config.get("NAME", "dispatcharr"),
|
||||
]
|
||||
|
||||
|
||||
def _dump_postgresql(output_file: Path) -> None:
|
||||
"""Dump PostgreSQL database using pg_dump."""
|
||||
logger.info("Dumping PostgreSQL database with pg_dump...")
|
||||
|
||||
cmd = [
|
||||
"pg_dump",
|
||||
*_get_pg_args(),
|
||||
"-Fc", # Custom format for pg_restore
|
||||
"-v", # Verbose
|
||||
"-f", str(output_file),
|
||||
]
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"pg_dump failed: {result.stderr}")
|
||||
raise RuntimeError(f"pg_dump failed: {result.stderr}")
|
||||
|
||||
logger.debug(f"pg_dump output: {result.stderr}")
|
||||
|
||||
|
||||
def _clean_postgresql_schema() -> None:
|
||||
"""Drop and recreate the public schema to ensure a completely clean restore."""
|
||||
logger.info("[PG_CLEAN] Dropping and recreating public schema...")
|
||||
|
||||
# Commands to drop and recreate schema
|
||||
sql_commands = "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO public;"
|
||||
|
||||
cmd = [
|
||||
"psql",
|
||||
*_get_pg_args(),
|
||||
"-c", sql_commands,
|
||||
]
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"[PG_CLEAN] Failed to clean schema: {result.stderr}")
|
||||
raise RuntimeError(f"Failed to clean PostgreSQL schema: {result.stderr}")
|
||||
|
||||
logger.info("[PG_CLEAN] Schema cleaned successfully")
|
||||
|
||||
|
||||
def _restore_postgresql(dump_file: Path) -> None:
|
||||
"""Restore PostgreSQL database using pg_restore."""
|
||||
logger.info("[PG_RESTORE] Starting pg_restore...")
|
||||
logger.info(f"[PG_RESTORE] Dump file: {dump_file}")
|
||||
|
||||
# Drop and recreate schema to ensure a completely clean restore
|
||||
_clean_postgresql_schema()
|
||||
|
||||
pg_args = _get_pg_args()
|
||||
logger.info(f"[PG_RESTORE] Connection args: {pg_args}")
|
||||
|
||||
cmd = [
|
||||
"pg_restore",
|
||||
"--no-owner", # Skip ownership commands (we already created schema)
|
||||
*pg_args,
|
||||
"-v", # Verbose
|
||||
str(dump_file),
|
||||
]
|
||||
|
||||
logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}")
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
env=_get_pg_env(),
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
logger.info(f"[PG_RESTORE] Return code: {result.returncode}")
|
||||
|
||||
# pg_restore may return non-zero even on partial success
|
||||
# Check for actual errors vs warnings
|
||||
if result.returncode != 0:
|
||||
# Some errors during restore are expected (e.g., "does not exist" when cleaning)
|
||||
# Only fail on critical errors
|
||||
stderr = result.stderr.lower()
|
||||
if "fatal" in stderr or "could not connect" in stderr:
|
||||
logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}")
|
||||
raise RuntimeError(f"pg_restore failed: {result.stderr}")
|
||||
else:
|
||||
logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...")
|
||||
|
||||
logger.info("[PG_RESTORE] Completed successfully")
|
||||
|
||||
|
||||
def _dump_sqlite(output_file: Path) -> None:
|
||||
"""Dump SQLite database using sqlite3 .backup command."""
|
||||
logger.info("Dumping SQLite database with sqlite3 .backup...")
|
||||
db_path = Path(settings.DATABASES["default"]["NAME"])
|
||||
|
||||
if not db_path.exists():
|
||||
raise FileNotFoundError(f"SQLite database not found: {db_path}")
|
||||
|
||||
# Use sqlite3 .backup command via stdin for reliable execution
|
||||
result = subprocess.run(
|
||||
["sqlite3", str(db_path)],
|
||||
input=f".backup '{output_file}'\n",
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.error(f"sqlite3 backup failed: {result.stderr}")
|
||||
raise RuntimeError(f"sqlite3 backup failed: {result.stderr}")
|
||||
|
||||
# Verify the backup file was created
|
||||
if not output_file.exists():
|
||||
        raise RuntimeError("sqlite3 backup failed: output file not created")

    logger.info(f"sqlite3 backup completed successfully: {output_file}")


def _restore_sqlite(dump_file: Path) -> None:
    """Restore SQLite database by replacing the database file."""
    logger.info("Restoring SQLite database...")
    db_path = Path(settings.DATABASES["default"]["NAME"])
    backup_current = None

    # Backup current database before overwriting
    if db_path.exists():
        backup_current = db_path.with_suffix(".db.bak")
        shutil.copy2(db_path, backup_current)
        logger.info(f"Backed up current database to {backup_current}")

    # Ensure parent directory exists
    db_path.parent.mkdir(parents=True, exist_ok=True)

    # The backup file from _dump_sqlite is a complete SQLite database file
    # We can simply copy it over the existing database
    shutil.copy2(dump_file, db_path)

    # Verify the restore worked by checking if sqlite3 can read it
    result = subprocess.run(
        ["sqlite3", str(db_path)],
        input=".tables\n",
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"sqlite3 verification failed: {result.stderr}")
        # Try to restore from backup
        if backup_current and backup_current.exists():
            shutil.copy2(backup_current, db_path)
            logger.info("Restored original database from backup")
        raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}")

    logger.info("sqlite3 restore completed successfully")


def create_backup() -> Path:
    """
    Create a backup archive containing database dump and data directories.
    Returns the path to the created backup file.
    """
    backup_dir = get_backup_dir()

    # Use system timezone for filename (user-friendly), but keep internal timestamps as UTC
    system_tz_name = CoreSettings.get_system_time_zone()
    try:
        system_tz = pytz.timezone(system_tz_name)
        now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz)
        timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S")
    except Exception as e:
        logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC")
        timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S")

    backup_name = f"dispatcharr-backup-{timestamp}.zip"
    backup_file = backup_dir / backup_name

    logger.info(f"Creating backup: {backup_name}")

    with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir:
        temp_path = Path(temp_dir)

        # Determine database type and dump accordingly
        if _is_postgresql():
            db_dump_file = temp_path / "database.dump"
            _dump_postgresql(db_dump_file)
            db_type = "postgresql"
        else:
            db_dump_file = temp_path / "database.sqlite3"
            _dump_sqlite(db_dump_file)
            db_type = "sqlite"

        # Create ZIP archive with compression and ZIP64 support for large files
        with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file:
            # Add database dump
            zip_file.write(db_dump_file, db_dump_file.name)

            # Add metadata
            metadata = {
                "format": "dispatcharr-backup",
                "version": 2,
                "database_type": db_type,
                "database_file": db_dump_file.name,
                "created_at": datetime.datetime.now(datetime.UTC).isoformat(),
            }
            zip_file.writestr("metadata.json", json.dumps(metadata, indent=2))

    logger.info(f"Backup created successfully: {backup_file}")
    return backup_file


def restore_backup(backup_file: Path) -> None:
    """
    Restore from a backup archive.
    WARNING: This will overwrite the database!
    """
    if not backup_file.exists():
        raise FileNotFoundError(f"Backup file not found: {backup_file}")

    logger.info(f"Restoring from backup: {backup_file}")

    with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir:
        temp_path = Path(temp_dir)

        # Extract backup
        logger.debug("Extracting backup archive...")
        with ZipFile(backup_file, "r") as zip_file:
            zip_file.extractall(temp_path)

        # Read metadata
        metadata_file = temp_path / "metadata.json"
        if not metadata_file.exists():
            raise ValueError("Invalid backup: missing metadata.json")

        with open(metadata_file) as f:
            metadata = json.load(f)

        # Restore database
        _restore_database(temp_path, metadata)

    logger.info("Restore completed successfully")


def _restore_database(temp_path: Path, metadata: dict) -> None:
    """Restore database from backup."""
    db_type = metadata.get("database_type", "postgresql")
    db_file = metadata.get("database_file", "database.dump")
    dump_file = temp_path / db_file

    if not dump_file.exists():
        raise ValueError(f"Invalid backup: missing {db_file}")

    current_db_type = "postgresql" if _is_postgresql() else "sqlite"

    if db_type != current_db_type:
        raise ValueError(
            f"Database type mismatch: backup is {db_type}, "
            f"but current database is {current_db_type}"
        )

    if db_type == "postgresql":
        _restore_postgresql(dump_file)
    else:
        _restore_sqlite(dump_file)


def list_backups() -> list[dict]:
    """List all available backup files with metadata."""
    backup_dir = get_backup_dir()
    backups = []

    for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True):
        # Use UTC timezone so frontend can convert to user's local time
        created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC)
        backups.append({
            "name": backup_file.name,
            "size": backup_file.stat().st_size,
            "created": created_time.isoformat(),
        })

    return backups


def delete_backup(filename: str) -> None:
    """Delete a backup file."""
    backup_dir = get_backup_dir()
    backup_file = backup_dir / filename

    if not backup_file.exists():
        raise FileNotFoundError(f"Backup file not found: {filename}")

    if not backup_file.is_file():
        raise ValueError(f"Invalid backup file: {filename}")

    backup_file.unlink()
    logger.info(f"Deleted backup: {filename}")
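For orientation, a minimal sketch of how these service functions compose from a Django shell follows. The settings module name and the `apps.backups.services` import path are assumptions inferred from the diff context, not something this changeset defines.

# Illustrative only: assumes a configured Django project; module paths are hypothetical.
import os

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dispatcharr.settings")  # assumed settings module
django.setup()

from apps.backups import services  # path assumed from the diff context

# Create a backup, list what exists, then delete the one we just made.
backup_path = services.create_backup()
for entry in services.list_backups():
    print(entry["name"], entry["size"], entry["created"])
services.delete_backup(backup_path.name)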
apps/backups/tasks.py (new file, 106 lines)
@@ -0,0 +1,106 @@
import logging
import traceback
from celery import shared_task

from . import services

logger = logging.getLogger(__name__)


def _cleanup_old_backups(retention_count: int) -> int:
    """Delete old backups, keeping only the most recent N. Returns count deleted."""
    if retention_count <= 0:
        return 0

    backups = services.list_backups()
    if len(backups) <= retention_count:
        return 0

    # Backups are sorted newest first, so delete from the end
    to_delete = backups[retention_count:]
    deleted = 0

    for backup in to_delete:
        try:
            services.delete_backup(backup["name"])
            deleted += 1
            logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}")
        except Exception as e:
            logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}")

    return deleted


@shared_task(bind=True)
def create_backup_task(self):
    """Celery task to create a backup asynchronously."""
    try:
        logger.info(f"[BACKUP] Starting backup task {self.request.id}")
        backup_file = services.create_backup()
        logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}")
        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
        }
    except Exception as e:
        logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def restore_backup_task(self, filename: str):
    """Celery task to restore a backup asynchronously."""
    try:
        logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}")
        backup_dir = services.get_backup_dir()
        backup_file = backup_dir / filename
        logger.info(f"[RESTORE] Backup file path: {backup_file}")
        services.restore_backup(backup_file)
        logger.info(f"[RESTORE] Task {self.request.id} completed successfully")
        return {
            "status": "completed",
            "filename": filename,
        }
    except Exception as e:
        logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }


@shared_task(bind=True)
def scheduled_backup_task(self, retention_count: int = 0):
    """Celery task for scheduled backups with optional retention cleanup."""
    try:
        logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}")

        # Create backup
        backup_file = services.create_backup()
        logger.info(f"[SCHEDULED] Backup created: {backup_file.name}")

        # Cleanup old backups if retention is set
        deleted = 0
        if retention_count > 0:
            deleted = _cleanup_old_backups(retention_count)
            logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)")

        return {
            "status": "completed",
            "filename": backup_file.name,
            "size": backup_file.stat().st_size,
            "deleted_count": deleted,
        }
    except Exception as e:
        logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}")
        logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}")
        return {
            "status": "failed",
            "error": str(e),
        }
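One way to run scheduled_backup_task periodically is a Celery beat entry like the sketch below. The dotted task path and the schedule values are assumptions for illustration; the project may wire this up differently (for example via django-celery-beat).

# Illustrative Celery beat wiring for the scheduled backup task (not part of this diff).
from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    "nightly-dispatcharr-backup": {
        "task": "apps.backups.tasks.scheduled_backup_task",  # dotted path assumed from the file location
        "schedule": crontab(hour=3, minute=0),               # every day at 03:00
        "kwargs": {"retention_count": 7},                    # keep only the 7 most recent backups
    },
}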
apps/backups/tests.py (new file, 1163 lines)
File diff suppressed because it is too large.
@@ -13,7 +13,14 @@ from .api_views import (
    UpdateChannelMembershipAPIView,
    BulkUpdateChannelMembershipAPIView,
    RecordingViewSet,
    RecurringRecordingRuleViewSet,
    GetChannelStreamsAPIView,
    SeriesRulesAPIView,
    DeleteSeriesRuleAPIView,
    EvaluateSeriesRulesAPIView,
    BulkRemoveSeriesRecordingsAPIView,
    BulkDeleteUpcomingRecordingsAPIView,
    ComskipConfigAPIView,
)

app_name = 'channels'  # for DRF routing

@@ -25,6 +32,7 @@ router.register(r'channels', ChannelViewSet, basename='channel')
router.register(r'logos', LogoViewSet, basename='logo')
router.register(r'profiles', ChannelProfileViewSet, basename='profile')
router.register(r'recordings', RecordingViewSet, basename='recording')
router.register(r'recurring-rules', RecurringRecordingRuleViewSet, basename='recurring-rule')

urlpatterns = [
    # Bulk delete is a single APIView, not a ViewSet

@@ -35,6 +43,13 @@ urlpatterns = [
    path('channels/<int:channel_id>/streams/', GetChannelStreamsAPIView.as_view(), name='get_channel_streams'),
    path('profiles/<int:profile_id>/channels/<int:channel_id>/', UpdateChannelMembershipAPIView.as_view(), name='update_channel_membership'),
    path('profiles/<int:profile_id>/channels/bulk-update/', BulkUpdateChannelMembershipAPIView.as_view(), name='bulk_update_channel_membership'),
    # DVR series rules (order matters: specific routes before catch-all slug)
    path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'),
    path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'),
    path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'),
    path('series-rules/<path:tvg_id>/', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'),
    path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'),
    path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'),
]

urlpatterns += router.urls
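A hedged sketch of exercising the newly routed recurring-rules endpoint with DRF's test client follows. The /api/channels/ prefix and the payload fields mirror the router registration above and the serializer later in this diff, but the exact prefix and accepted fields are assumptions here.

# Illustrative only: hits the new recurring-rules route with DRF's APIClient.
from rest_framework.test import APIClient

client = APIClient()
# client.force_authenticate(user=some_admin_user)  # authentication is required in practice

payload = {
    "channel": 1,                 # hypothetical channel id
    "days_of_week": [0, 2, 4],    # Monday, Wednesday, Friday
    "start_time": "20:00:00",
    "end_time": "21:00:00",
    "start_date": "2025-01-01",
    "end_date": "2025-03-31",
}
response = client.post("/api/channels/recurring-rules/", payload, format="json")
print(response.status_code, response.data)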
File diff suppressed because it is too large.
@@ -0,0 +1,23 @@
# Generated by Django 5.1.6 on 2025-07-29 02:39

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0022_channel_auto_created_channel_auto_created_by_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='stream',
            name='stream_stats',
            field=models.JSONField(blank=True, help_text='JSON object containing stream statistics like video codec, resolution, etc.', null=True),
        ),
        migrations.AddField(
            model_name='stream',
            name='stream_stats_updated_at',
            field=models.DateTimeField(blank=True, db_index=True, help_text='When stream statistics were last updated', null=True),
        ),
    ]
@@ -0,0 +1,19 @@
# Generated by Django 5.2.4 on 2025-08-22 20:14

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0023_stream_stream_stats_stream_stream_stats_updated_at'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channelgroupm3uaccount',
            name='channel_group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='m3u_accounts', to='dispatcharr_channels.channelgroup'),
        ),
    ]
@@ -0,0 +1,28 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0024_alter_channelgroupm3uaccount_channel_group'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channelgroupm3uaccount',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
        migrations.AlterField(
            model_name='recording',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
        migrations.AlterField(
            model_name='stream',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
    ]
apps/channels/migrations/0026_recurringrecordingrule.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# Generated by Django 5.0.14 on 2025-09-18 14:56

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0025_alter_channelgroupm3uaccount_custom_properties_and_more'),
    ]

    operations = [
        migrations.CreateModel(
            name='RecurringRecordingRule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('days_of_week', models.JSONField(default=list)),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('enabled', models.BooleanField(default=True)),
                ('name', models.CharField(blank=True, max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recurring_rules', to='dispatcharr_channels.channel')),
            ],
            options={
                'ordering': ['channel', 'start_time'],
            },
        ),
    ]
@@ -0,0 +1,23 @@
# Generated by Django 5.2.4 on 2025-10-05 20:50

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0026_recurringrecordingrule'),
    ]

    operations = [
        migrations.AddField(
            model_name='recurringrecordingrule',
            name='end_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='recurringrecordingrule',
            name='start_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
@@ -0,0 +1,25 @@
# Generated by Django 5.2.4 on 2025-10-06 22:55

import django.utils.timezone
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0027_recurringrecordingrule_end_date_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='channel',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Timestamp when this channel was created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='channel',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, help_text='Timestamp when this channel was last updated'),
        ),
    ]
@@ -0,0 +1,54 @@
# Generated migration to backfill stream_hash for existing custom streams

from django.db import migrations
import hashlib


def backfill_custom_stream_hashes(apps, schema_editor):
    """
    Generate stream_hash for all custom streams that don't have one.
    Uses stream ID to create a stable hash that won't change when name/url is edited.
    """
    Stream = apps.get_model('dispatcharr_channels', 'Stream')

    custom_streams_without_hash = Stream.objects.filter(
        is_custom=True,
        stream_hash__isnull=True
    )

    updated_count = 0
    for stream in custom_streams_without_hash:
        # Generate a stable hash using the stream's ID
        # This ensures the hash never changes even if name/url is edited
        unique_string = f"custom_stream_{stream.id}"
        stream.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
        stream.save(update_fields=['stream_hash'])
        updated_count += 1

    if updated_count > 0:
        print(f"Backfilled stream_hash for {updated_count} custom streams")
    else:
        print("No custom streams needed stream_hash backfill")


def reverse_backfill(apps, schema_editor):
    """
    Reverse migration - clear stream_hash for custom streams.
    Note: This will break preview functionality for custom streams.
    """
    Stream = apps.get_model('dispatcharr_channels', 'Stream')

    custom_streams = Stream.objects.filter(is_custom=True)
    count = custom_streams.update(stream_hash=None)
    print(f"Cleared stream_hash for {count} custom streams")


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0028_channel_created_at_channel_updated_at'),
    ]

    operations = [
        migrations.RunPython(backfill_custom_stream_hashes, reverse_backfill),
    ]
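The stable-hash scheme used by this backfill can be reproduced in isolation; the standalone sketch below mirrors the migration's logic exactly, so it is grounded in the code above rather than an assumption.

# Standalone sketch of the stable hash for custom streams: the digest is derived
# only from the stream's primary key, so renaming the stream or editing its URL
# never changes the hash.
import hashlib

def custom_stream_hash(stream_id: int) -> str:
    unique_string = f"custom_stream_{stream_id}"
    return hashlib.sha256(unique_string.encode()).hexdigest()

assert custom_stream_hash(42) == custom_stream_hash(42)  # deterministic
assert custom_stream_hash(42) != custom_stream_hash(43)  # unique per stream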
apps/channels/migrations/0030_alter_stream_url.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-10-28 20:00

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0029_backfill_custom_stream_hashes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stream',
            name='url',
            field=models.URLField(blank=True, max_length=4096, null=True),
        ),
    ]
@@ -0,0 +1,29 @@
# Generated by Django 5.2.9 on 2026-01-09 18:19

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dispatcharr_channels', '0030_alter_stream_url'),
    ]

    operations = [
        migrations.AddField(
            model_name='channelgroupm3uaccount',
            name='is_stale',
            field=models.BooleanField(db_index=True, default=False, help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'),
        ),
        migrations.AddField(
            model_name='channelgroupm3uaccount',
            name='last_seen',
            field=models.DateTimeField(db_index=True, default=datetime.datetime.now, help_text='Last time this group was seen in the M3U source during a refresh'),
        ),
        migrations.AddField(
            model_name='stream',
            name='is_stale',
            field=models.BooleanField(db_index=True, default=False, help_text='Whether this stream is stale (not seen in recent refresh, pending deletion)'),
        ),
    ]
@@ -55,7 +55,7 @@ class Stream(models.Model):
    """

    name = models.CharField(max_length=255, default="Default Stream")
    url = models.URLField(max_length=2000, blank=True, null=True)
    url = models.URLField(max_length=4096, blank=True, null=True)
    m3u_account = models.ForeignKey(
        M3UAccount,
        on_delete=models.CASCADE,

@@ -94,7 +94,25 @@ class Stream(models.Model):
        db_index=True,
    )
    last_seen = models.DateTimeField(db_index=True, default=datetime.now)
    custom_properties = models.TextField(null=True, blank=True)
    is_stale = models.BooleanField(
        default=False,
        db_index=True,
        help_text="Whether this stream is stale (not seen in recent refresh, pending deletion)"
    )
    custom_properties = models.JSONField(default=dict, blank=True, null=True)

    # Stream statistics fields
    stream_stats = models.JSONField(
        null=True,
        blank=True,
        help_text="JSON object containing stream statistics like video codec, resolution, etc."
    )
    stream_stats_updated_at = models.DateTimeField(
        null=True,
        blank=True,
        help_text="When stream statistics were last updated",
        db_index=True
    )

    class Meta:
        # If you use m3u_account, you might do unique_together = ('name','url','m3u_account')

@@ -106,11 +124,11 @@ class Stream(models.Model):
        return self.name or self.url or f"Stream ID {self.id}"

    @classmethod
    def generate_hash_key(cls, name, url, tvg_id, keys=None):
    def generate_hash_key(cls, name, url, tvg_id, keys=None, m3u_id=None, group=None):
        if keys is None:
            keys = CoreSettings.get_m3u_hash_key().split(",")

        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id}
        stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}

        hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}

@@ -139,8 +157,14 @@ class Stream(models.Model):
        stream = cls.objects.create(**fields_to_update)
        return stream, True  # True means it was created

    # @TODO: honor stream's stream profile
    def get_stream_profile(self):
        """
        Get the stream profile for this stream.
        Uses the stream's own profile if set, otherwise returns the default.
        """
        if self.stream_profile:
            return self.stream_profile

        stream_profile = StreamProfile.objects.get(
            id=CoreSettings.get_default_stream_profile_id()
        )

@@ -290,6 +314,15 @@ class Channel(models.Model):
        help_text="The M3U account that auto-created this channel"
    )

    created_at = models.DateTimeField(
        auto_now_add=True,
        help_text="Timestamp when this channel was created"
    )
    updated_at = models.DateTimeField(
        auto_now=True,
        help_text="Timestamp when this channel was last updated"
    )

    def clean(self):
        # Enforce unique channel_number within a given group
        existing = Channel.objects.filter(

@@ -366,14 +399,17 @@ class Channel(models.Model):
            if not m3u_account:
                logger.debug(f"Stream {stream.id} has no M3U account")
                continue
            if m3u_account.is_active == False:
                logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.")
                continue

            m3u_profiles = m3u_account.profiles.all()
            m3u_profiles = m3u_account.profiles.filter(is_active=True)
            default_profile = next(
                (obj for obj in m3u_profiles if obj.is_default), None
            )

            if not default_profile:
                logger.debug(f"M3U account {m3u_account.id} has no default profile")
                logger.debug(f"M3U account {m3u_account.id} has no active default profile")
                continue

            profiles = [default_profile] + [

@@ -381,11 +417,6 @@ class Channel(models.Model):
            ]

            for profile in profiles:
                # Skip inactive profiles
                if not profile.is_active:
                    logger.debug(f"Skipping inactive profile {profile.id}")
                    continue

                has_active_profiles = True

                profile_connections_key = f"profile_connections:{profile.id}"

@@ -420,9 +451,9 @@ class Channel(models.Model):

        # No available streams - determine specific reason
        if has_streams_but_maxed_out:
            error_reason = "All M3U profiles have reached maximum connection limits"
            error_reason = "All active M3U profiles have reached maximum connection limits"
        elif has_active_profiles:
            error_reason = "No compatible profile found for any assigned stream"
            error_reason = "No compatible active profile found for any assigned stream"
        else:
            error_reason = "No active profiles found for any assigned stream"

@@ -547,12 +578,12 @@ class ChannelStream(models.Model):

class ChannelGroupM3UAccount(models.Model):
    channel_group = models.ForeignKey(
        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_account"
        ChannelGroup, on_delete=models.CASCADE, related_name="m3u_accounts"
    )
    m3u_account = models.ForeignKey(
        M3UAccount, on_delete=models.CASCADE, related_name="channel_group"
    )
    custom_properties = models.TextField(null=True, blank=True)
    custom_properties = models.JSONField(default=dict, blank=True, null=True)
    enabled = models.BooleanField(default=True)
    auto_channel_sync = models.BooleanField(
        default=False,

@@ -563,6 +594,16 @@ class ChannelGroupM3UAccount(models.Model):
        blank=True,
        help_text='Starting channel number for auto-created channels in this group'
    )
    last_seen = models.DateTimeField(
        default=datetime.now,
        db_index=True,
        help_text='Last time this group was seen in the M3U source during a refresh'
    )
    is_stale = models.BooleanField(
        default=False,
        db_index=True,
        help_text='Whether this group relationship is stale (not seen in recent refresh, pending deletion)'
    )

    class Meta:
        unique_together = ("channel_group", "m3u_account")

@@ -586,7 +627,39 @@ class Recording(models.Model):
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()
    task_id = models.CharField(max_length=255, null=True, blank=True)
    custom_properties = models.TextField(null=True, blank=True)
    custom_properties = models.JSONField(default=dict, blank=True, null=True)

    def __str__(self):
        return f"{self.channel.name} - {self.start_time} to {self.end_time}"


class RecurringRecordingRule(models.Model):
    """Rule describing a recurring manual DVR schedule."""

    channel = models.ForeignKey(
        "Channel",
        on_delete=models.CASCADE,
        related_name="recurring_rules",
    )
    days_of_week = models.JSONField(default=list)
    start_time = models.TimeField()
    end_time = models.TimeField()
    enabled = models.BooleanField(default=True)
    name = models.CharField(max_length=255, blank=True)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ["channel", "start_time"]

    def __str__(self):
        channel_name = getattr(self.channel, "name", str(self.channel_id))
        return f"Recurring rule for {channel_name}"

    def cleaned_days(self):
        try:
            return sorted({int(d) for d in (self.days_of_week or []) if 0 <= int(d) <= 6})
        except Exception:
            return []
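The extended generate_hash_key() signature above adds m3u_id and group to the candidate hash inputs. The sketch below reconstructs only the part shown in the hunk (selecting the configured parts); the final digest step is not visible in this diff, so the sha256 over the joined parts is a placeholder assumption, not the project's actual implementation.

# Illustrative reconstruction of the extended hash-key inputs (digest step assumed).
import hashlib

def sketch_hash_key(name, url, tvg_id, m3u_id=None, group=None, keys=("name", "url", "tvg_id")):
    stream_parts = {"name": name, "url": url, "tvg_id": tvg_id, "m3u_id": m3u_id, "group": group}
    hash_parts = {key: stream_parts[key] for key in keys if key in stream_parts}
    joined = "|".join(str(hash_parts[key]) for key in sorted(hash_parts))
    return hashlib.sha256(joined.encode()).hexdigest()

# Including m3u_id/group in the configured keys lets otherwise-identical streams
# from different providers or groups hash to different values.
print(sketch_hash_key("News HD", "http://example.invalid/1.ts", "news.hd", m3u_id=3, group="News"))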
@@ -1,3 +1,6 @@
import json
from datetime import datetime

from rest_framework import serializers
from .models import (
    Stream,

@@ -9,6 +12,7 @@ from .models import (
    ChannelProfile,
    ChannelProfileMembership,
    Recording,
    RecurringRecordingRule,
)
from apps.epg.serializers import EPGDataSerializer
from core.models import StreamProfile

@@ -16,6 +20,7 @@ from apps.epg.models import EPGData
from django.urls import reverse
from rest_framework import serializers
from django.utils import timezone
from core.utils import validate_flexible_url


class LogoSerializer(serializers.ModelSerializer):

@@ -32,12 +37,23 @@ class LogoSerializer(serializers.ModelSerializer):
        """Validate that the URL is unique for creation or update"""
        if self.instance and self.instance.url == value:
            return value

        if Logo.objects.filter(url=value).exists():
            raise serializers.ValidationError("A logo with this URL already exists.")

        return value

    def create(self, validated_data):
        """Handle logo creation with proper URL validation"""
        return Logo.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Handle logo updates"""
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.save()
        return instance

    def get_cache_url(self, obj):
        # return f"/api/channels/logos/{obj.id}/cache/"
        request = self.context.get("request")

@@ -57,10 +73,18 @@ class LogoSerializer(serializers.ModelSerializer):

    def get_channel_names(self, obj):
        """Get the names of channels using this logo (limited to first 5)"""
        names = []

        # Get channel names
        channels = obj.channels.all()[:5]
        names = [channel.name for channel in channels]
        if obj.channels.count() > 5:
            names.append(f"...and {obj.channels.count() - 5} more")
        for channel in channels:
            names.append(f"Channel: {channel.name}")

        # Calculate total count for "more" message
        total_count = self.get_channel_count(obj)
        if total_count > 5:
            names.append(f"...and {total_count - 5} more")

        return names

@@ -68,6 +92,12 @@ class LogoSerializer(serializers.ModelSerializer):
# Stream
#
class StreamSerializer(serializers.ModelSerializer):
    url = serializers.CharField(
        required=False,
        allow_blank=True,
        allow_null=True,
        validators=[validate_flexible_url]
    )
    stream_profile_id = serializers.PrimaryKeyRelatedField(
        queryset=StreamProfile.objects.all(),
        source="stream_profile",

@@ -89,10 +119,13 @@ class StreamSerializer(serializers.ModelSerializer):
            "current_viewers",
            "updated_at",
            "last_seen",
            "is_stale",
            "stream_profile_id",
            "is_custom",
            "channel_group",
            "stream_hash",
            "stream_stats",
            "stream_stats_updated_at",
        ]

    def get_fields(self):

@@ -114,16 +147,57 @@ class StreamSerializer(serializers.ModelSerializer):
        return fields


class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
    m3u_accounts = serializers.IntegerField(source="m3u_accounts.id", read_only=True)
    enabled = serializers.BooleanField()
    auto_channel_sync = serializers.BooleanField(default=False)
    auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
    custom_properties = serializers.JSONField(required=False)

    class Meta:
        model = ChannelGroupM3UAccount
        fields = ["m3u_accounts", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties", "is_stale", "last_seen"]

    def to_representation(self, instance):
        data = super().to_representation(instance)

        custom_props = instance.custom_properties or {}

        return data

    def to_internal_value(self, data):
        # Accept both dict and JSON string for custom_properties (for backward compatibility)
        val = data.get("custom_properties")
        if isinstance(val, str):
            try:
                data["custom_properties"] = json.loads(val)
            except Exception:
                pass

        return super().to_internal_value(data)

#
# Channel Group
#
class ChannelGroupSerializer(serializers.ModelSerializer):
    channel_count = serializers.IntegerField(read_only=True)
    m3u_account_count = serializers.IntegerField(read_only=True)
    channel_count = serializers.SerializerMethodField()
    m3u_account_count = serializers.SerializerMethodField()
    m3u_accounts = ChannelGroupM3UAccountSerializer(
        many=True,
        read_only=True
    )

    class Meta:
        model = ChannelGroup
        fields = ["id", "name", "channel_count", "m3u_account_count"]
        fields = ["id", "name", "channel_count", "m3u_account_count", "m3u_accounts"]

    def get_channel_count(self, obj):
        """Get count of channels in this group"""
        return obj.channels.count()

    def get_m3u_account_count(self, obj):
        """Get count of M3U accounts associated with this group"""
        return obj.m3u_accounts.count()


class ChannelProfileSerializer(serializers.ModelSerializer):

@@ -229,8 +303,17 @@ class ChannelSerializer(serializers.ModelSerializer):

        if include_streams:
            self.fields["streams"] = serializers.SerializerMethodField()

        return super().to_representation(instance)
            return super().to_representation(instance)
        else:
            # Fix: For PATCH/PUT responses, ensure streams are ordered
            representation = super().to_representation(instance)
            if "streams" in representation:
                representation["streams"] = list(
                    instance.streams.all()
                    .order_by("channelstream__order")
                    .values_list("id", flat=True)
                )
            return representation

    def get_logo(self, obj):
        return LogoSerializer(obj.logo).data

@@ -327,40 +410,6 @@ class ChannelSerializer(serializers.ModelSerializer):
        return None


class ChannelGroupM3UAccountSerializer(serializers.ModelSerializer):
    enabled = serializers.BooleanField()
    auto_channel_sync = serializers.BooleanField(default=False)
    auto_sync_channel_start = serializers.FloatField(allow_null=True, required=False)
    custom_properties = serializers.JSONField(required=False)

    class Meta:
        model = ChannelGroupM3UAccount
        fields = ["id", "channel_group", "enabled", "auto_channel_sync", "auto_sync_channel_start", "custom_properties"]

    def to_representation(self, instance):
        ret = super().to_representation(instance)
        # Ensure custom_properties is always a dict or None
        val = ret.get("custom_properties")
        if isinstance(val, str):
            import json
            try:
                ret["custom_properties"] = json.loads(val)
            except Exception:
                ret["custom_properties"] = None
        return ret

    def to_internal_value(self, data):
        # Accept both dict and JSON string for custom_properties
        val = data.get("custom_properties")
        if isinstance(val, str):
            import json
            try:
                data["custom_properties"] = json.loads(val)
            except Exception:
                pass
        return super().to_internal_value(data)


class RecordingSerializer(serializers.ModelSerializer):
    class Meta:
        model = Recording

@@ -368,9 +417,48 @@ class RecordingSerializer(serializers.ModelSerializer):
        read_only_fields = ["task_id"]

    def validate(self, data):
        from core.models import CoreSettings
        start_time = data.get("start_time")
        end_time = data.get("end_time")

        if start_time and timezone.is_naive(start_time):
            start_time = timezone.make_aware(start_time, timezone.get_current_timezone())
            data["start_time"] = start_time
        if end_time and timezone.is_naive(end_time):
            end_time = timezone.make_aware(end_time, timezone.get_current_timezone())
            data["end_time"] = end_time

        # If this is an EPG-based recording (program provided), apply global pre/post offsets
        try:
            cp = data.get("custom_properties") or {}
            is_epg_based = isinstance(cp, dict) and isinstance(cp.get("program"), (dict,))
        except Exception:
            is_epg_based = False

        if is_epg_based and start_time and end_time:
            try:
                pre_min = int(CoreSettings.get_dvr_pre_offset_minutes())
            except Exception:
                pre_min = 0
            try:
                post_min = int(CoreSettings.get_dvr_post_offset_minutes())
            except Exception:
                post_min = 0
            from datetime import timedelta
            try:
                if pre_min and pre_min > 0:
                    start_time = start_time - timedelta(minutes=pre_min)
            except Exception:
                pass
            try:
                if post_min and post_min > 0:
                    end_time = end_time + timedelta(minutes=post_min)
            except Exception:
                pass
            # write back adjusted times so scheduling uses them
            data["start_time"] = start_time
            data["end_time"] = end_time

        now = timezone.now()  # timezone-aware current time

        if end_time < now:

@@ -383,3 +471,56 @@ class RecordingSerializer(serializers.ModelSerializer):
            raise serializers.ValidationError("End time must be after start time.")

        return data


class RecurringRecordingRuleSerializer(serializers.ModelSerializer):
    class Meta:
        model = RecurringRecordingRule
        fields = "__all__"
        read_only_fields = ["created_at", "updated_at"]

    def validate_days_of_week(self, value):
        if not value:
            raise serializers.ValidationError("Select at least one day of the week")
        cleaned = []
        for entry in value:
            try:
                iv = int(entry)
            except (TypeError, ValueError):
                raise serializers.ValidationError("Days of week must be integers 0-6")
            if iv < 0 or iv > 6:
                raise serializers.ValidationError("Days of week must be between 0 (Monday) and 6 (Sunday)")
            cleaned.append(iv)
        return sorted(set(cleaned))

    def validate(self, attrs):
        start = attrs.get("start_time") or getattr(self.instance, "start_time", None)
        end = attrs.get("end_time") or getattr(self.instance, "end_time", None)
        start_date = attrs.get("start_date") if "start_date" in attrs else getattr(self.instance, "start_date", None)
        end_date = attrs.get("end_date") if "end_date" in attrs else getattr(self.instance, "end_date", None)
        if start_date is None:
            existing_start = getattr(self.instance, "start_date", None)
            if existing_start is None:
                raise serializers.ValidationError("Start date is required")
        if start_date and end_date and end_date < start_date:
            raise serializers.ValidationError("End date must be on or after start date")
        if end_date is None:
            existing_end = getattr(self.instance, "end_date", None)
            if existing_end is None:
                raise serializers.ValidationError("End date is required")
        if start and end and start_date and end_date:
            start_dt = datetime.combine(start_date, start)
            end_dt = datetime.combine(end_date, end)
            if end_dt <= start_dt:
                raise serializers.ValidationError("End datetime must be after start datetime")
        elif start and end and end == start:
            raise serializers.ValidationError("End time must be different from start time")
        # Normalize empty strings to None for dates
        if attrs.get("end_date") == "":
            attrs["end_date"] = None
        if attrs.get("start_date") == "":
            attrs["start_date"] = None
        return super().validate(attrs)

    def create(self, validated_data):
        return super().create(validated_data)
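The RecordingSerializer change above pads EPG-based recordings with global pre/post offsets. A minimal sketch of that timedelta arithmetic follows; the offset values are hypothetical stand-ins for the DVR settings, not values defined by this diff.

# Minimal sketch of the pre/post padding: the offsets simply widen the window on both ends.
from datetime import datetime, timedelta, timezone

start_time = datetime(2025, 6, 1, 20, 0, tzinfo=timezone.utc)
end_time = datetime(2025, 6, 1, 21, 0, tzinfo=timezone.utc)
pre_min, post_min = 2, 5  # hypothetical values of the DVR offset settings

if pre_min > 0:
    start_time -= timedelta(minutes=pre_min)
if post_min > 0:
    end_time += timedelta(minutes=post_min)

print(start_time.isoformat(), end_time.isoformat())
# 2025-06-01T19:58:00+00:00 2025-06-01T21:05:00+00:00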
@@ -8,7 +8,7 @@ from .models import Channel, Stream, ChannelProfile, ChannelProfileMembership, R
from apps.m3u.models import M3UAccount
from apps.epg.tasks import parse_programs_for_tvg_id
import logging, requests, time
from .tasks import run_recording
from .tasks import run_recording, prefetch_recording_artwork
from django.utils.timezone import now, is_aware, make_aware
from datetime import timedelta

@@ -45,6 +45,20 @@ def set_default_m3u_account(sender, instance, **kwargs):
    else:
        raise ValueError("No default M3UAccount found.")

@receiver(post_save, sender=Stream)
def generate_custom_stream_hash(sender, instance, created, **kwargs):
    """
    Generate a stable stream_hash for custom streams after creation.
    Uses the stream's ID to ensure the hash never changes even if name/url is edited.
    """
    if instance.is_custom and not instance.stream_hash and created:
        import hashlib
        # Use stream ID for a stable, unique hash that never changes
        unique_string = f"custom_stream_{instance.id}"
        instance.stream_hash = hashlib.sha256(unique_string.encode()).hexdigest()
        # Use update to avoid triggering signals again
        Stream.objects.filter(id=instance.id).update(stream_hash=instance.stream_hash)

@receiver(post_save, sender=Channel)
def refresh_epg_programs(sender, instance, created, **kwargs):
    """

@@ -62,15 +76,6 @@ def refresh_epg_programs(sender, instance, created, **kwargs):
        logger.info(f"New channel {instance.id} ({instance.name}) created with EPG data, refreshing program data")
        parse_programs_for_tvg_id.delay(instance.epg_data.id)

@receiver(post_save, sender=Channel)
def add_new_channel_to_groups(sender, instance, created, **kwargs):
    if created:
        profiles = ChannelProfile.objects.all()
        ChannelProfileMembership.objects.bulk_create([
            ChannelProfileMembership(channel_profile=profile, channel=instance)
            for profile in profiles
        ])

@receiver(post_save, sender=ChannelProfile)
def create_profile_memberships(sender, instance, created, **kwargs):
    if created:

@@ -82,8 +87,9 @@ def create_profile_memberships(sender, instance, created, **kwargs):

def schedule_recording_task(instance):
    eta = instance.start_time
    # Pass recording_id first so task can persist metadata to the correct row
    task = run_recording.apply_async(
        args=[instance.channel_id, str(instance.start_time), str(instance.end_time)],
        args=[instance.id, instance.channel_id, str(instance.start_time), str(instance.end_time)],
        eta=eta
    )
    return task.id

@@ -132,6 +138,11 @@ def schedule_task_on_save(sender, instance, created, **kwargs):
            instance.save(update_fields=['task_id'])
        else:
            print("Start time is in the past. Not scheduling.")
        # Kick off poster/artwork prefetch to enrich Upcoming cards
        try:
            prefetch_recording_artwork.apply_async(args=[instance.id], countdown=1)
        except Exception as e:
            print("Error scheduling artwork prefetch:", e)
    except Exception as e:
        import traceback
        print("Error in post_save signal:", e)
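schedule_recording_task() above defers execution with Celery's eta argument. The sketch below illustrates that pattern with a stand-in task; the app name, broker URL, and task body are assumptions for illustration only and do not reflect the project's run_recording implementation.

# Illustrative eta-based scheduling: Celery holds the task until the given datetime.
from datetime import datetime, timedelta, timezone

from celery import Celery

app = Celery("sketch", broker="memory://")  # hypothetical broker, illustration only

@app.task(name="sketch.run_recording")
def run_recording(recording_id, channel_id, start_time, end_time):
    return f"record channel {channel_id} for recording {recording_id}"

start = datetime.now(timezone.utc) + timedelta(minutes=30)
end = start + timedelta(hours=1)
# eta defers execution until the recording's start time, mirroring the signal above.
result = run_recording.apply_async(args=[1, 2, str(start), str(end)], eta=start)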
File diff suppressed because it is too large.
apps/channels/tests/__init__.py (new file, empty)
apps/channels/tests/test_channel_api.py (new file, 211 lines)
@@ -0,0 +1,211 @@
from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status

from apps.channels.models import Channel, ChannelGroup

User = get_user_model()


class ChannelBulkEditAPITests(TestCase):
    def setUp(self):
        # Create a test admin user (user_level >= 10) and authenticate
        self.user = User.objects.create_user(username="testuser", password="testpass123")
        self.user.user_level = 10  # Set admin level
        self.user.save()
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
        self.bulk_edit_url = "/api/channels/channels/edit/bulk/"

        # Create test channel group
        self.group1 = ChannelGroup.objects.create(name="Test Group 1")
        self.group2 = ChannelGroup.objects.create(name="Test Group 2")

        # Create test channels
        self.channel1 = Channel.objects.create(
            channel_number=1.0,
            name="Channel 1",
            tvg_id="channel1",
            channel_group=self.group1
        )
        self.channel2 = Channel.objects.create(
            channel_number=2.0,
            name="Channel 2",
            tvg_id="channel2",
            channel_group=self.group1
        )
        self.channel3 = Channel.objects.create(
            channel_number=3.0,
            name="Channel 3",
            tvg_id="channel3"
        )

    def test_bulk_edit_success(self):
        """Test successful bulk update of multiple channels"""
        data = [
            {"id": self.channel1.id, "name": "Updated Channel 1"},
            {"id": self.channel2.id, "name": "Updated Channel 2", "channel_number": 22.0},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["message"], "Successfully updated 2 channels")
        self.assertEqual(len(response.data["channels"]), 2)

        # Verify database changes
        self.channel1.refresh_from_db()
        self.channel2.refresh_from_db()
        self.assertEqual(self.channel1.name, "Updated Channel 1")
        self.assertEqual(self.channel2.name, "Updated Channel 2")
        self.assertEqual(self.channel2.channel_number, 22.0)

    def test_bulk_edit_with_empty_validated_data_first(self):
        """
        Test the bug fix: when first channel has empty validated_data.
        This was causing: ValueError: Field names must be given to bulk_update()
        """
        # Create a channel with data that will be "unchanged" (empty validated_data)
        # We'll send the same data it already has
        data = [
            # First channel: no actual changes (this would create empty validated_data)
            {"id": self.channel1.id},
            # Second channel: has changes
            {"id": self.channel2.id, "name": "Updated Channel 2"},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        # Should not crash with ValueError
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["message"], "Successfully updated 2 channels")

        # Verify the channel with changes was updated
        self.channel2.refresh_from_db()
        self.assertEqual(self.channel2.name, "Updated Channel 2")

    def test_bulk_edit_all_empty_updates(self):
        """Test when all channels have empty updates (no actual changes)"""
        data = [
            {"id": self.channel1.id},
            {"id": self.channel2.id},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        # Should succeed without calling bulk_update
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["message"], "Successfully updated 2 channels")

    def test_bulk_edit_mixed_fields(self):
        """Test bulk update where different channels update different fields"""
        data = [
            {"id": self.channel1.id, "name": "New Name 1"},
            {"id": self.channel2.id, "channel_number": 99.0},
            {"id": self.channel3.id, "tvg_id": "new_tvg_id", "name": "New Name 3"},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["message"], "Successfully updated 3 channels")

        # Verify all updates
        self.channel1.refresh_from_db()
        self.channel2.refresh_from_db()
        self.channel3.refresh_from_db()

        self.assertEqual(self.channel1.name, "New Name 1")
        self.assertEqual(self.channel2.channel_number, 99.0)
        self.assertEqual(self.channel3.tvg_id, "new_tvg_id")
        self.assertEqual(self.channel3.name, "New Name 3")

    def test_bulk_edit_with_channel_group(self):
        """Test bulk update with channel_group_id changes"""
        data = [
            {"id": self.channel1.id, "channel_group_id": self.group2.id},
            {"id": self.channel3.id, "channel_group_id": self.group1.id},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Verify group changes
        self.channel1.refresh_from_db()
        self.channel3.refresh_from_db()
        self.assertEqual(self.channel1.channel_group, self.group2)
        self.assertEqual(self.channel3.channel_group, self.group1)

    def test_bulk_edit_nonexistent_channel(self):
        """Test bulk update with a channel that doesn't exist"""
        nonexistent_id = 99999
        data = [
            {"id": nonexistent_id, "name": "Should Fail"},
            {"id": self.channel1.id, "name": "Should Still Update"},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        # Should return 400 with errors
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("errors", response.data)
        self.assertEqual(len(response.data["errors"]), 1)
        self.assertEqual(response.data["errors"][0]["channel_id"], nonexistent_id)
        self.assertEqual(response.data["errors"][0]["error"], "Channel not found")

        # The valid channel should still be updated
        self.assertEqual(response.data["updated_count"], 1)

    def test_bulk_edit_validation_error(self):
        """Test bulk update with invalid data (validation error)"""
        data = [
            {"id": self.channel1.id, "channel_number": "invalid_number"},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        # Should return 400 with validation errors
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("errors", response.data)
        self.assertEqual(len(response.data["errors"]), 1)
        self.assertIn("channel_number", response.data["errors"][0]["errors"])

    def test_bulk_edit_empty_channel_updates(self):
        """Test bulk update with empty list"""
        data = []

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        # Empty list is accepted and returns success with 0 updates
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["message"], "Successfully updated 0 channels")

    def test_bulk_edit_missing_channel_updates(self):
        """Test bulk update without proper format (dict instead of list)"""
        data = {"channel_updates": {}}

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data["error"], "Expected a list of channel updates")

    def test_bulk_edit_preserves_other_fields(self):
        """Test that bulk update only changes specified fields"""
        original_channel_number = self.channel1.channel_number
        original_tvg_id = self.channel1.tvg_id

        data = [
            {"id": self.channel1.id, "name": "Only Name Changed"},
        ]

        response = self.client.patch(self.bulk_edit_url, data, format="json")

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Verify only name changed, other fields preserved
        self.channel1.refresh_from_db()
        self.assertEqual(self.channel1.name, "Only Name Changed")
        self.assertEqual(self.channel1.channel_number, original_channel_number)
        self.assertEqual(self.channel1.tvg_id, original_tvg_id)
apps/channels/tests/test_recurring_rules.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from datetime import datetime, timedelta
from django.test import TestCase
from django.utils import timezone

from apps.channels.models import Channel, RecurringRecordingRule, Recording
from apps.channels.tasks import sync_recurring_rule_impl, purge_recurring_rule_impl


class RecurringRecordingRuleTasksTests(TestCase):
    def test_sync_recurring_rule_creates_and_purges_recordings(self):
        now = timezone.now()
        channel = Channel.objects.create(channel_number=1, name='Test Channel')

        start_time = (now + timedelta(minutes=15)).time().replace(second=0, microsecond=0)
        end_time = (now + timedelta(minutes=75)).time().replace(second=0, microsecond=0)

        rule = RecurringRecordingRule.objects.create(
            channel=channel,
            days_of_week=[now.weekday()],
            start_time=start_time,
            end_time=end_time,
        )

        created = sync_recurring_rule_impl(rule.id, drop_existing=True, horizon_days=1)
        self.assertEqual(created, 1)

        recording = Recording.objects.filter(custom_properties__rule__id=rule.id).first()
        self.assertIsNotNone(recording)
        self.assertEqual(recording.channel, channel)
        self.assertEqual(recording.custom_properties.get('rule', {}).get('id'), rule.id)

        expected_start = timezone.make_aware(
            datetime.combine(recording.start_time.date(), start_time),
            timezone.get_current_timezone(),
        )
        self.assertLess(abs((recording.start_time - expected_start).total_seconds()), 60)

        removed = purge_recurring_rule_impl(rule.id)
        self.assertEqual(removed, 1)
        self.assertFalse(Recording.objects.filter(custom_properties__rule__id=rule.id).exists())
@ -147,23 +147,37 @@ class EPGGridAPIView(APIView):
|
|||
f"EPGGridAPIView: Found {count} program(s), including recently ended, currently running, and upcoming shows."
|
||||
)
|
||||
|
||||
# Generate dummy programs for channels that have no EPG data
|
||||
# Generate dummy programs for channels that have no EPG data OR dummy EPG sources
|
||||
from apps.channels.models import Channel
|
||||
from apps.epg.models import EPGSource
|
||||
from django.db.models import Q
|
||||
|
||||
# Get channels with no EPG data
|
||||
# Get channels with no EPG data at all (standard dummy)
|
||||
channels_without_epg = Channel.objects.filter(Q(epg_data__isnull=True))
|
||||
channels_count = channels_without_epg.count()
|
||||
|
||||
# Log more detailed information about channels missing EPG data
|
||||
if channels_count > 0:
|
||||
# Get channels with custom dummy EPG sources (generate on-demand with patterns)
|
||||
channels_with_custom_dummy = Channel.objects.filter(
|
||||
epg_data__epg_source__source_type='dummy'
|
||||
).distinct()
|
||||
|
||||
# Log what we found
|
||||
without_count = channels_without_epg.count()
|
||||
custom_count = channels_with_custom_dummy.count()
|
||||
|
||||
if without_count > 0:
|
||||
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_without_epg]
|
||||
logger.warning(
|
||||
f"EPGGridAPIView: Missing EPG data for these channels: {', '.join(channel_names)}"
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Channels needing standard dummy EPG: {', '.join(channel_names)}"
|
||||
)
|
||||
|
||||
if custom_count > 0:
|
||||
channel_names = [f"{ch.name} (ID: {ch.id})" for ch in channels_with_custom_dummy]
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Channels needing custom dummy EPG: {', '.join(channel_names)}"
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"EPGGridAPIView: Found {channels_count} channels with no EPG data."
|
||||
f"EPGGridAPIView: Found {without_count} channels needing standard dummy, {custom_count} needing custom dummy EPG."
|
||||
)
|
||||
|
||||
# Serialize the regular programs
@ -205,12 +219,91 @@ class EPGGridAPIView(APIView):
# Generate and append dummy programs
dummy_programs = []
for channel in channels_without_epg:
# Use the channel UUID as tvg_id for dummy programs to match in the guide

# Import the function from output.views
from apps.output.views import generate_dummy_programs as gen_dummy_progs

# Handle channels with CUSTOM dummy EPG sources (with patterns)
for channel in channels_with_custom_dummy:
# For dummy EPGs, ALWAYS use channel UUID to ensure unique programs per channel
# This prevents multiple channels assigned to the same dummy EPG from showing identical data
# Each channel gets its own unique program data even if they share the same EPG source
dummy_tvg_id = str(channel.uuid)

try:
# Create programs every 4 hours for the next 24 hours
# Get the custom dummy EPG source
epg_source = channel.epg_data.epg_source if channel.epg_data else None

logger.debug(f"Generating custom dummy programs for channel: {channel.name} (ID: {channel.id})")

# Determine which name to parse based on custom properties
name_to_parse = channel.name
if epg_source and epg_source.custom_properties:
custom_props = epg_source.custom_properties
name_source = custom_props.get('name_source')

if name_source == 'stream':
# Get the stream index (1-based from user, convert to 0-based)
stream_index = custom_props.get('stream_index', 1) - 1

# Get streams ordered by channelstream order
channel_streams = channel.streams.all().order_by('channelstream__order')

if channel_streams.exists() and 0 <= stream_index < channel_streams.count():
stream = list(channel_streams)[stream_index]
name_to_parse = stream.name
logger.debug(f"Using stream name for parsing: {name_to_parse} (stream index: {stream_index})")
else:
logger.warning(f"Stream index {stream_index} not found for channel {channel.name}, falling back to channel name")
elif name_source == 'channel':
logger.debug(f"Using channel name for parsing: {name_to_parse}")

# Generate programs using custom patterns from the dummy EPG source
# Use the same tvg_id that will be set in the program data
generated = gen_dummy_progs(
channel_id=dummy_tvg_id,
channel_name=name_to_parse,
num_days=1,
program_length_hours=4,
epg_source=epg_source
)

# Custom dummy should always return data (either from patterns or fallback)
if generated:
logger.debug(f"Generated {len(generated)} custom dummy programs for {channel.name}")
# Convert generated programs to API format
for program in generated:
dummy_program = {
"id": f"dummy-custom-{channel.id}-{program['start_time'].hour}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": program['start_time'].isoformat(),
"end_time": program['end_time'].isoformat(),
"title": program['title'],
"description": program['description'],
"tvg_id": dummy_tvg_id,
"sub_title": None,
"custom_properties": None,
}
dummy_programs.append(dummy_program)
else:
logger.warning(f"No programs generated for custom dummy EPG channel: {channel.name}")

except Exception as e:
logger.error(
f"Error creating custom dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
)

# Handle channels with NO EPG data (standard dummy with humorous descriptions)
for channel in channels_without_epg:
# For channels with no EPG, use UUID to ensure uniqueness (matches frontend logic)
# The frontend uses: tvgRecord?.tvg_id ?? channel.uuid
# Since there's no EPG data, it will fall back to UUID
dummy_tvg_id = str(channel.uuid)

try:
logger.debug(f"Generating standard dummy programs for channel: {channel.name} (ID: {channel.id})")

# Create programs every 4 hours for the next 24 hours with humorous descriptions
for hour_offset in range(0, 24, 4):
# Use timedelta for time arithmetic instead of replace() to avoid hour overflow
start_time = now + timedelta(hours=hour_offset)
@ -238,7 +331,7 @@ class EPGGridAPIView(APIView):
# Create a dummy program in the same format as regular programs
dummy_program = {
"id": f"dummy-{channel.id}-{hour_offset}", # Create a unique ID
"id": f"dummy-standard-{channel.id}-{hour_offset}",
"epg": {"tvg_id": dummy_tvg_id, "name": channel.name},
"start_time": start_time.isoformat(),
"end_time": end_time.isoformat(),
@ -252,7 +345,7 @@ class EPGGridAPIView(APIView):
except Exception as e:
logger.error(
f"Error creating dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
f"Error creating standard dummy programs for channel {channel.name} (ID: {channel.id}): {str(e)}"
)

# Combine regular and dummy programs
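The standard-dummy branch above walks the next 24 hours in 4-hour steps with timedelta and emits one guide entry per step. A minimal, self-contained sketch of that scheduling (the helper name and sample values are illustrative, not taken from the codebase):

```python
from datetime import datetime, timedelta, timezone

def build_standard_dummy_blocks(channel_uuid: str, channel_name: str, now: datetime) -> list:
    """Illustrative sketch: emit 4-hour placeholder programs for the next 24 hours."""
    blocks = []
    for hour_offset in range(0, 24, 4):
        start_time = now + timedelta(hours=hour_offset)  # timedelta avoids hour-overflow issues
        end_time = start_time + timedelta(hours=4)
        blocks.append({
            "id": f"dummy-standard-{channel_uuid}-{hour_offset}",
            "epg": {"tvg_id": channel_uuid, "name": channel_name},
            "start_time": start_time.isoformat(),
            "end_time": end_time.isoformat(),
        })
    return blocks

programs = build_standard_dummy_blocks("example-uuid", "Example Channel", datetime.now(timezone.utc))
assert len(programs) == 6  # 24 hours / 4-hour blocks
```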
@ -284,7 +377,22 @@ class EPGImportAPIView(APIView):
)
def post(self, request, format=None):
logger.info("EPGImportAPIView: Received request to import EPG data.")
refresh_epg_data.delay(request.data.get("id", None)) # Trigger Celery task
epg_id = request.data.get("id", None)

# Check if this is a dummy EPG source
try:
from .models import EPGSource
epg_source = EPGSource.objects.get(id=epg_id)
if epg_source.source_type == 'dummy':
logger.info(f"EPGImportAPIView: Skipping refresh for dummy EPG source {epg_id}")
return Response(
{"success": False, "message": "Dummy EPG sources do not require refreshing."},
status=status.HTTP_400_BAD_REQUEST,
)
except EPGSource.DoesNotExist:
pass # Let the task handle the missing source

refresh_epg_data.delay(epg_id) # Trigger Celery task
logger.info("EPGImportAPIView: Task dispatched to refresh EPG data.")
return Response(
{"success": True, "message": "EPG data import initiated."},
@ -308,3 +416,4 @@ class EPGDataViewSet(viewsets.ReadOnlyModelViewSet):
return [perm() for perm in permission_classes_by_action[self.action]]
except KeyError:
return [Authenticated()]
@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-09-02 14:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0014_epgsource_extracted_file_path'),
    ]

    operations = [
        migrations.AlterField(
            model_name='programdata',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
    ]
18 apps/epg/migrations/0016_epgdata_icon_url.py Normal file
@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-09-16 22:01

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0015_alter_programdata_custom_properties'),
    ]

    operations = [
        migrations.AddField(
            model_name='epgdata',
            name='icon_url',
            field=models.URLField(blank=True, max_length=500, null=True),
        ),
    ]
18 apps/epg/migrations/0017_alter_epgsource_url.py Normal file
@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-09-24 21:07

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0016_epgdata_icon_url'),
    ]

    operations = [
        migrations.AlterField(
            model_name='epgsource',
            name='url',
            field=models.URLField(blank=True, max_length=1000, null=True),
        ),
    ]
@ -0,0 +1,23 @@
# Generated by Django 5.2.4 on 2025-10-17 17:02

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0017_alter_epgsource_url'),
    ]

    operations = [
        migrations.AddField(
            model_name='epgsource',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, help_text='Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)', null=True),
        ),
        migrations.AlterField(
            model_name='epgsource',
            name='source_type',
            field=models.CharField(choices=[('xmltv', 'XMLTV URL'), ('schedules_direct', 'Schedules Direct API'), ('dummy', 'Custom Dummy EPG')], max_length=20),
        ),
    ]
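As a rough illustration of how a "Custom Dummy EPG" source might be configured once this migration is applied: the template keys below are the ones the later data migration (0020) rewrites, shown in their post-0020 {starttime} form; the pattern/timezone/duration keys hinted at in the help_text are omitted because their exact names are not part of this diff.

```python
from apps.epg.models import EPGSource

# Hypothetical example values; only source_type='dummy' and the JSONField itself
# are established by this migration.
source = EPGSource.objects.create(
    name="Sports PPV Dummy",
    source_type="dummy",
    custom_properties={
        "title_template": "{title} at {starttime}",
        "description_template": "Starts at {starttime} ({starttime24})",
    },
)
```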
18 apps/epg/migrations/0019_alter_programdata_sub_title.py Normal file
@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-10-22 21:59

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0018_epgsource_custom_properties_and_more'),
    ]

    operations = [
        migrations.AlterField(
            model_name='programdata',
            name='sub_title',
            field=models.TextField(blank=True, null=True),
        ),
    ]
@ -0,0 +1,119 @@
# Generated migration to replace {time} placeholders with {starttime}

import re
from django.db import migrations


def migrate_time_placeholders(apps, schema_editor):
    """
    Replace {time} with {starttime} and {time24} with {starttime24}
    in all dummy EPG source custom_properties templates.
    """
    EPGSource = apps.get_model('epg', 'EPGSource')

    # Fields that contain templates with placeholders
    template_fields = [
        'title_template',
        'description_template',
        'upcoming_title_template',
        'upcoming_description_template',
        'ended_title_template',
        'ended_description_template',
        'channel_logo_url',
        'program_poster_url',
    ]

    # Get all dummy EPG sources
    dummy_sources = EPGSource.objects.filter(source_type='dummy')

    updated_count = 0
    for source in dummy_sources:
        if not source.custom_properties:
            continue

        modified = False
        custom_props = source.custom_properties.copy()

        for field in template_fields:
            if field in custom_props and custom_props[field]:
                original_value = custom_props[field]

                # Replace {time24} first (before {time}) to avoid double replacement
                # e.g., {time24} shouldn't become {starttime24} via {time} -> {starttime}
                new_value = original_value
                new_value = re.sub(r'\{time24\}', '{starttime24}', new_value)
                new_value = re.sub(r'\{time\}', '{starttime}', new_value)

                if new_value != original_value:
                    custom_props[field] = new_value
                    modified = True

        if modified:
            source.custom_properties = custom_props
            source.save(update_fields=['custom_properties'])
            updated_count += 1

    if updated_count > 0:
        print(f"Migration complete: Updated {updated_count} dummy EPG source(s) with new placeholder names.")
    else:
        print("No dummy EPG sources needed placeholder updates.")


def reverse_migration(apps, schema_editor):
    """
    Reverse the migration by replacing {starttime} back to {time}.
    """
    EPGSource = apps.get_model('epg', 'EPGSource')

    template_fields = [
        'title_template',
        'description_template',
        'upcoming_title_template',
        'upcoming_description_template',
        'ended_title_template',
        'ended_description_template',
        'channel_logo_url',
        'program_poster_url',
    ]

    dummy_sources = EPGSource.objects.filter(source_type='dummy')

    updated_count = 0
    for source in dummy_sources:
        if not source.custom_properties:
            continue

        modified = False
        custom_props = source.custom_properties.copy()

        for field in template_fields:
            if field in custom_props and custom_props[field]:
                original_value = custom_props[field]

                # Reverse the replacements
                new_value = original_value
                new_value = re.sub(r'\{starttime24\}', '{time24}', new_value)
                new_value = re.sub(r'\{starttime\}', '{time}', new_value)

                if new_value != original_value:
                    custom_props[field] = new_value
                    modified = True

        if modified:
            source.custom_properties = custom_props
            source.save(update_fields=['custom_properties'])
            updated_count += 1

    if updated_count > 0:
        print(f"Reverse migration complete: Reverted {updated_count} dummy EPG source(s) to old placeholder names.")


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0019_alter_programdata_sub_title'),
    ]

    operations = [
        migrations.RunPython(migrate_time_placeholders, reverse_migration),
    ]
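For a concrete sense of what migrate_time_placeholders does to a single stored template (the template text here is invented for the example):

```python
import re

template = "Starts at {time} ({time24}) - {title}"
# {time24} is replaced first, exactly as in the migration, so it cannot be
# partially rewritten by the shorter {time} substitution.
template = re.sub(r'\{time24\}', '{starttime24}', template)
template = re.sub(r'\{time\}', '{starttime}', template)
print(template)  # Starts at {starttime} ({starttime24}) - {title}
```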
18 apps/epg/migrations/0021_epgsource_priority.py Normal file
@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-12-05 15:24

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('epg', '0020_migrate_time_to_starttime_placeholders'),
    ]

    operations = [
        migrations.AddField(
            model_name='epgsource',
            name='priority',
            field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'),
        ),
    ]
@ -8,6 +8,7 @@ class EPGSource(models.Model):
SOURCE_TYPE_CHOICES = [
('xmltv', 'XMLTV URL'),
('schedules_direct', 'Schedules Direct API'),
('dummy', 'Custom Dummy EPG'),
]

STATUS_IDLE = 'idle'
@ -28,7 +29,7 @@ class EPGSource(models.Model):
name = models.CharField(max_length=255, unique=True)
source_type = models.CharField(max_length=20, choices=SOURCE_TYPE_CHOICES)
url = models.URLField(blank=True, null=True) # For XMLTV
url = models.URLField(max_length=1000, blank=True, null=True) # For XMLTV
api_key = models.CharField(max_length=255, blank=True, null=True) # For Schedules Direct
is_active = models.BooleanField(default=True)
file_path = models.CharField(max_length=1024, blank=True, null=True)
@ -38,6 +39,16 @@ class EPGSource(models.Model):
refresh_task = models.ForeignKey(
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
)
custom_properties = models.JSONField(
default=dict,
blank=True,
null=True,
help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)"
)
priority = models.PositiveIntegerField(
default=0,
help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel."
)
status = models.CharField(
max_length=20,
choices=STATUS_CHOICES,
@ -127,6 +138,7 @@ class EPGData(models.Model):
# and a name (which might simply be the tvg_id if no real channel exists).
tvg_id = models.CharField(max_length=255, null=True, blank=True, db_index=True)
name = models.CharField(max_length=255)
icon_url = models.URLField(max_length=500, null=True, blank=True)
epg_source = models.ForeignKey(
EPGSource,
on_delete=models.CASCADE,
@ -147,10 +159,10 @@ class ProgramData(models.Model):
start_time = models.DateTimeField()
end_time = models.DateTimeField()
title = models.CharField(max_length=255)
sub_title = models.CharField(max_length=255, blank=True, null=True)
sub_title = models.TextField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
tvg_id = models.CharField(max_length=255, null=True, blank=True)
custom_properties = models.TextField(null=True, blank=True)
custom_properties = models.JSONField(default=dict, blank=True, null=True)

def __str__(self):
return f"{self.title} ({self.start_time} - {self.end_time})"
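The new priority field only stores the weight; the matching code that consumes it is not shown in this diff. One plausible way a matcher could use it, sketched purely as an assumption (the helper name is invented; only the priority field and the epgs related name come from the model and serializer changes in this diff):

```python
from apps.epg.models import EPGSource

def candidate_sources_for(tvg_id: str):
    """Hypothetical helper: prefer higher-priority active sources for a tvg_id."""
    return (
        EPGSource.objects.filter(is_active=True, epgs__tvg_id=tvg_id)
        .order_by('-priority', 'name')  # higher numbers win, name as a stable tie-break
        .distinct()
    )
```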
@ -1,10 +1,17 @@
from core.utils import validate_flexible_url
from rest_framework import serializers
from .models import EPGSource, EPGData, ProgramData
from apps.channels.models import Channel

class EPGSourceSerializer(serializers.ModelSerializer):
epg_data_ids = serializers.SerializerMethodField()
epg_data_count = serializers.SerializerMethodField()
read_only_fields = ['created_at', 'updated_at']
url = serializers.CharField(
required=False,
allow_blank=True,
allow_null=True,
validators=[validate_flexible_url]
)

class Meta:
model = EPGSource
@ -17,15 +24,18 @@ class EPGSourceSerializer(serializers.ModelSerializer):
'is_active',
'file_path',
'refresh_interval',
'priority',
'status',
'last_message',
'created_at',
'updated_at',
'epg_data_ids'
'custom_properties',
'epg_data_count'
]

def get_epg_data_ids(self, obj):
return list(obj.epgs.values_list('id', flat=True))
def get_epg_data_count(self, obj):
"""Return the count of EPG data entries instead of all IDs to prevent large payloads"""
return obj.epgs.count()

class ProgramDataSerializer(serializers.ModelSerializer):
class Meta:
@ -45,5 +55,6 @@ class EPGDataSerializer(serializers.ModelSerializer):
'id',
'tvg_id',
'name',
'icon_url',
'epg_source',
]
@ -1,9 +1,9 @@
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from .models import EPGSource
from .models import EPGSource, EPGData
from .tasks import refresh_epg_data, delete_epg_refresh_task_by_id
from django_celery_beat.models import PeriodicTask, IntervalSchedule
from core.utils import is_protected_path
from core.utils import is_protected_path, send_websocket_update
import json
import logging
import os
@ -12,15 +12,77 @@ logger = logging.getLogger(__name__)

@receiver(post_save, sender=EPGSource)
def trigger_refresh_on_new_epg_source(sender, instance, created, **kwargs):
# Trigger refresh only if the source is newly created and active
if created and instance.is_active:
# Trigger refresh only if the source is newly created, active, and not a dummy EPG
if created and instance.is_active and instance.source_type != 'dummy':
refresh_epg_data.delay(instance.id)

@receiver(post_save, sender=EPGSource)
def create_dummy_epg_data(sender, instance, created, **kwargs):
"""
Automatically create EPGData for dummy EPG sources when they are created.
This allows channels to be assigned to dummy EPGs immediately without
requiring a refresh first.
"""
if instance.source_type == 'dummy':
# Ensure dummy EPGs always have idle status and no status message
if instance.status != EPGSource.STATUS_IDLE or instance.last_message:
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
instance.save(update_fields=['status', 'last_message'])

# Create a URL-friendly tvg_id from the dummy EPG name
# Replace spaces and special characters with underscores
friendly_tvg_id = instance.name.replace(' ', '_').replace('-', '_')
# Remove any characters that aren't alphanumeric or underscores
friendly_tvg_id = ''.join(c for c in friendly_tvg_id if c.isalnum() or c == '_')
# Convert to lowercase for consistency
friendly_tvg_id = friendly_tvg_id.lower()
# Prefix with 'dummy_' to make it clear this is a dummy EPG
friendly_tvg_id = f"dummy_{friendly_tvg_id}"

# Create or update the EPGData record
epg_data, data_created = EPGData.objects.get_or_create(
tvg_id=friendly_tvg_id,
epg_source=instance,
defaults={
'name': instance.name,
'icon_url': None
}
)

# Update name if it changed and record already existed
if not data_created and epg_data.name != instance.name:
epg_data.name = instance.name
epg_data.save(update_fields=['name'])

if data_created:
logger.info(f"Auto-created EPGData for dummy EPG source: {instance.name} (ID: {instance.id})")

# Send websocket update to notify frontend that EPG data has been created
# This allows the channel form to immediately show the new dummy EPG without refreshing
send_websocket_update('updates', 'update', {
'type': 'epg_data_created',
'source_id': instance.id,
'source_name': instance.name,
'epg_data_id': epg_data.id
})
else:
logger.debug(f"EPGData already exists for dummy EPG source: {instance.name} (ID: {instance.id})")

@receiver(post_save, sender=EPGSource)
def create_or_update_refresh_task(sender, instance, **kwargs):
"""
Create or update a Celery Beat periodic task when an EPGSource is created/updated.
Skip creating tasks for dummy EPG sources as they don't need refreshing.
"""
# Skip task creation for dummy EPGs
if instance.source_type == 'dummy':
# If there's an existing task, disable it
if instance.refresh_task:
instance.refresh_task.enabled = False
instance.refresh_task.save(update_fields=['enabled'])
return

task_name = f"epg_source-refresh-{instance.id}"
interval, _ = IntervalSchedule.objects.get_or_create(
every=int(instance.refresh_interval),
@ -80,7 +142,14 @@ def delete_refresh_task(sender, instance, **kwargs):
def update_status_on_active_change(sender, instance, **kwargs):
"""
When an EPGSource's is_active field changes, update the status accordingly.
For dummy EPGs, always ensure status is idle and no status message.
"""
# Dummy EPGs should always be idle with no status message
if instance.source_type == 'dummy':
instance.status = EPGSource.STATUS_IDLE
instance.last_message = None
return

if instance.pk: # Only for existing records, not new ones
try:
# Get the current record from the database
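The tvg_id slugging in create_dummy_epg_data is easy to reason about in isolation; this is an illustrative standalone copy of that logic (not an import from the app):

```python
def friendly_dummy_tvg_id(name: str) -> str:
    """Mirrors the slug steps in create_dummy_epg_data above."""
    tvg_id = name.replace(' ', '_').replace('-', '_')               # spaces/dashes -> underscores
    tvg_id = ''.join(c for c in tvg_id if c.isalnum() or c == '_')  # drop any other characters
    return f"dummy_{tvg_id.lower()}"                                # lowercase, 'dummy_' prefix

print(friendly_dummy_tvg_id("Sports - Late Night"))  # dummy_sports___late_night
```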
@ -24,11 +24,30 @@ from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer

from .models import EPGSource, EPGData, ProgramData
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory
from core.utils import acquire_task_lock, release_task_lock, send_websocket_update, cleanup_memory, log_system_event

logger = logging.getLogger(__name__)


def validate_icon_url_fast(icon_url, max_length=None):
"""
Fast validation for icon URLs during parsing.
Returns None if URL is too long, original URL otherwise.
If max_length is None, gets it dynamically from the EPGData model field.
"""
if max_length is None:
# Get max_length dynamically from the model field
max_length = EPGData._meta.get_field('icon_url').max_length

if icon_url and len(icon_url) > max_length:
logger.warning(f"Icon URL too long ({len(icon_url)} > {max_length}), skipping: {icon_url[:100]}...")
return None
return icon_url


MAX_EXTRACT_CHUNK_SIZE = 65536 # 64kb (base2)


def send_epg_update(source_id, action, progress, **kwargs):
"""Send WebSocket update about EPG download/parsing progress"""
# Start with the base data dictionary
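A quick usage sketch for validate_icon_url_fast (passing max_length explicitly avoids the EPGData field lookup; the import path assumes the helper stays in apps/epg/tasks.py, where this diff adds it):

```python
from apps.epg.tasks import validate_icon_url_fast  # assumed module path

too_long_url = "https://example.com/logos/" + "x" * 600 + ".png"
assert validate_icon_url_fast(too_long_url, max_length=500) is None  # skipped, warning logged
assert validate_icon_url_fast("https://example.com/logo.png", max_length=500) == "https://example.com/logo.png"
```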
@ -114,8 +133,9 @@ def delete_epg_refresh_task_by_id(epg_id):
@shared_task
def refresh_all_epg_data():
logger.info("Starting refresh_epg_data task.")
active_sources = EPGSource.objects.filter(is_active=True)
logger.debug(f"Found {active_sources.count()} active EPGSource(s).")
# Exclude dummy EPG sources from refresh - they don't need refreshing
active_sources = EPGSource.objects.filter(is_active=True).exclude(source_type='dummy')
logger.debug(f"Found {active_sources.count()} active EPGSource(s) (excluding dummy EPGs).")

for source in active_sources:
refresh_epg_data(source.id)
@ -161,6 +181,13 @@ def refresh_epg_data(source_id):
gc.collect()
return

# Skip refresh for dummy EPG sources - they don't need refreshing
if source.source_type == 'dummy':
logger.info(f"Skipping refresh for dummy EPG source {source.name} (ID: {source_id})")
release_task_lock('refresh_epg_data', source_id)
gc.collect()
return

# Continue with the normal processing...
logger.info(f"Processing EPGSource: {source.name} (type: {source.source_type})")
if source.source_type == 'xmltv':
@ -186,6 +213,12 @@ def refresh_epg_data(source_id):
|
|||
fetch_schedules_direct(source)
|
||||
|
||||
source.save(update_fields=['updated_at'])
|
||||
# After successful EPG refresh, evaluate DVR series rules to schedule new episodes
|
||||
try:
|
||||
from apps.channels.tasks import evaluate_series_rules
|
||||
evaluate_series_rules.delay()
|
||||
except Exception:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.error(f"Error in refresh_epg_data for source {source_id}: {e}", exc_info=True)
|
||||
try:
|
||||
|
|
@ -253,11 +286,12 @@ def fetch_xmltv(source):
|
|||
logger.info(f"Fetching XMLTV data from source: {source.name}")
|
||||
try:
|
||||
# Get default user agent from settings
|
||||
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
|
||||
stream_settings = CoreSettings.get_stream_settings()
|
||||
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
|
||||
if default_user_agent_setting and default_user_agent_setting.value:
|
||||
default_user_agent_id = stream_settings.get('default_user_agent')
|
||||
if default_user_agent_id:
|
||||
try:
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
|
||||
if user_agent_obj and user_agent_obj.user_agent:
|
||||
user_agent = user_agent_obj.user_agent
|
||||
logger.debug(f"Using default user agent: {user_agent}")
|
||||
|
|
@ -641,7 +675,11 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
# Reset file pointer and extract the content
gz_file.seek(0)
with open(extracted_path, 'wb') as out_file:
out_file.write(gz_file.read())
while True:
chunk = gz_file.read(MAX_EXTRACT_CHUNK_SIZE)
if not chunk or len(chunk) == 0:
break
out_file.write(chunk)
except Exception as e:
logger.error(f"Error extracting GZIP file: {e}", exc_info=True)
return None
@ -685,9 +723,13 @@ def extract_compressed_file(file_path, output_path=None, delete_original=False):
return None

# Extract the first XML file
xml_content = zip_file.read(xml_files[0])
with open(extracted_path, 'wb') as out_file:
out_file.write(xml_content)
with zip_file.open(xml_files[0], "r") as xml_file:
while True:
chunk = xml_file.read(MAX_EXTRACT_CHUNK_SIZE)
if not chunk or len(chunk) == 0:
break
out_file.write(chunk)

logger.info(f"Successfully extracted zip file to: {extracted_path}")
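Both the gzip and zip branches above now use the same bounded-chunk copy loop, so a large guide file never has to be held in memory at once. A standalone sketch of that pattern (file paths invented for the example):

```python
import gzip

CHUNK_SIZE = 65536  # mirrors MAX_EXTRACT_CHUNK_SIZE above

def stream_decompress_gzip(src_path: str, dst_path: str) -> None:
    """Copy a gzip member to disk one bounded chunk at a time."""
    with gzip.open(src_path, "rb") as gz_file, open(dst_path, "wb") as out_file:
        while True:
            chunk = gz_file.read(CHUNK_SIZE)
            if not chunk:
                break
            out_file.write(chunk)

# stream_decompress_gzip("/data/epg/guide.xml.gz", "/data/epg/guide.xml")
# shutil.copyfileobj(gz_file, out_file, CHUNK_SIZE) is the stdlib shorthand for the same loop.
```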
@ -815,6 +857,7 @@ def parse_channels_only(source):
|
|||
processed_channels = 0
|
||||
batch_size = 500 # Process in batches to limit memory usage
|
||||
progress = 0 # Initialize progress variable here
|
||||
icon_url_max_length = EPGData._meta.get_field('icon_url').max_length # Get max length for icon_url field
|
||||
|
||||
# Track memory at key points
|
||||
if process:
|
||||
|
|
@ -843,7 +886,7 @@ def parse_channels_only(source):
|
|||
|
||||
# Change iterparse to look for both channel and programme elements
|
||||
logger.debug(f"Creating iterparse context for channels and programmes")
|
||||
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True)
|
||||
channel_parser = etree.iterparse(source_file, events=('end',), tag=('channel', 'programme'), remove_blank_text=True, recover=True)
|
||||
if process:
|
||||
logger.debug(f"[parse_channels_only] Memory after creating iterparse: {process.memory_info().rss / 1024 / 1024:.2f} MB")
|
||||
|
||||
|
|
@ -857,10 +900,15 @@ def parse_channels_only(source):
|
|||
tvg_id = elem.get('id', '').strip()
|
||||
if tvg_id:
|
||||
display_name = None
|
||||
icon_url = None
|
||||
for child in elem:
|
||||
if child.tag == 'display-name' and child.text:
|
||||
if display_name is None and child.tag == 'display-name' and child.text:
|
||||
display_name = child.text.strip()
|
||||
break
|
||||
elif child.tag == 'icon':
|
||||
raw_icon_url = child.get('src', '').strip()
|
||||
icon_url = validate_icon_url_fast(raw_icon_url, icon_url_max_length)
|
||||
if display_name and icon_url:
|
||||
break # No need to continue if we have both
|
||||
|
||||
if not display_name:
|
||||
display_name = tvg_id
|
||||
|
|
@ -878,17 +926,24 @@ def parse_channels_only(source):
|
|||
epgs_to_create.append(EPGData(
|
||||
tvg_id=tvg_id,
|
||||
name=display_name,
|
||||
icon_url=icon_url,
|
||||
epg_source=source,
|
||||
))
|
||||
logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 1: {tvg_id} - {display_name}")
|
||||
processed_channels += 1
|
||||
continue
|
||||
|
||||
# We use the cached object to check if the name has changed
|
||||
# We use the cached object to check if the name or icon_url has changed
|
||||
epg_obj = existing_epgs[tvg_id]
|
||||
needs_update = False
|
||||
if epg_obj.name != display_name:
|
||||
# Only update if the name actually changed
|
||||
epg_obj.name = display_name
|
||||
needs_update = True
|
||||
if epg_obj.icon_url != icon_url:
|
||||
epg_obj.icon_url = icon_url
|
||||
needs_update = True
|
||||
|
||||
if needs_update:
|
||||
epgs_to_update.append(epg_obj)
|
||||
logger.debug(f"[parse_channels_only] Added channel to update to epgs_to_update: {tvg_id} - {display_name}")
|
||||
else:
|
||||
|
|
@ -899,6 +954,7 @@ def parse_channels_only(source):
|
|||
epgs_to_create.append(EPGData(
|
||||
tvg_id=tvg_id,
|
||||
name=display_name,
|
||||
icon_url=icon_url,
|
||||
epg_source=source,
|
||||
))
|
||||
logger.debug(f"[parse_channels_only] Added new channel to epgs_to_create 2: {tvg_id} - {display_name}")
|
||||
|
|
@ -921,7 +977,7 @@ def parse_channels_only(source):
|
|||
logger.info(f"[parse_channels_only] Bulk updating {len(epgs_to_update)} EPG entries")
|
||||
if process:
|
||||
logger.info(f"[parse_channels_only] Memory before bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
|
||||
EPGData.objects.bulk_update(epgs_to_update, ["name"])
|
||||
EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
|
||||
if process:
|
||||
logger.info(f"[parse_channels_only] Memory after bulk_update: {process.memory_info().rss / 1024 / 1024:.2f} MB")
|
||||
epgs_to_update = []
|
||||
|
|
@ -988,7 +1044,7 @@ def parse_channels_only(source):
|
|||
logger.debug(f"[parse_channels_only] Created final batch of {len(epgs_to_create)} EPG entries")
|
||||
|
||||
if epgs_to_update:
|
||||
EPGData.objects.bulk_update(epgs_to_update, ["name"])
|
||||
EPGData.objects.bulk_update(epgs_to_update, ["name", "icon_url"])
|
||||
logger.debug(f"[parse_channels_only] Updated final batch of {len(epgs_to_update)} EPG entries")
|
||||
if process:
|
||||
logger.debug(f"[parse_channels_only] Memory after final batch creation: {process.memory_info().rss / 1024 / 1024:.2f} MB")
|
||||
|
|
@ -1102,6 +1158,12 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
epg = EPGData.objects.get(id=epg_id)
|
||||
epg_source = epg.epg_source
|
||||
|
||||
# Skip program parsing for dummy EPG sources - they don't have program data files
|
||||
if epg_source.source_type == 'dummy':
|
||||
logger.info(f"Skipping program parsing for dummy EPG source {epg_source.name} (ID: {epg_id})")
|
||||
release_task_lock('parse_epg_programs', epg_id)
|
||||
return
|
||||
|
||||
if not Channel.objects.filter(epg_data=epg).exists():
|
||||
logger.info(f"No channels matched to EPG {epg.tvg_id}")
|
||||
release_task_lock('parse_epg_programs', epg_id)
|
||||
|
|
@ -1195,7 +1257,7 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
source_file = open(file_path, 'rb')
|
||||
|
||||
# Stream parse the file using lxml's iterparse
|
||||
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True)
|
||||
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
|
||||
|
||||
for _, elem in program_parser:
|
||||
if elem.get('channel') == epg.tvg_id:
|
||||
|
|
@ -1224,10 +1286,7 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
|
||||
if custom_props:
|
||||
logger.trace(f"Number of custom properties: {len(custom_props)}")
|
||||
try:
|
||||
custom_properties_json = json.dumps(custom_props)
|
||||
except Exception as e:
|
||||
logger.error(f"Error serializing custom properties to JSON: {e}", exc_info=True)
|
||||
custom_properties_json = custom_props
|
||||
|
||||
programs_to_create.append(ProgramData(
|
||||
epg=epg,
|
||||
|
|
@ -1335,11 +1394,23 @@ def parse_programs_for_tvg_id(epg_id):
|
|||
|
||||
|
||||
def parse_programs_for_source(epg_source, tvg_id=None):
|
||||
"""
|
||||
Parse programs for all MAPPED channels from an EPG source in a single pass.
|
||||
|
||||
This is an optimized version that:
|
||||
1. Only processes EPG entries that are actually mapped to channels
|
||||
2. Parses the XML file ONCE instead of once per channel
|
||||
3. Skips programmes for unmapped channels entirely during parsing
|
||||
|
||||
This dramatically improves performance when an EPG source has many channels
|
||||
but only a fraction are mapped.
|
||||
"""
|
||||
# Send initial programs parsing notification
|
||||
send_epg_update(epg_source.id, "parsing_programs", 0)
|
||||
should_log_memory = False
|
||||
process = None
|
||||
initial_memory = 0
|
||||
source_file = None
|
||||
|
||||
# Add memory tracking only in trace mode or higher
|
||||
try:
|
||||
|
|
@ -1359,91 +1430,251 @@ def parse_programs_for_source(epg_source, tvg_id=None):
|
|||
should_log_memory = False
|
||||
|
||||
try:
|
||||
# Process EPG entries in batches rather than all at once
|
||||
batch_size = 20 # Process fewer channels at once to reduce memory usage
|
||||
epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
# Only get EPG entries that are actually mapped to channels
|
||||
mapped_epg_ids = set(
|
||||
Channel.objects.filter(
|
||||
epg_data__epg_source=epg_source,
|
||||
epg_data__isnull=False
|
||||
).values_list('epg_data_id', flat=True)
|
||||
)
|
||||
|
||||
if epg_count == 0:
|
||||
logger.info(f"No EPG entries found for source: {epg_source.name}")
|
||||
# Update status - this is not an error, just no entries
|
||||
if not mapped_epg_ids:
|
||||
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} "
|
||||
f"(source has {total_epg_count} EPG entries, 0 mapped)")
|
||||
# Update status - this is not an error, just no mapped entries
|
||||
epg_source.status = 'success'
|
||||
epg_source.save(update_fields=['status'])
|
||||
epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="success")
|
||||
return True
|
||||
|
||||
logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}")
|
||||
# Get the mapped EPG entries with their tvg_ids
|
||||
mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id')
|
||||
tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']}
|
||||
mapped_tvg_ids = set(tvg_id_to_epg_id.keys())
|
||||
|
||||
failed_entries = []
|
||||
program_count = 0
|
||||
channel_count = 0
|
||||
updated_count = 0
|
||||
processed = 0
|
||||
# Process in batches using cursor-based approach to limit memory usage
|
||||
last_id = 0
|
||||
while True:
|
||||
# Get a batch of EPG entries
|
||||
batch_entries = list(EPGData.objects.filter(
|
||||
epg_source=epg_source,
|
||||
id__gt=last_id
|
||||
).order_by('id')[:batch_size])
|
||||
total_epg_count = EPGData.objects.filter(epg_source=epg_source).count()
|
||||
mapped_count = len(mapped_tvg_ids)
|
||||
|
||||
if not batch_entries:
|
||||
break # No more entries to process
|
||||
logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} "
|
||||
f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)")
|
||||
|
||||
# Update last_id for next iteration
|
||||
last_id = batch_entries[-1].id
|
||||
# Get the file path
|
||||
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
|
||||
if not file_path:
|
||||
file_path = epg_source.get_cache_file()
|
||||
|
||||
# Process this batch
|
||||
for epg in batch_entries:
|
||||
if epg.tvg_id:
|
||||
try:
|
||||
result = parse_programs_for_tvg_id(epg.id)
|
||||
if result == "Task already running":
|
||||
logger.info(f"Program parse for {epg.id} already in progress, skipping")
|
||||
# Check if the file exists
|
||||
if not os.path.exists(file_path):
|
||||
logger.error(f"EPG file not found at: {file_path}")
|
||||
|
||||
processed += 1
|
||||
progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50
|
||||
send_epg_update(epg_source.id, "parsing_programs", progress)
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True)
|
||||
failed_entries.append(f"{epg.tvg_id}: {str(e)}")
|
||||
if epg_source.url:
|
||||
# Update the file path in the database
|
||||
new_path = epg_source.get_cache_file()
|
||||
logger.info(f"Updating file_path from '{file_path}' to '{new_path}'")
|
||||
epg_source.file_path = new_path
|
||||
epg_source.save(update_fields=['file_path'])
|
||||
logger.info(f"Fetching new EPG data from URL: {epg_source.url}")
|
||||
|
||||
# Force garbage collection after each batch
|
||||
batch_entries = None # Remove reference to help garbage collection
|
||||
# Fetch new data before continuing
|
||||
fetch_success = fetch_xmltv(epg_source)
|
||||
|
||||
if not fetch_success:
|
||||
logger.error(f"Failed to fetch EPG data for source: {epg_source.name}")
|
||||
epg_source.status = 'error'
|
||||
epg_source.last_message = f"Failed to download EPG data"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file")
|
||||
return False
|
||||
|
||||
# Update file_path with the new location
|
||||
file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path
|
||||
else:
|
||||
logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data")
|
||||
epg_source.status = 'error'
|
||||
epg_source.last_message = f"No URL provided, cannot fetch EPG data"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided")
|
||||
return False
|
||||
|
||||
# SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory
|
||||
# We parse FIRST, then do an atomic delete+insert to avoid race conditions
|
||||
# where clients might see empty/partial EPG data during the transition
|
||||
all_programs_to_create = []
|
||||
programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel
|
||||
total_programs = 0
|
||||
skipped_programs = 0
|
||||
last_progress_update = 0
|
||||
|
||||
try:
|
||||
logger.debug(f"Opening file for single-pass parsing: {file_path}")
|
||||
source_file = open(file_path, 'rb')
|
||||
|
||||
# Stream parse the file using lxml's iterparse
|
||||
program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True)
|
||||
|
||||
for _, elem in program_parser:
|
||||
channel_id = elem.get('channel')
|
||||
|
||||
# Skip programmes for unmapped channels immediately
|
||||
if channel_id not in mapped_tvg_ids:
|
||||
skipped_programs += 1
|
||||
# Clear element to free memory
|
||||
clear_element(elem)
|
||||
continue
|
||||
|
||||
# This programme is for a mapped channel - process it
|
||||
try:
|
||||
start_time = parse_xmltv_time(elem.get('start'))
|
||||
end_time = parse_xmltv_time(elem.get('stop'))
|
||||
title = None
|
||||
desc = None
|
||||
sub_title = None
|
||||
|
||||
# Efficiently process child elements
|
||||
for child in elem:
|
||||
if child.tag == 'title':
|
||||
title = child.text or 'No Title'
|
||||
elif child.tag == 'desc':
|
||||
desc = child.text or ''
|
||||
elif child.tag == 'sub-title':
|
||||
sub_title = child.text or ''
|
||||
|
||||
if not title:
|
||||
title = 'No Title'
|
||||
|
||||
# Extract custom properties
|
||||
custom_props = extract_custom_properties(elem)
|
||||
custom_properties_json = custom_props if custom_props else None
|
||||
|
||||
epg_id = tvg_id_to_epg_id[channel_id]
|
||||
all_programs_to_create.append(ProgramData(
|
||||
epg_id=epg_id,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
title=title,
|
||||
description=desc,
|
||||
sub_title=sub_title,
|
||||
tvg_id=channel_id,
|
||||
custom_properties=custom_properties_json
|
||||
))
|
||||
total_programs += 1
|
||||
programs_by_channel[channel_id] += 1
|
||||
|
||||
# Clear the element to free memory
|
||||
clear_element(elem)
|
||||
|
||||
# Send progress update (estimate based on programs processed)
|
||||
if total_programs - last_progress_update >= 5000:
|
||||
last_progress_update = total_programs
|
||||
# Cap at 70% during parsing phase (save 30% for DB operations)
|
||||
progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60))
|
||||
send_epg_update(epg_source.id, "parsing_programs", progress,
|
||||
processed=total_programs, channels=mapped_count)
|
||||
|
||||
# Periodic garbage collection during parsing
|
||||
if total_programs % 5000 == 0:
|
||||
gc.collect()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True)
|
||||
clear_element(elem)
|
||||
continue
|
||||
|
||||
except etree.XMLSyntaxError as xml_error:
|
||||
logger.error(f"XML syntax error parsing program data: {xml_error}")
|
||||
epg_source.status = EPGSource.STATUS_ERROR
|
||||
epg_source.last_message = f"XML parsing error: {str(xml_error)}"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error))
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing XML for programs: {e}", exc_info=True)
|
||||
raise
|
||||
finally:
|
||||
if source_file:
|
||||
source_file.close()
|
||||
source_file = None
|
||||
|
||||
# Now perform atomic delete + bulk insert
|
||||
# This ensures clients never see empty/partial EPG data
|
||||
logger.info(f"Parsed {total_programs} programs, performing atomic database update...")
|
||||
send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...")
|
||||
|
||||
batch_size = 1000
|
||||
try:
|
||||
with transaction.atomic():
|
||||
# Delete existing programs for mapped EPGs
|
||||
deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0]
|
||||
logger.debug(f"Deleted {deleted_count} existing programs")
|
||||
|
||||
# Clean up orphaned programs for unmapped EPG entries
|
||||
unmapped_epg_ids = list(EPGData.objects.filter(
|
||||
epg_source=epg_source
|
||||
).exclude(id__in=mapped_epg_ids).values_list('id', flat=True))
|
||||
|
||||
if unmapped_epg_ids:
|
||||
orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0]
|
||||
if orphaned_count > 0:
|
||||
logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries")
|
||||
|
||||
# Bulk insert all new programs in batches within the same transaction
|
||||
for i in range(0, len(all_programs_to_create), batch_size):
|
||||
batch = all_programs_to_create[i:i + batch_size]
|
||||
ProgramData.objects.bulk_create(batch)
|
||||
|
||||
# Update progress during insertion
|
||||
progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95
|
||||
if i % (batch_size * 5) == 0:
|
||||
send_epg_update(epg_source.id, "parsing_programs", min(95, progress),
|
||||
message=f"Inserting programs... {i}/{len(all_programs_to_create)}")
|
||||
|
||||
logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs")
|
||||
|
||||
except Exception as db_error:
|
||||
logger.error(f"Database error during atomic update: {db_error}", exc_info=True)
|
||||
epg_source.status = EPGSource.STATUS_ERROR
|
||||
epg_source.last_message = f"Database error: {str(db_error)}"
|
||||
epg_source.save(update_fields=['status', 'last_message'])
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error))
|
||||
return False
|
||||
finally:
|
||||
# Clear the large list to free memory
|
||||
all_programs_to_create = None
|
||||
gc.collect()
|
||||
|
||||
# If there were failures, include them in the message but continue
|
||||
if failed_entries:
|
||||
epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed
|
||||
error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries"
|
||||
stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
|
||||
epg_source.last_message = f"{stats_summary} Warning: {error_summary}"
|
||||
epg_source.updated_at = timezone.now()
|
||||
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
|
||||
# Count channels that actually got programs
|
||||
channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0)
|
||||
|
||||
# Send completion notification with mixed status
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100,
|
||||
status="success",
|
||||
message=epg_source.last_message)
|
||||
|
||||
# Explicitly release memory of large lists before returning
|
||||
del failed_entries
|
||||
gc.collect()
|
||||
|
||||
return True
|
||||
|
||||
# If all successful, set a comprehensive success message
|
||||
# Success message
|
||||
epg_source.status = EPGSource.STATUS_SUCCESS
|
||||
epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}."
|
||||
epg_source.last_message = (
|
||||
f"Parsed {total_programs:,} programs for {channels_with_programs} channels "
|
||||
f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)"
|
||||
)
|
||||
epg_source.updated_at = timezone.now()
|
||||
epg_source.save(update_fields=['status', 'last_message', 'updated_at'])
|
||||
|
||||
# Log system event for EPG refresh
|
||||
log_system_event(
|
||||
event_type='epg_refresh',
|
||||
source_name=epg_source.name,
|
||||
programs=total_programs,
|
||||
channels=channels_with_programs,
|
||||
skipped_programs=skipped_programs,
|
||||
unmapped_channels=total_epg_count - mapped_count,
|
||||
)
|
||||
|
||||
# Send completion notification with status
|
||||
send_epg_update(epg_source.id, "parsing_programs", 100,
|
||||
status="success",
|
||||
message=epg_source.last_message)
|
||||
message=epg_source.last_message,
|
||||
updated_at=epg_source.updated_at.isoformat())
|
||||
|
||||
logger.info(f"Completed parsing all programs for source: {epg_source.name}")
|
||||
logger.info(f"Completed parsing programs for source: {epg_source.name} - "
|
||||
f"{total_programs:,} programs for {channels_with_programs} channels, "
|
||||
f"skipped {skipped_programs:,} programs for unmapped channels")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
|
|
@ -1458,14 +1689,19 @@ def parse_programs_for_source(epg_source, tvg_id=None):
|
|||
return False
|
||||
finally:
|
||||
# Final memory cleanup and tracking
|
||||
|
||||
if source_file:
|
||||
try:
|
||||
source_file.close()
|
||||
except:
|
||||
pass
|
||||
source_file = None
|
||||
|
||||
# Explicitly release any remaining large data structures
|
||||
failed_entries = None
|
||||
program_count = None
|
||||
channel_count = None
|
||||
updated_count = None
|
||||
processed = None
|
||||
programs_to_create = None
|
||||
programs_by_channel = None
|
||||
mapped_epg_ids = None
|
||||
mapped_tvg_ids = None
|
||||
tvg_id_to_epg_id = None
|
||||
gc.collect()
|
||||
|
||||
# Add comprehensive memory cleanup at the end
|
||||
|
|
@ -1479,12 +1715,13 @@ def fetch_schedules_direct(source):
|
|||
logger.info(f"Fetching Schedules Direct data from source: {source.name}")
|
||||
try:
|
||||
# Get default user agent from settings
|
||||
default_user_agent_setting = CoreSettings.objects.filter(key='default-user-agent').first()
|
||||
stream_settings = CoreSettings.get_stream_settings()
|
||||
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0" # Fallback default
|
||||
default_user_agent_id = stream_settings.get('default_user_agent')
|
||||
|
||||
if default_user_agent_setting and default_user_agent_setting.value:
|
||||
if default_user_agent_id:
|
||||
try:
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_setting.value)).first()
|
||||
user_agent_obj = UserAgent.objects.filter(id=int(default_user_agent_id)).first()
|
||||
if user_agent_obj and user_agent_obj.user_agent:
|
||||
user_agent = user_agent_obj.user_agent
|
||||
logger.debug(f"Using default user agent: {user_agent}")
|
||||
|
|
@ -1899,3 +2136,20 @@ def detect_file_format(file_path=None, content=None):
# If we reach here, we couldn't reliably determine the format
return format_type, is_compressed, file_extension


def generate_dummy_epg(source):
"""
DEPRECATED: This function is no longer used.

Dummy EPG programs are now generated on-demand when they are requested
(during XMLTV export or EPG grid display), rather than being pre-generated
and stored in the database.

See: apps/output/views.py - generate_custom_dummy_programs()

This function remains for backward compatibility but should not be called.
"""
logger.warning(f"generate_dummy_epg() called for {source.name} but this function is deprecated. "
f"Dummy EPG programs are now generated on-demand.")
return True
@ -17,7 +17,6 @@ from django.views import View
|
|||
from django.utils.decorators import method_decorator
|
||||
from django.contrib.auth.decorators import login_required
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from apps.m3u.models import M3UAccountProfile
|
||||
|
||||
# Configure logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
|
@ -60,43 +59,9 @@ class DiscoverAPIView(APIView):
|
|||
base_url = request.build_absolute_uri(f'/{"/".join(uri_parts)}/').rstrip("/")
|
||||
device = HDHRDevice.objects.first()
|
||||
|
||||
# Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile)
|
||||
profiles = M3UAccountProfile.objects.filter(
|
||||
is_active=True,
|
||||
m3u_account__is_active=True, # Only include profiles from enabled M3U accounts
|
||||
).exclude(id=1)
|
||||
|
||||
# 1. Check if any profile has unlimited streams (max_streams=0)
|
||||
has_unlimited = profiles.filter(max_streams=0).exists()
|
||||
|
||||
# 2. Calculate tuner count from limited profiles
|
||||
limited_tuners = 0
|
||||
if not has_unlimited:
|
||||
limited_tuners = (
|
||||
profiles.filter(max_streams__gt=0)
|
||||
.aggregate(total=models.Sum("max_streams"))
|
||||
.get("total", 0)
|
||||
or 0
|
||||
)
|
||||
|
||||
# 3. Add custom stream count to tuner count
|
||||
custom_stream_count = Stream.objects.filter(is_custom=True).count()
|
||||
logger.debug(f"Found {custom_stream_count} custom streams")
|
||||
|
||||
# 4. Calculate final tuner count
|
||||
if has_unlimited:
|
||||
# If there are unlimited profiles, start with 10 plus custom streams
|
||||
tuner_count = 10 + custom_stream_count
|
||||
else:
|
||||
# Otherwise use the limited profile sum plus custom streams
|
||||
tuner_count = limited_tuners + custom_stream_count
|
||||
|
||||
# 5. Ensure minimum of 1 tuners
|
||||
tuner_count = max(1, tuner_count)
|
||||
|
||||
logger.debug(
|
||||
f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"
|
||||
)
|
||||
# Calculate tuner count using centralized function
|
||||
from apps.m3u.utils import calculate_tuner_count
|
||||
tuner_count = calculate_tuner_count(minimum=1, unlimited_default=10)
|
||||
|
||||
# Create a unique DeviceID for the HDHomeRun device based on profile ID or a default value
|
||||
device_ID = "12345678" # Default DeviceID
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
from django.contrib import admin
|
||||
from django.utils.html import format_html
|
||||
from .models import M3UAccount, M3UFilter, ServerGroup, UserAgent, M3UAccountProfile
import json


class M3UFilterInline(admin.TabularInline):
    model = M3UFilter

@@ -8,50 +10,181 @@ class M3UFilterInline(admin.TabularInline):
    verbose_name = "M3U Filter"
    verbose_name_plural = "M3U Filters"


@admin.register(M3UAccount)
class M3UAccountAdmin(admin.ModelAdmin):
    list_display = (
        "name",
        "server_url",
        "server_group",
        "max_streams",
        "priority",
        "is_active",
        "user_agent_display",
        "uploaded_file_link",
        "created_at",
        "updated_at",
    )
    list_filter = ("is_active", "server_group")
    search_fields = ("name", "server_url", "server_group__name")
    inlines = [M3UFilterInline]
    actions = ["activate_accounts", "deactivate_accounts"]

    # Handle both ForeignKey and ManyToManyField cases for UserAgent
    def user_agent_display(self, obj):
        if hasattr(obj, "user_agent"):  # ForeignKey case
            return obj.user_agent.user_agent if obj.user_agent else "None"
        elif hasattr(obj, "user_agents"):  # ManyToManyField case
            return ", ".join([ua.user_agent for ua in obj.user_agents.all()]) or "None"
        return "None"

    user_agent_display.short_description = "User Agent(s)"

    def vod_enabled_display(self, obj):
        """Display whether VOD is enabled for this account"""
        if obj.custom_properties:
            custom_props = obj.custom_properties or {}
            return "Yes" if custom_props.get('enable_vod', False) else "No"
        return "No"

    vod_enabled_display.short_description = "VOD Enabled"
    vod_enabled_display.boolean = True

    def uploaded_file_link(self, obj):
        if obj.uploaded_file:
            return format_html(
                "<a href='{}' target='_blank'>Download M3U</a>", obj.uploaded_file.url
            )
        return "No file uploaded"

    uploaded_file_link.short_description = "Uploaded File"

    @admin.action(description="Activate selected accounts")
    def activate_accounts(self, request, queryset):
        queryset.update(is_active=True)

    @admin.action(description="Deactivate selected accounts")
    def deactivate_accounts(self, request, queryset):
        queryset.update(is_active=False)

    # Add ManyToManyField for Django Admin (if applicable)
    if hasattr(M3UAccount, "user_agents"):
        filter_horizontal = ("user_agents",)  # Only for ManyToManyField


@admin.register(M3UFilter)
class M3UFilterAdmin(admin.ModelAdmin):
    list_display = ("m3u_account", "filter_type", "regex_pattern", "exclude")
    list_filter = ("filter_type", "exclude")
    search_fields = ("regex_pattern",)
    ordering = ("m3u_account",)


@admin.register(ServerGroup)
class ServerGroupAdmin(admin.ModelAdmin):
    list_display = ("name",)
    search_fields = ("name",)


@admin.register(M3UAccountProfile)
class M3UAccountProfileAdmin(admin.ModelAdmin):
    list_display = (
        "name",
        "m3u_account",
        "is_default",
        "is_active",
        "max_streams",
        "current_viewers",
        "account_status_display",
        "account_expiration_display",
        "last_refresh_display",
    )
    list_filter = ("is_active", "is_default", "m3u_account__account_type")
    search_fields = ("name", "m3u_account__name")
    readonly_fields = ("account_info_display",)

    def account_status_display(self, obj):
        """Display account status from custom properties"""
        status = obj.get_account_status()
        if status:
            # Create colored status display
            color_map = {
                'Active': 'green',
                'Expired': 'red',
                'Disabled': 'red',
                'Banned': 'red',
            }
            color = color_map.get(status, 'black')
            return format_html(
                '<span style="color: {};">{}</span>',
                color,
                status
            )
        return "Unknown"

    account_status_display.short_description = "Account Status"

    def account_expiration_display(self, obj):
        """Display account expiration from custom properties"""
        expiration = obj.get_account_expiration()
        if expiration:
            from datetime import datetime
            if expiration < datetime.now():
                return format_html(
                    '<span style="color: red;">{}</span>',
                    expiration.strftime('%Y-%m-%d %H:%M')
                )
            else:
                return format_html(
                    '<span style="color: green;">{}</span>',
                    expiration.strftime('%Y-%m-%d %H:%M')
                )
        return "Unknown"

    account_expiration_display.short_description = "Expires"

    def last_refresh_display(self, obj):
        """Display last refresh time from custom properties"""
        last_refresh = obj.get_last_refresh()
        if last_refresh:
            return last_refresh.strftime('%Y-%m-%d %H:%M:%S')
        return "Never"

    last_refresh_display.short_description = "Last Refresh"

    def account_info_display(self, obj):
        """Display formatted account information from custom properties"""
        if not obj.custom_properties:
            return "No account information available"

        html_parts = []

        # User Info
        user_info = obj.custom_properties.get('user_info', {})
        if user_info:
            html_parts.append("<h3>User Information:</h3>")
            html_parts.append("<ul>")
            for key, value in user_info.items():
                if key == 'exp_date' and value:
                    try:
                        from datetime import datetime
                        exp_date = datetime.fromtimestamp(float(value))
                        value = exp_date.strftime('%Y-%m-%d %H:%M:%S')
                    except (ValueError, TypeError):
                        pass
                html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
            html_parts.append("</ul>")

        # Server Info
        server_info = obj.custom_properties.get('server_info', {})
        if server_info:
            html_parts.append("<h3>Server Information:</h3>")
            html_parts.append("<ul>")
            for key, value in server_info.items():
                html_parts.append(f"<li><strong>{key}:</strong> {value}</li>")
            html_parts.append("</ul>")

        # Last Refresh
        last_refresh = obj.custom_properties.get('last_refresh')
        if last_refresh:
            html_parts.append(f"<p><strong>Last Refresh:</strong> {last_refresh}</p>")

        return format_html(''.join(html_parts)) if html_parts else "No account information available"

    account_info_display.short_description = "Account Information"
@@ -1,18 +1,44 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .api_views import (
    M3UAccountViewSet,
    M3UFilterViewSet,
    ServerGroupViewSet,
    RefreshM3UAPIView,
    RefreshSingleM3UAPIView,
    RefreshAccountInfoAPIView,
    UserAgentViewSet,
    M3UAccountProfileViewSet,
)

app_name = "m3u"

router = DefaultRouter()
router.register(r"accounts", M3UAccountViewSet, basename="m3u-account")
router.register(
    r"accounts\/(?P<account_id>\d+)\/profiles",
    M3UAccountProfileViewSet,
    basename="m3u-account-profiles",
)
router.register(
    r"accounts\/(?P<account_id>\d+)\/filters",
    M3UFilterViewSet,
    basename="m3u-filters",
)
router.register(r"server-groups", ServerGroupViewSet, basename="server-group")

urlpatterns = [
    path("refresh/", RefreshM3UAPIView.as_view(), name="m3u_refresh"),
    path(
        "refresh/<int:account_id>/",
        RefreshSingleM3UAPIView.as_view(),
        name="m3u_refresh_single",
    ),
    path(
        "refresh-account-info/<int:profile_id>/",
        RefreshAccountInfoAPIView.as_view(),
        name="m3u_refresh_account_info",
    ),
]

urlpatterns += router.urls
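For reference, the new refresh-account-info route registered above can be exercised with a plain HTTP client. A minimal sketch, assuming the API is reachable at http://localhost:9191 under an /api/m3u/ prefix with JWT bearer auth; the host, prefix, token, and profile id are assumptions, only the "refresh-account-info/<profile_id>/" path shape comes from the urlpatterns above.

# Hypothetical call to the new account-info refresh endpoint (base URL, prefix, and auth are assumed).
import requests

BASE = "http://localhost:9191/api/m3u"   # assumed host and API prefix
TOKEN = "<jwt-token>"                    # assumed auth scheme
PROFILE_ID = 3                           # an existing M3UAccountProfile id (placeholder)

resp = requests.post(
    f"{BASE}/refresh-account-info/{PROFILE_ID}/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=10,
)
print(resp.status_code, resp.json())     # expect 202 with a success message when accepted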
@@ -21,6 +21,7 @@ from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
from apps.channels.models import ChannelGroupM3UAccount
from core.serializers import UserAgentSerializer
from apps.vod.models import M3UVODCategoryRelation

from .serializers import (
    M3UAccountSerializer,

@@ -29,9 +30,8 @@ from .serializers import (
    M3UAccountProfileSerializer,
)

from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from .tasks import refresh_single_m3u_account, refresh_m3u_accounts, refresh_account_info
import json


class M3UAccountViewSet(viewsets.ModelViewSet):

@@ -78,15 +78,37 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
        # Now call super().create() to create the instance
        response = super().create(request, *args, **kwargs)

        account_type = response.data.get("account_type")
        account_id = response.data.get("id")

        # Notify frontend that a new playlist was created
        from core.utils import send_websocket_update
        send_websocket_update('updates', 'update', {
            'type': 'playlist_created',
            'playlist_id': account_id
        })

        if account_type == M3UAccount.Types.XC:
            refresh_m3u_groups(account_id)

            # Check if VOD is enabled
            enable_vod = request.data.get("enable_vod", False)
            if enable_vod:
                from apps.vod.tasks import refresh_categories

                refresh_categories(account_id)

        # After the instance is created, return the response
        return response

    def update(self, request, *args, **kwargs):
        instance = self.get_object()
        old_vod_enabled = False

        # Check current VOD setting
        if instance.custom_properties:
            custom_props = instance.custom_properties or {}
            old_vod_enabled = custom_props.get("enable_vod", False)

        # Handle file upload first, if any
        file_path = None

@@ -122,6 +144,58 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
        # Now call super().update() to update the instance
        response = super().update(request, *args, **kwargs)

        # Check if VOD setting changed and trigger refresh if needed
        new_vod_enabled = request.data.get("enable_vod", old_vod_enabled)

        if (
            instance.account_type == M3UAccount.Types.XC
            and not old_vod_enabled
            and new_vod_enabled
        ):
            # Create Uncategorized categories immediately so they're available in the UI
            from apps.vod.models import VODCategory, M3UVODCategoryRelation

            # Create movie Uncategorized category
            movie_category, _ = VODCategory.objects.get_or_create(
                name="Uncategorized",
                category_type="movie",
                defaults={}
            )

            # Create series Uncategorized category
            series_category, _ = VODCategory.objects.get_or_create(
                name="Uncategorized",
                category_type="series",
                defaults={}
            )

            # Create relations for both categories (disabled by default until first refresh)
            account_custom_props = instance.custom_properties or {}
            auto_enable_new = account_custom_props.get("auto_enable_new_groups_vod", True)

            M3UVODCategoryRelation.objects.get_or_create(
                category=movie_category,
                m3u_account=instance,
                defaults={
                    'enabled': auto_enable_new,
                    'custom_properties': {}
                }
            )

            M3UVODCategoryRelation.objects.get_or_create(
                category=series_category,
                m3u_account=instance,
                defaults={
                    'enabled': auto_enable_new,
                    'custom_properties': {}
                }
            )

            # Trigger full VOD refresh
            from apps.vod.tasks import refresh_vod_content

            refresh_vod_content.delay(instance.id)

        # After the instance is updated, return the response
        return response

@@ -143,11 +217,49 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
        # Continue with regular partial update
        return super().partial_update(request, *args, **kwargs)

    @action(detail=True, methods=["post"], url_path="refresh-vod")
    def refresh_vod(self, request, pk=None):
        """Trigger VOD content refresh for XtreamCodes accounts"""
        account = self.get_object()

        if account.account_type != M3UAccount.Types.XC:
            return Response(
                {"error": "VOD refresh is only available for XtreamCodes accounts"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Check if VOD is enabled
        vod_enabled = False
        if account.custom_properties:
            custom_props = account.custom_properties or {}
            vod_enabled = custom_props.get("enable_vod", False)

        if not vod_enabled:
            return Response(
                {"error": "VOD is not enabled for this account"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        try:
            from apps.vod.tasks import refresh_vod_content

            refresh_vod_content.delay(account.id)
            return Response(
                {"message": f"VOD refresh initiated for account {account.name}"},
                status=status.HTTP_202_ACCEPTED,
            )
        except Exception as e:
            return Response(
                {"error": f"Failed to initiate VOD refresh: {str(e)}"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

    @action(detail=True, methods=["patch"], url_path="group-settings")
    def update_group_settings(self, request, pk=None):
        """Update auto channel sync settings for M3U account groups"""
        account = self.get_object()
        group_settings = request.data.get("group_settings", [])
        category_settings = request.data.get("category_settings", [])

        try:
            for setting in group_settings:

@@ -165,11 +277,22 @@ class M3UAccountViewSet(viewsets.ModelViewSet):
                        "enabled": enabled,
                        "auto_channel_sync": auto_sync,
                        "auto_sync_channel_start": sync_start,
                        "custom_properties": custom_properties,
                    },
                )

            for setting in category_settings:
                category_id = setting.get("id")
                enabled = setting.get("enabled", True)
                custom_properties = setting.get("custom_properties", {})

                if category_id:
                    M3UVODCategoryRelation.objects.update_or_create(
                        category_id=category_id,
                        m3u_account=account,
                        defaults={
                            "enabled": enabled,
                            "custom_properties": custom_properties,
                        },
                    )

@@ -183,8 +306,6 @@ class M3UAccountViewSet(viewsets.ModelViewSet):


class M3UFilterViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for M3U filters"""

    queryset = M3UFilter.objects.all()
    serializer_class = M3UFilterSerializer

@@ -194,6 +315,23 @@ class M3UFilterViewSet(viewsets.ModelViewSet):
        except KeyError:
            return [Authenticated()]

    def get_queryset(self):
        m3u_account_id = self.kwargs["account_id"]
        return M3UFilter.objects.filter(m3u_account_id=m3u_account_id)

    def perform_create(self, serializer):
        # Get the account ID from the URL
        account_id = self.kwargs["account_id"]

        # # Get the M3UAccount instance for the account_id
        # m3u_account = M3UAccount.objects.get(id=account_id)

        # Save the 'm3u_account' in the serializer context
        serializer.context["m3u_account"] = account_id

        # Perform the actual save
        serializer.save(m3u_account_id=account_id)


class ServerGroupViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for Server Groups"""

@@ -257,6 +395,54 @@ class RefreshSingleM3UAPIView(APIView):
        )


class RefreshAccountInfoAPIView(APIView):
    """Triggers account info refresh for a single M3U account"""

    def get_permissions(self):
        try:
            return [
                perm() for perm in permission_classes_by_method[self.request.method]
            ]
        except KeyError:
            return [Authenticated()]

    @swagger_auto_schema(
        operation_description="Triggers a refresh of account information for a specific M3U profile",
        responses={202: "Account info refresh initiated", 400: "Profile not found or not XtreamCodes"},
    )
    def post(self, request, profile_id, format=None):
        try:
            from .models import M3UAccountProfile
            profile = M3UAccountProfile.objects.get(id=profile_id)
            account = profile.m3u_account

            if account.account_type != M3UAccount.Types.XC:
                return Response(
                    {
                        "success": False,
                        "error": "Account info refresh is only available for XtreamCodes accounts",
                    },
                    status=status.HTTP_400_BAD_REQUEST,
                )

            refresh_account_info.delay(profile_id)
            return Response(
                {
                    "success": True,
                    "message": f"Account info refresh initiated for profile {profile.name}.",
                },
                status=status.HTTP_202_ACCEPTED,
            )
        except M3UAccountProfile.DoesNotExist:
            return Response(
                {
                    "success": False,
                    "error": "Profile not found",
                },
                status=status.HTTP_404_NOT_FOUND,
            )


class UserAgentViewSet(viewsets.ModelViewSet):
    """Handles CRUD operations for User Agents"""
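To illustrate the group-settings action above, the PATCH body it reads looks roughly like the following. The top-level keys (group_settings, category_settings) and the category entry keys (id, enabled, custom_properties) come straight from the view; the field names inside a group entry are partly assumed, since the middle of that loop is not shown in this hunk, and all ids and values are invented.

# Illustrative PATCH payload for /accounts/<pk>/group-settings/ (values and group-entry keys are assumptions).
payload = {
    "group_settings": [
        {
            "channel_group": 12,              # group identifier key name assumed
            "enabled": True,
            "auto_channel_sync": True,
            "auto_sync_channel_start": 100,
            "custom_properties": {},
        },
    ],
    "category_settings": [
        {"id": 7, "enabled": False, "custom_properties": {}},   # VOD category relation toggle
    ],
}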
@@ -4,6 +4,13 @@ from .models import M3UAccount, M3UFilter
import re

class M3UAccountForm(forms.ModelForm):
    enable_vod = forms.BooleanField(
        required=False,
        initial=False,
        label="Enable VOD Content",
        help_text="Parse and import VOD (movies/series) content for XtreamCodes accounts"
    )

    class Meta:
        model = M3UAccount
        fields = [

@@ -13,8 +20,34 @@ class M3UAccountForm(forms.ModelForm):
            'server_group',
            'max_streams',
            'is_active',
            'enable_vod',
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Set initial value for enable_vod from custom_properties
        if self.instance and self.instance.custom_properties:
            custom_props = self.instance.custom_properties or {}
            self.fields['enable_vod'].initial = custom_props.get('enable_vod', False)

    def save(self, commit=True):
        instance = super().save(commit=False)

        # Handle enable_vod field
        enable_vod = self.cleaned_data.get('enable_vod', False)

        # Parse existing custom_properties
        custom_props = instance.custom_properties or {}

        # Update VOD preference
        custom_props['enable_vod'] = enable_vod
        instance.custom_properties = custom_props

        if commit:
            instance.save()
        return instance

    def clean_uploaded_file(self):
        uploaded_file = self.cleaned_data.get('uploaded_file')
        if uploaded_file:
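As a quick illustration of the save() path above, the checkbox never touches a dedicated column; it lands in custom_properties. A minimal sketch, assuming an existing account instance and that the other required form fields are filled in (the data values are placeholders):

# Sketch: enable_vod submitted through M3UAccountForm is persisted into custom_properties.
form = M3UAccountForm(
    data={"name": "Provider A", "max_streams": 2, "is_active": True, "enable_vod": True},
    instance=existing_account,   # an existing M3UAccount; placeholder name
)
if form.is_valid():
    account = form.save()
    assert account.custom_properties.get("enable_vod") is True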
apps/m3u/migrations/0013_alter_m3ufilter_filter_type.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 5.1.6 on 2025-07-22 21:16

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0012_alter_m3uaccount_refresh_interval'),
    ]

    operations = [
        migrations.AlterField(
            model_name='m3ufilter',
            name='filter_type',
            field=models.CharField(choices=[('group', 'Group'), ('name', 'Stream Name'), ('url', 'Stream URL')], default='group', help_text='Filter based on either group title or stream name.', max_length=50),
        ),
    ]
apps/m3u/migrations/0014_alter_m3ufilter_options_m3ufilter_order.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Generated by Django 5.1.6 on 2025-07-31 17:14

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0013_alter_m3ufilter_filter_type'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='m3ufilter',
            options={'ordering': ['order']},
        ),
        migrations.AddField(
            model_name='m3ufilter',
            name='order',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
apps/m3u/migrations/0015_alter_m3ufilter_options_m3ufilter_custom_properties.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Generated by Django 5.2.4 on 2025-08-02 16:06

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0014_alter_m3ufilter_options_m3ufilter_order'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='m3ufilter',
            options={},
        ),
        migrations.AddField(
            model_name='m3ufilter',
            name='custom_properties',
            field=models.TextField(blank=True, null=True),
        ),
    ]
apps/m3u/migrations/0016_m3uaccount_priority.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-08-20 22:35

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0015_alter_m3ufilter_options_m3ufilter_custom_properties'),
    ]

    operations = [
        migrations.AddField(
            model_name='m3uaccount',
            name='priority',
            field=models.PositiveIntegerField(default=0, help_text='Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.'),
        ),
    ]
apps/m3u/migrations/0017_alter_m3uaccount_custom_properties_and_more.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# Generated by Django 5.2.4 on 2025-09-02 15:19

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0016_m3uaccount_priority'),
    ]

    operations = [
        migrations.AlterField(
            model_name='m3uaccount',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
        migrations.AlterField(
            model_name='m3uaccount',
            name='server_url',
            field=models.URLField(blank=True, help_text='The base URL of the M3U server (optional if a file is uploaded)', max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='m3ufilter',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, null=True),
        ),
    ]
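One caveat around 0017: rows written while custom_properties was still a TextField may hold their JSON as a plain string. Whether the AlterField alone converts them cleanly depends on the database backend, so a small follow-up data migration can be useful. The sketch below is illustrative only and is not part of this diff; the migration name and function are invented.

# Hypothetical data migration: parse any string values left over from the TextField era.
import json
from django.db import migrations


def parse_custom_properties(apps, schema_editor):
    M3UAccount = apps.get_model("m3u", "M3UAccount")
    for account in M3UAccount.objects.exclude(custom_properties=None):
        props = account.custom_properties
        if isinstance(props, str):  # legacy text rows
            account.custom_properties = json.loads(props or "{}")
            account.save(update_fields=["custom_properties"])


class Migration(migrations.Migration):
    dependencies = [("m3u", "0017_alter_m3uaccount_custom_properties_and_more")]
    operations = [migrations.RunPython(parse_custom_properties, migrations.RunPython.noop)]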
apps/m3u/migrations/0018_add_profile_custom_properties.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 5.2.4 on 2025-09-09 20:57

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('m3u', '0017_alter_m3uaccount_custom_properties_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='m3uaccountprofile',
            name='custom_properties',
            field=models.JSONField(blank=True, default=dict, help_text='Custom properties for storing account information from provider (e.g., XC account details, expiration dates)', null=True),
        ),
    ]
@@ -29,6 +29,7 @@ class M3UAccount(models.Model):
        max_length=255, unique=True, help_text="Unique name for this M3U account"
    )
    server_url = models.URLField(
        max_length=1000,
        blank=True,
        null=True,
        help_text="The base URL of the M3U server (optional if a file is uploaded)",

@@ -85,7 +86,7 @@ class M3UAccount(models.Model):
    account_type = models.CharField(choices=Types.choices, default=Types.STADNARD)
    username = models.CharField(max_length=255, null=True, blank=True)
    password = models.CharField(max_length=255, null=True, blank=True)
    custom_properties = models.JSONField(default=dict, blank=True, null=True)
    refresh_interval = models.IntegerField(default=0)
    refresh_task = models.ForeignKey(
        PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True

@@ -94,6 +95,10 @@ class M3UAccount(models.Model):
        default=7,
        help_text="Number of days after which a stream will be removed if not seen in the M3U source.",
    )
    priority = models.PositiveIntegerField(
        default=0,
        help_text="Priority for VOD provider selection (higher numbers = higher priority). Used when multiple providers offer the same content.",
    )

    def __str__(self):
        return self.name

@@ -155,9 +160,11 @@ class M3UFilter(models.Model):
    """Defines filters for M3U accounts based on stream name or group title."""

    FILTER_TYPE_CHOICES = (
        ("group", "Group"),
        ("name", "Stream Name"),
        ("url", "Stream URL"),
    )

    m3u_account = models.ForeignKey(
        M3UAccount,
        on_delete=models.CASCADE,

@@ -177,6 +184,8 @@ class M3UFilter(models.Model):
        default=True,
        help_text="If True, matching items are excluded; if False, only matches are included.",
    )
    order = models.PositiveIntegerField(default=0)
    custom_properties = models.JSONField(default=dict, blank=True, null=True)

    def applies_to(self, stream_name, group_name):
        target = group_name if self.filter_type == "group" else stream_name

@@ -226,9 +235,6 @@ class ServerGroup(models.Model):
        return self.name


class M3UAccountProfile(models.Model):
    """Represents a profile associated with an M3U Account."""

@@ -257,6 +263,12 @@ class M3UAccountProfile(models.Model):
        max_length=255,
    )
    current_viewers = models.PositiveIntegerField(default=0)
    custom_properties = models.JSONField(
        default=dict,
        blank=True,
        null=True,
        help_text="Custom properties for storing account information from provider (e.g., XC account details, expiration dates)"
    )

    class Meta:
        constraints = [

@@ -268,6 +280,70 @@ class M3UAccountProfile(models.Model):
    def __str__(self):
        return f"{self.name} ({self.m3u_account.name})"

    def get_account_expiration(self):
        """Get account expiration date from custom properties if available"""
        if not self.custom_properties:
            return None

        user_info = self.custom_properties.get('user_info', {})
        exp_date = user_info.get('exp_date')

        if exp_date:
            try:
                from datetime import datetime
                # XC exp_date is typically a Unix timestamp
                if isinstance(exp_date, (int, float)):
                    return datetime.fromtimestamp(exp_date)
                elif isinstance(exp_date, str):
                    # Try to parse as timestamp first, then as ISO date
                    try:
                        return datetime.fromtimestamp(float(exp_date))
                    except ValueError:
                        return datetime.fromisoformat(exp_date)
            except (ValueError, TypeError):
                pass

        return None

    def get_account_status(self):
        """Get account status from custom properties if available"""
        if not self.custom_properties:
            return None

        user_info = self.custom_properties.get('user_info', {})
        return user_info.get('status')

    def get_max_connections(self):
        """Get maximum connections from custom properties if available"""
        if not self.custom_properties:
            return None

        user_info = self.custom_properties.get('user_info', {})
        return user_info.get('max_connections')

    def get_active_connections(self):
        """Get active connections from custom properties if available"""
        if not self.custom_properties:
            return None

        user_info = self.custom_properties.get('user_info', {})
        return user_info.get('active_cons')

    def get_last_refresh(self):
        """Get last refresh timestamp from custom properties if available"""
        if not self.custom_properties:
            return None

        last_refresh = self.custom_properties.get('last_refresh')
        if last_refresh:
            try:
                from datetime import datetime
                return datetime.fromisoformat(last_refresh)
            except (ValueError, TypeError):
                pass

        return None


@receiver(models.signals.post_save, sender=M3UAccount)
def create_profile_for_m3u_account(sender, instance, created, **kwargs):
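The helper methods above assume custom_properties mirrors the provider's XtreamCodes player_api payload. A sketch of what a populated profile might hold and how the accessors read it; the key names (user_info, exp_date, status, max_connections, active_cons, server_info, last_refresh) are the ones the methods look up, while the concrete values are invented.

# Illustrative custom_properties contents for an M3UAccountProfile (values are made up).
profile.custom_properties = {
    "user_info": {
        "status": "Active",
        "exp_date": 1767225600,        # Unix timestamp, as XC providers usually return
        "max_connections": "2",
        "active_cons": "0",
    },
    "server_info": {"url": "provider.example.com", "port": "8080"},
    "last_refresh": "2025-09-10T12:00:00",
}

profile.get_account_status()       # -> "Active"
profile.get_account_expiration()   # -> datetime for 2026-01-01 (local time)
profile.get_max_connections()      # -> "2"
profile.get_last_refresh()         # -> datetime(2025, 9, 10, 12, 0)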
@@ -1,13 +1,14 @@
from core.utils import validate_flexible_url
from rest_framework import serializers, status
from rest_framework.response import Response
from .models import M3UAccount, M3UFilter, ServerGroup, M3UAccountProfile
from core.models import UserAgent
from apps.channels.models import ChannelGroup, ChannelGroupM3UAccount
from apps.channels.serializers import (
    ChannelGroupM3UAccountSerializer,
    ChannelGroupSerializer,
)
import logging
import json

logger = logging.getLogger(__name__)

@@ -15,14 +16,30 @@ logger = logging.getLogger(__name__)
class M3UFilterSerializer(serializers.ModelSerializer):
    """Serializer for M3U Filters"""

    channel_groups = ChannelGroupM3UAccountSerializer(source="m3u_account", many=True)

    class Meta:
        model = M3UFilter
        fields = [
            "id",
            "filter_type",
            "regex_pattern",
            "exclude",
            "order",
            "custom_properties",
        ]


class M3UAccountProfileSerializer(serializers.ModelSerializer):
    account = serializers.SerializerMethodField()

    def get_account(self, obj):
        """Include basic account information for frontend use"""
        return {
            'id': obj.m3u_account.id,
            'name': obj.m3u_account.name,
            'account_type': obj.m3u_account.account_type,
            'is_xtream_codes': obj.m3u_account.account_type == 'XC'
        }

    class Meta:
        model = M3UAccountProfile
        fields = [

@@ -34,8 +51,14 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
            "current_viewers",
            "search_pattern",
            "replace_pattern",
            "custom_properties",
            "account",
        ]
        read_only_fields = ["id", "account"]
        extra_kwargs = {
            'search_pattern': {'required': False, 'allow_blank': True},
            'replace_pattern': {'required': False, 'allow_blank': True},
        }

    def create(self, validated_data):
        m3u_account = self.context.get("m3u_account")

@@ -45,9 +68,39 @@ class M3UAccountProfileSerializer(serializers.ModelSerializer):
        return super().create(validated_data)

    def validate(self, data):
        """Custom validation to handle default profiles"""
        # For updates to existing instances
        if self.instance and self.instance.is_default:
            # For default profiles, search_pattern and replace_pattern are not required
            # and we don't want to validate them since they shouldn't be changed
            return data

        # For non-default profiles or new profiles, ensure required fields are present
        if not data.get('search_pattern'):
            raise serializers.ValidationError({
                'search_pattern': ['This field is required for non-default profiles.']
            })
        if not data.get('replace_pattern'):
            raise serializers.ValidationError({
                'replace_pattern': ['This field is required for non-default profiles.']
            })

        return data

    def update(self, instance, validated_data):
        if instance.is_default:
            # For default profiles, only allow updating name and custom_properties (for notes)
            allowed_fields = {'name', 'custom_properties'}

            # Remove any fields that aren't allowed for default profiles
            disallowed_fields = set(validated_data.keys()) - allowed_fields
            if disallowed_fields:
                raise serializers.ValidationError(
                    f"Default profiles can only modify name and notes. "
                    f"Cannot modify: {', '.join(disallowed_fields)}"
                )

        return super().update(instance, validated_data)

    def destroy(self, request, *args, **kwargs):

@@ -63,7 +116,7 @@
class M3UAccountSerializer(serializers.ModelSerializer):
    """Serializer for M3U Account"""

    filters = serializers.SerializerMethodField()
    # Include user_agent as a mandatory field using its primary key.
    user_agent = serializers.PrimaryKeyRelatedField(
        queryset=UserAgent.objects.all(),

@@ -76,6 +129,16 @@
    channel_groups = ChannelGroupM3UAccountSerializer(
        source="channel_group", many=True, required=False
    )
    server_url = serializers.CharField(
        required=False,
        allow_blank=True,
        allow_null=True,
        validators=[validate_flexible_url],
    )
    enable_vod = serializers.BooleanField(required=False, write_only=True)
    auto_enable_new_groups_live = serializers.BooleanField(required=False, write_only=True)
    auto_enable_new_groups_vod = serializers.BooleanField(required=False, write_only=True)
    auto_enable_new_groups_series = serializers.BooleanField(required=False, write_only=True)

    class Meta:
        model = M3UAccount

@@ -100,8 +163,13 @@
            "username",
            "password",
            "stale_stream_days",
            "priority",
            "status",
            "last_message",
            "enable_vod",
            "auto_enable_new_groups_live",
            "auto_enable_new_groups_vod",
            "auto_enable_new_groups_series",
        ]
        extra_kwargs = {
            "password": {

@@ -110,7 +178,40 @@
            },
        }

    def to_representation(self, instance):
        data = super().to_representation(instance)

        # Parse custom_properties to get VOD preference and auto_enable_new_groups settings
        custom_props = instance.custom_properties or {}

        data["enable_vod"] = custom_props.get("enable_vod", False)
        data["auto_enable_new_groups_live"] = custom_props.get("auto_enable_new_groups_live", True)
        data["auto_enable_new_groups_vod"] = custom_props.get("auto_enable_new_groups_vod", True)
        data["auto_enable_new_groups_series"] = custom_props.get("auto_enable_new_groups_series", True)
        return data

    def update(self, instance, validated_data):
        # Handle enable_vod preference and auto_enable_new_groups settings
        enable_vod = validated_data.pop("enable_vod", None)
        auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", None)
        auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", None)
        auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", None)

        # Get existing custom_properties
        custom_props = instance.custom_properties or {}

        # Update preferences
        if enable_vod is not None:
            custom_props["enable_vod"] = enable_vod
        if auto_enable_new_groups_live is not None:
            custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
        if auto_enable_new_groups_vod is not None:
            custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
        if auto_enable_new_groups_series is not None:
            custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series

        validated_data["custom_properties"] = custom_props

        # Pop out channel group memberships so we can handle them manually
        channel_group_data = validated_data.pop("channel_group", [])

@@ -142,6 +243,29 @@
        return instance

    def create(self, validated_data):
        # Handle enable_vod preference and auto_enable_new_groups settings during creation
        enable_vod = validated_data.pop("enable_vod", False)
        auto_enable_new_groups_live = validated_data.pop("auto_enable_new_groups_live", True)
        auto_enable_new_groups_vod = validated_data.pop("auto_enable_new_groups_vod", True)
        auto_enable_new_groups_series = validated_data.pop("auto_enable_new_groups_series", True)

        # Parse existing custom_properties or create new
        custom_props = validated_data.get("custom_properties", {})

        # Set preferences (default to True for auto_enable_new_groups)
        custom_props["enable_vod"] = enable_vod
        custom_props["auto_enable_new_groups_live"] = auto_enable_new_groups_live
        custom_props["auto_enable_new_groups_vod"] = auto_enable_new_groups_vod
        custom_props["auto_enable_new_groups_series"] = auto_enable_new_groups_series
        validated_data["custom_properties"] = custom_props

        return super().create(validated_data)

    def get_filters(self, obj):
        filters = obj.filters.order_by("order")
        return M3UFilterSerializer(filters, many=True).data


class ServerGroupSerializer(serializers.ModelSerializer):
    """Serializer for Server Group"""
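Putting the serializer changes together: the write-only booleans never hit model columns; they are folded into custom_properties on create/update and re-surfaced by to_representation(). A short sketch, assuming the usual DRF request handling happens elsewhere; the instance and values are placeholders.

# Sketch: enable_vod / auto_enable_new_groups_* round-trip through custom_properties.
serializer = M3UAccountSerializer(
    instance=account,                                   # an existing M3UAccount; placeholder
    data={"enable_vod": True, "auto_enable_new_groups_vod": False},
    partial=True,
)
serializer.is_valid(raise_exception=True)
account = serializer.save()

account.custom_properties["enable_vod"]                 # True, written by update()
serializer.data["auto_enable_new_groups_vod"]           # False, re-read in to_representation()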
apps/m3u/tasks.py — 2508 lines changed; file diff suppressed because it is too large.
@@ -1,9 +1,40 @@
# apps/m3u/utils.py
import threading
import logging
from django.db import models

lock = threading.Lock()
# Dictionary to track usage: {m3u_account_id: current_usage}
active_streams_map = {}
logger = logging.getLogger(__name__)


def normalize_stream_url(url):
    """
    Normalize stream URLs for compatibility with FFmpeg.

    Handles VLC-specific syntax like udp://@239.0.0.1:1234 by removing the @ symbol.
    FFmpeg doesn't recognize the @ prefix for multicast addresses.

    Args:
        url (str): The stream URL to normalize

    Returns:
        str: The normalized URL
    """
    if not url:
        return url

    # Handle VLC-style UDP multicast URLs: udp://@239.0.0.1:1234 -> udp://239.0.0.1:1234
    # The @ symbol in VLC means "listen on all interfaces" but FFmpeg doesn't use this syntax
    if url.startswith('udp://@'):
        normalized = url.replace('udp://@', 'udp://', 1)
        logger.debug(f"Normalized VLC-style UDP URL: {url} -> {normalized}")
        return normalized

    # Could add other normalizations here in the future (rtp://@, etc.)
    return url


def increment_stream_count(account):
    with lock:

@@ -24,3 +55,64 @@ def decrement_stream_count(account):
        active_streams_map[account.id] = current_usage
        account.active_streams = current_usage
        account.save(update_fields=['active_streams'])


def calculate_tuner_count(minimum=1, unlimited_default=10):
    """
    Calculate tuner/connection count from active M3U profiles and custom streams.
    This is the centralized function used by both HDHR and XtreamCodes APIs.

    Args:
        minimum (int): Minimum number to return (default: 1)
        unlimited_default (int): Default value when unlimited profiles exist (default: 10)

    Returns:
        int: Calculated tuner/connection count
    """
    try:
        from apps.m3u.models import M3UAccountProfile
        from apps.channels.models import Stream

        # Calculate tuner count from active profiles from active M3U accounts (excluding default "custom Default" profile)
        profiles = M3UAccountProfile.objects.filter(
            is_active=True,
            m3u_account__is_active=True,  # Only include profiles from enabled M3U accounts
        ).exclude(id=1)

        # 1. Check if any profile has unlimited streams (max_streams=0)
        has_unlimited = profiles.filter(max_streams=0).exists()

        # 2. Calculate tuner count from limited profiles
        limited_tuners = 0
        if not has_unlimited:
            limited_tuners = (
                profiles.filter(max_streams__gt=0)
                .aggregate(total=models.Sum("max_streams"))
                .get("total", 0)
                or 0
            )

        # 3. Add custom stream count to tuner count
        custom_stream_count = Stream.objects.filter(is_custom=True).count()
        logger.debug(f"Found {custom_stream_count} custom streams")

        # 4. Calculate final tuner count
        if has_unlimited:
            # If there are unlimited profiles, start with unlimited_default plus custom streams
            tuner_count = unlimited_default + custom_stream_count
        else:
            # Otherwise use the limited profile sum plus custom streams
            tuner_count = limited_tuners + custom_stream_count

        # 5. Ensure minimum number
        tuner_count = max(minimum, tuner_count)

        logger.debug(
            f"Calculated tuner count: {tuner_count} (limited profiles: {limited_tuners}, custom streams: {custom_stream_count}, unlimited: {has_unlimited})"
        )

        return tuner_count

    except Exception as e:
        logger.error(f"Error calculating tuner count: {e}")
        return minimum  # Fallback to minimum value
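Usage of normalize_stream_url is straightforward; only the udp://@ rewrite shown above is applied today. calculate_tuner_count needs a configured Django environment since it queries profiles and streams, so only the URL helper is exercised in this quick illustration:

from apps.m3u.utils import normalize_stream_url

normalize_stream_url("udp://@239.0.0.1:1234")     # -> "udp://239.0.0.1:1234"
normalize_stream_url("udp://239.0.0.1:1234")      # unchanged, already FFmpeg-friendly
normalize_stream_url("http://example.com/live")   # unchanged; non-UDP URLs pass through
normalize_stream_url("")                          # falsy input is returned as-is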
@@ -3,6 +3,7 @@ from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from apps.m3u.models import M3UAccount
import json
@@ -14,3 +14,26 @@ class OutputM3UTest(TestCase):
        self.assertEqual(response.status_code, 200)
        content = response.content.decode()
        self.assertIn("#EXTM3U", content)

    def test_generate_m3u_response_post_empty_body(self):
        """
        Test that a POST request with an empty body returns 200 OK.
        """
        url = reverse('output:generate_m3u')

        response = self.client.post(url, data=None, content_type='application/x-www-form-urlencoded')
        content = response.content.decode()

        self.assertEqual(response.status_code, 200, "POST with empty body should return 200 OK")
        self.assertIn("#EXTM3U", content)

    def test_generate_m3u_response_post_with_body(self):
        """
        Test that a POST request with a non-empty body returns 403 Forbidden.
        """
        url = reverse('output:generate_m3u')

        response = self.client.post(url, data={'evilstring': 'muhahaha'})

        self.assertEqual(response.status_code, 403, "POST with body should return 403 Forbidden")
        self.assertIn("POST requests with body are not allowed, body is:", response.content.decode())
@@ -1,5 +1,5 @@
from django.urls import path, re_path, include
from .views import m3u_endpoint, epg_endpoint, xc_get, xc_movie_stream, xc_series_stream
from core.views import stream_view

app_name = "output"
apps/output/views.py — 2406 lines changed; file diff suppressed because it is too large.

apps/plugins/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
default_app_config = "apps.plugins.apps.PluginsConfig"

apps/plugins/api_urls.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from django.urls import path
from .api_views import (
    PluginsListAPIView,
    PluginReloadAPIView,
    PluginSettingsAPIView,
    PluginRunAPIView,
    PluginEnabledAPIView,
    PluginImportAPIView,
    PluginDeleteAPIView,
)

app_name = "plugins"

urlpatterns = [
    path("plugins/", PluginsListAPIView.as_view(), name="list"),
    path("plugins/reload/", PluginReloadAPIView.as_view(), name="reload"),
    path("plugins/import/", PluginImportAPIView.as_view(), name="import"),
    path("plugins/<str:key>/delete/", PluginDeleteAPIView.as_view(), name="delete"),
    path("plugins/<str:key>/settings/", PluginSettingsAPIView.as_view(), name="settings"),
    path("plugins/<str:key>/run/", PluginRunAPIView.as_view(), name="run"),
    path("plugins/<str:key>/enabled/", PluginEnabledAPIView.as_view(), name="enabled"),
]
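For orientation, the plugin these routes operate on is just a folder under the plugins directory (default /data/plugins, per the loader later in this diff) containing a plugin.py that exposes a Plugin class with a callable run(action, params, context) — the import view enforces exactly that. The sketch below guesses at a minimal layout: the attribute names (name, version, description, fields, actions) match what the loader reads, but the exact schema of fields and actions is not spelled out in this diff, so treat those dict shapes as assumptions.

# /data/plugins/hello_world/plugin.py -- hypothetical minimal plugin.
class Plugin:
    name = "Hello World"
    version = "0.1.0"
    description = "Smallest possible Dispatcharr plugin"
    fields = []                                              # settings schema; exact format assumed
    actions = [{"id": "say_hello", "label": "Say hello"}]    # action schema assumed

    def run(self, action, params, context):
        # Invoked by PluginRunAPIView via PluginManager.run_action()
        if action == "say_hello":
            return {"message": f"Hello, {params.get('name', 'world')}!"}
        raise ValueError(f"Unknown action: {action}")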
apps/plugins/api_views.py (new file, 306 lines)
@@ -0,0 +1,306 @@
|
|||
import logging
|
||||
from rest_framework.views import APIView
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
from rest_framework.decorators import api_view
|
||||
from django.conf import settings
|
||||
from django.core.files.uploadedfile import UploadedFile
|
||||
import io
|
||||
import os
|
||||
import zipfile
|
||||
import shutil
|
||||
import tempfile
|
||||
from apps.accounts.permissions import (
|
||||
Authenticated,
|
||||
permission_classes_by_method,
|
||||
)
|
||||
|
||||
from .loader import PluginManager
|
||||
from .models import PluginConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PluginsListAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def get(self, request):
|
||||
pm = PluginManager.get()
|
||||
# Ensure registry is up-to-date on each request
|
||||
pm.discover_plugins()
|
||||
return Response({"plugins": pm.list_plugins()})
|
||||
|
||||
|
||||
class PluginReloadAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request):
|
||||
pm = PluginManager.get()
|
||||
pm.discover_plugins()
|
||||
return Response({"success": True, "count": len(pm._registry)})
|
||||
|
||||
|
||||
class PluginImportAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request):
|
||||
file: UploadedFile = request.FILES.get("file")
|
||||
if not file:
|
||||
return Response({"success": False, "error": "Missing 'file' upload"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
pm = PluginManager.get()
|
||||
plugins_dir = pm.plugins_dir
|
||||
|
||||
try:
|
||||
zf = zipfile.ZipFile(file)
|
||||
except zipfile.BadZipFile:
|
||||
return Response({"success": False, "error": "Invalid zip file"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Extract to a temporary directory first to avoid server reload thrash
|
||||
tmp_root = tempfile.mkdtemp(prefix="plugin_import_")
|
||||
try:
|
||||
file_members = [m for m in zf.infolist() if not m.is_dir()]
|
||||
if not file_members:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Archive is empty"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
for member in file_members:
|
||||
name = member.filename
|
||||
if not name or name.endswith("/"):
|
||||
continue
|
||||
# Normalize and prevent path traversal
|
||||
norm = os.path.normpath(name)
|
||||
if norm.startswith("..") or os.path.isabs(norm):
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Unsafe path in archive"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
dest_path = os.path.join(tmp_root, norm)
|
||||
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
|
||||
with zf.open(member, 'r') as src, open(dest_path, 'wb') as dst:
|
||||
shutil.copyfileobj(src, dst)
|
||||
|
||||
# Find candidate directory containing plugin.py or __init__.py
|
||||
candidates = []
|
||||
for dirpath, dirnames, filenames in os.walk(tmp_root):
|
||||
has_pluginpy = "plugin.py" in filenames
|
||||
has_init = "__init__.py" in filenames
|
||||
if has_pluginpy or has_init:
|
||||
depth = len(os.path.relpath(dirpath, tmp_root).split(os.sep))
|
||||
candidates.append((0 if has_pluginpy else 1, depth, dirpath))
|
||||
if not candidates:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": "Invalid plugin: missing plugin.py or package __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
candidates.sort()
|
||||
chosen = candidates[0][2]
|
||||
# Determine plugin key: prefer chosen folder name; if chosen is tmp_root, use zip base name
|
||||
base_name = os.path.splitext(getattr(file, "name", "plugin"))[0]
|
||||
plugin_key = os.path.basename(chosen.rstrip(os.sep))
|
||||
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
|
||||
plugin_key = base_name
|
||||
plugin_key = plugin_key.replace(" ", "_").lower()
|
||||
|
||||
final_dir = os.path.join(plugins_dir, plugin_key)
|
||||
if os.path.exists(final_dir):
|
||||
# If final dir exists but contains a valid plugin, refuse; otherwise clear it
|
||||
if os.path.exists(os.path.join(final_dir, "plugin.py")) or os.path.exists(os.path.join(final_dir, "__init__.py")):
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
return Response({"success": False, "error": f"Plugin '{plugin_key}' already exists"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
try:
|
||||
shutil.rmtree(final_dir)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Move chosen directory into final location
|
||||
if chosen.rstrip(os.sep) == tmp_root.rstrip(os.sep):
|
||||
# Move all contents into final_dir
|
||||
os.makedirs(final_dir, exist_ok=True)
|
||||
for item in os.listdir(tmp_root):
|
||||
shutil.move(os.path.join(tmp_root, item), os.path.join(final_dir, item))
|
||||
else:
|
||||
shutil.move(chosen, final_dir)
|
||||
# Cleanup temp
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
target_dir = final_dir
|
||||
finally:
|
||||
try:
|
||||
shutil.rmtree(tmp_root, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Reload discovery and validate plugin entry
|
||||
pm.discover_plugins()
|
||||
plugin = pm._registry.get(plugin_key)
|
||||
if not plugin:
|
||||
# Cleanup the copied folder to avoid leaving invalid plugin behind
|
||||
try:
|
||||
shutil.rmtree(target_dir, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
return Response({"success": False, "error": "Invalid plugin: missing Plugin class in plugin.py or __init__.py"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Extra validation: ensure Plugin.run exists
|
||||
instance = getattr(plugin, "instance", None)
|
||||
run_method = getattr(instance, "run", None)
|
||||
if not callable(run_method):
|
||||
try:
|
||||
shutil.rmtree(target_dir, ignore_errors=True)
|
||||
except Exception:
|
||||
pass
|
||||
return Response({"success": False, "error": "Invalid plugin: Plugin class must define a callable run(action, params, context)"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Find DB config to return enabled/ever_enabled
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=plugin_key)
|
||||
enabled = cfg.enabled
|
||||
ever_enabled = getattr(cfg, "ever_enabled", False)
|
||||
except PluginConfig.DoesNotExist:
|
||||
enabled = False
|
||||
ever_enabled = False
|
||||
|
||||
return Response({
|
||||
"success": True,
|
||||
"plugin": {
|
||||
"key": plugin.key,
|
||||
"name": plugin.name,
|
||||
"version": plugin.version,
|
||||
"description": plugin.description,
|
||||
"enabled": enabled,
|
||||
"ever_enabled": ever_enabled,
|
||||
"fields": plugin.fields or [],
|
||||
"actions": plugin.actions or [],
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
class PluginSettingsAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
data = request.data or {}
|
||||
settings = data.get("settings", {})
|
||||
try:
|
||||
updated = pm.update_settings(key, settings)
|
||||
return Response({"success": True, "settings": updated})
|
||||
except Exception as e:
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class PluginRunAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
action = request.data.get("action")
|
||||
params = request.data.get("params", {})
|
||||
if not action:
|
||||
return Response({"success": False, "error": "Missing 'action'"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Respect plugin enabled flag
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=key)
|
||||
if not cfg.enabled:
|
||||
return Response({"success": False, "error": "Plugin is disabled"}, status=status.HTTP_403_FORBIDDEN)
|
||||
except PluginConfig.DoesNotExist:
|
||||
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
try:
|
||||
result = pm.run_action(key, action, params)
|
||||
return Response({"success": True, "result": result})
|
||||
except PermissionError as e:
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_403_FORBIDDEN)
|
||||
except Exception as e:
|
||||
logger.exception("Plugin action failed")
|
||||
return Response({"success": False, "error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class PluginEnabledAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def post(self, request, key):
|
||||
enabled = request.data.get("enabled")
|
||||
if enabled is None:
|
||||
return Response({"success": False, "error": "Missing 'enabled' boolean"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
try:
|
||||
cfg = PluginConfig.objects.get(key=key)
|
||||
cfg.enabled = bool(enabled)
|
||||
# Mark that this plugin has been enabled at least once
|
||||
if cfg.enabled and not cfg.ever_enabled:
|
||||
cfg.ever_enabled = True
|
||||
cfg.save(update_fields=["enabled", "ever_enabled", "updated_at"])
|
||||
return Response({"success": True, "enabled": cfg.enabled, "ever_enabled": cfg.ever_enabled})
|
||||
except PluginConfig.DoesNotExist:
|
||||
return Response({"success": False, "error": "Plugin not found"}, status=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
|
||||
class PluginDeleteAPIView(APIView):
|
||||
def get_permissions(self):
|
||||
try:
|
||||
return [
|
||||
perm() for perm in permission_classes_by_method[self.request.method]
|
||||
]
|
||||
except KeyError:
|
||||
return [Authenticated()]
|
||||
|
||||
def delete(self, request, key):
|
||||
pm = PluginManager.get()
|
||||
plugins_dir = pm.plugins_dir
|
||||
target_dir = os.path.join(plugins_dir, key)
|
||||
# Safety: ensure path inside plugins_dir
|
||||
abs_plugins = os.path.abspath(plugins_dir) + os.sep
|
||||
abs_target = os.path.abspath(target_dir)
|
||||
if not abs_target.startswith(abs_plugins):
|
||||
return Response({"success": False, "error": "Invalid plugin path"}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Remove files
|
||||
if os.path.isdir(target_dir):
|
||||
try:
|
||||
shutil.rmtree(target_dir)
|
||||
except Exception as e:
|
||||
return Response({"success": False, "error": f"Failed to delete plugin files: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
# Remove DB record
|
||||
try:
|
||||
PluginConfig.objects.filter(key=key).delete()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Reload registry
|
||||
pm.discover_plugins()
|
||||
return Response({"success": True})
|
||||
apps/plugins/apps.py (new file, 54 lines)
@@ -0,0 +1,54 @@
from django.apps import AppConfig
import os
import sys
from django.db.models.signals import post_migrate


class PluginsConfig(AppConfig):
    name = "apps.plugins"
    verbose_name = "Plugins"

    def ready(self):
        """Wire up plugin discovery without hitting the DB during app init.

        - Skip during common management commands that don't need discovery.
        - Register post_migrate handler to sync plugin registry to DB after migrations.
        - Do an in-memory discovery (no DB) so registry is available early.
        """
        try:
            # Allow explicit opt-out via env var
            if os.environ.get("DISPATCHARR_SKIP_PLUGIN_AUTODISCOVERY", "").lower() in ("1", "true", "yes"):
                return

            argv = sys.argv[1:] if len(sys.argv) > 1 else []
            mgmt_cmds_to_skip = {
                # Skip immediate discovery for these commands
                "makemigrations", "collectstatic", "check", "test", "shell", "showmigrations",
            }
            if argv and argv[0] in mgmt_cmds_to_skip:
                return

            # Run discovery with DB sync after the plugins app has been migrated
            def _post_migrate_discover(sender=None, app_config=None, **kwargs):
                try:
                    if app_config and getattr(app_config, 'label', None) != 'plugins':
                        return
                    from .loader import PluginManager
                    PluginManager.get().discover_plugins(sync_db=True)
                except Exception:
                    import logging
                    logging.getLogger(__name__).exception("Plugin discovery failed in post_migrate")

            post_migrate.connect(
                _post_migrate_discover,
                dispatch_uid="apps.plugins.post_migrate_discover",
            )

            # Perform non-DB discovery now to populate in-memory registry.
            from .loader import PluginManager
            PluginManager.get().discover_plugins(sync_db=False)
        except Exception:
            # Avoid breaking startup due to plugin errors
            import logging

            logging.getLogger(__name__).exception("Plugin discovery wiring failed during app ready")
254  apps/plugins/loader.py  Normal file
@@ -0,0 +1,254 @@
import importlib
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

from django.db import transaction

from .models import PluginConfig

logger = logging.getLogger(__name__)


@dataclass
class LoadedPlugin:
    key: str
    name: str
    version: str = ""
    description: str = ""
    module: Any = None
    instance: Any = None
    fields: List[Dict[str, Any]] = field(default_factory=list)
    actions: List[Dict[str, Any]] = field(default_factory=list)


class PluginManager:
    """Singleton manager that discovers and runs plugins from /data/plugins."""

    _instance: Optional["PluginManager"] = None

    @classmethod
    def get(cls) -> "PluginManager":
        if not cls._instance:
            cls._instance = PluginManager()
        return cls._instance

    def __init__(self) -> None:
        self.plugins_dir = os.environ.get("DISPATCHARR_PLUGINS_DIR", "/data/plugins")
        self._registry: Dict[str, LoadedPlugin] = {}

        # Ensure plugins directory exists
        os.makedirs(self.plugins_dir, exist_ok=True)
        if self.plugins_dir not in sys.path:
            sys.path.append(self.plugins_dir)

    def discover_plugins(self, *, sync_db: bool = True) -> Dict[str, LoadedPlugin]:
        if sync_db:
            logger.info(f"Discovering plugins in {self.plugins_dir}")
        else:
            logger.debug(f"Discovering plugins (no DB sync) in {self.plugins_dir}")
        self._registry.clear()

        try:
            for entry in sorted(os.listdir(self.plugins_dir)):
                path = os.path.join(self.plugins_dir, entry)
                if not os.path.isdir(path):
                    continue

                plugin_key = entry.replace(" ", "_").lower()

                try:
                    self._load_plugin(plugin_key, path)
                except Exception:
                    logger.exception(f"Failed to load plugin '{plugin_key}' from {path}")

            logger.info(f"Discovered {len(self._registry)} plugin(s)")
        except FileNotFoundError:
            logger.warning(f"Plugins directory not found: {self.plugins_dir}")

        # Sync DB records (optional)
        if sync_db:
            try:
                self._sync_db_with_registry()
            except Exception:
                # Defer sync if database is not ready (e.g., first startup before migrate)
                logger.exception("Deferring plugin DB sync; database not ready yet")
        return self._registry

    def _load_plugin(self, key: str, path: str):
        # Plugin can be a package and/or contain plugin.py. Prefer plugin.py when present.
        has_pkg = os.path.exists(os.path.join(path, "__init__.py"))
        has_pluginpy = os.path.exists(os.path.join(path, "plugin.py"))
        if not (has_pkg or has_pluginpy):
            logger.debug(f"Skipping {path}: no plugin.py or package")
            return

        candidate_modules = []
        if has_pluginpy:
            candidate_modules.append(f"{key}.plugin")
        if has_pkg:
            candidate_modules.append(key)

        module = None
        plugin_cls = None
        last_error = None
        for module_name in candidate_modules:
            try:
                logger.debug(f"Importing plugin module {module_name}")
                module = importlib.import_module(module_name)
                plugin_cls = getattr(module, "Plugin", None)
                if plugin_cls is not None:
                    break
                else:
                    logger.warning(f"Module {module_name} has no Plugin class")
            except Exception as e:
                last_error = e
                logger.exception(f"Error importing module {module_name}")

        if plugin_cls is None:
            if last_error:
                raise last_error
            else:
                logger.warning(f"No Plugin class found for {key}; skipping")
                return

        instance = plugin_cls()

        name = getattr(instance, "name", key)
        version = getattr(instance, "version", "")
        description = getattr(instance, "description", "")
        fields = getattr(instance, "fields", [])
        actions = getattr(instance, "actions", [])

        self._registry[key] = LoadedPlugin(
            key=key,
            name=name,
            version=version,
            description=description,
            module=module,
            instance=instance,
            fields=fields,
            actions=actions,
        )

    def _sync_db_with_registry(self):
        with transaction.atomic():
            for key, lp in self._registry.items():
                obj, _ = PluginConfig.objects.get_or_create(
                    key=key,
                    defaults={
                        "name": lp.name,
                        "version": lp.version,
                        "description": lp.description,
                        "settings": {},
                    },
                )
                # Update meta if changed
                changed = False
                if obj.name != lp.name:
                    obj.name = lp.name
                    changed = True
                if obj.version != lp.version:
                    obj.version = lp.version
                    changed = True
                if obj.description != lp.description:
                    obj.description = lp.description
                    changed = True
                if changed:
                    obj.save()

    def list_plugins(self) -> List[Dict[str, Any]]:
        from .models import PluginConfig

        plugins: List[Dict[str, Any]] = []
        try:
            configs = {c.key: c for c in PluginConfig.objects.all()}
        except Exception as e:
            # Database might not be migrated yet; fall back to registry only
            logger.warning("PluginConfig table unavailable; listing registry only: %s", e)
            configs = {}

        # First, include all discovered plugins
        for key, lp in self._registry.items():
            conf = configs.get(key)
            plugins.append(
                {
                    "key": key,
                    "name": lp.name,
                    "version": lp.version,
                    "description": lp.description,
                    "enabled": conf.enabled if conf else False,
                    "ever_enabled": getattr(conf, "ever_enabled", False) if conf else False,
                    "fields": lp.fields or [],
                    "settings": (conf.settings if conf else {}),
                    "actions": lp.actions or [],
                    "missing": False,
                }
            )

        # Then, include any DB-only configs (files missing or failed to load)
        discovered_keys = set(self._registry.keys())
        for key, conf in configs.items():
            if key in discovered_keys:
                continue
            plugins.append(
                {
                    "key": key,
                    "name": conf.name,
                    "version": conf.version,
                    "description": conf.description,
                    "enabled": conf.enabled,
                    "ever_enabled": getattr(conf, "ever_enabled", False),
                    "fields": [],
                    "settings": conf.settings or {},
                    "actions": [],
                    "missing": True,
                }
            )

        return plugins

    def get_plugin(self, key: str) -> Optional[LoadedPlugin]:
        return self._registry.get(key)

    def update_settings(self, key: str, settings: Dict[str, Any]) -> Dict[str, Any]:
        cfg = PluginConfig.objects.get(key=key)
        cfg.settings = settings or {}
        cfg.save(update_fields=["settings", "updated_at"])
        return cfg.settings

    def run_action(self, key: str, action_id: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        lp = self.get_plugin(key)
        if not lp or not lp.instance:
            raise ValueError(f"Plugin '{key}' not found")

        cfg = PluginConfig.objects.get(key=key)
        if not cfg.enabled:
            raise PermissionError(f"Plugin '{key}' is disabled")
        params = params or {}

        # Provide a context object to the plugin
        context = {
            "settings": cfg.settings or {},
            "logger": logger,
            "actions": {a.get("id"): a for a in (lp.actions or [])},
        }

        # Run either via Celery if plugin provides a delayed method, or inline
        run_method = getattr(lp.instance, "run", None)
        if not callable(run_method):
            raise ValueError(f"Plugin '{key}' has no runnable 'run' method")

        try:
            result = run_method(action_id, params, context)
        except Exception:
            logger.exception(f"Plugin '{key}' action '{action_id}' failed")
            raise

        # Normalize return
        if isinstance(result, dict):
            return result
        return {"status": "ok", "result": result}
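Given the loader above, a plugin is just a directory under /data/plugins whose plugin.py (or package __init__.py) exposes a Plugin class. A minimal sketch of such a module, inferred from the attributes the loader reads (name, version, description, fields, actions) and the run(action_id, params, context) call in run_action; the specific field and action entries are made-up examples, not part of this PR:

    # /data/plugins/example_plugin/plugin.py  (hypothetical example)

    class Plugin:
        name = "Example Plugin"
        version = "0.1.0"
        description = "Demonstrates the interface PluginManager expects."

        # Surfaced through PluginFieldSerializer and persisted into PluginConfig.settings
        fields = [
            {"id": "greeting", "label": "Greeting", "type": "string", "default": "hello"},
        ]

        # Each action id is what the API passes back to run() as action_id
        actions = [
            {"id": "say_hello", "label": "Say hello", "description": "Log a greeting"},
        ]

        def run(self, action_id, params, context):
            logger = context["logger"]
            settings = context["settings"]
            if action_id == "say_hello":
                logger.info(f"{settings.get('greeting', 'hello')} from the example plugin")
                return {"status": "ok"}
            return {"status": "error", "error": f"Unknown action: {action_id}"}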
29  apps/plugins/migrations/0001_initial.py  Normal file
@@ -0,0 +1,29 @@
# Generated by Django 5.2.4 on 2025-09-13 13:51

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PluginConfig',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=128, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('version', models.CharField(blank=True, default='', max_length=64)),
                ('description', models.TextField(blank=True, default='')),
                ('enabled', models.BooleanField(default=False)),
                ('ever_enabled', models.BooleanField(default=False)),
                ('settings', models.JSONField(blank=True, default=dict)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
1  apps/plugins/migrations/__init__.py  Normal file
@@ -0,0 +1 @@
# This file marks the migrations package for the plugins app.
19  apps/plugins/models.py  Normal file
@@ -0,0 +1,19 @@
from django.db import models


class PluginConfig(models.Model):
    """Stores discovered plugins and their persisted settings."""

    key = models.CharField(max_length=128, unique=True)
    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64, blank=True, default="")
    description = models.TextField(blank=True, default="")
    enabled = models.BooleanField(default=False)
    # Tracks whether this plugin has ever been enabled at least once
    ever_enabled = models.BooleanField(default=False)
    settings = models.JSONField(default=dict, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self) -> str:
        return f"{self.name} ({self.key})"
28  apps/plugins/serializers.py  Normal file
@@ -0,0 +1,28 @@
from rest_framework import serializers


class PluginActionSerializer(serializers.Serializer):
    id = serializers.CharField()
    label = serializers.CharField()
    description = serializers.CharField(required=False, allow_blank=True)


class PluginFieldSerializer(serializers.Serializer):
    id = serializers.CharField()
    label = serializers.CharField()
    type = serializers.ChoiceField(choices=["string", "number", "boolean", "select"])  # simple types
    default = serializers.JSONField(required=False)
    help_text = serializers.CharField(required=False, allow_blank=True)
    options = serializers.ListField(child=serializers.DictField(), required=False)


class PluginSerializer(serializers.Serializer):
    key = serializers.CharField()
    name = serializers.CharField()
    version = serializers.CharField(allow_blank=True)
    description = serializers.CharField(allow_blank=True)
    enabled = serializers.BooleanField()
    fields = PluginFieldSerializer(many=True)
    settings = serializers.JSONField()
    actions = PluginActionSerializer(many=True)
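As a rough sketch of how these serializers line up with the manager (the actual view wiring is not shown in this section), PluginSerializer can be applied directly to the dicts returned by PluginManager.list_plugins():

    # Hypothetical usage inside a DRF view
    from apps.plugins.loader import PluginManager
    from apps.plugins.serializers import PluginSerializer

    plugins = PluginManager.get().list_plugins()        # list of plain dicts
    payload = PluginSerializer(plugins, many=True).data
    # extra keys such as "ever_enabled" and "missing" are simply not serialized here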
@@ -1,4 +1,6 @@
"""Shared configuration between proxy types"""
import time
from django.db import connection

class BaseConfig:
    DEFAULT_USER_AGENT = 'VLC/3.0.20 LibVLC/3.0.20'  # Will only be used if connection to settings fails
@@ -12,13 +14,29 @@ class BaseConfig:
    BUFFERING_TIMEOUT = 15  # Seconds to wait for buffering before switching streams
    BUFFER_SPEED = 1  # What speed to consider the stream buffering, 1x is normal speed, 2x is double speed, etc.

    # Cache for proxy settings (class-level, shared across all instances)
    _proxy_settings_cache = None
    _proxy_settings_cache_time = 0
    _proxy_settings_cache_ttl = 10  # Cache for 10 seconds

    @classmethod
    def get_proxy_settings(cls):
        """Get proxy settings from CoreSettings JSON data with fallback to defaults"""
        """Get proxy settings from CoreSettings JSON data with fallback to defaults (cached)"""
        # Check if cache is still valid
        now = time.time()
        if cls._proxy_settings_cache is not None and (now - cls._proxy_settings_cache_time) < cls._proxy_settings_cache_ttl:
            return cls._proxy_settings_cache

        # Cache miss or expired - fetch from database
        try:
            from core.models import CoreSettings
            return CoreSettings.get_proxy_settings()
            settings = CoreSettings.get_proxy_settings()
            cls._proxy_settings_cache = settings
            cls._proxy_settings_cache_time = now
            return settings

        except Exception:
            # Return defaults if database query fails
            return {
                "buffering_timeout": 15,
                "buffering_speed": 1.0,
@@ -27,6 +45,13 @@ class BaseConfig:
                "channel_init_grace_period": 5,
            }

        finally:
            # Always close the connection after reading settings
            try:
                connection.close()
            except Exception:
                pass

    @classmethod
    def get_redis_chunk_ttl(cls):
        """Get Redis chunk TTL from database or default"""
@@ -69,10 +94,10 @@ class TSConfig(BaseConfig):
    CLEANUP_INTERVAL = 60  # Check for inactive channels every 60 seconds

    # Client tracking settings
    CLIENT_RECORD_TTL = 5  # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLIENT_RECORD_TTL = 60  # How long client records persist in Redis (seconds). Client will be considered MIA after this time.
    CLEANUP_CHECK_INTERVAL = 1  # How often to check for disconnected clients (seconds)
    CLIENT_HEARTBEAT_INTERVAL = 1  # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 5.0  # How many heartbeat intervals before client considered ghost (5 would mean 5 seconds if heartbeat interval is 1)
    CLIENT_HEARTBEAT_INTERVAL = 5  # How often to send client heartbeats (seconds)
    GHOST_CLIENT_MULTIPLIER = 6.0  # How many heartbeat intervals before client considered ghost (6 would mean 30 seconds if heartbeat interval is 5)
    CLIENT_WAIT_TIMEOUT = 30  # Seconds to wait for client to connect

    # Stream health and recovery settings
@@ -10,6 +10,7 @@ import gc  # Add import for garbage collection
from core.utils import RedisClient
from apps.proxy.ts_proxy.channel_status import ChannelStatus
from core.utils import send_websocket_update
from apps.proxy.vod_proxy.connection_manager import get_connection_manager

logger = logging.getLogger(__name__)

@@ -59,3 +60,13 @@ def fetch_channel_stats():
    # Explicitly clean up large data structures
    all_channels = None
    gc.collect()

@shared_task
def cleanup_vod_connections():
    """Clean up stale VOD connections"""
    try:
        connection_manager = get_connection_manager()
        connection_manager.cleanup_stale_connections(max_age_seconds=3600)  # 1 hour
        logger.info("VOD connection cleanup completed")
    except Exception as e:
        logger.error(f"Error in VOD connection cleanup: {e}", exc_info=True)
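cleanup_vod_connections is a plain @shared_task, so something still has to schedule it. A beat entry along these lines would run it periodically; the schedule name, task module path, and interval are assumptions, since the actual beat configuration is not part of this hunk:

    # settings sketch (assumed names and task path)
    CELERY_BEAT_SCHEDULE = {
        "cleanup-vod-connections": {
            "task": "apps.proxy.ts_proxy.tasks.cleanup_vod_connections",  # assumed dotted path
            "schedule": 300.0,  # every 5 minutes; connections idle for over an hour get dropped
        },
    }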
|
|
|||
|
|
@ -8,10 +8,11 @@ import gevent
|
|||
from typing import Set, Optional
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from redis.exceptions import ConnectionError, TimeoutError
|
||||
from .constants import EventType
|
||||
from .constants import EventType, ChannelState, ChannelMetadataField
|
||||
from .config_helper import ConfigHelper
|
||||
from .redis_keys import RedisKeys
|
||||
from .utils import get_logger
|
||||
from core.utils import send_websocket_update
|
||||
|
||||
logger = get_logger()
|
||||
|
||||
|
|
@ -25,6 +26,7 @@ class ClientManager:
|
|||
self.lock = threading.Lock()
|
||||
self.last_active_time = time.time()
|
||||
self.worker_id = worker_id # Store worker ID as instance variable
|
||||
self._heartbeat_running = True # Flag to control heartbeat thread
|
||||
|
||||
# STANDARDIZED KEYS: Move client set under channel namespace
|
||||
self.client_set_key = RedisKeys.clients(channel_id)
|
||||
|
|
@ -32,61 +34,78 @@ class ClientManager:
|
|||
self.heartbeat_interval = ConfigHelper.get('CLIENT_HEARTBEAT_INTERVAL', 10)
|
||||
self.last_heartbeat_time = {}
|
||||
|
||||
# Get ProxyServer instance for ownership checks
|
||||
from .server import ProxyServer
|
||||
self.proxy_server = ProxyServer.get_instance()
|
||||
|
||||
# Start heartbeat thread for local clients
|
||||
self._start_heartbeat_thread()
|
||||
self._registered_clients = set() # Track already registered client IDs
|
||||
|
||||
def _start_heartbeat_thread(self):
|
||||
"""Start thread to regularly refresh client presence in Redis"""
|
||||
def heartbeat_task():
|
||||
no_clients_count = 0 # Track consecutive empty cycles
|
||||
max_empty_cycles = 3 # Exit after this many consecutive empty checks
|
||||
def _trigger_stats_update(self):
|
||||
"""Trigger a channel stats update via WebSocket"""
|
||||
try:
|
||||
# Import here to avoid potential import issues
|
||||
from apps.proxy.ts_proxy.channel_status import ChannelStatus
|
||||
import redis
|
||||
from django.conf import settings
|
||||
|
||||
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
# Get all channels from Redis using settings
|
||||
redis_url = getattr(settings, 'REDIS_URL', 'redis://localhost:6379/0')
|
||||
redis_client = redis.Redis.from_url(redis_url, decode_responses=True)
|
||||
all_channels = []
|
||||
cursor = 0
|
||||
|
||||
while True:
|
||||
cursor, keys = redis_client.scan(cursor, match="ts_proxy:channel:*:clients", count=100)
|
||||
for key in keys:
|
||||
# Extract channel ID from key
|
||||
parts = key.split(':')
|
||||
if len(parts) >= 4:
|
||||
ch_id = parts[2]
|
||||
channel_info = ChannelStatus.get_basic_channel_info(ch_id)
|
||||
if channel_info:
|
||||
all_channels.append(channel_info)
|
||||
|
||||
if cursor == 0:
|
||||
break
|
||||
|
||||
# Send WebSocket update using existing infrastructure
|
||||
send_websocket_update(
|
||||
"updates",
|
||||
"update",
|
||||
{
|
||||
"success": True,
|
||||
"type": "channel_stats",
|
||||
"stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to trigger stats update: {e}")
|
||||
|
||||
def _start_heartbeat_thread(self):
|
||||
"""Start thread to regularly refresh client presence in Redis for local clients"""
|
||||
def heartbeat_task():
|
||||
logger.debug(f"Started heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
while self._heartbeat_running:
|
||||
try:
|
||||
# Wait for the interval
|
||||
gevent.sleep(self.heartbeat_interval)
|
||||
# Wait for the interval, but check stop flag frequently for quick shutdown
|
||||
# Sleep in 1-second increments to allow faster response to stop signal
|
||||
for _ in range(int(self.heartbeat_interval)):
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
# Final check before doing work
|
||||
if not self._heartbeat_running:
|
||||
break
|
||||
|
||||
# Send heartbeat for all local clients
|
||||
with self.lock:
|
||||
if not self.clients or not self.redis_client:
|
||||
# No clients left, increment our counter
|
||||
no_clients_count += 1
|
||||
|
||||
# Check if we're in a shutdown delay period before exiting
|
||||
in_shutdown_delay = False
|
||||
if self.redis_client:
|
||||
try:
|
||||
disconnect_key = RedisKeys.last_client_disconnect(self.channel_id)
|
||||
disconnect_time_bytes = self.redis_client.get(disconnect_key)
|
||||
if disconnect_time_bytes:
|
||||
disconnect_time = float(disconnect_time_bytes.decode('utf-8'))
|
||||
elapsed = time.time() - disconnect_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if elapsed < shutdown_delay:
|
||||
in_shutdown_delay = True
|
||||
logger.debug(f"Channel {self.channel_id} in shutdown delay: {elapsed:.1f}s of {shutdown_delay}s elapsed")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking shutdown delay: {e}")
|
||||
|
||||
# Only exit if we've seen no clients for several consecutive checks AND we're not in shutdown delay
|
||||
if no_clients_count >= max_empty_cycles and not in_shutdown_delay:
|
||||
logger.info(f"No clients for channel {self.channel_id} after {no_clients_count} consecutive checks and not in shutdown delay, exiting heartbeat thread")
|
||||
return # This exits the thread
|
||||
|
||||
# Skip this cycle if we have no clients but continue if in shutdown delay
|
||||
if not in_shutdown_delay:
|
||||
continue
|
||||
else:
|
||||
# Reset counter during shutdown delay to prevent premature exit
|
||||
no_clients_count = 0
|
||||
continue
|
||||
else:
|
||||
# Reset counter when we see clients
|
||||
no_clients_count = 0
|
||||
# Skip this cycle if we have no local clients
|
||||
if not self.clients:
|
||||
continue
|
||||
|
||||
# IMPROVED GHOST DETECTION: Check for stale clients before sending heartbeats
|
||||
current_time = time.time()
|
||||
|
|
@ -157,11 +176,20 @@ class ClientManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Error in client heartbeat thread: {e}")
|
||||
|
||||
logger.debug(f"Heartbeat thread exiting for channel {self.channel_id}")
|
||||
|
||||
thread = threading.Thread(target=heartbeat_task, daemon=True)
|
||||
thread.name = f"client-heartbeat-{self.channel_id}"
|
||||
thread.start()
|
||||
logger.debug(f"Started client heartbeat thread for channel {self.channel_id} (interval: {self.heartbeat_interval}s)")
|
||||
|
||||
def stop(self):
|
||||
"""Stop the heartbeat thread and cleanup"""
|
||||
logger.debug(f"Stopping ClientManager for channel {self.channel_id}")
|
||||
self._heartbeat_running = False
|
||||
# Give the thread a moment to exit gracefully
|
||||
# Note: We don't join() here because it's a daemon thread and will exit on its own
|
||||
|
||||
def _execute_redis_command(self, command_func):
|
||||
"""Execute Redis command with error handling"""
|
||||
if not self.redis_client:
|
||||
|
|
@ -260,6 +288,9 @@ class ClientManager:
|
|||
json.dumps(event_data)
|
||||
)
|
||||
|
||||
# Trigger channel stats update via WebSocket
|
||||
self._trigger_stats_update()
|
||||
|
||||
# Get total clients across all workers
|
||||
total_clients = self.get_total_client_count()
|
||||
logger.info(f"New client connected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
|
||||
|
|
@ -274,6 +305,8 @@ class ClientManager:
|
|||
|
||||
def remove_client(self, client_id):
|
||||
"""Remove a client from this channel and Redis"""
|
||||
client_ip = None
|
||||
|
||||
with self.lock:
|
||||
if client_id in self.clients:
|
||||
self.clients.remove(client_id)
|
||||
|
|
@ -284,6 +317,14 @@ class ClientManager:
|
|||
self.last_active_time = time.time()
|
||||
|
||||
if self.redis_client:
|
||||
# Get client IP before removing the data
|
||||
client_key = f"ts_proxy:channel:{self.channel_id}:clients:{client_id}"
|
||||
client_data = self.redis_client.hgetall(client_key)
|
||||
if client_data and b'ip_address' in client_data:
|
||||
client_ip = client_data[b'ip_address'].decode('utf-8')
|
||||
elif client_data and 'ip_address' in client_data:
|
||||
client_ip = client_data['ip_address']
|
||||
|
||||
# Remove from channel's client set
|
||||
self.redis_client.srem(self.client_set_key, client_id)
|
||||
|
||||
|
|
@ -302,16 +343,33 @@ class ClientManager:
|
|||
|
||||
self._notify_owner_of_activity()
|
||||
|
||||
# Publish client disconnected event
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED, # Use constant instead of string
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
# Check if we're the owner - if so, handle locally; if not, publish event
|
||||
am_i_owner = self.proxy_server and self.proxy_server.am_i_owner(self.channel_id)
|
||||
|
||||
if am_i_owner:
|
||||
# We're the owner - handle the disconnect directly
|
||||
logger.debug(f"Owner handling CLIENT_DISCONNECTED for client {client_id} locally (not publishing)")
|
||||
if remaining == 0:
|
||||
# Trigger shutdown check directly via ProxyServer method
|
||||
logger.debug(f"No clients left - triggering immediate shutdown check")
|
||||
# Spawn greenlet to avoid blocking
|
||||
import gevent
|
||||
gevent.spawn(self.proxy_server.handle_client_disconnect, self.channel_id)
|
||||
else:
|
||||
# We're not the owner - publish event so owner can handle it
|
||||
logger.debug(f"Non-owner publishing CLIENT_DISCONNECTED event for client {client_id} on channel {self.channel_id} from worker {self.worker_id}")
|
||||
event_data = json.dumps({
|
||||
"event": EventType.CLIENT_DISCONNECTED,
|
||||
"channel_id": self.channel_id,
|
||||
"client_id": client_id,
|
||||
"worker_id": self.worker_id or "unknown",
|
||||
"timestamp": time.time(),
|
||||
"remaining_clients": remaining
|
||||
})
|
||||
self.redis_client.publish(RedisKeys.events_channel(self.channel_id), event_data)
|
||||
|
||||
# Trigger channel stats update via WebSocket
|
||||
self._trigger_stats_update()
|
||||
|
||||
total_clients = self.get_total_client_count()
|
||||
logger.info(f"Client disconnected: {client_id} (local: {len(self.clients)}, total: {total_clients})")
|
||||
|
|
|
|||
|
|
@@ -100,3 +100,12 @@ class ConfigHelper:
    def channel_init_grace_period():
        """Get channel initialization grace period in seconds"""
        return Config.get_channel_init_grace_period()

    @staticmethod
    def chunk_timeout():
        """
        Get chunk timeout in seconds (used for both socket and HTTP read timeouts).
        This controls how long we wait for each chunk before timing out.
        Set this higher (e.g., 30s) for slow providers that may have intermittent delays.
        """
        return ConfigHelper.get('CHUNK_TIMEOUT', 5)  # Default 5 seconds
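A small illustration of how chunk_timeout() is meant to be consumed, per its docstring; the call sites below are illustrative placeholders (sock, session, url), not code from this PR:

    # Illustrative only: apply the same timeout to a raw socket read and an HTTP read
    timeout = ConfigHelper.chunk_timeout()
    sock.settimeout(timeout)                                    # socket-based TS reads
    resp = session.get(url, stream=True, timeout=(5, timeout))  # 5s connect, chunk_timeout read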
@@ -33,6 +33,8 @@ class EventType:
# Stream types
class StreamType:
    HLS = "hls"
    RTSP = "rtsp"
    UDP = "udp"
    TS = "ts"
    UNKNOWN = "unknown"
138  apps/proxy/ts_proxy/http_streamer.py  Normal file
@@ -0,0 +1,138 @@
"""
HTTP Stream Reader - Thread-based HTTP stream reader that writes to a pipe.
This allows us to use the same fetch_chunk() path for both transcode and HTTP streams.
"""

import threading
import os
import requests
from requests.adapters import HTTPAdapter
from .utils import get_logger

logger = get_logger()


class HTTPStreamReader:
    """Thread-based HTTP stream reader that writes to a pipe"""

    def __init__(self, url, user_agent=None, chunk_size=8192):
        self.url = url
        self.user_agent = user_agent
        self.chunk_size = chunk_size
        self.session = None
        self.response = None
        self.thread = None
        self.pipe_read = None
        self.pipe_write = None
        self.running = False

    def start(self):
        """Start the HTTP stream reader thread"""
        # Create a pipe (works on Windows and Unix)
        self.pipe_read, self.pipe_write = os.pipe()

        # Start the reader thread
        self.running = True
        self.thread = threading.Thread(target=self._read_stream, daemon=True)
        self.thread.start()

        logger.info(f"Started HTTP stream reader thread for {self.url}")
        return self.pipe_read

    def _read_stream(self):
        """Thread worker that reads HTTP stream and writes to pipe"""
        try:
            # Build headers
            headers = {}
            if self.user_agent:
                headers['User-Agent'] = self.user_agent

            logger.info(f"HTTP reader connecting to {self.url}")

            # Create session
            self.session = requests.Session()

            # Disable retries for faster failure detection
            adapter = HTTPAdapter(max_retries=0, pool_connections=1, pool_maxsize=1)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)

            # Stream the URL
            self.response = self.session.get(
                self.url,
                headers=headers,
                stream=True,
                timeout=(5, 30)  # 5s connect, 30s read
            )

            if self.response.status_code != 200:
                logger.error(f"HTTP {self.response.status_code} from {self.url}")
                return

            logger.info(f"HTTP reader connected successfully, streaming data...")

            # Stream chunks to pipe
            chunk_count = 0
            for chunk in self.response.iter_content(chunk_size=self.chunk_size):
                if not self.running:
                    break

                if chunk:
                    try:
                        # Write binary data to pipe
                        os.write(self.pipe_write, chunk)
                        chunk_count += 1

                        # Log progress periodically
                        if chunk_count % 1000 == 0:
                            logger.debug(f"HTTP reader streamed {chunk_count} chunks")
                    except OSError as e:
                        logger.error(f"Pipe write error: {e}")
                        break

            logger.info("HTTP stream ended")

        except requests.exceptions.RequestException as e:
            logger.error(f"HTTP reader request error: {e}")
        except Exception as e:
            logger.error(f"HTTP reader unexpected error: {e}", exc_info=True)
        finally:
            self.running = False
            # Close write end of pipe to signal EOF
            try:
                if self.pipe_write is not None:
                    os.close(self.pipe_write)
                    self.pipe_write = None
            except:
                pass

    def stop(self):
        """Stop the HTTP stream reader"""
        logger.info("Stopping HTTP stream reader")
        self.running = False

        # Close response
        if self.response:
            try:
                self.response.close()
            except:
                pass

        # Close session
        if self.session:
            try:
                self.session.close()
            except:
                pass

        # Close write end of pipe
        if self.pipe_write is not None:
            try:
                os.close(self.pipe_write)
                self.pipe_write = None
            except:
                pass

        # Wait for thread
        if self.thread and self.thread.is_alive():
            self.thread.join(timeout=2.0)
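A minimal sketch of how HTTPStreamReader is meant to be consumed, based only on the class above: start() returns the read end of the pipe, the caller reads fixed-size chunks from that descriptor, and stop() tears the session and pipe down. The handle() call is a placeholder; the real StreamManager integration is not shown in this section.

    # Illustrative consumer of the pipe-based reader
    reader = HTTPStreamReader("http://example.com/stream.ts", user_agent="VLC/3.0.20 LibVLC/3.0.20")
    fd = reader.start()                 # file descriptor for the pipe's read end
    try:
        while True:
            chunk = os.read(fd, 8192)   # same fetch path as a transcode pipe
            if not chunk:               # EOF: the writer closed its end
                break
            handle(chunk)               # placeholder for buffering/forwarding logic
    finally:
        reader.stop()
        os.close(fd)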
|
|
@ -19,7 +19,7 @@ import gevent # Add gevent import
|
|||
from typing import Dict, Optional, Set
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from apps.channels.models import Channel, Stream
|
||||
from core.utils import RedisClient
|
||||
from core.utils import RedisClient, log_system_event
|
||||
from redis.exceptions import ConnectionError, TimeoutError
|
||||
from .stream_manager import StreamManager
|
||||
from .stream_buffer import StreamBuffer
|
||||
|
|
@ -131,6 +131,8 @@ class ProxyServer:
|
|||
max_retries = 10
|
||||
base_retry_delay = 1 # Start with 1 second delay
|
||||
max_retry_delay = 30 # Cap at 30 seconds
|
||||
pubsub_client = None
|
||||
pubsub = None
|
||||
|
||||
while True:
|
||||
try:
|
||||
|
|
@ -192,35 +194,11 @@ class ProxyServer:
|
|||
self.redis_client.delete(disconnect_key)
|
||||
|
||||
elif event_type == EventType.CLIENT_DISCONNECTED:
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}")
|
||||
# Check if any clients remain
|
||||
if channel_id in self.client_managers:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay) # REPLACE: time.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
client_id = data.get("client_id")
|
||||
worker_id = data.get("worker_id")
|
||||
logger.debug(f"Owner received {EventType.CLIENT_DISCONNECTED} event for channel {channel_id}, client {client_id} from worker {worker_id}")
|
||||
# Delegate to dedicated method
|
||||
self.handle_client_disconnect(channel_id)
|
||||
|
||||
|
||||
elif event_type == EventType.STREAM_SWITCH:
|
||||
|
|
@ -339,20 +317,27 @@ class ProxyServer:
|
|||
logger.error(f"Error in event listener: {e}. Retrying in {final_delay:.1f}s (attempt {retry_count})")
|
||||
gevent.sleep(final_delay) # REPLACE: time.sleep(final_delay)
|
||||
|
||||
# Try to clean up the old connection
|
||||
try:
|
||||
if 'pubsub' in locals():
|
||||
pubsub.close()
|
||||
if 'pubsub_client' in locals():
|
||||
pubsub_client.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in event listener: {e}")
|
||||
# Add a short delay to prevent rapid retries on persistent errors
|
||||
gevent.sleep(5) # REPLACE: time.sleep(5)
|
||||
|
||||
finally:
|
||||
# Always clean up PubSub connections in all error paths
|
||||
try:
|
||||
if pubsub:
|
||||
pubsub.close()
|
||||
pubsub = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub: {e}")
|
||||
|
||||
try:
|
||||
if pubsub_client:
|
||||
pubsub_client.close()
|
||||
pubsub_client = None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing pubsub_client: {e}")
|
||||
|
||||
thread = threading.Thread(target=event_listener, daemon=True)
|
||||
thread.name = "redis-event-listener"
|
||||
thread.start()
|
||||
|
|
@ -486,17 +471,18 @@ class ProxyServer:
|
|||
)
|
||||
return True
|
||||
|
||||
# Create buffer and client manager instances
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
# Create buffer and client manager instances (or reuse if they exist)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Store in local tracking
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
self.client_managers[channel_id] = client_manager
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# IMPROVED: Set initializing state in Redis BEFORE any other operations
|
||||
if self.redis_client:
|
||||
|
|
@ -550,13 +536,15 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} already owned by worker {current_owner}")
|
||||
logger.info(f"This worker ({self.worker_id}) will read from Redis buffer only")
|
||||
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -571,13 +559,15 @@ class ProxyServer:
|
|||
# Another worker just acquired ownership
|
||||
logger.info(f"Another worker just acquired ownership of channel {channel_id}")
|
||||
|
||||
# Create buffer but not stream manager
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
# Create buffer but not stream manager (only if not already exists)
|
||||
if channel_id not in self.stream_buffers:
|
||||
buffer = StreamBuffer(channel_id=channel_id, redis_client=self.redis_client)
|
||||
self.stream_buffers[channel_id] = buffer
|
||||
|
||||
# Create client manager with channel_id and redis_client
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Create client manager with channel_id and redis_client (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(channel_id=channel_id, redis_client=self.redis_client, worker_id=self.worker_id)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
return True
|
||||
|
||||
|
|
@ -596,7 +586,7 @@ class ProxyServer:
|
|||
if channel_user_agent:
|
||||
metadata["user_agent"] = channel_user_agent
|
||||
|
||||
# CRITICAL FIX: Make sure stream_id is always set in metadata and properly logged
|
||||
# Make sure stream_id is always set in metadata and properly logged
|
||||
if channel_stream_id:
|
||||
metadata["stream_id"] = str(channel_stream_id)
|
||||
logger.info(f"Storing stream_id {channel_stream_id} in metadata for channel {channel_id}")
|
||||
|
|
@ -632,13 +622,37 @@ class ProxyServer:
|
|||
logger.info(f"Created StreamManager for channel {channel_id} with stream ID {channel_stream_id}")
|
||||
self.stream_managers[channel_id] = stream_manager
|
||||
|
||||
# Create client manager with channel_id, redis_client AND worker_id
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
# Log channel start event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Get stream name if stream_id is available
|
||||
stream_name = None
|
||||
if channel_stream_id:
|
||||
try:
|
||||
stream_obj = Stream.objects.get(id=channel_stream_id)
|
||||
stream_name = stream_obj.name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_start',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
stream_name=stream_name,
|
||||
stream_id=channel_stream_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel start event: {e}")
|
||||
|
||||
# Create client manager with channel_id, redis_client AND worker_id (only if not already exists)
|
||||
if channel_id not in self.client_managers:
|
||||
client_manager = ClientManager(
|
||||
channel_id=channel_id,
|
||||
redis_client=self.redis_client,
|
||||
worker_id=self.worker_id
|
||||
)
|
||||
self.client_managers[channel_id] = client_manager
|
||||
|
||||
# Start stream manager thread only for the owner
|
||||
thread = threading.Thread(target=stream_manager.run, daemon=True)
|
||||
|
|
@ -688,9 +702,10 @@ class ProxyServer:
|
|||
state = metadata.get(b'state', b'unknown').decode('utf-8')
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8')
|
||||
|
||||
# States that indicate the channel is running properly
|
||||
# States that indicate the channel is running properly or shutting down
|
||||
valid_states = [ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS,
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING]
|
||||
ChannelState.CONNECTING, ChannelState.BUFFERING, ChannelState.INITIALIZING,
|
||||
ChannelState.STOPPING]
|
||||
|
||||
# If the channel is in a valid state, check if the owner is still active
|
||||
if state in valid_states:
|
||||
|
|
@ -703,12 +718,24 @@ class ProxyServer:
|
|||
else:
|
||||
# This is a zombie channel - owner is gone but metadata still exists
|
||||
logger.warning(f"Detected zombie channel {channel_id} - owner {owner} is no longer active")
|
||||
|
||||
# Check if there are any clients connected
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if client_count > 0:
|
||||
logger.warning(f"Zombie channel {channel_id} has {client_count} clients - attempting ownership takeover")
|
||||
# Could potentially take ownership here in the future
|
||||
# For now, just clean it up to be safe
|
||||
else:
|
||||
logger.warning(f"Zombie channel {channel_id} has no clients - cleaning up")
|
||||
|
||||
self._clean_zombie_channel(channel_id, metadata)
|
||||
return False
|
||||
elif state in [ChannelState.STOPPING, ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These states indicate the channel should be reinitialized
|
||||
logger.info(f"Channel {channel_id} exists but in terminal state: {state}")
|
||||
return True
|
||||
elif state in [ChannelState.STOPPED, ChannelState.ERROR]:
|
||||
# These terminal states indicate the channel should be cleaned up and reinitialized
|
||||
logger.info(f"Channel {channel_id} in terminal state {state} - returning False to trigger cleanup")
|
||||
return False
|
||||
else:
|
||||
# Unknown or initializing state, check how long it's been in this state
|
||||
if b'state_changed_at' in metadata:
|
||||
|
|
@ -772,6 +799,44 @@ class ProxyServer:
|
|||
logger.error(f"Error cleaning zombie channel {channel_id}: {e}", exc_info=True)
|
||||
return False
|
||||
|
||||
def handle_client_disconnect(self, channel_id):
|
||||
"""
|
||||
Handle client disconnect event - check if channel should shut down.
|
||||
Can be called directly by owner or via PubSub from non-owner workers.
|
||||
"""
|
||||
if channel_id not in self.client_managers:
|
||||
return
|
||||
|
||||
try:
|
||||
# VERIFY REDIS CLIENT COUNT DIRECTLY
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total == 0:
|
||||
logger.debug(f"No clients left after disconnect event - stopping channel {channel_id}")
|
||||
# Set the disconnect timer for other workers to see
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
self.redis_client.setex(disconnect_key, 60, str(time.time()))
|
||||
|
||||
# Get configured shutdown delay or default
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if shutdown_delay > 0:
|
||||
logger.info(f"Waiting {shutdown_delay}s before stopping channel...")
|
||||
gevent.sleep(shutdown_delay)
|
||||
|
||||
# Re-check client count before stopping
|
||||
total = self.redis_client.scard(client_set_key) or 0
|
||||
if total > 0:
|
||||
logger.info(f"New clients connected during shutdown delay - aborting shutdown")
|
||||
self.redis_client.delete(disconnect_key)
|
||||
return
|
||||
|
||||
# Stop the channel directly
|
||||
self.stop_channel(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error handling client disconnect for channel {channel_id}: {e}")
|
||||
|
||||
def stop_channel(self, channel_id):
|
||||
"""Stop a channel with proper ownership handling"""
|
||||
try:
|
||||
|
|
@ -819,6 +884,41 @@ class ProxyServer:
|
|||
self.release_ownership(channel_id)
|
||||
logger.info(f"Released ownership of channel {channel_id}")
|
||||
|
||||
# Log channel stop event (after cleanup, before releasing ownership section ends)
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=channel_id)
|
||||
|
||||
# Calculate runtime and get total bytes from metadata
|
||||
runtime = None
|
||||
total_bytes = None
|
||||
if self.redis_client:
|
||||
metadata_key = RedisKeys.channel_metadata(channel_id)
|
||||
metadata = self.redis_client.hgetall(metadata_key)
|
||||
if metadata:
|
||||
# Calculate runtime from init_time
|
||||
if b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
runtime = round(time.time() - init_time, 2)
|
||||
except Exception:
|
||||
pass
|
||||
# Get total bytes transferred
|
||||
if b'total_bytes' in metadata:
|
||||
try:
|
||||
total_bytes = int(metadata[b'total_bytes'].decode('utf-8'))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
log_system_event(
|
||||
'channel_stop',
|
||||
channel_id=channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
runtime=runtime,
|
||||
total_bytes=total_bytes
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log channel stop event: {e}")
|
||||
|
||||
# Always clean up local resources - WITH SAFE CHECKS
|
||||
if channel_id in self.stream_managers:
|
||||
del self.stream_managers[channel_id]
|
||||
|
|
@ -846,6 +946,10 @@ class ProxyServer:
|
|||
# Clean up client manager - SAFE CHECK HERE TOO
|
||||
if channel_id in self.client_managers:
|
||||
try:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
# Stop the heartbeat thread before deleting
|
||||
if hasattr(client_manager, 'stop'):
|
||||
client_manager.stop()
|
||||
del self.client_managers[channel_id]
|
||||
logger.info(f"Removed client manager for channel {channel_id}")
|
||||
except KeyError:
|
||||
|
|
@ -920,6 +1024,15 @@ class ProxyServer:
|
|||
if channel_id in self.client_managers:
|
||||
client_manager = self.client_managers[channel_id]
|
||||
total_clients = client_manager.get_total_client_count()
|
||||
else:
|
||||
# This can happen during reconnection attempts or crashes
|
||||
# Check Redis directly for any connected clients
|
||||
if self.redis_client:
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
total_clients = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
if total_clients == 0:
|
||||
logger.warning(f"Channel {channel_id} is missing client_manager but we're the owner with 0 clients - will trigger cleanup")
|
||||
|
||||
# Log client count periodically
|
||||
if time.time() % 30 < 1: # Every ~30 seconds
|
||||
|
|
@ -927,7 +1040,14 @@ class ProxyServer:
|
|||
|
||||
# If in connecting or waiting_for_clients state, check grace period
|
||||
if channel_state in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS]:
|
||||
# Get connection ready time from metadata
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Get connection_ready_time from metadata (indicates if channel reached ready state)
|
||||
connection_ready_time = None
|
||||
if metadata and b'connection_ready_time' in metadata:
|
||||
try:
|
||||
|
|
@ -935,17 +1055,60 @@ class ProxyServer:
|
|||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# If still connecting, give it more time
|
||||
if channel_state == ChannelState.CONNECTING:
|
||||
logger.debug(f"Channel {channel_id} still connecting - not checking for clients yet")
|
||||
continue
|
||||
if total_clients == 0:
|
||||
# Check if we have a connection_attempt timestamp (set when CONNECTING starts)
|
||||
connection_attempt_time = None
|
||||
attempt_key = RedisKeys.connection_attempt(channel_id)
|
||||
if self.redis_client:
|
||||
attempt_value = self.redis_client.get(attempt_key)
|
||||
if attempt_value:
|
||||
try:
|
||||
connection_attempt_time = float(attempt_value.decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# If waiting for clients, check grace period
|
||||
if connection_ready_time:
|
||||
# Also get init time as a fallback
|
||||
init_time = None
|
||||
if metadata and b'init_time' in metadata:
|
||||
try:
|
||||
init_time = float(metadata[b'init_time'].decode('utf-8'))
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Use whichever timestamp we have (prefer connection_attempt as it's more recent)
|
||||
start_time = connection_attempt_time or init_time
|
||||
|
||||
if start_time:
|
||||
# Check which timeout to apply based on channel lifecycle
|
||||
if connection_ready_time:
|
||||
# Already reached ready - use shutdown_delay
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
shutdown_delay = ConfigHelper.channel_shutdown_delay()
|
||||
|
||||
if time_since_ready > shutdown_delay:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} in {channel_state} state with 0 clients for {time_since_ready:.1f}s "
|
||||
f"(after reaching ready, shutdown_delay: {shutdown_delay}s) - stopping channel"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
else:
|
||||
# Never reached ready - use grace_period timeout
|
||||
time_since_start = time.time() - start_time
|
||||
connecting_timeout = ConfigHelper.channel_init_grace_period()
|
||||
|
||||
if time_since_start > connecting_timeout:
|
||||
logger.warning(
|
||||
f"Channel {channel_id} stuck in {channel_state} state for {time_since_start:.1f}s "
|
||||
f"with no clients (timeout: {connecting_timeout}s) - stopping channel due to upstream issues"
|
||||
)
|
||||
self.stop_channel(channel_id)
|
||||
continue
|
||||
elif connection_ready_time:
|
||||
# We have clients now, but check grace period for state transition
|
||||
grace_period = ConfigHelper.channel_init_grace_period()
|
||||
time_since_ready = time.time() - connection_ready_time
|
||||
|
||||
# Add this debug log
|
||||
logger.debug(f"GRACE PERIOD CHECK: Channel {channel_id} in {channel_state} state, "
|
||||
f"time_since_ready={time_since_ready:.1f}s, grace_period={grace_period}s, "
|
||||
f"total_clients={total_clients}")
|
||||
|
|
@ -954,16 +1117,9 @@ class ProxyServer:
|
|||
# Still within grace period
|
||||
logger.debug(f"Channel {channel_id} in grace period - {time_since_ready:.1f}s of {grace_period}s elapsed")
|
||||
continue
|
||||
elif total_clients == 0:
|
||||
# Grace period expired with no clients
|
||||
logger.info(f"Grace period expired ({time_since_ready:.1f}s > {grace_period}s) with no clients - stopping channel {channel_id}")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Grace period expired but we have clients - mark channel as active
|
||||
# Grace period expired with clients - mark channel as active
|
||||
logger.info(f"Grace period expired with {total_clients} clients - marking channel {channel_id} as active")
|
||||
old_state = "unknown"
|
||||
if metadata and b'state' in metadata:
|
||||
old_state = metadata[b'state'].decode('utf-8')
|
||||
if self.update_channel_state(channel_id, ChannelState.ACTIVE, {
|
||||
"grace_period_ended_at": str(time.time()),
|
||||
"clients_at_activation": str(total_clients)
|
||||
|
|
@ -971,6 +1127,13 @@ class ProxyServer:
|
|||
logger.info(f"Channel {channel_id} activated with {total_clients} clients after grace period")
|
||||
# If active and no clients, start normal shutdown procedure
|
||||
elif channel_state not in [ChannelState.CONNECTING, ChannelState.WAITING_FOR_CLIENTS] and total_clients == 0:
|
||||
# Check if channel is already stopping
|
||||
if self.redis_client:
|
||||
stop_key = RedisKeys.channel_stopping(channel_id)
|
||||
if self.redis_client.exists(stop_key):
|
||||
logger.debug(f"Channel {channel_id} is already stopping - skipping monitor shutdown")
|
||||
continue
|
||||
|
||||
# Check if there's a pending no-clients timeout
|
||||
disconnect_key = RedisKeys.last_client_disconnect(channel_id)
|
||||
disconnect_time = None
|
||||
|
|
@ -1030,14 +1193,30 @@ class ProxyServer:
|
|||
continue
|
||||
|
||||
# Check for local client count - if zero, clean up our local resources
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
if channel_id in self.client_managers:
|
||||
if self.client_managers[channel_id].get_client_count() == 0:
|
||||
# We're not the owner, and we have no local clients - clean up our resources
|
||||
logger.debug(f"Non-owner cleanup: Channel {channel_id} has no local clients, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
else:
|
||||
# This shouldn't happen, but clean up anyway
|
||||
logger.warning(f"Non-owner cleanup: Channel {channel_id} has no client_manager entry, cleaning up local resources")
|
||||
self._cleanup_local_resources(channel_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in cleanup thread: {e}", exc_info=True)
|
||||
|
||||
# Periodically check for orphaned channels (every 30 seconds)
|
||||
if hasattr(self, '_last_orphan_check'):
|
||||
if time.time() - self._last_orphan_check > 30:
|
||||
try:
|
||||
self._check_orphaned_metadata()
|
||||
self._last_orphan_check = time.time()
|
||||
except Exception as orphan_error:
|
||||
logger.error(f"Error checking orphaned metadata: {orphan_error}", exc_info=True)
|
||||
else:
|
||||
self._last_orphan_check = time.time()
|
||||
|
||||
gevent.sleep(ConfigHelper.cleanup_check_interval()) # REPLACE: time.sleep(ConfigHelper.cleanup_check_interval())
|
||||
|
||||
thread = threading.Thread(target=cleanup_task, daemon=True)
|
||||
|
|
@ -1059,10 +1238,6 @@ class ProxyServer:
|
|||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Skip channels we already have locally
|
||||
if channel_id in self.stream_buffers:
|
||||
continue
|
||||
|
||||
# Check if this channel has an owner
|
||||
owner = self.get_channel_owner(channel_id)
|
||||
|
||||
|
|
@@ -1077,13 +1252,84 @@ class ProxyServer:
|
|||
else:
|
||||
# Orphaned channel with no clients - clean it up
|
||||
logger.info(f"Cleaning up orphaned channel {channel_id}")
|
||||
self._clean_redis_keys(channel_id)
|
||||
|
||||
# If we have it locally, stop it properly to clean up processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Orphaned channel {channel_id} is local - calling stop_channel")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing channel key {key}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned channels: {e}")
|
||||
|
||||
def _check_orphaned_metadata(self):
|
||||
"""
|
||||
Check for metadata entries that have no owner and no clients.
|
||||
This catches zombie channels that weren't cleaned up properly.
|
||||
"""
|
||||
if not self.redis_client:
|
||||
return
|
||||
|
||||
try:
|
||||
# Get all channel metadata keys
|
||||
channel_pattern = "ts_proxy:channel:*:metadata"
|
||||
channel_keys = self.redis_client.keys(channel_pattern)
|
||||
|
||||
for key in channel_keys:
|
||||
try:
|
||||
channel_id = key.decode('utf-8').split(':')[2]
|
||||
|
||||
# Get metadata first
|
||||
metadata = self.redis_client.hgetall(key)
|
||||
if not metadata:
|
||||
# Empty metadata - clean it up
|
||||
logger.warning(f"Found empty metadata for channel {channel_id} - cleaning up")
|
||||
# If we have it locally, stop it properly
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
self._clean_redis_keys(channel_id)
|
||||
continue
|
||||
|
||||
# Get owner
|
||||
owner = metadata.get(b'owner', b'').decode('utf-8') if b'owner' in metadata else ''
|
||||
|
||||
# Check if owner is still alive
|
||||
owner_alive = False
|
||||
if owner:
|
||||
owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
|
||||
owner_alive = self.redis_client.exists(owner_heartbeat_key)
|
||||
|
||||
# Check client count
|
||||
client_set_key = RedisKeys.clients(channel_id)
|
||||
client_count = self.redis_client.scard(client_set_key) or 0
|
||||
|
||||
# If no owner and no clients, clean it up
|
||||
if not owner_alive and client_count == 0:
|
||||
state = metadata.get(b'state', b'unknown').decode('utf-8') if b'state' in metadata else 'unknown'
|
||||
logger.warning(f"Found orphaned metadata for channel {channel_id} (state: {state}, owner: {owner}, clients: {client_count}) - cleaning up")
|
||||
|
||||
# If we have it locally, stop it properly to clean up transcode/proxy processes
|
||||
if channel_id in self.stream_managers or channel_id in self.client_managers:
|
||||
logger.info(f"Channel {channel_id} is local - calling stop_channel to clean up processes")
|
||||
self.stop_channel(channel_id)
|
||||
else:
|
||||
# Just clean up Redis keys for remote channels
|
||||
self._clean_redis_keys(channel_id)
|
||||
elif not owner_alive and client_count > 0:
|
||||
# Owner is gone but clients remain - just log for now
|
||||
logger.warning(f"Found orphaned channel {channel_id} with {client_count} clients but no owner - may need ownership takeover")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing metadata key {key}: {e}", exc_info=True)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking orphaned metadata: {e}", exc_info=True)
|
||||
|
||||
def _clean_redis_keys(self, channel_id):
|
||||
"""Clean up all Redis keys for a channel more efficiently"""
|
||||
# Release the channel, stream, and profile keys from the channel
|
||||
|
|
|
|||
|
|
@@ -14,6 +14,8 @@ from ..server import ProxyServer
|
|||
from ..redis_keys import RedisKeys
|
||||
from ..constants import EventType, ChannelState, ChannelMetadataField
|
||||
from ..url_utils import get_stream_info_for_switch
|
||||
from core.utils import log_system_event
|
||||
from .log_parsers import LogParserFactory
|
||||
|
||||
logger = logging.getLogger("ts_proxy")
|
||||
|
||||
|
|
@@ -417,106 +419,52 @@ class ChannelService:
|
|||
return False, None, None, {"error": f"Exception: {str(e)}"}
|
||||
|
||||
@staticmethod
|
||||
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video"):
|
||||
"""Parse FFmpeg stream info line and store in Redis metadata"""
|
||||
def parse_and_store_stream_info(channel_id, stream_info_line, stream_type="video", stream_id=None):
|
||||
"""
|
||||
Parse stream info from FFmpeg/VLC/Streamlink logs and store in Redis/DB.
|
||||
Uses specialized parsers for each streaming tool.
|
||||
"""
|
||||
try:
|
||||
if stream_type == "input":
|
||||
# Example lines:
|
||||
# Input #0, mpegts, from 'http://example.com/stream.ts':
|
||||
# Input #0, hls, from 'http://example.com/stream.m3u8':
|
||||
# Use factory to parse the line based on stream type
|
||||
parsed_data = LogParserFactory.parse(stream_type, stream_info_line)
|
||||
|
||||
if not parsed_data:
|
||||
return
|
||||
|
||||
# Extract input format (e.g., "mpegts", "hls", "flv", etc.)
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', stream_info_line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
# Update Redis and database with parsed data
|
||||
ChannelService._update_stream_info_in_redis(
|
||||
channel_id,
|
||||
parsed_data.get('video_codec'),
|
||||
parsed_data.get('resolution'),
|
||||
parsed_data.get('width'),
|
||||
parsed_data.get('height'),
|
||||
parsed_data.get('source_fps'),
|
||||
parsed_data.get('pixel_format'),
|
||||
parsed_data.get('video_bitrate'),
|
||||
parsed_data.get('audio_codec'),
|
||||
parsed_data.get('sample_rate'),
|
||||
parsed_data.get('audio_channels'),
|
||||
parsed_data.get('audio_bitrate'),
|
||||
parsed_data.get('stream_type')
|
||||
)
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if input_format:
|
||||
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, None, None, None, None, input_format)
|
||||
|
||||
logger.debug(f"Input format info - Format: {input_format} for channel {channel_id}")
|
||||
|
||||
elif stream_type == "video":
|
||||
# Example line:
|
||||
# Stream #0:0: Video: h264 (Main), yuv420p(tv, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 2000 kb/s, 29.97 fps, 90k tbn
|
||||
|
||||
# Extract video codec (e.g., "h264", "mpeg2video", etc.)
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', stream_info_line)
|
||||
video_codec = codec_match.group(1) if codec_match else None
|
||||
|
||||
# Extract resolution (e.g., "1280x720") - be more specific to avoid hex values
|
||||
# Look for resolution patterns that are realistic video dimensions
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', stream_info_line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
# Validate that these look like reasonable video dimensions
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
resolution = f"{width}x{height}"
|
||||
else:
|
||||
width = height = resolution = None
|
||||
else:
|
||||
width = height = resolution = None
|
||||
|
||||
# Extract source FPS (e.g., "29.97 fps")
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', stream_info_line)
|
||||
source_fps = float(fps_match.group(1)) if fps_match else None
|
||||
|
||||
# Extract pixel format (e.g., "yuv420p")
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', stream_info_line)
|
||||
pixel_format = None
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
# Clean up pixel format (remove extra info in parentheses)
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
pixel_format = pf
|
||||
|
||||
# Extract bitrate if present (e.g., "2000 kb/s")
|
||||
video_bitrate = None
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
|
||||
if bitrate_match:
|
||||
video_bitrate = float(bitrate_match.group(1))
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if any(x is not None for x in [video_codec, resolution, source_fps, pixel_format, video_bitrate]):
|
||||
ChannelService._update_stream_info_in_redis(channel_id, video_codec, resolution, width, height, source_fps, pixel_format, video_bitrate, None, None, None, None, None)
|
||||
|
||||
logger.info(f"Video stream info - Codec: {video_codec}, Resolution: {resolution}, "
|
||||
f"Source FPS: {source_fps}, Pixel Format: {pixel_format}, "
|
||||
f"Video Bitrate: {video_bitrate} kb/s")
|
||||
|
||||
elif stream_type == "audio":
|
||||
# Example line:
|
||||
# Stream #0:1[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 48000 Hz, stereo, fltp, 64 kb/s
|
||||
|
||||
# Extract audio codec (e.g., "aac", "mp3", etc.)
|
||||
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', stream_info_line)
|
||||
audio_codec = codec_match.group(1) if codec_match else None
|
||||
|
||||
# Extract sample rate (e.g., "48000 Hz")
|
||||
sample_rate_match = re.search(r'(\d+)\s*Hz', stream_info_line)
|
||||
sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
|
||||
|
||||
# Extract channel layout (e.g., "stereo", "5.1", "mono")
|
||||
# Look for common channel layouts
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', stream_info_line, re.IGNORECASE)
|
||||
channels = channel_match.group(1) if channel_match else None
|
||||
|
||||
# Extract audio bitrate if present (e.g., "64 kb/s")
|
||||
audio_bitrate = None
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', stream_info_line)
|
||||
if bitrate_match:
|
||||
audio_bitrate = float(bitrate_match.group(1))
|
||||
|
||||
# Store in Redis if we have valid data
|
||||
if any(x is not None for x in [audio_codec, sample_rate, channels, audio_bitrate]):
|
||||
ChannelService._update_stream_info_in_redis(channel_id, None, None, None, None, None, None, None, audio_codec, sample_rate, channels, audio_bitrate, None)
|
||||
|
||||
logger.info(f"Audio stream info - Codec: {audio_codec}, Sample Rate: {sample_rate} Hz, "
|
||||
f"Channels: {channels}, Audio Bitrate: {audio_bitrate} kb/s")
|
||||
if stream_id:
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
stream_id,
|
||||
video_codec=parsed_data.get('video_codec'),
|
||||
resolution=parsed_data.get('resolution'),
|
||||
source_fps=parsed_data.get('source_fps'),
|
||||
pixel_format=parsed_data.get('pixel_format'),
|
||||
video_bitrate=parsed_data.get('video_bitrate'),
|
||||
audio_codec=parsed_data.get('audio_codec'),
|
||||
sample_rate=parsed_data.get('sample_rate'),
|
||||
audio_channels=parsed_data.get('audio_channels'),
|
||||
audio_bitrate=parsed_data.get('audio_bitrate'),
|
||||
stream_type=parsed_data.get('stream_type')
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg {stream_type} stream info: {e}")
|
||||
logger.debug(f"Error parsing {stream_type} stream info: {e}")
|
||||
|
||||
@staticmethod
|
||||
def _update_stream_info_in_redis(channel_id, codec, resolution, width, height, fps, pixel_format, video_bitrate, audio_codec=None, sample_rate=None, channels=None, audio_bitrate=None, input_format=None):
|
||||
|
|
@@ -575,6 +523,44 @@ class ChannelService:
|
|||
logger.error(f"Error updating stream info in Redis: {e}")
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def _update_stream_stats_in_db(stream_id, **stats):
|
||||
"""Update stream stats in database"""
|
||||
from django.db import connection
|
||||
|
||||
try:
|
||||
from apps.channels.models import Stream
|
||||
from django.utils import timezone
|
||||
|
||||
stream = Stream.objects.get(id=stream_id)
|
||||
|
||||
# Get existing stats or create new dict
|
||||
current_stats = stream.stream_stats or {}
|
||||
|
||||
# Update with new stats
|
||||
for key, value in stats.items():
|
||||
if value is not None:
|
||||
current_stats[key] = value
|
||||
|
||||
# Save updated stats and timestamp
|
||||
stream.stream_stats = current_stats
|
||||
stream.stream_stats_updated_at = timezone.now()
|
||||
stream.save(update_fields=['stream_stats', 'stream_stats_updated_at'])
|
||||
|
||||
logger.debug(f"Updated stream stats in database for stream {stream_id}: {stats}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating stream stats in database for stream {stream_id}: {e}")
|
||||
return False
|
||||
|
||||
finally:
|
||||
# Always close database connection after update
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
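# Hedged usage sketch (illustration only, not part of the change above): the merge
# semantics mean only non-None keyword arguments are written and existing keys in
# stream_stats are preserved. The stream id 42 below is a hypothetical example.
ChannelService._update_stream_stats_in_db(
    42,
    resolution="1920x1080",
    source_fps=29.97,
    audio_codec=None,  # ignored - None values never overwrite stored stats
)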
|
||||
|
||||
# Helper methods for Redis operations
|
||||
|
||||
@staticmethod
|
||||
|
|
@@ -630,7 +616,7 @@ class ChannelService:
|
|||
|
||||
switch_request = {
|
||||
"event": EventType.STREAM_SWITCH,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"url": new_url,
|
||||
"user_agent": user_agent,
|
||||
"stream_id": stream_id,
|
||||
|
|
@@ -643,6 +629,7 @@ class ChannelService:
|
|||
RedisKeys.events_channel(channel_id),
|
||||
json.dumps(switch_request)
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
|
|
@@ -655,7 +642,7 @@ class ChannelService:
|
|||
|
||||
stop_request = {
|
||||
"event": EventType.CHANNEL_STOP,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"requester_worker_id": proxy_server.worker_id,
|
||||
"timestamp": time.time()
|
||||
}
|
||||
|
|
@@ -678,7 +665,7 @@ class ChannelService:
|
|||
|
||||
stop_request = {
|
||||
"event": EventType.CLIENT_STOP,
|
||||
"channel_id": channel_id,
|
||||
"channel_id": str(channel_id),
|
||||
"client_id": client_id,
|
||||
"requester_worker_id": proxy_server.worker_id,
|
||||
"timestamp": time.time()
|
||||
|
|
|
|||
410
apps/proxy/ts_proxy/services/log_parsers.py
Normal file
|
|
@@ -0,0 +1,410 @@
|
|||
"""Log parsers for FFmpeg, Streamlink, and VLC output."""
|
||||
import re
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseLogParser(ABC):
|
||||
"""Base class for log parsers"""
|
||||
|
||||
# Map of stream_type -> method_name that this parser handles
|
||||
STREAM_TYPE_METHODS: Dict[str, str] = {}
|
||||
|
||||
@abstractmethod
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""
|
||||
Check if this parser can handle the line.
|
||||
Returns the stream_type if it can parse, None otherwise.
|
||||
e.g., 'video', 'audio', 'vlc_video', 'vlc_audio', 'streamlink'
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
pass
|
||||
|
||||
|
||||
class FFmpegLogParser(BaseLogParser):
|
||||
"""Parser for FFmpeg log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'input': 'parse_input_format',
|
||||
'video': 'parse_video_stream',
|
||||
'audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is an FFmpeg line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# Input format detection
|
||||
if lower.startswith('input #'):
|
||||
return 'input'
|
||||
|
||||
# Stream info (only during input phase, but we'll let stream_manager handle phase tracking)
|
||||
if 'stream #' in lower:
|
||||
if 'video:' in lower:
|
||||
return 'video'
|
||||
elif 'audio:' in lower:
|
||||
return 'audio'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg input format (e.g., mpegts, hls)"""
|
||||
try:
|
||||
input_match = re.search(r'Input #\d+,\s*([^,]+)', line)
|
||||
input_format = input_match.group(1).strip() if input_match else None
|
||||
|
||||
if input_format:
|
||||
logger.debug(f"Input format info - Format: {input_format}")
|
||||
return {'stream_type': input_format}
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg input format: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg video stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
# Extract codec, resolution, fps, pixel format, bitrate
|
||||
codec_match = re.search(r'Video:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['video_codec'] = codec_match.group(1)
|
||||
|
||||
resolution_match = re.search(r'\b(\d{3,5})x(\d{3,5})\b', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
fps_match = re.search(r'(\d+(?:\.\d+)?)\s*fps', line)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
pixel_format_match = re.search(r'Video:\s*[^,]+,\s*([^,(]+)', line)
|
||||
if pixel_format_match:
|
||||
pf = pixel_format_match.group(1).strip()
|
||||
if '(' in pf:
|
||||
pf = pf.split('(')[0].strip()
|
||||
result['pixel_format'] = pf
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['video_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
logger.info(f"Video stream info - Codec: {result.get('video_codec')}, "
|
||||
f"Resolution: {result.get('resolution')}, "
|
||||
f"Source FPS: {result.get('source_fps')}, "
|
||||
f"Pixel Format: {result.get('pixel_format')}, "
|
||||
f"Video Bitrate: {result.get('video_bitrate')} kb/s")
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg video stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse FFmpeg audio stream info"""
|
||||
try:
|
||||
result = {}
|
||||
|
||||
codec_match = re.search(r'Audio:\s*([a-zA-Z0-9_]+)', line)
|
||||
if codec_match:
|
||||
result['audio_codec'] = codec_match.group(1)
|
||||
|
||||
sample_rate_match = re.search(r'(\d+)\s*Hz', line)
|
||||
if sample_rate_match:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', line, re.IGNORECASE)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
bitrate_match = re.search(r'(\d+(?:\.\d+)?)\s*kb/s', line)
|
||||
if bitrate_match:
|
||||
result['audio_bitrate'] = float(bitrate_match.group(1))
|
||||
|
||||
if result:
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing FFmpeg audio stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class VLCLogParser(BaseLogParser):
|
||||
"""Parser for VLC log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'vlc_video': 'parse_video_stream',
|
||||
'vlc_audio': 'parse_audio_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a VLC line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
# VLC TS demux codec detection
|
||||
if 'ts demux debug' in lower and 'type=' in lower:
|
||||
if 'video' in lower:
|
||||
return 'vlc_video'
|
||||
elif 'audio' in lower:
|
||||
return 'vlc_audio'
|
||||
|
||||
# VLC decoder output
|
||||
if 'decoder' in lower and ('channels:' in lower or 'samplerate:' in lower or 'x' in line or 'fps' in lower):
|
||||
if 'audio' in lower or 'channels:' in lower or 'samplerate:' in lower:
|
||||
return 'vlc_audio'
|
||||
else:
|
||||
return 'vlc_video'
|
||||
|
||||
# VLC transcode output for resolution/FPS
|
||||
if 'stream_out_transcode' in lower and ('source fps' in lower or ('source ' in lower and 'x' in line)):
|
||||
return 'vlc_video'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for video"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
video_codec_map = {
|
||||
('avc', 'h.264', 'type=0x1b'): "h264",
|
||||
('hevc', 'h.265', 'type=0x24'): "hevc",
|
||||
('mpeg-2', 'type=0x02'): "mpeg2video",
|
||||
('mpeg-4', 'type=0x10'): "mpeg4"
|
||||
}
|
||||
|
||||
for patterns, codec in video_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['video_codec'] = codec
|
||||
break
|
||||
|
||||
# Extract FPS from transcode output: "source fps 30/1"
|
||||
fps_fraction_match = re.search(r'source fps\s+(\d+)/(\d+)', lower)
|
||||
if fps_fraction_match:
|
||||
numerator = int(fps_fraction_match.group(1))
|
||||
denominator = int(fps_fraction_match.group(2))
|
||||
if denominator > 0:
|
||||
result['source_fps'] = numerator / denominator
|
||||
|
||||
# Extract resolution from transcode output: "source 1280x720"
|
||||
source_res_match = re.search(r'source\s+(\d{3,4})x(\d{3,4})', lower)
|
||||
if source_res_match:
|
||||
width = int(source_res_match.group(1))
|
||||
height = int(source_res_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
else:
|
||||
# Fallback: generic resolution pattern
|
||||
resolution_match = re.search(r'(\d{3,4})x(\d{3,4})', line)
|
||||
if resolution_match:
|
||||
width = int(resolution_match.group(1))
|
||||
height = int(resolution_match.group(2))
|
||||
if 100 <= width <= 10000 and 100 <= height <= 10000:
|
||||
result['resolution'] = f"{width}x{height}"
|
||||
result['width'] = width
|
||||
result['height'] = height
|
||||
|
||||
# Fallback: try to extract FPS from generic format
|
||||
if 'source_fps' not in result:
|
||||
fps_match = re.search(r'(\d+\.?\d*)\s*fps', lower)
|
||||
if fps_match:
|
||||
result['source_fps'] = float(fps_match.group(1))
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing VLC video stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse VLC TS demux output and decoder info for audio"""
|
||||
try:
|
||||
lower = line.lower()
|
||||
result = {}
|
||||
|
||||
# Codec detection from TS demux
|
||||
audio_codec_map = {
|
||||
('type=0xf', 'adts'): "aac",
|
||||
('type=0x03', 'type=0x04'): "mp3",
|
||||
('type=0x06', 'type=0x81'): "ac3",
|
||||
('type=0x0b', 'lpcm'): "pcm"
|
||||
}
|
||||
|
||||
for patterns, codec in audio_codec_map.items():
|
||||
if any(p in lower for p in patterns):
|
||||
result['audio_codec'] = codec
|
||||
break
|
||||
|
||||
# VLC decoder format: "AAC channels: 2 samplerate: 48000"
|
||||
if 'channels:' in lower:
|
||||
channels_match = re.search(r'channels:\s*(\d+)', lower)
|
||||
if channels_match:
|
||||
num_channels = int(channels_match.group(1))
|
||||
# Convert number to name
|
||||
channel_names = {1: 'mono', 2: 'stereo', 6: '5.1', 8: '7.1'}
|
||||
result['audio_channels'] = channel_names.get(num_channels, str(num_channels))
|
||||
|
||||
if 'samplerate:' in lower:
|
||||
samplerate_match = re.search(r'samplerate:\s*(\d+)', lower)
|
||||
if samplerate_match:
|
||||
result['sample_rate'] = int(samplerate_match.group(1))
|
||||
|
||||
# Try to extract sample rate (Hz format)
|
||||
sample_rate_match = re.search(r'(\d+)\s*hz', lower)
|
||||
if sample_rate_match and 'sample_rate' not in result:
|
||||
result['sample_rate'] = int(sample_rate_match.group(1))
|
||||
|
||||
# Try to extract channels (word format)
|
||||
if 'audio_channels' not in result:
|
||||
channel_match = re.search(r'\b(mono|stereo|5\.1|7\.1|quad|2\.1)\b', lower)
|
||||
if channel_match:
|
||||
result['audio_channels'] = channel_match.group(1)
|
||||
|
||||
return result if result else None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[VLC AUDIO PARSER] Error parsing VLC audio stream info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class StreamlinkLogParser(BaseLogParser):
|
||||
"""Parser for Streamlink log output"""
|
||||
|
||||
STREAM_TYPE_METHODS = {
|
||||
'streamlink': 'parse_video_stream'
|
||||
}
|
||||
|
||||
def can_parse(self, line: str) -> Optional[str]:
|
||||
"""Check if this is a Streamlink line we can parse"""
|
||||
lower = line.lower()
|
||||
|
||||
if 'opening stream:' in lower or 'available streams:' in lower:
|
||||
return 'streamlink'
|
||||
|
||||
return None
|
||||
|
||||
def parse_input_format(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
def parse_video_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""Parse Streamlink quality/resolution"""
|
||||
try:
|
||||
quality_match = re.search(r'(\d+p|\d+x\d+)', line)
|
||||
if quality_match:
|
||||
quality = quality_match.group(1)
|
||||
|
||||
if 'x' in quality:
|
||||
resolution = quality
|
||||
width, height = map(int, quality.split('x'))
|
||||
else:
|
||||
resolutions = {
|
||||
'2160p': ('3840x2160', 3840, 2160),
|
||||
'1080p': ('1920x1080', 1920, 1080),
|
||||
'720p': ('1280x720', 1280, 720),
|
||||
'480p': ('854x480', 854, 480),
|
||||
'360p': ('640x360', 640, 360)
|
||||
}
|
||||
resolution, width, height = resolutions.get(quality, ('1920x1080', 1920, 1080))
|
||||
|
||||
return {
|
||||
'video_codec': 'h264',
|
||||
'resolution': resolution,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'pixel_format': 'yuv420p'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing Streamlink video info: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def parse_audio_stream(self, line: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
|
||||
class LogParserFactory:
|
||||
"""Factory to get the appropriate log parser"""
|
||||
|
||||
_parsers = {
|
||||
'ffmpeg': FFmpegLogParser(),
|
||||
'vlc': VLCLogParser(),
|
||||
'streamlink': StreamlinkLogParser()
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def _get_parser_and_method(cls, stream_type: str) -> Optional[tuple[BaseLogParser, str]]:
|
||||
"""Determine parser and method from stream_type"""
|
||||
# Check each parser to see if it handles this stream_type
|
||||
for parser in cls._parsers.values():
|
||||
method_name = parser.STREAM_TYPE_METHODS.get(stream_type)
|
||||
if method_name:
|
||||
return (parser, method_name)
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def parse(cls, stream_type: str, line: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Parse a log line based on stream type.
|
||||
Returns parsed data or None if parsing fails.
|
||||
"""
|
||||
result = cls._get_parser_and_method(stream_type)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
parser, method_name = result
|
||||
method = getattr(parser, method_name, None)
|
||||
if method:
|
||||
return method(line)
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def auto_parse(cls, line: str) -> Optional[tuple[str, Dict[str, Any]]]:
|
||||
"""
|
||||
Automatically detect which parser can handle this line and parse it.
|
||||
Returns (stream_type, parsed_data) or None if no parser can handle it.
|
||||
"""
|
||||
# Try each parser to see if it can handle this line
|
||||
for parser in cls._parsers.values():
|
||||
stream_type = parser.can_parse(line)
|
||||
if stream_type:
|
||||
# Parser can handle this line, now parse it
|
||||
parsed_data = cls.parse(stream_type, line)
|
||||
if parsed_data:
|
||||
return (stream_type, parsed_data)
|
||||
|
||||
return None
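# Hedged usage sketch (illustration only, not part of the new file above): driving the
# factory with a typical FFmpeg stderr line. The sample line and the expected output in
# the comment are assumptions, not captured logs.
sample_line = "Stream #0:0: Video: h264 (Main), yuv420p(tv), 1280x720, 2000 kb/s, 29.97 fps"
parsed = LogParserFactory.auto_parse(sample_line)
if parsed:
    detected_type, data = parsed
    # Roughly: ('video', {'video_codec': 'h264', 'resolution': '1280x720', 'width': 1280,
    #                     'height': 720, 'source_fps': 29.97, 'pixel_format': 'yuv420p', ...})
    logger.debug(f"auto_parse detected {detected_type}: {data}")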
|
||||
|
|
@@ -303,6 +303,14 @@ class StreamBuffer:
|
|||
# Retrieve chunks
|
||||
chunks = self.get_chunks_exact(client_index, chunk_count)
|
||||
|
||||
# Check if we got significantly fewer chunks than expected (likely due to expiration)
|
||||
# Only check if we expected multiple chunks and got none back at all
|
||||
if chunk_count > 3 and len(chunks) == 0 and chunks_behind > 10:
|
||||
# Chunks are missing - likely expired from Redis
|
||||
# Return empty list to signal client should skip forward
|
||||
logger.debug(f"Chunks missing for client at index {client_index}, buffer at {self.index} ({chunks_behind} behind)")
|
||||
return [], client_index
|
||||
|
||||
# Check total size
|
||||
total_size = sum(len(c) for c in chunks)
|
||||
|
||||
|
|
@@ -316,7 +324,7 @@ class StreamBuffer:
|
|||
additional_size = sum(len(c) for c in more_chunks)
|
||||
if total_size + additional_size <= MAX_SIZE:
|
||||
chunks.extend(more_chunks)
|
||||
chunk_count += additional
|
||||
chunk_count += len(more_chunks) # Fixed: count actual additional chunks retrieved
|
||||
|
||||
return chunks, client_index + chunk_count
|
||||
|
||||
|
|
|
|||
|
|
@@ -8,6 +8,8 @@ import logging
|
|||
import threading
|
||||
import gevent  # cooperative sleep/yield support for the streaming loops
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from apps.channels.models import Channel
|
||||
from core.utils import log_system_event
|
||||
from .server import ProxyServer
|
||||
from .utils import create_ts_packet, get_logger
|
||||
from .redis_keys import RedisKeys
|
||||
|
|
@@ -52,6 +54,10 @@ class StreamGenerator:
|
|||
self.last_stats_bytes = 0
|
||||
self.current_rate = 0.0
|
||||
|
||||
# TTL refresh tracking
|
||||
self.last_ttl_refresh = time.time()
|
||||
self.ttl_refresh_interval = 3 # Refresh TTL every 3 seconds of active streaming
|
||||
|
||||
def generate(self):
|
||||
"""
|
||||
Generator function that produces the stream content for the client.
|
||||
|
|
@@ -84,6 +90,20 @@ class StreamGenerator:
|
|||
if not self._setup_streaming():
|
||||
return
|
||||
|
||||
# Log client connect event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'client_connect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
client_ip=self.client_ip,
|
||||
client_id=self.client_id,
|
||||
user_agent=self.client_user_agent[:100] if self.client_user_agent else None
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log client connect event: {e}")
|
||||
|
||||
# Main streaming loop
|
||||
for chunk in self._stream_data_generator():
|
||||
yield chunk
|
||||
|
|
@@ -204,6 +224,18 @@ class StreamGenerator:
|
|||
self.empty_reads += 1
|
||||
self.consecutive_empty += 1
|
||||
|
||||
# Check if we're too far behind (chunks expired from Redis)
|
||||
chunks_behind = self.buffer.index - self.local_index
|
||||
if chunks_behind > 50: # If more than 50 chunks behind, jump forward
|
||||
# Calculate new position: stay a few chunks behind current buffer
|
||||
initial_behind = ConfigHelper.initial_behind_chunks()
|
||||
new_index = max(self.local_index, self.buffer.index - initial_behind)
|
||||
|
||||
logger.warning(f"[{self.client_id}] Client too far behind ({chunks_behind} chunks), jumping from {self.local_index} to {new_index}")
|
||||
self.local_index = new_index
|
||||
self.consecutive_empty = 0 # Reset since we're repositioning
|
||||
continue # Try again immediately with new position
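# Worked example (illustrative numbers only): with buffer.index == 500,
# local_index == 440 and initial_behind_chunks() == 10, chunks_behind is 60 (> 50),
# so new_index = max(440, 500 - 10) = 490 and the client resumes 10 chunks behind
# the live buffer head instead of chasing expired chunks one by one.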
|
||||
|
||||
if self._should_send_keepalive(self.local_index):
|
||||
keepalive_packet = create_ts_packet('keepalive')
|
||||
logger.debug(f"[{self.client_id}] Sending keepalive packet while waiting at buffer head")
|
||||
|
|
@@ -324,7 +356,20 @@ class StreamGenerator:
|
|||
ChannelMetadataField.STATS_UPDATED_AT: str(current_time)
|
||||
}
|
||||
proxy_server.redis_client.hset(client_key, mapping=stats)
|
||||
# No need to set expiration as client heartbeat will refresh this key
|
||||
|
||||
# Refresh TTL periodically while actively streaming
|
||||
# This provides proof-of-life independent of the heartbeat thread
|
||||
if current_time - self.last_ttl_refresh > self.ttl_refresh_interval:
|
||||
try:
|
||||
# Refresh TTL on client key
|
||||
proxy_server.redis_client.expire(client_key, Config.CLIENT_RECORD_TTL)
|
||||
# Also refresh the client set TTL
|
||||
client_set_key = f"ts_proxy:channel:{self.channel_id}:clients"
|
||||
proxy_server.redis_client.expire(client_set_key, Config.CLIENT_RECORD_TTL)
|
||||
self.last_ttl_refresh = current_time
|
||||
logger.debug(f"[{self.client_id}] Refreshed client TTL (active streaming)")
|
||||
except Exception as ttl_error:
|
||||
logger.debug(f"[{self.client_id}] Failed to refresh TTL: {ttl_error}")
|
||||
except Exception as e:
|
||||
logger.warning(f"[{self.client_id}] Failed to store stats in Redis: {e}")
|
||||
|
||||
|
|
@@ -410,6 +455,22 @@ class StreamGenerator:
|
|||
total_clients = client_manager.get_total_client_count()
|
||||
logger.info(f"[{self.client_id}] Disconnected after {elapsed:.2f}s (local: {local_clients}, total: {total_clients})")
|
||||
|
||||
# Log client disconnect event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'client_disconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
client_ip=self.client_ip,
|
||||
client_id=self.client_id,
|
||||
user_agent=self.client_user_agent[:100] if self.client_user_agent else None,
|
||||
duration=round(elapsed, 2),
|
||||
bytes_sent=self.bytes_sent
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log client disconnect event: {e}")
|
||||
|
||||
# Schedule channel shutdown if no clients left
|
||||
if not stream_released: # Only if we haven't already released the stream
|
||||
self._schedule_channel_shutdown_if_needed(local_clients)
|
||||
|
|
|
|||
|
|
@@ -9,11 +9,14 @@ import subprocess
|
|||
import gevent
|
||||
import re
|
||||
from typing import Optional, List
|
||||
from django.db import connection
|
||||
from django.shortcuts import get_object_or_404
|
||||
from urllib3.exceptions import ReadTimeoutError
|
||||
from apps.proxy.config import TSConfig as Config
|
||||
from apps.channels.models import Channel, Stream
|
||||
from apps.m3u.models import M3UAccount, M3UAccountProfile
|
||||
from core.models import UserAgent, CoreSettings
|
||||
from core.utils import log_system_event
|
||||
from .stream_buffer import StreamBuffer
|
||||
from .utils import detect_stream_type, get_logger
|
||||
from .redis_keys import RedisKeys
|
||||
|
|
@@ -91,17 +94,23 @@ class StreamManager:
|
|||
self.tried_stream_ids.add(self.current_stream_id)
|
||||
logger.info(f"Loaded stream ID {self.current_stream_id} from Redis for channel {buffer.channel_id}")
|
||||
else:
|
||||
logger.warning(f"No stream_id found in Redis for channel {channel_id}")
|
||||
logger.warning(f"No stream_id found in Redis for channel {channel_id}. "
|
||||
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error loading stream ID from Redis: {e}")
|
||||
else:
|
||||
logger.warning(f"Unable to get stream ID for channel {channel_id} - stream switching may not work correctly")
|
||||
logger.warning(f"Unable to get stream ID for channel {channel_id}. "
|
||||
f"Stream switching will rely on URL comparison to avoid selecting the same stream.")
|
||||
|
||||
logger.info(f"Initialized stream manager for channel {buffer.channel_id}")
|
||||
|
||||
# Add this flag for tracking transcoding process status
|
||||
self.transcode_process_active = False
|
||||
|
||||
# Track stream command for efficient log parser routing
|
||||
self.stream_command = None
|
||||
self.parser_type = None # Will be set when transcode process starts
|
||||
|
||||
# Add tracking for data throughput
|
||||
self.bytes_processed = 0
|
||||
self.last_bytes_update = time.time()
|
||||
|
|
@@ -111,6 +120,9 @@ class StreamManager:
|
|||
self.stderr_reader_thread = None
|
||||
self.ffmpeg_input_phase = True # Track if we're still reading input info
|
||||
|
||||
# Add HTTP reader thread property
|
||||
self.http_reader = None
|
||||
|
||||
def _create_session(self):
|
||||
"""Create and configure requests session with optimal settings"""
|
||||
session = requests.Session()
|
||||
|
|
@@ -220,11 +232,12 @@ class StreamManager:
|
|||
# Continue with normal flow
|
||||
|
||||
# Check stream type before connecting
|
||||
stream_type = detect_stream_type(self.url)
|
||||
if self.transcode == False and stream_type == StreamType.HLS:
|
||||
logger.info(f"Detected HLS stream: {self.url} for channel {self.channel_id}")
|
||||
logger.info(f"HLS streams will be handled with FFmpeg for now - future version will support HLS natively for channel {self.channel_id}")
|
||||
# Enable transcoding for HLS streams
|
||||
self.stream_type = detect_stream_type(self.url)
|
||||
if self.transcode == False and self.stream_type in (StreamType.HLS, StreamType.RTSP, StreamType.UDP):
|
||||
stream_type_name = "HLS" if self.stream_type == StreamType.HLS else ("RTSP/RTP" if self.stream_type == StreamType.RTSP else "UDP")
|
||||
logger.info(f"Detected {stream_type_name} stream: {self.url} for channel {self.channel_id}")
|
||||
logger.info(f"{stream_type_name} streams require FFmpeg for channel {self.channel_id}")
|
||||
# Enable transcoding for HLS, RTSP/RTP, and UDP streams
|
||||
self.transcode = True
|
||||
# We'll override the stream profile selection with ffmpeg in the transcoding section
|
||||
self.force_ffmpeg = True
|
||||
|
|
@@ -252,6 +265,20 @@ class StreamManager:
|
|||
# Store connection start time to measure success duration
|
||||
connection_start_time = time.time()
|
||||
|
||||
# Log reconnection event if this is a retry (not first attempt)
|
||||
if self.retry_count > 0:
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_reconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
attempt=self.retry_count + 1,
|
||||
max_attempts=self.max_retries
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log reconnection event: {e}")
|
||||
|
||||
# Successfully connected - read stream data until disconnect/error
|
||||
self._process_stream_data()
|
||||
# If we get here, the connection was closed/failed
|
||||
|
|
@@ -281,6 +308,20 @@ class StreamManager:
|
|||
if self.retry_count >= self.max_retries:
|
||||
url_failed = True
|
||||
logger.warning(f"Maximum retry attempts ({self.max_retries}) reached for URL: {self.url} for channel: {self.channel_id}")
|
||||
|
||||
# Log connection error event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_error',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
error_type='connection_failed',
|
||||
url=self.url[:100] if self.url else None,
|
||||
attempts=self.max_retries
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log connection error event: {e}")
|
||||
else:
|
||||
# Wait with a short, linearly increasing backoff before retrying (capped at 3 seconds)
|
||||
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
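# Worked example of the retry delay (illustrative): retry 1 -> 0.25s, retry 4 -> 1.0s,
# retry 12 and above -> capped at 3s before the next connection attempt.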
|
||||
|
|
@@ -294,6 +335,21 @@ class StreamManager:
|
|||
|
||||
if self.retry_count >= self.max_retries:
|
||||
url_failed = True
|
||||
|
||||
# Log connection error event with exception details
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_error',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
error_type='connection_exception',
|
||||
error_message=str(e)[:200],
|
||||
url=self.url[:100] if self.url else None,
|
||||
attempts=self.max_retries
|
||||
)
|
||||
except Exception as log_error:
|
||||
logger.error(f"Could not log connection error event: {log_error}")
|
||||
else:
|
||||
# Wait with a short, linearly increasing backoff before retrying (capped at 3 seconds)
|
||||
timeout = min(.25 * self.retry_count, 3) # Cap at 3 seconds
|
||||
|
|
@@ -378,6 +434,12 @@ class StreamManager:
|
|||
except Exception as e:
|
||||
logger.error(f"Failed to update channel state in Redis: {e} for channel {self.channel_id}", exc_info=True)
|
||||
|
||||
# Close database connection for this thread
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(f"Stream manager stopped for channel {self.channel_id}")
|
||||
|
||||
def _establish_transcode_connection(self):
|
||||
|
|
@@ -407,7 +469,7 @@ class StreamManager:
|
|||
from core.models import StreamProfile
|
||||
try:
|
||||
stream_profile = StreamProfile.objects.get(name='ffmpeg', locked=True)
|
||||
logger.info("Using FFmpeg stream profile for HLS content")
|
||||
logger.info("Using FFmpeg stream profile for unsupported proxy content (HLS/RTSP/UDP)")
|
||||
except StreamProfile.DoesNotExist:
|
||||
# Fall back to channel's profile if FFmpeg not found
|
||||
stream_profile = channel.get_stream_profile()
|
||||
|
|
@@ -417,6 +479,28 @@ class StreamManager:
|
|||
|
||||
# Build and start transcode command
|
||||
self.transcode_cmd = stream_profile.build_command(self.url, self.user_agent)
|
||||
|
||||
# Store stream command for efficient log parser routing
|
||||
self.stream_command = stream_profile.command
|
||||
# Map actual commands to parser types for direct routing
|
||||
command_to_parser = {
|
||||
'ffmpeg': 'ffmpeg',
|
||||
'cvlc': 'vlc',
|
||||
'vlc': 'vlc',
|
||||
'streamlink': 'streamlink'
|
||||
}
|
||||
self.parser_type = command_to_parser.get(self.stream_command.lower())
|
||||
if self.parser_type:
|
||||
logger.debug(f"Using {self.parser_type} parser for log parsing (command: {self.stream_command})")
|
||||
else:
|
||||
logger.debug(f"Unknown stream command '{self.stream_command}', will use auto-detection for log parsing")
|
||||
|
||||
# For UDP streams, remove any user_agent parameters from the command
|
||||
if hasattr(self, 'stream_type') and self.stream_type == StreamType.UDP:
|
||||
# Filter out any arguments that contain the user_agent value or related headers
|
||||
self.transcode_cmd = [arg for arg in self.transcode_cmd if self.user_agent not in arg and 'user-agent' not in arg.lower() and 'user_agent' not in arg.lower()]
|
||||
logger.debug(f"Removed user_agent parameters from UDP stream command for channel: {self.channel_id}")
|
||||
|
||||
logger.debug(f"Starting transcode process: {self.transcode_cmd} for channel: {self.channel_id}")
|
||||
|
||||
# Modified to capture stderr instead of discarding it
|
||||
|
|
@@ -580,35 +664,51 @@ class StreamManager:
|
|||
if content_lower.startswith('output #') or 'encoder' in content_lower:
|
||||
self.ffmpeg_input_phase = False
|
||||
|
||||
# Only parse stream info if we're still in the input phase
|
||||
if ("stream #" in content_lower and
|
||||
("video:" in content_lower or "audio:" in content_lower) and
|
||||
self.ffmpeg_input_phase):
|
||||
# Route to appropriate parser based on known command type
|
||||
from .services.log_parsers import LogParserFactory
|
||||
from .services.channel_service import ChannelService
|
||||
|
||||
from .services.channel_service import ChannelService
|
||||
if "video:" in content_lower:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "video")
|
||||
elif "audio:" in content_lower:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "audio")
|
||||
parse_result = None
|
||||
|
||||
# If we know the parser type, use direct routing for efficiency
|
||||
if self.parser_type:
|
||||
# Get the appropriate parser and check what it can parse
|
||||
parser = LogParserFactory._parsers.get(self.parser_type)
|
||||
if parser:
|
||||
stream_type = parser.can_parse(content)
|
||||
if stream_type:
|
||||
# Parser can handle this line, parse it directly
|
||||
parsed_data = LogParserFactory.parse(stream_type, content)
|
||||
if parsed_data:
|
||||
parse_result = (stream_type, parsed_data)
|
||||
else:
|
||||
# Unknown command type - use auto-detection as fallback
|
||||
parse_result = LogParserFactory.auto_parse(content)
|
||||
|
||||
if parse_result:
|
||||
stream_type, parsed_data = parse_result
|
||||
# For FFmpeg, only parse during input phase
|
||||
if stream_type in ['video', 'audio', 'input']:
|
||||
if self.ffmpeg_input_phase:
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
|
||||
else:
|
||||
# VLC and Streamlink can be parsed anytime
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, stream_type, self.current_stream_id)
|
||||
|
||||
# Determine log level based on content
|
||||
if any(keyword in content_lower for keyword in ['error', 'failed', 'cannot', 'invalid', 'corrupt']):
|
||||
logger.error(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.error(f"Stream process error for channel {self.channel_id}: {content}")
|
||||
elif any(keyword in content_lower for keyword in ['warning', 'deprecated', 'ignoring']):
|
||||
logger.warning(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.warning(f"Stream process warning for channel {self.channel_id}: {content}")
|
||||
elif content.startswith('frame=') or 'fps=' in content or 'speed=' in content:
|
||||
# Stats lines - log at trace level to avoid spam
|
||||
logger.trace(f"FFmpeg stats for channel {self.channel_id}: {content}")
|
||||
logger.trace(f"Stream stats for channel {self.channel_id}: {content}")
|
||||
elif any(keyword in content_lower for keyword in ['input', 'output', 'stream', 'video', 'audio']):
|
||||
# Stream info - log at info level
|
||||
logger.info(f"FFmpeg info for channel {self.channel_id}: {content}")
|
||||
if content.startswith('Input #0'):
|
||||
# If it's input 0, parse stream info
|
||||
from .services.channel_service import ChannelService
|
||||
ChannelService.parse_and_store_stream_info(self.channel_id, content, "input")
|
||||
logger.info(f"Stream info for channel {self.channel_id}: {content}")
|
||||
else:
|
||||
# Everything else at debug level
|
||||
logger.debug(f"FFmpeg stderr for channel {self.channel_id}: {content}")
|
||||
logger.debug(f"Stream process output for channel {self.channel_id}: {content}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error logging stderr content for channel {self.channel_id}: {e}")
|
||||
|
|
@@ -649,6 +749,14 @@ class StreamManager:
|
|||
if any(x is not None for x in [ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate]):
|
||||
self._update_ffmpeg_stats_in_redis(ffmpeg_speed, ffmpeg_fps, actual_fps, ffmpeg_output_bitrate)
|
||||
|
||||
# Also save ffmpeg_output_bitrate to database if we have stream_id
|
||||
if ffmpeg_output_bitrate is not None and self.current_stream_id:
|
||||
from .services.channel_service import ChannelService
|
||||
ChannelService._update_stream_stats_in_db(
|
||||
self.current_stream_id,
|
||||
ffmpeg_output_bitrate=ffmpeg_output_bitrate
|
||||
)
|
||||
|
||||
# Fix the f-string formatting
|
||||
actual_fps_str = f"{actual_fps:.1f}" if actual_fps is not None else "N/A"
|
||||
ffmpeg_output_bitrate_str = f"{ffmpeg_output_bitrate:.1f}" if ffmpeg_output_bitrate is not None else "N/A"
|
||||
|
|
@@ -673,6 +781,19 @@ class StreamManager:
|
|||
# Reset buffering state
|
||||
self.buffering = False
|
||||
self.buffering_start_time = None
|
||||
|
||||
# Log failover event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_failover',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
reason='buffering_timeout',
|
||||
duration=buffering_duration
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log failover event: {e}")
|
||||
else:
|
||||
logger.error(f"Failed to switch to next stream for channel {self.channel_id} after buffering timeout")
|
||||
else:
|
||||
|
|
@@ -680,6 +801,19 @@ class StreamManager:
|
|||
self.buffering = True
|
||||
self.buffering_start_time = time.time()
|
||||
logger.warning(f"Buffering started for channel {self.channel_id} - speed: {ffmpeg_speed}x")
|
||||
|
||||
# Log system event for buffering
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_buffering',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
speed=ffmpeg_speed
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log buffering event: {e}")
|
||||
|
||||
# Log buffering warning
|
||||
logger.debug(f"FFmpeg speed on channel {self.channel_id} is below {self.buffering_speed} ({ffmpeg_speed}x) - buffering detected")
|
||||
# Set channel state to buffering
|
||||
|
|
@@ -729,9 +863,9 @@ class StreamManager:
|
|||
|
||||
|
||||
def _establish_http_connection(self):
|
||||
"""Establish a direct HTTP connection to the stream"""
|
||||
"""Establish HTTP connection using thread-based reader (same as transcode path)"""
|
||||
try:
|
||||
logger.debug(f"Using TS Proxy to connect to stream: {self.url}")
|
||||
logger.debug(f"Using HTTP streamer thread to connect to stream: {self.url}")
|
||||
|
||||
# Check if we already have active HTTP connections
|
||||
if self.current_response or self.current_session:
|
||||
|
|
@@ -748,41 +882,39 @@ class StreamManager:
|
|||
logger.debug(f"Closing existing transcode process before establishing HTTP connection for channel {self.channel_id}")
|
||||
self._close_socket()
|
||||
|
||||
# Create new session for each connection attempt
|
||||
session = self._create_session()
|
||||
self.current_session = session
|
||||
# Use HTTPStreamReader to fetch stream and pipe to a readable file descriptor
|
||||
# This allows us to use the same fetch_chunk() path as transcode
|
||||
from .http_streamer import HTTPStreamReader
|
||||
|
||||
# Stream the URL with proper timeout handling
|
||||
response = session.get(
|
||||
self.url,
|
||||
stream=True,
|
||||
timeout=(10, 60) # 10s connect timeout, 60s read timeout
|
||||
# Create and start the HTTP stream reader
|
||||
self.http_reader = HTTPStreamReader(
|
||||
url=self.url,
|
||||
user_agent=self.user_agent,
|
||||
chunk_size=self.chunk_size
|
||||
)
|
||||
self.current_response = response
|
||||
|
||||
if response.status_code == 200:
|
||||
self.connected = True
|
||||
self.healthy = True
|
||||
logger.info(f"Successfully connected to stream source for channel {self.channel_id}")
|
||||
# Start the reader thread and get the read end of the pipe
|
||||
pipe_fd = self.http_reader.start()
|
||||
|
||||
# Store connection start time for stability tracking
|
||||
self.connection_start_time = time.time()
|
||||
# Wrap the file descriptor in a file object (same as transcode stdout)
|
||||
import os
|
||||
self.socket = os.fdopen(pipe_fd, 'rb', buffering=0)
|
||||
self.connected = True
|
||||
self.healthy = True
|
||||
|
||||
# Set channel state to waiting for clients
|
||||
self._set_waiting_for_clients()
|
||||
logger.info(f"Successfully started HTTP streamer thread for channel {self.channel_id}")
|
||||
|
||||
# Store connection start time for stability tracking
|
||||
self.connection_start_time = time.time()
|
||||
|
||||
# Set channel state to waiting for clients
|
||||
self._set_waiting_for_clients()
|
||||
|
||||
return True
|
||||
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Failed to connect to stream for channel {self.channel_id}: HTTP {response.status_code}")
|
||||
self._close_connection()
|
||||
return False
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"HTTP request error: {e}")
|
||||
self._close_connection()
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error establishing HTTP connection for channel {self.channel_id}: {e}", exc_info=True)
|
||||
self._close_connection()
|
||||
self._close_socket()
|
||||
return False
|
||||
|
||||
def _update_bytes_processed(self, chunk_size):
|
||||
|
|
@ -810,48 +942,19 @@ class StreamManager:
|
|||
logger.error(f"Error updating bytes processed: {e}")
|
||||
|
||||
def _process_stream_data(self):
|
||||
"""Process stream data until disconnect or error"""
|
||||
"""Process stream data until disconnect or error - unified path for both transcode and HTTP"""
|
||||
try:
|
||||
if self.transcode:
|
||||
# Handle transcoded stream data
|
||||
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
|
||||
if self.fetch_chunk():
|
||||
self.last_data_time = time.time()
|
||||
else:
|
||||
if not self.running:
|
||||
break
|
||||
gevent.sleep(0.1)  # cooperative (gevent) sleep instead of blocking time.sleep(0.1)
|
||||
else:
|
||||
# Handle direct HTTP connection
|
||||
chunk_count = 0
|
||||
try:
|
||||
for chunk in self.current_response.iter_content(chunk_size=self.chunk_size):
|
||||
# Check if we've been asked to stop
|
||||
if self.stop_requested or self.url_switching or self.needs_stream_switch:
|
||||
break
|
||||
|
||||
if chunk:
|
||||
# Track chunk size before adding to buffer
|
||||
chunk_size = len(chunk)
|
||||
self._update_bytes_processed(chunk_size)
|
||||
|
||||
# Add chunk to buffer with TS packet alignment
|
||||
success = self.buffer.add_chunk(chunk)
|
||||
|
||||
if success:
|
||||
self.last_data_time = time.time()
|
||||
chunk_count += 1
|
||||
|
||||
# Update last data timestamp in Redis
|
||||
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
|
||||
last_data_key = RedisKeys.last_data(self.buffer.channel_id)
|
||||
self.buffer.redis_client.set(last_data_key, str(time.time()), ex=60)
|
||||
except (AttributeError, ConnectionError) as e:
|
||||
if self.stop_requested or self.url_switching:
|
||||
logger.debug(f"Expected connection error during shutdown/URL switch for channel {self.channel_id}: {e}")
|
||||
else:
|
||||
logger.error(f"Unexpected stream error for channel {self.channel_id}: {e}")
|
||||
raise
|
||||
# Both transcode and HTTP now use the same subprocess/socket approach
|
||||
# This gives us perfect control: check flags between chunks, timeout just returns False
|
||||
while self.running and self.connected and not self.stop_requested and not self.needs_stream_switch:
|
||||
if self.fetch_chunk():
|
||||
self.last_data_time = time.time()
|
||||
else:
|
||||
# fetch_chunk() returned False - could be timeout, no data, or error
|
||||
if not self.running:
|
||||
break
|
||||
# Brief sleep before retry to avoid tight loop
|
||||
gevent.sleep(0.1)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing stream data for channel {self.channel_id}: {e}", exc_info=True)
|
||||
|
||||
|
|
@@ -940,6 +1043,7 @@ class StreamManager:
|
|||
|
||||
# Import both models for proper resource management
|
||||
from apps.channels.models import Stream, Channel
|
||||
from django.db import connection
|
||||
|
||||
# Update stream profile if we're switching streams
|
||||
if self.current_stream_id and stream_id and self.current_stream_id != stream_id:
|
||||
|
|
@@ -957,9 +1061,17 @@ class StreamManager:
|
|||
logger.debug(f"Updated m3u profile for channel {self.channel_id} to use profile from stream {stream_id}")
|
||||
else:
|
||||
logger.warning(f"Failed to update stream profile for channel {self.channel_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating stream profile for channel {self.channel_id}: {e}")
|
||||
|
||||
finally:
|
||||
# Always close database connection after profile update
|
||||
try:
|
||||
connection.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# CRITICAL: Set a flag to prevent immediate reconnection with old URL
|
||||
self.url_switching = True
|
||||
self.url_switch_start_time = time.time()
|
||||
|
|
@@ -997,6 +1109,19 @@ class StreamManager:
|
|||
except Exception as e:
|
||||
logger.warning(f"Failed to reset buffer position: {e}")
|
||||
|
||||
# Log stream switch event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'stream_switch',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
new_url=new_url[:100] if new_url else None,
|
||||
stream_id=stream_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log stream switch event: {e}")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error during URL update for channel {self.channel_id}: {e}", exc_info=True)
|
||||
|
|
@@ -1115,6 +1240,19 @@ class StreamManager:
|
|||
if connection_result:
|
||||
self.connection_start_time = time.time()
|
||||
logger.info(f"Reconnect successful for channel {self.channel_id}")
|
||||
|
||||
# Log reconnection event
|
||||
try:
|
||||
channel_obj = Channel.objects.get(uuid=self.channel_id)
|
||||
log_system_event(
|
||||
'channel_reconnect',
|
||||
channel_id=self.channel_id,
|
||||
channel_name=channel_obj.name,
|
||||
reason='health_monitor'
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Could not log reconnection event: {e}")
|
||||
|
||||
return True
|
||||
else:
|
||||
logger.warning(f"Reconnect failed for channel {self.channel_id}")
|
||||
|
|
@@ -1175,6 +1313,15 @@ class StreamManager:
if self.current_response or self.current_session:
    self._close_connection()

# Stop HTTP reader thread if it exists
if hasattr(self, 'http_reader') and self.http_reader:
    try:
        logger.debug(f"Stopping HTTP reader thread for channel {self.channel_id}")
        self.http_reader.stop()
        self.http_reader = None
    except Exception as e:
        logger.debug(f"Error stopping HTTP reader for channel {self.channel_id}: {e}")

# Otherwise handle socket and transcode resources
if self.socket:
    try:

@@ -1183,25 +1330,17 @@ class StreamManager:
        logger.debug(f"Error closing socket for channel {self.channel_id}: {e}")
        pass

# Enhanced transcode process cleanup with more aggressive termination
# Enhanced transcode process cleanup with immediate termination
if self.transcode_process:
    try:
        # First try polite termination
        logger.debug(f"Terminating transcode process for channel {self.channel_id}")
        self.transcode_process.terminate()
        logger.debug(f"Killing transcode process for channel {self.channel_id}")
        self.transcode_process.kill()

        # Give it a short time to terminate gracefully
        # Give it a very short time to die
        try:
            self.transcode_process.wait(timeout=1.0)
            self.transcode_process.wait(timeout=0.5)
        except subprocess.TimeoutExpired:
            # If it doesn't terminate quickly, kill it
            logger.warning(f"Transcode process didn't terminate within timeout, killing forcefully for channel {self.channel_id}")
            self.transcode_process.kill()

            try:
                self.transcode_process.wait(timeout=1.0)
            except subprocess.TimeoutExpired:
                logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
            logger.error(f"Failed to kill transcode process even with force for channel {self.channel_id}")
    except Exception as e:
        logger.debug(f"Error terminating transcode process for channel {self.channel_id}: {e}")

@@ -1211,6 +1350,30 @@ class StreamManager:
    except Exception as e:
        logger.error(f"Final kill attempt failed for channel {self.channel_id}: {e}")

    # Explicitly close all subprocess pipes to prevent file descriptor leaks
    try:
        if self.transcode_process.stdin:
            self.transcode_process.stdin.close()
        if self.transcode_process.stdout:
            self.transcode_process.stdout.close()
        if self.transcode_process.stderr:
            self.transcode_process.stderr.close()
        logger.debug(f"Closed all subprocess pipes for channel {self.channel_id}")
    except Exception as e:
        logger.debug(f"Error closing subprocess pipes for channel {self.channel_id}: {e}")

    # Join stderr reader thread to ensure it's fully terminated
    if hasattr(self, 'stderr_reader_thread') and self.stderr_reader_thread and self.stderr_reader_thread.is_alive():
        try:
            logger.debug(f"Waiting for stderr reader thread to terminate for channel {self.channel_id}")
            self.stderr_reader_thread.join(timeout=2.0)
            if self.stderr_reader_thread.is_alive():
                logger.warning(f"Stderr reader thread did not terminate within timeout for channel {self.channel_id}")
        except Exception as e:
            logger.debug(f"Error joining stderr reader thread for channel {self.channel_id}: {e}")
        finally:
            self.stderr_reader_thread = None

    self.transcode_process = None
    self.transcode_process_active = False  # Reset the flag
@@ -1242,7 +1405,7 @@ class StreamManager:

try:
    # Set timeout for chunk reads
    chunk_timeout = ConfigHelper.get('CHUNK_TIMEOUT', 10)  # Default 10 seconds
    chunk_timeout = ConfigHelper.chunk_timeout()  # Use centralized timeout configuration

    try:
        # Handle different socket types with timeout

@@ -1325,7 +1488,17 @@ class StreamManager:
# Only update if not already past connecting
if not current_state or current_state in [ChannelState.INITIALIZING, ChannelState.CONNECTING]:
    # NEW CODE: Check if buffer has enough chunks
    current_buffer_index = getattr(self.buffer, 'index', 0)
    # IMPORTANT: Read from Redis, not local buffer.index, because in multi-worker setup
    # each worker has its own StreamBuffer instance with potentially stale local index
    buffer_index_key = RedisKeys.buffer_index(channel_id)
    current_buffer_index = 0
    try:
        redis_index = redis_client.get(buffer_index_key)
        if redis_index:
            current_buffer_index = int(redis_index)
    except Exception as e:
        logger.error(f"Error reading buffer index from Redis: {e}")

    initial_chunks_needed = ConfigHelper.initial_behind_chunks()

    if current_buffer_index < initial_chunks_needed:

@@ -1373,10 +1546,21 @@ class StreamManager:
# Clean up completed timers
self._buffer_check_timers = [t for t in self._buffer_check_timers if t.is_alive()]

if hasattr(self.buffer, 'index') and hasattr(self.buffer, 'channel_id'):
    current_buffer_index = self.buffer.index
    initial_chunks_needed = getattr(Config, 'INITIAL_BEHIND_CHUNKS', 10)
if hasattr(self.buffer, 'channel_id') and hasattr(self.buffer, 'redis_client'):
    channel_id = self.buffer.channel_id
    redis_client = self.buffer.redis_client

    # IMPORTANT: Read from Redis, not local buffer.index
    buffer_index_key = RedisKeys.buffer_index(channel_id)
    current_buffer_index = 0
    try:
        redis_index = redis_client.get(buffer_index_key)
        if redis_index:
            current_buffer_index = int(redis_index)
    except Exception as e:
        logger.error(f"Error reading buffer index from Redis: {e}")

    initial_chunks_needed = ConfigHelper.initial_behind_chunks()  # Use ConfigHelper for consistency

    if current_buffer_index >= initial_chunks_needed:
        # We now have enough buffer, call _set_waiting_for_clients again
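Both hunks above read the authoritative buffer index out of Redis rather than trusting the worker-local StreamBuffer.index. A minimal sketch of that pattern; the helper itself is not part of this diff, and only the RedisKeys.buffer_index key and the redis client come from the code above:

def read_shared_buffer_index(redis_client, channel_id, fallback=0):
    """Return the cross-worker buffer index stored in Redis, or a fallback value."""
    try:
        raw = redis_client.get(RedisKeys.buffer_index(channel_id))
        return int(raw) if raw else fallback
    except Exception:
        # Redis unavailable or value unparsable - fall back to the local view
        return fallback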
@@ -1401,6 +1585,7 @@ class StreamManager:
def _try_next_stream(self):
    """
    Try to switch to the next available stream for this channel.
    Will iterate through multiple alternate streams if needed to find one with a different URL.

    Returns:
        bool: True if successfully switched to a new stream, False otherwise

@@ -1426,60 +1611,71 @@ class StreamManager:
    logger.warning(f"All {len(alternate_streams)} alternate streams have been tried for channel {self.channel_id}")
    return False

# Get the next stream to try
next_stream = untried_streams[0]
stream_id = next_stream['stream_id']
profile_id = next_stream['profile_id']  # This is the M3U profile ID we need
# IMPROVED: Try multiple streams until we find one with a different URL
for next_stream in untried_streams:
    stream_id = next_stream['stream_id']
    profile_id = next_stream['profile_id']  # This is the M3U profile ID we need

# Add to tried streams
self.tried_stream_ids.add(stream_id)
    # Add to tried streams
    self.tried_stream_ids.add(stream_id)

# Get stream info including URL using the profile_id we already have
logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
stream_info = get_stream_info_for_switch(self.channel_id, stream_id)
    # Get stream info including URL using the profile_id we already have
    logger.info(f"Trying next stream ID {stream_id} with profile ID {profile_id} for channel {self.channel_id}")
    stream_info = get_stream_info_for_switch(self.channel_id, stream_id)

if 'error' in stream_info or not stream_info.get('url'):
    logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
    return False
    if 'error' in stream_info or not stream_info.get('url'):
        logger.error(f"Error getting info for stream {stream_id} for channel {self.channel_id}: {stream_info.get('error', 'No URL')}")
        continue  # Try next stream instead of giving up

# Update URL and user agent
new_url = stream_info['url']
new_user_agent = stream_info['user_agent']
new_transcode = stream_info['transcode']
    # Update URL and user agent
    new_url = stream_info['url']
    new_user_agent = stream_info['user_agent']
    new_transcode = stream_info['transcode']

logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")
    # CRITICAL FIX: Check if the new URL is the same as current URL
    # This can happen when current_stream_id is None and we accidentally select the same stream
    if new_url == self.url:
        logger.warning(f"Stream ID {stream_id} generates the same URL as current stream ({new_url}). "
                       f"Skipping this stream and trying next alternative.")
        continue  # Try next stream instead of giving up

# IMPORTANT: Just update the URL, don't stop the channel or release resources
switch_result = self.update_url(new_url, stream_id, profile_id)
if not switch_result:
    logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
    return False
    logger.info(f"Switching from URL {self.url} to {new_url} for channel {self.channel_id}")

# Update stream ID tracking
self.current_stream_id = stream_id
    # IMPORTANT: Just update the URL, don't stop the channel or release resources
    switch_result = self.update_url(new_url, stream_id, profile_id)
    if not switch_result:
        logger.error(f"Failed to update URL for stream ID {stream_id} for channel {self.channel_id}")
        continue  # Try next stream

# Store the new user agent and transcode settings
self.user_agent = new_user_agent
self.transcode = new_transcode
    # Update stream ID tracking
    self.current_stream_id = stream_id

# Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
    metadata_key = RedisKeys.channel_metadata(self.channel_id)
    self.buffer.redis_client.hset(metadata_key, mapping={
        ChannelMetadataField.URL: new_url,
        ChannelMetadataField.USER_AGENT: new_user_agent,
        ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
        ChannelMetadataField.M3U_PROFILE: str(profile_id),  # Use the profile_id from get_alternate_streams
        ChannelMetadataField.STREAM_ID: str(stream_id),
        ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
        ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
    })
    # Store the new user agent and transcode settings
    self.user_agent = new_user_agent
    self.transcode = new_transcode

# Log the switch
logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")
    # Update stream metadata in Redis - use the profile_id we got from get_alternate_streams
    if hasattr(self.buffer, 'redis_client') and self.buffer.redis_client:
        metadata_key = RedisKeys.channel_metadata(self.channel_id)
        self.buffer.redis_client.hset(metadata_key, mapping={
            ChannelMetadataField.URL: new_url,
            ChannelMetadataField.USER_AGENT: new_user_agent,
            ChannelMetadataField.STREAM_PROFILE: stream_info['stream_profile'],
            ChannelMetadataField.M3U_PROFILE: str(profile_id),  # Use the profile_id from get_alternate_streams
            ChannelMetadataField.STREAM_ID: str(stream_id),
            ChannelMetadataField.STREAM_SWITCH_TIME: str(time.time()),
            ChannelMetadataField.STREAM_SWITCH_REASON: "max_retries_exceeded"
        })

logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
return True
    # Log the switch
    logger.info(f"Stream metadata updated for channel {self.channel_id} to stream ID {stream_id} with M3U profile {profile_id}")

    logger.info(f"Successfully switched to stream ID {stream_id} with URL {new_url} for channel {self.channel_id}")
    return True

# If we get here, we tried all streams but none worked
logger.error(f"Tried {len(untried_streams)} alternate streams but none were suitable for channel {self.channel_id}")
return False

except Exception as e:
    logger.error(f"Error trying next stream for channel {self.channel_id}: {e}", exc_info=True)
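Stripped of logging and metadata updates, the failover loop introduced above reduces to the following sketch; the function and parameter names are illustrative and not part of the diff:

def pick_alternate(candidates, current_url, tried_ids, fetch_info, switch_url):
    """Keep trying candidates until a usable stream with a different URL switches cleanly."""
    for candidate in candidates:
        tried_ids.add(candidate['stream_id'])
        info = fetch_info(candidate['stream_id'])
        if 'error' in info or not info.get('url'):
            continue                 # unusable candidate, keep looking
        if info['url'] == current_url:
            continue                 # same URL as the failing stream, keep looking
        if not switch_url(info['url'], candidate['stream_id'], candidate['profile_id']):
            continue                 # switch failed, keep looking
        return candidate             # success
    return None                      # nothing suitable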
@@ -8,7 +8,7 @@ from typing import Optional, Tuple, List
from django.shortcuts import get_object_or_404
from apps.channels.models import Channel, Stream
from apps.m3u.models import M3UAccount, M3UAccountProfile
from core.models import UserAgent, CoreSettings
from core.models import UserAgent, CoreSettings, StreamProfile
from .utils import get_logger
from uuid import UUID
import requests

@@ -26,16 +26,100 @@ def get_stream_object(id: str):

def generate_stream_url(channel_id: str) -> Tuple[str, str, bool, Optional[int]]:
    """
    Generate the appropriate stream URL for a channel based on its profile settings.
    Generate the appropriate stream URL for a channel or stream based on its profile settings.

    Args:
        channel_id: The UUID of the channel
        channel_id: The UUID of the channel or stream hash

    Returns:
        Tuple[str, str, bool, Optional[int]]: (stream_url, user_agent, transcode_flag, profile_id)
    """
    try:
        channel = get_stream_object(channel_id)
        channel_or_stream = get_stream_object(channel_id)

        # Handle direct stream preview (custom streams)
        if isinstance(channel_or_stream, Stream):
            from core.utils import RedisClient

            stream = channel_or_stream
            logger.info(f"Previewing stream directly: {stream.id} ({stream.name})")

            # For custom streams, we need to get the M3U account and profile
            m3u_account = stream.m3u_account
            if not m3u_account:
                logger.error(f"Stream {stream.id} has no M3U account")
                return None, None, False, None

            # Get active profiles for this M3U account
            m3u_profiles = m3u_account.profiles.filter(is_active=True)
            default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

            if not default_profile:
                logger.error(f"No default active profile found for M3U account {m3u_account.id}")
                return None, None, False, None

            # Check profiles in order: default first, then others
            profiles = [default_profile] + [obj for obj in m3u_profiles if not obj.is_default]

            # Try to find an available profile with connection capacity
            redis_client = RedisClient.get_client()
            selected_profile = None

            for profile in profiles:
                logger.info(profile)

                # Check connection availability
                if redis_client:
                    profile_connections_key = f"profile_connections:{profile.id}"
                    current_connections = int(redis_client.get(profile_connections_key) or 0)

                    # Check if profile has available slots (or unlimited connections)
                    if profile.max_streams == 0 or current_connections < profile.max_streams:
                        selected_profile = profile
                        logger.debug(f"Selected profile {profile.id} with {current_connections}/{profile.max_streams} connections for stream preview")
                        break
                    else:
                        logger.debug(f"Profile {profile.id} at max connections: {current_connections}/{profile.max_streams}")
                else:
                    # No Redis available, use first active profile
                    selected_profile = profile
                    break

            if not selected_profile:
                logger.error(f"No profiles available with connection capacity for M3U account {m3u_account.id}")
                return None, None, False, None

            # Get the appropriate user agent
            stream_user_agent = m3u_account.get_user_agent().user_agent
            if stream_user_agent is None:
                stream_user_agent = UserAgent.objects.get(id=CoreSettings.get_default_user_agent_id())
                logger.debug(f"No user agent found for account, using default: {stream_user_agent}")

            # Get stream URL with the selected profile's URL transformation
            stream_url = transform_url(stream.url, selected_profile.search_pattern, selected_profile.replace_pattern)

            # Check if the stream has its own stream_profile set, otherwise use default
            if stream.stream_profile:
                stream_profile = stream.stream_profile
                logger.debug(f"Using stream's own stream profile: {stream_profile.name}")
            else:
                stream_profile = StreamProfile.objects.get(
                    id=CoreSettings.get_default_stream_profile_id()
                )
                logger.debug(f"Using default stream profile: {stream_profile.name}")

            # Check if transcoding is needed
            if stream_profile.is_proxy() or stream_profile is None:
                transcode = False
            else:
                transcode = True

            stream_profile_id = stream_profile.id

            return stream_url, stream_user_agent, transcode, stream_profile_id

        # Handle channel preview (existing logic)
        channel = channel_or_stream

        # Get stream and profile for this channel
        # Note: get_stream now returns 3 values (stream_id, profile_id, error_reason)
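The profile selection above gates each M3U profile on a per-profile Redis counter, where max_streams == 0 means unlimited. A minimal sketch of that capacity test, assuming the profile_connections:<id> key used above; the helper name is illustrative and not part of the diff:

def profile_has_capacity(redis_client, profile):
    """Sketch of the capacity test used above: max_streams == 0 means 'unlimited'."""
    if redis_client is None:
        return True  # no Redis available -> treat the profile as usable, as in the code above
    in_use = int(redis_client.get(f"profile_connections:{profile.id}") or 0)
    return profile.max_streams == 0 or in_use < profile.max_streams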
@@ -142,7 +226,7 @@ def get_stream_info_for_switch(channel_id: str, target_stream_id: Optional[int]
if not m3u_account:
    return {'error': 'Stream has no M3U account'}

m3u_profiles = m3u_account.profiles.all()
m3u_profiles = m3u_account.profiles.filter(is_active=True)
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

if not default_profile:

@@ -153,10 +237,6 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):

selected_profile = None
for profile in profiles:
    # Skip inactive profiles
    if not profile.is_active:
        logger.debug(f"Skipping inactive profile {profile.id}")
        continue

    # Check connection availability
    if redis_client:

@@ -281,8 +361,10 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No
if not m3u_account:
    logger.debug(f"Stream {stream.id} has no M3U account")
    continue

m3u_profiles = m3u_account.profiles.all()
if m3u_account.is_active == False:
    logger.debug(f"M3U account {m3u_account.id} is inactive, skipping.")
    continue
m3u_profiles = m3u_account.profiles.filter(is_active=True)
default_profile = next((obj for obj in m3u_profiles if obj.is_default), None)

if not default_profile:

@@ -294,11 +376,6 @@ def get_alternate_streams(channel_id: str, current_stream_id: Optional[int] = No

selected_profile = None
for profile in profiles:
    # Skip inactive profiles
    if not profile.is_active:
        logger.debug(f"Skipping inactive profile {profile.id}")
        continue

    # Check connection availability
    if redis_client:
        profile_connections_key = f"profile_connections:{profile.id}"

@@ -358,6 +435,9 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
    """
    Validate if a stream URL is accessible without downloading the full content.

    Note: UDP/RTP/RTSP streams are automatically considered valid as they cannot
    be validated via HTTP methods.

    Args:
        url (str): The URL to validate
        user_agent (str): User agent to use for the request

@@ -366,6 +446,12 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
    Returns:
        tuple: (is_valid, final_url, status_code, message)
    """
    # Check if URL uses non-HTTP protocols (UDP/RTP/RTSP)
    # These cannot be validated via HTTP methods, so we skip validation
    if url.startswith(('udp://', 'rtp://', 'rtsp://')):
        logger.info(f"Skipping HTTP validation for non-HTTP protocol: {url}")
        return True, url, 200, "Non-HTTP protocol (UDP/RTP/RTSP) - validation skipped"

    try:
        # Create session with proper headers
        session = requests.Session()
@@ -376,16 +462,21 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
session.headers.update(headers)

# Make HEAD request first as it's faster and doesn't download content
head_response = session.head(
    url,
    timeout=timeout,
    allow_redirects=True
)
head_request_success = True
try:
    head_response = session.head(
        url,
        timeout=timeout,
        allow_redirects=True
    )
except requests.exceptions.RequestException as e:
    head_request_success = False
    logger.warning(f"Request error (HEAD), assuming HEAD not supported: {str(e)}")

# If HEAD not supported, server will return 405 or other error
if 200 <= head_response.status_code < 300:
if head_request_success and (200 <= head_response.status_code < 300):
    # HEAD request successful
    return True, head_response.url, head_response.status_code, "Valid (HEAD request)"
    return True, url, head_response.status_code, "Valid (HEAD request)"

# Try a GET request with stream=True to avoid downloading all content
get_response = session.get(

@@ -398,7 +489,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
# IMPORTANT: Check status code first before checking content
if not (200 <= get_response.status_code < 300):
    logger.warning(f"Stream validation failed with HTTP status {get_response.status_code}")
    return False, get_response.url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"
    return False, url, get_response.status_code, f"Invalid HTTP status: {get_response.status_code}"

# Only check content if status code is valid
try:

@@ -452,7 +543,7 @@ def validate_stream_url(url, user_agent=None, timeout=(5, 5)):
get_response.close()

# If we have content, consider it valid even with unrecognized content type
return is_valid, get_response.url, get_response.status_code, message
return is_valid, url, get_response.status_code, message

except requests.exceptions.Timeout:
    return False, url, 0, "Timeout connecting to stream"
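The validation flow above boils down to a HEAD probe with a GET fallback for servers that reject HEAD. A compressed, self-contained sketch of that flow using requests; this is not the project's function and the return shape is simplified:

import requests

def quick_validate(url, user_agent=None, timeout=(5, 5)):
    """Compressed illustration of the HEAD-then-GET fallback used above."""
    session = requests.Session()
    if user_agent:
        session.headers.update({"User-Agent": user_agent})
    try:
        head = session.head(url, timeout=timeout, allow_redirects=True)
        if 200 <= head.status_code < 300:
            return True, head.status_code, "Valid (HEAD request)"
    except requests.exceptions.RequestException:
        pass  # many stream servers reject HEAD; fall through to GET
    get = session.get(url, timeout=timeout, stream=True, allow_redirects=True)
    try:
        return 200 <= get.status_code < 300, get.status_code, "Checked via GET"
    finally:
        get.close()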
@@ -7,19 +7,27 @@ logger = logging.getLogger("ts_proxy")

def detect_stream_type(url):
    """
    Detect if stream URL is HLS or TS format.
    Detect if stream URL is HLS, RTSP/RTP, UDP, or TS format.

    Args:
        url (str): The stream URL to analyze

    Returns:
        str: 'hls' or 'ts' depending on detected format
        str: 'hls', 'rtsp', 'udp', or 'ts' depending on detected format
    """
    if not url:
        return 'unknown'

    url_lower = url.lower()

    # Check for UDP streams (requires FFmpeg)
    if url_lower.startswith('udp://'):
        return 'udp'

    # Check for RTSP/RTP streams (requires FFmpeg)
    if url_lower.startswith('rtsp://') or url_lower.startswith('rtp://'):
        return 'rtsp'

    # Look for common HLS indicators
    if (url_lower.endswith('.m3u8') or
        '.m3u8?' in url_lower or
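Expected behaviour of detect_stream_type for the branches shown above; the tail of the function (including the final 'ts' fallback) is outside this hunk, so the 'ts' case below is taken from the docstring:

# Usage sketch - assumes detect_stream_type from the module above is importable.
assert detect_stream_type('udp://239.0.0.1:1234') == 'udp'
assert detect_stream_type('rtsp://camera.local/live') == 'rtsp'
assert detect_stream_type('https://host/playlist.m3u8?token=x') == 'hls'
assert detect_stream_type('http://host/stream.ts') == 'ts'
assert detect_stream_type('') == 'unknown'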
@@ -4,7 +4,7 @@ import time
import random
import re
import pathlib
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect
from django.http import StreamingHttpResponse, JsonResponse, HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from apps.proxy.config import TSConfig as Config

@@ -28,6 +28,7 @@ from apps.accounts.permissions import (
from .constants import ChannelState, EventType, StreamType, ChannelMetadataField
from .config_helper import ConfigHelper
from .services.channel_service import ChannelService
from core.utils import send_websocket_update
from .url_utils import (
    generate_stream_url,
    transform_url,

@@ -83,11 +84,18 @@ def stream_ts(request, channel_id):
if state_field in metadata:
    channel_state = metadata[state_field].decode("utf-8")

if channel_state:
    # Channel is being initialized or already active - no need for reinitialization
# Active/running states - channel is operational, don't reinitialize
if channel_state in [
    ChannelState.ACTIVE,
    ChannelState.WAITING_FOR_CLIENTS,
    ChannelState.BUFFERING,
    ChannelState.INITIALIZING,
    ChannelState.CONNECTING,
    ChannelState.STOPPING,
]:
    needs_initialization = False
    logger.debug(
        f"[{client_id}] Channel {channel_id} already in state {channel_state}, skipping initialization"
        f"[{client_id}] Channel {channel_id} in state {channel_state}, skipping initialization"
    )

    # Special handling for initializing/connecting states
@@ -97,19 +105,34 @@ def stream_ts(request, channel_id):
]:
    channel_initializing = True
    logger.debug(
        f"[{client_id}] Channel {channel_id} is still initializing, client will wait for completion"
        f"[{client_id}] Channel {channel_id} is still initializing, client will wait"
    )
# Terminal states - channel needs cleanup before reinitialization
elif channel_state in [
    ChannelState.ERROR,
    ChannelState.STOPPED,
]:
    needs_initialization = True
    logger.info(
        f"[{client_id}] Channel {channel_id} in terminal state {channel_state}, will reinitialize"
    )
# Unknown/empty state - check if owner is alive
else:
    # Only check for owner if channel is in a valid state
    owner_field = ChannelMetadataField.OWNER.encode("utf-8")
    if owner_field in metadata:
        owner = metadata[owner_field].decode("utf-8")
        owner_heartbeat_key = f"ts_proxy:worker:{owner}:heartbeat"
        if proxy_server.redis_client.exists(owner_heartbeat_key):
            # Owner is still active, so we don't need to reinitialize
            # Owner is still active with unknown state - don't reinitialize
            needs_initialization = False
            logger.debug(
                f"[{client_id}] Channel {channel_id} has active owner {owner}"
                f"[{client_id}] Channel {channel_id} has active owner {owner}, skipping init"
            )
        else:
            # Owner dead - needs reinitialization
            needs_initialization = True
            logger.warning(
                f"[{client_id}] Channel {channel_id} owner {owner} is dead, will reinitialize"
            )

# Start initialization if needed
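The branching above maps channel state plus owner liveness to a reinitialization decision. Summarised as a small pure function; this helper is illustrative only and assumes the ChannelState constants imported above:

def needs_init(channel_state, owner_alive):
    """Illustrative summary of the branching above - not part of the diff."""
    running = {ChannelState.ACTIVE, ChannelState.WAITING_FOR_CLIENTS, ChannelState.BUFFERING,
               ChannelState.INITIALIZING, ChannelState.CONNECTING, ChannelState.STOPPING}
    if channel_state in running:
        return False                      # operational or already starting up
    if channel_state in {ChannelState.ERROR, ChannelState.STOPPED}:
        return True                       # terminal state: clean up and reinitialize
    return not owner_alive                # unknown state: trust a live owner, replace a dead one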
@@ -126,9 +149,9 @@ def stream_ts(request, channel_id):
    )
    ChannelService.stop_channel(channel_id)

# Use max retry attempts and connection timeout from config
max_retries = ConfigHelper.max_retries()
retry_timeout = ConfigHelper.connection_timeout()
# Use fixed retry interval and timeout
retry_timeout = 3  # 3 seconds total timeout
retry_interval = 0.1  # 100ms between attempts
wait_start_time = time.time()

stream_url = None

@@ -136,54 +159,69 @@ def stream_ts(request, channel_id):
transcode = False
profile_value = None
error_reason = None
attempt = 0
should_retry = True

# Try to get a stream with configured retries
for attempt in range(max_retries):
# Try to get a stream with fixed interval retries
while should_retry and time.time() - wait_start_time < retry_timeout:
    attempt += 1
    stream_url, stream_user_agent, transcode, profile_value = (
        generate_stream_url(channel_id)
    )

    if stream_url is not None:
        logger.info(
            f"[{client_id}] Successfully obtained stream for channel {channel_id}"
            f"[{client_id}] Successfully obtained stream for channel {channel_id} after {attempt} attempts"
        )
        break

    # If we failed because there are no streams assigned, don't retry
    _, _, error_reason = channel.get_stream()
    if error_reason and "maximum connection limits" not in error_reason:
        logger.warning(
            f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
    # On first failure, check if the error is retryable
    if attempt == 1:
        _, _, error_reason = channel.get_stream()
        if error_reason and "maximum connection limits" not in error_reason:
            logger.warning(
                f"[{client_id}] Can't retry - error not related to connection limits: {error_reason}"
            )
            should_retry = False
            break

    # Check if we have time remaining for another sleep cycle
    elapsed_time = time.time() - wait_start_time
    remaining_time = retry_timeout - elapsed_time

    # If we don't have enough time for the next sleep interval, break
    # but only after we've already made an attempt (the while condition will try one more time)
    if remaining_time <= retry_interval:
        logger.info(
            f"[{client_id}] Insufficient time ({remaining_time:.1f}s) for another sleep cycle, will make one final attempt"
        )
        break

    # Don't exceed the overall connection timeout
    if time.time() - wait_start_time > retry_timeout:
        logger.warning(
            f"[{client_id}] Connection wait timeout exceeded ({retry_timeout}s)"
        )
        break

    # Wait before retrying (using exponential backoff with a cap)
    wait_time = min(0.5 * (2**attempt), 2.0)  # Caps at 2 seconds
    # Wait before retrying
    logger.info(
        f"[{client_id}] Waiting {wait_time:.1f}s for a connection to become available (attempt {attempt+1}/{max_retries})"
        f"[{client_id}] Waiting {retry_interval*1000:.0f}ms for a connection to become available (attempt {attempt}, {remaining_time:.1f}s remaining)"
    )
    gevent.sleep(
        wait_time
    )  # FIXED: Using gevent.sleep instead of time.sleep
    gevent.sleep(retry_interval)
    retry_interval += 0.025  # Increase wait time by 25ms for next attempt

# Make one final attempt if we still don't have a stream, should retry, and haven't exceeded timeout
if stream_url is None and should_retry and time.time() - wait_start_time < retry_timeout:
    attempt += 1
    logger.info(
        f"[{client_id}] Making final attempt {attempt} at timeout boundary"
    )
    stream_url, stream_user_agent, transcode, profile_value = (
        generate_stream_url(channel_id)
    )
    if stream_url is not None:
        logger.info(
            f"[{client_id}] Successfully obtained stream on final attempt for channel {channel_id}"
        )

if stream_url is None:
    # Make sure to release any stream locks that might have been acquired
    if hasattr(channel, "streams") and channel.streams.exists():
        for stream in channel.streams.all():
            try:
                stream.release_stream()
                logger.info(
                    f"[{client_id}] Released stream {stream.id} for channel {channel_id}"
                )
            except Exception as e:
                logger.error(f"[{client_id}] Error releasing stream: {e}")
    # Release the channel's stream lock if one was acquired
    # Note: Only call this if get_stream() actually assigned a stream
    # In our case, if stream_url is None, no stream was ever assigned, so don't release

    # Get the specific error message if available
    wait_duration = f"{int(time.time() - wait_start_time)}s"
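The rewritten acquisition loop above is a deadline-bounded poll with a slowly growing sleep interval and one final attempt at the boundary. A standalone sketch of that shape; the names are illustrative, and get_resource stands in for generate_stream_url:

import time

def acquire_with_deadline(get_resource, deadline_s=3.0, interval_s=0.1, step_s=0.025):
    """Poll get_resource() until it returns something or the deadline passes."""
    start = time.time()
    while time.time() - start < deadline_s:
        result = get_resource()
        if result is not None:
            return result
        remaining = deadline_s - (time.time() - start)
        if remaining <= interval_s:
            break                      # not enough time for another sleep; try once more below
        time.sleep(interval_s)
        interval_s += step_s           # back off slightly on each miss
    return get_resource()              # final attempt at the deadline boundary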
@@ -192,6 +230,9 @@ def stream_ts(request, channel_id):
    if error_reason
    else "No available streams for this channel"
)
logger.info(
    f"[{client_id}] Failed to obtain stream after {attempt} attempts over {wait_duration}: {error_msg}"
)
return JsonResponse(
    {"error": error_msg, "waited": wait_duration}, status=503
)  # 503 Service Unavailable is appropriate here

@@ -273,6 +314,15 @@ def stream_ts(request, channel_id):
    logger.info(
        f"[{client_id}] Redirecting to validated URL: {final_url} ({message})"
    )

    # For non-HTTP protocols (RTSP/RTP/UDP), we need to manually create the redirect
    # because Django's HttpResponseRedirect blocks them for security
    if final_url.startswith(('rtsp://', 'rtp://', 'udp://')):
        logger.info(f"[{client_id}] Using manual redirect for non-HTTP protocol")
        response = HttpResponse(status=301)
        response['Location'] = final_url
        return response

    return HttpResponseRedirect(final_url)
else:
    logger.error(
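Background on the hand-built 301 above: Django's HttpResponseRedirect only whitelists the http, https and ftp schemes and raises DisallowedRedirect for anything else. If a redirect class is preferred over building the response manually, overriding allowed_schemes is one alternative; this sketch is not what the diff does:

from django.http import HttpResponseRedirect

class StreamRedirect(HttpResponseRedirect):
    # Extend the default whitelist (http/https/ftp) with the streaming schemes used above
    allowed_schemes = ['http', 'https', 'ftp', 'rtsp', 'rtp', 'udp']

# Usage: return StreamRedirect(final_url)  # 302 instead of the manual 301 above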
@@ -467,9 +517,7 @@ def stream_xc(request, username, password, channel_id):
extension = pathlib.Path(channel_id).suffix
channel_id = pathlib.Path(channel_id).stem

custom_properties = (
    json.loads(user.custom_properties) if user.custom_properties else {}
)
custom_properties = user.custom_properties or {}

if "xc_password" not in custom_properties:
    return Response({"error": "Invalid credentials"}, status=401)

@@ -479,24 +527,33 @@ def stream_xc(request, username, password, channel_id):

print(f"Fetchin channel with ID: {channel_id}")
if user.user_level < 10:
    filters = {
        "id": int(channel_id),
        "channelprofilemembership__enabled": True,
        "user_level__lte": user.user_level,
    }
    user_profile_count = user.channel_profiles.count()

    if user.channel_profiles.count() > 0:
        channel_profiles = user.channel_profiles.all()
        filters["channelprofilemembership__channel_profile__in"] = channel_profiles
    # If user has ALL profiles or NO profiles, give unrestricted access
    if user_profile_count == 0:
        # No profile filtering - user sees all channels based on user_level
        filters = {
            "id": int(channel_id),
            "user_level__lte": user.user_level
        }
        channel = Channel.objects.filter(**filters).first()
    else:
        # User has specific limited profiles assigned
        filters = {
            "id": int(channel_id),
            "channelprofilemembership__enabled": True,
            "user_level__lte": user.user_level,
            "channelprofilemembership__channel_profile__in": user.channel_profiles.all()
        }
        channel = Channel.objects.filter(**filters).distinct().first()

    channel = Channel.objects.filter(**filters).distinct().first()
    if not channel:
        return JsonResponse({"error": "Not found"}, status=404)
else:
    channel = get_object_or_404(Channel, id=channel_id)

# @TODO: we've got the file 'type' via extension, support this when we support multiple outputs
return stream_ts(request._request, channel.uuid)
return stream_ts(request._request, str(channel.uuid))


@csrf_exempt

@@ -635,6 +692,18 @@ def channel_status(request, channel_id=None):
if cursor == 0:
    break

# Send WebSocket update with the stats
# Format it the same way the original Celery task did
send_websocket_update(
    "updates",
    "update",
    {
        "success": True,
        "type": "channel_stats",
        "stats": json.dumps({'channels': all_channels, 'count': len(all_channels)})
    }
)

return JsonResponse({"channels": all_channels, "count": len(all_channels)})

except Exception as e:

@@ -5,4 +5,5 @@ app_name = 'proxy'
urlpatterns = [
    path('ts/', include('apps.proxy.ts_proxy.urls')),
    path('hls/', include('apps.proxy.hls_proxy.urls')),
    path('vod/', include('apps.proxy.vod_proxy.urls')),
]
New files added in this comparison:

0     apps/proxy/vod_proxy/__init__.py                          (new file)
1449  apps/proxy/vod_proxy/connection_manager.py                (new file; diff suppressed because it is too large)
1408  apps/proxy/vod_proxy/multi_worker_connection_manager.py   (new file; diff suppressed because it is too large)

(Some files were not shown because too many files have changed in this diff.)