Mirror of https://github.com/mailcow/mailcow-dockerized.git (synced 2026-01-23 18:26:54 +00:00)

**Compare commits** — 1169 commits
*(Commit table omitted: only the SHA1 column of the 1169-commit listing survived this capture; the author, date, and message cells were empty.)*
1158 changed files with 80230 additions and 19467 deletions
**.github/FUNDING.yml** (vendored) — 1 change

```diff
@@ -1 +1,2 @@
 github: mailcow
+custom: ["https://www.servercow.de/mailcow?lang=en#sal"]
```
**.github/ISSUE_TEMPLATE/Bug_report.yml** (vendored) — 75 changes

```diff
@@ -11,22 +11,35 @@ body:
           required: true
   - type: checkboxes
     attributes:
-      label: I've found a bug and checked that ...
-      description: Prior to placing the issue, please check following:** *(fill out each checkbox with an `X` once done)*
+      label: Checklist prior issue creation
+      description: Prior to creating the issue...
      options:
-        - label: ... I understand that not following the below instructions will result in immediate closure and/or deletion of my issue.
+        - label: I understand that failure to follow below instructions may cause this issue to be closed.
          required: true
-        - label: ... I have understood that this bug report is dedicated for bugs, and not for support-related inquiries.
+        - label: I understand that vague, incomplete or inaccurate information may cause this issue to be closed.
          required: true
-        - label: ... I have understood that answers are voluntary and community-driven, and not commercial support.
+        - label: I understand that this form is intended solely for reporting software bugs and not for support-related inquiries.
          required: true
-        - label: ... I have verified that my issue has not been already answered in the past. I also checked previous [issues](https://github.com/mailcow/mailcow-dockerized/issues).
+        - label: I understand that all responses are voluntary and community-driven, and do not constitute commercial support.
          required: true
+        - label: I confirm that I have reviewed previous [issues](https://github.com/mailcow/mailcow-dockerized/issues) to ensure this matter has not already been addressed.
+          required: true
+        - label: I confirm that my environment meets all [prerequisite requirements](https://docs.mailcow.email/getstarted/prerequisite-system/) as specified in the official documentation.
+          required: true
   - type: textarea
     attributes:
       label: Description
-      description: Please provide a brief description of the bug in 1-2 sentences. If applicable, add screenshots to help explain your problem. Very useful for bugs in mailcow UI.
-      render: plain text
+      description: Please provide a brief description of the bug. If applicable, add screenshots to help explain your problem. (Very useful for bugs in mailcow UI.)
     validations:
       required: true
+  - type: textarea
+    attributes:
+      label: "Steps to reproduce:"
+      description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
+      placeholder: |-
+        1. ...
+        2. ...
+        3. ...
+    validations:
+      required: true
   - type: textarea
@@ -36,35 +49,36 @@ body:
       render: plain text
     validations:
       required: true
-  - type: textarea
-    attributes:
-      label: "Steps to reproduce:"
-      description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
-      render: plain text
-      placeholder: |-
-        1. ...
-        2. ...
-        3. ...
-    validations:
-      required: true
   - type: markdown
     attributes:
       value: |
-        ### In this stage we would kindly ask you to attach general system information about your setup.
+        ## System information
+        In this stage we would kindly ask you to attach general system information about your setup.
   - type: dropdown
     attributes:
       label: "Which branch are you using?"
-      description: "#### `git rev-parse --abbrev-ref HEAD`"
+      description: "#### Run: `git rev-parse --abbrev-ref HEAD`"
       multiple: false
       options:
-        - master
+        - master (stable)
+        - staging
        - nightly
     validations:
       required: true
+  - type: dropdown
+    attributes:
+      label: "Which architecture are you using?"
+      description: "#### Run: `uname -m`"
+      multiple: false
+      options:
+        - x86_64
+        - ARM64 (aarch64)
+    validations:
+      required: true
   - type: input
     attributes:
       label: "Operating System:"
       description: "#### Run: `lsb_release -ds`"
       placeholder: "e.g. Ubuntu 22.04 LTS"
     validations:
       required: true
@@ -83,43 +97,44 @@ body:
   - type: input
     attributes:
       label: "Virtualization technology:"
-      placeholder: "KVM, VMware, Xen, etc - **LXC and OpenVZ are not supported**"
+      description: "LXC and OpenVZ are not supported!"
+      placeholder: "KVM, VMware ESXi, Xen, etc"
     validations:
       required: true
   - type: input
     attributes:
       label: "Docker version:"
-      description: "#### `docker version`"
+      description: "#### Run: `docker version`"
       placeholder: "20.10.21"
     validations:
       required: true
   - type: input
     attributes:
       label: "docker-compose version or docker compose version:"
-      description: "#### `docker-compose version` or `docker compose version`"
+      description: "#### Run: `docker-compose version` or `docker compose version`"
       placeholder: "v2.12.2"
     validations:
       required: true
   - type: input
     attributes:
       label: "mailcow version:"
-      description: "#### ```git describe --tags `git rev-list --tags --max-count=1` ```"
-      placeholder: "2022-08"
+      description: "#### Run: ```git describe --tags `git rev-list --tags --max-count=1` ```"
+      placeholder: "2022-08x"
     validations:
       required: true
   - type: input
     attributes:
       label: "Reverse proxy:"
-      placeholder: "e.g. Nginx/Traefik"
+      placeholder: "e.g. nginx/Traefik, or none"
     validations:
       required: true
   - type: textarea
     attributes:
       label: "Logs of git diff:"
-      description: "#### Output of `git diff origin/master`, any other changes to the code? If so, **please post them**:"
+      description: "#### Output of `git diff origin/master`, any other changes to the code? Sanitize if needed. If so, **please post them**:"
       render: plain text
     validations:
-      required: true
+      required: false
   - type: textarea
     attributes:
       label: "Logs of iptables -L -vn:"
```
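Taken together, the form's system-information fields map to a handful of shell commands; a minimal sketch collecting them in one place (each command is quoted from the form's own field descriptions):

```bash
# Gather the details the bug-report form asks for, run from the mailcow-dockerized checkout.
git rev-parse --abbrev-ref HEAD                            # branch
uname -m                                                   # architecture
lsb_release -ds                                            # operating system
docker version                                             # Docker version
docker compose version                                     # or: docker-compose version
git describe --tags `git rev-list --tags --max-count=1`    # mailcow version
```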
**.github/ISSUE_TEMPLATE/config.yml** (vendored) — 9 changes

```diff
@@ -1,8 +1,11 @@
 blank_issues_enabled: false
 contact_links:
-  - name: ❓ Community-driven support
-    url: https://docs.mailcow.email/#get-support
+  - name: ❓ Community-driven support (Free)
+    url: https://docs.mailcow.email/#community-support-and-chat
     about: Please use the community forum for questions or assistance
+  - name: 🔥 Premium Support (Paid)
+    url: https://www.servercow.de/mailcow?lang=en#support
+    about: Buy a support subscription for any critical issues and get assisted by the mailcow Team. See conditions!
   - name: 🚨 Report a security vulnerability
-    url: https://www.servercow.de/anfrage?lang=en
+    url: "mailto:info@servercow.de?subject=mailcow: dockerized Security Vulnerability"
     about: Please give us appropriate time to verify, respond and fix before disclosure.
```
**.github/ISSUE_TEMPLATE/pr_to_nighty_template.yml** (vendored) — 12 changes

```diff
@@ -1,13 +1,3 @@
-## :memo: Brief description
-<!-- Diff summary - START -->
-<!-- Diff summary - END -->
-
-
-## :computer: Commits
-<!-- Diff commits - START -->
-<!-- Diff commits - END -->
-
-
 ## :file_folder: Modified files
 <!-- Diff files - START -->
-<!-- Diff files - END -->
+<!-- Diff files - END -->
```
**.github/PULL_REQUEST_TEMPLATE.md** (vendored, new file) — 38 lines

```markdown
<!-- _Please make sure to review and check all of these items, otherwise we might refuse your PR:_ -->

## Contribution Guidelines

* [ ] I've read the [contribution guidelines](https://github.com/mailcow/mailcow-dockerized/blob/master/CONTRIBUTING.md) and wholeheartedly agree them

<!-- _NOTE: this tickbox is needed to fullfil on order to get your PR reviewed._ -->

## What does this PR include?

### Short Description

<!-- Please write a short description, what your PR does here. -->

### Affected Containers

<!-- Please list all affected Docker containers here, which you commited changes to -->

<!--

Please list them like this:

- container1
- container2
- container3
etc.

-->

## Did you run tests?

### What did you tested?

<!-- Please write shortly, what you've tested (which components etc.). -->

### What were the final results? (Awaited, got)

<!-- Please write shortly, what your final tests results were. What did you awaited? Was the outcome the awaited one? -->
```
**.github/renovate.json** (vendored) — 8 changes

```diff
@@ -12,15 +12,9 @@
   "baseBranches": ["staging"],
   "enabledManagers": ["github-actions", "regex", "docker-compose"],
   "ignorePaths": [
-    "data\/web\/inc\/lib\/vendor\/matthiasmullie\/minify\/**"
+    "data\/web\/inc\/lib\/vendor\/**"
   ],
   "regexManagers": [
-    {
-      "fileMatch": ["^helper-scripts\/nextcloud.sh$"],
-      "matchStrings": [
-        "#\\srenovate:\\sdatasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?( extractVersion=(?<extractVersion>.*?))?\\s.*?_VERSION=(?<currentValue>.*)"
-      ]
-    },
     {
       "fileMatch": ["(^|/)Dockerfile[^/]*$"],
       "matchStrings": [
```
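For context, the removed `regexManagers` entry worked by matching an annotated version assignment inside `helper-scripts/nextcloud.sh`. A minimal sketch of the shape of line that regex targets (the datasource, depName, and version values here are illustrative, not taken from the repo):

```bash
# Illustrative only: an annotated version line of the shape the removed Renovate
# regex matched — a "# renovate: ..." comment followed by a *_VERSION= assignment.
# renovate: datasource=github-releases depName=nextcloud/server
NEXTCLOUD_VERSION=28.0.1
```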
**.github/workflows/check_if_support_labeled.yml** (vendored, new file) — 37 lines

```yaml
name: Check if labeled support, if so send message and close issue
on:
  issues:
    types:
      - labeled
jobs:
  add-comment:
    if: github.event.label.name == 'support'
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
      - name: Add comment
        run: gh issue comment "$NUMBER" --body "$BODY"
        env:
          GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}
          BODY: |
            **THIS IS A AUTOMATED MESSAGE!**

            It seems your issue is not a bug.
            Therefore we highly advise you to get support!

            You can get support either by:
            - ordering a paid [support contract at Servercow](https://www.servercow.de/mailcow?lang=en#support/) (Directly from the developers) or
            - using the [community forum](https://community.mailcow.email) (**Based on volunteers! NO guaranteed answer**) or
            - using the [Telegram support channel](https://t.me/mailcow) (**Based on volunteers! NO guaranteed answer**)

            This issue will be closed. If you think your reported issue is not a support case feel free to comment above and if so the issue will reopened.

      - name: Close issue
        env:
          GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}
        run: gh issue close "$NUMBER" -r "not planned"
```
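The workflow above is a thin wrapper around the GitHub CLI. A minimal sketch of the equivalent manual invocation, assuming an authenticated `gh` and using a placeholder issue number:

```bash
# Placeholder issue number (123); requires a prior `gh auth login`.
export GH_REPO="mailcow/mailcow-dockerized"
gh issue comment 123 --body "It seems your issue is not a bug. ..."
gh issue close 123 -r "not planned"   # same close reason the workflow passes
```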
**.github/workflows/** (file name not preserved in this capture; the secret name suggests the check-if-PR-is-staging workflow) — hunk:

```diff
@@ -10,9 +10,9 @@ jobs:
     if: github.event.pull_request.base.ref != 'staging' #check if the target branch is not staging
     steps:
       - name: Send message
-        uses: thollander/actions-comment-pull-request@v2.4.2
+        uses: thollander/actions-comment-pull-request@v3.0.1
         with:
-          GITHUB_TOKEN: ${{ secrets.CHECKIFPRISSTAGING_ACTION_PAT }}
+          github-token: ${{ secrets.CHECKIFPRISSTAGING_ACTION_PAT }}
           message: |
             Thanks for contributing!

```
**.github/workflows/** (file name not preserved in this capture; the stale-handling workflow) — hunk:

```diff
@@ -14,7 +14,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Mark/Close Stale Issues and Pull Requests 🗑️
-        uses: actions/stale@v8.0.0
+        uses: actions/stale@v10.1.1
        with:
          repo-token: ${{ secrets.STALE_ACTION_PAT }}
          days-before-stale: 60
```
**.github/workflows/image_builds.yml** (vendored) — 3 changes

```diff
@@ -23,12 +23,11 @@ jobs:
           - "postfix-mailcow"
           - "rspamd-mailcow"
           - "sogo-mailcow"
-          - "solr-mailcow"
           - "unbound-mailcow"
           - "watchdog-mailcow"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - name: Setup Docker
         run: |
           curl -sSL https://get.docker.com/ | CHANNEL=stable sudo sh
```
**.github/workflows/pr_to_nightly.yml** (vendored) — 4 changes

```diff
@@ -8,11 +8,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
       - name: Run the Action
-        uses: devops-infra/action-pull-request@v0.5.5
+        uses: devops-infra/action-pull-request@v1.0.2
         with:
           github_token: ${{ secrets.PRTONIGHTLY_ACTION_PAT }}
           title: Automatic PR to nightly from ${{ github.event.repository.updated_at}}
```
**.github/workflows/rebuild_backup_image.yml** (vendored) — 16 changes

```diff
@@ -9,9 +9,11 @@ on:
 jobs:
   docker_image_build:
     runs-on: ubuntu-latest
+    permissions:
+      packages: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -19,17 +21,19 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

-      - name: Login to Docker Hub
+      - name: Login to GHCR
+        if: github.event_name != 'pull_request'
         uses: docker/login-action@v3
         with:
-          username: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_USERNAME }}
-          password: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_TOKEN }}
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
           file: data/Dockerfiles/backup/Dockerfile
           push: true
-          tags: mailcow/backup:latest
+          tags: ghcr.io/mailcow/backup:latest
```
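Since the workflow now publishes to GHCR instead of Docker Hub, the backup image is addressed by its new registry path; for example (tag taken from the workflow above):

```bash
# Pull the backup image from its new GHCR location.
docker pull ghcr.io/mailcow/backup:latest
```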
**.github/workflows/** (file name not preserved in this capture; the postscreen_access.cidr update workflow) — hunk:

```diff
@@ -15,14 +15,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

       - name: Generate postscreen_access.cidr
         run: |
           bash helper-scripts/update_postscreen_whitelist.sh

       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v5
+        uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ secrets.mailcow_action_Update_postscreen_access_cidr_pat }}
           commit-message: update postscreen_access.cidr
```
**.gitignore** (vendored) — 10 changes

```diff
@@ -13,6 +13,7 @@ data/conf/dovecot/acl_anyone
 data/conf/dovecot/dovecot-master.passwd
 data/conf/dovecot/dovecot-master.userdb
 data/conf/dovecot/extra.conf
+data/conf/dovecot/mail_replica.conf
 data/conf/dovecot/global_sieve_*
 data/conf/dovecot/last_login
 data/conf/dovecot/lua
@@ -22,6 +23,7 @@ data/conf/dovecot/sni.conf
 data/conf/dovecot/sogo-sso.conf
 data/conf/dovecot/sogo_trusted_ip.conf
 data/conf/dovecot/sql
+data/conf/dovecot/conf.d/fts.conf
 data/conf/nextcloud-*.bak
 data/conf/nginx/*.active
 data/conf/nginx/*.bak
@@ -43,8 +45,12 @@ data/conf/rspamd/local.d/*
 data/conf/rspamd/override.d/*
 data/conf/sogo/custom-theme.js
-data/conf/sogo/plist_ldap
+data/conf/sogo/plist_ldap.sh
 data/conf/sogo/sieve.creds
 data/conf/sogo/sogo-full.svg
+data/conf/sogo/cron.creds
+data/conf/sogo/custom-fulllogo.svg
+data/conf/sogo/custom-shortlogo.svg
+data/conf/sogo/custom-fulllogo.png
 data/gitea/
 data/gogs/
 data/hooks/dovecot/*
@@ -68,3 +74,5 @@ rebuild-images.sh
 refresh_images.sh
 update_diffs/
 create_cold_standby.sh
+!data/conf/nginx/mailcow_auth.conf
+data/conf/postfix/postfix-tlspol
```
**CONTRIBUTING.md** (file header not preserved in this capture; the content is the project's contribution guidelines) — changes:

```diff
@@ -1,5 +1,56 @@
-When a problem occurs, then always for a reason! What you want to do in such a case is:
+# Contribution Guidelines
+**_Last modified on 12th November 2025_**
+
+First of all, thank you for wanting to provide a bugfix or a new feature for the mailcow community, it's because of your help that the project can continue to grow!
+
+As we want to keep mailcow's development structured we setup these Guidelines which helps you to create your issue/pull request accordingly.
+
+**PLEASE NOTE, THAT WE WILL CLOSE ISSUES/PULL REQUESTS IF THEY DON'T FULFILL OUR WRITTEN GUIDELINES WRITTEN INSIDE THIS DOCUMENT**. So please check this guidelines before you propose a Issue/Pull Request.
+
+## Topics
+
+- [Pull Requests](#pull-requests)
+- [Issue Reporting](#issue-reporting)
+  - [Guidelines](#issue-reporting-guidelines)
+  - [Issue Report Guide](#issue-report-guide)
+
+## Pull Requests
+**_Last modified on 15th August 2024_**
+
+However, please note the following regarding pull requests:
+
+1. **ALWAYS** create your PR using the staging branch of your locally cloned mailcow instance, as the pull request will end up in said staging branch of mailcow once approved. Ideally, you should simply create a new branch for your pull request that is named after the type of your PR (e.g. `feat/` for function updates or `fix/` for bug fixes) and the actual content (e.g. `sogo-6.0.0` for an update from SOGo to version 6 or `html-escape` for a fix that includes escaping HTML in mailcow).
+2. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
+3. Please **keep** this pull request branch **clean** and free of commits that have nothing to do with the changes you have made (e.g. commits from other users from other branches). *If you make changes to the `update.sh` script or other scripts that trigger a commit, there is usually a developer mode for clean working in this case.*
+4. **Test your changes before you commit them as a pull request.** <ins>If possible</ins>, write a small **test log** or demonstrate the functionality with a **screenshot or GIF**. *We will of course also test your pull request ourselves, but proof from you will save us the question of whether you have tested your own changes yourself.*
+5. **Please use** the pull request template we provide once creating a pull request. *HINT: During editing you encounter comments which looks like: `<!-- CONTENT -->`. These can be removed or kept, as they will not rendered later on GitHub! Please only create actual content without the said comments.*
+6. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
+7. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
+8. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!
+9. If your PR requires a Docker image rebuild (changes to Dockerfiles or files in data/Dockerfiles/), update the image tag in docker-compose.yml. Use the base-image versioning (e.g. ghcr.io/mailcow/sogo:5.12.4 → :5.12.5 for version bumps; append a letter for patch fixes, e.g. :5.12.4a). Follow this scheme.
+
+---
+
+## Issue Reporting
+**_Last modified on 12th November 2025_**
+
+If you plan to report a issue within mailcow please read and understand the following rules:
+
+### Security disclosures / Security-related fixes
+- Security vulnerabilities and security fixes must always be reported confidentially first to the contact address specified in SECURITY.md before they are integrated, published, or publicly disclosed in issues/PRs. Please wait for a response from the specified contact to ensure coordinated and responsible disclosure.
+
+### Issue Reporting Guidelines
+
+1. **ONLY** use the issue tracker for bug reports or improvement requests and NOT for support questions. For support questions you can either contact the [mailcow community on Telegram](https://docs.mailcow.email/#community-support-and-chat) or the mailcow team directly in exchange for a [support fee](https://docs.mailcow.email/#commercial-support).
+2. **ONLY** report an error if you have the **necessary know-how (at least the basics)** for the administration of an e-mail server and the usage of Docker. mailcow is a complex and fully-fledged e-mail server including groupware components on a Docker basement and it requires a bit of technical know-how for debugging and operating.
+3. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
+4. **ONLY** report bugs that are contained in the latest mailcow release series. *The definition of the latest release series includes the last major patch (e.g. 2023-12) and all minor patches (revisions) below it (e.g. 2023-12a, b, c etc.).* New issue reports published starting from January 1, 2024 must meet this criterion, as versions below the latest releases are no longer supported by us.
+5. When reporting a problem, please be as detailed as possible and include even the smallest changes to your mailcow installation. Simply fill out the corresponding bug report form in detail and accurately to minimize possible questions.
+6. **Before you open an issue/feature request**, please first check whether a similar request already exists in the mailcow tracker on GitHub. If so, please include yourself in this request.
+7. When you create a issue/feature request: Please note that the creation does <ins>**not guarantee an instant implementation or fix by the mailcow team or the community**</ins>.
+8. Please **ALWAYS** anonymize any sensitive information in your bug report or feature request before submitting it.
+
+### Issue Report Guide
 1. Read your logs; follow them to see what the reason for your problem is.
 2. Follow the leads given to you in your logfiles and start investigating.
 3. Restarting the troubled service or the whole stack to see if the problem persists.
```
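Steps 1–3 of the guide above boil down to a few Docker commands. A minimal sketch, assuming the stack was started with Docker Compose and using `postfix-mailcow` purely as an illustrative service name:

```bash
# Inspect recent logs of one service, then restart it (service name illustrative).
docker compose logs --tail=200 postfix-mailcow
docker compose restart postfix-mailcow
# Or restart the whole stack:
docker compose down && docker compose up -d
```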
```diff
@@ -7,3 +58,5 @@ When a problem occurs, then always for a reason! What you want to do in such a c
 5. Search our [issues](https://github.com/mailcow/mailcow-dockerized/issues) for your problem.
 6. [Create an issue](https://github.com/mailcow/mailcow-dockerized/issues/new/choose) over at our GitHub repository if you think your problem might be a bug or a missing feature you badly need. But please make sure, that you include **all the logs** and a full description to your problem.
+7. Ask your questions in our community-driven [support channels](https://docs.mailcow.email/#community-support-and-chat).

+## When creating an issue/feature request or a pull request, you will be asked to confirm these guidelines.
```
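The pull-request rules above (base your work on `staging`, prefix branches with `feat/` or `fix/`) come down to a short git sequence; a minimal sketch using a fork placeholder and a branch name borrowed from the guideline's own example:

```bash
# Branch off staging with a type-prefixed name; <your-fork> is a placeholder.
git clone https://github.com/<your-fork>/mailcow-dockerized.git
cd mailcow-dockerized
git checkout staging
git checkout -b fix/html-escape
# ...commit your changes, then open the PR against mailcow's staging branch.
```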
**README.md** — 24 changes *(badge image markup did not survive this capture; the added badge row appears as a bare addition)*

```diff
@@ -2,6 +2,8 @@

 [](https://translate.mailcow.email/engage/mailcow-dockerized/)
 [](https://twitter.com/mailcow_email)
+
+

 ## Want to support mailcow?

@@ -11,6 +13,22 @@ You can also [get a SAL](https://www.servercow.de/mailcow?lang=en#sal) which is

 Or just spread the word: moo.

+## Many thanks to our GitHub Sponsors ❤️
+A big thank you to everyone supporting us on GitHub Sponsors—your contributions mean the world to us! Special thanks to the following amazing supporters:
+
+### 100$/Month Sponsors
+<a href="https://www.colba.net/" target=_blank><img
+  src="https://avatars.githubusercontent.com/u/204464723" height="58"
+/></a>
+<a href="https://www.maehdros.com/" target=_blank><img
+  src="https://avatars.githubusercontent.com/u/173894712" height="58"
+/></a>
+
+### 50$/Month Sponsors
+<a href="https://github.com/vnukhr" target=_blank><img
+  src="https://avatars.githubusercontent.com/u/7805987?s=52&v=4" height="58"
+/></a>
+
 ## Info, documentation and support

 Please see [the official documentation](https://docs.mailcow.email/) for installation and support instructions. 🐄
@@ -25,7 +43,9 @@ Please see [the official documentation](https://docs.mailcow.email/) for install

 [Telegram mailcow Off-Topic channel](https://t.me/mailcowOfftopic)

-[Official Twitter Account](https://twitter.com/mailcow_email)
+[Official 𝕏 (Twitter) Account](https://twitter.com/mailcow_email)
+
+[Official Mastodon Account](https://mailcow.social/@doncow)

 Telegram desktop clients are available for [multiple platforms](https://desktop.telegram.org). You can search the groups history for keywords.

@@ -38,4 +58,4 @@ mailcow is a registered word mark of The Infrastructure Company GmbH, Parkstr. 4

 The project is managed and maintained by The Infrastructure Company GmbH.

-Originated from @andryyy (André)
+Originated from @andryyy (André)
```
230
_modules/scripts/core.sh
Normal file
230
_modules/scripts/core.sh
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
#!/usr/bin/env bash
|
||||
# _modules/scripts/core.sh
|
||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
||||
|
||||
# ANSI color for red errors
|
||||
RED='\e[31m'
|
||||
GREEN='\e[32m'
|
||||
YELLOW='\e[33m'
|
||||
BLUE='\e[34m'
|
||||
MAGENTA='\e[35m'
|
||||
LIGHT_RED='\e[91m'
|
||||
LIGHT_GREEN='\e[92m'
|
||||
NC='\e[0m'
|
||||
|
||||
caller="${BASH_SOURCE[1]##*/}"
|
||||
|
||||
get_installed_tools(){
|
||||
for bin in openssl curl docker git awk sha1sum grep cut jq; do
|
||||
if [[ -z $(command -v ${bin}) ]]; then
|
||||
echo "Error: Cannot find command '${bin}'. Cannot proceed."
|
||||
echo "Solution: Please review system requirements and install requirements. Then, re-run the script."
|
||||
echo "See System Requirements: https://docs.mailcow.email/getstarted/install/"
|
||||
echo "Exiting..."
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if grep --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox grep detected, please install gnu grep, \"apk add --no-cache --upgrade grep\"${NC}"; exit 1; fi
|
||||
# This will also cover sort
|
||||
if cp --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox cp detected, please install coreutils, \"apk add --no-cache --upgrade coreutils\"${NC}"; exit 1; fi
|
||||
if sed --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox sed detected, please install gnu sed, \"apk add --no-cache --upgrade sed\"${NC}"; exit 1; fi
|
||||
}
|
||||
|
||||
get_docker_version(){
|
||||
# Check Docker Version (need at least 24.X)
|
||||
docker_version=$(docker version --format '{{.Server.Version}}' | cut -d '.' -f 1)
|
||||
}
|
||||
|
||||
get_compose_type(){
|
||||
if docker compose > /dev/null 2>&1; then
|
||||
if docker compose version --short | grep -e "^[2-9]\." -e "^v[2-9]\." -e "^[1-9][0-9]\." -e "^v[1-9][0-9]\." > /dev/null 2>&1; then
|
||||
COMPOSE_VERSION=native
|
||||
COMPOSE_COMMAND="docker compose"
|
||||
if [[ "$caller" == "update.sh" ]]; then
|
||||
sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=native/' "$SCRIPT_DIR/mailcow.conf"
|
||||
fi
|
||||
echo -e "\e[33mFound Docker Compose Plugin (native).\e[0m"
|
||||
echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to native\e[0m"
|
||||
sleep 2
|
||||
echo -e "\e[33mNotice: You'll have to update this Compose Version via your Package Manager manually!\e[0m"
|
||||
else
|
||||
echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
|
||||
echo -e "\e[31mPlease update/install it manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
||||
exit 1
|
||||
fi
|
||||
elif docker-compose > /dev/null 2>&1; then
|
||||
if ! [[ $(alias docker-compose 2> /dev/null) ]] ; then
|
||||
if docker-compose version --short | grep -e "^[2-9]\." -e "^[1-9][0-9]\." > /dev/null 2>&1; then
|
||||
COMPOSE_VERSION=standalone
|
||||
COMPOSE_COMMAND="docker-compose"
|
||||
if [[ "$caller" == "update.sh" ]]; then
|
||||
sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=standalone/' "$SCRIPT_DIR/mailcow.conf"
|
||||
fi
|
||||
echo -e "\e[33mFound Docker Compose Standalone.\e[0m"
|
||||
echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to standalone\e[0m"
|
||||
sleep 2
|
||||
echo -e "\e[33mNotice: For an automatic update of docker-compose please use the update_compose.sh scripts located at the helper-scripts folder.\e[0m"
|
||||
else
|
||||
echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
|
||||
echo -e "\e[31mPlease update/install manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo -e "\e[31mCannot find Docker Compose.\e[0m"
|
||||
echo -e "\e[31mPlease install it regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
detect_bad_asn() {
|
||||
echo -e "\e[33mDetecting if your IP is listed on Spamhaus Bad ASN List...\e[0m"
|
||||
response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
|
||||
if [ "$response" -eq 503 ]; then
|
||||
if [ -z "$SPAMHAUS_DQS_KEY" ]; then
|
||||
echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
|
||||
echo -e "\e[33mmailcow did not detected a value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf!\e[0m"
|
||||
sleep 2
|
||||
echo ""
|
||||
echo -e "\e[33mTo use the Spamhaus DNS Blocklists again, you will need to create a FREE account for their Data Query Service (DQS) at: https://www.spamhaus.com/free-trial/sign-up-for-a-free-data-query-service-account\e[0m"
|
||||
echo -e "\e[33mOnce done, enter your DQS API key in mailcow.conf and mailcow will do the rest for you!\e[0m"
|
||||
echo ""
|
||||
sleep 2
|
||||
else
|
||||
echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
|
||||
echo -e "\e[32mmailcow detected a Value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf. Postfix will use DQS with the given API key...\e[0m"
|
||||
fi
|
||||
elif [ "$response" -eq 200 ]; then
|
||||
echo -e "\e[33mCheck completed! Your IP is \e[32mclean\e[0m"
|
||||
elif [ "$response" -eq 429 ]; then
|
||||
echo -e "\e[33mCheck completed! \e[31mYour IP seems to be rate limited on the ASN Check service... please try again later!\e[0m"
|
||||
else
|
||||
echo -e "\e[31mCheck failed! \e[0mMaybe a DNS or Network problem?\e[0m"
|
||||
fi
|
||||
}
|
||||
|
||||
check_online_status() {
|
||||
CHECK_ONLINE_DOMAINS=('https://github.com' 'https://hub.docker.com')
|
||||
for domain in "${CHECK_ONLINE_DOMAINS[@]}"; do
|
||||
if timeout 6 curl --head --silent --output /dev/null ${domain}; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}

prefetch_images() {
  [[ -z ${BRANCH} ]] && { echo -e "\e[33m\nUnknown branch...\e[0m"; exit 1; }
  git fetch origin #${BRANCH}
  while read image; do
    RET_C=0
    until docker pull "${image}"; do
      RET_C=$((RET_C + 1))
      echo -e "\e[33m\nError pulling $image, retrying...\e[0m"
      [ ${RET_C} -gt 3 ] && { echo -e "\e[31m\nToo many failed retries, exiting\e[0m"; exit 1; }
      sleep 1
    done
  done < <(git show "origin/${BRANCH}:docker-compose.yml" | grep "image:" | awk '{ print $2 }')
}

docker_garbage() {
  SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
  IMGS_TO_DELETE=()

  declare -A IMAGES_INFO
  COMPOSE_IMAGES=($(grep -oP "image: \K(ghcr\.io/)?mailcow.+" "${SCRIPT_DIR}/docker-compose.yml"))

  for existing_image in $(docker images --format "{{.ID}}:{{.Repository}}:{{.Tag}}" | grep -E '(mailcow/|ghcr\.io/mailcow/)'); do
    ID=$(echo "$existing_image" | cut -d ':' -f 1)
    REPOSITORY=$(echo "$existing_image" | cut -d ':' -f 2)
    TAG=$(echo "$existing_image" | cut -d ':' -f 3)

    if [[ "$REPOSITORY" == "mailcow/backup" || "$REPOSITORY" == "ghcr.io/mailcow/backup" ]]; then
      if [[ "$TAG" != "<none>" ]]; then
        continue
      fi
    fi

    if [[ " ${COMPOSE_IMAGES[@]} " =~ " ${REPOSITORY}:${TAG} " ]]; then
      continue
    else
      IMGS_TO_DELETE+=("$ID")
      IMAGES_INFO["$ID"]="$REPOSITORY:$TAG"
    fi
  done

  if [[ ! -z ${IMGS_TO_DELETE[*]} ]]; then
    echo "The following unused mailcow images were found:"
    for id in "${IMGS_TO_DELETE[@]}"; do
      echo "  ${IMAGES_INFO[$id]} ($id)"
    done

    if [ -z "$FORCE" ]; then
      read -r -p "Do you want to delete them to free up some space? [y/N] " response
      if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        docker rmi ${IMGS_TO_DELETE[*]}
      else
        echo "OK, skipped."
      fi
    else
      echo "Running in forced mode! Force removing old mailcow images..."
      docker rmi ${IMGS_TO_DELETE[*]}
    fi
    echo -e "\e[32mFurther cleanup...\e[0m"
    echo "If you want to clean up further garbage collected by Docker, please make sure all containers are up and running before cleaning your system by executing \"docker system prune\""
  fi
}
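
# Sketch (hypothetical IDs/tags): the docker images format used above yields one
# "ID:Repository:Tag" triple per line, which cut then splits on ':':
#   f2a3b4c5d6e7:mailcow/dovecot:2.29
#   0a1b2c3d4e5f:ghcr.io/mailcow/rspamd:<none>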

in_array() {
  local e match="$1"
  shift
  for e; do [[ "$e" == "$match" ]] && return 0; done
  return 1
}
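
# Usage sketch (hypothetical values): the first argument is the needle, the rest
# is the haystack, so arrays are passed expanded:
#   SERVICES=("postfix" "dovecot" "rspamd")
#   in_array "dovecot" "${SERVICES[@]}" && echo "dovecot is part of the stack"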

detect_major_update() {
  if [ "${BRANCH}" == "master" ]; then
    # Array with major versions
    # Add major versions here
    MAJOR_VERSIONS=(
      "2025-02"
      "2025-03"
      "2025-09"
    )

    current_version=""
    if [[ -f "${SCRIPT_DIR}/data/web/inc/app_info.inc.php" ]]; then
      current_version=$(grep 'MAILCOW_GIT_VERSION' ${SCRIPT_DIR}/data/web/inc/app_info.inc.php | sed -E 's/.*MAILCOW_GIT_VERSION="([^"]+)".*/\1/')
    fi
    if [[ -z "$current_version" ]]; then
      return 1
    fi
    release_url="https://github.com/mailcow/mailcow-dockerized/releases/tag"

    updates_to_apply=()

    for version in "${MAJOR_VERSIONS[@]}"; do
      if [[ "$current_version" < "$version" ]]; then
        updates_to_apply+=("$version")
      fi
    done

    if [[ ${#updates_to_apply[@]} -gt 0 ]]; then
      echo -e "\e[33m\nMAJOR UPDATES to be applied:\e[0m"
      for update in "${updates_to_apply[@]}"; do
        echo "$update - $release_url/$update"
      done

      echo -e "\nPlease read the release notes before proceeding."
      read -p "Do you want to proceed with the update? [y/n] " response
      if [[ "${response}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        echo "Proceeding with the update..."
      else
        echo "Update canceled. Exiting."
        exit 1
      fi
    fi
  fi
}
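
# Note (editor sketch): the < inside [[ ]] compares lexicographically, which is
# sufficient here because the tags are zero-padded YYYY-MM strings:
#   [[ "2025-03" < "2025-09" ]] && echo "older"   # byte order matches date order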

_modules/scripts/ipv6_controller.sh (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/env bash
# _modules/scripts/ipv6_controller.sh
# THIS SCRIPT IS DESIGNED TO BE RUN BY MAILCOW SCRIPTS ONLY!
# DO NOT, I REPEAT, DO NOT TRY TO RUN THIS SCRIPT STANDALONE!

# 1) Check if the host supports IPv6
get_ipv6_support() {
  # ---- helper: probe external IPv6 connectivity without DNS ----
  _probe_ipv6_connectivity() {
    # Use literal, always-on IPv6 echo responders (no DNS required)
    local PROBE_IPS=("2001:4860:4860::8888" "2606:4700:4700::1111")
    local ip rc=1

    for ip in "${PROBE_IPS[@]}"; do
      if command -v ping6 &>/dev/null; then
        ping6 -c1 -W2 "$ip" &>/dev/null || ping6 -c1 -w2 "$ip" &>/dev/null
        rc=$?
      elif command -v ping &>/dev/null; then
        ping -6 -c1 -W2 "$ip" &>/dev/null || ping -6 -c1 -w2 "$ip" &>/dev/null
        rc=$?
      else
        rc=1
      fi
      [[ $rc -eq 0 ]] && return 0
    done
    return 1
  }

  if [[ ! -f /proc/net/if_inet6 ]] || grep -qs '^1' /proc/sys/net/ipv6/conf/all/disable_ipv6 2>/dev/null; then
    DETECTED_IPV6=false
    echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}IPv6 is administratively disabled${YELLOW}.${NC}"
    return
  fi

  if ip -6 route show default 2>/dev/null | grep -qE '^default'; then
    echo -e "${YELLOW}Default IPv6 route found – testing external IPv6 connectivity...${NC}"
    if _probe_ipv6_connectivity; then
      DETECTED_IPV6=true
      echo -e "IPv6 detected on host – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
    else
      DETECTED_IPV6=false
      echo -e "${YELLOW}Default IPv6 route present but external IPv6 connectivity failed – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    fi
    return
  fi

  if ip -6 addr show scope global 2>/dev/null | grep -q 'inet6'; then
    DETECTED_IPV6=false
    echo -e "${YELLOW}Global IPv6 address present but no default route – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    return
  fi

  if ip -6 addr show scope link 2>/dev/null | grep -q 'inet6'; then
    echo -e "${YELLOW}Only link-local IPv6 addresses found – testing external IPv6 connectivity...${NC}"
    if _probe_ipv6_connectivity; then
      DETECTED_IPV6=true
      echo -e "External IPv6 connectivity available – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
    else
      DETECTED_IPV6=false
      echo -e "${YELLOW}Only link-local IPv6 present and no external connectivity – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    fi
    return
  fi

  DETECTED_IPV6=false
  echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
}
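
# Sketch (not part of the script): the same probe can be run by hand before
# enabling IPv6; the literal targets are the Google and Cloudflare resolvers:
#   ping -6 -c1 -W2 2606:4700:4700::1111 && echo "IPv6 egress OK"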

# 2) Ensure Docker daemon.json has (or create it with) the required IPv6 settings
docker_daemon_edit(){
  DOCKER_DAEMON_CONFIG="/etc/docker/daemon.json"
  DOCKER_MAJOR=$(docker version --format '{{.Server.Version}}' 2>/dev/null | cut -d. -f1)
  MISSING=()

  _has_kv() { grep -Eq "\"$1\"[[:space:]]*:[[:space:]]*$2" "$DOCKER_DAEMON_CONFIG" 2>/dev/null; }

  if [[ -f "$DOCKER_DAEMON_CONFIG" ]]; then

    # Reject an empty or whitespace-only file immediately
    if [[ ! -s "$DOCKER_DAEMON_CONFIG" ]] || ! grep -Eq '[{}]' "$DOCKER_DAEMON_CONFIG"; then
      echo -e "${RED}ERROR: $DOCKER_DAEMON_CONFIG exists but is empty or contains no JSON braces – please initialize it with valid JSON (e.g. {}).${NC}"
      exit 1
    fi

    # Validate JSON if jq is present
    if command -v jq &>/dev/null && ! jq empty "$DOCKER_DAEMON_CONFIG" &>/dev/null; then
      echo -e "${RED}ERROR: Invalid JSON in $DOCKER_DAEMON_CONFIG – please correct it manually.${NC}"
      exit 1
    fi

    # Gather missing keys
    ! _has_kv ipv6 true && MISSING+=("ipv6: true")

    # For Docker < 28, keep requiring fixed-cidr-v6 (the default bridge needs it on old engines)
    if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
      ! grep -Eq '"fixed-cidr-v6"[[:space:]]*:[[:space:]]*".+"' "$DOCKER_DAEMON_CONFIG" \
        && MISSING+=('fixed-cidr-v6: "fd00:dead:beef:c0::/80"')
    fi

    # For Docker < 27, ip6tables is needed and was tied to experimental in older releases
    if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
      _has_kv ipv6 true && ! _has_kv ip6tables true && MISSING+=("ip6tables: true")
      ! _has_kv experimental true && MISSING+=("experimental: true")
    fi

    # Fix if needed
    if ((${#MISSING[@]}>0)); then
      echo -e "${MAGENTA}Your daemon.json is missing: ${YELLOW}${MISSING[*]}${NC}"
      if [[ -n "$FORCE" ]]; then
        ans=Y
      else
        read -p "Would you like to update $DOCKER_DAEMON_CONFIG now? [Y/n] " ans
        ans=${ans:-Y}
      fi

      if [[ $ans =~ ^[Yy]$ ]]; then
        cp "$DOCKER_DAEMON_CONFIG" "${DOCKER_DAEMON_CONFIG}.bak"
        if command -v jq &>/dev/null; then
          TMP=$(mktemp)
          # Base filter: ensure ipv6 = true
          JQ_FILTER='.ipv6 = true'

          # Add fixed-cidr-v6 only for Docker < 28
          if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
            JQ_FILTER+=' | .["fixed-cidr-v6"] = (.["fixed-cidr-v6"] // "fd00:dead:beef:c0::/80")'
          fi

          # Add ip6tables/experimental only for Docker < 27
          if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
            JQ_FILTER+=' | .ip6tables = true | .experimental = true'
          fi

          jq "$JQ_FILTER" "$DOCKER_DAEMON_CONFIG" >"$TMP" && mv "$TMP" "$DOCKER_DAEMON_CONFIG"
          echo -e "${LIGHT_GREEN}daemon.json updated. Restarting Docker...${NC}"
          (command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
          echo -e "${YELLOW}Docker restarted.${NC}"
        else
          echo -e "${RED}Please install jq or manually update daemon.json and restart Docker.${NC}"
          exit 1
        fi
      else
        echo -e "${YELLOW}User declined the Docker update – please add these settings manually:${NC}"
        echo "${MISSING[*]}"
        exit 1
      fi
    fi

  else
    # Create a new daemon.json if missing
    if [[ -n "$FORCE" ]]; then
      ans=Y
    else
      read -p "$DOCKER_DAEMON_CONFIG not found. Create it with IPv6 settings? [Y/n] " ans
      ans=${ans:-Y}
    fi

    if [[ $ans =~ ^[Yy]$ ]]; then
      mkdir -p "$(dirname "$DOCKER_DAEMON_CONFIG")"
      if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true,
  "fixed-cidr-v6": "fd00:dead:beef:c0::/80",
  "ip6tables": true,
  "experimental": true
}
EOF
      elif [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true,
  "fixed-cidr-v6": "fd00:dead:beef:c0::/80"
}
EOF
      else
        # Docker 28+: ipv6 works without fixed-cidr-v6
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true
}
EOF
      fi
      echo -e "${GREEN}Created $DOCKER_DAEMON_CONFIG with IPv6 settings.${NC}"
      echo "Restarting Docker..."
      (command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
      echo "Docker restarted."
    else
      echo "User declined to create daemon.json – please manually merge these settings into the Docker daemon config:"
      echo "${MISSING[*]}"
      exit 1
    fi
  fi
}
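
# Sketch (assumes jq is installed): after the restart, the merged keys can be
# inspected directly; expect true, and a ULA prefix or null on Docker 28+:
#   jq '.ipv6, ."fixed-cidr-v6"' /etc/docker/daemon.json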

# 3) Main wrapper for generate_config.sh and update.sh
configure_ipv6() {
  # Detect a manual override if mailcow.conf is present
  if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]] && grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
    MANUAL_SETTING=$(grep '^ENABLE_IPV6=' "$MAILCOW_CONF" | cut -d= -f2)
  elif [[ -z "$MAILCOW_CONF" ]] && [[ -n "${ENABLE_IPV6:-}" ]]; then
    MANUAL_SETTING="$ENABLE_IPV6"
  else
    MANUAL_SETTING=""
  fi

  get_ipv6_support

  # If the user set it manually, check for a mismatch
  if [[ "$DETECTED_IPV6" != "true" ]]; then
    if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
      if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
        sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=false/' "$MAILCOW_CONF"
      else
        echo "ENABLE_IPV6=false" >> "$MAILCOW_CONF"
      fi
    else
      export IPV6_BOOL=false
    fi
    echo "Skipping Docker IPv6 configuration because the host does not support IPv6."
    echo "Make sure your Docker daemon.json does not include \"ipv6\": true if you do not want IPv6."
    echo "IPv6 configuration complete: ENABLE_IPV6=false"
    sleep 2
    return
  fi

  docker_daemon_edit

  if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
    if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
      sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=true/' "$MAILCOW_CONF"
    else
      echo "ENABLE_IPV6=true" >> "$MAILCOW_CONF"
    fi
  else
    export IPV6_BOOL=true
  fi

  echo "IPv6 configuration complete: ENABLE_IPV6=true"
}

_modules/scripts/migrate_options.sh (new file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# _modules/scripts/migrate_options.sh
# THIS SCRIPT IS DESIGNED TO BE RUN BY MAILCOW SCRIPTS ONLY!
# DO NOT, I REPEAT, DO NOT TRY TO RUN THIS SCRIPT STANDALONE!

migrate_config_options() {

  sed -i --follow-symlinks '$a\' mailcow.conf

  KEYS=(
    SOLR_HEAP
    SKIP_SOLR
    SOLR_PORT
    FLATCURVE_EXPERIMENTAL
    DISABLE_IPv6
    ACME_CONTACT
  )

  for key in "${KEYS[@]}"; do
    if grep -q "${key}" mailcow.conf; then
      case "${key}" in
        SOLR_HEAP)
          echo "Removing ${key} from mailcow.conf"
          sed -i '/# Solr heap size in MB\b/d' mailcow.conf
          sed -i '/# Solr is a prone to run\b/d' mailcow.conf
          sed -i '/SOLR_HEAP\b/d' mailcow.conf
          ;;
        SKIP_SOLR)
          echo "Removing ${key} from mailcow.conf"
          sed -i '/\bSkip Solr on low-memory\b/d' mailcow.conf
          sed -i '/\bSolr is disabled by default\b/d' mailcow.conf
          sed -i '/\bDisable Solr or\b/d' mailcow.conf
          sed -i '/\bSKIP_SOLR\b/d' mailcow.conf
          ;;
        SOLR_PORT)
          echo "Removing ${key} from mailcow.conf"
          sed -i '/\bSOLR_PORT\b/d' mailcow.conf
          ;;
        FLATCURVE_EXPERIMENTAL)
          echo "Removing ${key} from mailcow.conf"
          sed -i '/\bFLATCURVE_EXPERIMENTAL\b/d' mailcow.conf
          ;;
        DISABLE_IPv6)
          echo "Migrating ${key} to ENABLE_IPV6 in mailcow.conf"
          local old=$(grep '^DISABLE_IPv6=' "mailcow.conf" | cut -d'=' -f2)
          local new
          if [[ "$old" == "y" ]]; then
            new="false"
          else
            new="true"
          fi
          sed -i '/^DISABLE_IPv6=/d' "mailcow.conf"
          echo "ENABLE_IPV6=$new" >> "mailcow.conf"
          ;;
        ACME_CONTACT)
          echo "Deleting obsolete ${key} from mailcow.conf"
          sed -i '/^# Lets Encrypt registration contact information/d' mailcow.conf
          sed -i '/^# Optional: Leave empty for none/d' mailcow.conf
          sed -i '/^# This value is only used on first order!/d' mailcow.conf
          sed -i '/^# Setting it at a later point will require the following steps:/d' mailcow.conf
          sed -i '/^# https:\/\/docs.mailcow.email\/troubleshooting\/debug-reset_tls\//d' mailcow.conf
          sed -i '/^ACME_CONTACT=.*/d' mailcow.conf
          sed -i '/^#ACME_CONTACT=.*/d' mailcow.conf
          ;;
      esac
    fi
  done

  solr_volume=$(docker volume ls -qf name=^${COMPOSE_PROJECT_NAME}_solr-vol-1)
  if [[ -n $solr_volume ]]; then
    echo -e "\e[34mSolr has been replaced within mailcow since 2025-01.\nThe volume $solr_volume is unused.\e[0m"
    sleep 1
    if [ ! "$FORCE" ]; then
      read -r -p "Remove $solr_volume? [y/N] " response
      if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        echo -e "\e[33mRemoving $solr_volume...\e[0m"
        docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
        echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
      else
        echo -e "Not removing $solr_volume. Run \`docker volume rm $solr_volume\` manually if needed."
      fi
    else
      echo -e "\e[33mForce removing $solr_volume...\e[0m"
      docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
      echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
    fi
  fi

  # Delete the old fts.conf before the forced switch to flatcurve to ensure the update works properly
  FTS_CONF_PATH="${SCRIPT_DIR}/data/conf/dovecot/conf.d/fts.conf"
  if [[ -f "$FTS_CONF_PATH" ]]; then
    if grep -q "Autogenerated by mailcow" "$FTS_CONF_PATH"; then
      rm -rf "$FTS_CONF_PATH"
    fi
  fi
}
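
# Sketch (hypothetical before/after of mailcow.conf): the DISABLE_IPv6 branch
# inverts the old flag rather than copying it:
#   DISABLE_IPv6=y  ->  ENABLE_IPV6=false
#   DISABLE_IPv6=n  ->  ENABLE_IPV6=true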

_modules/scripts/new_options.sh (new file, 300 lines)
@@ -0,0 +1,300 @@
#!/usr/bin/env bash
# _modules/scripts/new_options.sh
# THIS SCRIPT IS DESIGNED TO BE RUN BY MAILCOW SCRIPTS ONLY!
# DO NOT, I REPEAT, DO NOT TRY TO RUN THIS SCRIPT STANDALONE!

adapt_new_options() {

  CONFIG_ARRAY=(
    "AUTODISCOVER_SAN"
    "SKIP_LETS_ENCRYPT"
    "SKIP_SOGO"
    "USE_WATCHDOG"
    "WATCHDOG_NOTIFY_EMAIL"
    "WATCHDOG_NOTIFY_WEBHOOK"
    "WATCHDOG_NOTIFY_WEBHOOK_BODY"
    "WATCHDOG_NOTIFY_BAN"
    "WATCHDOG_NOTIFY_START"
    "WATCHDOG_EXTERNAL_CHECKS"
    "WATCHDOG_SUBJECT"
    "SKIP_CLAMD"
    "SKIP_OLEFY"
    "SKIP_IP_CHECK"
    "ADDITIONAL_SAN"
    "DOVEADM_PORT"
    "IPV4_NETWORK"
    "IPV6_NETWORK"
    "LOG_LINES"
    "SNAT_TO_SOURCE"
    "SNAT6_TO_SOURCE"
    "COMPOSE_PROJECT_NAME"
    "DOCKER_COMPOSE_VERSION"
    "SQL_PORT"
    "API_KEY"
    "API_KEY_READ_ONLY"
    "API_ALLOW_FROM"
    "MAILDIR_GC_TIME"
    "MAILDIR_SUB"
    "ACL_ANYONE"
    "FTS_HEAP"
    "FTS_PROCS"
    "SKIP_FTS"
    "ENABLE_SSL_SNI"
    "ALLOW_ADMIN_EMAIL_LOGIN"
    "SKIP_HTTP_VERIFICATION"
    "SOGO_EXPIRE_SESSION"
    "SOGO_URL_ENCRYPTION_KEY"
    "REDIS_PORT"
    "REDISPASS"
    "DOVECOT_MASTER_USER"
    "DOVECOT_MASTER_PASS"
    "MAILCOW_PASS_SCHEME"
    "ADDITIONAL_SERVER_NAMES"
    "WATCHDOG_VERBOSE"
    "WEBAUTHN_ONLY_TRUSTED_VENDORS"
    "SPAMHAUS_DQS_KEY"
    "SKIP_UNBOUND_HEALTHCHECK"
    "DISABLE_NETFILTER_ISOLATION_RULE"
    "HTTP_REDIRECT"
    "ENABLE_IPV6"
  )

  sed -i --follow-symlinks '$a\' mailcow.conf
  for option in "${CONFIG_ARRAY[@]}"; do
    if grep -q "${option}" mailcow.conf; then
      continue
    fi

    echo "Adding new option \"${option}\" to mailcow.conf"

    case "${option}" in
      AUTODISCOVER_SAN)
        echo '# Obtain certificates for autodiscover.* and autoconfig.* domains.' >> mailcow.conf
        echo '# This can be useful to switch off in case you are in a scenario where a reverse proxy already handles those.' >> mailcow.conf
        echo '# There are mixed scenarios where ports 80,443 are occupied and you do not want to share certs' >> mailcow.conf
        echo '# between services. So acme-mailcow obtains certificates for the mail domains and all web things get handled' >> mailcow.conf
        echo '# in the reverse proxy.' >> mailcow.conf
        echo 'AUTODISCOVER_SAN=y' >> mailcow.conf
        ;;

      DOCKER_COMPOSE_VERSION)
        echo "# Used Docker Compose version" >> mailcow.conf
        echo "# Switch here between native (compose plugin) and standalone" >> mailcow.conf
        echo "# For more information, take a look at the mailcow docs regarding the configuration options." >> mailcow.conf
        echo "# Normally this should remain untouched, but if you decide to use either of them you can switch it manually here." >> mailcow.conf
        echo "# Please be aware that at least one of those variants should be installed on your machine, or mailcow will fail." >> mailcow.conf
        echo "" >> mailcow.conf
        echo "DOCKER_COMPOSE_VERSION=${DOCKER_COMPOSE_VERSION}" >> mailcow.conf
        ;;

      DOVEADM_PORT)
        echo "DOVEADM_PORT=127.0.0.1:19991" >> mailcow.conf
        ;;

      LOG_LINES)
        echo '# Max log lines per service to keep in Redis logs' >> mailcow.conf
        echo "LOG_LINES=9999" >> mailcow.conf
        ;;
      IPV4_NETWORK)
        echo '# Internal IPv4 /24 subnet, format n.n.n. (expands to n.n.n.0/24)' >> mailcow.conf
        echo "IPV4_NETWORK=172.22.1" >> mailcow.conf
        ;;
      IPV6_NETWORK)
        echo '# Internal IPv6 subnet in fc00::/7' >> mailcow.conf
        echo "IPV6_NETWORK=fd4d:6169:6c63:6f77::/64" >> mailcow.conf
        ;;
      SQL_PORT)
        echo '# Bind SQL to 127.0.0.1 on port 13306' >> mailcow.conf
        echo "SQL_PORT=127.0.0.1:13306" >> mailcow.conf
        ;;
      API_KEY)
        echo '# Create or override API key for web UI' >> mailcow.conf
        echo "#API_KEY=" >> mailcow.conf
        ;;
      API_KEY_READ_ONLY)
        echo '# Create or override read-only API key for web UI' >> mailcow.conf
        echo "#API_KEY_READ_ONLY=" >> mailcow.conf
        ;;
      API_ALLOW_FROM)
        echo '# Must be set for API_KEY to be active' >> mailcow.conf
        echo '# IPs only, no networks (networks can be set via UI)' >> mailcow.conf
        echo "#API_ALLOW_FROM=" >> mailcow.conf
        ;;
      SNAT_TO_SOURCE)
        echo '# Use this IPv4 for outgoing connections (SNAT)' >> mailcow.conf
        echo "#SNAT_TO_SOURCE=" >> mailcow.conf
        ;;
      SNAT6_TO_SOURCE)
        echo '# Use this IPv6 for outgoing connections (SNAT)' >> mailcow.conf
        echo "#SNAT6_TO_SOURCE=" >> mailcow.conf
        ;;
      MAILDIR_GC_TIME)
        echo '# Garbage collector cleanup' >> mailcow.conf
        echo '# Deleted domains and mailboxes are moved to /var/vmail/_garbage/timestamp_sanitizedstring' >> mailcow.conf
        echo '# How long should objects remain in the garbage until they are being deleted? (value in minutes)' >> mailcow.conf
        echo '# Check interval is hourly' >> mailcow.conf
        echo 'MAILDIR_GC_TIME=1440' >> mailcow.conf
        ;;
      ACL_ANYONE)
        echo '# Set this to "allow" to enable the anyone pseudo user. Disabled by default.' >> mailcow.conf
        echo '# When enabled, ACLs can be created that apply to "All authenticated users".' >> mailcow.conf
        echo '# This should probably only be activated on mail hosts that are used exclusively by one organisation.' >> mailcow.conf
        echo '# Otherwise a user might share data with too many other users.' >> mailcow.conf
        echo 'ACL_ANYONE=disallow' >> mailcow.conf
        ;;
      FTS_HEAP)
        echo '# Dovecot indexing (FTS) process maximum heap size in MB; there is no recommendation, please see the Dovecot docs.' >> mailcow.conf
        echo '# Flatcurve is used as the FTS engine. It is supposed to be pretty efficient in CPU and RAM consumption.' >> mailcow.conf
        echo '# Please always monitor your resource consumption!' >> mailcow.conf
        echo "FTS_HEAP=128" >> mailcow.conf
        ;;
      SKIP_FTS)
        echo '# Skip FTS (full-text search) for Dovecot on low-memory, low-threaded systems or if you simply want to disable it.' >> mailcow.conf
        echo "# Dovecot inside mailcow uses Flatcurve as the FTS backend." >> mailcow.conf
        echo "SKIP_FTS=y" >> mailcow.conf
        ;;
      FTS_PROCS)
        echo '# Controls how many processes the Dovecot indexing process can spawn at max.' >> mailcow.conf
        echo '# Too many indexing processes can use a lot of CPU and disk I/O.' >> mailcow.conf
        echo '# Please visit https://doc.dovecot.org/configuration_manual/service_configuration/#indexer-worker for more information' >> mailcow.conf
        echo "FTS_PROCS=1" >> mailcow.conf
        ;;
      ENABLE_SSL_SNI)
        echo '# Create separate certificates for all domains - y/n' >> mailcow.conf
        echo '# This will allow adding more than 100 domains, but some email clients will not be able to connect with alternative hostnames.' >> mailcow.conf
        echo '# See https://wiki.dovecot.org/SSL/SNIClientSupport' >> mailcow.conf
        echo "ENABLE_SSL_SNI=n" >> mailcow.conf
        ;;
      SKIP_SOGO)
        echo '# Skip SOGo: Will disable SOGo integration and therefore webmail, DAV protocols and ActiveSync support (experimental, unsupported, not fully implemented) - y/n' >> mailcow.conf
        echo "SKIP_SOGO=n" >> mailcow.conf
        ;;
      MAILDIR_SUB)
        echo '# MAILDIR_SUB defines a path in a users virtual home to keep the maildir in. Leave empty for updated setups.' >> mailcow.conf
        echo "#MAILDIR_SUB=Maildir" >> mailcow.conf
        echo "MAILDIR_SUB=" >> mailcow.conf
        ;;
      WATCHDOG_NOTIFY_WEBHOOK)
        echo '# Send notifications to a webhook URL that receives a POST request with the content type "application/json".' >> mailcow.conf
        echo '# You can use this to send notifications to services like Discord, Slack and others.' >> mailcow.conf
        echo '#WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' >> mailcow.conf
        ;;
      WATCHDOG_NOTIFY_WEBHOOK_BODY)
        echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf
        echo '# The following variables are available: SUBJECT, BODY' >> mailcow.conf
        WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}'
        echo "#WATCHDOG_NOTIFY_WEBHOOK_BODY='${WEBHOOK_BODY}'" >> mailcow.conf
        ;;
      WATCHDOG_NOTIFY_BAN)
        echo '# Notify about banned IP. Includes whois lookup.' >> mailcow.conf
        echo "WATCHDOG_NOTIFY_BAN=y" >> mailcow.conf
        ;;
      WATCHDOG_NOTIFY_START)
        echo '# Send a notification when the watchdog is started.' >> mailcow.conf
        echo "WATCHDOG_NOTIFY_START=y" >> mailcow.conf
        ;;
      WATCHDOG_SUBJECT)
        echo '# Subject for watchdog mails. Defaults to "Watchdog ALERT" followed by the error message.' >> mailcow.conf
        echo "#WATCHDOG_SUBJECT=" >> mailcow.conf
        ;;
      WATCHDOG_EXTERNAL_CHECKS)
        echo '# Checks if mailcow is an open relay. Requires a SAL. More checks will follow.' >> mailcow.conf
        echo '# No data is collected. Opt-in and anonymous.' >> mailcow.conf
        echo '# Will only work with unmodified mailcow setups.' >> mailcow.conf
        echo "WATCHDOG_EXTERNAL_CHECKS=n" >> mailcow.conf
        ;;
      SOGO_EXPIRE_SESSION)
        echo '# SOGo session timeout in minutes' >> mailcow.conf
        echo "SOGO_EXPIRE_SESSION=480" >> mailcow.conf
        ;;
      REDIS_PORT)
        echo "REDIS_PORT=127.0.0.1:7654" >> mailcow.conf
        ;;
      DOVECOT_MASTER_USER)
        echo '# DOVECOT_MASTER_USER and _PASS must _both_ be provided. No special chars.' >> mailcow.conf
        echo '# Empty by default to auto-generate master user and password on start.' >> mailcow.conf
        echo '# User expands to DOVECOT_MASTER_USER@mailcow.local' >> mailcow.conf
        echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
        echo "DOVECOT_MASTER_USER=" >> mailcow.conf
        ;;
      DOVECOT_MASTER_PASS)
        echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
        echo "DOVECOT_MASTER_PASS=" >> mailcow.conf
        ;;
      MAILCOW_PASS_SCHEME)
        echo '# Password hash algorithm' >> mailcow.conf
        echo '# Only certain password hash algorithms are supported. For a full list of supported schemes,' >> mailcow.conf
        echo '# see https://docs.mailcow.email/models/model-passwd/' >> mailcow.conf
        echo "MAILCOW_PASS_SCHEME=BLF-CRYPT" >> mailcow.conf
        ;;
      ADDITIONAL_SERVER_NAMES)
        echo '# Additional server names for mailcow UI' >> mailcow.conf
        echo '#' >> mailcow.conf
        echo '# Specify alternative addresses for the mailcow UI to respond to' >> mailcow.conf
        echo '# This is useful when you set mail.* as ADDITIONAL_SAN and want to make sure mail.maildomain.com will always point to the mailcow UI.' >> mailcow.conf
        echo '# If the server name does not match a known site, Nginx decides by best guess and may redirect users to the wrong web root.' >> mailcow.conf
        echo '# You can understand this as the server_name directive in Nginx.' >> mailcow.conf
        echo '# Comma-separated list without spaces! Example: ADDITIONAL_SERVER_NAMES=a.b.c,d.e.f' >> mailcow.conf
        echo 'ADDITIONAL_SERVER_NAMES=' >> mailcow.conf
        ;;
      WEBAUTHN_ONLY_TRUSTED_VENDORS)
        echo "# WebAuthn device manufacturer verification" >> mailcow.conf
        echo '# After setting WEBAUTHN_ONLY_TRUSTED_VENDORS=y only devices from trusted manufacturers are allowed' >> mailcow.conf
        echo '# Root certificates can be placed for validation under mailcow-dockerized/data/web/inc/lib/WebAuthn/rootCertificates' >> mailcow.conf
        echo 'WEBAUTHN_ONLY_TRUSTED_VENDORS=n' >> mailcow.conf
        ;;
      SPAMHAUS_DQS_KEY)
        echo "# Spamhaus Data Query Service Key" >> mailcow.conf
        echo '# Optional: Leave empty for none' >> mailcow.conf
        echo '# Enter your key here if you are using a blocked ASN (e.g. OVH, AWS, Cloudflare) for the unregistered Spamhaus blocklist.' >> mailcow.conf
        echo '# If empty, Spamhaus blocklists will be disabled completely if mailcow detects that you are running on a server using a blocked AS.' >> mailcow.conf
        echo '# Otherwise it will work as usual.' >> mailcow.conf
        echo 'SPAMHAUS_DQS_KEY=' >> mailcow.conf
        ;;
      WATCHDOG_VERBOSE)
        echo '# Enable watchdog verbose logging' >> mailcow.conf
        echo 'WATCHDOG_VERBOSE=n' >> mailcow.conf
        ;;
      SKIP_UNBOUND_HEALTHCHECK)
        echo '# Skip Unbound (DNS resolver) healthchecks (NOT recommended!) - y/n' >> mailcow.conf
        echo 'SKIP_UNBOUND_HEALTHCHECK=n' >> mailcow.conf
        ;;
      DISABLE_NETFILTER_ISOLATION_RULE)
        echo '# Prevent netfilter from setting an iptables/nftables rule to isolate the mailcow docker network - y/n' >> mailcow.conf
        echo '# CAUTION: Disabling this may expose container ports to other neighbors on the same subnet, even if the ports are bound to localhost' >> mailcow.conf
        echo 'DISABLE_NETFILTER_ISOLATION_RULE=n' >> mailcow.conf
        ;;
      HTTP_REDIRECT)
        echo '# Redirect HTTP connections to HTTPS - y/n' >> mailcow.conf
        echo 'HTTP_REDIRECT=n' >> mailcow.conf
        ;;
      ENABLE_IPV6)
        echo '# IPv6 controller section' >> mailcow.conf
        echo '# This variable controls the usage of IPv6 within mailcow.' >> mailcow.conf
        echo '# Can either be true or false | Defaults to true' >> mailcow.conf
        echo '# WARNING: MAKE SURE TO PROPERLY CONFIGURE IPv6 ON YOUR HOST FIRST BEFORE ENABLING THIS, AS FAULTY CONFIGURATIONS CAN LEAD TO OPEN RELAYS!' >> mailcow.conf
        echo '# A COMPLETE DOCKER STACK REBUILD (compose down && compose up -d) IS NEEDED TO APPLY THIS.' >> mailcow.conf
        echo "ENABLE_IPV6=${IPV6_BOOL}" >> mailcow.conf
        ;;
      SKIP_CLAMD)
        echo '# Skip ClamAV (clamd-mailcow) anti-virus (Rspamd will auto-detect a missing ClamAV container) - y/n' >> mailcow.conf
        echo 'SKIP_CLAMD=n' >> mailcow.conf
        ;;
      SKIP_OLEFY)
        echo '# Skip Olefy (olefy-mailcow) anti-virus for Office documents (Rspamd will auto-detect a missing Olefy container) - y/n' >> mailcow.conf
        echo 'SKIP_OLEFY=n' >> mailcow.conf
        ;;
      REDISPASS)
        echo "REDISPASS=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 28)" >> mailcow.conf
        ;;
      SOGO_URL_ENCRYPTION_KEY)
        echo '# SOGo URL encryption key (exactly 16 characters, limited to A–Z, a–z, 0–9)' >> mailcow.conf
        echo '# This key is used to encrypt email addresses within SOGo URLs' >> mailcow.conf
        echo "SOGO_URL_ENCRYPTION_KEY=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 16)" >> mailcow.conf
        ;;
      *)
        echo "${option}=" >> mailcow.conf
        ;;
    esac
  done
}
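
# Note (editor sketch, not the upstream code): grep -q "${option}" matches
# substrings, so e.g. API_KEY is also "found" when only API_KEY_READ_ONLY exists;
# an anchored probe would be stricter:
#   grep -Eq "^#?${option}=" mailcow.conf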

@@ -1,6 +1,6 @@
-FROM alpine:3.17
+FROM alpine:3.21

-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

 RUN apk upgrade --no-cache \
   && apk add --update --no-cache \

@@ -14,9 +14,7 @@ RUN apk upgrade --no-cache \
     tini \
     tzdata \
     python3 \
-    py3-pip \
-  && pip3 install --upgrade pip \
-  && pip3 install acme-tiny
+    acme-tiny

 COPY acme.sh /srv/acme.sh
 COPY functions.sh /srv/functions.sh

@@ -4,9 +4,9 @@ exec 5>&1

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  export REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  export REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi

 until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do

@@ -33,6 +33,10 @@ if [[ "${ONLY_MAILCOW_HOSTNAME}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   ONLY_MAILCOW_HOSTNAME=y
 fi

+if [[ "${AUTODISCOVER_SAN}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  AUTODISCOVER_SAN=y
+fi
+
 # Request individual certificate for every domain
 if [[ "${ENABLE_SSL_SNI}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   ENABLE_SSL_SNI=y

@@ -113,13 +117,13 @@ fi
 chmod 600 ${ACME_BASE}/key.pem

 log_f "Waiting for database..."
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
+while ! /usr/bin/mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
   sleep 2
 done
 log_f "Database OK"

 log_f "Waiting for Nginx..."
-until $(curl --output /dev/null --silent --head --fail http://nginx:8081); do
+until $(curl --output /dev/null --silent --head --fail http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network:8081); do
   sleep 2
 done
 log_f "Nginx OK"

@@ -133,8 +137,8 @@ log_f "Resolver OK"
 # Waiting for domain table
 log_f "Waiting for domain table..."
 while [[ -z ${DOMAIN_TABLE} ]]; do
-  curl --silent http://nginx/ >/dev/null 2>&1
-  DOMAIN_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
+  curl --silent http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network/ >/dev/null 2>&1
+  DOMAIN_TABLE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
   [[ -z ${DOMAIN_TABLE} ]] && sleep 10
 done
 log_f "OK" no_date

@@ -155,18 +159,6 @@ while true; do
   fi
   if [[ ! -f ${ACME_BASE}/acme/account.pem ]]; then
     log_f "Generating missing Lets Encrypt account key..."
-    if [[ ! -z ${ACME_CONTACT} ]]; then
-      if ! verify_email "${ACME_CONTACT}"; then
-        log_f "Invalid email address, will not start registration!"
-        sleep 365d
-        exec $(readlink -f "$0")
-      else
-        ACME_CONTACT_PARAMETER="--contact mailto:${ACME_CONTACT}"
-        log_f "Valid email address, using ${ACME_CONTACT} for registration"
-      fi
-    else
-      ACME_CONTACT_PARAMETER=""
-    fi
     openssl genrsa 4096 > ${ACME_BASE}/acme/account.pem
   else
     log_f "Using existing Lets Encrypt account key ${ACME_BASE}/acme/account.pem"

@@ -211,7 +203,11 @@ while true; do
       ADDITIONAL_SAN_ARR+=($i)
     fi
   done
-  ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
+  if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
+    # Fetch certs for autoconfig and autodiscover subdomains
+    ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig' 'mta-sts')
+  fi

   if [[ ${SKIP_IP_CHECK} != "y" ]]; then
     # Start IP detection

@@ -223,7 +219,7 @@ while true; do

   #########################################
   # IP and webroot challenge verification #
-  SQL_DOMAINS=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain WHERE backupmx=0 and active=1" -Bs)
+  SQL_DOMAINS=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain WHERE backupmx=0 and active=1" -Bs)
   if [[ ! $? -eq 0 ]]; then
     log_f "Failed to read SQL domains, retrying in 1 minute..."
     sleep 1m

@@ -291,7 +287,7 @@ while true; do
       VALIDATED_CERTIFICATES+=("${CERT_NAME}")

       # obtain server certificate if required
-      ACME_CONTACT_PARAMETER=${ACME_CONTACT_PARAMETER} DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
+      DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
       RETURN="$?"
       if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
         CERT_AMOUNT_CHANGED=1

@@ -93,8 +93,8 @@ until dig letsencrypt.org +time=3 +tries=1 @unbound > /dev/null; do
   sleep 2
 done
 log_f "Resolver OK"
-log_f "Using command acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
-ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} \
+log_f "Using command acme-tiny ${DIRECTORY_URL} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
+ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} \
   --account-key ${ACME_BASE}/acme/account.pem \
   --disable-check \
   --csr ${CSR} \

@@ -124,7 +124,7 @@ case "$SUCCESS" in
     ;;
   *) # non-zero is non-fun
     log_f "Failed to obtain certificate ${CERT} for domains '${CERT_DOMAINS[*]}'"
-    redis-cli -h redis SET ACME_FAIL_TIME "$(date +%s)"
+    redis-cli -h redis -a ${REDISPASS} --no-auth-warning SET ACME_FAIL_TIME "$(date +%s)"
     exit 100${SUCCESS}
     ;;
 esac

@@ -2,32 +2,32 @@

 # Reading container IDs
 # Wrapping as array to ensure trimmed content when calling $NGINX etc.
-NGINX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-DOVECOT=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-POSTFIX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+NGINX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+DOVECOT=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+POSTFIX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))

 reload_nginx(){
   echo "Reloading Nginx..."
-  NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${NGINX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Nginx, restarting container..."; restart_container ${NGINX} ; }
 }

 reload_dovecot(){
   echo "Reloading Dovecot..."
-  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${DOVECOT_RELOAD_RET} != 'success' ]] && { echo "Could not reload Dovecot, restarting container..."; restart_container ${DOVECOT} ; }
 }

 reload_postfix(){
   echo "Reloading Postfix..."
-  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${POSTFIX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Postfix, restarting container..."; restart_container ${POSTFIX} ; }
 }

 restart_container(){
   for container in $*; do
     echo "Restarting ${container}..."
-    C_REST_OUT=$(curl -X POST --insecure https://dockerapi/containers/${container}/restart --silent | jq -r '.msg')
+    C_REST_OUT=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${container}/restart --silent | jq -r '.msg')
     echo "${C_REST_OUT}"
   done
 }

@@ -1,3 +1,3 @@
-FROM debian:bullseye-slim
+FROM debian:trixie-slim

-RUN apt update && apt install pigz
+RUN apt update && apt install pigz zstd -y --no-install-recommends

@@ -1,12 +1,99 @@
-FROM clamav/clamav:1.0.3_base
+FROM alpine:3.21 AS builder

-LABEL maintainer "André Peters <andre.peters@servercow.de>"
+WORKDIR /src
+ENV CLAMD_VERSION=1.4.2

 RUN apk upgrade --no-cache \
   && apk add --update --no-cache \
-    rsync \
-    bind-tools \
-    bash
+    g++ \
+    gcc \
+    gdb \
+    make \
+    cmake \
+    py3-pytest \
+    python3 \
+    valgrind \
+    bzip2-dev \
+    check-dev \
+    curl-dev \
+    json-c-dev \
+    libmilter-dev \
+    libxml2-dev \
+    linux-headers \
+    ncurses-dev \
+    openssl-dev \
+    pcre2-dev \
+    zlib-dev \
+    cargo \
+    rust
+
+RUN wget -P /src https://www.clamav.net/downloads/production/clamav-${CLAMD_VERSION}.tar.gz \
+  && tar xzfv /src/clamav-${CLAMD_VERSION}.tar.gz \
+  && cd /src/clamav-${CLAMD_VERSION} \
+  && cmake . \
+    -D CMAKE_BUILD_TYPE="Release" \
+    -D CMAKE_INSTALL_PREFIX="/usr" \
+    -D CMAKE_INSTALL_LIBDIR="/usr/lib" \
+    -D APP_CONFIG_DIRECTORY="/etc/clamav" \
+    -D DATABASE_DIRECTORY="/var/lib/clamav" \
+    -D ENABLE_CLAMONACC=OFF \
+    -D ENABLE_EXAMPLES=OFF \
+    -D ENABLE_MILTER=ON \
+    -D ENABLE_MAN_PAGES=OFF \
+    -D ENABLE_STATIC_LIB=OFF \
+    -D ENABLE_JSON_SHARED=ON \
+  && cmake --build . \
+  && make DESTDIR="/clamav" -j$(($(nproc) - 1)) install \
+  && rm -r "/clamav/usr/lib/pkgconfig/" \
+  && sed -e "s|^\(Example\)|\# \1|" \
+    -e "s|.*\(LocalSocket\) .*|\1 /tmp/clamd.sock|" \
+    -e "s|.*\(TCPSocket\) .*|\1 3310|" \
+    -e "s|.*\(TCPAddr\) .*|#\1 0.0.0.0|" \
+    -e "s|.*\(User\) .*|\1 clamav|" \
+    -e "s|^\#\(LogFile\) .*|\1 /var/log/clamav/clamd.log|" \
+    -e "s|^\#\(LogTime\).*|\1 yes|" \
+    "/clamav/etc/clamav/clamd.conf.sample" > "/clamav/etc/clamav/clamd.conf" \
+  && sed -e "s|^\(Example\)|\# \1|" \
+    -e "s|.*\(DatabaseOwner\) .*|\1 clamav|" \
+    -e "s|^\#\(UpdateLogFile\) .*|\1 /var/log/clamav/freshclam.log|" \
+    -e "s|^\#\(NotifyClamd\).*|\1 /etc/clamav/clamd.conf|" \
+    -e "s|^\#\(ScriptedUpdates\).*|\1 yes|" \
+    "/clamav/etc/clamav/freshclam.conf.sample" > "/clamav/etc/clamav/freshclam.conf" \
+  && sed -e "s|^\(Example\)|\# \1|" \
+    -e "s|.*\(MilterSocket\) .*|\1 inet:7357|" \
+    -e "s|.*\(User\) .*|\1 clamav|" \
+    -e "s|^\#\(LogFile\) .*|\1 /var/log/clamav/milter.log|" \
+    -e "s|^\#\(LogTime\).*|\1 yes|" \
+    -e "s|.*\(\ClamdSocket\) .*|\1 unix:/tmp/clamd.sock|" \
+    "/clamav/etc/clamav/clamav-milter.conf.sample" > "/clamav/etc/clamav/clamav-milter.conf" || exit 1
+
+
+FROM alpine:3.21
+
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
+
+RUN apk upgrade --no-cache \
+  && apk add --update --no-cache \
+    tzdata \
+    rsync \
+    bind-tools \
+    bash \
+    tini \
+    json-c \
+    libbz2 \
+    libcurl \
+    libmilter \
+    libxml2 \
+    ncurses-libs \
+    pcre2 \
+    zlib \
+    libgcc \
+  && addgroup -S "clamav" && \
+    adduser -D -G "clamav" -h "/var/lib/clamav" -s "/bin/false" -S "clamav" && \
+    install -d -m 755 -g "clamav" -o "clamav" "/var/log/clamav" && \
+    chown -R clamav:clamav /var/lib/clamav
+
+COPY --from=builder "/clamav" "/"

 # init
 COPY clamd.sh /clamd.sh

@@ -14,7 +101,9 @@ RUN chmod +x /sbin/tini

 # healthcheck
 COPY healthcheck.sh /healthcheck.sh
+COPY clamdcheck.sh /usr/local/bin
 RUN chmod +x /healthcheck.sh
+RUN chmod +x /usr/local/bin/clamdcheck.sh
 HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"

 ENTRYPOINT []

@@ -8,7 +8,7 @@ fi

 # Cleaning up garbage
 echo "Cleaning up tmp files..."
-rm -rf /var/lib/clamav/clamav-*.tmp
+rm -rf /var/lib/clamav/tmp.*

 # Prepare whitelist

@@ -91,6 +91,7 @@ done
 ) &
 BACKGROUND_TASKS+=($!)

+echo "$(clamd -V) is starting... please wait a moment."
 nice -n10 clamd &
 BACKGROUND_TASKS+=($!)

data/Dockerfiles/clamd/clamdcheck.sh (new file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/sh

set -eu

if [ "${CLAMAV_NO_CLAMD:-}" != "false" ]; then
  if [ "$(echo "PING" | nc localhost 3310)" != "PONG" ]; then
    echo "ERROR: Unable to contact server"
    exit 1
  fi

  echo "Clamd is up"
fi

exit 0

@@ -1,7 +1,8 @@
-FROM alpine:3.17
+FROM alpine:3.21

-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

+ARG PIP_BREAK_SYSTEM_PACKAGES=1
 WORKDIR /app

 RUN apk add --update --no-cache python3 \

@@ -9,12 +10,13 @@ RUN apk add --update --no-cache python3 \
     openssl \
     tzdata \
     py3-psutil \
+    py3-redis \
+    py3-async-timeout \
   && pip3 install --upgrade pip \
     fastapi \
     uvicorn \
     aiodocker \
-    docker \
-    aioredis
+    docker
+RUN mkdir /app/modules

 COPY docker-entrypoint.sh /app/

@@ -22,4 +24,4 @@ COPY main.py /app/main.py
 COPY modules/ /app/modules/

 ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
-CMD exec python main.py
+CMD ["python", "main.py"]

@@ -5,16 +5,63 @@ import json
 import uuid
 import async_timeout
 import asyncio
-import aioredis
 import aiodocker
 import docker
 import logging
 from logging.config import dictConfig
 from fastapi import FastAPI, Response, Request
 from modules.DockerApi import DockerApi
+from redis import asyncio as aioredis
+from contextlib import asynccontextmanager

 dockerapi = None
-app = FastAPI()
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global dockerapi
+
+    # Initialize a custom logger
+    logger = logging.getLogger("dockerapi")
+    logger.setLevel(logging.INFO)
+    # Configure the logger to output logs to the terminal
+    handler = logging.StreamHandler()
+    handler.setLevel(logging.INFO)
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+    logger.info("Init APP")
+
+    # Init redis client
+    if os.environ['REDIS_SLAVEOF_IP'] != "":
+        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0", password=os.environ['REDISPASS'])
+    else:
+        redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0", password=os.environ['REDISPASS'])
+
+    # Init docker clients
+    sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
+    async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
+
+    dockerapi = DockerApi(redis_client, sync_docker_client, async_docker_client, logger)
+
+    logger.info("Subscribe to redis channel")
+    # Subscribe to redis channel
+    dockerapi.pubsub = redis.pubsub()
+    await dockerapi.pubsub.subscribe("MC_CHANNEL")
+    asyncio.create_task(handle_pubsub_messages(dockerapi.pubsub))
+
+    yield
+
+    # Close docker connections
+    dockerapi.sync_docker_client.close()
+    await dockerapi.async_docker_client.close()
+
+    # Close redis
+    await dockerapi.pubsub.unsubscribe("MC_CHANNEL")
+    await dockerapi.redis_client.close()
+
+app = FastAPI(lifespan=lifespan)

 # Define Routes
 @app.get("/host/stats")

@@ -43,7 +90,7 @@ async def get_container(container_id : str):
         if container._id == container_id:
             container_info = await container.show()
             return Response(content=json.dumps(container_info, indent=4), media_type="application/json")

     res = {
         "type": "danger",
         "msg": "no container found"

@@ -83,7 +130,7 @@ async def get_containers():
 async def post_containers(container_id : str, post_action : str, request: Request):
     global dockerapi

-    try :
+    try:
         request_json = await request.json()
     except Exception as err:
         request_json = {}

@@ -145,52 +192,6 @@ async def post_container_update_stats(container_id : str):
     stats = json.loads(await dockerapi.redis_client.get(container_id + '_stats'))
     return Response(content=json.dumps(stats, indent=4), media_type="application/json")

-# Events
-@app.on_event("startup")
-async def startup_event():
-    global dockerapi
-
-    # Initialize a custom logger
-    logger = logging.getLogger("dockerapi")
-    logger.setLevel(logging.INFO)
-    # Configure the logger to output logs to the terminal
-    handler = logging.StreamHandler()
-    handler.setLevel(logging.INFO)
-    formatter = logging.Formatter("%(levelname)s: %(message)s")
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-
-    logger.info("Init APP")
-
-    # Init redis client
-    if os.environ['REDIS_SLAVEOF_IP'] != "":
-        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0")
-    else:
-        redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0")
-
-    # Init docker clients
-    sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
-    async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
-
-    dockerapi = DockerApi(redis_client, sync_docker_client, async_docker_client, logger)
-
-    logger.info("Subscribe to redis channel")
-    # Subscribe to redis channel
-    dockerapi.pubsub = redis.pubsub()
-    await dockerapi.pubsub.subscribe("MC_CHANNEL")
-    asyncio.create_task(handle_pubsub_messages(dockerapi.pubsub))
-
-@app.on_event("shutdown")
-async def shutdown_event():
-    global dockerapi
-
-    # Close docker connections
-    dockerapi.sync_docker_client.close()
-    await dockerapi.async_docker_client.close()
-
-    # Close redis
-    await dockerapi.pubsub.unsubscribe("MC_CHANNEL")
-    await dockerapi.redis_client.close()
-
 # PubSub Handler
 async def handle_pubsub_messages(channel: aioredis.client.PubSub):

@@ -240,10 +241,10 @@ async def handle_pubsub_messages(channel: aioredis.client.PubSub):
             else:
                 dockerapi.logger.error("api call: missing container_name, post_action or request")
         else:
-            dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
+            dockerapi.logger.error("Unknown PubSub received - %s" % json.dumps(data_json))
     else:
-        dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
+        dockerapi.logger.error("Unknown PubSub received - %s" % json.dumps(data_json))

         await asyncio.sleep(0.0)
     except asyncio.TimeoutError:
         pass

@@ -159,7 +159,7 @@ class DockerApi:
             postqueue_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
             # todo: check each exit code
             res = { 'type': 'success', 'msg': 'Scheduled immediate delivery'}
-            return Response(content=json.dumps(res, indent=4), media_type="application/json")
+        return Response(content=json.dumps(res, indent=4), media_type="application/json")
     # api call: container_post - post_action: exec - cmd: mailq - task: list
     def container_post__exec__mailq__list(self, request_json, **kwargs):
         if 'container_id' in kwargs:

@@ -318,7 +318,7 @@ class DockerApi:

         if 'username' in request_json and 'script_name' in request_json:
             for container in self.sync_docker_client.containers.list(filters=filters):
-                cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
+                cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
                 sieve_return = container.exec_run(cmd)
         return self.exec_run_handler('utf8_text_only', sieve_return)
     # api call: container_post - post_action: exec - cmd: maildir - task: cleanup
|
||||
|
|

@ -342,6 +342,30 @@ class DockerApi:
         cmd = ["/bin/bash", "-c", cmd_vmail]
       maildir_cleanup = container.exec_run(cmd, user='vmail')
       return self.exec_run_handler('generic', maildir_cleanup)
+  # api call: container_post - post_action: exec - cmd: maildir - task: move
+  def container_post__exec__maildir__move(self, request_json, **kwargs):
+    if 'container_id' in kwargs:
+      filters = {"id": kwargs['container_id']}
+    elif 'container_name' in kwargs:
+      filters = {"name": kwargs['container_name']}
+
+    if 'old_maildir' in request_json and 'new_maildir' in request_json:
+      for container in self.sync_docker_client.containers.list(filters=filters):
+        vmail_name = request_json['old_maildir'].replace("'", "'\\''")
+        new_vmail_name = request_json['new_maildir'].replace("'", "'\\''")
+        cmd_vmail = f"if [[ -d '/var/vmail/{vmail_name}' ]]; then /bin/mv '/var/vmail/{vmail_name}' '/var/vmail/{new_vmail_name}'; fi"
+
+        index_name = request_json['old_maildir'].split("/")
+        new_index_name = request_json['new_maildir'].split("/")
+        if len(index_name) > 1 and len(new_index_name) > 1:
+          index_name = index_name[1].replace("'", "'\\''") + "@" + index_name[0].replace("'", "'\\''")
+          new_index_name = new_index_name[1].replace("'", "'\\''") + "@" + new_index_name[0].replace("'", "'\\''")
+          cmd_vmail_index = f"if [[ -d '/var/vmail_index/{index_name}' ]]; then /bin/mv '/var/vmail_index/{index_name}' '/var/vmail_index/{new_index_name}_index'; fi"
+          cmd = ["/bin/bash", "-c", cmd_vmail + " && " + cmd_vmail_index]
+        else:
+          cmd = ["/bin/bash", "-c", cmd_vmail]
+        maildir_move = container.exec_run(cmd, user='vmail')
+      return self.exec_run_handler('generic', maildir_move)
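
The move handler added above relocates both the maildir and its Dovecot index: maildirs live under /var/vmail/<domain>/<localpart>, while indexes live under /var/vmail_index/<localpart>@<domain>. A standalone sketch of that path mapping (sample values are made up):

# Sketch of the index-name derivation used by maildir__move above.
old_maildir = "example.org/alice"               # hypothetical "<domain>/<localpart>"
domain, local_part = old_maildir.split("/")[:2]
index_name = f"{local_part}@{domain}"           # -> "alice@example.org"
# /var/vmail/example.org/alice  ->  /var/vmail_index/alice@example.org
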
+  # api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
+  def container_post__exec__rspamd__worker_password(self, request_json, **kwargs):
+    if 'container_id' in kwargs:

@ -358,8 +382,8 @@ class DockerApi:
       for line in cmd_response.split("\n"):
         if '$2$' in line:
           hash = line.strip()
-          hash_out = re.search('\$2\$.+$', hash).group(0)
-          rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
+          hash_out = re.search(r'\$2\$.+$', hash).group(0)
+          rspamd_passphrase_hash = re.sub(r'[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
       rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
       cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
       cmd_response = self.exec_cmd_container(container, cmd, user="_rspamd")

@ -374,6 +398,121 @@ class DockerApi:
       self.logger.error('failed changing Rspamd password')
       res = { 'type': 'danger', 'msg': 'command did not complete' }
     return Response(content=json.dumps(res, indent=4), media_type="application/json")
+  # api call: container_post - post_action: exec - cmd: sogo - task: rename
+  def container_post__exec__sogo__rename_user(self, request_json, **kwargs):
+    if 'container_id' in kwargs:
+      filters = {"id": kwargs['container_id']}
+    elif 'container_name' in kwargs:
+      filters = {"name": kwargs['container_name']}
+
+    if 'old_username' in request_json and 'new_username' in request_json:
+      for container in self.sync_docker_client.containers.list(filters=filters):
+        old_username = request_json['old_username'].replace("'", "'\\''")
+        new_username = request_json['new_username'].replace("'", "'\\''")
+
+        sogo_return = container.exec_run(["/bin/bash", "-c", f"sogo-tool rename-user '{old_username}' '{new_username}'"], user='sogo')
+      return self.exec_run_handler('generic', sogo_return)
+  # api call: container_post - post_action: exec - cmd: doveadm - task: get_acl
+  def container_post__exec__doveadm__get_acl(self, request_json, **kwargs):
+    if 'container_id' in kwargs:
+      filters = {"id": kwargs['container_id']}
+    elif 'container_name' in kwargs:
+      filters = {"name": kwargs['container_name']}
+
+    for container in self.sync_docker_client.containers.list(filters=filters):
+      id = request_json['id'].replace("'", "'\\''")
+
+      shared_folders = container.exec_run(["/bin/bash", "-c", f"doveadm mailbox list -u '{id}'"])
+      shared_folders = shared_folders.output.decode('utf-8')
+      shared_folders = shared_folders.splitlines()
+
+      formatted_acls = []
+      mailbox_seen = []
+      for shared_folder in shared_folders:
+        if "Shared" not in shared_folder:
+          mailbox = shared_folder.replace("'", "'\\''")
+          if mailbox in mailbox_seen:
+            continue
+
+          acls = container.exec_run(["/bin/bash", "-c", f"doveadm acl get -u '{id}' '{mailbox}'"])
+          acls = acls.output.decode('utf-8').strip().splitlines()
+          if len(acls) >= 2:
+            for acl in acls[1:]:
+              user_id, rights = acl.split(maxsplit=1)
+              user_id = user_id.split('=')[1]
+              mailbox_seen.append(mailbox)
+              formatted_acls.append({ 'user': id, 'id': user_id, 'mailbox': mailbox, 'rights': rights.split() })
+        elif "Shared" in shared_folder and "/" in shared_folder:
+          shared_folder = shared_folder.split("/")
+          if len(shared_folder) < 3:
+            continue
+
+          user = shared_folder[1].replace("'", "'\\''")
+          mailbox = '/'.join(shared_folder[2:]).replace("'", "'\\''")
+          if mailbox in mailbox_seen:
+            continue
+
+          acls = container.exec_run(["/bin/bash", "-c", f"doveadm acl get -u '{user}' '{mailbox}'"])
+          acls = acls.output.decode('utf-8').strip().splitlines()
+          if len(acls) >= 2:
+            for acl in acls[1:]:
+              user_id, rights = acl.split(maxsplit=1)
+              user_id = user_id.split('=')[1].replace("'", "'\\''")
+              if user_id == id and mailbox not in mailbox_seen:
+                mailbox_seen.append(mailbox)
+                formatted_acls.append({ 'user': user, 'id': id, 'mailbox': mailbox, 'rights': rights.split() })
+
+    return Response(content=json.dumps(formatted_acls, indent=4), media_type="application/json")
+  # api call: container_post - post_action: exec - cmd: doveadm - task: delete_acl
+  def container_post__exec__doveadm__delete_acl(self, request_json, **kwargs):
+    if 'container_id' in kwargs:
+      filters = {"id": kwargs['container_id']}
+    elif 'container_name' in kwargs:
+      filters = {"name": kwargs['container_name']}
+
+    for container in self.sync_docker_client.containers.list(filters=filters):
+      user = request_json['user'].replace("'", "'\\''")
+      mailbox = request_json['mailbox'].replace("'", "'\\''")
+      id = request_json['id'].replace("'", "'\\''")
+
+      if user and mailbox and id:
+        acl_delete_return = container.exec_run(["/bin/bash", "-c", f"doveadm acl delete -u '{user}' '{mailbox}' 'user={id}'"])
+      return self.exec_run_handler('generic', acl_delete_return)
+  # api call: container_post - post_action: exec - cmd: doveadm - task: set_acl
+  def container_post__exec__doveadm__set_acl(self, request_json, **kwargs):
+    if 'container_id' in kwargs:
+      filters = {"id": kwargs['container_id']}
+    elif 'container_name' in kwargs:
+      filters = {"name": kwargs['container_name']}
+
+    for container in self.sync_docker_client.containers.list(filters=filters):
+      user = request_json['user'].replace("'", "'\\''")
+      mailbox = request_json['mailbox'].replace("'", "'\\''")
+      id = request_json['id'].replace("'", "'\\''")
+      rights = ""
+
+      available_rights = [
+        "admin",
+        "create",
+        "delete",
+        "expunge",
+        "insert",
+        "lookup",
+        "post",
+        "read",
+        "write",
+        "write-deleted",
+        "write-seen"
+      ]
+      for right in request_json['rights']:
+        right = right.replace("'", "'\\''").lower()
+        if right in available_rights:
+          rights += right + " "
+
+      if user and mailbox and id and rights:
+        acl_set_return = container.exec_run(["/bin/bash", "-c", f"doveadm acl set -u '{user}' '{mailbox}' 'user={id}' {rights}"])
+      return self.exec_run_handler('generic', acl_set_return)
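
For reference, a request body the set_acl handler above would accept (all values hypothetical): rights outside the allow-list are silently dropped and the rest are lowercased before being passed to doveadm acl set.

# Hypothetical request for container_post__exec__doveadm__set_acl.
request_json = {
    'user': 'alice@example.org',    # mailbox owner
    'mailbox': 'Shared/projects',   # hypothetical shared folder
    'id': 'bob@example.org',        # grantee
    'rights': ['lookup', 'read', 'Write', 'frobnicate']  # 'frobnicate' dropped, 'Write' lowercased
}
# Resulting call: doveadm acl set -u 'alice@example.org' 'Shared/projects' 'user=bob@example.org' lookup read write
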

   # Collect host stats
   async def get_host_stats(self, wait=5):

@ -462,7 +601,7 @@ class DockerApi:
       except:
         pass
       return ''.join(total_data)

     try:
       socket = container.exec_run([shell_cmd], stdin=True, socket=True, user=user).output._sock
       if not cmd.endswith("\n"):

@ -1,119 +1,119 @@
-FROM debian:bullseye-slim
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM alpine:3.21

-ARG DEBIAN_FRONTEND=noninteractive
-# renovate: datasource=github-tags depName=dovecot/core versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG DOVECOT=2.3.21
-# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG GOSU_VERSION=1.16
-ENV LC_ALL C
+LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
+
+# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
+ARG GOSU_VERSION=1.17
+
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8

 # Add groups and users before installing Dovecot to not break compatibility
-RUN groupadd -g 5000 vmail \
-  && groupadd -g 401 dovecot \
-  && groupadd -g 402 dovenull \
-  && groupadd -g 999 sogo \
-  && usermod -a -G sogo nobody \
-  && useradd -g vmail -u 5000 vmail -d /var/vmail \
-  && useradd -c "Dovecot unprivileged user" -d /dev/null -u 401 -g dovecot -s /bin/false dovecot \
-  && useradd -c "Dovecot login user" -d /dev/null -u 402 -g dovenull -s /bin/false dovenull \
-  && touch /etc/default/locale \
-  && apt-get update \
-  && apt-get -y --no-install-recommends install \
-  build-essential \
-  apt-transport-https \
+RUN addgroup -g 5000 vmail \
+  && addgroup -g 401 dovecot \
+  && addgroup -g 402 dovenull \
+  && sed -i "s/999/99/" /etc/group \
+  && addgroup -g 999 sogo \
+  && addgroup nobody sogo \
+  && adduser -D -u 5000 -G vmail -h /var/vmail vmail \
+  && adduser -D -G dovecot -u 401 -h /dev/null -s /sbin/nologin dovecot \
+  && adduser -D -G dovenull -u 402 -h /dev/null -s /sbin/nologin dovenull \
+  && apk add --no-cache --update \
+  bash \
+  bind-tools \
+  findutils \
+  envsubst \
   ca-certificates \
-  cpanminus \
   curl \
-  dnsutils \
-  dirmngr \
-  gettext \
-  gnupg2 \
+  coreutils \
   jq \
-  libauthen-ntlm-perl \
-  libcgi-pm-perl \
-  libcrypt-openssl-rsa-perl \
-  libcrypt-ssleay-perl \
-  libdata-uniqid-perl \
-  libdbd-mysql-perl \
-  libdbi-perl \
-  libdigest-hmac-perl \
-  libdist-checkconflicts-perl \
-  libencode-imaputf7-perl \
-  libfile-copy-recursive-perl \
-  libfile-tail-perl \
-  libhtml-parser-perl \
-  libio-compress-perl \
-  libio-socket-inet6-perl \
-  libio-socket-ssl-perl \
-  libio-tee-perl \
-  libipc-run-perl \
-  libjson-webtoken-perl \
-  liblockfile-simple-perl \
-  libmail-imapclient-perl \
-  libmodule-implementation-perl \
-  libmodule-scandeps-perl \
-  libnet-ssleay-perl \
-  libpackage-stash-perl \
-  libpackage-stash-xs-perl \
-  libpar-packer-perl \
-  libparse-recdescent-perl \
-  libproc-processtable-perl \
-  libreadonly-perl \
-  libregexp-common-perl \
-  libssl-dev \
-  libsys-meminfo-perl \
-  libterm-readkey-perl \
-  libtest-deep-perl \
-  libtest-fatal-perl \
-  libtest-mock-guard-perl \
-  libtest-mockobject-perl \
-  libtest-nowarnings-perl \
-  libtest-pod-perl \
-  libtest-requires-perl \
-  libtest-simple-perl \
-  libtest-warn-perl \
-  libtry-tiny-perl \
-  libunicode-string-perl \
-  liburi-perl \
-  libwww-perl \
-  lua-sql-mysql \
+  lua \
+  lua-cjson \
+  lua-socket \
+  lua-sql-mysql \
+  lua5.3-sql-mysql \
+  icu-data-full \
+  mariadb-connector-c \
+  lua-sec \
+  mariadb-dev \
+  glib-dev \
+  gcompat \
   mariadb-client \
+  perl \
+  perl-dev \
+  perl-ntlm \
+  perl-cgi \
+  perl-crypt-openssl-rsa \
+  perl-utils \
+  perl-crypt-ssleay \
+  perl-data-uniqid \
+  perl-dbd-mysql \
+  perl-dbi \
+  perl-digest-hmac \
+  perl-dist-checkconflicts \
+  perl-encode-imaputf7 \
+  perl-file-copy-recursive \
+  perl-file-tail \
+  perl-io-socket-inet6 \
+  perl-io-gzip \
+  perl-io-socket-ssl \
+  perl-io-tee \
+  perl-ipc-run \
+  perl-json-webtoken \
+  perl-mail-imapclient \
+  perl-module-implementation \
+  perl-module-scandeps \
+  perl-net-ssleay \
+  perl-package-stash \
+  perl-package-stash-xs \
+  perl-par-packer \
+  perl-parse-recdescent \
+  perl-lockfile-simple \
+  libproc2 \
+  perl-readonly \
+  perl-regexp-common \
+  perl-sys-meminfo \
+  perl-term-readkey \
+  perl-test-deep \
+  perl-test-fatal \
+  perl-test-mockobject \
+  perl-test-mock-guard \
+  perl-test-pod \
+  perl-test-requires \
+  perl-test-simple \
+  perl-test-warn \
+  perl-try-tiny \
+  perl-unicode-string \
+  perl-proc-processtable \
+  perl-app-cpanminus \
   procps \
-  python3-pip \
-  redis-server \
-  supervisor \
+  python3 \
+  py3-mysqlclient \
+  py3-html2text \
+  py3-jinja2 \
+  py3-redis \
+  redis \
   syslog-ng \
-  syslog-ng-core \
-  syslog-ng-mod-redis \
+  syslog-ng-redis \
+  syslog-ng-json \
+  supervisor \
   tzdata \
   wget \
-  && dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
-  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
-  && chmod +x /usr/local/bin/gosu \
-  && gosu nobody true \
-  && apt-key adv --fetch-keys https://repo.dovecot.org/DOVECOT-REPO-GPG \
-  && echo "deb https://repo.dovecot.org/ce-${DOVECOT}/debian/bullseye bullseye main" > /etc/apt/sources.list.d/dovecot.list \
-  && apt-get update \
-  && apt-get -y --no-install-recommends install \
-  dovecot-lua \
-  dovecot-managesieved \
-  dovecot-sieve \
+  dovecot \
+  dovecot-dev \
   dovecot-lmtpd \
+  dovecot-lua \
   dovecot-ldap \
   dovecot-mysql \
-  dovecot-core \
+  dovecot-sql \
+  dovecot-submissiond \
+  dovecot-pigeonhole-plugin \
   dovecot-pop3d \
   dovecot-imapd \
-  dovecot-solr \
-  && pip3 install mysql-connector-python html2text jinja2 redis \
-  && apt-get autoremove --purge -y \
-  && apt-get autoclean \
-  && rm -rf /var/lib/apt/lists/* \
-  && rm -rf /tmp/* /var/tmp/* /root/.cache/
-# imapsync dependencies
-RUN cpan Crypt::OpenSSL::PKCS12
+  dovecot-fts-flatcurve \
+  && arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
+  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$arch" \
+  && chmod +x /usr/local/bin/gosu \
+  && gosu nobody true

 COPY trim_logs.sh /usr/local/bin/trim_logs.sh
 COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh

@ -133,6 +133,7 @@ COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
 COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
 COPY quota_notify.py /usr/local/bin/quota_notify.py
 COPY repl_health.sh /usr/local/bin/repl_health.sh
+COPY optimize-fts.sh /usr/local/bin/optimize-fts.sh

 ENTRYPOINT ["/docker-entrypoint.sh"]
-CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

@ -2,7 +2,7 @@

 source /source_env.sh

-MAX_AGE=$(redis-cli --raw -h redis-mailcow GET Q_MAX_AGE)
+MAX_AGE=$(redis-cli --raw -h redis-mailcow -a ${REDISPASS} --no-auth-warning GET Q_MAX_AGE)

 if [[ -z ${MAX_AGE} ]]; then
   echo "Max age for quarantine items not defined"

@ -15,6 +15,6 @@ if ! [[ ${MAX_AGE} =~ ${NUM_REGEXP} ]] ; then
   exit 1
 fi

-TO_DELETE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT COUNT(id) FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY" -BN)
-mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY"
+TO_DELETE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT COUNT(id) FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY" -BN)
+mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY"
 echo "Deleted ${TO_DELETE} items from quarantine table (max age is ${MAX_AGE//[!0-9]/} days)"

@ -2,7 +2,7 @@
 set -e

 # Wait for MySQL to warm-up
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
   echo "Waiting for database to come up..."
   sleep 2
 done

@ -14,9 +14,9 @@ done

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi

 until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do

@ -28,7 +28,8 @@ ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null

 # Create missing directories
 [[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
-[[ ! -d /etc/dovecot/lua/ ]] && mkdir -p /etc/dovecot/lua/
+[[ ! -d /etc/dovecot/auth/ ]] && mkdir -p /etc/dovecot/auth/
+[[ ! -d /etc/dovecot/conf.d/ ]] && mkdir -p /etc/dovecot/conf.d/
 [[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
 [[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
 [[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo

@ -109,14 +110,16 @@ EOF

 echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone

-if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
-  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication' > /etc/dovecot/mail_plugins
+if [[ "${SKIP_FTS}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo -e "\e[33mDetecting SKIP_FTS=y... not enabling Flatcurve (FTS) then...\e[0m"
+  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
   echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log' > /etc/dovecot/mail_plugins_imap
   echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
 else
-  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_solr listescape replication' > /etc/dovecot/mail_plugins
-  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_solr listescape replication' > /etc/dovecot/mail_plugins_imap
-  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_solr notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
+  echo -e "\e[32mDetecting SKIP_FTS=n... enabling Flatcurve (FTS)\e[0m"
+  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_flatcurve listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
+  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_flatcurve listescape replication' > /etc/dovecot/mail_plugins_imap
+  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_flatcurve notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
 fi
 chmod 644 /etc/dovecot/mail_plugins /etc/dovecot/mail_plugins_imap /etc/dovecot/mail_plugins_lmtp /templates/quarantine.tpl

@ -128,123 +131,6 @@ user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format
 iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
 EOF

-cat <<EOF > /etc/dovecot/lua/passwd-verify.lua
-function auth_password_verify(req, pass)
-
-  if req.domain == nil then
-    return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, "No such user"
-  end
-
-  if cur == nil then
-    script_init()
-  end
-
-  if req.user == nil then
-    req.user = ''
-  end
-
-  respbody = {}
-
-  -- check against mailbox passwds
-  local cur,errorString = con:execute(string.format([[SELECT password FROM mailbox
-    WHERE username = '%s'
-      AND active = '1'
-      AND domain IN (SELECT domain FROM domain WHERE domain='%s' AND active='1')
-      AND IFNULL(JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.force_pw_update')), 0) != '1'
-      AND IFNULL(JSON_UNQUOTE(JSON_VALUE(attributes, '$.%s_access')), 1) = '1']], con:escape(req.user), con:escape(req.domain), con:escape(req.service)))
-  local row = cur:fetch ({}, "a")
-  while row do
-    if req.password_verify(req, row.password, pass) == 1 then
-      con:execute(string.format([[REPLACE INTO sasl_log (service, app_password, username, real_rip)
-        VALUES ("%s", 0, "%s", "%s")]], con:escape(req.service), con:escape(req.user), con:escape(req.real_rip)))
-      cur:close()
-      con:close()
-      return dovecot.auth.PASSDB_RESULT_OK, ""
-    end
-    row = cur:fetch (row, "a")
-  end
-
-  -- check against app passwds for imap and smtp
-  -- app passwords are only available for imap, smtp, sieve and pop3 when using sasl
-  if req.service == "smtp" or req.service == "imap" or req.service == "sieve" or req.service == "pop3" then
-    local cur,errorString = con:execute(string.format([[SELECT app_passwd.id, %s_access AS has_prot_access, app_passwd.password FROM app_passwd
-      INNER JOIN mailbox ON mailbox.username = app_passwd.mailbox
-      WHERE mailbox = '%s'
-        AND app_passwd.active = '1'
-        AND mailbox.active = '1'
-        AND app_passwd.domain IN (SELECT domain FROM domain WHERE domain='%s' AND active='1')]], con:escape(req.service), con:escape(req.user), con:escape(req.domain)))
-    local row = cur:fetch ({}, "a")
-    while row do
-      if req.password_verify(req, row.password, pass) == 1 then
-        -- if password is valid and protocol access is 1 OR real_rip matches SOGo, proceed
-        if tostring(req.real_rip) == "__IPV4_SOGO__" then
-          cur:close()
-          con:close()
-          return dovecot.auth.PASSDB_RESULT_OK, ""
-        elseif row.has_prot_access == "1" then
-          con:execute(string.format([[REPLACE INTO sasl_log (service, app_password, username, real_rip)
-            VALUES ("%s", %d, "%s", "%s")]], con:escape(req.service), row.id, con:escape(req.user), con:escape(req.real_rip)))
-          cur:close()
-          con:close()
-          return dovecot.auth.PASSDB_RESULT_OK, ""
-        end
-      end
-      row = cur:fetch (row, "a")
-    end
-  end
-
-  cur:close()
-  con:close()
-
-  return dovecot.auth.PASSDB_RESULT_PASSWORD_MISMATCH, "Failed to authenticate"
-
-  -- PoC
-  -- local reqbody = string.format([[{
-  --   "success":0,
-  --   "service":"%s",
-  --   "app_password":false,
-  --   "username":"%s",
-  --   "real_rip":"%s"
-  -- }]], con:escape(req.service), con:escape(req.user), con:escape(req.real_rip))
-  -- http.request {
-  --   method = "POST",
-  --   url = "http://nginx:8081/sasl_log.php",
-  --   source = ltn12.source.string(reqbody),
-  --   headers = {
-  --     ["content-type"] = "application/json",
-  --     ["content-length"] = tostring(#reqbody)
-  --   },
-  --   sink = ltn12.sink.table(respbody)
-  -- }
-
-end
-
-function auth_passdb_lookup(req)
-  return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, ""
-end
-
-function script_init()
-  mysql = require "luasql.mysql"
-  http = require "socket.http"
-  http.TIMEOUT = 5
-  ltn12 = require "ltn12"
-  env = mysql.mysql()
-  con = env:connect("__DBNAME__","__DBUSER__","__DBPASS__","localhost")
-  return 0
-end
-
-function script_deinit()
-  con:close()
-  env:close()
-end
-EOF
-
-# Replace patterns in app-passdb.lua
-sed -i "s/__DBUSER__/${DBUSER}/g" /etc/dovecot/lua/passwd-verify.lua
-sed -i "s/__DBPASS__/${DBPASS}/g" /etc/dovecot/lua/passwd-verify.lua
-sed -i "s/__DBNAME__/${DBNAME}/g" /etc/dovecot/lua/passwd-verify.lua
-sed -i "s/__IPV4_SOGO__/${IPV4_NETWORK}.248/g" /etc/dovecot/lua/passwd-verify.lua

 # Migrate old sieve_after file
 [[ -f /etc/dovecot/sieve_after ]] && mv /etc/dovecot/sieve_after /etc/dovecot/global_sieve_after

@ -322,10 +208,13 @@ cat <<EOF > /etc/dovecot/sogo-sso.conf
 # Autogenerated by mailcow
 passdb {
   driver = static
-  args = allow_real_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
+  args = allow_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
 }
 EOF

 # Creating additional creds file for SOGo notify crons (calendars, etc) (dummy user, sso password)
 echo -n ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/cron.creds

 if [[ "${MASTER}" =~ ^([nN][oO]|[nN])+$ ]]; then
   # Toggling MASTER will result in a rebuild of containers, so the quota script will be recreated
   cat <<'EOF' > /usr/local/bin/quota_notify.py

@ -335,6 +224,23 @@ sys.exit()
 EOF
 fi

+# Set mail_replica for HA setups
+if [[ -n ${MAILCOW_REPLICA_IP} && -n ${DOVEADM_REPLICA_PORT} ]]; then
+  cat <<EOF > /etc/dovecot/mail_replica.conf
+# Autogenerated by mailcow
+mail_replica = tcp:${MAILCOW_REPLICA_IP}:${DOVEADM_REPLICA_PORT}
+EOF
+fi
+
+# Setting variables for indexer-worker inside fts.conf automatically according to mailcow.conf settings
+if [[ "${SKIP_FTS}" =~ ^([nN][oO]|[nN])+$ ]]; then
+  echo -e "\e[94mConfiguring FTS Settings...\e[0m"
+  echo -e "\e[94mSetting FTS Memory Limit (per process) to ${FTS_HEAP} MB\e[0m"
+  sed -i "s/vsz_limit\s*=\s*[0-9]*\s*MB*/vsz_limit=${FTS_HEAP} MB/" /etc/dovecot/conf.d/fts.conf
+  echo -e "\e[94mSetting FTS Process Limit to ${FTS_PROCS}\e[0m"
+  sed -i "s/process_limit\s*=\s*[0-9]*/process_limit=${FTS_PROCS}/" /etc/dovecot/conf.d/fts.conf
+fi
+
 # 401 is user dovecot
 if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
   openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem

@ -344,24 +250,27 @@ else
   chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
 fi

 # Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
 if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.conf /etc/dovecot/extra.conf; then
   sed -i '/\[openssl_init\]/a ssl_conf = ssl_configuration' /etc/ssl/openssl.cnf

   echo "[ssl_configuration]" >> /etc/ssl/openssl.cnf
   echo "system_default = tls_system_default" >> /etc/ssl/openssl.cnf
   echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
   echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
   echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
 fi

 # Compile sieve scripts
 sievec /var/vmail/sieve/global_sieve_before.sieve
 sievec /var/vmail/sieve/global_sieve_after.sieve
 sievec /usr/lib/dovecot/sieve/report-spam.sieve
 sievec /usr/lib/dovecot/sieve/report-ham.sieve

+for file in /var/vmail/*/*/sieve/*.sieve ; do
+  if [[ "$file" == "/var/vmail/*/*/sieve/*.sieve" ]]; then
+    continue
+  fi
+  sievec "$file" "$(dirname "$file")/../.dovecot.svbin"
+  chown vmail:vmail "$(dirname "$file")/../.dovecot.svbin"
+done
+
 # Fix permissions
 chown root:root /etc/dovecot/sql/*.conf
-chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/lua/passwd-verify.lua
-chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/lua/passwd-verify.lua
+chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/auth/passwd-verify.lua
+chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/auth/passwd-verify.lua
 chown -R vmail:vmail /var/vmail/sieve
 chown -R vmail:vmail /var/volatile
 chown -R vmail:vmail /var/vmail_index

@ -378,7 +287,8 @@ chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
   /usr/local/bin/maildir_gc.sh \
   /usr/local/sbin/stop-supervisor.sh \
   /usr/local/bin/quota_notify.py \
-  /usr/local/bin/repl_health.sh
+  /usr/local/bin/repl_health.sh \
+  /usr/local/bin/optimize-fts.sh

 # Prepare environment file for cronjobs
 printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh

@ -388,15 +298,15 @@ printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh

 # Clean stopped imapsync jobs
 rm -f /tmp/imapsync_busy.lock
-IMAPSYNC_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
-[[ ! -z ${IMAPSYNC_TABLE} ]] && mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"
+IMAPSYNC_TABLE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
+[[ ! -z ${IMAPSYNC_TABLE} ]] && mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"

 # Envsubst maildir_gc
 echo "$(envsubst < /usr/local/bin/maildir_gc.sh)" > /usr/local/bin/maildir_gc.sh

 # GUID generation
 while [[ ${VERSIONS_OK} != 'OK' ]]; do
-  if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
+  if [[ ! -z $(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
     VERSIONS_OK=OK
   else
     echo "Waiting for versions table to be created..."

@ -407,11 +317,11 @@ PUBKEY_MCRYPT=$(doveconf -P 2> /dev/null | grep -i mail_crypt_global_public_key
 if [ -f ${PUBKEY_MCRYPT} ]; then
   GUID=$(cat <(echo ${MAILCOW_HOSTNAME}) /mail_crypt/ecpubkey.pem | sha256sum | cut -d ' ' -f1 | tr -cd "[a-fA-F0-9.:/] ")
   if [ ${#GUID} -eq 64 ]; then
-    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+    mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
 REPLACE INTO versions (application, version) VALUES ("GUID", "${GUID}");
 EOF
   else
-    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+    mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
 REPLACE INTO versions (application, version) VALUES ("GUID", "INVALID");
 EOF
   fi

@ -430,6 +340,10 @@ done

 # For some strange, unknown and stupid reason, Dovecot may run into a race condition, when this file is not touched before it is read by dovecot/auth
 # May be related to something inside Docker, I seriously don't know
-touch /etc/dovecot/lua/passwd-verify.lua
+touch /etc/dovecot/auth/passwd-verify.lua
+
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
+fi

 exec "$@"

@ -75,7 +75,8 @@ my $sth = $dbh->prepare("SELECT id,
   custom_params,
   subscribeall,
   timeout1,
-  timeout2
+  timeout2,
+  dry
   FROM imapsync
   WHERE active = 1
     AND is_running = 0

@ -111,13 +112,16 @@ while ($row = $sth->fetchrow_arrayref()) {
   $subscribeall = @$row[18];
   $timeout1 = @$row[19];
   $timeout2 = @$row[20];
+  $dry = @$row[21];

   if ($enc1 eq "TLS") { $enc1 = "--tls1"; } elsif ($enc1 eq "SSL") { $enc1 = "--ssl1"; } else { undef $enc1; }

   my $template = $run_dir . '/imapsync.XXXXXXX';
   my $passfile1 = File::Temp->new(TEMPLATE => $template);
   my $passfile2 = File::Temp->new(TEMPLATE => $template);
+
+  binmode( $passfile1, ":utf8" );

   print $passfile1 "$password1\n";
   print $passfile2 trim($master_pass) . "\n";

@ -128,8 +132,8 @@ while ($row = $sth->fetchrow_arrayref()) {
   "--tmpdir", "/tmp",
   "--nofoldersizes",
   "--addheader",
-  ($timeout1 gt "0" ? () : ('--timeout1', $timeout1)),
-  ($timeout2 gt "0" ? () : ('--timeout2', $timeout2)),
+  ($timeout1 le "0" ? () : ('--timeout1', $timeout1)),
+  ($timeout2 le "0" ? () : ('--timeout2', $timeout2)),
   ($exclude eq "" ? () : ("--exclude", $exclude)),
   ($subfolder2 eq "" ? () : ('--subfolder2', $subfolder2)),
   ($maxage eq "0" ? () : ('--maxage', $maxage)),

@ -148,6 +152,7 @@ while ($row = $sth->fetchrow_arrayref()) {
   "--host2", "localhost",
   "--user2", $user2 . '*' . trim($master_user),
   "--passfile2", $passfile2->filename,
+  ($dry eq "1" ? ('--dry') : ()),
   '--no-modulesversion',
   '--noreleasecheck'];
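
The timeout hunk above fixes an inverted condition: with gt, positive timeouts were dropped and only nonpositive ones were passed through. A standalone sketch of the corrected intent (values hypothetical):

# Sketch: pass --timeoutN to imapsync only when the configured value is positive.
def timeout_args(flag, value):
    # value comes from the imapsync table; anything <= 0 means "use the default"
    return [flag, str(value)] if int(value) > 0 else []

args = timeout_args('--timeout1', 300) + timeout_args('--timeout2', 0)
# -> ['--timeout1', '300']
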

data/Dockerfiles/dovecot/optimize-fts.sh (new file)

@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [[ "${SKIP_FTS}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  exit 0
+else
+  doveadm fts optimize -A
+fi

@ -3,13 +3,13 @@
 import smtplib
 import os
 import sys
-import mysql.connector
+import MySQLdb
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 from email.utils import COMMASPACE, formatdate
 import cgi
+import jinja2
 from jinja2 import Template
+from jinja2 import TemplateError
+from jinja2.sandbox import SandboxedEnvironment
 import json
 import redis
 import time

@ -32,7 +32,7 @@ try:

 while True:
   try:
-    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
+    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
     r.ping()
   except Exception as ex:
     print('%s - trying again...' % (ex))

@ -50,7 +50,7 @@ try:
 def query_mysql(query, headers = True, update = False):
   while True:
     try:
-      cnx = mysql.connector.connect(unix_socket = '/var/run/mysqld/mysqld.sock', user=os.environ.get('DBUSER'), passwd=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8mb4", collation="utf8mb4_general_ci")
+      cnx = MySQLdb.connect(user=os.environ.get('DBUSER'), password=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8mb4", collation="utf8mb4_general_ci")
     except Exception as ex:
       print('%s - trying again...' % (ex))
       time.sleep(3)

@ -76,22 +76,27 @@ try:

 def notify_rcpt(rcpt, msg_count, quarantine_acl, category):
   if category == "add_header": category = "add header"
-  meta_query = query_mysql('SELECT SHA2(CONCAT(id, qid), 256) AS qhash, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
+  meta_query = query_mysql('SELECT `qhash`, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
   print("%s: %d of %d messages qualify for notification" % (rcpt, len(meta_query), msg_count))
   if len(meta_query) == 0:
     return
   msg_count = len(meta_query)
+  env = SandboxedEnvironment()
   if r.get('Q_HTML'):
-    try:
-      template = Template(r.get('Q_HTML'))
-    except:
-      print("Error: Cannot parse quarantine template, falling back to default template.")
-      with open('/templates/quarantine.tpl') as file_:
-        template = Template(file_.read())
+    try:
+      template = env.from_string(r.get('Q_HTML'))
+    except Exception:
+      print("Error: Cannot parse quarantine template, falling back to default template.")
+      with open('/templates/quarantine.tpl') as file_:
+        template = env.from_string(file_.read())
   else:
-    with open('/templates/quarantine.tpl') as file_:
-      template = Template(file_.read())
-  html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
+    with open('/templates/quarantine.tpl') as file_:
+      template = env.from_string(file_.read())
+  try:
+    html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
+  except (jinja2.exceptions.SecurityError, TemplateError) as ex:
+    print(f"SecurityError or TemplateError in template rendering: {ex}")
+    return
   text = html2text.html2text(html)
   count = 0
   while count < 15:
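
The switch to SandboxedEnvironment above matters because Q_HTML comes from Redis and is effectively user-controlled: the sandbox rejects attribute tricks that a plain Template would happily render. A minimal standalone demonstration (not mailcow code):

# Demo: the sandbox refuses unsafe attribute access that plain Template allows.
from jinja2 import Template
from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

malicious = "{{ ''.__class__.__mro__ }}"
print(Template(malicious).render())  # plain Template exposes Python internals
try:
    SandboxedEnvironment().from_string(malicious).render()
except SecurityError as ex:
    print("blocked:", ex)
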

@ -6,7 +6,7 @@ from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 from email.utils import COMMASPACE, formatdate
+import jinja2
 from jinja2 import Template
+from jinja2.sandbox import SandboxedEnvironment
 import redis
 import time
 import json

@ -23,7 +23,7 @@ else:

 while True:
   try:
-    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
+    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0, username='quota_notify', password='')
     r.ping()
   except Exception as ex:
     print('%s - trying again...' % (ex))

@ -33,16 +33,24 @@ while True:

 if r.get('QW_HTML'):
   try:
-    template = Template(r.get('QW_HTML'))
-  except:
-    print("Error: Cannot parse quarantine template, falling back to default template.")
+    env = SandboxedEnvironment()
+    template = env.from_string(r.get('QW_HTML'))
+  except Exception:
+    print("Error: Cannot parse quota template, falling back to default template.")
     with open('/templates/quota.tpl') as file_:
-      template = Template(file_.read())
+      env = SandboxedEnvironment()
+      template = env.from_string(file_.read())
 else:
   with open('/templates/quota.tpl') as file_:
-    template = Template(file_.read())
+    env = SandboxedEnvironment()
+    template = env.from_string(file_.read())

+try:
+  html = template.render(username=username, percent=percent)
+except (jinja2.exceptions.SecurityError, jinja2.TemplateError) as ex:
+  print(f"SecurityError or TemplateError in template rendering: {ex}")
+  sys.exit(1)
+
-html = template.render(username=username, percent=percent)
 text = html2text.html2text(html)

 try:

@ -55,7 +63,7 @@ try:
   msg.attach(text_part)
   msg.attach(html_part)
   msg['To'] = username
-  p = Popen(['/usr/lib/dovecot/dovecot-lda', '-d', username, '-o', '"plugin/quota=maildir:User quota:noenforcing"'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
+  p = Popen(['/usr/libexec/dovecot/dovecot-lda', '-d', username, '-o', '"plugin/quota=maildir:User quota:noenforcing"'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
   p.communicate(input=bytes(msg.as_string(), 'utf-8'))

   domain = username.split("@")[-1]

@ -4,14 +4,14 @@ source /source_env.sh

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi

 # Is replication active?
 # grep on file is less expensive than doveconf
 if ! grep -qi mail_replica /etc/dovecot/dovecot.conf; then
   if [ -n ${MAILCOW_REPLICA_IP} ]; then
     ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
     exit
   fi

@ -3,8 +3,8 @@ FILE=/tmp/mail$$
 cat > $FILE
 trap "/bin/rm -f $FILE" 0 1 2 3 13 15

-cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
-cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnham
-cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
+cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
+cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnham
+cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd

 exit 0

@ -3,8 +3,8 @@ FILE=/tmp/mail$$
 cat > $FILE
 trap "/bin/rm -f $FILE" 0 1 2 3 13 15

-cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
-cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnspam
-cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
+cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
+cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnspam
+cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd

 exit 0

@ -11,21 +11,25 @@ else
 fi

 # Deploy
-curl --connect-timeout 15 --retry 10 --max-time 30 http://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz
-if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
-  tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
-  cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
+if curl --connect-timeout 15 --retry 5 --max-time 30 https://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz; then
+  if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
+    tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
+    cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
+  fi
+else
+  echo "Failed to download SA rules. Exiting."
+  exit 0 # Must be 0 otherwise dovecot would not start at all
 fi

 sed -i -e 's/\([^\\]\)\$\([^\/]\)/\1\\$\2/g' /etc/rspamd/custom/sa-rules

 if [[ "$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)" != "${HASH_SA_RULES}" ]]; then
   CONTAINER_NAME=rspamd-mailcow
-  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | \
+  CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | \
     jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | \
     jq -rc "select( .name | tostring | contains(\"${CONTAINER_NAME}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
   if [[ ! -z ${CONTAINER_ID} ]]; then
-    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi/containers/${CONTAINER_ID}/restart
+    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
   fi
 fi

@ -13,6 +13,10 @@ autostart=true

 [program:dovecot]
 command=/usr/sbin/dovecot -F
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
 autorestart=true

 [eventlistener:processes]

@ -1,4 +1,4 @@
-@version: 3.28
+@version: 4.5
 @include "scl.conf"
 options {
   chain_hostnames(off);

@ -6,11 +6,12 @@ options {
   use_dns(no);
   use_fqdn(no);
   owner("root"); group("adm"); perm(0640);
-  stats_freq(0);
+  stats(freq(0));
+  keep_timestamp(no);
   bad_hostname("^gconfd$");
 };
-source s_src {
-  unix-stream("/dev/log");
+source s_dgram {
+  unix-dgram("/dev/log");
   internal();
 };
 destination d_stdout { pipe("/dev/stdout"); };

@ -19,6 +20,7 @@ destination d_redis_ui_log {
   host("`REDIS_SLAVEOF_IP`")
   persist-name("redis1")
   port(`REDIS_SLAVEOF_PORT`)
+  auth("`REDISPASS`")
   command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
   );
 };

@ -27,6 +29,7 @@ destination d_redis_f2b_channel {
   host("`REDIS_SLAVEOF_IP`")
   persist-name("redis2")
   port(`REDIS_SLAVEOF_PORT`)
+  auth("`REDISPASS`")
   command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
   );
 };

@ -35,8 +38,13 @@ filter f_replica {
   not match("User has no mail_replica in userdb" value("MESSAGE"));
   not match("Error: sync: Unknown user in remote" value("MESSAGE"));
 };
+filter f_dovecot_auth_try {
+  not match("- trying the next passdb" value("MESSAGE")) and
+  not match("- trying the next userdb" value("MESSAGE"));
+};
 log {
-  source(s_src);
+  source(s_dgram);
+  filter(f_dovecot_auth_try);
   filter(f_replica);
   destination(d_stdout);
   filter(f_mail);

@ -1,4 +1,4 @@
-@version: 3.28
+@version: 4.5
 @include "scl.conf"
 options {
   chain_hostnames(off);

@ -6,11 +6,12 @@ options {
   use_dns(no);
   use_fqdn(no);
   owner("root"); group("adm"); perm(0640);
-  stats_freq(0);
+  stats(freq(0));
+  keep_timestamp(no);
   bad_hostname("^gconfd$");
 };
-source s_src {
-  unix-stream("/dev/log");
+source s_dgram {
+  unix-dgram("/dev/log");
   internal();
 };
 destination d_stdout { pipe("/dev/stdout"); };

@ -19,6 +20,7 @@ destination d_redis_ui_log {
   host("redis-mailcow")
   persist-name("redis1")
   port(6379)
+  auth("`REDISPASS`")
   command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
   );
 };

@ -27,6 +29,7 @@ destination d_redis_f2b_channel {
   host("redis-mailcow")
   persist-name("redis2")
   port(6379)
+  auth("`REDISPASS`")
   command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
   );
 };

@ -35,8 +38,13 @@ filter f_replica {
   not match("User has no mail_replica in userdb" value("MESSAGE"));
   not match("Error: sync: Unknown user in remote" value("MESSAGE"));
 };
+filter f_dovecot_auth_try {
+  not match("- trying the next passdb" value("MESSAGE")) and
+  not match("- trying the next userdb" value("MESSAGE"));
+};
 log {
-  source(s_src);
+  source(s_dgram);
+  filter(f_dovecot_auth_try);
   filter(f_replica);
   destination(d_stdout);
   filter(f_mail);

@ -10,9 +10,9 @@ catch_non_zero() {
 source /source_env.sh
 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi
 catch_non_zero "${REDIS_CMDLINE} LTRIM ACME_LOG 0 ${LOG_LINES}"
 catch_non_zero "${REDIS_CMDLINE} LTRIM POSTFIX_MAILLOG 0 ${LOG_LINES}"

@ -23,3 +23,4 @@ catch_non_zero "${REDIS_CMDLINE} LTRIM AUTODISCOVER_LOG 0 ${LOG_LINES}"
 catch_non_zero "${REDIS_CMDLINE} LTRIM API_LOG 0 ${LOG_LINES}"
 catch_non_zero "${REDIS_CMDLINE} LTRIM RL_LOG 0 ${LOG_LINES}"
 catch_non_zero "${REDIS_CMDLINE} LTRIM WATCHDOG_LOG 0 ${LOG_LINES}"
+catch_non_zero "${REDIS_CMDLINE} LTRIM CRON_LOG 0 ${LOG_LINES}"

@ -1,6 +1,10 @@
-FROM alpine:3.17
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM alpine:3.21
+
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
+
+WORKDIR /app

 ARG PIP_BREAK_SYSTEM_PACKAGES=1
 ENV XTABLES_LIBDIR /usr/lib/xtables
 ENV PYTHON_IPTABLES_XTABLES_VERSION 12
 ENV IPTABLES_LIBDIR /usr/lib

@ -12,12 +16,16 @@ RUN apk add --virtual .build-deps \
   openssl-dev \
   && apk add -U python3 \
   iptables \
   iptables-dev \
   ip6tables \
   xtables-addons \
+  nftables \
   tzdata \
   py3-pip \
+  py3-nftables \
+  musl-dev \
   && pip3 install --ignore-installed --upgrade pip \
+  jsonschema \
   python-iptables \
   redis \
   ipaddress \

@ -26,5 +34,10 @@ RUN apk add --virtual .build-deps \

 # && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \

-COPY server.py /
-CMD ["python3", "-u", "/server.py"]
+COPY modules /app/modules
+COPY main.py /app/
+COPY ./docker-entrypoint.sh /app/
+
+RUN chmod +x /app/docker-entrypoint.sh
+
+CMD ["/bin/sh", "-c", "/app/docker-entrypoint.sh"]

data/Dockerfiles/netfilter/docker-entrypoint.sh (new file, executable)

@ -0,0 +1,29 @@
+#!/bin/sh
+
+backend=nftables
+
+nft list table ip filter &>/dev/null
+nftables_found=$?
+
+iptables -L -n &>/dev/null
+iptables_found=$?
+
+if [ $nftables_found -lt $iptables_found ]; then
+  backend=nftables
+fi
+
+if [ $nftables_found -gt $iptables_found ]; then
+  backend=iptables
+fi
+
+if [ $nftables_found -eq 0 ] && [ $nftables_found -eq $iptables_found ]; then
+  nftables_lines=$(nft list ruleset | wc -l)
+  iptables_lines=$(iptables-save | wc -l)
+  if [ $nftables_lines -gt $iptables_lines ]; then
+    backend=nftables
+  else
+    backend=iptables
+  fi
+fi
+
+exec python -u /app/main.py $backend

data/Dockerfiles/netfilter/main.py (new file, 548 lines)

@ -0,0 +1,548 @@
+#!/usr/bin/env python3
+
+DEBUG = False
+
+import re
+import os
+import sys
+import time
+import atexit
+import signal
+import ipaddress
+from collections import Counter
+from random import randint
+from threading import Thread
+from threading import Lock
+import redis
+import json
+import dns.resolver
+import dns.exception
+import uuid
+from modules.Logger import Logger
+from modules.IPTables import IPTables
+from modules.NFTables import NFTables
+
+def logdebug(msg):
+  if DEBUG:
+    logger.logInfo("DEBUG: %s" % msg)
+
+# Globals
+WHITELIST = []
+BLACKLIST = []
+bans = {}
+quit_now = False
+exit_code = 0
+lock = Lock()
+chain_name = "MAILCOW"
+r = None
+pubsub = None
+clear_before_quit = False
+
+def refreshF2boptions():
+  global f2boptions
+  global quit_now
+  global exit_code
+  f2boptions = {}
+
+  if not r.get('F2B_OPTIONS'):
+    f2boptions['ban_time'] = r.get('F2B_BAN_TIME')
+    f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME')
+    f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT')
+    f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS')
+    f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW')
+    f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4')
+    f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6')
+  else:
+    try:
+      f2boptions = json.loads(r.get('F2B_OPTIONS'))
+    except ValueError as e:
+      logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json. Exception: %s' % e)
+      quit_now = True
+      exit_code = 2
+
+  verifyF2boptions(f2boptions)
+  r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
+
+def verifyF2boptions(f2boptions):
+  verifyF2boption(f2boptions, 'ban_time', 1800)
+  verifyF2boption(f2boptions, 'max_ban_time', 10000)
+  verifyF2boption(f2boptions, 'ban_time_increment', True)
+  verifyF2boption(f2boptions, 'max_attempts', 10)
+  verifyF2boption(f2boptions, 'retry_window', 600)
+  verifyF2boption(f2boptions, 'netban_ipv4', 32)
+  verifyF2boption(f2boptions, 'netban_ipv6', 128)
+  verifyF2boption(f2boptions, 'banlist_id', str(uuid.uuid4()))
+  verifyF2boption(f2boptions, 'manage_external', 0)
+
+def verifyF2boption(f2boptions, f2boption, f2bdefault):
+  f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
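
verifyF2boption above backfills any missing or null option with its default, so a partially populated F2B_OPTIONS hash in Redis still yields a complete configuration. A standalone sketch of that behavior (not mailcow code):

def verify(opts, key, default):
    # Same pattern as verifyF2boption: keep existing non-null values only.
    opts[key] = opts[key] if key in opts and opts[key] is not None else default

opts = {'ban_time': 3600, 'netban_ipv4': None}
verify(opts, 'ban_time', 1800)    # kept: 3600
verify(opts, 'netban_ipv4', 32)   # None -> default 32
verify(opts, 'max_attempts', 10)  # missing -> default 10
print(opts)
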
|
||||
|
||||
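# Illustration (not part of the original file): the fallback rule above keeps
# explicit values and replaces missing or None entries with the default:
#   >>> opts = {'ban_time': None, 'max_attempts': 5}
#   >>> verifyF2boption(opts, 'ban_time', 1800); verifyF2boption(opts, 'max_attempts', 10)
#   >>> opts
#   {'ban_time': 1800, 'max_attempts': 5}
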
def refreshF2bregex():
  global f2bregex
  global quit_now
  global exit_code
  if not r.get('F2B_REGEX'):
    f2bregex = {}
    f2bregex[1] = r'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[2] = r'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[3] = r'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
    f2bregex[4] = r'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
    f2bregex[5] = r'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
    f2bregex[6] = r'\w+\([^,]+,([0-9a-f\.:]+),<[^>]+>\): Password mismatch \(SHA1 of given password: [a-f0-9]+\)'
    f2bregex[7] = r'\w+\([^,]+,([0-9a-f\.:]+),<[^>]+>\): unknown user \(SHA1 of given password: [a-f0-9]+\)'
    f2bregex[8] = r'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[9] = r'([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
  else:
    try:
      f2bregex = {}
      f2bregex = json.loads(r.get('F2B_REGEX'))
    except ValueError:
      logger.logCrit('Error loading F2B options: F2B_REGEX is not json')
      quit_now = True
      exit_code = 2

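# Illustration (not part of the original file): every pattern captures the
# offending client address in group 1, which watch() hands to ban(); the
# sample log line below is made up:
#   >>> re.search(f2bregex[1], "mailcow UI: Invalid password for admin by 192.0.2.7").group(1)
#   '192.0.2.7'
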
def get_ip(address):
  ip = ipaddress.ip_address(address)
  if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
    ip = ip.ipv4_mapped
  if ip.is_private or ip.is_loopback:
    return False

  return ip

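# Illustration (not part of the original file): stdlib behavior relied on
# above to normalize IPv4-mapped IPv6 addresses before the private/loopback check:
#   >>> ipaddress.ip_address('::ffff:192.0.2.7').ipv4_mapped
#   IPv4Address('192.0.2.7')
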
def ban(address):
  global f2boptions
  global lock
  logdebug("ban() called with address=%s" % address)
  refreshF2boptions()
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
  NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
  NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])

  ip = get_ip(address)
  if not ip:
    logdebug("No valid IP -- skipping ban()")
    return
  address = str(ip)
  self_network = ipaddress.ip_network(address)

  with lock:
    temp_whitelist = set(WHITELIST)
    logdebug("Checking if %s overlaps with any WHITELIST entries" % self_network)
    if temp_whitelist:
      for wl_key in temp_whitelist:
        wl_net = ipaddress.ip_network(wl_key, False)
        logdebug("Checking overlap between %s and %s" % (self_network, wl_net))
        if wl_net.overlaps(self_network):
          logger.logInfo(
            'Address %s is allowlisted by rule %s' % (self_network, wl_net))
          return

  net = ipaddress.ip_network(
    (address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = str(net)
  logdebug("Ban net: %s" % net)

  if not net in bans:
    bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
    logdebug("Initing new ban counter for %s" % net)

  current_attempt = time.time()
  logdebug("Current attempt ts=%s, previous: %s, retry_window: %s" %
           (current_attempt, bans[net]['last_attempt'], RETRY_WINDOW))
  if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
    bans[net]['attempts'] = 0
    logdebug("Ban counter for %s reset as window expired" % net)

  bans[net]['attempts'] += 1
  bans[net]['last_attempt'] = current_attempt
  logdebug("%s attempts now %d" % (net, bans[net]['attempts']))

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
    NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
    logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60))
    if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
      with lock:
        logdebug("Calling tables.banIPv4(%s)" % net)
        tables.banIPv4(net)
    elif int(f2boptions['manage_external']) != 1:
      with lock:
        logdebug("Calling tables.banIPv6(%s)" % net)
        tables.banIPv6(net)

    logdebug("Updating F2B_ACTIVE_BANS[%s]=%d" %
             (net, cur_time + NET_BAN_TIME))
    r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
  else:
    logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (
      MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))

def unban(net):
  global lock
  logdebug("Calling unban() with net=%s" % net)
  if not net in bans:
    logger.logInfo(
      '%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return
  logger.logInfo('Unbanning %s' % net)
  if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
    with lock:
      logdebug("Calling tables.unbanIPv4(%s)" % net)
      tables.unbanIPv4(net)
  else:
    with lock:
      logdebug("Calling tables.unbanIPv6(%s)" % net)
      tables.unbanIPv6(net)
  r.hdel('F2B_ACTIVE_BANS', '%s' % net)
  r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
  if net in bans:
    logdebug("Unban for %s, setting attempts=0, ban_counter+=1" % net)
    bans[net]['attempts'] = 0
    bans[net]['ban_counter'] += 1

def permBan(net, unban=False):
  global f2boptions
  global lock

  is_unbanned = False
  is_banned = False
  if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
    with lock:
      if unban:
        is_unbanned = tables.unbanIPv4(net)
      elif int(f2boptions['manage_external']) != 1:
        is_banned = tables.banIPv4(net)
  else:
    with lock:
      if unban:
        is_unbanned = tables.unbanIPv6(net)
      elif int(f2boptions['manage_external']) != 1:
        is_banned = tables.banIPv6(net)

  if is_unbanned:
    r.hdel('F2B_PERM_BANS', '%s' % net)
    logger.logCrit('Removed host/network %s from denylist' % net)
  elif is_banned:
    r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
    logger.logCrit('Added host/network %s to denylist' % net)

def clear():
  global lock
  logger.logInfo('Clearing all bans')
  for net in bans.copy():
    logdebug("Unbanning net: %s" % net)
    unban(net)
  with lock:
    logdebug("Clearing IPv4/IPv6 table")
    tables.clearIPv4Table()
    tables.clearIPv6Table()
  try:
    if r is not None:
      r.delete('F2B_ACTIVE_BANS')
      r.delete('F2B_PERM_BANS')
  except Exception as ex:
    logger.logWarn('Error clearing redis keys F2B_ACTIVE_BANS and F2B_PERM_BANS: %s' % ex)

def watch():
  global pubsub
  global quit_now
  global exit_code

  logger.logInfo('Watching Redis channel F2B_CHANNEL')
  pubsub.subscribe('F2B_CHANNEL')

  while not quit_now:
    try:
      for item in pubsub.listen():
        refreshF2bregex()
        for rule_id, rule_regex in f2bregex.items():
          if item['data'] and item['type'] == 'message':
            try:
              result = re.search(rule_regex, item['data'])
            except re.error:
              result = False
            if result:
              addr = result.group(1)
              ip = ipaddress.ip_address(addr)
              if ip.is_private or ip.is_loopback:
                continue
              logger.logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
              ban(addr)
    except Exception as ex:
      logger.logWarn('Error reading log line from pubsub: %s' % ex)
      pubsub = None
      quit_now = True
      exit_code = 2

def snat4(snat_target):
  global lock
  global quit_now

  while not quit_now:
    time.sleep(10)
    with lock:
      tables.snat4(snat_target, os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24')

def snat6(snat_target):
  global lock
  global quit_now

  while not quit_now:
    time.sleep(10)
    with lock:
      tables.snat6(snat_target, os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64'))

def autopurge():
  global f2boptions
  logdebug("autopurge thread started")
  while not quit_now:
    logdebug("autopurge tick")
    time.sleep(10)
    refreshF2boptions()
    MAX_ATTEMPTS = int(f2boptions['max_attempts'])
    QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
    logdebug("QUEUE_UNBAN: %s" % QUEUE_UNBAN)
    if QUEUE_UNBAN:
      for net in QUEUE_UNBAN:
        logdebug("Autopurge: unbanning queued net: %s" % net)
        unban(str(net))
    # Only check expiry for actively banned IPs:
    active_bans = r.hgetall('F2B_ACTIVE_BANS')
    now = time.time()
    for net_str, expire_str in active_bans.items():
      logdebug("Checking ban expiry for (actively banned): %s" % net_str)
      # Defensive: always process if timer missing or expired
      try:
        expire = float(expire_str)
      except Exception:
        logdebug("Invalid expire time for %s; unbanning" % net_str)
        unban(net_str)
        continue
      time_left = expire - now
      logdebug("Time left for %s: %.1f seconds" % (net_str, time_left))
      if time_left <= 0:
        logdebug("Ban expired for %s" % net_str)
        unban(net_str)

def mailcowChainOrder():
  global lock
  global quit_now
  global exit_code
  while not quit_now:
    time.sleep(10)
    with lock:
      quit_now, exit_code = tables.checkIPv4ChainOrder()
      if quit_now: return
      quit_now, exit_code = tables.checkIPv6ChainOrder()

def calcNetBanTime(ban_counter):
  global f2boptions

  BAN_TIME = int(f2boptions['ban_time'])
  MAX_BAN_TIME = int(f2boptions['max_ban_time'])
  BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
  NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** ban_counter
  NET_BAN_TIME = max([BAN_TIME, min([NET_BAN_TIME, MAX_BAN_TIME])])
  return NET_BAN_TIME

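# Illustration (not part of the original file): with the defaults above
# (ban_time=1800, max_ban_time=10000, increment enabled), the ban duration
# doubles per repeat offense and is clamped at the maximum:
#   >>> [max(1800, min(1800 * 2 ** c, 10000)) for c in range(4)]
#   [1800, 3600, 7200, 10000]
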
def isIpNetwork(address):
  try:
    ipaddress.ip_network(address, False)
  except ValueError:
    return False
  return True

def genNetworkList(list):
  resolver = dns.resolver.Resolver()
  hostnames = []
  networks = []
  for key in list:
    if isIpNetwork(key):
      networks.append(key)
    else:
      hostnames.append(key)
  for hostname in hostnames:
    hostname_ips = []
    for rdtype in ['A', 'AAAA']:
      try:
        answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
      except dns.exception.Timeout:
        logger.logInfo('Hostname %s timed out on resolve' % hostname)
        break
      except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        continue
      except dns.exception.DNSException as dnsexception:
        logger.logInfo('%s' % dnsexception)
        continue
      for rdata in answer:
        hostname_ips.append(rdata.to_text())
    networks.extend(hostname_ips)
  return set(networks)

def whitelistUpdate():
  global lock
  global quit_now
  global WHITELIST
  while not quit_now:
    start_time = time.time()
    list = r.hgetall('F2B_WHITELIST')
    new_whitelist = []
    if list:
      new_whitelist = genNetworkList(list)
    with lock:
      if Counter(new_whitelist) != Counter(WHITELIST):
        WHITELIST = new_whitelist
        logger.logInfo('Allowlist was changed, it has %s entries' % len(WHITELIST))
    time.sleep(60.0 - ((time.time() - start_time) % 60.0))

def blacklistUpdate():
  global quit_now
  global BLACKLIST
  while not quit_now:
    start_time = time.time()
    list = r.hgetall('F2B_BLACKLIST')
    new_blacklist = []
    if list:
      new_blacklist = genNetworkList(list)
    if Counter(new_blacklist) != Counter(BLACKLIST):
      addban = set(new_blacklist).difference(BLACKLIST)
      delban = set(BLACKLIST).difference(new_blacklist)
      BLACKLIST = new_blacklist
      logger.logInfo('Denylist was changed, it has %s entries' % len(BLACKLIST))
      if addban:
        for net in addban:
          permBan(net=net)
      if delban:
        for net in delban:
          permBan(net=net, unban=True)
    time.sleep(60.0 - ((time.time() - start_time) % 60.0))

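# Illustration (not part of the original file): the sleep arithmetic in both
# loops above pauses for the remainder of the current minute, so iterations
# stay roughly 60s apart no matter how long DNS resolution took; e.g. after
# 12.3s of work, 60.0 - ((time.time() - start_time) % 60.0) is about 47.7s.
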
def sigterm_quit(signum, frame):
  global clear_before_quit
  logdebug("SIGTERM received, setting clear_before_quit to True and exiting")
  clear_before_quit = True
  sys.exit(exit_code)

def before_quit():
  logdebug("before_quit called, clear_before_quit=%s" % clear_before_quit)
  if clear_before_quit:
    clear()
  if pubsub is not None:
    pubsub.unsubscribe()

if __name__ == '__main__':
  logger = Logger()
  logdebug("Sys.argv: %s" % sys.argv)
  atexit.register(before_quit)
  signal.signal(signal.SIGTERM, sigterm_quit)

  backend = sys.argv[1]
  logdebug("Backend: %s" % backend)
  if backend == "nftables":
    logger.logInfo('Using NFTables backend')
    tables = NFTables(chain_name, logger)
  else:
    logger.logInfo('Using IPTables backend')
    logger.logWarn(
      "DEPRECATION: iptables-legacy is deprecated and will be removed in future releases. "
      "Please switch to nftables on your host to ensure complete compatibility."
    )
    time.sleep(5)
    tables = IPTables(chain_name, logger)

  clear()
  logger.logInfo("Initializing mailcow netfilter chain")
  tables.initChainIPv4()
  tables.initChainIPv6()

  if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE", "").lower() in ("y", "yes"):
    logger.logInfo(f"Skipping {chain_name} isolation")
  else:
    logger.logInfo(f"Setting {chain_name} isolation")
    tables.create_mailcow_isolation_rule("br-mailcow", [3306, 6379, 8983, 12345], os.getenv("MAILCOW_REPLICA_IP"))

  # connect to redis
  while True:
    try:
      redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
      redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
      logdebug(
        "Connecting redis (SLAVEOF_IP:%s, PORT:%s)" % (redis_slaveof_ip, redis_slaveof_port))
      if "".__eq__(redis_slaveof_ip):
        r = redis.StrictRedis(
          host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
      else:
        r = redis.StrictRedis(
          host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
      r.ping()
      pubsub = r.pubsub()
    except Exception as ex:
      logdebug(
        'Redis connection failed: %s - trying again in 3 seconds' % (ex))
      time.sleep(3)
    else:
      break
  logger.set_redis(r)
  logdebug("Redis connection established, setting up F2B keys")

  if r.exists('F2B_LOG'):
    logdebug("Renaming F2B_LOG to NETFILTER_LOG")
    r.rename('F2B_LOG', 'NETFILTER_LOG')
  r.delete('F2B_ACTIVE_BANS')
  r.delete('F2B_PERM_BANS')

  refreshF2boptions()

  watch_thread = Thread(target=watch)
  watch_thread.daemon = True
  watch_thread.start()

  if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
    try:
      snat_ip = os.getenv('SNAT_TO_SOURCE')
      snat_ipo = ipaddress.ip_address(snat_ip)
      if type(snat_ipo) is ipaddress.IPv4Address:
        snat4_thread = Thread(target=snat4, args=(snat_ip,))
        snat4_thread.daemon = True
        snat4_thread.start()
    except ValueError:
      print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')

  if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
    try:
      snat_ip = os.getenv('SNAT6_TO_SOURCE')
      snat_ipo = ipaddress.ip_address(snat_ip)
      if type(snat_ipo) is ipaddress.IPv6Address:
        snat6_thread = Thread(target=snat6, args=(snat_ip,))
        snat6_thread.daemon = True
        snat6_thread.start()
    except ValueError:
      print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')

  autopurge_thread = Thread(target=autopurge)
  autopurge_thread.daemon = True
  autopurge_thread.start()

  mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
  mailcowchainwatch_thread.daemon = True
  mailcowchainwatch_thread.start()

  blacklistupdate_thread = Thread(target=blacklistUpdate)
  blacklistupdate_thread.daemon = True
  blacklistupdate_thread.start()

  whitelistupdate_thread = Thread(target=whitelistUpdate)
  whitelistupdate_thread.daemon = True
  whitelistupdate_thread.start()

  while not quit_now:
    time.sleep(0.5)

  logdebug("Exiting with code %s" % exit_code)
  sys.exit(exit_code)
252 data/Dockerfiles/netfilter/modules/IPTables.py (Normal file)
@@ -0,0 +1,252 @@
import iptc
import time
import os

class IPTables:
  def __init__(self, chain_name, logger):
    self.chain_name = chain_name
    self.logger = logger

  def initChainIPv4(self):
    if not iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) in iptc.Table(iptc.Table.FILTER).chains:
      iptc.Table(iptc.Table.FILTER).create_chain(self.chain_name)
    for c in ['FORWARD', 'INPUT']:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
      rule = iptc.Rule()
      rule.src = '0.0.0.0/0'
      rule.dst = '0.0.0.0/0'
      target = iptc.Target(rule, self.chain_name)
      rule.target = target
      if rule not in chain.rules:
        chain.insert_rule(rule)

  def initChainIPv6(self):
    if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name) in iptc.Table6(iptc.Table6.FILTER).chains:
      iptc.Table6(iptc.Table6.FILTER).create_chain(self.chain_name)
    for c in ['FORWARD', 'INPUT']:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
      rule = iptc.Rule6()
      rule.src = '::/0'
      rule.dst = '::/0'
      target = iptc.Target(rule, self.chain_name)
      rule.target = target
      if rule not in chain.rules:
        chain.insert_rule(rule)

  def checkIPv4ChainOrder(self):
    filter_table = iptc.Table(iptc.Table.FILTER)
    filter_table.refresh()
    return self.checkChainOrder(filter_table)

  def checkIPv6ChainOrder(self):
    filter_table = iptc.Table6(iptc.Table6.FILTER)
    filter_table.refresh()
    return self.checkChainOrder(filter_table)

  def checkChainOrder(self, filter_table):
    err = False
    exit_code = None

    forward_chain = iptc.Chain(filter_table, 'FORWARD')
    input_chain = iptc.Chain(filter_table, 'INPUT')
    for chain in [forward_chain, input_chain]:
      target_found = False
      for position, item in enumerate(chain.rules):
        if item.target.name == self.chain_name:
          target_found = True
          if position > 2:
            self.logger.logCrit('Error in %s chain: %s target is at position %d, restarting container' % (chain.name, self.chain_name, position))
            err = True
            exit_code = 2
      if not target_found:
        self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name))
        err = True
        exit_code = 2

    return err, exit_code

  def clearIPv4Table(self):
    self.clearTable(iptc.Table(iptc.Table.FILTER))

  def clearIPv6Table(self):
    self.clearTable(iptc.Table6(iptc.Table6.FILTER))

  def clearTable(self, filter_table):
    filter_table.autocommit = False
    forward_chain = iptc.Chain(filter_table, "FORWARD")
    input_chain = iptc.Chain(filter_table, "INPUT")
    mailcow_chain = iptc.Chain(filter_table, self.chain_name)
    if mailcow_chain in filter_table.chains:
      for rule in mailcow_chain.rules:
        mailcow_chain.delete_rule(rule)
      for rule in forward_chain.rules:
        if rule.target.name == self.chain_name:
          forward_chain.delete_rule(rule)
      for rule in input_chain.rules:
        if rule.target.name == self.chain_name:
          input_chain.delete_rule(rule)
      filter_table.delete_chain(self.chain_name)
    filter_table.commit()
    filter_table.refresh()
    filter_table.autocommit = True

  def banIPv4(self, source):
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
    rule = iptc.Rule()
    rule.src = source
    target = iptc.Target(rule, "REJECT")
    rule.target = target
    if rule in chain.rules:
      return False
    chain.insert_rule(rule)
    return True

  def banIPv6(self, source):
    chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
    rule = iptc.Rule6()
    rule.src = source
    target = iptc.Target(rule, "REJECT")
    rule.target = target
    if rule in chain.rules:
      return False
    chain.insert_rule(rule)
    return True

  def unbanIPv4(self, source):
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
    rule = iptc.Rule()
    rule.src = source
    target = iptc.Target(rule, "REJECT")
    rule.target = target
    if rule not in chain.rules:
      return False
    chain.delete_rule(rule)
    return True

  def unbanIPv6(self, source):
    chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
    rule = iptc.Rule6()
    rule.src = source
    target = iptc.Target(rule, "REJECT")
    rule.target = target
    if rule not in chain.rules:
      return False
    chain.delete_rule(rule)
    return True

  def snat4(self, snat_target, source):
    try:
      table = iptc.Table('nat')
      table.refresh()
      chain = iptc.Chain(table, 'POSTROUTING')
      table.autocommit = False
      new_rule = self.getSnat4Rule(snat_target, source)

      if not chain.rules:
        # if there are no rules in the chain, insert the new rule directly
        self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
        chain.insert_rule(new_rule)
      else:
        for position, rule in enumerate(chain.rules):
          if not hasattr(rule.target, 'parameter'):
            continue
          match = all((
            new_rule.get_src() == rule.get_src(),
            new_rule.get_dst() == rule.get_dst(),
            new_rule.target.parameters == rule.target.parameters,
            new_rule.target.name == rule.target.name
          ))
          if position == 0:
            if not match:
              self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
              chain.insert_rule(new_rule)
          else:
            if match:
              self.logger.logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
              chain.delete_rule(rule)

      table.commit()
      table.autocommit = True
      return True
    except:
      self.logger.logCrit('Error running SNAT4, retrying...')
      return False

  def snat6(self, snat_target, source):
    try:
      table = iptc.Table6('nat')
      table.refresh()
      chain = iptc.Chain(table, 'POSTROUTING')
      table.autocommit = False
      new_rule = self.getSnat6Rule(snat_target, source)

      if new_rule not in chain.rules:
        self.logger.logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (new_rule.src, snat_target))
        chain.insert_rule(new_rule)
      else:
        for position, item in enumerate(chain.rules):
          if item == new_rule:
            if position != 0:
              chain.delete_rule(new_rule)

      table.commit()
      table.autocommit = True
    except:
      self.logger.logCrit('Error running SNAT6, retrying...')

  def getSnat4Rule(self, snat_target, source):
    rule = iptc.Rule()
    rule.src = source
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    match = rule.create_match("comment")
    match.comment = f'{int(round(time.time()))}'
    return rule

  def getSnat6Rule(self, snat_target, source):
    rule = iptc.Rule6()
    rule.src = source
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    return rule

  def create_mailcow_isolation_rule(self, _interface: str, _dports: list, _allow: str = ""):
    try:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)

      # insert mailcow isolation rule
      rule = iptc.Rule()
      rule.in_interface = f'!{_interface}'
      rule.out_interface = _interface
      rule.protocol = 'tcp'
      rule.create_target("DROP")
      match = rule.create_match("multiport")
      match.dports = ','.join(map(str, _dports))

      if rule in chain.rules:
        chain.delete_rule(rule)
      chain.insert_rule(rule, position=0)

      # insert mailcow isolation exception rule
      if _allow != "":
        rule = iptc.Rule()
        rule.src = _allow
        rule.in_interface = f'!{_interface}'
        rule.out_interface = _interface
        rule.protocol = 'tcp'
        rule.create_target("ACCEPT")
        match = rule.create_match("multiport")
        match.dports = ','.join(map(str, _dports))

        if rule in chain.rules:
          chain.delete_rule(rule)
        chain.insert_rule(rule, position=0)

      return True
    except Exception as e:
      self.logger.logCrit(f"Error adding {self.chain_name} isolation: {e}")
      return False
42 data/Dockerfiles/netfilter/modules/Logger.py (Normal file)
@@ -0,0 +1,42 @@
import time
import json
import datetime

class Logger:
  def __init__(self):
    self.r = None

  def set_redis(self, redis):
    self.r = redis

  def _format_timestamp(self):
    # Local time, second resolution
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

  def log(self, priority, message):
    # build redis-friendly dict
    tolog = {
      'time': int(round(time.time())),  # keep raw timestamp for Redis
      'priority': priority,
      'message': message
    }

    # print human-readable message with timestamp
    ts = self._format_timestamp()
    print(f"{ts} {priority.upper()}: {message}", flush=True)

    # also push JSON to Redis if connected
    if self.r is not None:
      try:
        self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
      except Exception as ex:
        print(f'{ts} WARN: Failed logging to redis: {ex}', flush=True)

  def logWarn(self, message):
    self.log('warn', message)

  def logCrit(self, message):
    self.log('crit', message)

  def logInfo(self, message):
    self.log('info', message)
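Typical wiring, as done in main.py above: construct the logger early so startup messages reach stdout, then attach Redis once the connection is up so later messages are also pushed to the NETFILTER_LOG list (the host value below is illustrative):

    logger = Logger()
    logger.logInfo('starting up')  # stdout only; Redis not attached yet
    r = redis.StrictRedis(host='172.22.1.249', decode_responses=True)
    logger.set_redis(r)            # from here on, messages are mirrored to Redis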
659 data/Dockerfiles/netfilter/modules/NFTables.py (Normal file)
@@ -0,0 +1,659 @@
import nftables
import ipaddress
import os

class NFTables:
  def __init__(self, chain_name, logger):
    self.chain_name = chain_name
    self.logger = logger

    self.nft = nftables.Nftables()
    self.nft.set_json_output(True)
    self.nft.set_handle_output(True)
    self.nft_chain_names = {'ip': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} },
                            'ip6': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} } }

    self.search_current_chains()

  def initChainIPv4(self):
    self.insert_mailcow_chains("ip")

  def initChainIPv6(self):
    self.insert_mailcow_chains("ip6")

  def checkIPv4ChainOrder(self):
    return self.checkChainOrder("ip")

  def checkIPv6ChainOrder(self):
    return self.checkChainOrder("ip6")

  def checkChainOrder(self, filter_table):
    err = False
    exit_code = None

    for chain in ['input', 'forward']:
      chain_position = self.check_mailcow_chains(filter_table, chain)
      if chain_position is None: continue

      if chain_position is False:
        self.logger.logCrit(f'MAILCOW target not found in {filter_table} {chain} table, restarting container to fix it...')
        err = True
        exit_code = 2

      if chain_position > 0:
        chain_position += 1
        self.logger.logCrit(f'MAILCOW target is in position {chain_position} in the {filter_table} {chain} table, restarting container to fix it...')
        err = True
        exit_code = 2

    return err, exit_code

  def clearIPv4Table(self):
    self.clearTable("ip")

  def clearIPv6Table(self):
    self.clearTable("ip6")

  def clearTable(self, _family):
    is_empty_dict = True
    json_command = self.get_base_dict()
    chain_handle = self.get_chain_handle(_family, "filter", self.chain_name)
    # if there is no handle, the chain doesn't exist
    if chain_handle is not None:
      is_empty_dict = False
      # flush chain
      mailcow_chain = {'family': _family, 'table': 'filter', 'name': self.chain_name}
      flush_chain = {'flush': {'chain': mailcow_chain}}
      json_command["nftables"].append(flush_chain)

    # remove the jump rules from the input and forward chains
    chains_family = [self.nft_chain_names[_family]['filter']['input'],
                     self.nft_chain_names[_family]['filter']['forward'] ]

    for chain_base in chains_family:
      if not chain_base: continue

      rules_handle = self.get_rules_handle(_family, "filter", chain_base)
      if rules_handle is not None:
        for r_handle in rules_handle:
          is_empty_dict = False
          mailcow_rule = {'family': _family,
                          'table': 'filter',
                          'chain': chain_base,
                          'handle': r_handle }
          delete_rules = {'delete': {'rule': mailcow_rule} }
          json_command["nftables"].append(delete_rules)

    # remove the chain itself,
    # after deleting all rules referencing it
    if chain_handle is not None:
      mc_chain_handle = {'family': _family,
                         'table': 'filter',
                         'name': self.chain_name,
                         'handle': chain_handle }
      delete_chain = {'delete': {'chain': mc_chain_handle} }
      json_command["nftables"].append(delete_chain)

    if is_empty_dict == False:
      if self.nft_exec_dict(json_command):
        self.logger.logInfo(f"Clear completed: {_family}")

  def banIPv4(self, source):
    ban_dict = self.get_ban_ip_dict(source, "ip")
    return self.nft_exec_dict(ban_dict)

  def banIPv6(self, source):
    ban_dict = self.get_ban_ip_dict(source, "ip6")
    return self.nft_exec_dict(ban_dict)

  def unbanIPv4(self, source):
    unban_dict = self.get_unban_ip_dict(source, "ip")
    if not unban_dict:
      return False
    return self.nft_exec_dict(unban_dict)

  def unbanIPv6(self, source):
    unban_dict = self.get_unban_ip_dict(source, "ip6")
    if not unban_dict:
      return False
    return self.nft_exec_dict(unban_dict)

  def snat4(self, snat_target, source):
    self.snat_rule("ip", snat_target, source)

  def snat6(self, snat_target, source):
    self.snat_rule("ip6", snat_target, source)

  def nft_exec_dict(self, query: dict):
    if not query: return False

    rc, output, error = self.nft.json_cmd(query)
    if rc != 0:
      #self.logger.logCrit(f"Nftables Error: {error}")
      return False

    # Prevent returning False or an empty string on commands that do not produce output
    if rc == 0 and len(output) == 0:
      return True

    return output

  def get_base_dict(self):
    return {'nftables': [{ 'metainfo': { 'json_schema_version': 1} } ] }

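  # Illustration (not part of the original file): every libnftables call built
  # in this class starts from get_base_dict() and appends one command object,
  # e.g. the JSON form of 'nft list chains' used by search_current_chains() below:
  #   >>> command = self.get_base_dict()
  #   >>> command['nftables'].append({'list': {'chains': 'null'}})
  #   >>> command
  #   {'nftables': [{'metainfo': {'json_schema_version': 1}}, {'list': {'chains': 'null'}}]}
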
  def search_current_chains(self):
    nft_chain_priority = {'ip': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} },
                          'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } }

    # Command: 'nft list chains'
    _list = {'list' : {'chains': 'null'} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset['nftables']:
        chain = _object.get("chain")
        if not chain: continue

        _family = chain['family']
        _table = chain['table']
        _hook = chain.get("hook")
        _priority = chain.get("prio")
        _name = chain['name']

        if _family not in self.nft_chain_names: continue
        if _table not in self.nft_chain_names[_family]: continue
        if _hook not in self.nft_chain_names[_family][_table]: continue
        if _priority is None: continue

        _saved_priority = nft_chain_priority[_family][_table][_hook]
        if _saved_priority is None or _priority < _saved_priority:
          # at this point, we know the chain has a hook and a priority set,
          # and it has the lowest priority seen so far
          nft_chain_priority[_family][_table][_hook] = _priority
          self.nft_chain_names[_family][_table][_hook] = _name

  def search_for_chain(self, kernel_ruleset: dict, chain_name: str):
    found = False
    for _object in kernel_ruleset["nftables"]:
      chain = _object.get("chain")
      if not chain:
        continue
      ch_name = chain.get("name")
      if ch_name == chain_name:
        found = True
        break
    return found

  def get_chain_dict(self, _family: str, _name: str):
    # nft (add | create) chain [<family>] <table> <name>
    _chain_opts = {'family': _family, 'table': 'filter', 'name': _name }
    _add = {'add': {'chain': _chain_opts} }
    final_chain = self.get_base_dict()
    final_chain["nftables"].append(_add)
    return final_chain

  def get_mailcow_jump_rule_dict(self, _family: str, _chain: str):
    _jump_rule = self.get_base_dict()
    _expr_opt = []
    _expr_counter = {'family': _family, 'table': 'filter', 'packets': 0, 'bytes': 0}
    _counter_dict = {'counter': _expr_counter}
    _expr_opt.append(_counter_dict)

    _jump_opts = {'jump': {'target': self.chain_name} }

    _expr_opt.append(_jump_opts)

    _rule_params = {'family': _family,
                    'table': 'filter',
                    'chain': _chain,
                    'expr': _expr_opt,
                    'comment': "mailcow" }

    _add_rule = {'insert': {'rule': _rule_params} }

    _jump_rule["nftables"].append(_add_rule)

    return _jump_rule

  def insert_mailcow_chains(self, _family: str):
    nft_input_chain = self.nft_chain_names[_family]['filter']['input']
    nft_forward_chain = self.nft_chain_names[_family]['filter']['forward']
    # Command: 'nft list table <family> filter'
    _table_opts = {'family': _family, 'name': 'filter'}
    _list = {'list': {'table': _table_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      # chain
      if not self.search_for_chain(kernel_ruleset, self.chain_name):
        cadena = self.get_chain_dict(_family, self.chain_name)
        if self.nft_exec_dict(cadena):
          self.logger.logInfo(f"MAILCOW {_family} chain created successfully.")

      input_jump_found, forward_jump_found = False, False

      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        rule = _object["rule"]
        if nft_input_chain and rule["chain"] == nft_input_chain:
          if rule.get("comment") and rule["comment"] == "mailcow":
            input_jump_found = True
        if nft_forward_chain and rule["chain"] == nft_forward_chain:
          if rule.get("comment") and rule["comment"] == "mailcow":
            forward_jump_found = True

      if not input_jump_found:
        command = self.get_mailcow_jump_rule_dict(_family, nft_input_chain)
        self.nft_exec_dict(command)

      if not forward_jump_found:
        command = self.get_mailcow_jump_rule_dict(_family, nft_forward_chain)
        self.nft_exec_dict(command)

  def delete_nat_rule(self, _family: str, _chain: str, _handle: str):
    delete_command = self.get_base_dict()
    _rule_opts = {'family': _family,
                  'table': 'nat',
                  'chain': _chain,
                  'handle': _handle }
    _delete = {'delete': {'rule': _rule_opts} }
    delete_command["nftables"].append(_delete)

    return self.nft_exec_dict(delete_command)

  def delete_filter_rule(self, _family: str, _chain: str, _handle: str):
    delete_command = self.get_base_dict()
    _rule_opts = {'family': _family,
                  'table': 'filter',
                  'chain': _chain,
                  'handle': _handle }
    _delete = {'delete': {'rule': _rule_opts} }
    delete_command["nftables"].append(_delete)

    return self.nft_exec_dict(delete_command)

  def snat_rule(self, _family: str, snat_target: str, source_address: str):
    chain_name = self.nft_chain_names[_family]['nat']['postrouting']

    # no postrouting chain; may occur if docker has ipv6 disabled.
    if not chain_name: return

    # Command: nft list chain <family> nat <chain_name>
    _chain_opts = {'family': _family, 'table': 'nat', 'name': chain_name}
    _list = {'list': {'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if not kernel_ruleset:
      return

    rule_position = 0
    rule_handle = None
    rule_found = False
    for _object in kernel_ruleset["nftables"]:
      if not _object.get("rule"):
        continue

      rule = _object["rule"]
      if not rule.get("comment") or not rule["comment"] == "mailcow":
        rule_position += 1
        continue

      rule_found = True
      rule_handle = rule["handle"]
      break

    dest_net = ipaddress.ip_network(source_address, strict=False)
    target_net = ipaddress.ip_network(snat_target, strict=False)

    if rule_found:
      saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"]
      saddr_len = int(rule["expr"][0]["match"]["right"]["prefix"]["len"])

      daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"]
      daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"])

      target_ip = rule["expr"][3]["snat"]["addr"]

      saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len), strict=False)
      daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len), strict=False)
      current_target_net = ipaddress.ip_network(target_ip, strict=False)

      match = all((
        dest_net == saddr_net,
        dest_net == daddr_net,
        target_net == current_target_net
      ))
      try:
        if rule_position == 0:
          if not match:
            # Position 0: it is a mailcow rule, but it does not have the same parameters
            if self.delete_nat_rule(_family, chain_name, rule_handle):
              self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule does not match configured parameters')
        else:
          # Position > 0 and it is a mailcow rule
          if self.delete_nat_rule(_family, chain_name, rule_handle):
            self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule is at position {rule_position}')
      except:
        self.logger.logCrit(f"Error running SNAT on {_family}, retrying...")
    else:
      # rule not found
      json_command = self.get_base_dict()
      try:
        snat_dict = {'snat': {'addr': str(target_net.network_address)} }

        expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0}
        counter_dict = {'counter': expr_counter}

        prefix_dict = {'prefix': {'addr': str(dest_net.network_address), 'len': int(dest_net.prefixlen)} }
        payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} }
        match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} }

        payload_dict2 = {'payload': {'protocol': _family, 'field': "daddr"} }
        match_dict2 = {'match': {'op': '!=', 'left': payload_dict2, 'right': prefix_dict } }
        expr_list = [
          match_dict1,
          match_dict2,
          counter_dict,
          snat_dict
        ]
        rule_fields = {'family': _family,
                       'table': 'nat',
                       'chain': chain_name,
                       'comment': "mailcow",
                       'expr': expr_list }

        insert_dict = {'insert': {'rule': rule_fields} }
        json_command["nftables"].append(insert_dict)
        if self.nft_exec_dict(json_command):
          self.logger.logInfo(f'Added {_family} nat {chain_name} rule for source network {dest_net} to {target_net}')
      except:
        self.logger.logCrit(f"Error running SNAT on {_family}, retrying...")

  def get_chain_handle(self, _family: str, _table: str, chain_name: str):
    chain_handle = None
    # Command: 'nft list chains {family}'
    _list = {'list': {'chains': {'family': _family} } }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("chain"):
          continue
        chain = _object["chain"]
        if chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name:
          chain_handle = chain["handle"]
          break
    return chain_handle

  def get_rules_handle(self, _family: str, _table: str, chain_name: str, _comment_filter = "mailcow"):
    rule_handle = []
    # Command: 'nft list chain {family} {table} {chain_name}'
    _chain_opts = {'family': _family, 'table': _table, 'name': chain_name}
    _list = {'list': {'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)

    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        rule = _object["rule"]
        if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name:
          if rule.get("comment") and rule["comment"] == _comment_filter:
            rule_handle.append(rule["handle"])
    return rule_handle

  def get_ban_ip_dict(self, ipaddr: str, _family: str):
    json_command = self.get_base_dict()

    expr_opt = []
    ipaddr_net = ipaddress.ip_network(ipaddr, strict=False)
    right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } }

    left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} }
    match_dict = {'op': '==', 'left': left_dict, 'right': right_dict }
    expr_opt.append({'match': match_dict})

    counter_dict = {'counter': {'family': _family, 'table': "filter", 'packets': 0, 'bytes': 0} }
    expr_opt.append(counter_dict)

    expr_opt.append({'drop': "null"})

    rule_dict = {'family': _family, 'table': "filter", 'chain': self.chain_name, 'expr': expr_opt}

    base_dict = {'insert': {'rule': rule_dict} }
    json_command["nftables"].append(base_dict)

    return json_command

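  # Illustration (not part of the original file): get_ban_ip_dict('192.0.2.0/24', 'ip')
  # yields an insert command that puts a drop rule at the top of the MAILCOW chain:
  #   {'nftables': [{'metainfo': {'json_schema_version': 1}},
  #     {'insert': {'rule': {'family': 'ip', 'table': 'filter', 'chain': 'MAILCOW',
  #       'expr': [{'match': {'op': '==',
  #                           'left': {'payload': {'protocol': 'ip', 'field': 'saddr'}},
  #                           'right': {'prefix': {'addr': '192.0.2.0', 'len': 24}}}},
  #                {'counter': {'family': 'ip', 'table': 'filter', 'packets': 0, 'bytes': 0}},
  #                {'drop': 'null'}]}}}]}
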
  def get_unban_ip_dict(self, ipaddr: str, _family: str):
    json_command = self.get_base_dict()
    # Command: 'nft list chain {family} filter MAILCOW'
    _chain_opts = {'family': _family, 'table': 'filter', 'name': self.chain_name}
    _list = {'list': {'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    rule_handle = None
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        rule = _object["rule"]["expr"][0]["match"]
        if not "payload" in rule["left"]:
          continue
        left_opt = rule["left"]["payload"]
        if not left_opt["protocol"] == _family:
          continue
        if not left_opt["field"] == "saddr":
          continue

        # ip currently banned
        rule_right = rule["right"]
        if isinstance(rule_right, dict):
          current_rule_ip = rule_right["prefix"]["addr"] + '/' + str(rule_right["prefix"]["len"])
        else:
          current_rule_ip = rule_right
        current_rule_net = ipaddress.ip_network(current_rule_ip)

        # ip to unban
        candidate_net = ipaddress.ip_network(ipaddr, strict=False)

        if current_rule_net == candidate_net:
          rule_handle = _object["rule"]["handle"]
          break

    if rule_handle is not None:
      mailcow_rule = {'family': _family, 'table': 'filter', 'chain': self.chain_name, 'handle': rule_handle}
      delete_rule = {'delete': {'rule': mailcow_rule} }
      json_command["nftables"].append(delete_rule)
    else:
      return False

    return json_command

  def check_mailcow_chains(self, family: str, chain: str):
    position = 0
    rule_found = False
    chain_name = self.nft_chain_names[family]['filter'][chain]

    if not chain_name: return None

    _chain_opts = {'family': family, 'table': 'filter', 'name': chain_name}
    _list = {'list': {'chain': _chain_opts}}
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue
        rule = _object["rule"]
        if rule.get("comment") and rule["comment"] == "mailcow":
          rule_found = True
          break

        position += 1

    return position if rule_found else False

  def create_mailcow_isolation_rule(self, _interface: str, _dports: list, _allow: str = ""):
    family = "ip"
    table = "filter"
    comment_filter_drop = "mailcow isolation"
    comment_filter_allow = "mailcow isolation allow"
    json_command = self.get_base_dict()

    # Delete old mailcow isolation rules
    handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_drop)
    for handle in handles:
      self.delete_filter_rule(family, self.chain_name, handle)
    handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_allow)
    for handle in handles:
      self.delete_filter_rule(family, self.chain_name, handle)

    # insert mailcow isolation rule
    _match_dict_drop = [
      {'match': {'op': '!=', 'left': {'meta': {'key': 'iifname'}}, 'right': _interface}},
      {'match': {'op': '==', 'left': {'meta': {'key': 'oifname'}}, 'right': _interface}},
      {'match': {'op': '==', 'left': {'payload': {'protocol': 'tcp', 'field': 'dport'}}, 'right': {'set': _dports}}},
      {'counter': {'packets': 0, 'bytes': 0}},
      {'drop': None}
    ]
    rule_drop = {'insert': {'rule': {
      'family': family,
      'table': table,
      'chain': self.chain_name,
      'comment': comment_filter_drop,
      'expr': _match_dict_drop
    }}}
    json_command["nftables"].append(rule_drop)

    # insert mailcow isolation allow rule
    if _allow != "":
      _match_dict_allow = [
        {'match': {'op': '==', 'left': {'payload': {'protocol': 'ip', 'field': 'saddr'}}, 'right': _allow}},
        {'match': {'op': '!=', 'left': {'meta': {'key': 'iifname'}}, 'right': _interface}},
        {'match': {'op': '==', 'left': {'meta': {'key': 'oifname'}}, 'right': _interface}},
        {'match': {'op': '==', 'left': {'payload': {'protocol': 'tcp', 'field': 'dport'}}, 'right': {'set': _dports}}},
        {'counter': {'packets': 0, 'bytes': 0}},
        {'accept': None}
      ]
      rule_allow = {'insert': {'rule': {
        'family': family,
        'table': table,
        'chain': self.chain_name,
        'comment': comment_filter_allow,
        'expr': _match_dict_allow
      }}}
      json_command["nftables"].append(rule_allow)

    success = self.nft_exec_dict(json_command)
    if success == False:
      self.logger.logCrit(f"Error adding {self.chain_name} isolation")
      return False

    return True
0 data/Dockerfiles/netfilter/modules/__init__.py (Normal file)
@@ -1,610 +0,0 @@
#!/usr/bin/env python3

import re
import os
import sys
import time
import atexit
import signal
import ipaddress
from collections import Counter
from random import randint
from threading import Thread
from threading import Lock
import redis
import json
import iptc
import dns.resolver
import dns.exception

while True:
  try:
    redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
    redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
    if "".__eq__(redis_slaveof_ip):
      r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
    else:
      r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
    r.ping()
  except Exception as ex:
    print('%s - trying again in 3 seconds' % (ex))
    time.sleep(3)
  else:
    break

pubsub = r.pubsub()

WHITELIST = []
BLACKLIST = []

bans = {}

quit_now = False
exit_code = 0
lock = Lock()

def log(priority, message):
  tolog = {}
  tolog['time'] = int(round(time.time()))
  tolog['priority'] = priority
  tolog['message'] = message
  r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
  print(message)

def logWarn(message):
  log('warn', message)

def logCrit(message):
  log('crit', message)

def logInfo(message):
  log('info', message)

def refreshF2boptions():
  global f2boptions
  global quit_now
  global exit_code

  f2boptions = {}

  if not r.get('F2B_OPTIONS'):
    f2boptions['ban_time'] = r.get('F2B_BAN_TIME')
    f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME')
    f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT')
    f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS')
    f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW')
    f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4')
    f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6')
  else:
    try:
      f2boptions = json.loads(r.get('F2B_OPTIONS'))
    except ValueError:
      print('Error loading F2B options: F2B_OPTIONS is not json')
      quit_now = True
      exit_code = 2

  verifyF2boptions(f2boptions)
  r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))

def verifyF2boptions(f2boptions):
  verifyF2boption(f2boptions,'ban_time', 1800)
  verifyF2boption(f2boptions,'max_ban_time', 10000)
  verifyF2boption(f2boptions,'ban_time_increment', True)
  verifyF2boption(f2boptions,'max_attempts', 10)
  verifyF2boption(f2boptions,'retry_window', 600)
  verifyF2boption(f2boptions,'netban_ipv4', 32)
  verifyF2boption(f2boptions,'netban_ipv6', 128)

def verifyF2boption(f2boptions, f2boption, f2bdefault):
  f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault

def refreshF2bregex():
  global f2bregex
  global quit_now
  global exit_code
  if not r.get('F2B_REGEX'):
    f2bregex = {}
    f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
    f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
    f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
    f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
    f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
  else:
    try:
      f2bregex = {}
      f2bregex = json.loads(r.get('F2B_REGEX'))
    except ValueError:
      print('Error loading F2B options: F2B_REGEX is not json')
      quit_now = True
      exit_code = 2

if r.exists('F2B_LOG'):
  r.rename('F2B_LOG', 'NETFILTER_LOG')

def mailcowChainOrder():
  global lock
  global quit_now
  global exit_code
  while not quit_now:
    time.sleep(10)
    with lock:
      filter4_table = iptc.Table(iptc.Table.FILTER)
      filter6_table = iptc.Table6(iptc.Table6.FILTER)
      filter4_table.refresh()
      filter6_table.refresh()
      for f in [filter4_table, filter6_table]:
        forward_chain = iptc.Chain(f, 'FORWARD')
        input_chain = iptc.Chain(f, 'INPUT')
        for chain in [forward_chain, input_chain]:
          target_found = False
          for position, item in enumerate(chain.rules):
            if item.target.name == 'MAILCOW':
              target_found = True
              if position > 2:
                logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
                quit_now = True
                exit_code = 2
          if not target_found:
            logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
            quit_now = True
            exit_code = 2

def ban(address):
  global lock
  refreshF2boptions()
  BAN_TIME = int(f2boptions['ban_time'])
  BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
  NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
  NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])

  ip = ipaddress.ip_address(address)
  if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
    ip = ip.ipv4_mapped
  address = str(ip)
  if ip.is_private or ip.is_loopback:
    return

  self_network = ipaddress.ip_network(address)

  with lock:
    temp_whitelist = set(WHITELIST)

    if temp_whitelist:
      for wl_key in temp_whitelist:
        wl_net = ipaddress.ip_network(wl_key, False)
        if wl_net.overlaps(self_network):
          logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
          return

  net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = str(net)

  if not net in bans:
    bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}

  bans[net]['attempts'] += 1
  bans[net]['last_attempt'] = time.time()

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
    NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
    logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
    if type(ip) is ipaddress.IPv4Address:
      with lock:
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
        rule = iptc.Rule()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    else:
      with lock:
        chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
        rule = iptc.Rule6()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
  else:
    logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))

def unban(net):
  global lock
  if not net in bans:
    logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return
  logInfo('Unbanning %s' % net)
  if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  else:
    with lock:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
      rule = iptc.Rule6()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  r.hdel('F2B_ACTIVE_BANS', '%s' % net)
  r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
  if net in bans:
    bans[net]['attempts'] = 0
    bans[net]['ban_counter'] += 1

def permBan(net, unban=False):
  global lock
  if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule not in chain.rules and not unban:
        logCrit('Add host/network %s to blacklist' % net)
        chain.insert_rule(rule)
        r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
      elif rule in chain.rules and unban:
        logCrit('Remove host/network %s from blacklist' % net)
        chain.delete_rule(rule)
        r.hdel('F2B_PERM_BANS', '%s' % net)
|
||||
else:
|
||||
with lock:
|
||||
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
|
||||
rule = iptc.Rule6()
|
||||
rule.src = net
|
||||
target = iptc.Target(rule, "REJECT")
|
||||
rule.target = target
|
||||
if rule not in chain.rules and not unban:
|
||||
logCrit('Add host/network %s to blacklist' % net)
|
||||
chain.insert_rule(rule)
|
||||
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
|
||||
elif rule in chain.rules and unban:
|
||||
logCrit('Remove host/network %s from blacklist' % net)
|
||||
chain.delete_rule(rule)
|
||||
r.hdel('F2B_PERM_BANS', '%s' % net)
|
||||
|
||||
def quit(signum, frame):
|
||||
global quit_now
|
||||
quit_now = True
|
||||
|
||||
def clear():
|
||||
global lock
|
||||
logInfo('Clearing all bans')
|
||||
for net in bans.copy():
|
||||
unban(net)
|
||||
with lock:
|
||||
filter4_table = iptc.Table(iptc.Table.FILTER)
|
||||
filter6_table = iptc.Table6(iptc.Table6.FILTER)
|
||||
for filter_table in [filter4_table, filter6_table]:
|
||||
filter_table.autocommit = False
|
||||
forward_chain = iptc.Chain(filter_table, "FORWARD")
|
||||
input_chain = iptc.Chain(filter_table, "INPUT")
|
||||
mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
|
||||
if mailcow_chain in filter_table.chains:
|
||||
for rule in mailcow_chain.rules:
|
||||
mailcow_chain.delete_rule(rule)
|
||||
for rule in forward_chain.rules:
|
||||
if rule.target.name == 'MAILCOW':
|
||||
forward_chain.delete_rule(rule)
|
||||
for rule in input_chain.rules:
|
||||
if rule.target.name == 'MAILCOW':
|
||||
input_chain.delete_rule(rule)
|
||||
filter_table.delete_chain("MAILCOW")
|
||||
filter_table.commit()
|
||||
filter_table.refresh()
|
||||
filter_table.autocommit = True
|
||||
r.delete('F2B_ACTIVE_BANS')
|
||||
r.delete('F2B_PERM_BANS')
|
||||
pubsub.unsubscribe()
|
||||
|
||||
def watch():
|
||||
logInfo('Watching Redis channel F2B_CHANNEL')
|
||||
pubsub.subscribe('F2B_CHANNEL')
|
||||
|
||||
global quit_now
|
||||
global exit_code
|
||||
|
||||
while not quit_now:
|
||||
try:
|
||||
for item in pubsub.listen():
|
||||
refreshF2bregex()
|
||||
for rule_id, rule_regex in f2bregex.items():
|
||||
if item['data'] and item['type'] == 'message':
|
||||
try:
|
||||
result = re.search(rule_regex, item['data'])
|
||||
except re.error:
|
||||
result = False
|
||||
if result:
|
||||
addr = result.group(1)
|
||||
ip = ipaddress.ip_address(addr)
|
||||
if ip.is_private or ip.is_loopback:
|
||||
continue
|
||||
logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
|
||||
ban(addr)
|
||||
except Exception as ex:
|
||||
logWarn('Error reading log line from pubsub: %s' % ex)
|
||||
quit_now = True
|
||||
exit_code = 2
|
||||
|
||||
def snat4(snat_target):
|
||||
global lock
|
||||
global quit_now
|
||||
|
||||
def get_snat4_rule():
|
||||
rule = iptc.Rule()
|
||||
rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
|
||||
rule.dst = '!' + rule.src
|
||||
target = rule.create_target("SNAT")
|
||||
target.to_source = snat_target
|
||||
match = rule.create_match("comment")
|
||||
match.comment = f'{int(round(time.time()))}'
|
||||
return rule
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
with lock:
|
||||
try:
|
||||
table = iptc.Table('nat')
|
||||
table.refresh()
|
||||
chain = iptc.Chain(table, 'POSTROUTING')
|
||||
table.autocommit = False
|
||||
new_rule = get_snat4_rule()
|
||||
|
||||
if not chain.rules:
|
||||
# if there are no rules in the chain, insert the new rule directly
|
||||
logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
|
||||
chain.insert_rule(new_rule)
|
||||
else:
|
||||
for position, rule in enumerate(chain.rules):
|
||||
if not hasattr(rule.target, 'parameter'):
|
||||
continue
|
||||
match = all((
|
||||
new_rule.get_src() == rule.get_src(),
|
||||
new_rule.get_dst() == rule.get_dst(),
|
||||
new_rule.target.parameters == rule.target.parameters,
|
||||
new_rule.target.name == rule.target.name
|
||||
))
|
||||
if position == 0:
|
||||
if not match:
|
||||
logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
|
||||
chain.insert_rule(new_rule)
|
||||
else:
|
||||
if match:
|
||||
logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
|
||||
chain.delete_rule(rule)
|
||||
|
||||
table.commit()
|
||||
table.autocommit = True
|
||||
except:
|
||||
print('Error running SNAT4, retrying...')
|
||||
|
||||
def snat6(snat_target):
|
||||
global lock
|
||||
global quit_now
|
||||
|
||||
def get_snat6_rule():
|
||||
rule = iptc.Rule6()
|
||||
rule.src = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64')
|
||||
rule.dst = '!' + rule.src
|
||||
target = rule.create_target("SNAT")
|
||||
target.to_source = snat_target
|
||||
return rule
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
with lock:
|
||||
try:
|
||||
table = iptc.Table6('nat')
|
||||
table.refresh()
|
||||
chain = iptc.Chain(table, 'POSTROUTING')
|
||||
table.autocommit = False
|
||||
if get_snat6_rule() not in chain.rules:
|
||||
logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target))
|
||||
chain.insert_rule(get_snat6_rule())
|
||||
table.commit()
|
||||
else:
|
||||
for position, item in enumerate(chain.rules):
|
||||
if item == get_snat6_rule():
|
||||
if position != 0:
|
||||
chain.delete_rule(get_snat6_rule())
|
||||
table.commit()
|
||||
table.autocommit = True
|
||||
except:
|
||||
print('Error running SNAT6, retrying...')
|
||||
|
||||
def autopurge():
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
refreshF2boptions()
|
||||
BAN_TIME = int(f2boptions['ban_time'])
|
||||
MAX_BAN_TIME = int(f2boptions['max_ban_time'])
|
||||
BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
|
||||
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
||||
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
|
||||
if QUEUE_UNBAN:
|
||||
for net in QUEUE_UNBAN:
|
||||
unban(str(net))
|
||||
for net in bans.copy():
|
||||
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
||||
NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
|
||||
TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
|
||||
if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME:
|
||||
unban(net)
|
||||
|
||||
def isIpNetwork(address):
|
||||
try:
|
||||
ipaddress.ip_network(address, False)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def genNetworkList(list):
|
||||
resolver = dns.resolver.Resolver()
|
||||
hostnames = []
|
||||
networks = []
|
||||
for key in list:
|
||||
if isIpNetwork(key):
|
||||
networks.append(key)
|
||||
else:
|
||||
hostnames.append(key)
|
||||
for hostname in hostnames:
|
||||
hostname_ips = []
|
||||
for rdtype in ['A', 'AAAA']:
|
||||
try:
|
||||
answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
|
||||
except dns.exception.Timeout:
|
||||
logInfo('Hostname %s timedout on resolve' % hostname)
|
||||
break
|
||||
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
|
||||
continue
|
||||
except dns.exception.DNSException as dnsexception:
|
||||
logInfo('%s' % dnsexception)
|
||||
continue
|
||||
for rdata in answer:
|
||||
hostname_ips.append(rdata.to_text())
|
||||
networks.extend(hostname_ips)
|
||||
return set(networks)
|
||||
|
||||
def whitelistUpdate():
|
||||
global lock
|
||||
global quit_now
|
||||
global WHITELIST
|
||||
while not quit_now:
|
||||
start_time = time.time()
|
||||
list = r.hgetall('F2B_WHITELIST')
|
||||
new_whitelist = []
|
||||
if list:
|
||||
new_whitelist = genNetworkList(list)
|
||||
with lock:
|
||||
if Counter(new_whitelist) != Counter(WHITELIST):
|
||||
WHITELIST = new_whitelist
|
||||
logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
|
||||
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||
|
||||
def blacklistUpdate():
|
||||
global quit_now
|
||||
global BLACKLIST
|
||||
while not quit_now:
|
||||
start_time = time.time()
|
||||
list = r.hgetall('F2B_BLACKLIST')
|
||||
new_blacklist = []
|
||||
if list:
|
||||
new_blacklist = genNetworkList(list)
|
||||
if Counter(new_blacklist) != Counter(BLACKLIST):
|
||||
addban = set(new_blacklist).difference(BLACKLIST)
|
||||
delban = set(BLACKLIST).difference(new_blacklist)
|
||||
BLACKLIST = new_blacklist
|
||||
logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
|
||||
if addban:
|
||||
for net in addban:
|
||||
permBan(net=net)
|
||||
if delban:
|
||||
for net in delban:
|
||||
permBan(net=net, unban=True)
|
||||
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||
|
||||
def initChain():
|
||||
# Is called before threads start, no locking
|
||||
print("Initializing mailcow netfilter chain")
|
||||
# IPv4
|
||||
if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
|
||||
iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
|
||||
for c in ['FORWARD', 'INPUT']:
|
||||
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
|
||||
rule = iptc.Rule()
|
||||
rule.src = '0.0.0.0/0'
|
||||
rule.dst = '0.0.0.0/0'
|
||||
target = iptc.Target(rule, "MAILCOW")
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
chain.insert_rule(rule)
|
||||
# IPv6
|
||||
if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains:
|
||||
iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW")
|
||||
for c in ['FORWARD', 'INPUT']:
|
||||
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
|
||||
rule = iptc.Rule6()
|
||||
rule.src = '::/0'
|
||||
rule.dst = '::/0'
|
||||
target = iptc.Target(rule, "MAILCOW")
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
chain.insert_rule(rule)
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
# In case a previous session was killed without cleanup
|
||||
clear()
|
||||
# Reinit MAILCOW chain
|
||||
initChain()
|
||||
|
||||
watch_thread = Thread(target=watch)
|
||||
watch_thread.daemon = True
|
||||
watch_thread.start()
|
||||
|
||||
if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
|
||||
try:
|
||||
snat_ip = os.getenv('SNAT_TO_SOURCE')
|
||||
snat_ipo = ipaddress.ip_address(snat_ip)
|
||||
if type(snat_ipo) is ipaddress.IPv4Address:
|
||||
snat4_thread = Thread(target=snat4,args=(snat_ip,))
|
||||
snat4_thread.daemon = True
|
||||
snat4_thread.start()
|
||||
except ValueError:
|
||||
print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
|
||||
|
||||
if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
|
||||
try:
|
||||
snat_ip = os.getenv('SNAT6_TO_SOURCE')
|
||||
snat_ipo = ipaddress.ip_address(snat_ip)
|
||||
if type(snat_ipo) is ipaddress.IPv6Address:
|
||||
snat6_thread = Thread(target=snat6,args=(snat_ip,))
|
||||
snat6_thread.daemon = True
|
||||
snat6_thread.start()
|
||||
except ValueError:
|
||||
print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')
|
||||
|
||||
autopurge_thread = Thread(target=autopurge)
|
||||
autopurge_thread.daemon = True
|
||||
autopurge_thread.start()
|
||||
|
||||
mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
|
||||
mailcowchainwatch_thread.daemon = True
|
||||
mailcowchainwatch_thread.start()
|
||||
|
||||
blacklistupdate_thread = Thread(target=blacklistUpdate)
|
||||
blacklistupdate_thread.daemon = True
|
||||
blacklistupdate_thread.start()
|
||||
|
||||
whitelistupdate_thread = Thread(target=whitelistUpdate)
|
||||
whitelistupdate_thread.daemon = True
|
||||
whitelistupdate_thread.start()
|
||||
|
||||
signal.signal(signal.SIGTERM, quit)
|
||||
atexit.register(clear)
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(0.5)
|
||||
|
||||
sys.exit(exit_code)
|
||||
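Note on the ban()/autopurge() interplay above: with ban_time_increment enabled, a repeat offender's ban doubles per previous ban (NET_BAN_TIME = BAN_TIME * 2 ** ban_counter), and autopurge() lifts a ban once the quiet period exceeds NET_BAN_TIME or max_ban_time, so max_ban_time effectively caps the schedule. A stand-alone sketch of that progression (the two option values are assumed defaults, not read from Redis):

# Illustrative only -- mirrors the NET_BAN_TIME formula in the script above.
ban_time = 1800        # assumed f2boptions['ban_time'], seconds
max_ban_time = 10000   # assumed f2boptions['max_ban_time'], seconds
for ban_counter in range(5):
  effective = min(ban_time * 2 ** ban_counter, max_ban_time)
  print(ban_counter, effective)   # 1800, 3600, 7200, then capped at 10000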
data/Dockerfiles/nginx/Dockerfile (new file, 18 lines)
@ -0,0 +1,18 @@
FROM nginx:alpine
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"

ENV PIP_BREAK_SYSTEM_PACKAGES=1

RUN apk add --no-cache nginx \
    python3 \
    py3-pip && \
    pip install --upgrade pip && \
    pip install Jinja2

RUN mkdir -p /etc/nginx/includes

COPY ./bootstrap.py /
COPY ./docker-entrypoint.sh /

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["nginx", "-g", "daemon off;"]
data/Dockerfiles/nginx/bootstrap.py (new file, 100 lines)
@ -0,0 +1,100 @@
import os
import subprocess
from jinja2 import Environment, FileSystemLoader

def includes_conf(env, template_vars):
    server_name = "server_name.active"
    listen_plain = "listen_plain.active"
    listen_ssl = "listen_ssl.active"

    server_name_config = f"server_name {template_vars['MAILCOW_HOSTNAME']} autodiscover.* autoconfig.* {' '.join(template_vars['ADDITIONAL_SERVER_NAMES'])};"
    listen_plain_config = f"listen {template_vars['HTTP_PORT']};"
    listen_ssl_config = f"listen {template_vars['HTTPS_PORT']};"
    if template_vars['ENABLE_IPV6']:
        listen_plain_config += f"\nlisten [::]:{template_vars['HTTP_PORT']};"
        listen_ssl_config += f"\nlisten [::]:{template_vars['HTTPS_PORT']} ssl;"
    listen_ssl_config += "\nhttp2 on;"

    with open(f"/etc/nginx/conf.d/{server_name}", "w") as f:
        f.write(server_name_config)

    with open(f"/etc/nginx/conf.d/{listen_plain}", "w") as f:
        f.write(listen_plain_config)

    with open(f"/etc/nginx/conf.d/{listen_ssl}", "w") as f:
        f.write(listen_ssl_config)

def sites_default_conf(env, template_vars):
    config_name = "sites-default.conf"
    template = env.get_template(f"{config_name}.j2")
    config = template.render(template_vars)

    with open(f"/etc/nginx/includes/{config_name}", "w") as f:
        f.write(config)

def nginx_conf(env, template_vars):
    config_name = "nginx.conf"
    template = env.get_template(f"{config_name}.j2")
    config = template.render(template_vars)

    with open(f"/etc/nginx/{config_name}", "w") as f:
        f.write(config)

def prepare_template_vars():
    ipv4_network = os.getenv("IPV4_NETWORK", "172.22.1")
    additional_server_names = os.getenv("ADDITIONAL_SERVER_NAMES", "")
    trusted_proxies = os.getenv("TRUSTED_PROXIES", "")

    template_vars = {
        'IPV4_NETWORK': ipv4_network,
        'TRUSTED_PROXIES': [item.strip() for item in trusted_proxies.split(",") if item.strip()],
        'SKIP_RSPAMD': os.getenv("SKIP_RSPAMD", "n").lower() in ("y", "yes"),
        'SKIP_SOGO': os.getenv("SKIP_SOGO", "n").lower() in ("y", "yes"),
        'NGINX_USE_PROXY_PROTOCOL': os.getenv("NGINX_USE_PROXY_PROTOCOL", "n").lower() in ("y", "yes"),
        'MAILCOW_HOSTNAME': os.getenv("MAILCOW_HOSTNAME", ""),
        'ADDITIONAL_SERVER_NAMES': [item.strip() for item in additional_server_names.split(",") if item.strip()],
        'HTTP_PORT': os.getenv("HTTP_PORT", "80"),
        'HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
        'SOGOHOST': os.getenv("SOGOHOST", ipv4_network + ".248"),
        'RSPAMDHOST': os.getenv("RSPAMDHOST", "rspamd-mailcow"),
        'PHPFPMHOST': os.getenv("PHPFPMHOST", "php-fpm-mailcow"),
        'ENABLE_IPV6': os.getenv("ENABLE_IPV6", "true").lower() != "false",
        'HTTP_REDIRECT': os.getenv("HTTP_REDIRECT", "n").lower() in ("y", "yes"),
    }

    ssl_dir = '/etc/ssl/mail/'
    template_vars['valid_cert_dirs'] = []
    for d in os.listdir(ssl_dir):
        full_path = os.path.join(ssl_dir, d)
        if not os.path.isdir(full_path):
            continue

        cert_path = os.path.join(full_path, 'cert.pem')
        key_path = os.path.join(full_path, 'key.pem')
        domains_path = os.path.join(full_path, 'domains')

        if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
            with open(domains_path, 'r') as file:
                domains = file.read().strip()
            domains_list = domains.split()
            if domains_list and template_vars["MAILCOW_HOSTNAME"] not in domains_list:
                template_vars['valid_cert_dirs'].append({
                    'cert_path': full_path + '/',
                    'domains': domains
                })

    return template_vars

def main():
    env = Environment(loader=FileSystemLoader('./etc/nginx/conf.d/templates'))

    # Render config
    print("Render config")
    template_vars = prepare_template_vars()
    sites_default_conf(env, template_vars)
    nginx_conf(env, template_vars)
    includes_conf(env, template_vars)


if __name__ == "__main__":
    main()
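The bootstrap above renders every nginx config from a Jinja2 template plus one flat dict of environment-derived variables. A minimal self-contained sketch of that render flow (inline template instead of the FileSystemLoader used by bootstrap.py; the values are made-up):

# Illustrative only -- same Environment/render pattern as bootstrap.py.
from jinja2 import Environment

tmpl = Environment().from_string(
    "listen {{ HTTP_PORT }};{% if ENABLE_IPV6 %}\nlisten [::]:{{ HTTP_PORT }};{% endif %}")
print(tmpl.render({'HTTP_PORT': '80', 'ENABLE_IPV6': True}))
# listen 80;
# listen [::]:80;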
data/Dockerfiles/nginx/docker-entrypoint.sh (new executable file, 26 lines)
@ -0,0 +1,26 @@
#!/bin/sh

PHPFPMHOST=${PHPFPMHOST:-"php-fpm-mailcow"}
SOGOHOST=${SOGOHOST:-"$IPV4_NETWORK.248"}
RSPAMDHOST=${RSPAMDHOST:-"rspamd-mailcow"}

until ping ${PHPFPMHOST} -c1 > /dev/null; do
  echo "Waiting for PHP..."
  sleep 1
done
if ! printf "%s\n" "${SKIP_SOGO}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
  until ping ${SOGOHOST} -c1 > /dev/null; do
    echo "Waiting for SOGo..."
    sleep 1
  done
fi
if ! printf "%s\n" "${SKIP_RSPAMD}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
  until ping ${RSPAMDHOST} -c1 > /dev/null; do
    echo "Waiting for Rspamd..."
    sleep 1
  done
fi

python3 /bootstrap.py

exec "$@"
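The entrypoint gates bootstrap and nginx start-up on ICMP reachability of the upstream containers. The same readiness gate could be expressed as a TCP probe; a hedged Python sketch (port 9000 is the conventional PHP-FPM port and an assumption here -- the entrypoint itself only pings):

# Illustrative only -- TCP-connect analogue of the ping loops above.
import socket, time

def wait_for(host, port):
    while True:
        try:
            with socket.create_connection((host, port), timeout=2):
                return
        except OSError:
            time.sleep(1)

# wait_for('php-fpm-mailcow', 9000)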
@ -1,6 +1,8 @@
-FROM alpine:3.17
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM alpine:3.21
+
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
+
 ARG PIP_BREAK_SYSTEM_PACKAGES=1
 WORKDIR /app

 #RUN addgroup -S olefy && adduser -S olefy -G olefy \
@ -32,6 +32,13 @@ import time
 import magic
 import re

+skip_olefy = os.getenv('SKIP_OLEFY', '')
+
+if skip_olefy.lower() in ['yes', 'y']:
+    print("SKIP_OLEFY=y, skipping Olefy...")
+    time.sleep(365 * 24 * 60 * 60)
+    sys.exit(0)
+
 # merge variables from /etc/olefy.conf and the defaults
 olefy_listen_addr_string = os.getenv('OLEFY_BINDADDRESS', '127.0.0.1,::1')
 olefy_listen_port = int(os.getenv('OLEFY_BINDPORT', '10050'))
@ -113,7 +120,7 @@ def oletools( stream, tmp_file_name, lid ):
     out = bytes(out.decode('utf-8', 'ignore').replace(' ', ' ').replace('\t', '').replace('\n', '').replace('XLMMacroDeobfuscator: pywin32 is not installed (only is required if you want to use MS Excel)', ''), encoding="utf-8")
     failed = False
     if out.__len__() < 30:
-        logger.error('{} olevba returned <30 chars - rc: {!r}, response: {!r}, error: {!r}'.format(lid,cmd_tmp.returncode,
+        logger.error('{} olevba returned <30 chars - rc: {!r}, response: {!r}, error: {!r}'.format(lid,cmd_tmp.returncode,
             out.decode('utf-8', 'ignore'), err.decode('utf-8', 'ignore')))
         out = b'[ { "error": "Unhandled error - too short olevba response" } ]'
         failed = True
@ -1,18 +1,19 @@
-FROM php:8.2-fpm-alpine3.17
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM php:8.2-fpm-alpine3.21
+
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
+
 # renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG APCU_PECL_VERSION=5.1.22
-# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG IMAGICK_PECL_VERSION=3.7.0
+ARG APCU_PECL_VERSION=5.1.27
+# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
+ARG IMAGICK_PECL_VERSION=3.8.0
 # renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG MAILPARSE_PECL_VERSION=3.1.6
+ARG MAILPARSE_PECL_VERSION=3.1.9
 # renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG MEMCACHED_PECL_VERSION=3.2.0
-# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG REDIS_PECL_VERSION=6.0.1
-# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=^v(?<version>.*)$
-ARG COMPOSER_VERSION=2.6.5
+ARG MEMCACHED_PECL_VERSION=3.3.0
+# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
+ARG REDIS_PECL_VERSION=6.2.0
+# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
+ARG COMPOSER_VERSION=2.8.6

 RUN apk add -U --no-cache autoconf \
     aspell-dev \

@ -76,7 +77,7 @@ RUN apk add -U --no-cache autoconf \
     --with-webp \
     --with-xpm \
     --with-avif \
-    && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets sysvsem zip bcmath gmp \
+    && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \
     && docker-php-ext-configure imap --with-imap --with-imap-ssl \
     && docker-php-ext-install -j 4 imap \
     && curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \
@ -3,27 +3,37 @@
 function array_by_comma { local IFS=","; echo "$*"; }

 # Wait for containers
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
   echo "Waiting for SQL..."
   sleep 2
 done

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  REDIS_HOST=$REDIS_SLAVEOF_IP
+  REDIS_PORT=$REDIS_SLAVEOF_PORT
 else
-  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  REDIS_HOST="redis"
+  REDIS_PORT="6379"
 fi
+REDIS_CMDLINE="redis-cli -h ${REDIS_HOST} -p ${REDIS_PORT} -a ${REDISPASS} --no-auth-warning"

 until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
   echo "Waiting for Redis..."
   sleep 2
 done

+# Set redis session store
+echo -n '
+session.save_handler = redis
+session.save_path = "tcp://'${REDIS_HOST}':'${REDIS_PORT}'?auth='${REDISPASS}'"
+' > /usr/local/etc/php/conf.d/session_store.ini
+
 # Check mysql_upgrade (master and slave)
 CONTAINER_ID=
 until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
-  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
+  CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
   echo "Could not get mysql-mailcow container id... trying again"
   sleep 2
 done
 echo "MySQL @ ${CONTAINER_ID}"

@ -34,7 +44,7 @@ until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
     echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
     break
   fi
-  SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
+  SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
   SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
   SQL_LOOP_C=$((SQL_LOOP_C+1))
   echo "SQL upgrade iteration #${SQL_LOOP_C}"

@ -43,7 +53,7 @@ until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
     echo "MySQL applied an upgrade, debug output:"
     echo ${SQL_FULL_UPGRADE_RETURN}
     sleep 3
-    while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+    while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
       echo "Waiting for SQL to return, please wait"
       sleep 2
     done

@ -59,21 +69,21 @@ done

 # doing post-installation stuff, if SQL was upgraded (master and slave)
 if [ ${SQL_CHANGED} -eq 1 ]; then
-  POSTFIX=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
+  POSTFIX=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
   if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
     echo "Could not determine Postfix container ID, skipping Postfix restart."
   else
     echo "Restarting Postfix"
-    curl -X POST --silent --insecure https://dockerapi/containers/${POSTFIX}/restart | jq -r '.msg'
+    curl -X POST --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/restart | jq -r '.msg'
     echo "Sleeping 5 seconds..."
     sleep 5
   fi
 fi

 # Check mysql tz import (master and slave)
-TZ_CHECK=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
+TZ_CHECK=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
 if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
-  SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
+  SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
   echo "MySQL mysql_tzinfo_to_sql - debug output:"
   echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
 fi

@ -110,11 +120,11 @@ if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   while read line
   do
     DOMAIN_ARR+=("$line")
-  done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
+  done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
   while read line
   do
     DOMAIN_ARR+=("$line")
-  done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)
+  done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)

   if [[ ! -z ${DOMAIN_ARR} ]]; then
     for domain in "${DOMAIN_ARR[@]}"; do

@ -136,13 +146,13 @@ if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   VALIDATED_IPS=$(array_by_comma ${VALIDATED_API_ALLOW_FROM_ARR[*]})
   if [[ ! -z ${VALIDATED_IPS} ]]; then
     if [[ ${API_KEY} != "invalid" ]] && [[ ! -z ${API_KEY} ]]; then
-      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+      mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
 DELETE FROM api WHERE access = 'rw';
 INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY}", "1", "${VALIDATED_IPS}", "rw");
 EOF
     fi
     if [[ ${API_KEY_READ_ONLY} != "invalid" ]] && [[ ! -z ${API_KEY_READ_ONLY} ]]; then
-      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+      mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
 DELETE FROM api WHERE access = 'ro';
 INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY_READ_ONLY}", "1", "${VALIDATED_IPS}", "ro");
 EOF

@ -151,13 +161,13 @@ EOF
     fi

   # Create events (master only, STATUS for event on slave will be SLAVESIDE_DISABLED)
-  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+  mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
 DROP EVENT IF EXISTS clean_spamalias;
 DELIMITER //
 CREATE EVENT clean_spamalias
 ON SCHEDULE EVERY 1 DAY DO
 BEGIN
-  DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
+  DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP() AND permanent = 0;
 END;
 //
 DELIMITER ;
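The CONTAINER_ID/POSTFIX lookups above fetch the dockerapi container list and filter it by the two compose labels via jq. The same selection expressed in Python, for readers untangling the jq expression (the requests usage and the short "dockerapi" hostname are illustrative; the updated script reaches dockerapi via the project-scoped network name):

# Illustrative only -- Python equivalent of the curl | jq container-ID lookup.
import requests

def find_container_id(project, service):
    containers = requests.get('https://dockerapi/containers/json', verify=False).json()
    for c in containers:
        labels = c.get('Config', {}).get('Labels', {})
        if service in labels.get('com.docker.compose.service', '') \
           and project.lower() in labels.get('com.docker.compose.project', ''):
            return c.get('Id')
    return None

# find_container_id('mailcowdockerized', 'mysql-mailcow')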
data/Dockerfiles/postfix-tlspol/Dockerfile (new file, 50 lines)
@ -0,0 +1,50 @@
FROM golang:1.25-bookworm AS builder
WORKDIR /src

ENV CGO_ENABLED=0 \
    GO111MODULE=on \
    NOOPT=1 \
    VERSION=1.8.22

RUN git clone --branch v${VERSION} https://github.com/Zuplu/postfix-tlspol && \
    cd /src/postfix-tlspol && \
    scripts/build.sh build-only


FROM debian:bookworm-slim
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ENV LC_ALL=C

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    dirmngr \
    dnsutils \
    iputils-ping \
    sudo \
    supervisor \
    redis-tools \
    syslog-ng \
    syslog-ng-core \
    syslog-ng-mod-redis \
    tzdata \
    && rm -rf /var/lib/apt/lists/* \
    && touch /etc/default/locale

COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY postfix-tlspol.sh /opt/postfix-tlspol.sh
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
COPY --from=builder /src/postfix-tlspol/build/postfix-tlspol /usr/local/bin/postfix-tlspol

RUN chmod +x /opt/postfix-tlspol.sh \
    /usr/local/sbin/stop-supervisor.sh \
    /docker-entrypoint.sh
RUN rm -rf /tmp/* /var/tmp/*

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
data/Dockerfiles/postfix-tlspol/docker-entrypoint.sh (new executable file, 7 lines)
@ -0,0 +1,7 @@
#!/bin/bash

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

exec "$@"
data/Dockerfiles/postfix-tlspol/postfix-tlspol.sh (new executable file, 52 lines)
@ -0,0 +1,52 @@
#!/bin/bash

LOGLVL=info

if [ "${DEV_MODE}" != "n" ]; then
  echo -e "\e[31mEnabling debug mode\e[0m"
  set -x
  LOGLVL=debug
fi

[[ ! -d /etc/postfix-tlspol ]] && mkdir -p /etc/postfix-tlspol
[[ ! -d /var/lib/postfix-tlspol ]] && mkdir -p /var/lib/postfix-tlspol

until dig +short mailcow.email > /dev/null; do
  echo "Waiting for DNS..."
  sleep 1
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
  export REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

echo "Waiting for Postfix..."
until ping postfix -c1 > /dev/null; do
  sleep 1
done
echo "Postfix OK"

cat <<EOF > /etc/postfix-tlspol/config.yaml
server:
  address: 0.0.0.0:8642

  log-level: ${LOGLVL}

  prefetch: true

  cache-file: /var/lib/postfix-tlspol/cache.db

dns:
  # must support DNSSEC
  address: 127.0.0.11:53
EOF

/usr/local/bin/postfix-tlspol -config /etc/postfix-tlspol/config.yaml
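postfix-tlspol serves TLS-policy answers to Postfix as a socketmap on the address configured above (0.0.0.0:8642). A rough sketch of such a lookup; the netstring framing is the standard Postfix socketmap protocol, but the map name 'query' and the exact reply payload are assumptions taken from the upstream project's documentation, verify before relying on them:

# Illustrative only -- hand-rolled socketmap query against postfix-tlspol.
import socket

def socketmap_query(host, port, name, key):
    payload = f'{name} {key}'.encode()
    msg = str(len(payload)).encode() + b':' + payload + b','   # netstring framing
    with socket.create_connection((host, port), timeout=5) as s:
        s.sendall(msg)
        return s.recv(4096)   # e.g. a netstring starting with b'OK ...' on a match

# socketmap_query('postfix-tlspol', 8642, 'query', 'example.com')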
data/Dockerfiles/postfix-tlspol/stop-supervisor.sh (new executable file, 8 lines)
@ -0,0 +1,8 @@
#!/bin/bash

printf "READY\n";

while read line; do
  echo "Processing Event: $line" >&2;
  kill -3 $(cat "/var/run/supervisord.pid")
done < /dev/stdin
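stop-supervisor.sh implements supervisord's event-listener handshake: the listener prints READY, supervisord replies with one header line (plus payload) per subscribed event, and the listener reacts -- here by sending SIGQUIT (kill -3) to supervisord so the whole container stops when any program dies. A fuller Python sketch of the same protocol (the RESULT acknowledgement is part of the protocol that the shell one-shot above can skip, since it kills its parent anyway):

# Illustrative only -- Python event listener equivalent to stop-supervisor.sh.
import os, signal, sys

while True:
    sys.stdout.write('READY\n')
    sys.stdout.flush()
    header = sys.stdin.readline()                      # 'ver:3.0 ... eventname:... len:N'
    tokens = dict(t.split(':', 1) for t in header.split())
    sys.stdin.read(int(tokens.get('len', 0)))          # consume the event payload
    with open('/var/run/supervisord.pid') as f:
        os.kill(int(f.read().strip()), signal.SIGQUIT) # same effect as kill -3
    sys.stdout.write('RESULT 2\nOK')
    sys.stdout.flush()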
data/Dockerfiles/postfix-tlspol/supervisord.conf (new file, 25 lines)
@ -0,0 +1,25 @@
[supervisord]
pidfile=/var/run/supervisord.pid
nodaemon=true
user=root

[program:syslog-ng]
command=/usr/sbin/syslog-ng --foreground --no-caps
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:postfix-tlspol]
startsecs=10
autorestart=true
command=/opt/postfix-tlspol.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
data/Dockerfiles/postfix-tlspol/syslog-ng-redis_slave.conf (new file, 45 lines)
@ -0,0 +1,45 @@
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  dns_cache(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
destination d_redis_ui_log {
  redis(
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis1")
    port(`REDIS_SLAVEOF_PORT`)
    auth("`REDISPASS`")
    command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
filter f_mail { facility(mail); };
# start
# overriding warnings are still displayed when the entrypoint runs its initial check
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
# Some other warnings are ignored
filter f_ignore {
  not match("overriding earlier entry" value("MESSAGE"));
  not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
  not match("no SASL support" value("MESSAGE"));
  not facility (local0, local1, local2, local3, local4, local5, local6, local7);
};
# end
log {
  source(s_src);
  filter(f_ignore);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
};
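Both syslog-ng variants (this slave config and the standalone one that follows) LPUSH each mail-facility line as a JSON object into the Redis list POSTFIX_MAILLOG. On the consumer side such entries can be read back like this (host and password are placeholders; the mailcow UI uses its own Redis client for this):

# Illustrative only -- reading the POSTFIX_MAILLOG list fed by syslog-ng.
import json
import redis

r = redis.Redis(host='redis-mailcow', port=6379, password='example', decode_responses=True)
raw = r.rpop('POSTFIX_MAILLOG')    # LPUSH + RPOP behaves as a FIFO queue
if raw:
    entry = json.loads(raw)
    print(entry.get('time'), entry.get('program'), entry.get('message'))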
data/Dockerfiles/postfix-tlspol/syslog-ng.conf (new file, 45 lines)
@ -0,0 +1,45 @@
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  dns_cache(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
destination d_redis_ui_log {
  redis(
    host("redis-mailcow")
    persist-name("redis1")
    port(6379)
    auth("`REDISPASS`")
    command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
filter f_mail { facility(mail); };
# start
# overriding warnings are still displayed when the entrypoint runs its initial check
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
# Some other warnings are ignored
filter f_ignore {
  not match("overriding earlier entry" value("MESSAGE"));
  not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
  not match("no SASL support" value("MESSAGE"));
  not facility (local0, local1, local2, local3, local4, local5, local6, local7);
};
# end
log {
  source(s_src);
  filter(f_ignore);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
};
@ -1,8 +1,9 @@
-FROM debian:bullseye-slim
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM debian:bookworm-slim
+
+LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

 ARG DEBIAN_FRONTEND=noninteractive
-ENV LC_ALL C
+ENV LC_ALL=C

 RUN dpkg-divert --local --rename --add /sbin/initctl \
     && ln -sf /bin/true /sbin/initctl \

@ -59,4 +60,4 @@ EXPOSE 588

 ENTRYPOINT ["/docker-entrypoint.sh"]

-CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
@ -12,4 +12,15 @@ if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
   cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
 fi

+# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
+if grep -qE '\!SSLv2|\!SSLv3|>=TLSv1(\.[0-1])?$' /opt/postfix/conf/main.cf /opt/postfix/conf/extra.cf; then
+  sed -i '/\[openssl_init\]/a ssl_conf = ssl_configuration' /etc/ssl/openssl.cnf
+
+  echo "[ssl_configuration]" >> /etc/ssl/openssl.cnf
+  echo "system_default = tls_system_default" >> /etc/ssl/openssl.cnf
+  echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
+  echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
+  echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
+fi
+
 exec "$@"
@ -5,7 +5,7 @@ trap "postfix stop" EXIT
 [[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/

 # Wait for MySQL to warm-up
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
   echo "Waiting for database to come up..."
   sleep 2
 done

@ -390,12 +390,12 @@ hosts = unix:/var/run/mysqld/mysqld.sock
 dbname = ${DBNAME}
 query = SELECT goto FROM spamalias
   WHERE address='%s'
-  AND validity >= UNIX_TIMESTAMP()
+  AND (validity >= UNIX_TIMESTAMP() OR permanent != 0)
 EOF

 if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
   cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
-# This file can be edited.
+# This file can be edited.
 # Delete this file and restart postfix container to revert any changes.
 postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
   hostkarma.junkemailfilter.com=127.0.0.1*-2

@ -403,7 +406,6 @@ postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
   list.dnswl.org=127.0.[0..255].1*-4
   list.dnswl.org=127.0.[0..255].2*-6
   list.dnswl.org=127.0.[0..255].3*-8
-  ix.dnsbl.manitu.net*2
   bl.spamcop.net*2
   bl.suomispam.net*2
   hostkarma.junkemailfilter.com=127.0.0.2*3

@ -415,14 +414,12 @@ postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
   b.barracudacentral.org=127.0.0.2*7
   bl.mailspike.net=127.0.0.2*5
   bl.mailspike.net=127.0.0.[10;11;12]*4
-  dnsbl.sorbs.net=127.0.0.10*8
-  dnsbl.sorbs.net=127.0.0.5*6
-  dnsbl.sorbs.net=127.0.0.7*3
-  dnsbl.sorbs.net=127.0.0.8*2
-  dnsbl.sorbs.net=127.0.0.6*2
-  dnsbl.sorbs.net=127.0.0.9*2
 EOF
 fi

+# Remove discontinued DNSBLs from existing dns_blocklists.cf
+sed -i '/ix\.dnsbl\.manitu\.net\*2/d' /opt/postfix/conf/dns_blocklists.cf # Nixspam
+
 DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')

 if [ ! -z "$DNSBL_CONFIG" ]; then

@ -513,6 +510,11 @@ chgrp -R postdrop /var/spool/postfix/public
 chgrp -R postdrop /var/spool/postfix/maildrop
 postfix set-permissions

+# Checking if there is a leftover of a crashed postfix container before starting a new one
+if [ -e /var/spool/postfix/pid/master.pid ]; then
+  rm -rf /var/spool/postfix/pid/master.pid
+fi
+
 # Check Postfix configuration
 postconf -c /opt/postfix/conf > /dev/null
@ -18,6 +18,7 @@ stdout_logfile_maxbytes=0
 stderr_logfile=/dev/stderr
 stderr_logfile_maxbytes=0
 autorestart=true
+startsecs=10

 [eventlistener:processes]
 command=/usr/local/sbin/stop-supervisor.sh
@ -1,4 +1,4 @@
-@version: 3.28
+@version: 3.38
 @include "scl.conf"
 options {
   chain_hostnames(off);

@ -20,6 +20,7 @@ destination d_redis_ui_log {
     host("`REDIS_SLAVEOF_IP`")
     persist-name("redis1")
     port(`REDIS_SLAVEOF_PORT`)
+    auth("`REDISPASS`")
     command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
   );
 };

@ -28,6 +29,7 @@ destination d_redis_f2b_channel {
     host("`REDIS_SLAVEOF_IP`")
     persist-name("redis2")
     port(`REDIS_SLAVEOF_PORT`)
+    auth("`REDISPASS`")
     command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
   );
 };
@ -1,4 +1,4 @@
-@version: 3.28
+@version: 3.38
 @include "scl.conf"
 options {
   chain_hostnames(off);

@ -20,6 +20,7 @@ destination d_redis_ui_log {
     host("redis-mailcow")
     persist-name("redis1")
     port(6379)
+    auth("`REDISPASS`")
     command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
   );
 };

@ -28,6 +29,7 @@ destination d_redis_f2b_channel {
     host("redis-mailcow")
     persist-name("redis2")
     port(6379)
+    auth("`REDISPASS`")
     command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
   );
 };
@ -1,22 +1,27 @@
-FROM debian:bullseye-slim
-LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
+FROM debian:bookworm-slim
+LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

 ARG DEBIAN_FRONTEND=noninteractive
-ARG CODENAME=bullseye
-ENV LC_ALL C
+ARG RSPAMD_VER=rspamd_3.13.2-1~8bf602278
+ARG CODENAME=bookworm
+ENV LC_ALL=C

-RUN apt-get update && apt-get install -y \
+RUN apt-get update && apt-get install -y --no-install-recommends \
     tzdata \
     ca-certificates \
     gnupg2 \
     apt-transport-https \
     dnsutils \
-    netcat \
-    && apt-key adv --fetch-keys https://rspamd.com/apt-stable/gpg.key \
-    && echo "deb [arch=amd64] https://rspamd.com/apt-stable/ $CODENAME main" > /etc/apt/sources.list.d/rspamd.list \
-    && apt-get update \
-    && apt-get --no-install-recommends -y install rspamd redis-tools procps nano \
-    && rm -rf /var/lib/apt/lists/* \
+    netcat-traditional \
+    wget \
+    redis-tools \
+    procps \
+    nano \
+    lua-cjson \
+    && arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
+    && wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb\
+    && apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
+    && rm -rf /var/lib/apt/lists/* /tmp/*\
     && apt-get autoremove --purge \
     && apt-get clean \
     && mkdir -p /run/rspamd \

@ -25,7 +30,6 @@ RUN apt-get update && apt-get install -y \
     && sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua

 COPY settings.conf /etc/rspamd/settings.conf
-COPY metadata_exporter.lua /usr/share/rspamd/plugins/metadata_exporter.lua
 COPY set_worker_password.sh /set_worker_password.sh
 COPY docker-entrypoint.sh /docker-entrypoint.sh
@ -56,29 +56,57 @@ if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|||
cat <<EOF > /etc/rspamd/local.d/redis.conf
|
||||
read_servers = "redis:6379";
|
||||
write_servers = "${REDIS_SLAVEOF_IP}:${REDIS_SLAVEOF_PORT}";
|
||||
password = "${REDISPASS}";
|
||||
timeout = 10;
|
||||
EOF
|
||||
until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
|
||||
until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
||||
echo "Waiting for Redis @redis-mailcow..."
|
||||
sleep 2
|
||||
done
|
||||
until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} PING) == "PONG" ]]; do
|
||||
until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
||||
echo "Waiting for Redis @${REDIS_SLAVEOF_IP}..."
|
||||
sleep 2
|
||||
done
|
||||
redis-cli -h redis-mailcow SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
|
||||
redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
|
||||
else
|
||||
cat <<EOF > /etc/rspamd/local.d/redis.conf
|
||||
servers = "redis:6379";
|
||||
password = "${REDISPASS}";
|
||||
timeout = 10;
|
||||
EOF
|
||||
until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
|
||||
until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
||||
echo "Waiting for Redis slave..."
|
||||
sleep 2
|
||||
done
|
||||
redis-cli -h redis-mailcow SLAVEOF NO ONE
|
||||
redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF NO ONE
|
||||
fi
|
||||
|
||||
if [[ "${SKIP_OLEFY}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
if [[ -f /etc/rspamd/local.d/external_services.conf ]]; then
|
||||
rm /etc/rspamd/local.d/external_services.conf
|
||||
fi
|
||||
else
|
||||
if [[ ! -f /etc/rspamd/local.d/external_services.conf ]]; then
|
||||
cat <<EOF > /etc/rspamd/local.d/external_services.conf
|
||||
oletools {
|
||||
# default olefy settings
|
||||
servers = "olefy:10055";
|
||||
# needs to be set explicitly for Rspamd < 1.9.5
|
||||
scan_mime_parts = true;
|
||||
# mime-part regex matching in content-type or filename
|
||||
# block all macros
|
||||
extended = true;
|
||||
max_size = 3145728;
|
||||
timeout = 20.0;
|
||||
retransmits = 1;
|
||||
}
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
# Provide additional lua modules
|
||||
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so
|
||||
|
||||
chown -R _rspamd:_rspamd /var/lib/rspamd \
|
||||
/etc/rspamd/local.d \
|
||||
/etc/rspamd/override.d \
|
||||
|
|
@ -121,4 +149,190 @@ for file in /hooks/*; do
|
|||
  fi
done

# If DQS KEY is set in mailcow.conf add Spamhaus DQS RBLs
if [[ ! -z ${SPAMHAUS_DQS_KEY} ]]; then
  cat <<EOF > /etc/rspamd/custom/dqs-rbl.conf
# Autogenerated by mailcow. DO NOT TOUCH!
spamhaus {
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  from = false;
}
spamhaus_from {
  from = true;
  received = false;
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  returncodes {
    SPAMHAUS_ZEN = [ "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.9", "127.0.0.10", "127.0.0.11" ];
  }
}
spamhaus_authbl_received {
  # Check if the sender client is listed in AuthBL (AuthBL is *not* part of ZEN)
  rbl = "${SPAMHAUS_DQS_KEY}.authbl.dq.spamhaus.net";
  from = false;
  received = true;
  ipv6 = true;
  returncodes {
    SH_AUTHBL_RECEIVED = "127.0.0.20"
  }
}
spamhaus_dbl {
  # Add checks on the HELO string
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  helo = true;
  rdns = true;
  dkim = true;
  disable_monitoring = true;
  returncodes {
    RBL_DBL_SPAM = "127.0.1.2";
    RBL_DBL_PHISH = "127.0.1.4";
    RBL_DBL_MALWARE = "127.0.1.5";
    RBL_DBL_BOTNET = "127.0.1.6";
    RBL_DBL_ABUSED_SPAM = "127.0.1.102";
    RBL_DBL_ABUSED_PHISH = "127.0.1.104";
    RBL_DBL_ABUSED_MALWARE = "127.0.1.105";
    RBL_DBL_ABUSED_BOTNET = "127.0.1.106";
    RBL_DBL_DONT_QUERY_IPS = "127.0.1.255";
  }
}
spamhaus_dbl_fullurls {
  ignore_defaults = true;
  no_ip = true;
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  selector = 'urls:get_host'
  disable_monitoring = true;
  returncodes {
    DBLABUSED_SPAM_FULLURLS = "127.0.1.102";
    DBLABUSED_PHISH_FULLURLS = "127.0.1.104";
    DBLABUSED_MALWARE_FULLURLS = "127.0.1.105";
    DBLABUSED_BOTNET_FULLURLS = "127.0.1.106";
  }
}
spamhaus_zrd {
  # Add checks on the HELO string also for DQS
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  helo = true;
  rdns = true;
  dkim = true;
  disable_monitoring = true;
  returncodes {
    RBL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    RBL_ZRD_FRESH_DOMAIN = [
      "127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
    ];
    RBL_ZRD_DONT_QUERY_IPS = "127.0.2.255";
  }
}
"SPAMHAUS_ZEN_URIBL" {
  enabled = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  resolve_ip = true;
  checks = ['urls'];
  replyto = true;
  emails = true;
  ipv4 = true;
  ipv6 = true;
  emails_domainonly = true;
  returncodes {
    URIBL_SBL = "127.0.0.2";
    URIBL_SBL_CSS = "127.0.0.3";
    URIBL_XBL = ["127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7"];
    URIBL_PBL = ["127.0.0.10", "127.0.0.11"];
    URIBL_DROP = "127.0.0.9";
  }
}
SH_EMAIL_DBL {
  ignore_defaults = true;
  replyto = true;
  emails_domainonly = true;
  disable_monitoring = true;
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  returncodes = {
    SH_EMAIL_DBL = [
      "127.0.1.2",
      "127.0.1.4",
      "127.0.1.5",
      "127.0.1.6"
    ];
    SH_EMAIL_DBL_ABUSED = [
      "127.0.1.102",
      "127.0.1.104",
      "127.0.1.105",
      "127.0.1.106"
    ];
    SH_EMAIL_DBL_DONT_QUERY_IPS = [ "127.0.1.255" ];
  }
}
SH_EMAIL_ZRD {
  ignore_defaults = true;
  replyto = true;
  emails_domainonly = true;
  disable_monitoring = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  returncodes = {
    SH_EMAIL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    SH_EMAIL_ZRD_FRESH_DOMAIN = [
      "127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
    ];
    SH_EMAIL_ZRD_DONT_QUERY_IPS = [ "127.0.2.255" ];
  }
}
"DBL" {
  # override the defaults for DBL defined in modules.d/rbl.conf
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  disable_monitoring = true;
}
"ZRD" {
  ignore_defaults = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  no_ip = true;
  dkim = true;
  emails = true;
  emails_domainonly = true;
  urls = true;
  returncodes = {
    ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    ZRD_FRESH_DOMAIN = ["127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"];
  }
}
spamhaus_sbl_url {
  ignore_defaults = true
  rbl = "${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net";
  checks = ['urls'];
  disable_monitoring = true;
  returncodes {
    SPAMHAUS_SBL_URL = "127.0.0.2";
  }
}

SH_HBL_EMAIL {
  ignore_defaults = true;
  rbl = "_email.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net";
  emails_domainonly = false;
  selector = "from('smtp').lower;from('mime').lower";
  ignore_whitelist = true;
  checks = ['emails', 'replyto'];
  hash = "sha1";
  returncodes = {
    SH_HBL_EMAIL = [
      "127.0.3.2"
    ];
  }
}

spamhaus_dqs_hbl {
  symbol = "HBL_FILE_UNKNOWN";
  rbl = "_file.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net.";
  selector = "attachments('rbase32', 'sha256')";
  ignore_whitelist = true;
  ignore_defaults = true;
  returncodes {
    SH_HBL_FILE_MALICIOUS = "127.0.3.10";
    SH_HBL_FILE_SUSPICIOUS = "127.0.3.15";
  }
}
EOF
else
  rm -rf /etc/rspamd/custom/dqs-rbl.conf
fi
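# Illustrative check (not part of this script): a DQS key can be smoke-tested
# before trusting these lists. DNSBLs answer for the reversed test address
# 127.0.0.2, so a 127.0.0.x reply confirms the key is accepted, e.g.:
#   dig +short 2.0.0.127.${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net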

exec "$@"

@@ -1,632 +0,0 @@
--[[
Copyright (c) 2016, Andrew Lewis <nerf@judo.za.org>
Copyright (c) 2016, Vsevolod Stakhov <vsevolod@highsecure.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
]]--

if confighelp then
  return
end

-- A plugin that pushes metadata (or whole messages) to external services

local redis_params
local lua_util = require "lua_util"
local rspamd_http = require "rspamd_http"
local rspamd_util = require "rspamd_util"
local rspamd_logger = require "rspamd_logger"
local ucl = require "ucl"
local E = {}
local N = 'metadata_exporter'

local settings = {
  pusher_enabled = {},
  pusher_format = {},
  pusher_select = {},
  mime_type = 'text/plain',
  defer = false,
  mail_from = '',
  mail_to = 'postmaster@localhost',
  helo = 'rspamd',
  email_template = [[From: "Rspamd" <$mail_from>
To: $mail_to
Subject: Spam alert
Date: $date
MIME-Version: 1.0
Message-ID: <$our_message_id>
Content-type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit

Authenticated username: $user
IP: $ip
Queue ID: $qid
SMTP FROM: $from
SMTP RCPT: $rcpt
MIME From: $header_from
MIME To: $header_to
MIME Date: $header_date
Subject: $header_subject
Message-ID: $message_id
Action: $action
Score: $score
Symbols: $symbols]],
}
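-- Illustrative only (not part of this file): the plugin is driven by a
-- `rules` table in its module configuration; a hypothetical rule using the
-- backends, selectors and formatters defined below could look like:
--   rules {
--     MAILCOW_ALERT {
--       backend = "send_mail";
--       smtp = "postfix";
--       mail_to = "monitoring@example.org";
--       selector = "is_reject";
--       formatter = "email_alert";
--     }
--   }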

local function get_general_metadata(task, flatten, no_content)
  local r = {}
  local ip = task:get_from_ip()
  if ip and ip:is_valid() then
    r.ip = tostring(ip)
  else
    r.ip = 'unknown'
  end
  r.user = task:get_user() or 'unknown'
  r.qid = task:get_queue_id() or 'unknown'
  r.subject = task:get_subject() or 'unknown'
  r.action = task:get_metric_action('default')

  local s = task:get_metric_score('default')[1]
  r.score = flatten and string.format('%.2f', s) or s

  local fuzzy = task:get_mempool():get_variable("fuzzy_hashes", "fstrings")
  if fuzzy and #fuzzy > 0 then
    local fz = {}
    for _, h in ipairs(fuzzy) do
      table.insert(fz, h)
    end
    if not flatten then
      r.fuzzy = fz
    else
      r.fuzzy = table.concat(fz, ', ')
    end
  else
    r.fuzzy = 'unknown'
  end

  local rcpt = task:get_recipients('smtp')
  if rcpt then
    local l = {}
    for _, a in ipairs(rcpt) do
      table.insert(l, a['addr'])
    end
    if not flatten then
      r.rcpt = l
    else
      r.rcpt = table.concat(l, ', ')
    end
  else
    r.rcpt = 'unknown'
  end
  local from = task:get_from('smtp')
  if ((from or E)[1] or E).addr then
    r.from = from[1].addr
  else
    r.from = 'unknown'
  end
  local syminf = task:get_symbols_all()
  if flatten then
    local l = {}
    for _, sym in ipairs(syminf) do
      local txt
      if sym.options then
        local topt = table.concat(sym.options, ', ')
        txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')' .. ' [' .. topt .. ']'
      else
        txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')'
      end
      table.insert(l, txt)
    end
    r.symbols = table.concat(l, '\n\t')
  else
    r.symbols = syminf
  end
  local function process_header(name)
    local hdr = task:get_header_full(name)
    if hdr then
      local l = {}
      for _, h in ipairs(hdr) do
        table.insert(l, h.decoded)
      end
      if not flatten then
        return l
      else
        return table.concat(l, '\n')
      end
    else
      return 'unknown'
    end
  end
  if not no_content then
    r.header_from = process_header('from')
    r.header_to = process_header('to')
    r.header_subject = process_header('subject')
    r.header_date = process_header('date')
    r.message_id = task:get_message_id()
  end
  return r
end

local formatters = {
  default = function(task)
    return task:get_content(), {}
  end,
  email_alert = function(task, rule, extra)
    local meta = get_general_metadata(task, true)
    local display_emails = {}
    local mail_targets = {}
    meta.mail_from = rule.mail_from or settings.mail_from
    local mail_rcpt = rule.mail_to or settings.mail_to
    if type(mail_rcpt) ~= 'table' then
      table.insert(display_emails, string.format('<%s>', mail_rcpt))
      table.insert(mail_targets, mail_rcpt)
    else
      for _, e in ipairs(mail_rcpt) do
        table.insert(display_emails, string.format('<%s>', e))
        table.insert(mail_targets, e)
      end
    end
    if rule.email_alert_sender then
      local x = task:get_from('smtp')
      if x and string.len(x[1].addr) > 0 then
        table.insert(mail_targets, x[1].addr)
        table.insert(display_emails, string.format('<%s>', x[1].addr))
      end
    end
    if rule.email_alert_user then
      local x = task:get_user()
      if x then
        table.insert(mail_targets, x)
        table.insert(display_emails, string.format('<%s>', x))
      end
    end
    if rule.email_alert_recipients then
      local x = task:get_recipients('smtp')
      if x then
        for _, e in ipairs(x) do
          if string.len(e.addr) > 0 then
            table.insert(mail_targets, e.addr)
            table.insert(display_emails, string.format('<%s>', e.addr))
          end
        end
      end
    end
    meta.mail_to = table.concat(display_emails, ', ')
    meta.our_message_id = rspamd_util.random_hex(12) .. '@rspamd'
    meta.date = rspamd_util.time_to_string(rspamd_util.get_time())
    return lua_util.template(rule.email_template or settings.email_template, meta), { mail_targets = mail_targets }
  end,
  json = function(task)
    return ucl.to_format(get_general_metadata(task), 'json-compact')
  end
}

local function is_spam(action)
  return (action == 'reject' or action == 'add header' or action == 'rewrite subject')
end

local selectors = {
  default = function(task)
    return true
  end,
  is_spam = function(task)
    local action = task:get_metric_action('default')
    return is_spam(action)
  end,
  is_spam_authed = function(task)
    if not task:get_user() then
      return false
    end
    local action = task:get_metric_action('default')
    return is_spam(action)
  end,
  is_reject = function(task)
    local action = task:get_metric_action('default')
    return (action == 'reject')
  end,
  is_reject_authed = function(task)
    if not task:get_user() then
      return false
    end
    local action = task:get_metric_action('default')
    return (action == 'reject')
  end,
}

local function maybe_defer(task, rule)
  if rule.defer then
    rspamd_logger.warnx(task, 'deferring message')
    task:set_pre_result('soft reject', 'deferred', N)
  end
end

local pushers = {
  redis_pubsub = function(task, formatted, rule)
    local _, ret, upstream
    local function redis_pub_cb(err)
      if err then
        rspamd_logger.errx(task, 'got error %s when publishing on server %s',
          err, upstream:get_addr())
        return maybe_defer(task, rule)
      end
      return true
    end
    ret, _, upstream = rspamd_redis_make_request(task,
      redis_params, -- connect params
      nil, -- hash key
      true, -- is write
      redis_pub_cb, -- callback
      'PUBLISH', -- command
      {rule.channel, formatted} -- arguments
    )
    if not ret then
      rspamd_logger.errx(task, 'error connecting to redis')
      maybe_defer(task, rule)
    end
  end,
  http = function(task, formatted, rule)
    local function http_callback(err, code)
      if err then
        rspamd_logger.errx(task, 'got error %s in http callback', err)
        return maybe_defer(task, rule)
      end
      if code ~= 200 then
        rspamd_logger.errx(task, 'got unexpected http status: %s', code)
        return maybe_defer(task, rule)
      end
      return true
    end
    local hdrs = {}
    if rule.meta_headers then
      local gm = get_general_metadata(task, false, true)
      local pfx = rule.meta_header_prefix or 'X-Rspamd-'
      for k, v in pairs(gm) do
        if type(v) == 'table' then
          hdrs[pfx .. k] = ucl.to_format(v, 'json-compact')
        else
          hdrs[pfx .. k] = v
        end
      end
    end
    rspamd_http.request({
      task = task,
      url = rule.url,
      body = formatted,
      callback = http_callback,
      mime_type = rule.mime_type or settings.mime_type,
      headers = hdrs,
    })
  end,
  send_mail = function(task, formatted, rule, extra)
    local lua_smtp = require "lua_smtp"
    local function sendmail_cb(ret, err)
      if not ret then
        rspamd_logger.errx(task, 'SMTP export error: %s', err)
        maybe_defer(task, rule)
      end
    end

    lua_smtp.sendmail({
      task = task,
      host = rule.smtp,
      port = rule.smtp_port or settings.smtp_port or 25,
      from = rule.mail_from or settings.mail_from,
      recipients = extra.mail_targets or rule.mail_to or settings.mail_to,
      helo = rule.helo or settings.helo,
      timeout = rule.timeout or settings.timeout,
    }, formatted, sendmail_cb)
  end,
}

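-- Illustrative only (not part of this file): the `push`/`custom_push` options
-- are parsed below with load(), so each value must be a string that returns a
-- function, e.g. in the module configuration:
--   custom_push {
--     log_only = "return function(task, formatted, rule) print(formatted) end";
--   }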
local opts = rspamd_config:get_all_opt(N)
if not opts then return end
local process_settings = {
  select = function(val)
    selectors.custom = assert(load(val))()
  end,
  format = function(val)
    formatters.custom = assert(load(val))()
  end,
  push = function(val)
    pushers.custom = assert(load(val))()
  end,
  custom_push = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        pushers[k] = assert(load(v))()
      end
    end
  end,
  custom_select = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        selectors[k] = assert(load(v))()
      end
    end
  end,
  custom_format = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        formatters[k] = assert(load(v))()
      end
    end
  end,
  pusher_enabled = function(val)
    if type(val) == 'string' then
      if pushers[val] then
        settings.pusher_enabled[val] = true
      else
        rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
      end
    elseif type(val) == 'table' then
      for _, v in ipairs(val) do
        if pushers[v] then
          settings.pusher_enabled[v] = true
        else
          rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', v)
        end
      end
    end
  end,
}
for k, v in pairs(opts) do
  local f = process_settings[k]
  if f then
    f(opts[k])
  else
    settings[k] = v
  end
end
if type(settings.rules) ~= 'table' then
  -- Legacy config
  settings.rules = {}
  if not next(settings.pusher_enabled) then
    if pushers.custom then
      rspamd_logger.infox(rspamd_config, 'Custom pusher implicitly enabled')
      settings.pusher_enabled.custom = true
    else
      -- Check legacy options
      if settings.url then
        rspamd_logger.warnx(rspamd_config, 'HTTP pusher implicitly enabled')
        settings.pusher_enabled.http = true
      end
      if settings.channel then
        rspamd_logger.warnx(rspamd_config, 'Redis Pubsub pusher implicitly enabled')
        settings.pusher_enabled.redis_pubsub = true
      end
      if settings.smtp and settings.mail_to then
        rspamd_logger.warnx(rspamd_config, 'SMTP pusher implicitly enabled')
        settings.pusher_enabled.send_mail = true
      end
    end
  end
  if not next(settings.pusher_enabled) then
    rspamd_logger.errx(rspamd_config, 'No push backend enabled')
    return
  end
  if settings.formatter then
    settings.format = formatters[settings.formatter]
    if not settings.format then
      rspamd_logger.errx(rspamd_config, 'No such formatter: %s', settings.formatter)
      return
    end
  end
  if settings.selector then
    settings.select = selectors[settings.selector]
    if not settings.select then
      rspamd_logger.errx(rspamd_config, 'No such selector: %s', settings.selector)
      return
    end
  end
  for k in pairs(settings.pusher_enabled) do
    local formatter = settings.pusher_format[k]
    local selector = settings.pusher_select[k]
    if not formatter then
      settings.pusher_format[k] = settings.formatter or 'default'
      rspamd_logger.infox(rspamd_config, 'Using default formatter for %s pusher', k)
    else
      if not formatters[formatter] then
        rspamd_logger.errx(rspamd_config, 'No such formatter: %s - disabling %s', formatter, k)
        settings.pusher_enabled[k] = nil
      end
    end
    if not selector then
      settings.pusher_select[k] = settings.selector or 'default'
      rspamd_logger.infox(rspamd_config, 'Using default selector for %s pusher', k)
    else
      if not selectors[selector] then
        rspamd_logger.errx(rspamd_config, 'No such selector: %s - disabling %s', selector, k)
        settings.pusher_enabled[k] = nil
      end
    end
  end
  if settings.pusher_enabled.redis_pubsub then
    redis_params = rspamd_parse_redis_server(N)
    if not redis_params then
      rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
      settings.pusher_enabled.redis_pubsub = nil
    else
      local r = {}
      r.backend = 'redis_pubsub'
      r.channel = settings.channel
      r.defer = settings.defer
      r.selector = settings.pusher_select.redis_pubsub
      r.formatter = settings.pusher_format.redis_pubsub
      settings.rules[r.backend:upper()] = r
    end
  end
  if settings.pusher_enabled.http then
    if not settings.url then
      rspamd_logger.errx(rspamd_config, 'No URL is specified')
      settings.pusher_enabled.http = nil
    else
      local r = {}
      r.backend = 'http'
      r.url = settings.url
      r.mime_type = settings.mime_type
      r.defer = settings.defer
      r.selector = settings.pusher_select.http
      r.formatter = settings.pusher_format.http
      settings.rules[r.backend:upper()] = r
    end
  end
  if settings.pusher_enabled.send_mail then
    if not (settings.mail_to and settings.smtp) then
      rspamd_logger.errx(rspamd_config, 'No mail_to and/or smtp setting is specified')
      settings.pusher_enabled.send_mail = nil
    else
      local r = {}
      r.backend = 'send_mail'
      r.mail_to = settings.mail_to
      r.mail_from = settings.mail_from
      r.helo = settings.helo
      r.smtp = settings.smtp
      r.smtp_port = settings.smtp_port
      r.email_template = settings.email_template
      r.defer = settings.defer
      r.selector = settings.pusher_select.send_mail
      r.formatter = settings.pusher_format.send_mail
      settings.rules[r.backend:upper()] = r
    end
  end
  if not next(settings.pusher_enabled) then
    rspamd_logger.errx(rspamd_config, 'No push backend enabled')
    return
  end
elseif not next(settings.rules) then
  lua_util.debugm(N, rspamd_config, 'No rules enabled')
  return
end
if not settings.rules or not next(settings.rules) then
  rspamd_logger.errx(rspamd_config, 'No rules enabled')
  return
end
local backend_required_elements = {
  http = {
    'url',
  },
  smtp = {
    'mail_to',
    'smtp',
  },
  redis_pubsub = {
    'channel',
  },
}
local check_element = {
  selector = function(k, v)
    if not selectors[v] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid selector %s', k, v)
      return false
    else
      return true
    end
  end,
  formatter = function(k, v)
    if not formatters[v] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid formatter %s', k, v)
      return false
    else
      return true
    end
  end,
}
local backend_check = {
  default = function(k, rule)
    local reqset = backend_required_elements[rule.backend]
    if reqset then
      for _, e in ipairs(reqset) do
        if not rule[e] then
          rspamd_logger.errx(rspamd_config, 'Rule %s misses required setting %s', k, e)
          settings.rules[k] = nil
        end
      end
    end
    for sett, v in pairs(rule) do
      local f = check_element[sett]
      if f then
        if not f(sett, v) then
          settings.rules[k] = nil
        end
      end
    end
  end,
}
backend_check.redis_pubsub = function(k, rule)
  if not redis_params then
    redis_params = rspamd_parse_redis_server(N)
  end
  if not redis_params then
    rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
    settings.rules[k] = nil
  else
    backend_check.default(k, rule)
  end
end
setmetatable(backend_check, {
  __index = function()
    return backend_check.default
  end,
})
for k, v in pairs(settings.rules) do
  if type(v) == 'table' then
    local backend = v.backend
    if not backend then
      rspamd_logger.errx(rspamd_config, 'Rule %s has no backend', k)
      settings.rules[k] = nil
    elseif not pushers[backend] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid backend %s', k, backend)
      settings.rules[k] = nil
    else
      local f = backend_check[backend]
      f(k, v)
    end
  else
    rspamd_logger.errx(rspamd_config, 'Rule %s has bad type: %s', k, type(v))
    settings.rules[k] = nil
  end
end

local function gen_exporter(rule)
  return function (task)
    if task:has_flag('skip') then return end
    local selector = rule.selector or 'default'
    local selected = selectors[selector](task)
    if selected then
      lua_util.debugm(N, task, 'Message selected for processing')
      local formatter = rule.formatter or 'default'
      local formatted, extra = formatters[formatter](task, rule)
      if formatted then
        pushers[rule.backend](task, formatted, rule, extra)
      else
        lua_util.debugm(N, task, 'Formatter [%s] returned non-truthy value [%s]', formatter, formatted)
      end
    else
      lua_util.debugm(N, task, 'Selector [%s] returned non-truthy value [%s]', selector, selected)
    end
  end
end

if not next(settings.rules) then
  rspamd_logger.errx(rspamd_config, 'No rules enabled')
  lua_util.disable_module(N, "config")
end
for k, r in pairs(settings.rules) do
  rspamd_config:register_symbol({
    name = 'EXPORT_METADATA_' .. k,
    type = 'idempotent',
    callback = gen_exporter(r),
    priority = 10,
    flags = 'empty,explicit_disable,ignore_passthrough',
  })
end

@@ -1,11 +1,13 @@
FROM debian:bullseye-slim
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
FROM debian:bookworm-slim

LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ARG SOGO_DEBIAN_REPOSITORY=http://packages.sogo.nu/nightly/5/debian/
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG GOSU_VERSION=1.16
ENV LC_ALL C
ARG DEBIAN_VERSION=bookworm
ARG SOGO_DEBIAN_REPOSITORY=https://packagingv2.sogo.nu/sogo-nightly-debian/
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
ARG GOSU_VERSION=1.17
ENV LC_ALL=C

# Prerequisites
RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \

@@ -21,7 +23,7 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
    syslog-ng-core \
    syslog-ng-mod-redis \
    dirmngr \
    netcat \
    netcat-traditional \
    psmisc \
    wget \
    patch \

@@ -31,13 +33,13 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
  && gosu nobody true \
  && mkdir /usr/share/doc/sogo \
  && touch /usr/share/doc/sogo/empty.sh \
  && apt-key adv --keyserver keys.openpgp.org --recv-key 74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 \
  && echo "deb ${SOGO_DEBIAN_REPOSITORY} bullseye bullseye" > /etc/apt/sources.list.d/sogo.list \
  && wget -O- https://keys.openpgp.org/vks/v1/by-fingerprint/74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 | gpg --dearmor | apt-key add - \
  && echo "deb [trusted=yes] ${SOGO_DEBIAN_REPOSITORY} ${DEBIAN_VERSION} main" > /etc/apt/sources.list.d/sogo.list \
  && apt-get update && apt-get install -y --no-install-recommends \
    sogo \
    sogo-activesync \
  && apt-get autoclean \
  && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/sogo.list \
  && rm -rf /var/lib/apt/lists/* \
  && touch /etc/default/locale

COPY ./bootstrap-sogo.sh /bootstrap-sogo.sh

@@ -45,6 +47,7 @@ COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY acl.diff /acl.diff
COPY navMailcowBtns.diff /navMailcowBtns.diff
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /

@@ -53,4 +56,4 @@ RUN chmod +x /bootstrap-sogo.sh \

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

@@ -1,7 +1,7 @@
#!/bin/bash

# Wait for MySQL to warm-up
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for database to come up..."
  sleep 2
done

@@ -14,118 +14,18 @@ do
done

# Wait for updated schema
DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
while [[ "${DBV_NOW}" != "${DBV_NEW}" ]]; do
  echo "Waiting for schema update..."
  DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
  DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
  DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
  sleep 5
done
echo "DB schema is ${DBV_NOW}"
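# Illustrative (hypothetical value): for a line like `$db_version = "25012024_1000";`
# in init_db.inc.php, the grep/sed/cut pipeline above yields `25012024_1000`,
# which is compared against the `versions` table until the schema migration
# has completed.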

# Recreate view
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing sogo_view..."
  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP VIEW IF EXISTS sogo_view"
  while [[ ${VIEW_OK} != 'OK' ]]; do
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
CREATE VIEW sogo_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) AS
SELECT
  mailbox.username,
  mailbox.domain,
  mailbox.username,
  IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.force_pw_update')) = '0', IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.sogo_access')) = 1, password, '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'), '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'),
  mailbox.name,
  mailbox.username,
  IFNULL(GROUP_CONCAT(ga.aliases ORDER BY ga.aliases SEPARATOR ' '), ''),
  IFNULL(gda.ad_alias, ''),
  IFNULL(external_acl.send_as_acl, ''),
  mailbox.kind,
  mailbox.multiple_bookings
FROM
  mailbox
LEFT OUTER JOIN
  grouped_mail_aliases ga
    ON ga.username REGEXP CONCAT('(^|,)', mailbox.username, '($|,)')
LEFT OUTER JOIN
  grouped_domain_alias_address gda
    ON gda.username = mailbox.username
LEFT OUTER JOIN
  grouped_sender_acl_external external_acl
    ON external_acl.username = mailbox.username
WHERE
  mailbox.active = '1'
GROUP BY
  mailbox.username;
EOF
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
      VIEW_OK=OK
    else
      echo "Will retry to setup SOGo view in 3s..."
      sleep 3
    fi
  done
else
  while [[ ${VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
      VIEW_OK=OK
    else
      echo "Waiting for SOGo view to be created by master..."
      sleep 3
    fi
  done
fi

# Wait for static view table if missing after update and update content
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing _sogo_static_view..."
  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
      STATIC_VIEW_OK=OK
      echo "Updating _sogo_static_view content..."
      # If changed, also update init_db.inc.php
      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "REPLACE INTO _sogo_static_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) SELECT c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings from sogo_view;"
      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "DELETE FROM _sogo_static_view WHERE c_uid NOT IN (SELECT username FROM mailbox WHERE active = '1')"
    else
      echo "Waiting for database initialization..."
      sleep 3
    fi
  done
else
  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
      STATIC_VIEW_OK=OK
    else
      echo "Waiting for database initialization by master..."
      sleep 3
    fi
  done
fi


# Recreate password update trigger
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing update trigger..."
  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
  while [[ ${TRIGGER_OK} != 'OK' ]]; do
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELIMITER -
CREATE TRIGGER sogo_update_password AFTER UPDATE ON _sogo_static_view
FOR EACH ROW
BEGIN
  UPDATE mailbox SET password = NEW.c_password WHERE NEW.c_uid = username;
END;
-
DELIMITER ;
EOF
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME = 'sogo_update_password'") ]]; then
      TRIGGER_OK=OK
    else
      echo "Will retry to setup SOGo password update trigger in 3s"
      sleep 3
    fi
  done
  mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
fi

# cat /dev/urandom seems to hang here occasionally and is not recommended anyway, better use openssl
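# Illustrative only (the actual assignment sits below this hunk and is not
# shown here): an openssl-based secret could be generated e.g. as
#   RAND_PASS=$(openssl rand -base64 32 | tr -dc _A-Z-a-z-0-9)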

@@ -150,6 +50,8 @@ cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
    <string>YES</string>
    <key>SOGoEncryptionKey</key>
    <string>${RAND_PASS}</string>
    <key>OCSAdminURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_admin</string>
    <key>OCSCacheFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
    <key>OCSEMailAlarmsFolderURL</key>

@@ -211,10 +113,10 @@ while read -r line gal
</dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
||||
# Generate alternative LDAP authentication dict, when SQL authentication fails
|
||||
# This will nevertheless read attributes from LDAP
|
||||
line=${line} envsubst < /etc/sogo/plist_ldap >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
||||
/etc/sogo/plist_ldap.sh ${line} ${gal} >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
||||
echo " </array>
|
||||
</dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
||||
done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)
|
||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)
|
||||
|
||||
# Generate footer
|
||||
echo ' </dict>
|
||||
|
|
@ -238,8 +140,12 @@ chmod 600 /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|||
# fi
#fi

# Copy logo, if any
[[ -f /etc/sogo/sogo-full.svg ]] && cp /etc/sogo/sogo-full.svg /usr/lib/GNUstep/SOGo/WebServerResources/img/sogo-full.svg
if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff > /dev/null; then
  patch -R /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff;
fi

# Rename custom logo, if any
[[ -f /etc/sogo/sogo-full.svg ]] && mv /etc/sogo/sogo-full.svg /etc/sogo/custom-fulllogo.svg

# Rsync web content
echo "Syncing web content with named volume"

@@ -10,6 +10,8 @@ if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

echo "$TZ" > /etc/timezone

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
data/Dockerfiles/sogo/navMailcowBtns.diff (new file, 15 lines)

@@ -0,0 +1,15 @@
60,65d58
< var:ng-click="navButtonClick"
< ng-href="/user">
< <md-icon>build</md-icon>
< <md-tooltip>mailcow <var:string label:value="Preferences"/></md-tooltip>
< </md-button>
< <md-button class="md-icon-button"
83c76
< onclick="mc_logout();"
---
> ng-show="::activeUser.path.logoff.length"
85c78
< ng-href="#">
---
> ng-href="{{::activeUser.path.logoff}}">

@@ -1,4 +1,4 @@
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);

@@ -22,6 +22,7 @@ destination d_redis_ui_log {
host("`REDIS_SLAVEOF_IP`")
|
||||
persist-name("redis1")
|
||||
port(`REDIS_SLAVEOF_PORT`)
|
||||
auth("`REDISPASS`")
|
||||
command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
|
||||
);
|
||||
};
|
||||
|
|
@ -30,6 +31,7 @@ destination d_redis_f2b_channel {
|
|||
host("`REDIS_SLAVEOF_IP`")
|
||||
persist-name("redis2")
|
||||
port(`REDIS_SLAVEOF_PORT`)
|
||||
auth("`REDISPASS`")
|
||||
command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
|
||||
);
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);

@@ -22,6 +22,7 @@ destination d_redis_ui_log {
host("redis-mailcow")
|
||||
persist-name("redis1")
|
||||
port(6379)
|
||||
auth("`REDISPASS`")
|
||||
command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
|
||||
);
|
||||
};
|
||||
|
|
@ -30,6 +31,7 @@ destination d_redis_f2b_channel {
|
|||
host("redis-mailcow")
|
||||
persist-name("redis2")
|
||||
port(6379)
|
||||
auth("`REDISPASS`")
|
||||
command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
|
||||
);
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,31 +0,0 @@
|
|||
FROM solr:7.7-slim

USER root

# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG GOSU_VERSION=1.16

COPY solr.sh /
COPY solr-config-7.7.0.xml /
COPY solr-schema-7.7.0.xml /

RUN dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
  && chmod +x /usr/local/bin/gosu \
  && gosu nobody true \
  && apt-get update && apt-get install -y --no-install-recommends \
    tzdata \
    curl \
    bash \
    zip \
  && apt-get autoclean \
  && rm -rf /var/lib/apt/lists/* \
  && chmod +x /solr.sh \
  && sync \
  && bash /solr.sh --bootstrap

RUN zip -q -d /opt/solr/server/lib/ext/log4j-core-*.jar org/apache/logging/log4j/core/lookup/JndiLookup.class

RUN apt remove zip -y

CMD ["/solr.sh"]

@@ -1,289 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>

<!-- This is the default config with stuff non-essential to Dovecot removed. -->

<config>
  <!-- Controls what version of Lucene various components of Solr
       adhere to. Generally, you want to use the latest version to
       get all bug fixes and improvements. It is highly recommended
       that you fully re-index after changing this setting as it can
       affect both how text is indexed and queried.
  -->
  <luceneMatchVersion>7.7.0</luceneMatchVersion>

  <!-- A 'dir' option by itself adds any files found in the directory
       to the classpath, this is useful for including all jars in a
       directory.

       When a 'regex' is specified in addition to a 'dir', only the
       files in that directory which completely match the regex
       (anchored on both ends) will be included.

       If a 'dir' option (with or without a regex) is used and nothing
       is found that matches, a warning will be logged.

       The examples below can be used to load some solr-contribs along
       with their external dependencies.
  -->
  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />

  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />

  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />

  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />

  <!-- Data Directory

       Used to specify an alternate directory to hold all index data
       other than the default ./data under the Solr home. If
       replication is in use, this should match the replication
       configuration.
  -->
  <dataDir>${solr.data.dir:}</dataDir>

  <!-- The default high-performance update handler -->
  <updateHandler class="solr.DirectUpdateHandler2">

    <!-- Enables a transaction log, used for real-time get, durability,
         and solr cloud replica recovery. The log can grow as big as
         uncommitted changes to the index, so use of a hard autoCommit
         is recommended (see below).
         "dir" - the target directory for transaction logs, defaults to the
                 solr data directory.
         "numVersionBuckets" - sets the number of buckets used to keep
                 track of max version values when checking for re-ordered
                 updates; increase this value to reduce the cost of
                 synchronizing access to version buckets during high-volume
                 indexing, this requires 8 bytes (long) * numVersionBuckets
                 of heap space per Solr core.
    -->
    <updateLog>
      <str name="dir">${solr.ulog.dir:}</str>
      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
    </updateLog>

    <!-- AutoCommit

         Perform a hard commit automatically under certain conditions.
         Instead of enabling autoCommit, consider using "commitWithin"
         when adding documents.

         http://wiki.apache.org/solr/UpdateXmlMessages

         maxDocs - Maximum number of documents to add since the last
                   commit before automatically triggering a new commit.

         maxTime - Maximum amount of time in ms that is allowed to pass
                   since a document was added before automatically
                   triggering a new commit.
         openSearcher - if false, the commit causes recent index changes
                   to be flushed to stable storage, but does not cause a new
                   searcher to be opened to make those changes visible.

         If the updateLog is enabled, then it's highly recommended to
         have some sort of hard autoCommit to limit the log size.
    -->
    <autoCommit>
      <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
      <openSearcher>false</openSearcher>
    </autoCommit>

    <!-- softAutoCommit is like autoCommit except it causes a
         'soft' commit which only ensures that changes are visible
         but does not ensure that data is synced to disk. This is
         faster and more near-realtime friendly than a hard commit.
    -->
    <autoSoftCommit>
      <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
    </autoSoftCommit>

    <!-- Update Related Event Listeners

         Various IndexWriter related events can trigger Listeners to
         take actions.

         postCommit - fired after every commit or optimize command
         postOptimize - fired after every optimize command
    -->

  </updateHandler>

  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
       Query section - these settings control query time things like caches
       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
  <query>
    <!-- Solr Internal Query Caches

         There are two implementations of cache available for Solr,
         LRUCache, based on a synchronized LinkedHashMap, and
         FastLRUCache, based on a ConcurrentHashMap.

         FastLRUCache has faster gets and slower puts in single
         threaded operation and thus is generally faster than LRUCache
         when the hit ratio of the cache is high (> 75%), and may be
         faster under other scenarios on multi-cpu systems.
    -->

    <!-- Filter Cache

         Cache used by SolrIndexSearcher for filters (DocSets),
         unordered sets of *all* documents that match a query. When a
         new searcher is opened, its caches may be prepopulated or
         "autowarmed" using data from caches in the old searcher.
         autowarmCount is the number of items to prepopulate. For
         LRUCache, the autowarmed items will be the most recently
         accessed items.

         Parameters:
           class - the SolrCache implementation
                   (LRUCache or FastLRUCache)
           size - the maximum number of entries in the cache
           initialSize - the initial capacity (number of entries) of
                   the cache. (see java.util.HashMap)
           autowarmCount - the number of entries to prepopulate from
                   an old cache.
           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
                   to occupy. Note that when this option is specified, the size
                   and initialSize parameters are ignored.
    -->
    <filterCache class="solr.FastLRUCache"
                 size="512"
                 initialSize="512"
                 autowarmCount="0"/>

    <!-- Query Result Cache

         Caches results of searches - ordered lists of document ids
         (DocList) based on a query, a sort, and the range of documents requested.
         Additional supported parameter by LRUCache:
           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
                      to occupy
    -->
    <queryResultCache class="solr.LRUCache"
                      size="512"
                      initialSize="512"
                      autowarmCount="0"/>

    <!-- Document Cache

         Caches Lucene Document objects (the stored fields for each
         document). Since Lucene internal document ids are transient,
         this cache will not be autowarmed.
    -->
    <documentCache class="solr.LRUCache"
                   size="512"
                   initialSize="512"
                   autowarmCount="0"/>

    <!-- custom cache currently used by block join -->
    <cache name="perSegFilter"
           class="solr.search.LRUCache"
           size="10"
           initialSize="0"
           autowarmCount="10"
           regenerator="solr.NoOpRegenerator" />

    <!-- Lazy Field Loading

         If true, stored fields that are not requested will be loaded
         lazily. This can result in a significant speed improvement
         if the usual case is to not load all stored fields,
         especially if the skipped fields are large compressed text
         fields.
    -->
    <enableLazyFieldLoading>true</enableLazyFieldLoading>

    <!-- Result Window Size

         An optimization for use with the queryResultCache. When a search
         is requested, a superset of the requested number of document ids
         are collected. For example, if a search for a particular query
         requests matching documents 10 through 19, and queryWindowSize is 50,
         then documents 0 through 49 will be collected and cached. Any further
         requests in that range can be satisfied via the cache.
    -->
    <queryResultWindowSize>20</queryResultWindowSize>

    <!-- Maximum number of documents to cache for any entry in the
         queryResultCache.
    -->
    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>

    <!-- Use Cold Searcher

         If a search request comes in and there is no current
         registered searcher, then immediately register the still
         warming searcher and use it. If "false" then all requests
         will block until the first searcher is done warming.
    -->
    <useColdSearcher>false</useColdSearcher>

  </query>


  <!-- Request Dispatcher

       This section contains instructions for how the SolrDispatchFilter
       should behave when processing requests for this SolrCore.

  -->
  <requestDispatcher>
    <httpCaching never304="true" />
  </requestDispatcher>

  <!-- Request Handlers

       http://wiki.apache.org/solr/SolrRequestHandler

       Incoming queries will be dispatched to a specific handler by name
       based on the path specified in the request.

       If a Request Handler is declared with startup="lazy", then it will
       not be initialized until the first request that uses it.

  -->
  <!-- SearchHandler

       http://wiki.apache.org/solr/SearchHandler

       For processing Search Queries, the primary Request Handler
       provided with Solr is "SearchHandler". It delegates to a sequence
       of SearchComponents (see below) and supports distributed
       queries across multiple shards
  -->
  <requestHandler name="/select" class="solr.SearchHandler">
    <!-- default values for query parameters can be specified, these
         will be overridden by parameters in the request
    -->
    <lst name="defaults">
      <str name="echoParams">explicit</str>
      <int name="rows">10</int>
    </lst>
  </requestHandler>

  <initParams path="/update/**,/select">
    <lst name="defaults">
      <str name="df">_text_</str>
    </lst>
  </initParams>

  <!-- Response Writers

       http://wiki.apache.org/solr/QueryResponseWriter

       Request responses will be written using the writer specified by
       the 'wt' request parameter matching the name of a registered
       writer.

       The "default" writer is the default and will be used if 'wt' is
       not specified in the request.
  -->
  <queryResponseWriter name="xml"
                       default="true"
                       class="solr.XMLResponseWriter" />
</config>

@@ -1,49 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>

<schema name="dovecot-fts" version="2.0">
  <fieldType name="string" class="solr.StrField" omitNorms="true" sortMissingLast="true"/>
  <fieldType name="long" class="solr.LongPointField" positionIncrementGap="0"/>
  <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>

  <fieldType name="text" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
    <analyzer type="index">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.EdgeNGramFilterFactory" minGramSize="3" maxGramSize="20"/>
      <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
      <filter class="solr.FlattenGraphFilterFactory"/>
      <filter class="solr.LowerCaseFilterFactory"/>
      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
      <filter class="solr.PorterStemFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
      <filter class="solr.FlattenGraphFilterFactory"/>
      <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
      <filter class="solr.LowerCaseFilterFactory"/>
      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
      <filter class="solr.PorterStemFilterFactory"/>
    </analyzer>
  </fieldType>

  <field name="id" type="string" indexed="true" required="true" stored="true"/>
  <field name="uid" type="long" indexed="true" required="true" stored="true"/>
  <field name="box" type="string" indexed="true" required="true" stored="true"/>
  <field name="user" type="string" indexed="true" required="true" stored="true"/>

  <field name="hdr" type="text" indexed="true" stored="false"/>
  <field name="body" type="text" indexed="true" stored="false"/>

  <field name="from" type="text" indexed="true" stored="false"/>
  <field name="to" type="text" indexed="true" stored="false"/>
  <field name="cc" type="text" indexed="true" stored="false"/>
  <field name="bcc" type="text" indexed="true" stored="false"/>
  <field name="subject" type="text" indexed="true" stored="false"/>

  <!-- Used by Solr internally: -->
  <field name="_version_" type="long" indexed="true" stored="true"/>

  <uniqueKey>id</uniqueKey>
</schema>

@@ -1,61 +0,0 @@
#!/bin/bash

if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "SKIP_SOLR=y, skipping Solr..."
  sleep 365d
  exit 0
fi

MEM_TOTAL=$(awk '/MemTotal/ {print $2}' /proc/meminfo)

if [[ "${1}" != "--bootstrap" ]]; then
  if [ ${MEM_TOTAL} -lt "2097152" ]; then
    echo "System memory less than 2 GB, skipping Solr..."
    sleep 365d
    exit 0
  fi
fi

set -e

# run the optional initdb
. /opt/docker-solr/scripts/run-initdb

# fixing volume permission
[[ -d /opt/solr/server/solr/dovecot-fts/data ]] && chown -R solr:solr /opt/solr/server/solr/dovecot-fts/data
if [[ "${1}" != "--bootstrap" ]]; then
  sed -i '/SOLR_HEAP=/c\SOLR_HEAP="'${SOLR_HEAP:-1024}'m"' /opt/solr/bin/solr.in.sh
else
  sed -i '/SOLR_HEAP=/c\SOLR_HEAP="256m"' /opt/solr/bin/solr.in.sh
fi

if [[ "${1}" == "--bootstrap" ]]; then
  echo "Creating initial configuration"
  echo "Modifying default config set"
  cp /solr-config-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/solrconfig.xml
  cp /solr-schema-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/schema.xml
  rm /opt/solr/server/solr/configsets/_default/conf/managed-schema

  echo "Starting local Solr instance to setup configuration"
  gosu solr start-local-solr

  echo "Creating core \"dovecot-fts\""
  gosu solr /opt/solr/bin/solr create -c "dovecot-fts"

  # See https://github.com/docker-solr/docker-solr/issues/27
  echo "Checking core"
  while ! wget -O - 'http://localhost:8983/solr/admin/cores?action=STATUS' | grep -q instanceDir; do
    echo "Could not find any cores, waiting..."
    sleep 3
  done

  echo "Created core \"dovecot-fts\""

  echo "Stopping local Solr"
  gosu solr stop-local-solr

  exit 0
fi

exec gosu solr solr-foreground

@@ -1,28 +1,36 @@
FROM alpine:3.17
|
||||
FROM alpine:3.21
|
||||
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
RUN apk add --update --no-cache \
|
||||
curl \
|
||||
bind-tools \
|
||||
coreutils \
|
||||
unbound \
|
||||
bash \
|
||||
openssl \
|
||||
drill \
|
||||
tzdata \
|
||||
syslog-ng \
|
||||
supervisor \
|
||||
&& curl -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache \
|
||||
&& chown root:unbound /etc/unbound \
|
||||
&& adduser unbound tty \
|
||||
&& adduser unbound tty \
|
||||
&& chmod 775 /etc/unbound
|
||||
|
||||
EXPOSE 53/udp 53/tcp
|
||||
|
||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||
|
||||
# healthcheck (nslookup)
|
||||
# healthcheck (dig, ping)
|
||||
COPY healthcheck.sh /healthcheck.sh
|
||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||
|
||||
RUN chmod +x /healthcheck.sh
|
||||
HEALTHCHECK --interval=30s --timeout=10s CMD [ "/healthcheck.sh" ]
|
||||
HEALTHCHECK --interval=30s --timeout=10s \
|
||||
CMD sh -c '[ -f /tmp/healthcheck_status ] && [ "$(cat /tmp/healthcheck_status)" -eq 0 ] || exit 1'
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
CMD ["/usr/sbin/unbound"]
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
|
|
|
|||
|
|
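The new HEALTHCHECK no longer runs the probe itself: healthcheck.sh (see the next file) loops under supervisord and continuously writes 0 or 1 to /tmp/healthcheck_status, and Docker's check only reads that file. To inspect the current state by hand, a sketch using the container name assumed elsewhere in this diff:

  docker exec "$(docker ps -qf name=unbound-mailcow)" cat /tmp/healthcheck_status
  # 0 = healthy, 1 = unhealthy; a missing file also reports unhealthy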
@@ -1,12 +1,102 @@
 #!/bin/bash

-nslookup mailcow.email 127.0.0.1 1> /dev/null
+STATUS_FILE="/tmp/healthcheck_status"
+RUNS=0

-if [ $? == 0 ]; then
-  echo "DNS resolution is working!"
-  exit 0
-else
-  echo "DNS resolution is not working correctly..."
-  echo "Maybe check your outbound firewall, as it needs to resolve DNS over TCP AND UDP!"
-  exit 1
-fi
+# Declare log function for logfile to stdout
+function log_to_stdout() {
+  echo "$(date +"%Y-%m-%d %H:%M:%S"): $1"
+}
+
+# General Ping function to check general pingability
+function check_ping() {
+  declare -a ipstoping=("1.1.1.1" "8.8.8.8" "9.9.9.9")
+  local fail_tolerance=1
+  local failures=0
+
+  for ip in "${ipstoping[@]}" ; do
+    success=false
+    for ((i=1; i<=3; i++)); do
+      ping -q -c 3 -w 5 "$ip" > /dev/null
+      if [ $? -eq 0 ]; then
+        success=true
+        break
+      else
+        log_to_stdout "Healthcheck: Failed to ping $ip on attempt $i. Trying again..."
+      fi
+    done
+
+    if [ "$success" = false ]; then
+      log_to_stdout "Healthcheck: Couldn't ping $ip after 3 attempts. Marking this IP as failed."
+      ((failures++))
+    fi
+  done
+
+  if [ $failures -gt $fail_tolerance ]; then
+    log_to_stdout "Healthcheck: Too many ping failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
+    return 1
+  fi
+
+  return 0
+}
+
+# General DNS Resolve Check against Unbound Resolver himself
+function check_dns() {
+  declare -a domains=("fuzzy.mailcow.email" "github.com" "hub.docker.com")
+  local fail_tolerance=1
+  local failures=0
+
+  for domain in "${domains[@]}" ; do
+    success=false
+    for ((i=1; i<=3; i++)); do
+      dig_output=$(dig +short +timeout=2 +tries=1 "$domain" @127.0.0.1 2>/dev/null)
+      dig_rc=$?
+
+      if [ $dig_rc -ne 0 ] || [ -z "$dig_output" ]; then
+        log_to_stdout "Healthcheck: DNS Resolution Failed on attempt $i for $domain! Trying again..."
+      else
+        success=true
+        break
+      fi
+    done
+
+    if [ "$success" = false ]; then
+      log_to_stdout "Healthcheck: DNS Resolution not possible after 3 attempts for $domain... Gave up!"
+      ((failures++))
+    fi
+  done
+
+  if [ $failures -gt $fail_tolerance ]; then
+    log_to_stdout "Healthcheck: Too many DNS failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
+    return 1
+  fi
+
+  return 0
+}
+
+while true; do
+
+  if [[ ${SKIP_UNBOUND_HEALTHCHECK} == "y" ]]; then
+    log_to_stdout "Healthcheck: ALL CHECKS WERE SKIPPED! Unbound is healthy!"
+    echo "0" > $STATUS_FILE
+    sleep 365d
+  fi
+
+  # run checks, if check is not returning 0 (return value if check is ok), healthcheck will exit with 1 (marked in docker as unhealthy)
+  check_ping
+  PING_STATUS=$?
+
+  check_dns
+  DNS_STATUS=$?
+
+  if [ $PING_STATUS -ne 0 ] || [ $DNS_STATUS -ne 0 ]; then
+    echo "1" > $STATUS_FILE
+  else
+    echo "0" > $STATUS_FILE
+  fi
+
+  sleep 30
+
+done
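The tolerance logic above is deliberately lenient: with fail_tolerance=1 and three targets per check, one target may fail all three attempts and the check still passes; only two or more dead targets flip the status file to 1. A minimal rehearsal of the comparison, runnable on its own:

  failures=2; fail_tolerance=1
  [ $failures -gt $fail_tolerance ] && echo unhealthy   # prints: unhealthy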
data/Dockerfiles/unbound/stop-supervisor.sh (new executable file, 10 lines)

@@ -0,0 +1,10 @@
+#!/bin/bash
+
+printf "READY\n";
+
+while read line; do
+  echo "Processing Event: $line" >&2;
+  kill -3 $(cat "/var/run/supervisord.pid")
+done < /dev/stdin
+
+rm -rf /tmp/healthcheck_status
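This script is wired up as a supervisord event listener (see the [eventlistener:processes] section in the next file): it prints READY once and then kills supervisord as soon as any event line arrives. For contrast, a minimal sketch of the full listener handshake it deliberately skips, assuming the standard supervisor event protocol:

  #!/bin/bash
  while true; do
    printf "READY\n"                        # ready to accept the next event
    read -r header                          # e.g. "ver:3.0 ... eventname:PROCESS_STATE_EXITED len:71"
    head -c "${header##*len:}" >/dev/null   # consume and discard the event payload
    printf "RESULT 2\nOK"                   # acknowledge, then loop
  done

Since stop-supervisor.sh's only job is to take the whole container down, acknowledging with RESULT/OK would be pointless; the first event already triggers the shutdown.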
data/Dockerfiles/unbound/supervisord.conf (new file, 32 lines)

@@ -0,0 +1,32 @@
+[supervisord]
+nodaemon=true
+user=root
+pidfile=/var/run/supervisord.pid
+
+[program:syslog-ng]
+command=/usr/sbin/syslog-ng --foreground --no-caps
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autostart=true
+
+[program:unbound]
+command=/usr/sbin/unbound
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=true
+
+[program:unbound-healthcheck]
+command=/bin/bash /healthcheck.sh
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=true
+
+[eventlistener:processes]
+command=/usr/local/sbin/stop-supervisor.sh
+events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
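The event listener means that any of the three supervised programs dying (STOPPED, EXITED or FATAL) tears the whole container down: kill -3 sends SIGQUIT, one of the signals supervisord treats as a shutdown request. Roughly the same effect by hand, assuming the paths above:

  kill -3 "$(cat /var/run/supervisord.pid)"
  # or, equivalently:
  supervisorctl -c /etc/supervisor/supervisord.conf shutdown

Docker then sees the container exit (and, because stop-supervisor.sh removes /tmp/healthcheck_status, the healthcheck can no longer report healthy), so a restart policy can bring a fresh container up.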
data/Dockerfiles/unbound/syslog-ng.conf (new file, 21 lines)

@@ -0,0 +1,21 @@
+@version: 4.5
+@include "scl.conf"
+options {
+  chain_hostnames(off);
+  flush_lines(0);
+  use_dns(no);
+  use_fqdn(no);
+  owner("root"); group("adm"); perm(0640);
+  stats(freq(0));
+  keep_timestamp(no);
+  bad_hostname("^gconfd$");
+};
+source s_dgram {
+  unix-dgram("/dev/log");
+  internal();
+};
+destination d_stdout { pipe("/dev/stdout"); };
+log {
+  source(s_dgram);
+  destination(d_stdout);
+};
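Everything written to /dev/log inside the container, plus syslog-ng's own internal() messages, is piped straight to stdout so it lands in docker logs. A quick smoke test from inside the container (illustrative):

  logger "unbound container syslog test"   # should appear in docker logs for the container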
@@ -1,5 +1,6 @@
-FROM alpine:3.17
-LABEL maintainer "André Peters <andre.peters@servercow.de>"
+FROM alpine:3.21
+
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

 # Installation
 RUN apk add --update \

@@ -15,7 +16,6 @@ RUN apk add --update \
   fcgi \
   openssl \
   nagios-plugins-mysql \
-  nagios-plugins-dns \
   nagios-plugins-disk \
   bind-tools \
   redis \

@@ -31,9 +31,11 @@ RUN apk add --update \
   tzdata \
   whois \
   && curl https://raw.githubusercontent.com/mludvig/smtp-cli/v3.10/smtp-cli -o /smtp-cli \
-  && chmod +x smtp-cli
+  && chmod +x smtp-cli \
+  && mkdir /usr/lib/mailcow

 COPY watchdog.sh /watchdog.sh
 COPY check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
+COPY check_dns.sh /usr/lib/mailcow/check_dns.sh

-CMD /watchdog.sh
+CMD ["/watchdog.sh"]
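Two details ride along here: the watchdog gets its own /usr/lib/mailcow directory for the new check_dns.sh (replacing the dropped nagios-plugins-dns package), and CMD switches from shell form to exec form. That matters for signal handling:

  CMD /watchdog.sh       # old: PID 1 is "/bin/sh -c /watchdog.sh"; SIGTERM may stop sh, not the script
  CMD ["/watchdog.sh"]   # new: the script itself is PID 1, so the INT/TERM traps added below can fire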
data/Dockerfiles/watchdog/check_dns.sh (new executable file, 39 lines)

@@ -0,0 +1,39 @@
+#!/bin/sh
+
+while getopts "H:s:" opt; do
+  case "$opt" in
+    H) HOST="$OPTARG" ;;
+    s) SERVER="$OPTARG" ;;
+    *) echo "Usage: $0 -H host -s server"; exit 3 ;;
+  esac
+done
+
+if [ -z "$SERVER" ]; then
+  echo "No DNS Server provided"
+  exit 3
+fi
+
+if [ -z "$HOST" ]; then
+  echo "No host to test provided"
+  exit 3
+fi
+
+# run dig and measure the time it takes to run
+START_TIME=$(date +%s%3N)
+dig_output=$(dig +short +timeout=2 +tries=1 "$HOST" @"$SERVER" 2>/dev/null)
+dig_rc=$?
+dig_output_ips=$(echo "$dig_output" | grep -E '^[0-9.]+$' | sort | paste -sd ',' -)
+END_TIME=$(date +%s%3N)
+ELAPSED_TIME=$((END_TIME - START_TIME))
+
+# validate and perform nagios like output and exit codes
+if [ $dig_rc -ne 0 ] || [ -z "$dig_output" ]; then
+  echo "Domain $HOST was not found by the server"
+  exit 2
+elif [ $dig_rc -eq 0 ]; then
+  echo "DNS OK: $ELAPSED_TIME ms response time. $HOST returns $dig_output_ips"
+  exit 0
+else
+  echo "Unknown error"
+  exit 3
+fi
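The script mirrors the Nagios plugin convention it replaces: exit 0 (OK), 2 (CRITICAL), 3 (UNKNOWN), plus a human-readable status line. A hypothetical invocation against the unbound container (address, timing and answer invented for illustration):

  /usr/lib/mailcow/check_dns.sh -s 172.22.1.254 -H mailcow.email
  # DNS OK: 14 ms response time. mailcow.email returns 203.0.113.25

Note that because the elif tests dig_rc -eq 0 right after the failure branch, the final else is unreachable; it exists only as a defensive UNKNOWN fallback.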
@@ -49,7 +49,7 @@
 # 2013101601 Optical clean up #
 # 2013101602 Rewrite help output #
 # 2013101700 Handle Slave IO in 'Connecting' state #
-# 2013101701 Minor changes in output, handling UNKWNON situations now #
+# 2013101701 Minor changes in output, handling UNKNOWN situations now #
 # 2013101702 Exit CRITICAL when Slave IO in Connecting state #
 # 2013123000 Slave_SQL_Running also matched Slave_SQL_Running_State #
 # 2015011600 Added 'moving' check to catch possible connection issues #

@@ -131,10 +131,10 @@ elif [[ -n "${socket}" && (-z "${user}" || -z "${password}") ]]; then
 fi

 # Connect to the DB server and store output in vars
 if [[ -n $socket ]]; then
-  ConnectionResult=$(mysql ${optfile} ${socket} ${user} -e "show slave ${connection} status\G" 2>&1)
+  ConnectionResult=$(mariadb --skip-ssl ${optfile} ${socket} ${user} -e "show slave ${connection} status\G" 2>&1)
 else
-  ConnectionResult=$(mysql ${optfile} ${host} ${port} ${user} -e "show slave ${connection} status\G" 2>&1)
+  ConnectionResult=$(mariadb --skip-ssl ${optfile} ${host} ${port} ${user} -e "show slave ${connection} status\G" 2>&1)
 fi

 if [ -z "`echo "${ConnectionResult}" |grep Slave_IO_State`" ]; then
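The switch from the mysql client to mariadb --skip-ssl (here and again in watchdog.sh below) is presumably needed because newer MariaDB clients try to negotiate TLS, and verify the server certificate, by default, which fails for the local-socket and in-network connections mailcow uses. A minimal manual equivalent, assuming the same socket path:

  mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -uroot -e "SELECT 1"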
@@ -178,33 +178,33 @@ if [ ${check} = ${ok} ] && [ ${checkio} = ${ok} ]; then
(indentation-only changes in this hunk; the logic is unchanged)
  then echo "CRITICAL: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_CRITICAL}
  elif [[ ${delayinfo} -ge ${warn_delay} ]]
  then echo "WARNING: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_WARNING}
  else
    # Everything looks OK here but now let us check if the replication is moving
    if [[ -n ${moving} ]] && [[ -n ${tmpfile} ]] && [[ $readpos -eq $execpos ]]
    then
      #echo "Debug: Read pos is $readpos - Exec pos is $execpos"
      # Check if tmp file exists
      curtime=`date +%s`
      if [[ -w $tmpfile ]]
      then
        tmpfiletime=`date +%s -r $tmpfile`
        if [[ `expr $curtime - $tmpfiletime` -gt ${moving} ]]
        then
          exectmp=`cat $tmpfile`
          #echo "Debug: Exec pos in tmpfile is $exectmp"
          if [[ $exectmp -eq $execpos ]]
          then
            # The value read from the tmp file and from db are the same. Replication hasnt moved!
            echo "WARNING: Slave replication has not moved in ${moving} seconds. Manual check required."; exit ${STATE_WARNING}
          else
            # Replication has moved since the tmp file was written. Delete tmp file and output OK.
            rm $tmpfile
            echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
          fi
        else
          echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
        fi
      else
        echo "$execpos" > $tmpfile
        echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
      fi
@@ -1,5 +1,10 @@
 #!/bin/bash

+if [ "${DEV_MODE}" != "n" ]; then
+  echo -e "\e[31mEnabled Debug Mode\e[0m"
+  set -x
+fi
+
 trap "exit" INT TERM
 trap "kill 0" EXIT
@@ -19,9 +24,11 @@ fi

 if [[ "${WATCHDOG_VERBOSE}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   SMTP_VERBOSE="--verbose"
+  CURL_VERBOSE="--verbose"
   set -xv
 else
   SMTP_VERBOSE=""
+  CURL_VERBOSE=""
   exec 2>/dev/null
 fi
@@ -31,16 +38,16 @@ if [[ ! -p /tmp/com_pipe ]]; then
 fi

 # Wait for containers
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
   echo "Waiting for SQL..."
   sleep 2
 done

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi

 until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
@@ -97,7 +104,9 @@ log_msg() {
   echo $(date) $(printf '%s\n' "${1}")
 }

-function mail_error() {
+function notify_error() {
+  # Check if one of the notification options is enabled
+  [[ -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ -z ${WATCHDOG_NOTIFY_WEBHOOK} ]] && return 0
   THROTTLE=
   [[ -z ${1} ]] && return 1
   # If exists, body will be the content of "/tmp/${1}", even if ${2} is set
@@ -122,37 +131,61 @@ function mail_error() {
   else
     SUBJECT="${WATCHDOG_SUBJECT}: ${1}"
   fi
-  IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
-  for rcpt in "${MAIL_RCPTS[@]}"; do
-    RCPT_DOMAIN=
-    RCPT_MX=
-    RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
-    CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx)
-    if [[ -z ${CHECK_FOR_VALID_MX} ]]; then
-      log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
-      return 1
-    fi
-    [ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
-    timeout 10s ./smtp-cli --missing-modules-ok \
-      "${SMTP_VERBOSE}" \
-      --charset=UTF-8 \
-      --subject="${SUBJECT}" \
-      --body-plain="${BODY}" \
-      --add-header="X-Priority: 1" \
-      --to=${rcpt} \
-      --from="watchdog@${MAILCOW_HOSTNAME}" \
-      --hello-host=${MAILCOW_HOSTNAME} \
-      --ipv4
-    if [[ $? -eq 1 ]]; then # exit code 1 is fine
-      log_msg "Sent notification email to ${rcpt}"
-    else
-      if [[ "${SMTP_VERBOSE}" == "" ]]; then
-        log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf."
-      else
-        log_msg "Error while sending notification email to ${rcpt}."
-      fi
-    fi
-  done
+
+  # Send mail notification if enabled
+  if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
+    IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
+    for rcpt in "${MAIL_RCPTS[@]}"; do
+      RCPT_DOMAIN=
+      RCPT_MX=
+      RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
+      CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx)
+      if [[ -z ${CHECK_FOR_VALID_MX} ]]; then
+        log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
+        return 1
+      fi
+      [ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
+      timeout 10s ./smtp-cli --missing-modules-ok \
+        "${SMTP_VERBOSE}" \
+        --charset=UTF-8 \
+        --subject="${SUBJECT}" \
+        --body-plain="${BODY}" \
+        --add-header="X-Priority: 1" \
+        --to=${rcpt} \
+        --from="watchdog@${MAILCOW_HOSTNAME}" \
+        --hello-host=${MAILCOW_HOSTNAME} \
+        --ipv4
+      if [[ $? -eq 1 ]]; then # exit code 1 is fine
+        log_msg "Sent notification email to ${rcpt}"
+      else
+        if [[ "${SMTP_VERBOSE}" == "" ]]; then
+          log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf."
+        else
+          log_msg "Error while sending notification email to ${rcpt}."
+        fi
+      fi
+    done
+  fi
+
+  # Send webhook notification if enabled
+  if [[ ! -z ${WATCHDOG_NOTIFY_WEBHOOK} ]]; then
+    if [[ -z ${WATCHDOG_NOTIFY_WEBHOOK_BODY} ]]; then
+      log_msg "No webhook body set, skipping webhook notification..."
+      return 1
+    fi
+
+    # Escape subject and body (https://stackoverflow.com/a/2705678)
+    ESCAPED_SUBJECT=$(echo ${SUBJECT} | sed -e 's/[\/&]/\\&/g')
+    ESCAPED_BODY=$(echo ${BODY} | sed -e 's/[\/&]/\\&/g')
+
+    # Replace subject and body placeholders
+    WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed -e "s/\$SUBJECT\|\${SUBJECT}/$ESCAPED_SUBJECT/g" -e "s/\$BODY\|\${BODY}/$ESCAPED_BODY/g")
+
+    # POST to webhook
+    curl -X POST -H "Content-Type: application/json" ${CURL_VERBOSE} -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK}
+
+    log_msg "Sent notification using webhook"
+  fi
 }

 get_container_ip() {
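With notify_error in place, email and webhook delivery are independent: either, both, or neither can be configured. A hypothetical mailcow.conf pairing for the webhook path (URL invented; $SUBJECT/$BODY, or the ${...} forms, are substituted by the sed call above):

  WATCHDOG_NOTIFY_WEBHOOK=https://hooks.example.com/watchdog
  WATCHDOG_NOTIFY_WEBHOOK_BODY='{"text": "$SUBJECT - $BODY"}'

Since the body is POSTed with Content-Type: application/json, the template should expand to valid JSON.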
@@ -167,12 +200,12 @@ get_container_ip() {
   else
     sleep 0.5
     # get long container id for exact match
-    CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
+    CONTAINER_ID=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
     # returned id can have multiple elements (if scaled), shuffle for random test
     CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
     if [[ ! -z ${CONTAINER_ID} ]]; then
       for matched_container in "${CONTAINER_ID[@]}"; do
-        CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
+        CONTAINER_IPS=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
         for ip_match in "${CONTAINER_IPS[@]}"; do
           # grep will do nothing if one of these vars is empty
           [[ -z ${ip_match} ]] && continue
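Every dockerapi call now uses the network-scoped name dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network instead of the bare service name, which keeps lookups unambiguous when several compose projects share one host. With mailcow's default project name this resolves as, for example:

  curl --silent --insecure https://dockerapi.mailcowdockerized_mailcow-network/containers/json | jq length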
@@ -197,7 +230,7 @@ get_container_ip() {
 # One-time check
 if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then
   if [[ -z "$(get_ipv6)" ]]; then
-    mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
+    notify_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
   fi
 fi
@@ -206,7 +239,7 @@ external_checks() {
   diff_c=0
   THRESHOLD=${EXTERNAL_CHECKS_THRESHOLD}
   # Reduce error count by 2 after restarting an unhealthy container
-  GUID=$(mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
+  GUID=$(mariadb --skip-ssl -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
   trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
   while [ ${err_count} -lt ${THRESHOLD} ]; do
     err_c_cur=${err_count}
@@ -269,7 +302,7 @@ unbound_checks() {
     touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
     host_ip=$(get_container_ip unbound-mailcow)
     err_c_cur=${err_count}
-    /usr/lib/nagios/plugins/check_dns -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+    /usr/lib/mailcow/check_dns.sh -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
     DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
     if [[ -z ${DNSSEC} ]]; then
       echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
@@ -302,7 +335,7 @@ redis_checks() {
     touch /tmp/redis-mailcow; echo "$(tail -50 /tmp/redis-mailcow)" > /tmp/redis-mailcow
     host_ip=$(get_container_ip redis-mailcow)
     err_c_cur=${err_count}
-    /usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "PING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+    /usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "AUTH ${REDISPASS}\nPING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
     [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
     [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
     progress "Redis" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
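Redis is now password-protected, so the TCP probe has to AUTH before it can PING. The interactive equivalent of the new check, assuming REDISPASS is exported as in the surrounding script:

  redis-cli -h redis -p 6379 -a "${REDISPASS}" --no-auth-warning PING   # expected reply: PONG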
@@ -417,6 +450,31 @@ postfix_checks() {
   return 1
 }

+postfix-tlspol_checks() {
+  err_count=0
+  diff_c=0
+  THRESHOLD=${POSTFIX_TLSPOL_THRESHOLD}
+  # Reduce error count by 2 after restarting an unhealthy container
+  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+  while [ ${err_count} -lt ${THRESHOLD} ]; do
+    touch /tmp/postfix-tlspol-mailcow; echo "$(tail -50 /tmp/postfix-tlspol-mailcow)" > /tmp/postfix-tlspol-mailcow
+    host_ip=$(get_container_ip postfix-tlspol-mailcow)
+    err_c_cur=${err_count}
+    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 8642 2>> /tmp/postfix-tlspol-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+    progress "Postfix TLS Policy companion" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+    if [[ $? == 10 ]]; then
+      diff_c=0
+      sleep 1
+    else
+      diff_c=0
+      sleep $(( ( RANDOM % 60 ) + 20 ))
+    fi
+  done
+  return 1
+}
+
 clamd_checks() {
   err_count=0
   diff_c=0
@@ -475,12 +533,12 @@ dovecot_repl_checks() {
   err_count=0
   diff_c=0
   THRESHOLD=${DOVECOT_REPL_THRESHOLD}
-  D_REPL_STATUS=$(redis-cli -h redis -r GET DOVECOT_REPL_HEALTH)
+  D_REPL_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning -r GET DOVECOT_REPL_HEALTH)
   # Reduce error count by 2 after restarting an unhealthy container
   trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
   while [ ${err_count} -lt ${THRESHOLD} ]; do
     err_c_cur=${err_count}
-    D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
+    D_REPL_STATUS=$(redis-cli --raw -h redis -a ${REDISPASS} --no-auth-warning GET DOVECOT_REPL_HEALTH)
     if [[ "${D_REPL_STATUS}" != "1" ]]; then
       err_count=$(( ${err_count} + 1 ))
     fi
@@ -550,19 +608,19 @@ ratelimit_checks() {
   err_count=0
   diff_c=0
   THRESHOLD=${RATELIMIT_THRESHOLD}
-  RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
+  RL_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 0 | jq .qid)
   # Reduce error count by 2 after restarting an unhealthy container
   trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
   while [ ${err_count} -lt ${THRESHOLD} ]; do
     err_c_cur=${err_count}
     RL_LOG_STATUS_PREV=${RL_LOG_STATUS}
-    RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
+    RL_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 0 | jq .qid)
     if [[ ${RL_LOG_STATUS_PREV} != ${RL_LOG_STATUS} ]]; then
       err_count=$(( ${err_count} + 1 ))
       echo 'Last 10 applied ratelimits (may overlap with previous reports).' > /tmp/ratelimit
       echo 'Full ratelimit buckets can be emptied by deleting the ratelimit hash from within mailcow UI (see /debug -> Protocols -> Ratelimit):' >> /tmp/ratelimit
       echo >> /tmp/ratelimit
-      redis-cli --raw -h redis LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
+      redis-cli --raw -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
     fi
     [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
     [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
@@ -645,7 +703,7 @@ acme_checks() {
   err_count=0
   diff_c=0
   THRESHOLD=${ACME_THRESHOLD}
-  ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME)
+  ACME_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning GET ACME_FAIL_TIME)
   if [[ -z "${ACME_LOG_STATUS}" ]]; then
     ${REDIS_CMDLINE} SET ACME_FAIL_TIME 0
     ACME_LOG_STATUS=0
@@ -657,7 +715,7 @@ acme_checks() {
     ACME_LOG_STATUS_PREV=${ACME_LOG_STATUS}
     ACME_LC=0
     until [[ ! -z ${ACME_LOG_STATUS} ]] || [ ${ACME_LC} -ge 3 ]; do
-      ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME 2> /dev/null)
+      ACME_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning GET ACME_FAIL_TIME 2> /dev/null)
       sleep 3
       ACME_LC=$((ACME_LC+1))
     done
@@ -692,8 +750,8 @@ rspamd_checks() {
 From: watchdog@localhost

 Empty
-' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score)
-    if [[ ${SCORE} != "9999" ]]; then
+' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/scan | jq -rc .default.required_score | sed 's/\..*//' )
+    if [[ ${SCORE} -ne 9999 ]]; then
       echo "Rspamd settings check failed, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
       err_count=$(( ${err_count} + 1))
     else
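Two fixes in one line here: the rspamd socket is addressed via the network-scoped name, and required_score, which jq can return as a float such as 9999.0, is truncated before an arithmetic comparison, where the old string test != "9999" would mis-fire:

  echo '9999.0' | sed 's/\..*//'   # -> 9999, safe for [[ ... -ne 9999 ]]

(The pre-existing usr/bin/curl without a leading slash is untouched by the diff; it presumably only works because the watchdog runs with / as its working directory, as the ./smtp-cli calls above suggest.)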
@@ -746,8 +804,8 @@ olefy_checks() {
 }

 # Notify about start
-if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
-  mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
+if [[ ${WATCHDOG_NOTIFY_START} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  notify_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
 fi

 # Create watchdog agents
@@ -894,6 +952,18 @@ PID=$!
 echo "Spawned mailq_checks with PID ${PID}"
 BACKGROUND_TASKS+=(${PID})

+(
+while true; do
+  if ! postfix-tlspol_checks; then
+    log_msg "Postfix TLS Policy hit error limit"
+    echo postfix-tlspol-mailcow > /tmp/com_pipe
+  fi
+done
+) &
+PID=$!
+echo "Spawned postfix-tlspol_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
 (
 while true; do
   if ! dovecot_checks; then
@@ -966,6 +1036,7 @@ PID=$!
 echo "Spawned cert_checks with PID ${PID}"
 BACKGROUND_TASKS+=(${PID})

+if [[ "${SKIP_OLEFY}" =~ ^([nN][oO]|[nN])+$ ]]; then
 (
 while true; do
   if ! olefy_checks; then
@@ -977,6 +1048,7 @@ done
 PID=$!
 echo "Spawned olefy_checks with PID ${PID}"
 BACKGROUND_TASKS+=(${PID})
+fi

 (
 while true; do
@@ -1029,33 +1101,33 @@ while true; do
   fi
   if [[ ${com_pipe_answer} == "ratelimit" ]]; then
     log_msg "At least one ratelimit was applied"
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+    notify_error "${com_pipe_answer}"
   elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then
     log_msg "Mail queue status is critical"
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+    notify_error "${com_pipe_answer}"
   elif [[ ${com_pipe_answer} == "external_checks" ]]; then
     log_msg "Your mailcow is an open relay!"
     # Define $2 to override message text, else print service was restarted at ...
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
+    notify_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
   elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then
     log_msg "MySQL replication is not working properly"
     # Define $2 to override message text, else print service was restarted at ...
     # Once mail per 10 minutes
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status" 600
+    notify_error "${com_pipe_answer}" "Please check the SQL replication status" 600
   elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then
     log_msg "Dovecot replication is not working properly"
     # Define $2 to override message text, else print service was restarted at ...
     # Once mail per 10 minutes
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
+    notify_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
   elif [[ ${com_pipe_answer} == "certcheck" ]]; then
     log_msg "Certificates are about to expire"
     # Define $2 to override message text, else print service was restarted at ...
     # Only mail once a day
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please renew your certificate" 86400
+    notify_error "${com_pipe_answer}" "Please renew your certificate" 86400
   elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then
     log_msg "acme-mailcow did not complete successfully"
     # Define $2 to override message text, else print service was restarted at ...
-    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
+    notify_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
   elif [[ ${com_pipe_answer} == "fail2ban" ]]; then
     F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null))
     if [[ ! -z "${F2B_RES}" ]]; then
@@ -1065,18 +1137,18 @@ while true; do
       log_msg "Banned ${host}"
       rm /tmp/fail2ban 2> /dev/null
       timeout 2s whois "${host}" > /tmp/fail2ban
-      [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}"
+      [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && notify_error "${com_pipe_answer}" "IP ban: ${host}"
     done
   fi
 elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
   kill -STOP ${BACKGROUND_TASKS[*]}
   sleep 10
-  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
+  CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
   if [[ ! -z ${CONTAINER_ID} ]]; then
     if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
-      HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
+      HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
     fi
-    S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
+    S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
     if [ ${S_RUNNING} -lt 360 ]; then
       log_msg "Container is running for less than 360 seconds, skipping action..."
     elif [[ ! -z ${HAS_INITDB} ]]; then
@@ -1084,8 +1156,8 @@ while true; do
       sleep 60
     else
       log_msg "Sending restart command to ${CONTAINER_ID}..."
-      curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
-      [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+      curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
+      notify_error "${com_pipe_answer}"
       log_msg "Wait for restarted container to settle and continue watching..."
       sleep 35
     fi
@@ -1095,3 +1167,4 @@ while true; do
     kill -USR1 ${BACKGROUND_TASKS[*]}
   fi
 done
+
@@ -1,130 +0,0 @@
-map $http_x_forwarded_proto $client_req_scheme_nc {
-  default $scheme;
-  https https;
-}
-
-server {
-  include /etc/nginx/conf.d/listen_ssl.active;
-  include /etc/nginx/conf.d/listen_plain.active;
-  include /etc/nginx/mime.types;
-  charset utf-8;
-  override_charset on;
-
-  ssl_certificate /etc/ssl/mail/cert.pem;
-  ssl_certificate_key /etc/ssl/mail/key.pem;
-  ssl_protocols TLSv1.2 TLSv1.3;
-  ssl_prefer_server_ciphers on;
-  ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305;
-  ssl_ecdh_curve X25519:X448:secp384r1:secp256k1;
-  ssl_session_cache shared:SSL:50m;
-  ssl_session_timeout 1d;
-  ssl_session_tickets off;
-  add_header Referrer-Policy "no-referrer" always;
-  add_header X-Content-Type-Options "nosniff" always;
-  add_header X-Download-Options "noopen" always;
-  add_header X-Frame-Options "SAMEORIGIN" always;
-  add_header X-Permitted-Cross-Domain-Policies "none" always;
-  add_header X-Robots-Tag "noindex, nofollow" always;
-  add_header X-XSS-Protection "1; mode=block" always;
-
-  fastcgi_hide_header X-Powered-By;
-
-  server_name NC_SUBD;
-
-  root /web/nextcloud/;
-
-  location = /robots.txt {
-    allow all;
-    log_not_found off;
-    access_log off;
-  }
-
-  location = /.well-known/carddav {
-    return 301 $client_req_scheme_nc://$host/remote.php/dav;
-  }
-
-  location = /.well-known/caldav {
-    return 301 $client_req_scheme_nc://$host/remote.php/dav;
-  }
-
-  location = /.well-known/webfinger {
-    return 301 $client_req_scheme_nc://$host/index.php/.well-known/webfinger;
-  }
-
-  location = /.well-known/nodeinfo {
-    return 301 $client_req_scheme_nc://$host/index.php/.well-known/nodeinfo;
-  }
-
-  location ^~ /.well-known/acme-challenge/ {
-    default_type "text/plain";
-    root /web;
-  }
-
-  fastcgi_buffers 64 4K;
-
-  gzip on;
-  gzip_vary on;
-  gzip_comp_level 4;
-  gzip_min_length 256;
-  gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
-  gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
-  set_real_ip_from fc00::/7;
-  set_real_ip_from 10.0.0.0/8;
-  set_real_ip_from 172.16.0.0/12;
-  set_real_ip_from 192.168.0.0/16;
-  real_ip_header X-Forwarded-For;
-  real_ip_recursive on;
-
-  location / {
-    rewrite ^ /index.php$uri;
-  }
-
-  location ~ ^\/(?:build|tests|config|lib|3rdparty|templates|data)\/ {
-    deny all;
-  }
-  location ~ ^\/(?:\.|autotest|occ|issue|indie|db_|console) {
-    deny all;
-  }
-
-  location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+)\.php(?:$|\/) {
-    fastcgi_split_path_info ^(.+?\.php)(\/.*|)$;
-    set $path_info $fastcgi_path_info;
-    try_files $fastcgi_script_name =404;
-    include fastcgi_params;
-    fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
-    fastcgi_param PATH_INFO $path_info;
-    fastcgi_param HTTPS on;
-    # Avoid sending the security headers twice
-    fastcgi_param modHeadersAvailable true;
-    # Enable pretty urls
-    fastcgi_param front_controller_active true;
-    fastcgi_pass phpfpm:9002;
-    fastcgi_intercept_errors on;
-    fastcgi_request_buffering off;
-    client_max_body_size 0;
-    fastcgi_read_timeout 1200;
-  }
-
-  location ~ ^\/(?:updater|oc[ms]-provider)(?:$|\/) {
-    try_files $uri/ =404;
-    index index.php;
-  }
-
-  location ~ \.(?:css|js|woff2?|svg|gif|map)$ {
-    try_files $uri /index.php$request_uri;
-    add_header Cache-Control "public, max-age=15778463";
-    add_header Referrer-Policy "no-referrer" always;
-    add_header X-Content-Type-Options "nosniff" always;
-    add_header X-Download-Options "noopen" always;
-    add_header X-Frame-Options "SAMEORIGIN" always;
-    add_header X-Permitted-Cross-Domain-Policies "none" always;
-    add_header X-Robots-Tag "none" always;
-    add_header X-XSS-Protection "1; mode=block" always;
-    access_log off;
-  }
-
-  location ~ \.(?:png|html|ttf|ico|jpg|jpeg|bcmap)$ {
-    try_files $uri /index.php$request_uri;
-    access_log off;
-  }
-}
@@ -1,2 +0,0 @@
-#!/bin/bash
-docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) php /web/nextcloud/occ ${@}
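The removed two-line helper forwarded arbitrary arguments to Nextcloud's occ inside the php-fpm-mailcow container, e.g.:

  ./occ status   # expanded to: php /web/nextcloud/occ status, run as www-data

Its removal tracks the deletion of the bundled Nextcloud site config above.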
@@ -1,8 +1,8 @@
 -----BEGIN DH PARAMETERS-----
-MIIBCAKCAQEA9iHB0CRDhV8wfBgqnmvuJpl0fzL3qL75R4ZvQHlfMNLrxuIz2x9D
-9zcDhPcBTVzV5Ay0AAkke4wP6r6wDQqXqBP4Y8IOkYAyLh3jM40jfHQzQt+5JdQl
-ond3kiscBsFOch/vMfSLMu3lAb0YhPNTvrxhMz7LcVAWYl82swASupdiKR+MgaQr
-XsugpmDKsHW60VmIM9B7K9Y+rNHwvMWkmISd0KxA8oOy1WJvsVEissMALZDE3c4w
-2xHmO2lXxgEx3aez28736t4m/KW3g9Zr31a1M0KusmfY//fGkPk4NUrLBOS2xrgp
-Y/rG1qSBdcVyerM0Ki93qCyHKYu4ene0OwIBAg==
+MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
++8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
+87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
+YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
+7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
+ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
 -----END DH PARAMETERS-----
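The replacement appears to be the standardized 2048-bit ffdhe2048 group from RFC 7919, recognizable by the runs of all-one bits at both ends of the prime, rather than a locally generated parameter set, which makes the shipped file auditable. One way to inspect it (file name assumed from the repo layout):

  openssl dhparam -in dhparams.pem -text -noout | head -n 4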
data/assets/templates/pw_reset_html.tpl (new file, 29 lines)

@@ -0,0 +1,29 @@
+<html>
+<head>
+  <meta name="x-apple-disable-message-reformatting" />
+  <style>
+    body {
+      font-family: Helvetica, Arial, Sans-Serif;
+    }
+    /* mobile devices */
+    @media all and (max-width: 480px) {
+      .mob {
+        display: none;
+      }
+    }
+  </style>
+</head>
+<body>
+  Hello {{username2}},<br><br>
+
+  Somebody requested a new password for the {{hostname}} account associated with {{username}}.<br>
+  <small>Date of the password reset request: {{date}}</small><br><br>
+
+  You can reset your password by clicking the link below:<br>
+  <a href="{{link}}">{{link}}</a><br><br>
+
+  The link will be valid for the next {{token_lifetime}} minutes.<br><br>
+
+  If you did not request a new password, please ignore this email.<br>
+</body>
+</html>
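The {{...}} tokens are substituted server-side when the reset mail is rendered; judging purely from their use in the template, the mapping is presumably:

  {{username}}        mailbox the reset was requested for
  {{username2}}       name used in the greeting
  {{hostname}}        the mailcow host sending the mail
  {{date}}            timestamp of the request
  {{link}}            one-time reset URL
  {{token_lifetime}}  validity window, in minutes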
Some files were not shown because too many files have changed in this diff.