Compare commits: v0.6.5...py3-latest (1250 commits)
503 changed files with 12,074 additions and 55,873 deletions
`.forgejo/workflows/build-on-commit.yml` (new file, 40 lines)

```yaml
name: Build Docker Image on Commit

on:
  push:
    branches:
      - main
    tags:
      - '!' # Exclude tags

jobs:
  build-and-publish:
    runs-on: docker-builder

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set REPO_VARS
        id: repo-url
        run: |
          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV

      - name: Login to OCI registry
        run: |
          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin

      - name: Build and push Docker images
        run: |
          # Build Docker image with commit SHA
          docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
          docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}

          # Build Docker image with nightly tag
          docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
          docker push $REPO_HOST/$REPO_PATH:nightly

          # Remove local images to save storage
          docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
          docker rmi $REPO_HOST/$REPO_PATH:nightly
```
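For reference, the `Set REPO_VARS` step above derives the registry host by stripping the URL scheme from the server URL. A minimal sketch of the same derivation run locally (the host and repository values are hypothetical stand-ins for `${{ github.server_url }}` and `${{ github.repository }}`, not values from the diff):

```bash
#!/usr/bin/env bash
# Hypothetical stand-ins for the workflow's context variables.
server_url="https://git.example.com"
repo_path="alice/zeronet"

# Same sed expression as the workflow: drop the http:// or https:// scheme.
repo_host=$(echo "$server_url" | sed 's~http[s]*://~~g')

# The build step then produces image references shaped like these:
echo "$repo_host/$repo_path:0123abc"    # per-commit tag, e.g. git.example.com/alice/zeronet:0123abc
echo "$repo_host/$repo_path:nightly"    # rolling nightly tag
```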
`.forgejo/workflows/build-on-tag.yml` (new file, 37 lines)

```yaml
name: Build and Publish Docker Image on Tag

on:
  push:
    tags:
      - '*'

jobs:
  build-and-publish:
    runs-on: docker-builder

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set REPO_VARS
        id: repo-url
        run: |
          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV

      - name: Login to OCI registry
        run: |
          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin

      - name: Build and push Docker image
        run: |
          TAG=${{ github.ref_name }} # Get the tag name from the context
          # Build and push multi-platform Docker images
          docker build -t $REPO_HOST/$REPO_PATH:$TAG --push .
          # Tag and push latest
          docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
          docker push $REPO_HOST/$REPO_PATH:latest

          # Remove the local image to save storage
          docker rmi $REPO_HOST/$REPO_PATH:$TAG
          docker rmi $REPO_HOST/$REPO_PATH:latest
```
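One caveat worth flagging in the tag workflow: the comment promises multi-platform images, but `--push` only works when `docker build` is backed by BuildKit's `docker buildx build` (newer Docker releases alias it; the legacy builder rejects the flag), and multi-platform output additionally needs a `--platform` list. A hedged sketch of an explicit buildx equivalent (registry host, repository path, tag, and platform list are assumptions, not values from the diff):

```bash
#!/usr/bin/env bash
# Hypothetical values mirroring the workflow's REPO_HOST/REPO_PATH/TAG variables.
REPO_HOST=git.example.com
REPO_PATH=alice/zeronet
TAG=v0.9.0

# Requires a buildx builder, e.g.: docker buildx create --use
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t "$REPO_HOST/$REPO_PATH:$TAG" \
  -t "$REPO_HOST/$REPO_PATH:latest" \
  --push .
```

Tagging `latest` in the same invocation also removes the separate `docker tag`/`docker push` round-trip, and nothing needs `docker rmi` afterwards because buildx pushes straight from the builder.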
`.github/FUNDING.yml` (new file, 10 lines)

```yaml
github: canewsin
patreon: # Replace with a single Patreon username e.g., user1
open_collective: # Replace with a single Open Collective username e.g., user1
ko_fi: canewsin
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: canewsin
issuehunt: # Replace with a single IssueHunt username e.g., user1
otechie: # Replace with a single Otechie username e.g., user1
custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']
```
`.github/workflows/codeql-analysis.yml` (new file, 72 lines)

```yaml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ py3-latest ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ py3-latest ]
  schedule:
    - cron: '32 19 * * 2'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below).
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      # If the Autobuild fails above, remove it and uncomment the following three lines;
      # modify them (or add more) to build your code if your project requires it.

      # - run: |
      #     echo "Run, Build Application using script"
      #     ./location_of_script_within_repo/buildscript.sh

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
```
`.github/workflows/tests.yml` (new file, 51 lines)

```yaml
name: tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-20.04
    strategy:
      max-parallel: 16
      matrix:
        python-version: ["3.7", "3.8", "3.9"]

    steps:
      - name: Checkout ZeroNet
        uses: actions/checkout@v2
        with:
          submodules: "true"

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Prepare for installation
        run: |
          python3 -m pip install setuptools
          python3 -m pip install --upgrade pip wheel
          python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium

      - name: Install
        run: |
          python3 -m pip install --upgrade -r requirements.txt
          python3 -m pip list

      - name: Prepare for tests
        run: |
          openssl version -a
          echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6

      - name: Test
        run: |
          catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
          export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
          export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
          export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
          export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
          export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
          export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
          find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
          find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
          flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
```
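The two `find ... | xargs` lines near the end of the test step are a compact JSON lint: every `*.json` file must parse, or `json.load` raises and the step fails. A sketch of the same check in a more explicit loop form (assuming the `src/` tree the workflow scans):

```bash
#!/usr/bin/env bash
set -e
# Parse every .json file under src/; json.load raises on malformed JSON,
# and with set -e the failing python3 call aborts the whole script.
find src -name '*.json' -print0 | while IFS= read -r -d '' f; do
  python3 -c 'import json, sys; json.load(open(sys.argv[1]))' "$f"
  echo "$f [OK]"
done
```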
`.gitignore` (8 lines changed)

```
@@ -7,9 +7,14 @@ __pycache__/

# Hidden files
.*
!/.forgejo
!/.github
!/.gitignore
!/.travis.yml
!/.gitlab-ci.yml

# Temporary files
*.bak

# Data dir
data/*

@@ -26,3 +31,6 @@ tools/phantomjs

# ZeroNet config file
zeronet.conf

# ZeroNet log files
log/*
```
`.gitlab-ci.yml` (new file, 48 lines)

```yaml
stages:
  - test

.test_template: &test_template
  stage: test
  before_script:
    - pip install --upgrade pip wheel
    # Selenium and requests can't be installed without a requests hint on Python 3.4
    - pip install --upgrade requests>=2.22.0
    - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
    - pip install --upgrade -r requirements.txt
  script:
    - pip list
    - openssl version -a
    - python -m pytest -x plugins/CryptMessage/Test --color=yes
    - python -m pytest -x plugins/Bigfile/Test --color=yes
    - python -m pytest -x plugins/AnnounceLocal/Test --color=yes
    - python -m pytest -x plugins/OptionalManager/Test --color=yes
    - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini --color=yes
    - mv plugins/disabled-Multiuser plugins/Multiuser
    - python -m pytest -x plugins/Multiuser/Test --color=yes
    - mv plugins/disabled-Bootstrapper plugins/Bootstrapper
    - python -m pytest -x plugins/Bootstrapper/Test --color=yes
    - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/

test:py3.4:
  image: python:3.4.3
  <<: *test_template

test:py3.5:
  image: python:3.5.7
  <<: *test_template

test:py3.6:
  image: python:3.6.9
  <<: *test_template

test:py3.7-openssl1.1.0:
  image: python:3.7.0b5
  <<: *test_template

test:py3.7-openssl1.1.1:
  image: python:3.7.4
  <<: *test_template

test:py3.8:
  image: python:3.8.0b3
  <<: *test_template
```
`.gitmodules` (new file, 3 lines)

```
[submodule "plugins"]
	path = plugins
	url = https://github.com/ZeroNetX/ZeroNet-Plugins.git
```
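With plugins moved into a git submodule, a fresh checkout needs the submodule pulled explicitly. A minimal sketch (the clone URL follows the ZeroNetX repository links that appear later in this changelog; adjust for your own fork or mirror):

```bash
# Clone with the plugins submodule in one step:
git clone --recurse-submodules https://github.com/ZeroNetX/ZeroNet.git

# Or, after a plain clone, fetch the submodule contents:
cd ZeroNet
git submodule update --init plugins
```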
`.travis.yml` (42 lines changed; the compare view flattens the old and new sides of each hunk into one stream, reproduced here in source order)

```
@@ -1,11 +1,20 @@
language: python
python:
  - 2.7
  - 3.4
  - 3.5
  - 3.6
  - 3.7
  - 3.8
services:
  - docker
cache: pip
before_install:
  - pip install --upgrade pip wheel
  - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
  # - docker build -t zeronet .
  # - docker run -d -v $PWD:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 zeronet
install:
  - pip install -U pip wheel
  - pip install -r requirements.txt
  - pip install --upgrade -r requirements.txt
  - pip list
before_script:
  - openssl version -a

@@ -15,23 +24,22 @@ before_script:
      sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6';
    fi
script:
  - python -m pytest -x plugins/CryptMessage/Test
  - python -m pytest -x plugins/Bigfile/Test
  - python -m pytest -x plugins/AnnounceLocal/Test
  - python -m pytest -x plugins/OptionalManager/Test
  - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
before_install:
  - pip install -U pytest mock pytest-cov selenium
  - pip install codecov
  - pip install coveralls
  - docker build -t zeronet .
  - docker run -d -v $PWD:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 zeronet
  - catchsegv python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
  - export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python -m pytest -x plugins/CryptMessage/Test
  - export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python -m pytest -x plugins/Bigfile/Test
  - export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python -m pytest -x plugins/AnnounceLocal/Test
  - export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python -m pytest -x plugins/OptionalManager/Test
  - export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
  - export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
  - find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
  - find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
  - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
after_failure:
  - zip -r log.zip log/
  - curl --upload-file ./log.zip https://transfer.sh/log.zip
after_success:
  - codecov
  - coveralls --rcfile=src/Test/coverage.ini
cache:
  directories:
    - $HOME/.cache/pip
notifications:
  email:
    recipients:
```
Deleted file (134 lines, a Chinese-language translation of the changelog)

```
@@ -1,134 +0,0 @@
## ZeroNet 0.5.1 (2016-11-18)
### Added
- Multi-language user interface
- New plugin: translation helper for site HTML and JS files
- Per-site favicon

### Fixed
- Parallel optional file downloading

## ZeroNet 0.5.0 (2016-11-08)
### Added
- New plugin: allows listing/deleting/pinning/managing files on ZeroHello
- New API commands to follow a user's optional files, plus request stats for optional files
- New total size limit for optional files
- New plugin: saves peers to a database and keeps them between restarts, for faster optional file search and working without trackers
- Rewritten UPnP port opener + close port on exit (thanks sirMackk!)
- Reduced memory usage via lazy PeerHashfield creation
- Load JSON file statistics and database info on the /Stats page

### Changed
- Separate lock file for better Windows compatibility
- When running start.py, open the browser even if ZeroNet is already running
- Keep plugin order on reload so plugins can extend one another
- Only save sites.json when it is fully loaded, to avoid data loss
- Change more trackers to more reliable ones
- Lower findhashid CPU usage
- Download large batches of optional files in merged requests
- More optimizations for optional files
- More aggressive cleanup if a site has 1000 peers
- Use warnings instead of errors for verification errors
- Push updates to newer clients first
- Corrupt file reset improvements

### Fixed
- Fix site deletion errors on startup
- Delay WebSocket messages until the connection is established
- Fix database import when a file contains extra data
- Fix large site downloads
- Fix the diff-send bug (tracked it for a long time)
- Fix random publish errors when a JSON file contains [] characters
- Fix siteDelete and siteCreate bugs
- Fix the file-write confirmation dialog


## ZeroNet 0.4.1 (2016-09-05)
### Added
- Core changes for faster startup and lower memory usage
- Try to reconnect to Tor when the connection is lost
- Sidebar slide-in
- Try to avoid overwriting incomplete data files
- Faster database opening
- Show user file sizes in the sidebar
- Number of concurrent workers depends on --connection_limit

### Changed
- Close the database after 5 minutes of idle
- Better site size calculation
- Allow the "-" character in domain names
- Always try to keep connections for sites
- Remove merger permission from merged sites
- Only scan the last 3 days of newsfeed to speed up database queries
- Updated ZeroBundle-win to Python 2.7.12

### Fixed
- Fixed an important security issue: anyone could publish new content without a valid certificate from the ID provider; thanks to Kaffie for pointing it out
- Fix sidebar error when no certificate provider is selected
- Skip invalid files on database rebuild
- Fix randomly appearing WebSocket connection errors
- Fix the new siteCreate command
- Fix site size calculation
- Fix the port-open check after the computer wakes up
- Fix command-line parsing of --size_limit


## ZeroNet 0.4.0 (2016-08-11)
### Added
- Merger site plugin
- Live source code reloading: Faster core development by allowing me to make changes in ZeroNet source code without restarting it.
- New JSON table for merger sites
- Rebuild the database from the sidebar
- Allow storing custom data directly in JSON tables: simpler and faster SQL queries
- User file archiving: allows site owners to archive inactive user content into a single file (reduces initial sync time/CPU/memory usage)
- Also trigger database onUpdated/update on file deletion
- Request permissions from the ZeroFrame API
- Allow storing extra data in content.json using the fileWrite API command
- Faster optional file downloads
- Use alternative sources (Gogs, Gitlab) to download updates
- Track provided sites/connection and prefer to keep the ones with more sites to reduce connection number

### Changed
- Keep at least 5 connections per site
- Changed target site connections from 10 to 15
- ZeroHello search function stability/speed improvements
- Better client performance on mechanical hard disks

### Fixed
- Fix IE11 wrapper nonce errors
- Fix the sidebar on mobile devices
- Fix site size calculation
- Fix IE10 compatibility
- Windows XP ZeroBundle compatibility (thanks to the people of China)


## ZeroNet 0.3.7 (2016-05-27)
### Changed
- Reduce bandwidth usage by transferring only patches
- Other CPU/memory optimizations


## ZeroNet 0.3.6 (2016-05-27)
### Added
- New ZeroHello
- Newsfeed function

### Fixed
- Security fixes


## ZeroNet 0.3.5 (2016-02-02)
### Added
- Full Tor support with .onion hidden services
- Bootstrapping using the ZeroNet protocol

### Fixed
- Fix Gevent 1.0.2 compatibility


## ZeroNet 0.3.4 (2015-12-28)
### Added
- AES, ECIES API function support
- PushState and ReplaceState URL manipulation support via the API
- Multi-user localstorage
```
198
CHANGELOG.md
198
CHANGELOG.md
|
@ -1,3 +1,201 @@
|
|||
### ZeroNet 0.9.0 (2023-07-12) Rev4630
|
||||
- Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
|
||||
- Add trackers to Config.py for failsafety incase missing trackers.txt
|
||||
- Added Proxy links
|
||||
- Fix pysha3 dep installation issue
|
||||
- FileRequest -> Remove Unnecessary check, Fix error wording
|
||||
- Fix Response when site is missing for `actionAs`
|
||||
|
||||
|
||||
### ZeroNet 0.8.5 (2023-02-12) Rev4625
|
||||
- Fix(https://github.com/ZeroNetX/ZeroNet/pull/202) for SSL cert gen failed on Windows.
|
||||
- default theme-class for missing value in `users.json`.
|
||||
- Fetch Stats Plugin changes.
|
||||
|
||||
### ZeroNet 0.8.4 (2022-12-12) Rev4620
|
||||
- Increase Minimum Site size to 25MB.
|
||||
|
||||
### ZeroNet 0.8.3 (2022-12-11) Rev4611
|
||||
- main.py -> Fix accessing unassigned varible
|
||||
- ContentManager -> Support for multiSig
|
||||
- SiteStrorage.py -> Fix accessing unassigned varible
|
||||
- ContentManager.py Improve Logging of Valid Signers
|
||||
|
||||
### ZeroNet 0.8.2 (2022-11-01) Rev4610
|
||||
- Fix Startup Error when plugins dir missing
|
||||
- Move trackers to seperate file & Add more trackers
|
||||
- Config:: Skip loading missing tracker files
|
||||
- Added documentation for getRandomPort fn
|
||||
|
||||
### ZeroNet 0.8.1 (2022-10-01) Rev4600
|
||||
- fix readdress loop (cherry-pick previously added commit from conservancy)
|
||||
- Remove Patreon badge
|
||||
- Update README-ru.md (#177)
|
||||
- Include inner_path of failed request for signing in error msg and response
|
||||
- Don't Fail Silently When Cert is Not Selected
|
||||
- Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
|
||||
- Update FUNDING.yml
|
||||
|
||||
### ZeroNet 0.8.0 (2022-05-27) Rev4591
|
||||
- Revert File Open to catch File Access Errors.
|
||||
|
||||
### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
|
||||
- Use xescape(s) from zeronet-conservancy
|
||||
- actionUpdate response Optimisation
|
||||
- Fetch Plugins Repo Updates
|
||||
- Fix Unhandled File Access Errors
|
||||
- Create codeql-analysis.yml
|
||||
|
||||
### ZeroNet 0.7.9 (2022-05-26) Rev4585
|
||||
- Rust Version Compatibility for update Protocol msg
|
||||
- Removed Non Working Trakers.
|
||||
- Dynamically Load Trackers from Dashboard Site.
|
||||
- Tracker Supply Improvements.
|
||||
- Fix Repo Url for Bug Report
|
||||
- First Party Tracker Update Service using Dashboard Site.
|
||||
- remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
|
||||
|
||||
### ZeroNet 0.7.8 (2022-03-02) Rev4580
|
||||
- Update Plugins with some bug fixes and Improvements
|
||||
|
||||
### ZeroNet 0.7.6 (2022-01-12) Rev4565
|
||||
- Sync Plugin Updates
|
||||
- Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
|
||||
- Add More Default Plugins to Repo
|
||||
- Doubled Site Publish Limits
|
||||
- Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
|
||||
- UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
|
||||
- Moved Plugins to Seperate Repo
|
||||
- Added `access_key` variable in Config, this used to access restrited plugins when multiuser plugin is enabled. When MultiUserPlugin is enabled we cannot access some pages like /Stats, this key will remove such restriction with access key.
|
||||
- Added `last_connection_id_current_version` to ConnectionServer, helpful to estimate no of connection from current client version.
|
||||
- Added current version: connections to /Stats page. see the previous point.
|
||||
|
||||
### ZeroNet 0.7.5 (2021-11-28) Rev4560
|
||||
- Add more default trackers
|
||||
- Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
|
||||
- Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
|
||||
|
||||
### ZeroNet 0.7.3 (2021-11-28) Rev4555
|
||||
- Fix xrange is undefined error
|
||||
- Fix Incorrect viewport on mobile while loading
|
||||
- Tor-V3 Patch by anonymoose
|
||||
|
||||
|
||||
### ZeroNet 0.7.1 (2019-07-01) Rev4206
|
||||
### Added
|
||||
- Built-in logging console in the web UI to see what's happening in the background. (pull down top-right 0 button to see it)
|
||||
- Display database rebuild errors [Thanks to Lola]
|
||||
- New plugin system that allows to install and manage builtin/third party extensions to the ZeroNet client using the web interface.
|
||||
- Support multiple trackers_file
|
||||
- Add OpenSSL 1.1 support to CryptMessage plugin based on Bitmessage modifications [Thanks to radfish]
|
||||
- Display visual error message on startup errors
|
||||
- Fix max opened files changing on Windows platform
|
||||
- Display TLS1.3 compatibility on /Stats page
|
||||
- Add fake SNI and ALPN to peer connections to make it more like standard https connections
|
||||
- Hide and ignore tracker_proxy setting in Tor: Always mode as it's going to use Tor anyway.
|
||||
- Deny websocket connections from unknown origins
|
||||
- Restrict open_browser values to avoid RCE on sandbox escape
|
||||
- Offer access web interface by IP address in case of unknown host
|
||||
- Link to site's sidebar with "#ZeroNet:OpenSidebar" hash
|
||||
|
||||
### Changed
|
||||
- Allow .. in file names [Thanks to imachug]
|
||||
- Change unstable trackers
|
||||
- More clean errors on sites.json/users.json load error
|
||||
- Various tweaks for tracker rating on unstable connections
|
||||
- Use OpenSSL 1.1 dlls from default Python Windows distribution if possible
|
||||
- Re-factor domain resolving for easier domain plugins
|
||||
- Disable UDP connections if --proxy is used
|
||||
- New, decorator-based Websocket API permission system to avoid future typo mistakes
|
||||
|
||||
### Fixed
|
||||
- Fix parsing config lines that have no value
|
||||
- Fix start.py [Thanks to imachug]
|
||||
- Allow multiple values of the same key in the config file [Thanks ssdifnskdjfnsdjk for reporting]
|
||||
- Fix parsing config file lines that has % in the value [Thanks slrslr for reporting]
|
||||
- Fix bootstrapper plugin hash reloads [Thanks geekless for reporting]
|
||||
- Fix CryptMessage plugin OpenSSL dll loading on Windows (ZeroMail errors) [Thanks cxgreat2014 for reporting]
|
||||
- Fix startup error when using OpenSSL 1.1 [Thanks to imachug]
|
||||
- Fix a bug that did not loaded merged site data for 5 sec after the merged site got added
|
||||
- Fix typo that allowed to add new plugins in public proxy mode. [Thanks styromaniac for reporting]
|
||||
- Fix loading non-big files with "|all" postfix [Thanks to krzotr]
|
||||
- Fix OpenSSL cert generation error crash by change Windows console encoding to utf8
|
||||
|
||||
#### Wrapper html injection vulnerability [Reported by ivanq]
|
||||
|
||||
In ZeroNet before rev4188 the wrapper template variables was rendered incorrectly.
|
||||
|
||||
Result: The opened site was able to gain WebSocket connection with unrestricted ADMIN/NOSANDBOX access, change configuration values and possible RCE on client's machine.
|
||||
|
||||
Fix: Fixed the template rendering code, disallowed WebSocket connections from unknown locations, restricted open_browser configuration values to avoid possible RCE in case of sandbox escape.
|
||||
|
||||
Note: The fix is also back ported to ZeroNet Py 2.x version (Rev3870)
|
||||
|
||||
|
||||
## ZeroNet 0.7.0 (2019-06-12) Rev4106 (First release targeting Python 3.4+)
### Added
- 5-10x faster signature verification by using libsecp256k1 (Thanks to ZeroMux)
- Generated SSL certificate randomization to avoid protocol filters (Thanks to ValdikSS)
- Offline mode
- P2P source code update using the ZeroNet protocol
- ecdsaSign/Verify commands in the CryptMessage plugin (Thanks to imachug)
- Efficient file rename: change file names instead of re-downloading the file
- Make redirect optional on site cloning (Thanks to Lola)
- EccPrivToPub / EccPubToPriv functions (Thanks to imachug)
- Detect and change dark/light theme based on OS setting (Thanks to filips123)

### Changed
- Re-factored code to the Python 3 runtime (compatible with Python 3.4-3.8)
- Safer database sync mode
- Removed bundled third-party libraries where possible
- Use lang=en instead of lang={lang} in urls to avoid url-encoding problems
- Remove environment details from the error page
- Don't push content.json updates larger than 10kb, to significantly reduce bandwidth usage for sites with many files

### Fixed
- Fix sending files with \0 characters
- Security fix: Escape error detail to avoid XSS (reported by krzotr)
- Fix signature verification using libsecp256k1 for compressed addresses (mostly certificates generated in the browser)
- Fix newsfeed if you have more than 1000 followed topics/posts on one site
- Fix site download as a zip file
- Fix displaying sites with utf8 titles
- Error message if dbRebuild fails (Thanks to Lola)
- Fix browser reopen when executing start.py again (Thanks to imachug)
## ZeroNet 0.6.5 (2019-02-16) Rev3851 (Last release targeting Python 2.7.x)
### Added
- IPv6 support in peer exchange, bigfiles, optional file finding, tracker sharing, socket listening and connecting (based on tangdou1 modifications)
- New tracker database format with IPv6 support
- Display a notification if there is an unpublished modification for your site
- Listen for SIGTERM and shut down normally (Thanks to blurHY)
- Support tilde `~` in filenames (by d14na)
- Support map for Namecoin subdomain names (Thanks to lola)
- Add log level to the config page
- Support `{data}` as a data-directory variable in the trackers_file value (see the sketch after this list)
- Quick check content.db on startup and rebuild if necessary
- Don't show the meek proxy option if the Tor client does not support it
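A minimal sketch of what the `{data}` substitution implies; the function name is a hypothetical illustration:

```python
def resolve_trackers_file(configured_value, data_dir):
    # "{data}" in the configured path expands to the ZeroNet data directory.
    return configured_value.replace("{data}", data_dir)

print(resolve_trackers_file("{data}/trackers.json", "/root/data"))
# -> /root/data/trackers.json
```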
### Changed
- Refactored port-open checking with IPv6 support
- Consider non-local IPs as external even if the open-port check fails (for CJDNS and Yggdrasil support)
- Add an IPv6 tracker and change an unstable tracker
- Don't correct the sent local time with the calculated time correction
- Disable CSP for Edge
- Only support CREATE commands in the dbschema indexes node and SELECT queries from storage.query (see the sketch below)
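The storage.query restriction amounts to whitelisting read-only statements; a minimal sketch of such a guard (hypothetical names, not ZeroNet's actual implementation):

```python
def check_site_query(sql):
    # Site code may only read data: anything but a single SELECT is refused.
    statement = sql.strip().rstrip(";")
    if ";" in statement or not statement.lower().startswith("select"):
        raise PermissionError("Only SELECT queries are allowed from sites")
    return statement

check_site_query("SELECT body FROM post WHERE site_id = ?")  # accepted
# check_site_query("DROP TABLE post")  # would raise PermissionError
```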
### Fixed
- Check the length of the master seed when executing the cryptGetPrivatekey CLI command
- Only reload source code on file modification / creation
- Detect and warn about the latest NoScript plugin
- Fix atomic write of a non-existent file
- Fix SQL queries with lots of variables and sites with lots of content.json files
- Fix multi-line parsing of zeronet.conf
- Fix site deletion from users.json
- Fix site cloning before the site is downloaded (Reported by unsystemizer)
- Fix queryJson for non-list nodes (Reported by MingchenZhang)
## ZeroNet 0.6.4 (2018-10-20) Rev3660
### Added
- New plugin: UiConfig. A web interface that allows changing ZeroNet settings.
@@ -1,7 +1,7 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

@@ -645,7 +645,7 @@ the "copyright" line and a pointer to where the full notice is found.
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

@@ -664,11 +664,11 @@ might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
<https://www.gnu.org/licenses/why-not-lgpl.html>.
Dockerfile
@@ -1,26 +1,33 @@
FROM alpine:3.8
FROM alpine:3.15

#Base settings
ENV HOME /root

COPY requirements.txt /root/requirements.txt

#Install ZeroNet
RUN apk --no-cache --no-progress add musl-dev gcc python python-dev py2-pip tor \
&& pip install --no-cache-dir gevent msgpack \
&& apk del musl-dev gcc python-dev py2-pip \
RUN apk --update --no-cache --no-progress add python3 python3-dev py3-pip gcc g++ autoconf automake libtool libffi-dev musl-dev make tor openssl \
&& pip3 install -r /root/requirements.txt \
&& apk del python3-dev gcc g++ autoconf automake libtool libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc

RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version

#Add Zeronet source
COPY . /root
VOLUME /root/data

#Control if Tor proxy is started
ENV ENABLE_TOR false
ENV ENABLE_TOR true

WORKDIR /root

#Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26117

#Expose ports
EXPOSE 43110 26552
EXPOSE 43110 26117
Dockerfile.arm64v8 (new file)
@@ -0,0 +1,34 @@
FROM alpine:3.12

#Base settings
ENV HOME /root

COPY requirements.txt /root/requirements.txt

#Install ZeroNet
RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
&& pip3 install -r /root/requirements.txt \
&& apk del python3-dev gcc libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc

RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version

#Add Zeronet source
COPY . /root
VOLUME /root/data

#Control if Tor proxy is started
ENV ENABLE_TOR false

WORKDIR /root

#Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552

#Expose ports
EXPOSE 43110 26552
LICENSE
@@ -1,340 +1,27 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.

We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and
modification follow.

GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.

b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.

c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,

b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,

c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.

9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

NO WARRANTY

11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

{description}
Copyright (C) {year} {fullname}

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.

{signature of Ty Coon}, 1 April 1989
Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Additional Conditions:

Contributing to this repo
This repo is governed by the GPLv3, located at the root of the ZeroNet git repo;
unless specified separately, all code is governed by that license. Contributions
to this repo are divided into two types: key contributions and non-key contributions.
Key contributions are those that directly affect the performance, quality or
features of the software. Non-key contributions include things like translation
datasets and image, graphic or video contributions that do not affect the main
usability of the software but improve the existing usability of a certain thing
or feature; they also include tests written with code, since their purpose is to
check whether something is working as intended. All non-key contributions are
governed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).
Unless specified above, in any case of conflict between two contributing parties
of the repo, a contribution is ruled by its type.
README-ru.md
@@ -1,211 +1,133 @@
# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)
# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)

[简体中文](./README-zh-cn.md)
[English](./README.md)

Decentralized websites using Bitcoin cryptography and the BitTorrent network - https://zeronet.io

Decentralized websites using Bitcoin cryptography and the BitTorrent protocol - https://zeronet.dev ([Mirror in ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). Unlike Bitcoin, ZeroNet does not need a blockchain to run, but it uses the same cryptography to ensure the integrity and validation of data.

## Why?

* We believe in an open, free and uncensored network and communication.
* No single point of failure: a site stays online as long as at least 1 peer serves it.
* No hosting costs: sites are served by their visitors.
* Impossible to shut down: it's nowhere because it's everywhere.
* Fast and works offline: you can access a site even when the Internet is unavailable.

- We believe in open, free and uncensored networks and communication.
- No single point of failure: a site stays online as long as at least 1 peer serves it.
- No hosting costs: sites are served by their visitors.
- Impossible to shut down: it's nowhere because it's everywhere.
- Fast and offline-capable: you can access a site because a copy of it is stored on your computer and with your peers.

## Features
* Real-time updated sites
* Namecoin .bit domain support
* Easy to install: unpack & run
* One-click website cloning
* Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
based authorization: your account is protected by the same cryptography as your Bitcoin wallet
* Built-in SQL server with P2P data synchronization: allows simpler site development and faster page loading
* Anonymity: full Tor network support with .onion hidden services instead of IPv4 addresses
* TLS encrypted connections
* Automatic uPnP port opening
* Plugin for multiuser (openproxy) support
* Works with any browser and operating system

- Real-time updated sites
- Support for `.bit` domains ([Namecoin](https://www.namecoin.org))
- Easy to install: just unpack and run
- One-click site cloning
- Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
authorization: your account is protected by the same cryptography as your Bitcoin wallet
- Built-in SQL server with P2P data synchronization: allows simpler site development and faster page loading
- Anonymity: full Tor network support, using `.onion` hidden services instead of IPv4 addresses
- TLS encrypted connections
- Automatic UPnP port opening
- Plugin for multiuser (openproxy) support
- Works with any browser and operating system

## Current limitations

- File transactions are not compressed
- No private sites

## How does it work?

* After starting `zeronet.py` you will be able to visit sites (zeronet sites) using the address
`http://127.0.0.1:43110/{zeronet_address}`
(e.g. `http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`).
* When you visit a new zeronet site, it tries to find peers using BitTorrent
in order to download the site files (html, css, js...) from them.
* Each visited site is also served by you (i.e. stored on your computer).
* Every site contains a `content.json` file, which holds the sha512 hashes of all other files
and a signature created using the site's private key.
* If the site owner (who has the private key for the site address) modifies the site, then he/she
- After starting `zeronet.py` you will be able to visit sites in ZeroNet using the address
`http://127.0.0.1:43110/{zeronet_address}`
(e.g.: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
- When you visit a new site in ZeroNet, it tries to find peers using the BitTorrent protocol
in order to download the site files (HTML, CSS, JS, etc.) from them.
- After visiting a site you become one of its peers too.
- Every site contains a `content.json` file, which holds the SHA512 hashes of all other files
and a signature created with the site's private key.
- If the site owner (the one who owns the private key for the site address) modifies the site, he
signs the new `content.json` and publishes it to the peers. After the peers verify the integrity of `content.json`
(using the signature), they download the modified files and publish the new content to other peers.

#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
#### [Frequently asked questions »](https://zeronet.io/docs/faq/)

#### [ZeroNet developer documentation »](https://zeronet.io/docs/site_development/getting_started/)

[Presentation about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
[Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
[ZeroNet developer documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

## Screenshots



[More screenshots in the ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)

#### [More screenshots in the ZeroNet docs »](https://zeronet.io/docs/using_zeronet/sample_sites/)

## How to join

### Windows

- Download and extract [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- Run `ZeroNet.exe`

* Download the ZeroBundle package:
  * [Microsoft Windows](https://github.com/HelloZeroNet/ZeroNet-win/archive/dist/ZeroNet-win.zip)
  * [Apple macOS](https://github.com/HelloZeroNet/ZeroNet-mac/archive/dist/ZeroNet-mac.zip)
  * [Linux 64-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz)
  * [Linux 32-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux32.tar.gz)
* Unpack anywhere
* Run `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)

### macOS

- Download and extract [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- Run `ZeroNet.app`

### Linux terminal

* `wget https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz`
* `tar xvpfz ZeroBundle-linux64.tar.gz`
* `cd ZeroBundle`
* Start it with `./ZeroNet.sh`

It downloads the latest version of ZeroNet, then starts it automatically.

### Linux (64-bit)

- Download and extract [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) (14MB)
- Run `./ZeroNet.sh`

> **Note**
> Start it like this: `./ZeroNet.sh --ui_ip '*' --ui_restrict your_ip_address` to allow remote connections to the web interface.

#### Manual install for Debian Linux

* `sudo apt-get update`
* `sudo apt-get install msgpack-python python-gevent`
* `wget https://github.com/HelloZeroNet/ZeroNet/archive/master.tar.gz`
* `tar xvpfz master.tar.gz`
* `cd ZeroNet-master`
* Start it with `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser.

### Docker

The official image is here: https://hub.docker.com/r/canewsin/zeronet/

### [Arch Linux](https://www.archlinux.org)

* `git clone https://aur.archlinux.org/zeronet.git`
* `cd zeronet`
* `makepkg -srci`
* `systemctl start zeronet`
* Open http://127.0.0.1:43110/ in your browser.

### Android (arm, arm64, x86)

See the [ArchWiki](https://wiki.archlinux.org)'s [ZeroNet
article](https://wiki.archlinux.org/index.php/ZeroNet) for further help.

- Requires at least Android 5.0 Lollipop
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- Download the APK: https://github.com/canewsin/zeronet_mobile/releases

### [Gentoo Linux](https://www.gentoo.org)

### Android (arm, arm64, x86) thin client for preview only (1MB)

* [`layman -a raiagent`](https://github.com/leycec/raiagent)
* `echo '>=net-vpn/zeronet-0.5.4' >> /etc/portage/package.accept_keywords`
* *(Optional)* Enable Tor support: `echo 'net-vpn/zeronet tor' >>
/etc/portage/package.use`
* `emerge zeronet`
* `rc-service zeronet start`
* Open http://127.0.0.1:43110/ in your browser.

- Requires at least Android 4.1 Jelly Bean
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)

See `/usr/share/doc/zeronet-*/README.gentoo.bz2` for further help.

### Install from source

### [FreeBSD](https://www.freebsd.org/)

* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
* `sysrc zeronet_enable="YES"`
* `service zeronet start`
* Open http://127.0.0.1:43110/ in your browser.

### [Vagrant](https://www.vagrantup.com/)

* `vagrant up`
* Connect to the VM with `vagrant ssh`
* `cd /vagrant`
* Run `python2 zeronet.py --ui_ip 0.0.0.0`
* Open http://127.0.0.1:43110/ in your browser.

### [Docker](https://www.docker.com/)
* `docker run -d -v <local_data_folder>:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 nofish/zeronet`
* This Docker image includes the Tor proxy, which is disabled by default.
Beware that some hosting providers may not allow you to run Tor on their servers.
If you want to enable it, set the `ENABLE_TOR` environment variable to `true` (default: `false`). E.g.:

`docker run -d -e "ENABLE_TOR=true" -v <local_data_folder>:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 nofish/zeronet`
* Open http://127.0.0.1:43110/ in your browser.

### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)

* `virtualenv env`
* `source env/bin/activate`
* `pip install msgpack gevent`
* `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser.

## Current limitations

* ~~No torrent-like file splitting for big file support~~ (big file support added)
* ~~No more anonymous than BitTorrent~~ (built-in full Tor support added)
* File transactions are not compressed ~~or encrypted yet~~ (TLS encryption added)
* No private sites

## How can I create a site in ZeroNet?

Shut down zeronet if it is running

```bash
$ zeronet.py siteCreate
...
- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
- Site created!
$ zeronet.py
...
```

```sh
wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
unzip ZeroNet-src.zip
cd ZeroNet
sudo apt-get update
sudo apt-get install python3-pip
sudo python3 -m pip install -r requirements.txt
```
- Start it with `python3 zeronet.py`

Congratulations, you're done! Now everyone can access your site at
`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
Open the ZeroHello landing page in your browser at http://127.0.0.1:43110/

Next steps: [ZeroNet Developer Documentation](https://zeronet.io/docs/site_development/getting_started/)
## How do I create a site in ZeroNet?

- Click **⋮** > **"Create new, empty site"** in the menu on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site.
- You will be **redirected** to a brand new site that can only be modified by you!
- You can find and modify your site's content in the **data/[your_site_address]** directory
- After making changes, open your site, switch the "0" button in the top right corner to the left, then press the **sign** and **publish** buttons at the bottom

## How can I modify a ZeroNet site?

* Modify the files located in the data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 directory.
When you are done:

```bash
$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
Private key (input hidden):
```

* Enter the private key you got when you created the site, then:

```bash
$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
Site:13DNDk..bhC2 Publishing to 3/10 peers...
Site:13DNDk..bhC2 Successfuly published to 3 peers
- Serving files....
```

* That's it! You have successfully signed and published your modifications.

Next steps: [ZeroNet developer documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

## Support the project

- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
- Paypal: https://zeronet.io/docs/help_zeronet/donate/

### Sponsors

* Better macOS / Safari compatibility made possible by [BrowserStack.com](https://www.browserstack.com)

- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)

#### Thank you!

* More info, help, changelog and zeronet sites: https://www.reddit.com/r/zeronet/
* Come chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
* Email: hello@zeronet.io (PGP: CB9613AE)

- More info, help, changelog and ZeroNet sites: https://www.reddit.com/r/zeronetx/
- Chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or in [Gitter](https://gitter.im/canewsin/ZeroNet)
- Email: canews.in@gmail.com
README-zh-cn.md
@@ -1,51 +1,49 @@
# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)
# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)

[English](./README.md)

Decentralized websites using Bitcoin cryptography and the BitTorrent network - https://zeronet.io
Decentralized websites using Bitcoin cryptography and the BitTorrent network - https://zeronet.dev

## Why?

* We believe in an open, free and uncensored network
* We believe in open, free and uncensored networks and communication
* No single point of failure: a site stays online as long as at least one node serves it
* No hosting costs: sites are served by their visitors
* Impossible to shut down: because nodes are everywhere
* Fast and works offline: usable even without an Internet connection

## Features
* Real-time updated sites
* Namecoin .bit domain support
* Easy to install: just unpack and run
* One-click cloning of existing sites
* Password-less, [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)-based authorization: your account is protected by the same cryptography as your Bitcoin wallet
* Built-in SQL server with P2P data synchronization: makes development easier and page loading faster
* Anonymity: full Tor network support, connecting through .onion hidden services instead of IPv4 addresses
* TLS encrypted connections
* Automatic uPnP port opening
* Plugin and multiuser (open proxy) support
* Compatible with all platforms
* Plugin for multiuser (openproxy) support
* Works with any browser / operating system

## How does it work?

* After you run `zeronet.py`, you will be able to visit zeronet sites using
`http://127.0.0.1:43110/{zeronet_address}` (e.g.
`http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`).
* After you run `zeronet.py`, you will be able to visit zeronet sites using
`http://127.0.0.1:43110/{zeronet_address}` (e.g.:
`http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`)
* When you browse a zeronet site, the client tries to find available peers through the BitTorrent network and downloads the needed files (html, css, js...) from them
* You store every site you have visited
* Every site contains a file named `content.json`, which stores the sha512 hashes of all other files and a signature generated with the site's private key
* If the site owner (who has the private key for the site address) modifies the site, signs the new `content.json` and pushes it to other nodes, those nodes verify the authenticity of `content.json` (using the signature), then download the modified files and push the new content to further nodes

#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
#### [FAQ »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)

#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1qBxkroB_iiX2zHEn0dt-N-qRZgyEzui46XS2hEa3AA4/pub?start=false&loop=false&delayms=3000)
#### [FAQ »](https://zeronet.io/docs/faq/)

#### [ZeroNet developer documentation »](https://zeronet.io/docs/site_development/getting_started/)
#### [ZeroNet developer documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

## Screenshots

@@ -53,136 +51,82 @@


#### [More screenshots in the ZeroNet docs »](https://zeronet.io/docs/using_zeronet/sample_sites/)
#### [More screenshots in the ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)

## How to join

* Download the ZeroBundle package:
  * [Microsoft Windows](https://github.com/HelloZeroNet/ZeroNet-win/archive/dist/ZeroNet-win.zip)
  * [Apple macOS](https://github.com/HelloZeroNet/ZeroNet-mac/archive/dist/ZeroNet-mac.zip)
  * [Linux 64bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz)
  * [Linux 32bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux32.tar.gz)
* Unpack it
* Run `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)

### Windows

### Linux command line
- Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- Unpack it anywhere
- Run `ZeroNet.exe`

### macOS

* `wget https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz`
* `tar xvpfz ZeroBundle-linux64.tar.gz`
* `cd ZeroBundle`
* Start it with `./ZeroNet.sh`
- Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- Unpack it anywhere
- Run `ZeroNet.app`

### Linux (x86-64bit)

It automatically downloads the latest version of ZeroNet when started.
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- `unzip ZeroNet-linux.zip`
- `cd ZeroNet-linux`
- Start it with `./ZeroNet.sh`
- Open http://127.0.0.1:43110/ in your browser to reach the ZeroHello page

__Tip:__ To allow remote connections to the web interface, start it with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address`

#### Manual install on Debian Linux
### Install from source

* `sudo apt-get update`
* `sudo apt-get install msgpack-python python-gevent`
* `wget https://github.com/HelloZeroNet/ZeroNet/archive/master.tar.gz`
* `tar xvpfz master.tar.gz`
* `cd ZeroNet-master`
* Start it with `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- `unzip ZeroNet-src.zip`
- `cd ZeroNet`
- `sudo apt-get update`
- `sudo apt-get install python3-pip`
- `sudo python3 -m pip install -r requirements.txt`
- Start it with `python3 zeronet.py`
- Open http://127.0.0.1:43110/ in your browser to reach the ZeroHello page

### [FreeBSD](https://www.freebsd.org/)
### Android (arm, arm64, x86)
- minimum Android version supported 21 (Android 5.0 Lollipop)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- APK download: https://github.com/canewsin/zeronet_mobile/releases

* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
* `sysrc zeronet_enable="YES"`
* `service zeronet start`
* Open http://127.0.0.1:43110/ in your browser

### [Vagrant](https://www.vagrantup.com/)

* `vagrant up`
* Connect to the VM with `vagrant ssh`
* `cd /vagrant`
* Run `python2 zeronet.py --ui_ip 0.0.0.0`
* Open http://127.0.0.1:43110/ in your browser

### [Docker](https://www.docker.com/)
* `docker run -d -v <local_data_folder>:/root/data -p 26552:26552 -p 43110:43110 nofish/zeronet`
* This Docker image includes Tor, but it is disabled by default because some hosting providers
do not allow you to run Tor on their servers. If you want to enable it,
set the `ENABLE_TOR` environment variable to `true` (default: `false`). E.g.:

`docker run -d -e "ENABLE_TOR=true" -v <local_data_folder>:/root/data -p 26552:26552 -p 43110:43110 nofish/zeronet`
* Open http://127.0.0.1:43110/ in your browser

### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)

* `virtualenv env`
* `source env/bin/activate`
* `pip install msgpack gevent`
* `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser
### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
- minimum Android version supported 16 (JellyBean)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)

## Current limitations

* ~~No torrent-like file splitting for big file support~~ (big file support added)
* ~~No more anonymous than BitTorrent~~ (built-in full Tor support added)
* File transactions are not compressed ~~or encrypted~~ (TLS support added)
* File transactions are not compressed
* No private sites

## How to create a ZeroNet site?

* Click the **⋮** > **"Create new, empty site"** menu item on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site
* You will be **redirected** to a brand new site that can only be modified by you
* You can find and modify your site's content in the **data/[your_site_address]** directory
* After making changes, open your site, drag the "0" button in the top right corner to the left, then press the **sign** and **publish** buttons at the bottom

Shut down zeronet if it is running, then run:
```bash
$ zeronet.py siteCreate
...
- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
- Site created!
$ zeronet.py
...
```

You're done! Now anyone can access your site at
`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`

Next steps: [ZeroNet developer documentation](https://zeronet.io/docs/site_development/getting_started/)

## How do I modify a ZeroNet site?

* Modify the files located in the data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 directory.
When you are done:

```bash
$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
Private key (input hidden):
```

* Enter the private key you got when you created the site

```bash
$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
Site:13DNDk..bhC2 Publishing to 3/10 peers...
Site:13DNDk..bhC2 Successfuly published to 3 peers
- Serving files....
```

* That's it! You have now successfully signed and published your modifications.

Next steps: [ZeroNet developer documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

## Help this project
- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)

- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
- Paypal: https://zeronet.io/docs/help_zeronet/donate/

### Sponsors
#### Thank you!

* Better OSX / Safari compatibility made possible by [BrowserStack.com](https://www.browserstack.com)

#### Thanks!

* More info, help, changelog and zeronet sites: https://www.reddit.com/r/zeronet/
* Come chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
* [Here](https://gitter.im/ZeroNet-zh/Lobby) is a Chinese chatroom on gitter
* Email: hello@noloop.me

* More info, help, changelog and zeronet sites: https://www.reddit.com/r/zeronetx/
* Come chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
* [Here](https://gitter.im/canewsin/ZeroNet) is a Chinese chatroom on gitter
* Email: canews.in@gmail.com
207
README.md
207
README.md
|
@ -1,9 +1,6 @@
|
# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)

[简体中文](./README-zh-cn.md)
[Русский](./README-ru.md)

Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.io

# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)

<!--TODO: Update Onion Site -->
Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/). Unlike Bitcoin, ZeroNet doesn't need a blockchain to run; it uses the same cryptography as BTC to ensure data integrity and validation.


## Why?

@@ -36,22 +33,22 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/

* After starting `zeronet.py` you will be able to visit zeronet sites using
  `http://127.0.0.1:43110/{zeronet_address}` (eg.
  `http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`).
  `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
* When you visit a new zeronet site, it tries to find peers using the BitTorrent
  network so it can download the site files (html, css, js...) from them.
* Each visited site is also served by you.
* Every site contains a `content.json` file which holds all other files in a sha512 hash
  and a signature generated using the site's private key (a verification sketch follows below).
* If the site owner (who has the private key for the site address) modifies the
  site, then he/she signs the new `content.json` and publishes it to the peers.
  site and signs the new `content.json` and publishes it to the peers.
  Afterwards, the peers verify the `content.json` integrity (using the
  signature), they download the modified files and publish the new content to
  other peers.
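To make the integrity check concrete, here is a minimal sketch (not ZeroNet's actual implementation) of how a peer could re-hash a site's files against the digests recorded in `content.json`. The field layout (a `files` map of paths to `sha512`/`size` entries) follows the scheme described above; the truncation to 256 bits and the skipped ECDSA signature check are simplifying assumptions.

```python
import hashlib
import json
import os

def verify_site_files(site_dir):
    """Re-hash every file listed in content.json and compare with the recorded digests.

    Simplified sketch: the real client stores truncated sha512 digests and also
    verifies the Bitcoin-style signature over content.json itself, omitted here.
    """
    with open(os.path.join(site_dir, "content.json")) as f:
        content = json.load(f)

    for inner_path, info in content.get("files", {}).items():
        with open(os.path.join(site_dir, inner_path), "rb") as f:
            # sha512, truncated to the first 256 bits (64 hex chars)
            digest = hashlib.sha512(f.read()).hexdigest()[:64]
        status = "OK" if digest == info["sha512"] else "MODIFIED"
        print("%s: %s" % (status, inner_path))

verify_site_files("data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2")
```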
#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)

#### [Frequently asked questions »](https://zeronet.io/docs/faq/)
#### [Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)

#### [ZeroNet Developer Documentation »](https://zeronet.io/docs/site_development/getting_started/)
#### [ZeroNet Developer Documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)


## Screenshots

@@ -59,163 +56,101 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/

#### [More screenshots in ZeroNet docs »](https://zeronet.io/docs/using_zeronet/sample_sites/)
#### [More screenshots in ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)


## How to join

* Download ZeroBundle package:
  * [Microsoft Windows](https://github.com/HelloZeroNet/ZeroNet-win/archive/dist/ZeroNet-win.zip)
  * [Apple macOS](https://github.com/HelloZeroNet/ZeroNet-mac/archive/dist/ZeroNet-mac.zip)
  * [Linux x86/64-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz)
  * [Linux x86/32-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux32.tar.gz)
* Unpack anywhere
* Run `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)

### Windows

### Linux terminal on x86-64

- Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- Unpack anywhere
- Run `ZeroNet.exe`

### macOS

* `wget https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz`
* `tar xvpfz ZeroBundle-linux64.tar.gz`
* `cd ZeroBundle`
* Start with `./ZeroNet.sh`

- Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- Unpack anywhere
- Run `ZeroNet.app`

### Linux (x86-64bit)

- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- `unzip ZeroNet-linux.zip`
- `cd ZeroNet-linux`
- Start with: `./ZeroNet.sh`
- Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/

__Tip:__ Start with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address` to allow remote connections to the web interface.

### Android (arm, arm64, x86)

- Minimum supported Android version: 21 (Android 5.0 Lollipop)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
      alt="Download from Google Play"
      height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- APK download: https://github.com/canewsin/zeronet_mobile/releases

It downloads the latest version of ZeroNet then starts it automatically.

### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)

- Minimum supported Android version: 16 (Jelly Bean)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
      alt="Download from Google Play"
      height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)

#### Manual install for Debian Linux

* `sudo apt-get update`
* `sudo apt-get install msgpack-python python-gevent`
* `wget https://github.com/HelloZeroNet/ZeroNet/archive/master.tar.gz`
* `tar xvpfz master.tar.gz`
* `cd ZeroNet-master`
* Start with `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser

#### Docker

There is an official image, built from source, at: https://hub.docker.com/r/canewsin/zeronet/

### [Whonix](https://www.whonix.org)

### Online Proxies

Proxies are like seed boxes for sites (i.e. ZNX runs on a cloud VPS); you can try the ZeroNet experience through them. Add your proxy below if you run one.

* [Instructions](https://www.whonix.org/wiki/ZeroNet)

#### Official ZNX Proxy:

### [Arch Linux](https://www.archlinux.org)

https://proxy.zeronet.dev/

* `git clone https://aur.archlinux.org/zeronet.git`
* `cd zeronet`
* `makepkg -srci`
* `systemctl start zeronet`
* Open http://127.0.0.1:43110/ in your browser

https://zeronet.dev/

See [ArchWiki](https://wiki.archlinux.org)'s [ZeroNet
article](https://wiki.archlinux.org/index.php/ZeroNet) for further assistance.

#### From Community

### [Gentoo Linux](https://www.gentoo.org)

https://0net-preview.com/

* [`eselect repository enable raiagent`](https://github.com/leycec/raiagent)
* `emerge --sync`
* `echo 'net-vpn/zeronet' >> /etc/portage/package.accept_keywords`
* *(Optional)* Enable Tor support: `echo 'net-vpn/zeronet tor' >> /etc/portage/package.use`
* `emerge zeronet`
* `rc-service zeronet start`
* *(Optional)* Enable zeronet at runlevel "default": `rc-update add zeronet`
* Open http://127.0.0.1:43110/ in your browser

https://portal.ngnoid.tv/

See `/usr/share/doc/zeronet-*/README.gentoo.bz2` for further assistance.

https://zeronet.ipfsscan.io/

### [FreeBSD](https://www.freebsd.org/)

* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
* `sysrc zeronet_enable="YES"`
* `service zeronet start`
* Open http://127.0.0.1:43110/ in your browser

### Install from source

### [Vagrant](https://www.vagrantup.com/)

* `vagrant up`
* Access VM with `vagrant ssh`
* `cd /vagrant`
* Run `python2 zeronet.py --ui_ip 0.0.0.0`
* Open http://127.0.0.1:43110/ in your browser

### [Docker](https://www.docker.com/)

* `docker run -d -v <local_data_folder>:/root/data -p 26552:26552 -p 127.0.0.1:43110:43110 nofish/zeronet`
* This Docker image includes the Tor proxy, which is disabled by default. Beware that some
  hosting providers may not allow you to run Tor on their servers. If you want to enable it,
  set the `ENABLE_TOR` environment variable to `true` (default: `false`). E.g.:

  `docker run -d -e "ENABLE_TOR=true" -v <local_data_folder>:/root/data -p 26552:26552 -p 127.0.0.1:43110:43110 nofish/zeronet`
* Open http://127.0.0.1:43110/ in your browser
### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)

* `virtualenv env`
* `source env/bin/activate`
* `pip install msgpack gevent`
* `python2 zeronet.py`
* Open http://127.0.0.1:43110/ in your browser

- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- `unzip ZeroNet-src.zip`
- `cd ZeroNet`
- `sudo apt-get update`
- `sudo apt-get install python3-pip`
- `sudo python3 -m pip install -r requirements.txt`
- Start with: `python3 zeronet.py`
- Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/

## Current limitations

* ~~No torrent-like file splitting for big file support~~ (big file support added)
* ~~No more anonymous than BitTorrent~~ (built-in full Tor support added)
* File transactions are not compressed ~~or encrypted yet~~ (TLS encryption added)
* File transactions are not compressed
* No private sites


## How can I create a ZeroNet site?

Shut down zeronet if you are running it already

```bash
$ zeronet.py siteCreate
...
- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
- Site created!
$ zeronet.py
...
```

Congratulations, you're finished! Now anyone can access your site using
`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`

Next steps: [ZeroNet Developer Documentation](https://zeronet.io/docs/site_development/getting_started/)


## How can I modify a ZeroNet site?

* Modify files located in the data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 directory.
  After you're finished:

```bash
$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
Private key (input hidden):
```

* Enter the private key you got when you created the site, then:

```bash
$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
...
Site:13DNDk..bhC2 Publishing to 3/10 peers...
Site:13DNDk..bhC2 Successfully published to 3 peers
- Serving files....
```

* That's it! You've successfully signed and published your modifications.
* Click on the **⋮** > **"Create new, empty site"** menu item on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site.
* You will be **redirected** to a completely new site that is only modifiable by you!
* You can find and modify your site's content in the **data/[yoursiteaddress]** directory
* After making your modifications, open your site, drag the top-right "0" button to the left, then press the **sign** and **publish** buttons at the bottom

Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

## Help keep this project alive

- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
- Paypal: https://zeronet.io/docs/help_zeronet/donate/

### Sponsors

* Better macOS/Safari compatibility made possible by [BrowserStack.com](https://www.browserstack.com)

- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)

#### Thank you!

* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronet/
* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
* Email: hello@zeronet.io (PGP: CB9613AE)
* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronetx/
* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
* Email: canews.in@gmail.com
1 plugins (submodule)

@@ -0,0 +1 @@
Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
@@ -1,148 +0,0 @@
import time

import gevent

from Plugin import PluginManager
from Config import config
import BroadcastServer


@PluginManager.registerTo("SiteAnnouncer")
class SiteAnnouncerPlugin(object):
    def announce(self, force=False, *args, **kwargs):
        local_announcer = self.site.connection_server.local_announcer

        thread = None
        if local_announcer and (force or time.time() - local_announcer.last_discover > 5 * 60):
            thread = gevent.spawn(local_announcer.discover, force=force)
        back = super(SiteAnnouncerPlugin, self).announce(force=force, *args, **kwargs)

        if thread:
            thread.join()

        return back


class LocalAnnouncer(BroadcastServer.BroadcastServer):
    def __init__(self, server, listen_port):
        super(LocalAnnouncer, self).__init__("zeronet", listen_port=listen_port)
        self.server = server

        self.sender_info["peer_id"] = self.server.peer_id
        self.sender_info["port"] = self.server.port
        self.sender_info["broadcast_port"] = listen_port
        self.sender_info["rev"] = config.rev

        self.known_peers = {}
        self.last_discover = 0

    def discover(self, force=False):
        self.log.debug("Sending discover request (force: %s)" % force)
        self.last_discover = time.time()
        if force:  # Probably new site added, clean cache
            self.known_peers = {}

        for peer_id, known_peer in self.known_peers.items():
            if time.time() - known_peer["found"] > 20 * 60:
                del(self.known_peers[peer_id])
                self.log.debug("Timeout, removing from known_peers: %s" % peer_id)
        self.broadcast({"cmd": "discoverRequest", "params": {}}, port=self.listen_port)

    def actionDiscoverRequest(self, sender, params):
        back = {
            "cmd": "discoverResponse",
            "params": {
                "sites_changed": self.server.site_manager.sites_changed
            }
        }

        if sender["peer_id"] not in self.known_peers:
            self.known_peers[sender["peer_id"]] = {"added": time.time(), "sites_changed": 0, "updated": 0, "found": time.time()}
            self.log.debug("Got discover request from unknown peer %s (%s), time to refresh known peers" % (sender["ip"], sender["peer_id"]))
            gevent.spawn_later(1.0, self.discover)  # Let the response arrive first to the requester

        return back

    def actionDiscoverResponse(self, sender, params):
        if sender["peer_id"] in self.known_peers:
            self.known_peers[sender["peer_id"]]["found"] = time.time()
        if params["sites_changed"] != self.known_peers.get(sender["peer_id"], {}).get("sites_changed"):
            # Peer's site list changed, request the list of new sites
            return {"cmd": "siteListRequest"}
        else:
            # Peer's site list is the same
            for site in self.server.sites.values():
                peer = site.peers.get("%s:%s" % (sender["ip"], sender["port"]))
                if peer:
                    peer.found("local")

    def actionSiteListRequest(self, sender, params):
        back = []
        sites = self.server.sites.values()

        # Split addresses into groups of 100 to avoid the UDP size limit
        site_groups = [sites[i:i + 100] for i in range(0, len(sites), 100)]
        for site_group in site_groups:
            res = {}
            res["sites_changed"] = self.server.site_manager.sites_changed
            res["sites"] = [site.address_hash for site in site_group]
            back.append({"cmd": "siteListResponse", "params": res})
        return back

    def actionSiteListResponse(self, sender, params):
        s = time.time()
        peer_sites = set(params["sites"])
        num_found = 0
        added_sites = []
        for site in self.server.sites.values():
            if site.address_hash in peer_sites:
                added = site.addPeer(sender["ip"], sender["port"], source="local")
                num_found += 1
                if added:
                    site.worker_manager.onPeers()
                    site.updateWebsocket(peers_added=1)
                    added_sites.append(site)

        # Save the sites_changed value to avoid unnecessary site list downloads
        if sender["peer_id"] not in self.known_peers:
            self.known_peers[sender["peer_id"]] = {"added": time.time()}

        self.known_peers[sender["peer_id"]]["sites_changed"] = params["sites_changed"]
        self.known_peers[sender["peer_id"]]["updated"] = time.time()
        self.known_peers[sender["peer_id"]]["found"] = time.time()

        self.log.debug(
            "Tracker result: Discover from %s response parsed in %.3fs, found: %s added: %s of %s" %
            (sender["ip"], time.time() - s, num_found, added_sites, len(peer_sites))
        )


@PluginManager.registerTo("FileServer")
class FileServerPlugin(object):
    def __init__(self, *args, **kwargs):
        res = super(FileServerPlugin, self).__init__(*args, **kwargs)
        if config.broadcast_port and config.tor != "always" and not config.disable_udp:
            self.local_announcer = LocalAnnouncer(self, config.broadcast_port)
        else:
            self.local_announcer = None
        return res

    def start(self, *args, **kwargs):
        if self.local_announcer:
            gevent.spawn(self.local_announcer.start)
        return super(FileServerPlugin, self).start(*args, **kwargs)

    def stop(self):
        if self.local_announcer:
            self.local_announcer.stop()
        res = super(FileServerPlugin, self).stop()
        return res


@PluginManager.registerTo("ConfigPlugin")
class ConfigPlugin(object):
    def createArguments(self):
        group = self.parser.add_argument_group("AnnounceLocal plugin")
        group.add_argument('--broadcast_port', help='UDP broadcasting port for local peer discovery', default=1544, type=int, metavar='port')

        return super(ConfigPlugin, self).createArguments()
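A note on the configuration above: `FileServerPlugin.__init__` only constructs a `LocalAnnouncer` when `config.broadcast_port` is truthy, Tor is not set to `always`, and UDP is not disabled, so starting ZeroNet with `--broadcast_port 0` (exactly what the plugin's test conftest further below does) switches local peer discovery off entirely.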
@@ -1,140 +0,0 @@
import socket
import logging
import time
from contextlib import closing

import msgpack

from Debug import Debug
from util import UpnpPunch


class BroadcastServer(object):
    def __init__(self, service_name, listen_port=1544, listen_ip=''):
        self.log = logging.getLogger("BroadcastServer")
        self.listen_port = listen_port
        self.listen_ip = listen_ip

        self.running = False
        self.sock = None
        self.sender_info = {"service": service_name}

    def createBroadcastSocket(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            except Exception as err:
                self.log.warning("Error setting SO_REUSEPORT: %s" % err)

        binded = False
        for retry in range(3):
            try:
                sock.bind((self.listen_ip, self.listen_port))
                binded = True
                break
            except Exception as err:
                self.log.error(
                    "Socket bind to %s:%s error: %s, retry #%s" %
                    (self.listen_ip, self.listen_port, Debug.formatException(err), retry)
                )
                time.sleep(retry)

        if binded:
            return sock
        else:
            return False

    def start(self):  # Listens for discover requests
        self.sock = self.createBroadcastSocket()
        if not self.sock:
            self.log.error("Unable to listen on port %s" % self.listen_port)
            return

        self.log.debug("Started on port %s" % self.listen_port)

        self.running = True

        while self.running:
            try:
                data, addr = self.sock.recvfrom(8192)
            except Exception as err:
                if self.running:
                    self.log.error("Listener receive error: %s" % err)
                continue

            if not self.running:
                break

            try:
                message = msgpack.unpackb(data)
                response_addr, message = self.handleMessage(addr, message)
                if message:
                    self.send(response_addr, message)
            except Exception as err:
                self.log.error("Handlemessage error: %s" % Debug.formatException(err))
        self.log.debug("Stopped listening on port %s" % self.listen_port)

    def stop(self):
        self.log.debug("Stopping, socket: %s" % self.sock)
        self.running = False
        if self.sock:
            self.sock.close()

    def send(self, addr, message):
        if type(message) is not list:
            message = [message]

        for message_part in message:
            message_part["sender"] = self.sender_info

            self.log.debug("Send to %s: %s" % (addr, message_part["cmd"]))
            with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
                sock.sendto(msgpack.packb(message_part), addr)

    def getMyIps(self):
        return UpnpPunch._get_local_ips()

    def broadcast(self, message, port=None):
        if not port:
            port = self.listen_port

        my_ips = self.getMyIps()
        addr = ("255.255.255.255", port)

        message["sender"] = self.sender_info
        self.log.debug("Broadcast using ips %s on port %s: %s" % (my_ips, port, message["cmd"]))

        for my_ip in my_ips:
            try:
                with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.bind((my_ip, 0))
                    sock.sendto(msgpack.packb(message), addr)
            except Exception as err:
                self.log.warning("Error sending broadcast using ip %s: %s" % (my_ip, err))

    def handleMessage(self, addr, message):
        self.log.debug("Got from %s: %s" % (addr, message["cmd"]))
        cmd = message["cmd"]
        params = message.get("params", {})
        sender = message["sender"]
        sender["ip"] = addr[0]

        func_name = "action" + cmd[0].upper() + cmd[1:]
        func = getattr(self, func_name, None)

        if sender["service"] != "zeronet" or sender["peer_id"] == self.sender_info["peer_id"]:
            # Skip messages not for us or sent by us
            message = None
        elif func:
            message = func(sender, params)
        else:
            self.log.debug("Unknown cmd: %s" % cmd)
            message = None

        return (sender["ip"], sender["broadcast_port"]), message
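For reference, here is a minimal standalone sketch of how a client could speak this discovery protocol: it packs a `discoverRequest` message with msgpack and broadcasts it on the UDP port the server above listens on. The `sender` fields mirror the `sender_info` dict built in `LocalAnnouncer.__init__`; the peer id, ports, and rev values are made-up examples.

```python
import socket
import msgpack

# Message layout matching what BroadcastServer.handleMessage expects:
# "cmd", optional "params", and a "sender" dict carrying service and peer identity.
message = {
    "cmd": "discoverRequest",
    "params": {},
    "sender": {
        "service": "zeronet",
        "peer_id": "-EXAMPLE-PEERID-0001",  # hypothetical 20-byte peer id
        "port": 15441,                       # this peer's fileserver port
        "broadcast_port": 1544,              # where we expect the response
        "rev": 0,
    },
}

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(msgpack.packb(message), ("255.255.255.255", 1544))
sock.close()
```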
@@ -1,113 +0,0 @@
import time
import copy

import gevent
import pytest
import mock

from AnnounceLocal import AnnounceLocalPlugin
from File import FileServer
from Test import Spy


@pytest.fixture
def announcer(file_server, site):
    file_server.sites[site.address] = site
    announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server, listen_port=1100)
    file_server.local_announcer = announcer
    announcer.listen_port = 1100
    announcer.sender_info["broadcast_port"] = 1100
    announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
    announcer.discover = mock.MagicMock(return_value=False)  # Don't send discover requests automatically
    gevent.spawn(announcer.start)
    time.sleep(0.5)

    assert file_server.local_announcer.running
    return file_server.local_announcer


@pytest.fixture
def announcer_remote(request, site_temp):
    file_server_remote = FileServer("127.0.0.1", 1545)
    file_server_remote.sites[site_temp.address] = site_temp
    announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server_remote, listen_port=1101)
    file_server_remote.local_announcer = announcer
    announcer.listen_port = 1101
    announcer.sender_info["broadcast_port"] = 1101
    announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
    announcer.discover = mock.MagicMock(return_value=False)  # Don't send discover requests automatically
    gevent.spawn(announcer.start)
    time.sleep(0.5)

    assert file_server_remote.local_announcer.running

    def cleanup():
        file_server_remote.stop()
    request.addfinalizer(cleanup)

    return file_server_remote.local_announcer


@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestAnnounce:
    def testSenderInfo(self, announcer):
        sender_info = announcer.sender_info
        assert sender_info["port"] > 0
        assert len(sender_info["peer_id"]) == 20
        assert sender_info["rev"] > 0

    def testIgnoreSelfMessages(self, announcer):
        # No response to messages that have the same peer_id as the server
        assert not announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": announcer.sender_info, "params": {}})[1]

        # Response to messages with a different peer id
        sender_info = copy.copy(announcer.sender_info)
        sender_info["peer_id"] += "-"
        addr, res = announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": sender_info, "params": {}})
        assert res["params"]["sites_changed"] > 0

    def testDiscoverRequest(self, announcer, announcer_remote):
        assert len(announcer_remote.known_peers) == 0
        with Spy.Spy(announcer_remote, "handleMessage") as responses:
            announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
            time.sleep(0.1)

        response_cmds = [response[1]["cmd"] for response in responses]
        assert response_cmds == ["discoverResponse", "siteListResponse"]
        assert len(responses[-1][1]["params"]["sites"]) == 1

        # It should only request the siteList if the sites_changed value differs from the last response
        with Spy.Spy(announcer_remote, "handleMessage") as responses:
            announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
            time.sleep(0.1)

        response_cmds = [response[1]["cmd"] for response in responses]
        assert response_cmds == ["discoverResponse"]

    def testPeerDiscover(self, announcer, announcer_remote, site):
        assert announcer.server.peer_id != announcer_remote.server.peer_id
        assert len(announcer.server.sites.values()[0].peers) == 0
        announcer.broadcast({"cmd": "discoverRequest"}, port=announcer_remote.listen_port)
        time.sleep(0.1)
        assert len(announcer.server.sites.values()[0].peers) == 1

    def testRecentPeerList(self, announcer, announcer_remote, site):
        assert len(site.peers_recent) == 0
        assert len(site.peers) == 0
        with Spy.Spy(announcer, "handleMessage") as responses:
            announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
            time.sleep(0.1)
        assert [response[1]["cmd"] for response in responses] == ["discoverResponse", "siteListResponse"]
        assert len(site.peers_recent) == 1
        assert len(site.peers) == 1

        # It should update the peer without a siteListResponse
        last_time_found = site.peers.values()[0].time_found
        site.peers_recent.clear()
        with Spy.Spy(announcer, "handleMessage") as responses:
            announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
            time.sleep(0.1)
        assert [response[1]["cmd"] for response in responses] == ["discoverResponse"]
        assert len(site.peers_recent) == 1
        assert site.peers.values()[0].time_found > last_time_found


@@ -1,4 +0,0 @@
from src.Test.conftest import *

from Config import config
config.broadcast_port = 0

@@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.

@@ -1 +0,0 @@
import AnnounceLocalPlugin
@@ -1,188 +0,0 @@
import time
import os
import logging
import json
import atexit

import gevent

from Config import config
from Plugin import PluginManager
from util import helper


class TrackerStorage(object):
    def __init__(self):
        self.log = logging.getLogger("TrackerStorage")
        self.file_path = "%s/trackers.json" % config.data_dir
        self.load()
        self.time_discover = 0.0
        atexit.register(self.save)

    def getDefaultFile(self):
        return {"shared": {}}

    def onTrackerFound(self, tracker_address, type="shared", my=False):
        if not tracker_address.startswith("zero://"):
            return False

        trackers = self.getTrackers()
        added = False
        if tracker_address not in trackers:
            trackers[tracker_address] = {
                "time_added": time.time(),
                "time_success": 0,
                "latency": 99.0,
                "num_error": 0,
                "my": False
            }
            self.log.debug("New tracker found: %s" % tracker_address)
            added = True

        trackers[tracker_address]["time_found"] = time.time()
        trackers[tracker_address]["my"] = my
        return added

    def onTrackerSuccess(self, tracker_address, latency):
        trackers = self.getTrackers()
        if tracker_address not in trackers:
            return False

        trackers[tracker_address]["latency"] = latency
        trackers[tracker_address]["time_success"] = time.time()
        trackers[tracker_address]["num_error"] = 0

    def onTrackerError(self, tracker_address):
        trackers = self.getTrackers()
        if tracker_address not in trackers:
            return False

        trackers[tracker_address]["time_error"] = time.time()
        trackers[tracker_address]["num_error"] += 1

        if len(self.getWorkingTrackers()) >= config.working_shared_trackers_limit:
            error_limit = 5
        else:
            error_limit = 30

        if trackers[tracker_address]["num_error"] > error_limit and trackers[tracker_address]["time_success"] < time.time() - 60 * 60:
            self.log.debug("Tracker %s looks down, removing." % tracker_address)
            del trackers[tracker_address]

    def getTrackers(self, type="shared"):
        return self.file_content.setdefault(type, {})

    def getWorkingTrackers(self, type="shared"):
        trackers = {
            key: tracker for key, tracker in self.getTrackers(type).iteritems()
            if tracker["time_success"] > time.time() - 60 * 60
        }
        return trackers

    def getFileContent(self):
        if not os.path.isfile(self.file_path):
            open(self.file_path, "w").write("{}")
            return self.getDefaultFile()
        try:
            return json.load(open(self.file_path))
        except Exception as err:
            self.log.error("Error loading trackers list: %s" % err)
            return self.getDefaultFile()

    def load(self):
        self.file_content = self.getFileContent()

        trackers = self.getTrackers()
        self.log.debug("Loaded %s shared trackers" % len(trackers))
        for address, tracker in trackers.items():
            tracker["num_error"] = 0
            if not address.startswith("zero://"):
                del trackers[address]

    def save(self):
        s = time.time()
        helper.atomicWrite(self.file_path, json.dumps(self.file_content, indent=2, sort_keys=True))
        self.log.debug("Saved in %.3fs" % (time.time() - s))

    def discoverTrackers(self, peers):
        if len(self.getWorkingTrackers()) > config.working_shared_trackers_limit:
            return False
        s = time.time()
        num_success = 0
        for peer in peers:
            if peer.connection and peer.connection.handshake.get("rev", 0) < 3560:
                continue  # Not supported

            res = peer.request("getTrackers")
            if not res or "error" in res:
                continue

            num_success += 1
            for tracker_address in res["trackers"]:
                added = self.onTrackerFound(tracker_address)
                if added:  # Only add one tracker from one source
                    break

        if not num_success and len(peers) < 20:
            self.time_discover = 0.0

        if num_success:
            self.save()

        self.log.debug("Trackers discovered from %s/%s peers in %.3fs" % (num_success, len(peers), time.time() - s))


if "tracker_storage" not in locals():
    tracker_storage = TrackerStorage()


@PluginManager.registerTo("SiteAnnouncer")
class SiteAnnouncerPlugin(object):
    def getTrackers(self):
        if tracker_storage.time_discover < time.time() - 5 * 60:
            tracker_storage.time_discover = time.time()
            gevent.spawn(tracker_storage.discoverTrackers, self.site.getConnectedPeers())
        trackers = super(SiteAnnouncerPlugin, self).getTrackers()
        shared_trackers = tracker_storage.getTrackers("shared").keys()
        if shared_trackers:
            return trackers + shared_trackers
        else:
            return trackers

    def announceTracker(self, tracker, *args, **kwargs):
        res = super(SiteAnnouncerPlugin, self).announceTracker(tracker, *args, **kwargs)
        if res:
            latency = res
            tracker_storage.onTrackerSuccess(tracker, latency)
        elif res is False:
            tracker_storage.onTrackerError(tracker)

        return res


@PluginManager.registerTo("FileRequest")
class FileRequestPlugin(object):
    def actionGetTrackers(self, params):
        shared_trackers = tracker_storage.getWorkingTrackers("shared").keys()
        self.response({"trackers": shared_trackers})


@PluginManager.registerTo("FileServer")
class FileServerPlugin(object):
    def portCheck(self, *args, **kwargs):
        res = super(FileServerPlugin, self).portCheck(*args, **kwargs)
        if res and not config.tor == "always" and "Bootstrapper" in PluginManager.plugin_manager.plugin_names:
            for ip in self.ip_external_list:
                my_tracker_address = "zero://%s:%s" % (ip, config.fileserver_port)
                tracker_storage.onTrackerFound(my_tracker_address, my=True)
        return res


@PluginManager.registerTo("ConfigPlugin")
class ConfigPlugin(object):
    def createArguments(self):
        group = self.parser.add_argument_group("AnnounceShare plugin")
        group.add_argument('--working_shared_trackers_limit', help='Stop discovering new shared trackers after this number of shared trackers reached', default=5, type=int, metavar='limit')

        return super(ConfigPlugin, self).createArguments()
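For orientation, this is what an entry in the `trackers.json` file managed by `TrackerStorage` above looks like; the fields follow `onTrackerFound` / `onTrackerSuccess`, while the zero:// address and timestamps are made-up examples.

```python
# Example trackers.json content (illustrative values only):
example_trackers_json = {
    "shared": {
        "zero://198.51.100.7:15441": {
            "time_added": 1546188500.0,    # when the tracker was first discovered
            "time_found": 1546188500.0,    # last time a peer reported it
            "time_success": 1546189000.0,  # last successful announce
            "latency": 0.42,               # seconds, from announceTracker
            "num_error": 0,                # reset on every success
            "my": False                    # True if it is our own Bootstrapper tracker
        }
    }
}
```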
@@ -1,25 +0,0 @@
import pytest

from AnnounceShare import AnnounceSharePlugin
from Peer import Peer
from Config import config


@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestAnnounceShare:
    def testAnnounceList(self, file_server):
        open("%s/trackers.json" % config.data_dir, "w").write("{}")
        tracker_storage = AnnounceSharePlugin.tracker_storage
        tracker_storage.load()
        print tracker_storage.file_path, config.data_dir
        peer = Peer(file_server.ip, 1544, connection_server=file_server)
        assert peer.request("getTrackers")["trackers"] == []

        tracker_storage.onTrackerFound("zero://%s:15441" % file_server.ip)
        assert peer.request("getTrackers")["trackers"] == []

        # It needs at least one successful announce before it is shared with other peers
        tracker_storage.onTrackerSuccess("zero://%s:15441" % file_server.ip, 1.0)
        assert peer.request("getTrackers")["trackers"] == ["zero://%s:15441" % file_server.ip]

@@ -1,3 +0,0 @@
from src.Test.conftest import *

from Config import config

@@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.

@@ -1 +0,0 @@
import AnnounceSharePlugin
@@ -1,138 +0,0 @@
import time
import itertools

from Plugin import PluginManager
from util import helper
from Crypt import CryptRsa

allow_reload = False  # No source reload supported in this plugin
time_full_announced = {}  # Tracker address: last time all sites were announced to the tracker
connection_pool = {}  # Tracker address: Peer object


# We can only import plugin host classes after the plugins are loaded
@PluginManager.afterLoad
def importHostClasses():
    global Peer, AnnounceError
    from Peer import Peer
    from Site.SiteAnnouncer import AnnounceError


# Process the result we got back from the tracker
def processPeerRes(tracker_address, site, peers):
    added = 0
    # Ip4
    found_ipv4 = 0
    peers_normal = itertools.chain(peers.get("ip4", []), peers.get("ipv4", []), peers.get("ipv6", []))
    for packed_address in peers_normal:
        found_ipv4 += 1
        peer_ip, peer_port = helper.unpackAddress(packed_address)
        if site.addPeer(peer_ip, peer_port, source="tracker"):
            added += 1
    # Onion
    found_onion = 0
    for packed_address in peers["onion"]:
        found_onion += 1
        peer_onion, peer_port = helper.unpackOnionAddress(packed_address)
        if site.addPeer(peer_onion, peer_port, source="tracker"):
            added += 1

    if added:
        site.worker_manager.onPeers()
        site.updateWebsocket(peers_added=added)
    return added


@PluginManager.registerTo("SiteAnnouncer")
class SiteAnnouncerPlugin(object):
    def getTrackerHandler(self, protocol):
        if protocol == "zero":
            return self.announceTrackerZero
        else:
            return super(SiteAnnouncerPlugin, self).getTrackerHandler(protocol)

    def announceTrackerZero(self, tracker_address, mode="start", num_want=10):
        global time_full_announced
        s = time.time()

        need_types = ["ip4"]  # ip4 for backward compatibility reasons
        need_types += self.site.connection_server.supported_ip_types
        if self.site.connection_server.tor_manager.enabled:
            need_types.append("onion")

        if mode == "start" or mode == "more":  # Single: announce only this site
            sites = [self.site]
            full_announce = False
        else:  # Multi: announce all currently served sites
            full_announce = True
            if time.time() - time_full_announced.get(tracker_address, 0) < 60 * 15:  # Don't reannounce all sites within a short time
                return None
            time_full_announced[tracker_address] = time.time()
            from Site import SiteManager
            sites = [site for site in SiteManager.site_manager.sites.values() if site.settings["serving"]]

        # Create request
        add_types = self.getOpenedServiceTypes()
        request = {
            "hashes": [], "onions": [], "port": self.fileserver_port, "need_types": need_types, "need_num": 20, "add": add_types
        }
        for site in sites:
            if "onion" in add_types:
                onion = self.site.connection_server.tor_manager.getOnion(site.address)
                request["onions"].append(onion)
            request["hashes"].append(site.address_hash)

        # The tracker can remove sites that we don't announce
        if full_announce:
            request["delete"] = True

        # Send request to tracker
        tracker_peer = connection_pool.get(tracker_address)  # Re-use tracker connection if possible
        if not tracker_peer:
            tracker_ip, tracker_port = tracker_address.rsplit(":", 1)
            tracker_peer = Peer(str(tracker_ip), int(tracker_port), connection_server=self.site.connection_server)
            tracker_peer.is_tracker_connection = True
            connection_pool[tracker_address] = tracker_peer

        res = tracker_peer.request("announce", request)

        if not res or "peers" not in res:
            if full_announce:
                time_full_announced[tracker_address] = 0
            raise AnnounceError("Invalid response: %s" % res)

        # Add peers from the response to the sites
        site_index = 0
        peers_added = 0
        for site_res in res["peers"]:
            site = sites[site_index]
            peers_added += processPeerRes(tracker_address, site, site_res)
            site_index += 1

        # Check if we need to sign (prove ownership of) the onion addresses
        if "onion_sign_this" in res:
            self.site.log.debug("Signing %s for %s to add %s onions" % (res["onion_sign_this"], tracker_address, len(sites)))
            request["onion_signs"] = {}
            request["onion_sign_this"] = res["onion_sign_this"]
            request["need_num"] = 0
            for site in sites:
                onion = self.site.connection_server.tor_manager.getOnion(site.address)
                publickey = self.site.connection_server.tor_manager.getPublickey(onion)
                if publickey not in request["onion_signs"]:
                    sign = CryptRsa.sign(res["onion_sign_this"], self.site.connection_server.tor_manager.getPrivatekey(onion))
                    request["onion_signs"][publickey] = sign
            res = tracker_peer.request("announce", request)
            if not res or "onion_sign_this" in res:
                if full_announce:
                    time_full_announced[tracker_address] = 0
                raise AnnounceError("Announcing onion addresses failed: %s" % res)

        if full_announce:
            tracker_peer.remove()  # Close the connection, we don't need it in the next 5 minutes

        self.site.log.debug(
            "Tracker announce result: zero://%s (sites: %s, new peers: %s) in %.3fs" %
            (tracker_address, site_index, peers_added, time.time() - s)
        )

        return True
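To summarize the wire format implemented above, a single-site `announce` request sent to a zero:// tracker looks roughly like this; the hash bytes are an illustrative placeholder.

```python
# Shape of the "announce" request built in announceTrackerZero above.
# The address_hash bytes are a made-up placeholder.
announce_request = {
    "hashes": [b"\x12\x34..."],  # hashes of the site addresses to announce
    "onions": [],                # onion addresses, only when Tor is enabled
    "port": 15441,               # our fileserver port
    "need_types": ["ip4"],       # address families we can connect to
    "need_num": 20,              # how many peers we want back per site
    "add": ["ip4"],              # service types we offer (from getOpenedServiceTypes)
}
```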
@@ -1 +0,0 @@
import AnnounceZeroPlugin
@@ -1,158 +0,0 @@
import array


def packPiecefield(data):
    res = []
    if not data:
        return array.array("H", "")

    if data[0] == "0":
        res.append(0)
        find = "1"
    else:
        find = "0"
    last_pos = 0
    pos = 0
    while 1:
        pos = data.find(find, pos)
        if find == "0":
            find = "1"
        else:
            find = "0"
        if pos == -1:
            res.append(len(data) - last_pos)
            break
        res.append(pos - last_pos)
        last_pos = pos
    return array.array("H", res)


def unpackPiecefield(data):
    if not data:
        return ""

    res = []
    char = "1"
    for times in data:
        if times > 10000:
            return ""
        res.append(char * times)
        if char == "1":
            char = "0"
        else:
            char = "1"
    return "".join(res)


class BigfilePiecefield(object):
    __slots__ = ["data"]

    def __init__(self):
        self.data = ""

    def fromstring(self, s):
        self.data = s

    def tostring(self):
        return self.data

    def pack(self):
        return packPiecefield(self.data).tostring()

    def unpack(self, s):
        self.data = unpackPiecefield(array.array("H", s))

    def __getitem__(self, key):
        try:
            return int(self.data[key])
        except IndexError:
            return False

    def __setitem__(self, key, value):
        data = self.data
        if len(data) < key:
            data = data.ljust(key + 1, "0")
        data = data[:key] + str(int(value)) + data[key + 1:]
        self.data = data


class BigfilePiecefieldPacked(object):
    __slots__ = ["data"]

    def __init__(self):
        self.data = ""

    def fromstring(self, data):
        self.data = packPiecefield(data).tostring()

    def tostring(self):
        return unpackPiecefield(array.array("H", self.data))

    def pack(self):
        return array.array("H", self.data).tostring()

    def unpack(self, data):
        self.data = data

    def __getitem__(self, key):
        try:
            return int(self.tostring()[key])
        except IndexError:
            return False

    def __setitem__(self, key, value):
        data = self.tostring()
        if len(data) < key:
            data = data.ljust(key + 1, "0")
        data = data[:key] + str(int(value)) + data[key + 1:]
        self.fromstring(data)


if __name__ == "__main__":
    import os
    import psutil
    import time
    testdata = "1" * 100 + "0" * 900 + "1" * 4000 + "0" * 4999 + "1"
    meminfo = psutil.Process(os.getpid()).memory_info

    for storage in [BigfilePiecefieldPacked, BigfilePiecefield]:
        print "-- Testing storage: %s --" % storage
        m = meminfo()[0]
        s = time.time()
        piecefields = {}
        for i in range(10000):
            piecefield = storage()
            piecefield.fromstring(testdata[:i] + "0" + testdata[i + 1:])
            piecefields[i] = piecefield

        print "Create x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data))

        m = meminfo()[0]
        s = time.time()
        for piecefield in piecefields.values():
            val = piecefield[1000]

        print "Query one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s)

        m = meminfo()[0]
        s = time.time()
        for piecefield in piecefields.values():
            piecefield[1000] = True

        print "Change one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s)

        m = meminfo()[0]
        s = time.time()
        for piecefield in piecefields.values():
            packed = piecefield.pack()

        print "Pack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(packed))

        m = meminfo()[0]
        s = time.time()
        for piecefield in piecefields.values():
            piecefield.unpack(packed)

        print "Unpack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data))

        piecefields = {}
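The piecefield above is a run-length encoding: a "0"/"1" string marking downloaded pieces is stored as alternating run lengths in a 16-bit array. A quick round-trip illustration (values chosen arbitrarily), assuming the functions and classes defined in this module:

```python
# Round-trip illustration of the run-length piecefield encoding above
# (matches the module's Python 2 API; run lengths are stored as uint16).
bits = "1" * 5 + "0" * 3 + "1" * 2           # "1111100011"
packed = packPiecefield(bits)                # array('H', [5, 3, 2]): runs of 1s, 0s, 1s
assert unpackPiecefield(packed) == bits      # decoding restores the bitstring

f = BigfilePiecefield()
f.fromstring(bits)
assert f[0] == 1 and f[5] == 0               # indexing reads single pieces
f[7] = True                                  # mark piece 7 as downloaded
```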
@@ -1,767 +0,0 @@
import time
|
||||
import os
|
||||
import subprocess
|
||||
import shutil
|
||||
import collections
|
||||
import math
|
||||
|
||||
import msgpack
|
||||
import gevent
|
||||
import gevent.lock
|
||||
|
||||
from Plugin import PluginManager
|
||||
from Debug import Debug
|
||||
from Crypt import CryptHash
|
||||
from lib import merkletools
|
||||
from util import helper
|
||||
import util
|
||||
from BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked
|
||||
|
||||
|
||||
# We can only import plugin host clases after the plugins are loaded
|
||||
@PluginManager.afterLoad
|
||||
def importPluginnedClasses():
|
||||
global VerifyError, config
|
||||
from Content.ContentManager import VerifyError
|
||||
from Config import config
|
||||
|
||||
if "upload_nonces" not in locals():
|
||||
upload_nonces = {}
|
||||
|
||||
|
||||
@PluginManager.registerTo("UiRequest")
|
||||
class UiRequestPlugin(object):
|
||||
def isCorsAllowed(self, path):
|
||||
if path == "/ZeroNet-Internal/BigfileUpload":
|
||||
return True
|
||||
else:
|
||||
return super(UiRequestPlugin, self).isCorsAllowed(path)
|
||||
|
||||
def actionBigfileUpload(self):
|
||||
nonce = self.get.get("upload_nonce")
|
||||
if nonce not in upload_nonces:
|
||||
return self.error403("Upload nonce error.")
|
||||
|
||||
upload_info = upload_nonces[nonce]
|
||||
del upload_nonces[nonce]
|
||||
|
||||
self.sendHeader(200, "text/html", noscript=True, extra_headers={
|
||||
"Access-Control-Allow-Origin": "null",
|
||||
"Access-Control-Allow-Credentials": "true"
|
||||
})
|
||||
|
||||
self.readMultipartHeaders(self.env['wsgi.input']) # Skip http headers
|
||||
|
||||
site = upload_info["site"]
|
||||
inner_path = upload_info["inner_path"]
|
||||
|
||||
with site.storage.open(inner_path, "wb", create_dirs=True) as out_file:
|
||||
merkle_root, piece_size, piecemap_info = site.content_manager.hashBigfile(
|
||||
self.env['wsgi.input'], upload_info["size"], upload_info["piece_size"], out_file
|
||||
)
|
||||
|
||||
if len(piecemap_info["sha512_pieces"]) == 1: # Small file, don't split
|
||||
hash = piecemap_info["sha512_pieces"][0].encode("hex")
|
||||
hash_id = site.content_manager.hashfield.getHashId(hash)
|
||||
site.content_manager.optionalDownloaded(inner_path, hash_id, upload_info["size"], own=True)
|
||||
|
||||
else: # Big file
|
||||
file_name = helper.getFilename(inner_path)
|
||||
msgpack.pack({file_name: piecemap_info}, site.storage.open(upload_info["piecemap"], "wb"))
|
||||
|
||||
# Find piecemap and file relative path to content.json
|
||||
file_info = site.content_manager.getFileInfo(inner_path, new_file=True)
|
||||
content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
|
||||
piecemap_relative_path = upload_info["piecemap"][len(content_inner_path_dir):]
|
||||
file_relative_path = inner_path[len(content_inner_path_dir):]
|
||||
|
||||
# Add file to content.json
|
||||
if site.storage.isFile(file_info["content_inner_path"]):
|
||||
content = site.storage.loadJson(file_info["content_inner_path"])
|
||||
else:
|
||||
content = {}
|
||||
if "files_optional" not in content:
|
||||
content["files_optional"] = {}
|
||||
|
||||
content["files_optional"][file_relative_path] = {
|
||||
"sha512": merkle_root,
|
||||
"size": upload_info["size"],
|
||||
"piecemap": piecemap_relative_path,
|
||||
"piece_size": piece_size
|
||||
}
|
||||
|
||||
merkle_root_hash_id = site.content_manager.hashfield.getHashId(merkle_root)
|
||||
site.content_manager.optionalDownloaded(inner_path, merkle_root_hash_id, upload_info["size"], own=True)
|
||||
site.storage.writeJson(file_info["content_inner_path"], content)
|
||||
|
||||
site.content_manager.contents.loadItem(file_info["content_inner_path"]) # reload cache
|
||||
|
||||
return {
|
||||
"merkle_root": merkle_root,
|
||||
"piece_num": len(piecemap_info["sha512_pieces"]),
|
||||
"piece_size": piece_size,
|
||||
"inner_path": inner_path
|
||||
}
|
||||
|
||||
def readMultipartHeaders(self, wsgi_input):
|
||||
for i in range(100):
|
||||
line = wsgi_input.readline()
|
||||
if line == "\r\n":
|
||||
break
|
||||
return i
|
||||
|
||||
def actionFile(self, file_path, *args, **kwargs):
|
||||
if kwargs.get("file_size", 0) > 1024 * 1024 and kwargs.get("path_parts"): # Only check files larger than 1MB
|
||||
path_parts = kwargs["path_parts"]
|
||||
site = self.server.site_manager.get(path_parts["address"])
|
||||
big_file = site.storage.openBigfile(path_parts["inner_path"], prebuffer=2 * 1024 * 1024)
|
||||
if big_file:
|
||||
kwargs["file_obj"] = big_file
|
||||
kwargs["file_size"] = big_file.size
|
||||
|
||||
return super(UiRequestPlugin, self).actionFile(file_path, *args, **kwargs)
|
||||
|
||||
|
||||
@PluginManager.registerTo("UiWebsocket")
|
||||
class UiWebsocketPlugin(object):
|
||||
def actionBigfileUploadInit(self, to, inner_path, size):
|
||||
valid_signers = self.site.content_manager.getValidSigners(inner_path)
|
||||
auth_address = self.user.getAuthAddress(self.site.address)
|
||||
if not self.site.settings["own"] and auth_address not in valid_signers:
|
||||
self.log.error("FileWrite forbidden %s not in valid_signers %s" % (auth_address, valid_signers))
|
||||
return self.response(to, {"error": "Forbidden, you can only modify your own files"})
|
||||
|
||||
nonce = CryptHash.random()
|
||||
piece_size = 1024 * 1024
|
||||
inner_path = self.site.content_manager.sanitizePath(inner_path)
|
||||
file_info = self.site.content_manager.getFileInfo(inner_path, new_file=True)
|
||||
|
||||
content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
|
||||
file_relative_path = inner_path[len(content_inner_path_dir):]
|
||||
|
||||
upload_nonces[nonce] = {
|
||||
"added": time.time(),
|
||||
"site": self.site,
|
||||
"inner_path": inner_path,
|
||||
"websocket_client": self,
|
||||
"size": size,
|
||||
"piece_size": piece_size,
|
||||
"piecemap": inner_path + ".piecemap.msgpack"
|
||||
}
|
||||
return {
|
||||
"url": "/ZeroNet-Internal/BigfileUpload?upload_nonce=" + nonce,
|
||||
"piece_size": piece_size,
|
||||
"inner_path": inner_path,
|
||||
"file_relative_path": file_relative_path
|
||||
}
|
||||
|
||||
def actionSiteSetAutodownloadBigfileLimit(self, to, limit):
|
||||
permissions = self.getPermissions(to)
|
||||
if "ADMIN" not in permissions:
|
||||
return self.response(to, "You don't have permission to run this command")
|
||||
|
||||
self.site.settings["autodownload_bigfile_size_limit"] = int(limit)
|
||||
self.response(to, "ok")
|
||||
|
||||
def actionFileDelete(self, to, inner_path):
|
||||
piecemap_inner_path = inner_path + ".piecemap.msgpack"
|
||||
if self.hasFilePermission(inner_path) and self.site.storage.isFile(piecemap_inner_path):
|
||||
# Also delete .piecemap.msgpack file if exists
|
||||
self.log.debug("Deleting piecemap: %s" % piecemap_inner_path)
|
||||
file_info = self.site.content_manager.getFileInfo(piecemap_inner_path)
|
||||
if file_info:
|
||||
content_json = self.site.storage.loadJson(file_info["content_inner_path"])
|
||||
relative_path = file_info["relative_path"]
|
||||
if relative_path in content_json.get("files_optional", {}):
|
||||
del content_json["files_optional"][relative_path]
|
||||
self.site.storage.writeJson(file_info["content_inner_path"], content_json)
|
||||
self.site.content_manager.loadContent(file_info["content_inner_path"], add_bad_files=False, force=True)
|
||||
try:
|
||||
self.site.storage.delete(piecemap_inner_path)
|
||||
except Exception, err:
|
||||
self.log.error("File %s delete error: %s" % (piecemap_inner_path, err))
|
||||
|
||||
return super(UiWebsocketPlugin, self).actionFileDelete(to, inner_path)
|
||||
|
||||
|
||||
@PluginManager.registerTo("ContentManager")
|
||||
class ContentManagerPlugin(object):
|
||||
def getFileInfo(self, inner_path, *args, **kwargs):
|
||||
if "|" not in inner_path:
|
||||
return super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
|
||||
|
||||
inner_path, file_range = inner_path.split("|")
|
||||
pos_from, pos_to = map(int, file_range.split("-"))
|
||||
file_info = super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
|
||||
return file_info
|
||||
|
||||
def readFile(self, file_in, size, buff_size=1024 * 64):
|
||||
part_num = 0
|
||||
recv_left = size
|
||||
|
||||
while 1:
|
||||
part_num += 1
|
||||
read_size = min(buff_size, recv_left)
|
||||
part = file_in.read(read_size)
|
||||
|
||||
if not part:
|
||||
break
|
||||
yield part
|
||||
|
||||
if part_num % 100 == 0: # Avoid blocking ZeroNet execution during upload
|
||||
time.sleep(0.001)
|
||||
|
||||
recv_left -= read_size
|
||||
if recv_left <= 0:
|
||||
break

    def hashBigfile(self, file_in, size, piece_size=1024 * 1024, file_out=None):
        self.site.settings["has_bigfile"] = True

        recv = 0
        try:
            piece_hash = CryptHash.sha512t()
            piece_hashes = []
            piece_recv = 0

            mt = merkletools.MerkleTools()
            mt.hash_function = CryptHash.sha512t

            part = ""
            for part in self.readFile(file_in, size):
                if file_out:
                    file_out.write(part)

                recv += len(part)
                piece_recv += len(part)
                piece_hash.update(part)
                if piece_recv >= piece_size:
                    piece_digest = piece_hash.digest()
                    piece_hashes.append(piece_digest)
                    mt.leaves.append(piece_digest)
                    piece_hash = CryptHash.sha512t()
                    piece_recv = 0

                if len(piece_hashes) % 100 == 0 or recv == size:
                    self.log.info("- [HASHING:%.0f%%] Pieces: %s, %.1fMB/%.1fMB" % (
                        float(recv) / size * 100, len(piece_hashes), recv / 1024 / 1024, size / 1024 / 1024
                    ))

            # Also hash the last, partial piece
            if len(part) > 0:
                piece_digest = piece_hash.digest()
                piece_hashes.append(piece_digest)
                mt.leaves.append(piece_digest)
        except Exception as err:
            raise err
        finally:
            if file_out:
                file_out.close()

        mt.make_tree()
        return mt.get_merkle_root(), piece_size, {
            "sha512_pieces": piece_hashes
        }
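
    # Minimal standalone sketch of the piece-hashing scheme above, assuming
    # CryptHash.sha512t is SHA-512 truncated to 256 bits (an assumption made for
    # illustration; hashBigfile above is the authoritative implementation):
    #
    #     import hashlib
    #
    #     def sha512t_digest(data):
    #         return hashlib.sha512(data).digest()[:32]  # assumed truncation
    #
    #     def hash_pieces(data, piece_size=1024 * 1024):
    #         # Split into fixed-size pieces (the last may be partial) and hash each
    #         return [sha512t_digest(data[i:i + piece_size]) for i in range(0, len(data), piece_size)]
    #
    #     piece_hashes = hash_pieces(b"x" * (3 * 1024 * 1024 + 10))  # 3 full pieces + 1 partial = 4 hashes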

    def hashFile(self, dir_inner_path, file_relative_path, optional=False):
        inner_path = dir_inner_path + file_relative_path

        file_size = self.site.storage.getSize(inner_path)
        # Only care about optional files >1MB
        if not optional or file_size < 1 * 1024 * 1024:
            return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)

        back = {}
        content = self.contents.get(dir_inner_path + "content.json")

        hash = None
        piecemap_relative_path = None
        piece_size = None

        # Don't re-hash if it's already in content.json
        if content and file_relative_path in content.get("files_optional", {}):
            file_node = content["files_optional"][file_relative_path]
            if file_node["size"] == file_size:
                self.log.info("- [SAME SIZE] %s" % file_relative_path)
                hash = file_node.get("sha512")
                piecemap_relative_path = file_node.get("piecemap")
                piece_size = file_node.get("piece_size")

        if not hash or not piecemap_relative_path:  # Not in content.json yet
            if file_size < 5 * 1024 * 1024:  # Don't create piecemap automatically for files smaller than 5MB
                return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)

            self.log.info("- [HASHING] %s" % file_relative_path)
            merkle_root, piece_size, piecemap_info = self.hashBigfile(self.site.storage.open(inner_path, "rb"), file_size)
            if not hash:
                hash = merkle_root

            if not piecemap_relative_path:
                file_name = helper.getFilename(file_relative_path)
                piecemap_relative_path = file_relative_path + ".piecemap.msgpack"
                piecemap_inner_path = inner_path + ".piecemap.msgpack"

                msgpack.pack({file_name: piecemap_info}, self.site.storage.open(piecemap_inner_path, "wb"))

                back.update(super(ContentManagerPlugin, self).hashFile(dir_inner_path, piecemap_relative_path, optional=True))

        piece_num = int(math.ceil(float(file_size) / piece_size))

        # Add the merkle root to hashfield
        hash_id = self.site.content_manager.hashfield.getHashId(hash)
        self.optionalDownloaded(inner_path, hash_id, file_size, own=True)
        self.site.storage.piecefields[hash].fromstring("1" * piece_num)

        back[file_relative_path] = {"sha512": hash, "size": file_size, "piecemap": piecemap_relative_path, "piece_size": piece_size}
        return back

    def getPiecemap(self, inner_path):
        file_info = self.site.content_manager.getFileInfo(inner_path)
        piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
        self.site.needFile(piecemap_inner_path, priority=20)
        piecemap = msgpack.unpack(self.site.storage.open(piecemap_inner_path))[helper.getFilename(inner_path)]
        piecemap["piece_size"] = file_info["piece_size"]
        return piecemap

    def verifyPiece(self, inner_path, pos, piece):
        piecemap = self.getPiecemap(inner_path)
        piece_i = pos / piecemap["piece_size"]
        if CryptHash.sha512sum(piece, format="digest") != piecemap["sha512_pieces"][piece_i]:
            raise VerifyError("Invalid hash")
        return True
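
    # Piece verification is an index lookup plus a digest compare; a sketch using
    # a piecemap dict of the shape returned by getPiecemap (sha512t_digest stands
    # in for the same truncated hash assumed above):
    #
    #     def verify_piece(piecemap, pos, piece_data):
    #         piece_i = pos // piecemap["piece_size"]  # which piece the offset falls into
    #         return sha512t_digest(piece_data) == piecemap["sha512_pieces"][piece_i]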

    def verifyFile(self, inner_path, file, ignore_same=True):
        if "|" not in inner_path:
            return super(ContentManagerPlugin, self).verifyFile(inner_path, file, ignore_same)

        inner_path, file_range = inner_path.split("|")
        pos_from, pos_to = map(int, file_range.split("-"))

        return self.verifyPiece(inner_path, pos_from, file)

    def optionalDownloaded(self, inner_path, hash_id, size=None, own=False):
        if "|" in inner_path:
            inner_path, file_range = inner_path.split("|")
            pos_from, pos_to = map(int, file_range.split("-"))
            file_info = self.getFileInfo(inner_path)

            # Mark piece downloaded
            piece_i = pos_from / file_info["piece_size"]
            self.site.storage.piecefields[file_info["sha512"]][piece_i] = True

            # Only add to site size on first request
            if hash_id in self.hashfield:
                size = 0
        elif size > 1024 * 1024:
            file_info = self.getFileInfo(inner_path)
            if file_info and "sha512" in file_info:  # We already have the file, but not in piecefield
                sha512 = file_info["sha512"]
                if sha512 not in self.site.storage.piecefields:
                    self.site.storage.checkBigfile(inner_path)

        return super(ContentManagerPlugin, self).optionalDownloaded(inner_path, hash_id, size, own)

    def optionalRemoved(self, inner_path, hash_id, size=None):
        if size and size > 1024 * 1024:
            file_info = self.getFileInfo(inner_path)
            sha512 = file_info["sha512"]
            if sha512 in self.site.storage.piecefields:
                del self.site.storage.piecefields[sha512]

            # Also remove other pieces of the file from download queue
            for key in self.site.bad_files.keys():
                if key.startswith(inner_path + "|"):
                    del self.site.bad_files[key]
            self.site.worker_manager.removeSolvedFileTasks()
        return super(ContentManagerPlugin, self).optionalRemoved(inner_path, hash_id, size)


@PluginManager.registerTo("SiteStorage")
class SiteStoragePlugin(object):
    def __init__(self, *args, **kwargs):
        super(SiteStoragePlugin, self).__init__(*args, **kwargs)
        self.piecefields = collections.defaultdict(BigfilePiecefield)
        if "piecefields" in self.site.settings.get("cache", {}):
            for sha512, piecefield_packed in self.site.settings["cache"].get("piecefields").iteritems():
                if piecefield_packed:
                    self.piecefields[sha512].unpack(piecefield_packed.decode("base64"))
            self.site.settings["cache"]["piecefields"] = {}

    def createSparseFile(self, inner_path, size, sha512=None):
        file_path = self.getPath(inner_path)

        file_dir = os.path.dirname(file_path)
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)

        f = open(file_path, 'wb')
        f.truncate(min(1024 * 1024 * 5, size))  # Only pre-allocate up to 5MB
        f.close()
        if os.name == "nt":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.call(["fsutil", "sparse", "setflag", file_path], close_fds=True, startupinfo=startupinfo)

        if sha512 and sha512 in self.piecefields:
            self.log.debug("%s: File does not exist, but has a piecefield. Deleting piecefield." % inner_path)
            del self.piecefields[sha512]

    def write(self, inner_path, content):
        if "|" not in inner_path:
            return super(SiteStoragePlugin, self).write(inner_path, content)

        # Write to a specific position by passing "|{pos_from}-{pos_to}" after the filename
        inner_path, file_range = inner_path.split("|")
        pos_from, pos_to = map(int, file_range.split("-"))
        file_path = self.getPath(inner_path)

        # Create dir if it does not exist
        file_dir = os.path.dirname(file_path)
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)

        if not os.path.isfile(file_path):
            file_info = self.site.content_manager.getFileInfo(inner_path)
            self.createSparseFile(inner_path, file_info["size"])

        # Write file
        with open(file_path, "rb+") as file:
            file.seek(pos_from)
            if hasattr(content, 'read'):  # File-like object
                shutil.copyfileobj(content, file)  # Write buff to disk
            else:  # Simple string
                file.write(content)
        del content
        self.onUpdated(inner_path)
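
    # Usage sketch: a ranged write fills exactly one piece of the sparse file
    # (mirrors testSparseFile further below; the path is illustrative):
    #
    #     site.storage.createSparseFile("data/big.bin", 100 * 1024 * 1024)
    #     site.storage.write("data/big.bin|0-1048576", "A" * 1024)            # first piece
    #     site.storage.write("data/big.bin|103809024-104857600", "B" * 1024)  # last piece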

    def checkBigfile(self, inner_path):
        file_info = self.site.content_manager.getFileInfo(inner_path)
        if not file_info or (file_info and "piecemap" not in file_info):  # It's not a big file
            return False

        self.site.settings["has_bigfile"] = True
        file_path = self.getPath(inner_path)
        sha512 = file_info["sha512"]
        piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
        if os.path.isfile(file_path):
            if sha512 not in self.piecefields:
                if open(file_path).read(128) == "\0" * 128:
                    piece_data = "0"
                else:
                    piece_data = "1"
                self.log.debug("%s: File exists, but not in piecefield. Filling piecefield with %s * %s." % (inner_path, piece_num, piece_data))
                self.piecefields[sha512].fromstring(piece_data * piece_num)
        else:
            self.log.debug("Creating bigfile: %s" % inner_path)
            self.createSparseFile(inner_path, file_info["size"], sha512)
            self.piecefields[sha512].fromstring("0" * piece_num)
        return True

    def openBigfile(self, inner_path, prebuffer=0):
        if not self.checkBigfile(inner_path):
            return False
        self.site.needFile(inner_path, blocking=False)  # Download piecemap
        return BigFile(self.site, inner_path, prebuffer=prebuffer)


class BigFile(object):
    def __init__(self, site, inner_path, prebuffer=0):
        self.site = site
        self.inner_path = inner_path
        file_path = site.storage.getPath(inner_path)
        file_info = self.site.content_manager.getFileInfo(inner_path)
        self.piece_size = file_info["piece_size"]
        self.sha512 = file_info["sha512"]
        self.size = file_info["size"]
        self.prebuffer = prebuffer
        self.read_bytes = 0

        self.piecefield = self.site.storage.piecefields[self.sha512]
        self.f = open(file_path, "rb+")
        self.read_lock = gevent.lock.Semaphore()

    def read(self, buff=64 * 1024):
        with self.read_lock:
            pos = self.f.tell()
            read_until = min(self.size, pos + buff)
            requests = []
            # Request all required blocks
            while 1:
                piece_i = pos / self.piece_size
                if piece_i * self.piece_size >= read_until:
                    break
                pos_from = piece_i * self.piece_size
                pos_to = pos_from + self.piece_size
                if not self.piecefield[piece_i]:
                    requests.append(self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=10))
                pos += self.piece_size

            if not all(requests):
                return None

            # Request prebuffer
            if self.prebuffer:
                prebuffer_until = min(self.size, read_until + self.prebuffer)
                priority = 3
                while 1:
                    piece_i = pos / self.piece_size
                    if piece_i * self.piece_size >= prebuffer_until:
                        break
                    pos_from = piece_i * self.piece_size
                    pos_to = pos_from + self.piece_size
                    if not self.piecefield[piece_i]:
                        self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=max(0, priority))
                    priority -= 1
                    pos += self.piece_size

            gevent.joinall(requests)
            self.read_bytes += buff

            # Increase buffer for long reads
            if self.read_bytes > 7 * 1024 * 1024 and self.prebuffer < 5 * 1024 * 1024:
                self.site.log.debug("%s: Increasing bigfile buffer size to 5MB..." % self.inner_path)
                self.prebuffer = 5 * 1024 * 1024

            return self.f.read(buff)

    def seek(self, pos, whence=0):
        with self.read_lock:
            if whence == 2:  # Relative from file end
                pos = self.size + pos  # Use the real size instead of size on the disk
                whence = 0
            return self.f.seek(pos, whence)

    def tell(self):
        return self.f.tell()

    def close(self):
        self.f.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
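
# Usage sketch for the on-demand reader above (see also testOpenBigfile below):
# reads transparently trigger downloads of any missing pieces in the range.
#
#     f = site.storage.openBigfile("data/optional.any.iso", prebuffer=2 * 1024 * 1024)
#     if f:                      # openBigfile returns False if the file has no piecemap
#         f.seek(5 * 1024 * 1024)
#         data = f.read(7)       # fetches piece 5 on demand, prebuffers 2MB ahead
#         f.close()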


@PluginManager.registerTo("WorkerManager")
class WorkerManagerPlugin(object):
    def addTask(self, inner_path, *args, **kwargs):
        file_info = kwargs.get("file_info")
        if file_info and "piecemap" in file_info:  # Bigfile
            self.site.settings["has_bigfile"] = True

            piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
            piecemap_task = None
            if not self.site.storage.isFile(piecemap_inner_path):
                # Start downloading the piecemap
                piecemap_task = super(WorkerManagerPlugin, self).addTask(piecemap_inner_path, priority=30)
                autodownload_bigfile_size_limit = self.site.settings.get("autodownload_bigfile_size_limit", config.autodownload_bigfile_size_limit)
                if "|" not in inner_path and self.site.isDownloadable(inner_path) and file_info["size"] / 1024 / 1024 <= autodownload_bigfile_size_limit:
                    gevent.spawn_later(0.1, self.site.needFile, inner_path + "|all")  # Download all pieces

            if "|" in inner_path:
                # Start downloading a piece
                task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)

                inner_path, file_range = inner_path.split("|")
                pos_from, pos_to = map(int, file_range.split("-"))
                task["piece_i"] = pos_from / file_info["piece_size"]
                task["sha512"] = file_info["sha512"]
            else:
                if inner_path in self.site.bad_files:
                    del self.site.bad_files[inner_path]
                if piecemap_task:
                    task = piecemap_task
                else:
                    fake_evt = gevent.event.AsyncResult()  # Don't download anything if no range specified
                    fake_evt.set(True)
                    task = {"evt": fake_evt}

            if not self.site.storage.isFile(inner_path):
                self.site.storage.createSparseFile(inner_path, file_info["size"], file_info["sha512"])
                piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
                self.site.storage.piecefields[file_info["sha512"]].fromstring("0" * piece_num)
        else:
            task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)
        return task

    def taskAddPeer(self, task, peer):
        if "piece_i" in task:
            if not peer.piecefields[task["sha512"]][task["piece_i"]]:
                if task["sha512"] not in peer.piecefields:
                    gevent.spawn(peer.updatePiecefields, force=True)
                elif not task["peers"]:
                    gevent.spawn(peer.updatePiecefields)

                return False  # Don't add the peer to the task if the piece is missing from its piecefield
        return super(WorkerManagerPlugin, self).taskAddPeer(task, peer)


@PluginManager.registerTo("FileRequest")
class FileRequestPlugin(object):
    def isReadable(self, site, inner_path, file, pos):
        # Peek into the file
        if file.read(10) == "\0" * 10:
            # Looks empty, but make sure we don't have that piece
            file_info = site.content_manager.getFileInfo(inner_path)
            piece_i = pos / file_info["piece_size"]
            if not site.storage.piecefields[file_info["sha512"]][piece_i]:
                return False
        # Seek back to the position we want to read
        file.seek(pos)
        return super(FileRequestPlugin, self).isReadable(site, inner_path, file, pos)

    def actionGetPiecefields(self, params):
        site = self.sites.get(params["site"])
        if not site or not site.settings["serving"]:  # Site unknown or not serving
            self.response({"error": "Unknown site"})
            return False

        # Add peer to site if not added before
        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True)
        if not peer.connection:  # Just added
            peer.connect(self.connection)  # Assign current connection to peer

        piecefields_packed = {sha512: piecefield.pack() for sha512, piecefield in site.storage.piecefields.iteritems()}
        self.response({"piecefields_packed": piecefields_packed})

    def actionSetPiecefields(self, params):
        site = self.sites.get(params["site"])
        if not site or not site.settings["serving"]:  # Site unknown or not serving
            self.response({"error": "Unknown site"})
            self.connection.badAction(5)
            return False

        # Add or get peer
        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection)
        if not peer.connection:
            peer.connect(self.connection)

        peer.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
        for sha512, piecefield_packed in params["piecefields_packed"].iteritems():
            peer.piecefields[sha512].unpack(piecefield_packed)
        site.settings["has_bigfile"] = True

        self.response({"ok": "Updated"})


@PluginManager.registerTo("Peer")
class PeerPlugin(object):
    def __getattr__(self, key):
        if key == "piecefields":
            self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
            return self.piecefields
        elif key == "time_piecefields_updated":
            self.time_piecefields_updated = None
            return self.time_piecefields_updated
        else:
            return super(PeerPlugin, self).__getattr__(key)

    @util.Noparallel(ignore_args=True)
    def updatePiecefields(self, force=False):
        if self.connection and self.connection.handshake.get("rev", 0) < 2190:
            return False  # Not supported

        # Don't update piecefield again in 1 min
        if self.time_piecefields_updated and time.time() - self.time_piecefields_updated < 60 and not force:
            return False

        self.time_piecefields_updated = time.time()
        res = self.request("getPiecefields", {"site": self.site.address})
        if not res or "error" in res:
            return False

        self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
        try:
            for sha512, piecefield_packed in res["piecefields_packed"].iteritems():
                self.piecefields[sha512].unpack(piecefield_packed)
        except Exception as err:
            self.log("Invalid updatePiecefields response: %s" % Debug.formatException(err))

        return self.piecefields

    def sendMyHashfield(self, *args, **kwargs):
        return super(PeerPlugin, self).sendMyHashfield(*args, **kwargs)

    def updateHashfield(self, *args, **kwargs):
        if self.site.settings.get("has_bigfile"):
            thread = gevent.spawn(self.updatePiecefields, *args, **kwargs)
            back = super(PeerPlugin, self).updateHashfield(*args, **kwargs)
            thread.join()
            return back
        else:
            return super(PeerPlugin, self).updateHashfield(*args, **kwargs)

    def getFile(self, site, inner_path, *args, **kwargs):
        if "|" in inner_path:
            inner_path, file_range = inner_path.split("|")
            pos_from, pos_to = map(int, file_range.split("-"))
            kwargs["pos_from"] = pos_from
            kwargs["pos_to"] = pos_to
        return super(PeerPlugin, self).getFile(site, inner_path, *args, **kwargs)


@PluginManager.registerTo("Site")
class SitePlugin(object):
    def isFileDownloadAllowed(self, inner_path, file_info):
        if "piecemap" in file_info:
            file_size_mb = file_info["size"] / 1024 / 1024
            if config.bigfile_size_limit and file_size_mb > config.bigfile_size_limit:
                self.log.debug(
                    "Bigfile %s too large: %sMB > %sMB, skipping..." %
                    (inner_path, file_size_mb, config.bigfile_size_limit)
                )
                return False

            file_info = file_info.copy()
            file_info["size"] = file_info["piece_size"]
        return super(SitePlugin, self).isFileDownloadAllowed(inner_path, file_info)

    def getSettingsCache(self):
        back = super(SitePlugin, self).getSettingsCache()
        if self.storage.piecefields:
            back["piecefields"] = {sha512: piecefield.pack().encode("base64") for sha512, piecefield in self.storage.piecefields.iteritems()}
        return back

    def needFile(self, inner_path, *args, **kwargs):
        if inner_path.endswith("|all"):
            @util.Pooled(20)
            def pooledNeedBigfile(inner_path, *args, **kwargs):
                if inner_path not in self.bad_files:
                    self.log.debug("Cancelled piece, skipping %s" % inner_path)
                    return False
                return self.needFile(inner_path, *args, **kwargs)

            inner_path = inner_path.replace("|all", "")
            file_info = self.needFileInfo(inner_path)
            file_size = file_info["size"]
            piece_size = file_info["piece_size"]

            piece_num = int(math.ceil(float(file_size) / piece_size))

            file_threads = []

            piecefield = self.storage.piecefields.get(file_info["sha512"])

            for piece_i in range(piece_num):
                piece_from = piece_i * piece_size
                piece_to = min(file_size, piece_from + piece_size)
                if not piecefield or not piecefield[piece_i]:
                    inner_path_piece = "%s|%s-%s" % (inner_path, piece_from, piece_to)
                    self.bad_files[inner_path_piece] = self.bad_files.get(inner_path_piece, 1)
                    res = pooledNeedBigfile(inner_path_piece, blocking=False)
                    if res is not True and res is not False:
                        file_threads.append(res)
            gevent.joinall(file_threads)
        else:
            return super(SitePlugin, self).needFile(inner_path, *args, **kwargs)
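
    # The "|all" suffix fans a single needFile() call out into one request per
    # missing piece; for a 10MB file with 1MB pieces the expansion looks like:
    #
    #     site.needFile("data/optional.any.iso|all")
    #     #   -> needFile("data/optional.any.iso|0-1048576")
    #     #   -> needFile("data/optional.any.iso|1048576-2097152")
    #     #   ...
    #     #   -> needFile("data/optional.any.iso|9437184-10485760")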


@PluginManager.registerTo("ConfigPlugin")
class ConfigPlugin(object):
    def createArguments(self):
        group = self.parser.add_argument_group("Bigfile plugin")
        group.add_argument('--autodownload_bigfile_size_limit', help='Also download bigfiles smaller than this limit if the help distribute option is checked', default=1, metavar="MB", type=int)
        group.add_argument('--bigfile_size_limit', help='Maximum size of downloaded big files', default=False, metavar="MB", type=int)

        return super(ConfigPlugin, self).createArguments()
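
    # Both options land on the global config object; an illustrative invocation
    # (zeronet.py being the usual entry point):
    #
    #     python zeronet.py --bigfile_size_limit 100 --autodownload_bigfile_size_limit 10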

@@ -1,493 +0,0 @@
import time
from cStringIO import StringIO

import pytest
import msgpack
import mock

from Connection import ConnectionServer
from Content.ContentManager import VerifyError
from File import FileServer
from File import FileRequest
from Worker import WorkerManager
from Peer import Peer
from Bigfile import BigfilePiecefield, BigfilePiecefieldPacked
from Test import Spy


@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestBigfile:
    privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"

    def createBigfile(self, site, inner_path="data/optional.any.iso", pieces=10):
        f = site.storage.open(inner_path, "w")
        for i in range(pieces * 100):
            f.write(("Test%s" % i).ljust(10, "-") * 1000)
        f.close()
        assert site.content_manager.sign("content.json", self.privatekey)
        return inner_path

    def testPiecemapCreate(self, site):
        inner_path = self.createBigfile(site)
        content = site.storage.loadJson("content.json")
        assert "data/optional.any.iso" in content["files_optional"]
        file_node = content["files_optional"][inner_path]
        assert file_node["size"] == 10 * 1000 * 1000
        assert file_node["sha512"] == "47a72cde3be80b4a829e7674f72b7c6878cf6a70b0c58c6aa6c17d7e9948daf6"
        assert file_node["piecemap"] == inner_path + ".piecemap.msgpack"

        piecemap = msgpack.unpack(site.storage.open(file_node["piecemap"], "rb"))["optional.any.iso"]
        assert len(piecemap["sha512_pieces"]) == 10
        assert piecemap["sha512_pieces"][0] != piecemap["sha512_pieces"][1]
        assert piecemap["sha512_pieces"][0].encode("hex") == "a73abad9992b3d0b672d0c2a292046695d31bebdcb1e150c8410bbe7c972eff3"

    def testVerifyPiece(self, site):
        inner_path = self.createBigfile(site)

        # Verify all 10 pieces
        f = site.storage.open(inner_path, "rb")
        for i in range(10):
            piece = StringIO(f.read(1024 * 1024))
            piece.seek(0)
            site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
        f.close()

        # Try to verify piece 0 against piece 1's hash
        with pytest.raises(VerifyError) as err:
            i = 1
            f = site.storage.open(inner_path, "rb")
            piece = StringIO(f.read(1024 * 1024))
            f.close()
            site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
        assert "Invalid hash" in str(err)

    def testSparseFile(self, site):
        inner_path = "sparsefile"

        # Create a 100MB sparse file
        site.storage.createSparseFile(inner_path, 100 * 1024 * 1024)

        # Write to the beginning of the file
        s = time.time()
        f = site.storage.write("%s|%s-%s" % (inner_path, 0, 1024 * 1024), "hellostart" * 1024)
        time_write_start = time.time() - s

        # Write to the end of the file
        s = time.time()
        f = site.storage.write("%s|%s-%s" % (inner_path, 99 * 1024 * 1024, 99 * 1024 * 1024 + 1024 * 1024), "helloend" * 1024)
        time_write_end = time.time() - s

        # Verify the writes
        f = site.storage.open(inner_path)
        assert f.read(10) == "hellostart"
        f.seek(99 * 1024 * 1024)
        assert f.read(8) == "helloend"
        f.close()

        site.storage.delete(inner_path)

        # Writing to the end should not take much longer than writing to the start
        assert time_write_end <= max(0.1, time_write_start * 1.1)

    def testRangedFileRequest(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        file_server.sites[site.address] = site
        client = FileServer(file_server.ip, 1545)
        client.sites[site_temp.address] = site_temp
        site_temp.connection_server = client
        connection = client.getConnection(file_server.ip, 1544)

        # Add file_server as peer to client
        peer_file_server = site_temp.addPeer(file_server.ip, 1544)

        buff = peer_file_server.getFile(site_temp.address, "%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))

        assert len(buff.getvalue()) == 1 * 1024 * 1024  # Correct block size
        assert buff.getvalue().startswith("Test524")  # Correct data
        buff.seek(0)
        assert site.content_manager.verifyPiece(inner_path, 5 * 1024 * 1024, buff)  # Correct hash

        connection.close()
        client.stop()

    def testRangedFileDownload(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Make sure the file and the piecemap are in the optional hashfield
        file_info = site.content_manager.getFileInfo(inner_path)
        assert site.content_manager.hashfield.hasHash(file_info["sha512"])

        piecemap_hash = site.content_manager.getFileInfo(file_info["piecemap"])["sha512"]
        assert site.content_manager.hashfield.hasHash(piecemap_hash)

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        peer_client = site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        bad_files = site_temp.storage.verifyFiles(quick_check=True)["bad_files"]
        assert not bad_files

        # client_piecefield = peer_client.piecefields[file_info["sha512"]].tostring()
        # assert client_piecefield == "1" * 10

        # Download the blocks at the 5MB and 9MB offsets
        site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
        site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))

        # Verify the first block was not downloaded
        f = site_temp.storage.open(inner_path)
        assert f.read(10) == "\0" * 10
        # Verify the requested blocks were downloaded
        f.seek(5 * 1024 * 1024)
        assert f.read(7) == "Test524"
        f.seek(9 * 1024 * 1024)
        assert f.read(7) == "943---T"

        # Verify hashfield
        assert set(site_temp.content_manager.hashfield) == set([18343, 30970])  # 18343: data/optional.any.iso, 30970: data/optional.any.iso.piecemap.msgpack

    def testOpenBigfile(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Open virtual file
        assert not site_temp.storage.isFile(inner_path)

        with site_temp.storage.openBigfile(inner_path) as f:
            with Spy.Spy(FileRequest, "route") as requests:
                f.seek(5 * 1024 * 1024)
                assert f.read(7) == "Test524"

                f.seek(9 * 1024 * 1024)
                assert f.read(7) == "943---T"

            assert len(requests) == 4  # 1x piecemap + 1x getpiecefield + 2x for pieces

            assert set(site_temp.content_manager.hashfield) == set([18343, 30970])

            assert site_temp.storage.piecefields[f.sha512].tostring() == "0000010001"
            assert f.sha512 in site_temp.getSettingsCache()["piecefields"]

            # Test requesting an already downloaded block
            with Spy.Spy(FileRequest, "route") as requests:
                f.seek(5 * 1024 * 1024)
                assert f.read(7) == "Test524"

            assert len(requests) == 0

            # Test a read that overflows into multiple blocks
            with Spy.Spy(FileRequest, "route") as requests:
                f.seek(5 * 1024 * 1024)  # We already have this block
                data = f.read(1024 * 1024 * 3)  # The read overflows into the next two blocks
                assert data.startswith("Test524")
                assert data.endswith("Test838-")
                assert "\0" not in data  # No null bytes allowed

            assert len(requests) == 2  # Two block downloads

            # Test out of range request
            f.seek(5 * 1024 * 1024)
            data = f.read(1024 * 1024 * 30)
            assert len(data) == 10 * 1000 * 1000 - (5 * 1024 * 1024)

            f.seek(30 * 1024 * 1024)
            data = f.read(1024 * 1024 * 30)
            assert len(data) == 0

    @pytest.mark.parametrize("piecefield_obj", [BigfilePiecefield, BigfilePiecefieldPacked])
    def testPiecefield(self, piecefield_obj, site):
        testdatas = [
            "1" * 100 + "0" * 900 + "1" * 4000 + "0" * 4999 + "1",
            "010101" * 10 + "01" * 90 + "10" * 400 + "0" * 4999,
            "1" * 10000,
            "0" * 10000
        ]
        for testdata in testdatas:
            piecefield = piecefield_obj()

            piecefield.fromstring(testdata)
            assert piecefield.tostring() == testdata
            assert piecefield[0] == int(testdata[0])
            assert piecefield[100] == int(testdata[100])
            assert piecefield[1000] == int(testdata[1000])
            assert piecefield[len(testdata) - 1] == int(testdata[len(testdata) - 1])

            packed = piecefield.pack()
            piecefield_new = piecefield_obj()
            piecefield_new.unpack(packed)
            assert piecefield.tostring() == piecefield_new.tostring()
            assert piecefield_new.tostring() == testdata
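
    # A piecefield is conceptually a bitmap of downloaded pieces; a toy
    # pack/unpack of the same idea (the real BigfilePiecefield classes use a
    # more compact encoding and are the authoritative format):
    #
    #     def pack(bits):    # "1001" -> bytes
    #         return bytes(bytearray(int(bits[i:i + 8].ljust(8, "0"), 2) for i in range(0, len(bits), 8)))
    #
    #     def unpack(data, length):    # bytes -> "1001"
    #         return "".join(bin(byte)[2:].zfill(8) for byte in bytearray(data))[:length]
    #
    #     assert unpack(pack("1001"), 4) == "1001"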

    def testFileGet(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        site_temp.connection_server = FileServer(file_server.ip, 1545)
        site_temp.connection_server.sites[site_temp.address] = site_temp
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Download second block
        with site_temp.storage.openBigfile(inner_path) as f:
            f.seek(1024 * 1024)
            assert f.read(1024)[0] != "\0"

        # Make sure the first block was not downloaded
        with site_temp.storage.open(inner_path) as f:
            assert f.read(1024)[0] == "\0"

        peer2 = site.addPeer(file_server.ip, 1545, return_peer=True)

        # A request for the first (missing) block should fail
        assert not peer2.getFile(site.address, "%s|0-%s" % (inner_path, 1024 * 1024 * 1))

        # A request for the second (downloaded) block should succeed
        assert peer2.getFile(site.address, "%s|%s-%s" % (inner_path, 1024 * 1024 * 1, 1024 * 1024 * 2))

    def benchmarkPeerMemory(self, site, file_server):
        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        import psutil, os
        meminfo = psutil.Process(os.getpid()).memory_info

        mem_s = meminfo()[0]
        s = time.time()
        for i in range(25000):
            site.addPeer(file_server.ip, i)
        print "%.3fs MEM: + %sKB" % (time.time() - s, (meminfo()[0] - mem_s) / 1024)  # 0.082s MEM: + 6800KB
        print site.peers.values()[0].piecefields

    def testUpdatePiecefield(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        server1 = file_server
        server1.sites[site.address] = site
        server2 = FileServer(file_server.ip, 1545)
        server2.sites[site_temp.address] = site_temp
        site_temp.connection_server = server2

        # Add file_server as peer to client
        server2_peer1 = site_temp.addPeer(file_server.ip, 1544)

        # Testing piecefield sync
        assert len(server2_peer1.piecefields) == 0
        assert server2_peer1.updatePiecefields()  # Query piecefields from peer
        assert len(server2_peer1.piecefields) > 0

    def testWorkerManagerPiecefieldDeny(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        server1 = file_server
        server1.sites[site.address] = site
        server2 = FileServer(file_server.ip, 1545)
        server2.sites[site_temp.address] = site_temp
        site_temp.connection_server = server2

        # Add file_server as peer to client
        server2_peer1 = site_temp.addPeer(file_server.ip, 1544)  # Working

        site_temp.downloadContent("content.json", download_files=False)
        site_temp.needFile("data/optional.any.iso.piecemap.msgpack")

        # Add fake peers with optional files downloaded
        for i in range(5):
            fake_peer = site_temp.addPeer("127.0.1.%s" % i, 1544)
            fake_peer.hashfield = site.content_manager.hashfield
            fake_peer.has_hashfield = True

        with Spy.Spy(WorkerManager, "addWorker") as requests:
            site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
            site_temp.needFile("%s|%s-%s" % (inner_path, 6 * 1024 * 1024, 7 * 1024 * 1024))

        # It should only request parts from peer1, as the other peers do not have the requested parts in their piecefields
        assert len([request[1] for request in requests if request[1] != server2_peer1]) == 0

    def testWorkerManagerPiecefieldDownload(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        server1 = file_server
        server1.sites[site.address] = site
        server2 = FileServer(file_server.ip, 1545)
        server2.sites[site_temp.address] = site_temp
        site_temp.connection_server = server2
        sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]

        # Create 10 fake peers, one owning each piece
        for i in range(10):
            peer = Peer(file_server.ip, 1544, site_temp, server2)
            peer.piecefields[sha512][i] = "1"
            peer.updateHashfield = mock.MagicMock(return_value=False)
            peer.updatePiecefields = mock.MagicMock(return_value=False)
            peer.findHashIds = mock.MagicMock(return_value={"nope": []})
            peer.hashfield = site.content_manager.hashfield
            peer.has_hashfield = True
            peer.key = "Peer:%s" % i
            site_temp.peers["Peer:%s" % i] = peer

        site_temp.downloadContent("content.json", download_files=False)
        site_temp.needFile("data/optional.any.iso.piecemap.msgpack")

        with Spy.Spy(Peer, "getFile") as requests:
            for i in range(10):
                site_temp.needFile("%s|%s-%s" % (inner_path, i * 1024 * 1024, (i + 1) * 1024 * 1024))

        assert len(requests) == 10
        for i in range(10):
            assert requests[i][0] == site_temp.peers["Peer:%s" % i]  # Every part should be requested from the peer that owns the piece

    def testDownloadStats(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Open virtual file
        assert not site_temp.storage.isFile(inner_path)

        # Check size before downloads
        assert site_temp.settings["size"] < 10 * 1024 * 1024
        assert site_temp.settings["optional_downloaded"] == 0
        size_piecemap = site_temp.content_manager.getFileInfo(inner_path + ".piecemap.msgpack")["size"]
        size_bigfile = site_temp.content_manager.getFileInfo(inner_path)["size"]

        with site_temp.storage.openBigfile(inner_path) as f:
            assert "\0" not in f.read(1024)
            assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile

        with site_temp.storage.openBigfile(inner_path) as f:
            # Don't count twice
            assert "\0" not in f.read(1024)
            assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile

            # Add second block
            assert "\0" not in f.read(1024 * 1024)
            assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile

    def testPrebuffer(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Open virtual file
        assert not site_temp.storage.isFile(inner_path)

        with site_temp.storage.openBigfile(inner_path, prebuffer=1024 * 1024 * 2) as f:
            with Spy.Spy(FileRequest, "route") as requests:
                f.seek(5 * 1024 * 1024)
                assert f.read(7) == "Test524"
            # assert len(requests) == 3  # 1x piecemap + 1x getpiecefield + 1x for pieces
            assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 2

            time.sleep(0.5)  # Wait for the prebuffer download

            sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]
            assert site_temp.storage.piecefields[sha512].tostring() == "0000011100"

            # No prebuffer beyond the end of the file
            f.seek(9 * 1024 * 1024)
            assert "\0" not in f.read(7)

            assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 0

    def testDownloadAllPieces(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Open virtual file
        assert not site_temp.storage.isFile(inner_path)

        with Spy.Spy(FileRequest, "route") as requests:
            site_temp.needFile("%s|all" % inner_path)

        assert len(requests) == 12  # piecemap.msgpack, getPiecefields, 10 x piece

        # Don't re-download already downloaded pieces
        with Spy.Spy(FileRequest, "route") as requests:
            site_temp.needFile("%s|all" % inner_path)

        assert len(requests) == 0

    def testFileSize(self, file_server, site, site_temp):
        inner_path = self.createBigfile(site)

        # Init source server
        site.connection_server = file_server
        file_server.sites[site.address] = site

        # Init client server
        client = ConnectionServer(file_server.ip, 1545)
        site_temp.connection_server = client
        site_temp.addPeer(file_server.ip, 1544)

        # Download site
        site_temp.download(blind_includes=True).join(timeout=5)

        # Open virtual file
        assert not site_temp.storage.isFile(inner_path)

        # Download first block
        site_temp.needFile("%s|%s-%s" % (inner_path, 0 * 1024 * 1024, 1 * 1024 * 1024))
        assert site_temp.storage.getSize(inner_path) < 1000 * 1000 * 10  # Size on the disk should be smaller than the real size

        site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))
        assert site_temp.storage.getSize(inner_path) == site.storage.getSize(inner_path)

@@ -1 +0,0 @@
from src.Test.conftest import *

@@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.

@@ -1,2 +0,0 @@
import BigfilePlugin
from BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked

@@ -1,182 +0,0 @@
import time
import sys
import collections
import itertools
import logging

import gevent
from util import helper
from Config import config


class ChartCollector(object):
    def __init__(self, db):
        self.db = db
        if config.action == "main":
            gevent.spawn_later(60 * 3, self.collector)
        self.log = logging.getLogger("ChartCollector")
        self.last_values = collections.defaultdict(dict)

    def setInitialLastValues(self, sites):
        # Recover the last values of site bytes recv/sent
        for site in sites:
            self.last_values["site:" + site.address]["site_bytes_recv"] = site.settings.get("bytes_recv", 0)
            self.last_values["site:" + site.address]["site_bytes_sent"] = site.settings.get("bytes_sent", 0)

    def getCollectors(self):
        collectors = {}
        file_server = sys.modules["main"].file_server
        sites = file_server.sites
        if not sites:
            return collectors
        content_db = sites.values()[0].content_manager.contents.db

        # Connection stats
        collectors["connection"] = lambda: len(file_server.connections)
        collectors["connection_in"] = (
            lambda: len([1 for connection in file_server.connections if connection.type == "in"])
        )
        collectors["connection_onion"] = (
            lambda: len([1 for connection in file_server.connections if connection.ip.endswith(".onion")])
        )
        collectors["connection_ping_avg"] = (
            lambda: round(1000 * helper.avg(
                [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
            ))
        )
        collectors["connection_ping_min"] = (
            lambda: round(1000 * min(
                [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
            ))
        )
        collectors["connection_rev_avg"] = (
            lambda: helper.avg(
                [connection.handshake["rev"] for connection in file_server.connections if connection.handshake]
            )
        )

        # Request stats
        collectors["file_bytes_recv|change"] = lambda: file_server.bytes_recv
        collectors["file_bytes_sent|change"] = lambda: file_server.bytes_sent
        collectors["request_num_recv|change"] = lambda: file_server.num_recv
        collectors["request_num_sent|change"] = lambda: file_server.num_sent

        # Limit
        collectors["optional_limit"] = lambda: content_db.getOptionalLimitBytes()
        collectors["optional_used"] = lambda: content_db.getOptionalUsedBytes()
        collectors["optional_downloaded"] = lambda: sum([site.settings.get("optional_downloaded", 0) for site in sites.values()])

        # Peers
        collectors["peer"] = lambda (peers): len(peers)
        collectors["peer_onion"] = lambda (peers): len([True for peer in peers if ".onion" in peer])

        # Size
        collectors["size"] = lambda: sum([site.settings.get("size", 0) for site in sites.values()])
        collectors["size_optional"] = lambda: sum([site.settings.get("size_optional", 0) for site in sites.values()])
        collectors["content"] = lambda: sum([len(site.content_manager.contents) for site in sites.values()])

        return collectors
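
    # Collectors are plain zero-argument callables (the peer_* ones receive the
    # unique peer set instead); a hypothetical extra metric plugs in the same way:
    #
    #     collectors["open_files"] = lambda: len(psutil.Process().open_files())  # illustrative only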

    def getSiteCollectors(self):
        site_collectors = {}

        # Size
        site_collectors["site_size"] = lambda(site): site.settings.get("size", 0)
        site_collectors["site_size_optional"] = lambda(site): site.settings.get("size_optional", 0)
        site_collectors["site_optional_downloaded"] = lambda(site): site.settings.get("optional_downloaded", 0)
        site_collectors["site_content"] = lambda(site): len(site.content_manager.contents)

        # Data transfer
        site_collectors["site_bytes_recv|change"] = lambda(site): site.settings.get("bytes_recv", 0)
        site_collectors["site_bytes_sent|change"] = lambda(site): site.settings.get("bytes_sent", 0)

        # Peers
        site_collectors["site_peer"] = lambda(site): len(site.peers)
        site_collectors["site_peer_onion"] = lambda(site): len(
            [True for peer in site.peers.itervalues() if peer.ip.endswith(".onion")]
        )
        site_collectors["site_peer_connected"] = lambda(site): len([True for peer in site.peers.itervalues() if peer.connection])

        return site_collectors

    def getUniquePeers(self):
        sites = sys.modules["main"].file_server.sites
        return set(itertools.chain.from_iterable(
            [site.peers.keys() for site in sites.values()]
        ))

    def collectDatas(self, collectors, last_values, site=None):
        if site is None:
            peers = self.getUniquePeers()
        datas = {}
        for key, collector in collectors.iteritems():
            try:
                if site:
                    value = collector(site)
                elif key.startswith("peer"):
                    value = collector(peers)
                else:
                    value = collector()
            except Exception as err:
                self.log.info("Collector %s error: %s" % (key, err))
                value = None

            if "|change" in key:  # Store changes relative to last value
                key = key.replace("|change", "")
                last_value = last_values.get(key, 0)
                last_values[key] = value
                value = value - last_value

            if value is None:
                datas[key] = None
            else:
                datas[key] = round(value, 3)
        return datas

    def collectGlobal(self, collectors, last_values):
        now = int(time.time())
        s = time.time()
        datas = self.collectDatas(collectors, last_values["global"])
        values = []
        for key, value in datas.iteritems():
            values.append((self.db.getTypeId(key), value, now))
        self.log.debug("Global collectors done in %.3fs" % (time.time() - s))

        s = time.time()
        cur = self.db.getCursor()
        cur.execute("BEGIN")
        cur.cursor.executemany("INSERT INTO data (type_id, value, date_added) VALUES (?, ?, ?)", values)
        cur.execute("END")
        cur.close()
        self.log.debug("Global collectors inserted in %.3fs" % (time.time() - s))

    def collectSites(self, sites, collectors, last_values):
        now = int(time.time())
        s = time.time()
        values = []
        for address, site in sites.iteritems():
            site_datas = self.collectDatas(collectors, last_values["site:%s" % address], site)
            for key, value in site_datas.iteritems():
                values.append((self.db.getTypeId(key), self.db.getSiteId(address), value, now))
            time.sleep(0.000001)
        self.log.debug("Site collectors done in %.3fs" % (time.time() - s))

        s = time.time()
        cur = self.db.getCursor()
        cur.execute("BEGIN")
        cur.cursor.executemany("INSERT INTO data (type_id, site_id, value, date_added) VALUES (?, ?, ?, ?)", values)
        cur.execute("END")
        cur.close()
        self.log.debug("Site collectors inserted in %.3fs" % (time.time() - s))

    def collector(self):
        collectors = self.getCollectors()
        site_collectors = self.getSiteCollectors()
        sites = sys.modules["main"].file_server.sites
        i = 0
        while 1:
            self.collectGlobal(collectors, self.last_values)
            if i % 12 == 0:  # Only collect site data every hour
                self.collectSites(sites, site_collectors, self.last_values)
            time.sleep(60 * 5)
            i += 1

@@ -1,133 +0,0 @@
from Config import config
from Db import Db
import time


class ChartDb(Db):
    def __init__(self):
        self.version = 2
        super(ChartDb, self).__init__(self.getSchema(), "%s/chart.db" % config.data_dir)
        self.foreign_keys = True
        self.checkTables()
        self.sites = self.loadSites()
        self.types = self.loadTypes()

    def getSchema(self):
        schema = {}
        schema["db_name"] = "Chart"
        schema["tables"] = {}
        schema["tables"]["data"] = {
            "cols": [
                ["data_id", "INTEGER PRIMARY KEY ASC AUTOINCREMENT NOT NULL UNIQUE"],
                ["type_id", "INTEGER NOT NULL"],
                ["site_id", "INTEGER"],
                ["value", "INTEGER"],
                ["date_added", "DATETIME DEFAULT (CURRENT_TIMESTAMP)"]
            ],
            "indexes": [
                "CREATE INDEX site_id ON data (site_id)",
                "CREATE INDEX date_added ON data (date_added)"
            ],
            "schema_changed": 2
        }
        schema["tables"]["type"] = {
            "cols": [
                ["type_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
                ["name", "TEXT"]
            ],
            "schema_changed": 1
        }
        schema["tables"]["site"] = {
            "cols": [
                ["site_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
                ["address", "TEXT"]
            ],
            "schema_changed": 1
        }
        return schema
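
    # The schema maps onto three plain SQLite tables; a standalone sketch of the
    # layout using the sqlite3 module directly (illustrative, in-memory):
    #
    #     import sqlite3
    #     db = sqlite3.connect(":memory:")
    #     db.execute("CREATE TABLE type (type_id INTEGER PRIMARY KEY NOT NULL UNIQUE, name TEXT)")
    #     db.execute("CREATE TABLE site (site_id INTEGER PRIMARY KEY NOT NULL UNIQUE, address TEXT)")
    #     db.execute(
    #         "CREATE TABLE data (data_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,"
    #         " type_id INTEGER NOT NULL, site_id INTEGER, value INTEGER,"
    #         " date_added DATETIME DEFAULT (CURRENT_TIMESTAMP))"
    #     )
    #     db.execute("INSERT INTO data (type_id, value, date_added) VALUES (?, ?, ?)", (1, 42, 1500000000))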

    def getTypeId(self, name):
        if name not in self.types:
            self.execute("INSERT INTO type ?", {"name": name})
            self.types[name] = self.cur.cursor.lastrowid

        return self.types[name]

    def getSiteId(self, address):
        if address not in self.sites:
            self.execute("INSERT INTO site ?", {"address": address})
            self.sites[address] = self.cur.cursor.lastrowid

        return self.sites[address]

    def loadSites(self):
        sites = {}
        for row in self.execute("SELECT * FROM site"):
            sites[row["address"]] = row["site_id"]
        return sites

    def loadTypes(self):
        types = {}
        for row in self.execute("SELECT * FROM type"):
            types[row["name"]] = row["type_id"]
        return types

    def deleteSite(self, address):
        if address in self.sites:
            site_id = self.sites[address]
            del self.sites[address]
            self.execute("DELETE FROM site WHERE ?", {"site_id": site_id})
            self.execute("DELETE FROM data WHERE ?", {"site_id": site_id})

    def archive(self):
        week_back = 1
        while 1:
            s = time.time()
            date_added_from = time.time() - 60 * 60 * 24 * 7 * (week_back + 1)
            date_added_to = date_added_from + 60 * 60 * 24 * 7
            res = self.execute("""
                SELECT
                 MAX(date_added) AS date_added,
                 SUM(value) AS value,
                 GROUP_CONCAT(data_id) AS data_ids,
                 type_id,
                 site_id,
                 COUNT(*) AS num
                FROM data
                WHERE
                 site_id IS NULL AND
                 date_added > :date_added_from AND
                 date_added < :date_added_to
                GROUP BY strftime('%Y-%m-%d %H', date_added, 'unixepoch', 'localtime'), type_id
            """, {"date_added_from": date_added_from, "date_added_to": date_added_to})

            num_archived = 0
            cur = self.getCursor()
            for row in res:
                if row["num"] == 1:
                    continue
                cur.execute("INSERT INTO data ?", {
                    "type_id": row["type_id"],
                    "site_id": row["site_id"],
                    "value": row["value"],
                    "date_added": row["date_added"]
                })
                cur.execute("DELETE FROM data WHERE data_id IN (%s)" % row["data_ids"])
                num_archived += row["num"]
            self.log.debug("Archived %s data points from %s weeks ago in %.3fs" % (num_archived, week_back, time.time() - s))
            week_back += 1
            time.sleep(0.1)
            if num_archived == 0:
                break
        # Only keep 6 months of global stats
        self.execute(
            "DELETE FROM data WHERE site_id IS NULL AND date_added < :date_added_limit",
            {"date_added_limit": time.time() - 60 * 60 * 24 * 30 * 6}
        )
        # Only keep 1 month of site stats
        self.execute(
            "DELETE FROM data WHERE site_id IS NOT NULL AND date_added < :date_added_limit",
            {"date_added_limit": time.time() - 60 * 60 * 24 * 30}
        )
        if week_back > 1:
            self.execute("VACUUM")

@@ -1,60 +0,0 @@
import time
import itertools

import gevent

from Config import config
from util import helper
from Plugin import PluginManager
from ChartDb import ChartDb
from ChartCollector import ChartCollector

if "db" not in locals().keys():  # Share on reloads
    db = ChartDb()
    gevent.spawn_later(10 * 60, db.archive)
    helper.timer(60 * 60 * 6, db.archive)
    collector = ChartCollector(db)


@PluginManager.registerTo("SiteManager")
class SiteManagerPlugin(object):
    def load(self, *args, **kwargs):
        back = super(SiteManagerPlugin, self).load(*args, **kwargs)
        collector.setInitialLastValues(self.sites.values())
        return back

    def delete(self, address, *args, **kwargs):
        db.deleteSite(address)
        return super(SiteManagerPlugin, self).delete(address, *args, **kwargs)

@PluginManager.registerTo("UiWebsocket")
|
||||
class UiWebsocketPlugin(object):
|
||||
def actionChartDbQuery(self, to, query, params=None):
|
||||
if not "ADMIN" in self.permissions:
|
||||
return {"error": "No permission"}
|
||||
|
||||
if config.debug or config.verbose:
|
||||
s = time.time()
|
||||
rows = []
|
||||
try:
|
||||
if not query.strip().upper().startswith("SELECT"):
|
||||
raise Exception("Only SELECT query supported")
|
||||
res = db.execute(query, params)
|
||||
except Exception, err: # Response the error to client
|
||||
self.log.error("ChartDbQuery error: %s" % err)
|
||||
return {"error": str(err)}
|
||||
# Convert result to dict
|
||||
for row in res:
|
||||
rows.append(dict(row))
|
||||
if config.verbose and time.time() - s > 0.1: # Log slow query
|
||||
self.log.debug("Slow query: %s (%.3fs)" % (query, time.time() - s))
|
||||
return rows
|
||||
|
||||
def actionChartGetPeerLocations(self, to):
|
||||
if not "ADMIN" in self.permissions:
|
||||
return {"error": "No permission"}
|
||||
|
||||
peers = {}
|
||||
for site in self.server.sites.values():
|
||||
peers.update(site.peers)
|
||||
peer_locations = self.getPeerLocations(peers)
|
||||
return peer_locations
|
|
@@ -1 +0,0 @@
import ChartPlugin
@@ -1,216 +0,0 @@
import time
import re
import cgi
import hashlib

from Plugin import PluginManager
from Translate import Translate
from Config import config

from ContentFilterStorage import ContentFilterStorage


if "_" not in locals():
    _ = Translate("plugins/ContentFilter/languages/")


@PluginManager.registerTo("SiteManager")
class SiteManagerPlugin(object):
    def load(self, *args, **kwargs):
        global filter_storage
        super(SiteManagerPlugin, self).load(*args, **kwargs)
        filter_storage = ContentFilterStorage(site_manager=self)


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    # Mute
    def cbMuteAdd(self, to, auth_address, cert_user_id, reason):
        filter_storage.file_content["mutes"][auth_address] = {
            "cert_user_id": cert_user_id, "reason": reason, "source": self.site.address, "date_added": time.time()
        }
        filter_storage.save()
        filter_storage.changeDbs(auth_address, "remove")
        self.response(to, "ok")

    def actionMuteAdd(self, to, auth_address, cert_user_id, reason):
        if "ADMIN" in self.getPermissions(to):
            self.cbMuteAdd(to, auth_address, cert_user_id, reason)
        else:
            self.cmd(
                "confirm",
                [_["Hide all content from <b>%s</b>?"] % cgi.escape(cert_user_id), _["Mute"]],
                lambda res: self.cbMuteAdd(to, auth_address, cert_user_id, reason)
            )

    def cbMuteRemove(self, to, auth_address):
        del filter_storage.file_content["mutes"][auth_address]
        filter_storage.save()
        filter_storage.changeDbs(auth_address, "load")
        self.response(to, "ok")

    def actionMuteRemove(self, to, auth_address):
        if "ADMIN" in self.getPermissions(to):
            self.cbMuteRemove(to, auth_address)
        else:
            self.cmd(
                "confirm",
                [_["Unmute <b>%s</b>?"] % cgi.escape(filter_storage.file_content["mutes"][auth_address]["cert_user_id"]), _["Unmute"]],
                lambda res: self.cbMuteRemove(to, auth_address)
            )

    def actionMuteList(self, to):
        if "ADMIN" in self.getPermissions(to):
            self.response(to, filter_storage.file_content["mutes"])
        else:
            return self.response(to, {"error": "Forbidden: Only ADMIN sites can list mutes"})

    # Siteblock
    def actionSiteblockAdd(self, to, site_address, reason=None):
        if "ADMIN" not in self.getPermissions(to):
            return self.response(to, {"error": "Forbidden: Only ADMIN sites can add to blocklist"})
        filter_storage.file_content["siteblocks"][site_address] = {"date_added": time.time(), "reason": reason}
        filter_storage.save()
        self.response(to, "ok")

    def actionSiteblockRemove(self, to, site_address):
        if "ADMIN" not in self.getPermissions(to):
            return self.response(to, {"error": "Forbidden: Only ADMIN sites can remove from blocklist"})
        del filter_storage.file_content["siteblocks"][site_address]
        filter_storage.save()
        self.response(to, "ok")

    def actionSiteblockList(self, to):
        if "ADMIN" in self.getPermissions(to):
            self.response(to, filter_storage.file_content["siteblocks"])
        else:
            return self.response(to, {"error": "Forbidden: Only ADMIN sites can list blocklists"})

    # Include
    def actionFilterIncludeAdd(self, to, inner_path, description=None, address=None):
        if address:
            if "ADMIN" not in self.getPermissions(to):
                return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage a different site's includes"})
            site = self.server.sites[address]
        else:
            address = self.site.address
            site = self.site

        if "ADMIN" in self.getPermissions(to):
            self.cbFilterIncludeAdd(to, True, address, inner_path, description)
        else:
            content = site.storage.loadJson(inner_path)
            title = _["New shared global content filter: <b>%s</b> (%s sites, %s users)"] % (
                cgi.escape(inner_path), len(content.get("siteblocks", {})), len(content.get("mutes", {}))
            )

            self.cmd(
                "confirm",
                [title, "Add"],
                lambda res: self.cbFilterIncludeAdd(to, res, address, inner_path, description)
            )

    def cbFilterIncludeAdd(self, to, res, address, inner_path, description):
        if not res:
            self.response(to, res)
            return False

        filter_storage.includeAdd(address, inner_path, description)
        self.response(to, "ok")

    def actionFilterIncludeRemove(self, to, inner_path, address=None):
        if address:
            if "ADMIN" not in self.getPermissions(to):
                return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage a different site's includes"})
        else:
            address = self.site.address

        key = "%s/%s" % (address, inner_path)
        if key not in filter_storage.file_content["includes"]:
            return self.response(to, {"error": "Include not found"})
        filter_storage.includeRemove(address, inner_path)
        self.response(to, "ok")

    def actionFilterIncludeList(self, to, all_sites=False, filters=False):
        if all_sites and "ADMIN" not in self.getPermissions(to):
            return self.response(to, {"error": "Forbidden: Only ADMIN sites can list all sites' includes"})

        back = []
        includes = filter_storage.file_content.get("includes", {}).values()
        for include in includes:
            if not all_sites and include["address"] != self.site.address:
                continue
            if filters:
                include = dict(include)  # Don't modify the original file_content
                include_site = filter_storage.site_manager.get(include["address"])
                if not include_site:
                    continue
                content = include_site.storage.loadJson(include["inner_path"])
                include["mutes"] = content.get("mutes", {})
                include["siteblocks"] = content.get("siteblocks", {})
            back.append(include)
        self.response(to, back)


@PluginManager.registerTo("SiteStorage")
class SiteStoragePlugin(object):
    def updateDbFile(self, inner_path, file=None, cur=None):
        if file is not False:  # File deletion is always allowed
            # Look for bitcoin addresses in the file path
            matches = re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path)
            # Check if any of the addresses are in the mute list
            for auth_address in matches:
                if filter_storage.isMuted(auth_address):
                    self.log.debug("Mute match: %s, ignoring %s" % (auth_address, inner_path))
                    return False

        return super(SiteStoragePlugin, self).updateDbFile(inner_path, file=file, cur=cur)

    def onUpdated(self, inner_path, file=None):
        file_path = "%s/%s" % (self.site.address, inner_path)
        if file_path in filter_storage.file_content["includes"]:
            self.log.debug("Filter file updated: %s" % inner_path)
            filter_storage.includeUpdateAll()
        return super(SiteStoragePlugin, self).onUpdated(inner_path, file=file)


@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    def actionWrapper(self, path, extra_headers=None):
        match = re.match("/(?P<address>[A-Za-z0-9\._-]+)(?P<inner_path>/.*|$)", path)
        if not match:
            return False
        address = match.group("address")

        if self.server.site_manager.get(address):  # Site already exists
            return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)

        if self.server.site_manager.isDomain(address):
            address = self.server.site_manager.resolveDomain(address)

        if address:
            address_sha256 = "0x" + hashlib.sha256(address).hexdigest()
        else:
            address_sha256 = None

        if filter_storage.isSiteblocked(address) or filter_storage.isSiteblocked(address_sha256):
            site = self.server.site_manager.get(config.homepage)
            if not extra_headers:
                extra_headers = {}

            script_nonce = self.getScriptNonce()

            self.sendHeader(extra_headers=extra_headers, script_nonce=script_nonce)
            return iter([super(UiRequestPlugin, self).renderWrapper(
                site, path, "uimedia/plugins/contentfilter/blocklisted.html?address=" + address,
                "Blacklisted site", extra_headers, show_loadingscreen=False, script_nonce=script_nonce
            )])
        else:
            return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)

    def actionUiMedia(self, path, *args, **kwargs):
        if path.startswith("/uimedia/plugins/contentfilter/"):
            file_path = path.replace("/uimedia/plugins/contentfilter/", "plugins/ContentFilter/media/")
            return self.actionFile(file_path)
        else:
            return super(UiRequestPlugin, self).actionUiMedia(path)
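
The mute check in SiteStoragePlugin.updateDbFile above works purely on paths: user content lives in a directory named after the author's bitcoin-style auth address, so a regex match on the inner path is enough to skip indexing a muted user's files. A minimal sketch of just that matching step (the path and mute entry reuse the address from the test fixtures in this diff):

import re

def findAuthAddresses(inner_path):
    # Same pattern as the plugin: a "1"-prefixed base58-looking path segment
    return re.findall(r"/(1[A-Za-z0-9]{26,35})/", inner_path)

muted = {"1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C"}
path = "data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"
print(any(addr in muted for addr in findAuthAddresses(path)))  # -> True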
@@ -1,140 +0,0 @@
import os
import json
import logging
import collections
import time

from Debug import Debug
from Plugin import PluginManager
from Config import config
from util import helper


class ContentFilterStorage(object):
    def __init__(self, site_manager):
        self.log = logging.getLogger("ContentFilterStorage")
        self.file_path = "%s/filters.json" % config.data_dir
        self.site_manager = site_manager
        self.file_content = self.load()

        # Set default values for filters.json
        if not self.file_content:
            self.file_content = {}

        # Site blacklist renamed to siteblocks
        if "site_blacklist" in self.file_content:
            self.file_content["siteblocks"] = self.file_content["site_blacklist"]
            del self.file_content["site_blacklist"]

        for key in ["mutes", "siteblocks", "includes"]:
            if key not in self.file_content:
                self.file_content[key] = {}

        self.include_filters = collections.defaultdict(set)  # Merged set of mutes and blocklists from all includes
        self.includeUpdateAll(update_site_dbs=False)

    def load(self):
        # Rename the previously used mutes.json -> filters.json
        if os.path.isfile("%s/mutes.json" % config.data_dir):
            self.log.info("Renaming mutes.json to filters.json...")
            os.rename("%s/mutes.json" % config.data_dir, self.file_path)
        if os.path.isfile(self.file_path):
            try:
                return json.load(open(self.file_path))
            except Exception as err:
                self.log.error("Error loading filters.json: %s" % err)
                return None
        else:
            return None

    def includeUpdateAll(self, update_site_dbs=True):
        s = time.time()
        new_include_filters = collections.defaultdict(set)

        # Load all include files' data into a merged set
        for include_path in self.file_content["includes"]:
            address, inner_path = include_path.split("/", 1)
            try:
                content = self.site_manager.get(address).storage.loadJson(inner_path)
            except Exception as err:
                self.log.warning(
                    "Error loading include %s: %s" %
                    (include_path, Debug.formatException(err))
                )
                continue

            for key, val in content.iteritems():
                if type(val) is not dict:
                    continue

                new_include_filters[key].update(val.keys())

        mutes_added = new_include_filters["mutes"].difference(self.include_filters["mutes"])
        mutes_removed = self.include_filters["mutes"].difference(new_include_filters["mutes"])

        self.include_filters = new_include_filters

        if update_site_dbs:
            for auth_address in mutes_added:
                self.changeDbs(auth_address, "remove")

            for auth_address in mutes_removed:
                if not self.isMuted(auth_address):
                    self.changeDbs(auth_address, "load")

        num_mutes = len(self.include_filters["mutes"])
        num_siteblocks = len(self.include_filters["siteblocks"])
        self.log.debug(
            "Loaded %s mutes, %s blocked sites from %s includes in %.3fs" %
            (num_mutes, num_siteblocks, len(self.file_content["includes"]), time.time() - s)
        )

    def includeAdd(self, address, inner_path, description=None):
        self.file_content["includes"]["%s/%s" % (address, inner_path)] = {
            "date_added": time.time(),
            "address": address,
            "description": description,
            "inner_path": inner_path
        }
        self.includeUpdateAll()
        self.save()

    def includeRemove(self, address, inner_path):
        del self.file_content["includes"]["%s/%s" % (address, inner_path)]
        self.includeUpdateAll()
        self.save()

    def save(self):
        s = time.time()
        helper.atomicWrite(self.file_path, json.dumps(self.file_content, indent=2, sort_keys=True))
        self.log.debug("Saved in %.3fs" % (time.time() - s))

    def isMuted(self, auth_address):
        if auth_address in self.file_content["mutes"] or auth_address in self.include_filters["mutes"]:
            return True
        else:
            return False

    def isSiteblocked(self, address):
        if address in self.file_content["siteblocks"] or address in self.include_filters["siteblocks"]:
            return True
        else:
            return False

    # Search and remove or re-add files of a user
    def changeDbs(self, auth_address, action):
        self.log.debug("Mute action %s on user %s" % (action, auth_address))
        res = self.site_manager.list().values()[0].content_manager.contents.db.execute(
            "SELECT * FROM content LEFT JOIN site USING (site_id) WHERE inner_path LIKE :inner_path",
            {"inner_path": "%%/%s/%%" % auth_address}
        )
        for row in res:
            site = self.site_manager.sites.get(row["address"])
            if not site:
                continue
            dir_inner_path = helper.getDirname(row["inner_path"])
            for file_name in site.storage.walk(dir_inner_path):
                if action == "remove":
                    site.storage.onUpdated(dir_inner_path + file_name, False)
                else:
                    site.storage.onUpdated(dir_inner_path + file_name)
                    site.onFileDone(dir_inner_path + file_name)
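
includeUpdateAll() above rebuilds the merged filter sets from scratch on every change, then uses two set differences against the previous state so that only newly muted users are removed from the site DBs and only no-longer-muted users are re-loaded. The same diffing in isolation (toy addresses, purely illustrative):

# Toy stand-ins for include_filters["mutes"] before and after a reload
old_mutes = {"1Alice", "1Bob"}
new_mutes = {"1Bob", "1Carol"}

mutes_added = new_mutes.difference(old_mutes)    # {"1Carol"} -> changeDbs(addr, "remove")
mutes_removed = old_mutes.difference(new_mutes)  # {"1Alice"} -> changeDbs(addr, "load"), if not muted elsewhere
print(mutes_added, mutes_removed)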
@@ -1,82 +0,0 @@
import pytest
from ContentFilter import ContentFilterPlugin
from Site import SiteManager


@pytest.fixture
def filter_storage():
    ContentFilterPlugin.filter_storage = ContentFilterPlugin.ContentFilterStorage(SiteManager.site_manager)
    return ContentFilterPlugin.filter_storage


@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestContentFilter:
    def createInclude(self, site):
        site.storage.writeJson("filters.json", {
            "mutes": {"1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C": {}},
            "siteblocks": {site.address: {}}
        })

    def testIncludeLoad(self, site, filter_storage):
        self.createInclude(site)
        filter_storage.file_content["includes"]["%s/%s" % (site.address, "filters.json")] = {
            "date_added": 1528295893,
        }

        assert not filter_storage.include_filters["mutes"]
        assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
        assert not filter_storage.isSiteblocked(site.address)
        filter_storage.includeUpdateAll(update_site_dbs=False)
        assert len(filter_storage.include_filters["mutes"]) == 1
        assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
        assert filter_storage.isSiteblocked(site.address)

    def testIncludeAdd(self, site, filter_storage):
        self.createInclude(site)
        query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C'"
        assert not filter_storage.isSiteblocked(site.address)
        assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
        assert site.storage.query(query_num_json).fetchone()["num"] == 2

        # Add include
        filter_storage.includeAdd(site.address, "filters.json")

        assert filter_storage.isSiteblocked(site.address)
        assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
        assert site.storage.query(query_num_json).fetchone()["num"] == 0

        # Remove include
        filter_storage.includeRemove(site.address, "filters.json")

        assert not filter_storage.isSiteblocked(site.address)
        assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
        assert site.storage.query(query_num_json).fetchone()["num"] == 2

    def testIncludeChange(self, site, filter_storage):
        self.createInclude(site)
        filter_storage.includeAdd(site.address, "filters.json")
        assert filter_storage.isSiteblocked(site.address)
        assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")

        # Add new blocked site
        assert not filter_storage.isSiteblocked("1Hello")

        filter_content = site.storage.loadJson("filters.json")
        filter_content["siteblocks"]["1Hello"] = {}
        site.storage.writeJson("filters.json", filter_content)

        assert filter_storage.isSiteblocked("1Hello")

        # Add new muted user
        query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q'"
        assert not filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
        assert site.storage.query(query_num_json).fetchone()["num"] == 2

        filter_content["mutes"]["1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q"] = {}
        site.storage.writeJson("filters.json", filter_content)

        assert filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
        assert site.storage.query(query_num_json).fetchone()["num"] == 0
@@ -1 +0,0 @@
from src.Test.conftest import *
@@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.
@@ -1 +0,0 @@
import ContentFilterPlugin
@@ -1,6 +0,0 @@
{
 "Hide all content from <b>%s</b>?": "<b>%s</b> tartalmainak elrejtése?",
 "Mute": "Elnémítás",
 "Unmute <b>%s</b>?": "<b>%s</b> tartalmainak megjelenítése?",
 "Unmute": "Némítás visszavonása"
}
@@ -1,6 +0,0 @@
{
 "Hide all content from <b>%s</b>?": "<b>%s</b> Vuoi nascondere i contenuti di questo utente ?",
 "Mute": "Attiva Silenzia",
 "Unmute <b>%s</b>?": "<b>%s</b> Vuoi mostrare i contenuti di questo utente ?",
 "Unmute": "Disattiva Silenzia"
}
@@ -1,6 +0,0 @@
{
 "Hide all content from <b>%s</b>?": "<b>%s</b> Ocultar todo o conteúdo de ?",
 "Mute": "Ativar o Silêncio",
 "Unmute <b>%s</b>?": "<b>%s</b> Você quer mostrar o conteúdo deste usuário ?",
 "Unmute": "Desligar o silêncio"
}
@@ -1,6 +0,0 @@
{
 "Hide all content from <b>%s</b>?": "屏蔽 <b>%s</b> 的所有內容?",
 "Mute": "屏蔽",
 "Unmute <b>%s</b>?": "對 <b>%s</b> 解除屏蔽?",
 "Unmute": "解除屏蔽"
}
@@ -1,6 +0,0 @@
{
 "Hide all content from <b>%s</b>?": "屏蔽 <b>%s</b> 的所有内容?",
 "Mute": "屏蔽",
 "Unmute <b>%s</b>?": "对 <b>%s</b> 解除屏蔽?",
 "Unmute": "解除屏蔽"
}
@@ -1,107 +0,0 @@
<html>
<body>

<style>
.content { line-height: 24px; font-family: monospace; font-size: 14px; color: #636363; text-transform: uppercase; top: 38%; position: relative; text-align: center; perspective: 1000px }
.content h1, .content h2 { font-weight: normal; letter-spacing: 1px; }
.content h2 { font-size: 15px; }
.content #details {
    text-align: left; display: inline-block; width: 350px; background-color: white; padding: 17px 27px; border-radius: 0px;
    box-shadow: 0px 2px 7px -1px #d8d8d8; text-transform: none; margin: 15px; transform: scale(0) rotateX(90deg); transition: all 0.6s cubic-bezier(0.785, 0.135, 0.15, 0.86);
}
.content #details #added { font-size: 12px; text-align: right; color: #a9a9a9; }

#button { transition: all 1s cubic-bezier(0.075, 0.82, 0.165, 1); opacity: 0; transform: translateY(50px); transition-delay: 0.5s }
.button {
    padding: 8px 20px; background-color: #FFF85F; border-bottom: 2px solid #CDBD1E; border-radius: 2px;
    text-decoration: none; transition: all 0.5s; background-position: left center; display: inline-block; margin-top: 10px; color: black;
}
.button:hover { background-color: #FFF400; border-bottom: 2px solid #4D4D4C; transition: none; }
.button:active { position: relative; top: 1px; }
.button:focus { outline: none; }

.textbutton { color: #999; margin-top: 25px; display: inline-block; text-transform: none; font-family: Arial, Helvetica; text-decoration: none; padding: 5px 15px; }
.textbutton-main { background-color: #FFF; color: #333; border-radius: 5px; }
.textbutton:hover { text-decoration: underline; color: #333; transition: none !important; }
.textbutton:active { background-color: #fafbfc; }
</style>

<div class="content">
    <h1>Site blocked</h1>
    <h2>This site is on your blocklist:</h2>
    <div id="details">
        <div id="reason">Too much image</div>
        <div id="added">on 2015-01-25 12:32:11</div>
    </div>
    <div id="buttons">
        <a href="/" class="textbutton textbutton-main" id="back">Back to homepage</a>
        <a href="#Visit+Site" class="textbutton" id="visit">Remove from blocklist and visit the site</a>
    </div>
</div>

<script type="text/javascript" src="js/ZeroFrame.js"></script>

<script>
function buf2hex(buffer) {
    return Array.prototype.map.call(new Uint8Array(buffer), x => ('00' + x.toString(16)).slice(-2)).join('');
}

async function sha256hex(s) {
    var buff = new TextEncoder("utf-8").encode(s)
    return "0x" + buf2hex(await crypto.subtle.digest("SHA-256", buff))
}

class Page extends ZeroFrame {
    onOpenWebsocket () {
        this.cmd("wrapperSetTitle", "Visiting a blocked site - ZeroNet")
        this.cmd("siteInfo", {}, (site_info) => {
            this.site_info = site_info
        })
        var address = document.location.search.match(/address=(.*?)[&\?]/)[1]
        this.updateSiteblockDetails(address)
    }

    async updateSiteblockDetails(address) {
        var address_sha256 = await sha256hex(address)
        var blocks = await this.cmdp("siteblockList")
        var block
        if (blocks[address] || blocks[address_sha256]) {
            block = blocks[address] || blocks[address_sha256]
        } else {
            var includes = await this.cmdp("filterIncludeList", {all_sites: true, filters: true})
            for (let include of includes) {
                if (include["siteblocks"][address]) {
                    block = include["siteblocks"][address]
                    block["include"] = include
                }
                if (include["siteblocks"][address_sha256]) {
                    block = include["siteblocks"][address_sha256]
                    block["include"] = include
                }
            }
        }

        this.blocks = blocks
        var reason = block["reason"]
        if (!reason) reason = "Unknown reason"
        var date = new Date(block["date_added"] * 1000)
        document.getElementById("reason").innerText = reason
        document.getElementById("added").innerText = "at " + date.toLocaleDateString() + " " + date.toLocaleTimeString()
        if (block["include"]) {
            document.getElementById("added").innerText += " from a shared blocklist"
            document.getElementById("visit").innerText = "Ignore blocking and visit the site"
        }
        document.getElementById("details").style.transform = "scale(1) rotateX(0deg)"
        document.getElementById("visit").style.transform = "translateY(0)"
        document.getElementById("visit").style.opacity = "1"
        document.getElementById("visit").onclick = () => {
            if (block["include"])
                this.cmd("siteAdd", address, () => { this.cmd("wrapperReload") })
            else
                this.cmd("siteblockRemove", address, () => { this.cmd("wrapperReload") })
        }
    }
}
page = new Page()
</script>
</body>
</html>
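
The sha256hex() helper above lets blocklists contain "0x"-prefixed SHA-256 hashes instead of raw site addresses; UiRequestPlugin.actionWrapper in ContentFilterPlugin computes the matching value server-side. The equivalent hashing in Python (Python 3 spelling; the py2 plugin code hashes the raw str directly):

import hashlib

def addressSha256(address):
    return "0x" + hashlib.sha256(address.encode("utf8")).hexdigest()

print(addressSha256("1Hello"))  # Same value sha256hex("1Hello") yields in the page above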
@@ -1,119 +0,0 @@
// Version 1.0.0 - Initial release
// Version 1.1.0 (2017-08-02) - Added cmdp function that returns promise instead of using callback
// Version 1.2.0 (2017-08-02) - Added Ajax monkey patch to emulate XMLHttpRequest over ZeroFrame API

const CMD_INNER_READY = 'innerReady'
const CMD_RESPONSE = 'response'
const CMD_WRAPPER_READY = 'wrapperReady'
const CMD_PING = 'ping'
const CMD_PONG = 'pong'
const CMD_WRAPPER_OPENED_WEBSOCKET = 'wrapperOpenedWebsocket'
const CMD_WRAPPER_CLOSE_WEBSOCKET = 'wrapperClosedWebsocket'

class ZeroFrame {
    constructor(url) {
        this.url = url
        this.waiting_cb = {}
        this.wrapper_nonce = document.location.href.replace(/.*wrapper_nonce=([A-Za-z0-9]+).*/, "$1")
        this.connect()
        this.next_message_id = 1
        this.init()
    }

    init() {
        return this
    }

    connect() {
        this.target = window.parent
        window.addEventListener('message', e => this.onMessage(e), false)
        this.cmd(CMD_INNER_READY)
    }

    onMessage(e) {
        let message = e.data
        let cmd = message.cmd
        if (cmd === CMD_RESPONSE) {
            if (this.waiting_cb[message.to] !== undefined) {
                this.waiting_cb[message.to](message.result)
            }
            else {
                this.log("Websocket callback not found:", message)
            }
        } else if (cmd === CMD_WRAPPER_READY) {
            this.cmd(CMD_INNER_READY)
        } else if (cmd === CMD_PING) {
            this.response(message.id, CMD_PONG)
        } else if (cmd === CMD_WRAPPER_OPENED_WEBSOCKET) {
            this.onOpenWebsocket()
        } else if (cmd === CMD_WRAPPER_CLOSE_WEBSOCKET) {
            this.onCloseWebsocket()
        } else {
            this.onRequest(cmd, message)
        }
    }

    onRequest(cmd, message) {
        this.log("Unknown request", message)
    }

    response(to, result) {
        this.send({
            cmd: CMD_RESPONSE,
            to: to,
            result: result
        })
    }

    cmd(cmd, params={}, cb=null) {
        this.send({
            cmd: cmd,
            params: params
        }, cb)
    }

    cmdp(cmd, params={}) {
        return new Promise((resolve, reject) => {
            this.cmd(cmd, params, (res) => {
                if (res && res.error) {
                    reject(res.error)
                } else {
                    resolve(res)
                }
            })
        })
    }

    send(message, cb=null) {
        message.wrapper_nonce = this.wrapper_nonce
        message.id = this.next_message_id
        this.next_message_id++
        this.target.postMessage(message, '*')
        if (cb) {
            this.waiting_cb[message.id] = cb
        }
    }

    log(...args) {
        console.log.apply(console, ['[ZeroFrame]'].concat(args))
    }

    onOpenWebsocket() {
        this.log('Websocket open')
    }

    onCloseWebsocket() {
        this.log('Websocket close')
    }

    monkeyPatchAjax() {
        var page = this
        XMLHttpRequest.prototype.realOpen = XMLHttpRequest.prototype.open
        this.cmd("wrapperGetAjaxKey", [], (res) => { this.ajax_key = res })
        var newOpen = function (method, url, async) {
            url += "?ajax_key=" + page.ajax_key
            return this.realOpen(method, url, async)
        }
        XMLHttpRequest.prototype.open = newOpen
    }
}
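
ZeroFrame correlates requests and responses through an incrementing message id: send() files the callback under the outgoing id, and onMessage() looks it up via the response's "to" field. A sketch of the same bookkeeping in Python (toy in-process transport, no real postMessage):

callbacks = {}
next_message_id = 1

def send(message, cb=None):
    global next_message_id
    message["id"] = next_message_id
    next_message_id += 1
    if cb:
        callbacks[message["id"]] = cb  # Remember who is waiting for this id
    return message  # A real implementation would postMessage() here

def onResponse(message):  # message = {"cmd": "response", "to": <id>, "result": ...}
    cb = callbacks.pop(message["to"], None)
    if cb:
        cb(message["result"])

sent = send({"cmd": "ping"}, cb=lambda result: print("got", result))
onResponse({"cmd": "response", "to": sent["id"], "result": "pong"})  # -> got pong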
@@ -1,104 +0,0 @@
import re
import cgi
import copy

from Plugin import PluginManager
from Translate import Translate
if "_" not in locals():
    _ = Translate("plugins/Cors/languages/")


def getCorsPath(site, inner_path):
    match = re.match("^cors-([A-Za-z0-9]{26,35})/(.*)", inner_path)
    if not match:
        raise Exception("Invalid cors path: %s" % inner_path)
    cors_address = match.group(1)
    cors_inner_path = match.group(2)

    if "Cors:%s" % cors_address not in site.settings["permissions"]:
        raise Exception("This site has no permission to access site %s" % cors_address)

    return cors_address, cors_inner_path


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def hasSitePermission(self, address, cmd=None):
        if super(UiWebsocketPlugin, self).hasSitePermission(address, cmd=cmd):
            return True

        if "Cors:%s" % address not in self.site.settings["permissions"] or cmd not in ["fileGet", "fileList", "dirList", "fileRules", "optionalFileInfo", "fileQuery", "dbQuery", "userGetSettings", "siteInfo"]:
            return False
        else:
            return True

    # Add cors support for file commands
    def corsFuncWrapper(self, func_name, to, inner_path, *args, **kwargs):
        if inner_path.startswith("cors-"):
            cors_address, cors_inner_path = getCorsPath(self.site, inner_path)

            req_self = copy.copy(self)
            req_self.site = self.server.sites.get(cors_address)  # Change the site to the cors-requested one
            if not req_self.site:
                return {"error": "No site found"}

            func = getattr(super(UiWebsocketPlugin, req_self), func_name)
            back = func(to, cors_inner_path, *args, **kwargs)
            return back
        else:
            func = getattr(super(UiWebsocketPlugin, self), func_name)
            return func(to, inner_path, *args, **kwargs)

    def actionFileGet(self, to, inner_path, *args, **kwargs):
        return self.corsFuncWrapper("actionFileGet", to, inner_path, *args, **kwargs)

    def actionFileList(self, to, inner_path, *args, **kwargs):
        return self.corsFuncWrapper("actionFileList", to, inner_path, *args, **kwargs)

    def actionDirList(self, to, inner_path, *args, **kwargs):
        return self.corsFuncWrapper("actionDirList", to, inner_path, *args, **kwargs)

    def actionFileRules(self, to, inner_path, *args, **kwargs):
        return self.corsFuncWrapper("actionFileRules", to, inner_path, *args, **kwargs)

    def actionOptionalFileInfo(self, to, inner_path, *args, **kwargs):
        return self.corsFuncWrapper("actionOptionalFileInfo", to, inner_path, *args, **kwargs)

    def actionCorsPermission(self, to, address):
        site = self.server.sites.get(address)
        if site:
            site_name = site.content_manager.contents.get("content.json", {}).get("title")
            button_title = _["Grant"]
        else:
            site_name = address
            button_title = _["Grant & Add"]

        if site and "Cors:" + address in self.permissions:
            return "ignored"

        self.cmd(
            "confirm",
            [_["This site requests <b>read</b> permission to: <b>%s</b>"] % cgi.escape(site_name), button_title],
            lambda res: self.cbCorsPermission(to, address)
        )

    def cbCorsPermission(self, to, address):
        self.actionPermissionAdd(to, "Cors:" + address)
        site = self.server.sites.get(address)
        if not site:
            self.server.site_manager.need(address)


@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    # Allow loading cross-origin files using /cors-address/file.jpg
    def parsePath(self, path):
        path_parts = super(UiRequestPlugin, self).parsePath(path)
        if "cors-" not in path:  # Optimization
            return path_parts
        site = self.server.sites[path_parts["address"]]
        try:
            path_parts["address"], path_parts["inner_path"] = getCorsPath(site, path_parts["inner_path"])
        except Exception:
            return None
        return path_parts
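
getCorsPath() above turns a virtual "cors-<address>/" path prefix into (address, inner_path) and enforces the "Cors:<address>" permission before any cross-site file command is routed. A self-contained sketch of that parsing (the permission entry and address reuse examples from elsewhere in this diff):

import re

def parseCorsPath(inner_path, permissions):
    match = re.match(r"^cors-([A-Za-z0-9]{26,35})/(.*)", inner_path)
    if not match:
        raise Exception("Invalid cors path: %s" % inner_path)
    cors_address, cors_inner_path = match.groups()
    if "Cors:%s" % cors_address not in permissions:
        raise Exception("No permission to access site %s" % cors_address)
    return cors_address, cors_inner_path

perms = ["Cors:1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C"]
print(parseCorsPath("cors-1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/data/users.json", perms))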
@@ -1 +0,0 @@
import CorsPlugin
@@ -1,53 +0,0 @@
from lib.pybitcointools import bitcoin as btctools
import hashlib

ecc_cache = {}


def encrypt(data, pubkey, ephemcurve=None, ciphername='aes-256-cbc'):
    from lib import pyelliptic
    curve, pubkey_x, pubkey_y, i = pyelliptic.ECC._decode_pubkey(pubkey)
    if ephemcurve is None:
        ephemcurve = curve
    ephem = pyelliptic.ECC(curve=ephemcurve)
    key = hashlib.sha512(ephem.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
    key_e, key_m = key[:32], key[32:]
    pubkey = ephem.get_pubkey()
    iv = pyelliptic.OpenSSL.rand(pyelliptic.OpenSSL.get_cipher(ciphername).get_blocksize())
    ctx = pyelliptic.Cipher(key_e, iv, 1, ciphername)
    ciphertext = iv + pubkey + ctx.ciphering(data)
    mac = pyelliptic.hmac_sha256(key_m, ciphertext)
    return key_e, ciphertext + mac


def split(encrypted):
    iv = encrypted[0:16]
    ciphertext = encrypted[16 + 70:-32]

    return iv, ciphertext


def getEcc(privatekey=None):
    from lib import pyelliptic
    global ecc_cache
    if privatekey not in ecc_cache:
        if privatekey:
            publickey_bin = btctools.encode_pubkey(btctools.privtopub(privatekey), "bin")
            publickey_openssl = toOpensslPublickey(publickey_bin)
            privatekey_openssl = toOpensslPrivatekey(privatekey)
            ecc_cache[privatekey] = pyelliptic.ECC(curve='secp256k1', privkey=privatekey_openssl, pubkey=publickey_openssl)
        else:
            ecc_cache[None] = pyelliptic.ECC()
    return ecc_cache[privatekey]


def toOpensslPrivatekey(privatekey):
    privatekey_bin = btctools.encode_privkey(privatekey, "bin")
    return '\x02\xca\x00\x20' + privatekey_bin


def toOpensslPublickey(publickey):
    publickey_bin = btctools.encode_pubkey(publickey, "bin")
    publickey_bin = publickey_bin[1:]
    publickey_openssl = '\x02\xca\x00 ' + publickey_bin[:32] + '\x00 ' + publickey_bin[32:]
    return publickey_openssl
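
encrypt() above emits a fixed-layout payload: a 16-byte AES IV, a 70-byte ephemeral public key in the OpenSSL encoding, the ciphertext, and a trailing 32-byte HMAC-SHA256; split() relies on exactly those offsets. The same slicing demonstrated on a dummy byte string:

IV_LEN, PUBKEY_LEN, MAC_LEN = 16, 70, 32

def splitPayload(encrypted):
    iv = encrypted[:IV_LEN]
    ephem_pubkey = encrypted[IV_LEN:IV_LEN + PUBKEY_LEN]
    ciphertext = encrypted[IV_LEN + PUBKEY_LEN:-MAC_LEN]
    mac = encrypted[-MAC_LEN:]
    return iv, ephem_pubkey, ciphertext, mac

dummy = b"I" * 16 + b"P" * 70 + b"C" * 48 + b"M" * 32
iv, pub, ct, mac = splitPayload(dummy)
print(len(iv), len(pub), len(ct), len(mac))  # -> 16 70 48 32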
@@ -1,149 +0,0 @@
import base64
import os

from Plugin import PluginManager
from Crypt import CryptBitcoin
from lib.pybitcointools import bitcoin as btctools

import CryptMessage


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def encrypt(self, text, publickey):
        encrypted = CryptMessage.encrypt(text, CryptMessage.toOpensslPublickey(publickey))
        return encrypted

    def decrypt(self, encrypted, privatekey):
        back = CryptMessage.getEcc(privatekey).decrypt(encrypted)
        return back.decode("utf8")

    # - Actions -

    # Returns the user's public key unique to the site
    # Return: Public key
    def actionUserPublickey(self, to, index=0):
        publickey = self.user.getEncryptPublickey(self.site.address, index)
        self.response(to, publickey)

    # Encrypt a text using the given public key or the user's site-unique public key
    # Return: Encrypted text using base64 encoding
    def actionEciesEncrypt(self, to, text, publickey=0, return_aes_key=False):
        if type(publickey) is int:  # Encrypt using user's publickey
            publickey = self.user.getEncryptPublickey(self.site.address, publickey)
        aes_key, encrypted = self.encrypt(text.encode("utf8"), publickey.decode("base64"))
        if return_aes_key:
            self.response(to, [base64.b64encode(encrypted), base64.b64encode(aes_key)])
        else:
            self.response(to, base64.b64encode(encrypted))

    # Decrypt a text using the given private key or the user's site-unique private key
    # Return: Decrypted text or list of decrypted texts
    def actionEciesDecrypt(self, to, param, privatekey=0):
        if type(privatekey) is int:  # Decrypt using user's privatekey
            privatekey = self.user.getEncryptPrivatekey(self.site.address, privatekey)

        if type(param) == list:
            encrypted_texts = param
        else:
            encrypted_texts = [param]

        texts = []  # Decoded texts
        for encrypted_text in encrypted_texts:
            try:
                text = self.decrypt(encrypted_text.decode("base64"), privatekey)
                texts.append(text)
            except Exception as err:
                texts.append(None)

        if type(param) == list:
            self.response(to, texts)
        else:
            self.response(to, texts[0])

    # Encrypt a text using AES
    # Return: Iv, AES key, Encrypted text
    def actionAesEncrypt(self, to, text, key=None, iv=None):
        from lib import pyelliptic

        if key:
            key = key.decode("base64")
        else:
            key = os.urandom(32)

        if iv:
            iv = iv.decode("base64")
        else:
            iv = pyelliptic.Cipher.gen_IV('aes-256-cbc')  # Generate a new IV if not defined

        if text:
            encrypted = pyelliptic.Cipher(key, iv, 1, ciphername='aes-256-cbc').ciphering(text.encode("utf8"))
        else:
            encrypted = ""

        self.response(to, [base64.b64encode(key), base64.b64encode(iv), base64.b64encode(encrypted)])

    # Decrypt a text using AES
    # Return: Decrypted text
    def actionAesDecrypt(self, to, *args):
        from lib import pyelliptic

        if len(args) == 3:  # Single decrypt
            encrypted_texts = [(args[0], args[1])]
            keys = [args[2]]
        else:  # Batch decrypt
            encrypted_texts, keys = args

        texts = []  # Decoded texts
        for iv, encrypted_text in encrypted_texts:
            encrypted_text = encrypted_text.decode("base64")
            iv = iv.decode("base64")
            text = None
            for key in keys:
                ctx = pyelliptic.Cipher(key.decode("base64"), iv, 0, ciphername='aes-256-cbc')
                try:
                    decrypted = ctx.ciphering(encrypted_text)
                    if decrypted and decrypted.decode("utf8"):  # Valid text decoded
                        text = decrypted
                except Exception as err:
                    pass
            texts.append(text)

        if len(args) == 3:
            self.response(to, texts[0])
        else:
            self.response(to, texts)


@PluginManager.registerTo("User")
class UserPlugin(object):
    def getEncryptPrivatekey(self, address, param_index=0):
        assert param_index >= 0 and param_index <= 1000
        site_data = self.getSiteData(address)

        if site_data.get("cert"):  # Different privatekey for each cert provider
            index = param_index + self.getAddressAuthIndex(site_data["cert"])
        else:
            index = param_index

        if "encrypt_privatekey_%s" % index not in site_data:
            address_index = self.getAddressAuthIndex(address)
            crypt_index = address_index + 1000 + index
            site_data["encrypt_privatekey_%s" % index] = CryptBitcoin.hdPrivatekey(self.master_seed, crypt_index)
            self.log.debug("New encrypt privatekey generated for %s:%s" % (address, index))
        return site_data["encrypt_privatekey_%s" % index]

    def getEncryptPublickey(self, address, param_index=0):
        assert param_index >= 0 and param_index <= 1000
        site_data = self.getSiteData(address)

        if site_data.get("cert"):  # Different privatekey for each cert provider
            index = param_index + self.getAddressAuthIndex(site_data["cert"])
        else:
            index = param_index

        if "encrypt_publickey_%s" % index not in site_data:
            privatekey = self.getEncryptPrivatekey(address, param_index)
            publickey = btctools.encode_pubkey(btctools.privtopub(privatekey), "bin_compressed")
            site_data["encrypt_publickey_%s" % index] = base64.b64encode(publickey)
        return site_data["encrypt_publickey_%s" % index]
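
getEncryptPrivatekey() above derives a deterministic per-(site, cert, index) key from the master seed: the cert shifts the index, and a fixed +1000 offset keeps encryption-key indexes away from the signing-key index space. The index arithmetic in isolation (getAddressAuthIndex here is a hypothetical stand-in, not the real helper):

def getAddressAuthIndex(address):
    # Hypothetical stand-in: the real helper maps an address string to an int
    return sum(ord(c) for c in address)

def encryptKeyIndex(address, param_index=0, cert=None):
    index = param_index + (getAddressAuthIndex(cert) if cert else 0)
    return getAddressAuthIndex(address) + 1000 + index  # crypt_index fed to hdPrivatekey

print(encryptKeyIndex("1Hello", 0))
print(encryptKeyIndex("1Hello", 0, cert="zeroid.bit"))  # Different cert -> different key index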
@@ -1,109 +0,0 @@
import pytest
from CryptMessage import CryptMessage


@pytest.mark.usefixtures("resetSettings")
class TestCrypt:
    def testPublickey(self, ui_websocket):
        pub = ui_websocket.testAction("UserPublickey", 0)
        assert len(pub) == 44  # Compressed, b64 encoded publickey

        # Different pubkey for the specified index
        assert ui_websocket.testAction("UserPublickey", 1) != ui_websocket.testAction("UserPublickey", 0)

        # Same publickey for the same index
        assert ui_websocket.testAction("UserPublickey", 2) == ui_websocket.testAction("UserPublickey", 2)

        # Different publickey for different cert
        site_data = ui_websocket.user.getSiteData(ui_websocket.site.address)
        site_data["cert"] = None
        pub1 = ui_websocket.testAction("UserPublickey", 0)

        site_data = ui_websocket.user.getSiteData(ui_websocket.site.address)
        site_data["cert"] = "zeroid.bit"
        pub2 = ui_websocket.testAction("UserPublickey", 0)
        assert pub1 != pub2

    def testEcies(self, ui_websocket):
        ui_websocket.actionUserPublickey(0, 0)
        pub = ui_websocket.ws.result

        ui_websocket.actionEciesEncrypt(0, "hello", pub)
        encrypted = ui_websocket.ws.result
        assert len(encrypted) == 180

        # Don't allow decrypt using another privatekey index
        ui_websocket.actionEciesDecrypt(0, encrypted, 123)
        decrypted = ui_websocket.ws.result
        assert decrypted != "hello"

        # Decrypt using the correct privatekey
        ui_websocket.actionEciesDecrypt(0, encrypted)
        decrypted = ui_websocket.ws.result
        assert decrypted == "hello"

        # Decrypt batch
        ui_websocket.actionEciesDecrypt(0, [encrypted, "baad", encrypted])
        decrypted = ui_websocket.ws.result
        assert decrypted == ["hello", None, "hello"]

    def testEciesUtf8(self, ui_websocket):
        # Utf8 test
        utf8_text = u'\xc1rv\xedzt\xfbr\xf5t\xfck\xf6rf\xfar\xf3g\xe9p'
        ui_websocket.actionEciesEncrypt(0, utf8_text)
        encrypted = ui_websocket.ws.result

        ui_websocket.actionEciesDecrypt(0, encrypted)
        assert ui_websocket.ws.result == utf8_text

    def testEciesAes(self, ui_websocket):
        ui_websocket.actionEciesEncrypt(0, "hello", return_aes_key=True)
        ecies_encrypted, aes_key = ui_websocket.ws.result

        # Decrypt using Ecies
        ui_websocket.actionEciesDecrypt(0, ecies_encrypted)
        assert ui_websocket.ws.result == "hello"

        # Decrypt using AES
        aes_iv, aes_encrypted = CryptMessage.split(ecies_encrypted.decode("base64"))

        ui_websocket.actionAesDecrypt(0, aes_iv.encode("base64"), aes_encrypted.encode("base64"), aes_key)
        assert ui_websocket.ws.result == "hello"

    def testAes(self, ui_websocket):
        ui_websocket.actionAesEncrypt(0, "hello")
        key, iv, encrypted = ui_websocket.ws.result

        assert len(key) == 44
        assert len(iv) == 24
        assert len(encrypted) == 24

        # Single decrypt
        ui_websocket.actionAesDecrypt(0, iv, encrypted, key)
        assert ui_websocket.ws.result == "hello"

        # Batch decrypt
        ui_websocket.actionAesEncrypt(0, "hello")
        key2, iv2, encrypted2 = ui_websocket.ws.result

        assert [key, iv, encrypted] != [key2, iv2, encrypted2]

        # One key given: only the entries encrypted with it decrypt
        ui_websocket.actionAesDecrypt(0, [[iv, encrypted], [iv, encrypted], [iv, "baad"], [iv2, encrypted2]], [key])
        assert ui_websocket.ws.result == ["hello", "hello", None, None]

        # Two keys given: the entry encrypted with key2 also decrypts
        ui_websocket.actionAesDecrypt(0, [[iv, encrypted], [iv, encrypted], [iv, "baad"], [iv2, encrypted2]], [key, key2])
        assert ui_websocket.ws.result == ["hello", "hello", None, "hello"]

    def testAesUtf8(self, ui_websocket):
        utf8_text = u'\xc1rv\xedzt\xfbr\xf5t\xfck\xf6rf\xfar\xf3g\xe9'
        ui_websocket.actionAesEncrypt(0, utf8_text)
        key, iv, encrypted = ui_websocket.ws.result

        ui_websocket.actionAesDecrypt(0, iv, encrypted, key)
        assert ui_websocket.ws.result == utf8_text
@@ -1 +0,0 @@
from src.Test.conftest import *
@@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.
@@ -1 +0,0 @@
import CryptMessagePlugin
@@ -1,194 +0,0 @@
import os
import re

import gevent

from Plugin import PluginManager
from Config import config
from Debug import Debug


# Keep archives open for faster response times on large sites
archive_cache = {}


def closeArchive(archive_path):
    if archive_path in archive_cache:
        del archive_cache[archive_path]


def openArchive(archive_path, file_obj=None):
    if archive_path not in archive_cache:
        if archive_path.endswith("tar.gz"):
            import tarfile
            archive_cache[archive_path] = tarfile.open(fileobj=file_obj, mode="r:gz") if file_obj else tarfile.open(archive_path, "r:gz")
        elif archive_path.endswith("tar.bz2"):
            import tarfile
            archive_cache[archive_path] = tarfile.open(fileobj=file_obj, mode="r:bz2") if file_obj else tarfile.open(archive_path, "r:bz2")
        else:
            import zipfile
            archive_cache[archive_path] = zipfile.ZipFile(file_obj or archive_path)
        gevent.spawn_later(5, lambda: closeArchive(archive_path))  # Close after 5 sec

    archive = archive_cache[archive_path]
    return archive


def openArchiveFile(archive_path, path_within, file_obj=None):
    archive = openArchive(archive_path, file_obj=file_obj)
    if archive_path.endswith(".zip"):
        return archive.open(path_within)
    else:
        return archive.extractfile(path_within.encode("utf8"))


@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    def actionSiteMedia(self, path, **kwargs):
        if ".zip/" in path or ".tar.gz/" in path:
            file_obj = None
            path_parts = self.parsePath(path)
            file_path = u"%s/%s/%s" % (config.data_dir, path_parts["address"], path_parts["inner_path"].decode("utf8"))
            match = re.match("^(.*\.(?:tar.gz|tar.bz2|zip))/(.*)", file_path)
            archive_path, path_within = match.groups()
            if archive_path not in archive_cache:
                site = self.server.site_manager.get(path_parts["address"])
                if not site:
                    return self.actionSiteAddPrompt(path)
                archive_inner_path = site.storage.getInnerPath(archive_path)
                if not os.path.isfile(archive_path):
                    # Wait until the file downloads
                    result = site.needFile(archive_inner_path, priority=10)
                    # Send a virtual file path download finished event to remove the loading screen
                    site.updateWebsocket(file_done=archive_inner_path)
                    if not result:
                        return self.error404(archive_inner_path)
                file_obj = site.storage.openBigfile(archive_inner_path)

            header_allow_ajax = False
            if self.get.get("ajax_key"):
                requester_site = self.server.site_manager.get(path_parts["request_address"])
                if self.get["ajax_key"] == requester_site.settings["ajax_key"]:
                    header_allow_ajax = True
                else:
                    return self.error403("Invalid ajax_key")

            try:
                file = openArchiveFile(archive_path, path_within, file_obj=file_obj)
                content_type = self.getContentType(file_path)
                self.sendHeader(200, content_type=content_type, noscript=kwargs.get("header_noscript", False), allow_ajax=header_allow_ajax)
                return self.streamFile(file)
            except Exception as err:
                self.log.debug("Error opening archive file: %s" % Debug.formatException(err))
                return self.error404(path)

        return super(UiRequestPlugin, self).actionSiteMedia(path, **kwargs)

    def streamFile(self, file):
        for i in range(100):  # Read max ~6MB
            block = file.read(60 * 1024)
            if block:
                yield block
            else:
                file.close()
                break


@PluginManager.registerTo("SiteStorage")
class SiteStoragePlugin(object):
    def isFile(self, inner_path):
        if ".zip/" in inner_path or ".tar.gz/" in inner_path:
            match = re.match("^(.*\.(?:tar.gz|tar.bz2|zip))/(.*)", inner_path)
            archive_inner_path, path_within = match.groups()
            return super(SiteStoragePlugin, self).isFile(archive_inner_path)
        else:
            return super(SiteStoragePlugin, self).isFile(inner_path)

    def openArchive(self, inner_path):
        archive_path = self.getPath(inner_path)
        file_obj = None
        if archive_path not in archive_cache:
            if not os.path.isfile(archive_path):
                result = self.site.needFile(inner_path, priority=10)
                self.site.updateWebsocket(file_done=inner_path)
                if not result:
                    raise Exception("Unable to download file")
            file_obj = self.site.storage.openBigfile(inner_path)

        try:
            archive = openArchive(archive_path, file_obj=file_obj)
        except Exception as err:
            raise Exception("Unable to open archive: %s" % err)

        return archive

    def walk(self, inner_path, *args, **kwargs):
        if ".zip" in inner_path or ".tar.gz" in inner_path:
            match = re.match("^(.*\.(?:tar.gz|tar.bz2|zip))(.*)", inner_path)
            archive_inner_path, path_within = match.groups()
            archive = self.openArchive(archive_inner_path)
            path_within = path_within.lstrip("/")

            if archive_inner_path.endswith(".zip"):
                namelist = [name for name in archive.namelist() if not name.endswith("/")]
            else:
                namelist = [item.name for item in archive.getmembers() if not item.isdir()]

            namelist_relative = []
            for name in namelist:
                if not name.startswith(path_within):
                    continue
                name_relative = name.replace(path_within, "", 1).rstrip("/")
                namelist_relative.append(name_relative)

            return namelist_relative

        else:
            return super(SiteStoragePlugin, self).walk(inner_path, *args, **kwargs)

    def list(self, inner_path, *args, **kwargs):
        if ".zip" in inner_path or ".tar.gz" in inner_path:
            match = re.match("^(.*\.(?:tar.gz|tar.bz2|zip))(.*)", inner_path)
            archive_inner_path, path_within = match.groups()
            archive = self.openArchive(archive_inner_path)
            path_within = path_within.lstrip("/")

            if archive_inner_path.endswith(".zip"):
                namelist = [name for name in archive.namelist()]
            else:
                namelist = [item.name for item in archive.getmembers()]

            namelist_relative = []
            for name in namelist:
                if not name.startswith(path_within):
                    continue
                name_relative = name.replace(path_within, "", 1).rstrip("/")

                if "/" in name_relative:  # File is in a sub-directory
                    continue

                namelist_relative.append(name_relative)
            return namelist_relative

        else:
            return super(SiteStoragePlugin, self).list(inner_path, *args, **kwargs)

    def read(self, inner_path, mode="r"):
        if ".zip/" in inner_path or ".tar.gz/" in inner_path:
            match = re.match("^(.*\.(?:tar.gz|tar.bz2|zip))(.*)", inner_path)
            archive_inner_path, path_within = match.groups()
            archive = self.openArchive(archive_inner_path)
            path_within = path_within.lstrip("/")

            if archive_inner_path.endswith(".zip"):
                return archive.open(path_within).read()
            else:
                return archive.extractfile(path_within.encode("utf8")).read()

        else:
            return super(SiteStoragePlugin, self).read(inner_path, mode)
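
openArchiveFile() above dispatches on archive type: ZipFile.open() for zip members, TarFile.extractfile() for tar members. A self-contained stdlib sketch that builds a zip in memory and reads one member back the same way:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as archive:
    archive.writestr("inner/index.html", "<h1>hello</h1>")

archive = zipfile.ZipFile(buf)  # Reading side, like archive_cache[archive_path]
print(archive.open("inner/index.html").read())  # -> b'<h1>hello</h1>'
print([name for name in archive.namelist() if not name.endswith("/")])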
@@ -1 +0,0 @@
import FilePackPlugin
@ -1,384 +0,0 @@
|
|||
import re
|
||||
import time
|
||||
import copy
|
||||
|
||||
from Plugin import PluginManager
|
||||
from Translate import Translate
|
||||
from util import RateLimit
|
||||
from util import helper
|
||||
from Debug import Debug
|
||||
try:
|
||||
import OptionalManager.UiWebsocketPlugin # To make optioanlFileInfo merger sites compatible
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if "merger_db" not in locals().keys(): # To keep merger_sites between module reloads
|
||||
merger_db = {} # Sites that allowed to list other sites {address: [type1, type2...]}
|
||||
merged_db = {} # Sites that allowed to be merged to other sites {address: type, ...}
|
||||
merged_to_merger = {} # {address: [site1, site2, ...]} cache
|
||||
site_manager = None # Site manager for merger sites
|
||||
|
||||
if "_" not in locals():
|
||||
_ = Translate("plugins/MergerSite/languages/")
|
||||
|
||||
|
||||
# Check if the site has permission to this merger site
|
||||
def checkMergerPath(address, inner_path):
|
||||
merged_match = re.match("^merged-(.*?)/([A-Za-z0-9]{26,35})/", inner_path)
|
||||
if merged_match:
|
||||
merger_type = merged_match.group(1)
|
||||
# Check if merged site is allowed to include other sites
|
||||
if merger_type in merger_db.get(address, []):
|
||||
# Check if included site allows to include
|
||||
merged_address = merged_match.group(2)
|
||||
if merged_db.get(merged_address) == merger_type:
|
||||
inner_path = re.sub("^merged-(.*?)/([A-Za-z0-9]{26,35})/", "", inner_path)
|
||||
return merged_address, inner_path
|
||||
else:
|
||||
raise Exception(
|
||||
"Merger site (%s) does not have permission for merged site: %s (%s)" %
|
||||
(merger_type, merged_address, merged_db.get(merged_address))
|
||||
)
|
||||
else:
|
||||
raise Exception("No merger (%s) permission to load: <br>%s (%s not in %s)" % (
|
||||
address, inner_path, merger_type, merger_db.get(address, []))
|
||||
)
|
||||
else:
|
||||
raise Exception("Invalid merger path: %s" % inner_path)
|
||||
|
||||
|
||||
@PluginManager.registerTo("UiWebsocket")
|
||||
class UiWebsocketPlugin(object):
|
||||
# Download new site
|
||||
def actionMergerSiteAdd(self, to, addresses):
|
||||
if type(addresses) != list:
|
||||
# Single site add
|
||||
addresses = [addresses]
|
||||
# Check if the site has merger permission
|
||||
merger_types = merger_db.get(self.site.address)
|
||||
if not merger_types:
|
||||
return self.response(to, {"error": "Not a merger site"})
|
||||
|
||||
if RateLimit.isAllowed(self.site.address + "-MergerSiteAdd", 10) and len(addresses) == 1:
|
||||
# Without confirmation if only one site address and not called in last 10 sec
|
||||
self.cbMergerSiteAdd(to, addresses)
|
||||
else:
|
||||
self.cmd(
|
||||
"confirm",
|
||||
[_["Add <b>%s</b> new site?"] % len(addresses), "Add"],
|
||||
lambda (res): self.cbMergerSiteAdd(to, addresses)
|
||||
)
|
||||
self.response(to, "ok")
|
||||
|
||||
# Callback of adding new site confirmation
|
||||
def cbMergerSiteAdd(self, to, addresses):
|
||||
added = 0
|
||||
for address in addresses:
|
||||
added += 1
|
||||
site_manager.need(address)
|
||||
if added:
|
||||
self.cmd("notification", ["done", _["Added <b>%s</b> new site"] % added, 5000])
|
||||
RateLimit.called(self.site.address + "-MergerSiteAdd")
|
||||
site_manager.updateMergerSites()
|
||||
|
||||
# Delete a merged site
|
||||
def actionMergerSiteDelete(self, to, address):
|
||||
site = self.server.sites.get(address)
|
||||
if not site:
|
||||
return self.response(to, {"error": "No site found: %s" % address})
|
||||
|
||||
merger_types = merger_db.get(self.site.address)
|
||||
if not merger_types:
|
||||
return self.response(to, {"error": "Not a merger site"})
|
||||
if merged_db.get(address) not in merger_types:
|
||||
return self.response(to, {"error": "Merged type (%s) not in %s" % (merged_db.get(address), merger_types)})
|
||||
|
||||
self.cmd("notification", ["done", _["Site deleted: <b>%s</b>"] % address, 5000])
|
||||
self.response(to, "ok")
|
||||
|
||||
# Lists merged sites
|
||||
def actionMergerSiteList(self, to, query_site_info=False):
|
||||
merger_types = merger_db.get(self.site.address)
|
||||
ret = {}
|
||||
if not merger_types:
|
||||
return self.response(to, {"error": "Not a merger site"})
|
||||
for address, merged_type in merged_db.iteritems():
|
||||
if merged_type not in merger_types:
|
||||
continue # Site not for us
|
||||
if query_site_info:
|
||||
site = self.server.sites.get(address)
|
||||
ret[address] = self.formatSiteInfo(site, create_user=False)
|
||||
else:
|
||||
ret[address] = merged_type
|
||||
self.response(to, ret)
|
||||
|
||||
def hasSitePermission(self, address, *args, **kwargs):
|
||||
if super(UiWebsocketPlugin, self).hasSitePermission(address, *args, **kwargs):
|
||||
return True
|
||||
else:
|
||||
if self.site.address in [merger_site.address for merger_site in merged_to_merger.get(address, [])]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
# Add support merger sites for file commands
|
||||
def mergerFuncWrapper(self, func_name, to, inner_path, *args, **kwargs):
|
||||
if inner_path.startswith("merged-"):
|
||||
merged_address, merged_inner_path = checkMergerPath(self.site.address, inner_path)
|
||||
|
||||
# Set the same cert for merged site
|
||||
merger_cert = self.user.getSiteData(self.site.address).get("cert")
|
||||
if merger_cert and self.user.getSiteData(merged_address).get("cert") != merger_cert:
|
||||
self.user.setCert(merged_address, merger_cert)
|
||||
|
||||
req_self = copy.copy(self)
|
||||
req_self.site = self.server.sites.get(merged_address) # Change the site to the merged one
|
||||
|
||||
func = getattr(super(UiWebsocketPlugin, req_self), func_name)
|
||||
return func(to, merged_inner_path, *args, **kwargs)
|
||||
else:
|
||||
func = getattr(super(UiWebsocketPlugin, self), func_name)
|
||||
return func(to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileList(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileList", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionDirList(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionDirList", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileGet(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileGet", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileWrite(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileWrite", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileDelete(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileDelete", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileRules(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileRules", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionFileNeed(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionFileNeed", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionOptionalFileInfo(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionOptionalFileInfo", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionOptionalFileDelete(self, to, inner_path, *args, **kwargs):
|
||||
return self.mergerFuncWrapper("actionOptionalFileDelete", to, inner_path, *args, **kwargs)
|
||||
|
||||
def actionBigfileUploadInit(self, to, inner_path, *args, **kwargs):
|
||||
back = self.mergerFuncWrapper("actionBigfileUploadInit", to, inner_path, *args, **kwargs)
|
||||
if inner_path.startswith("merged-"):
|
||||
merged_address, merged_inner_path = checkMergerPath(self.site.address, inner_path)
|
||||
back["inner_path"] = "merged-%s/%s/%s" % (merged_db[merged_address], merged_address, back["inner_path"])
|
||||
return back
|
||||
|
||||
# Add support merger sites for file commands with privatekey parameter
|
||||
def mergerFuncWrapperWithPrivatekey(self, func_name, to, privatekey, inner_path, *args, **kwargs):
|
||||
func = getattr(super(UiWebsocketPlugin, self), func_name)
|
||||
if inner_path.startswith("merged-"):
|
||||
merged_address, merged_inner_path = checkMergerPath(self.site.address, inner_path)
|
||||
merged_site = self.server.sites.get(merged_address)
|
||||
|
||||
# Set the same cert for merged site
|
||||
merger_cert = self.user.getSiteData(self.site.address).get("cert")
|
||||
if merger_cert:
|
||||
self.user.setCert(merged_address, merger_cert)
|
||||
|
||||
site_before = self.site # Save to be able to change it back after we ran the command
|
||||
self.site = merged_site # Change the site to the merged one
|
||||
try:
|
||||
back = func(to, privatekey, merged_inner_path, *args, **kwargs)
|
||||
finally:
|
||||
self.site = site_before # Change back to original site
|
||||
return back
|
||||
else:
|
||||
return func(to, privatekey, inner_path, *args, **kwargs)
|
||||
|
||||
def actionSiteSign(self, to, privatekey=None, inner_path="content.json", *args, **kwargs):
|
||||
return self.mergerFuncWrapperWithPrivatekey("actionSiteSign", to, privatekey, inner_path, *args, **kwargs)
|
||||
|
||||
def actionSitePublish(self, to, privatekey=None, inner_path="content.json", *args, **kwargs):
|
||||
return self.mergerFuncWrapperWithPrivatekey("actionSitePublish", to, privatekey, inner_path, *args, **kwargs)
|
||||
|
||||
def actionPermissionAdd(self, to, permission):
|
||||
super(UiWebsocketPlugin, self).actionPermissionAdd(to, permission)
|
||||
if permission.startswith("Merger"):
|
||||
self.site.storage.rebuildDb()
|
||||
|
||||
def actionPermissionDetails(self, to, permission):
|
||||
if not permission.startswith("Merger"):
|
||||
return super(UiWebsocketPlugin, self).actionPermissionDetails(to, permission)
|
||||
|
||||
merger_type = permission.replace("Merger:", "")
|
||||
if not re.match("^[A-Za-z0-9-]+$", merger_type):
|
||||
raise Exception("Invalid merger_type: %s" % merger_type)
|
||||
merged_sites = []
|
||||
for address, merged_type in merged_db.iteritems():
|
||||
if merged_type != merger_type:
|
||||
continue
|
||||
site = self.server.sites.get(address)
|
||||
try:
|
||||
merged_sites.append(site.content_manager.contents.get("content.json").get("title", address))
|
||||
except Exception as err:
|
||||
merged_sites.append(address)
|
||||
|
||||
details = _["Read and write permissions to sites with merged type of <b>%s</b> "] % merger_type
|
||||
details += _["(%s sites)"] % len(merged_sites)
|
||||
details += "<div style='white-space: normal; max-width: 400px'>%s</div>" % ", ".join(merged_sites)
|
||||
self.response(to, details)
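# Sketch of the routing performed by mergerFuncWrapper() above (the address is
# made up): a fileGet call issued on the merger site with a virtual path
#   actionFileGet(to, "merged-ZeroMe/1MergedSiteAddress1234567890abc/data.json")
# is resolved via checkMergerPath() and executed as
#   actionFileGet(to, "data.json")
# against the merged site, after the merger site's cert (if any) has been
# copied over to the merged site.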
@PluginManager.registerTo("UiRequest")
|
||||
class UiRequestPlugin(object):
|
||||
# Allow to load merged site files using /merged-ZeroMe/address/file.jpg
|
||||
def parsePath(self, path):
|
||||
path_parts = super(UiRequestPlugin, self).parsePath(path)
|
||||
if "merged-" not in path: # Optimization
|
||||
return path_parts
|
||||
path_parts["address"], path_parts["inner_path"] = checkMergerPath(path_parts["address"], path_parts["inner_path"])
|
||||
return path_parts
|
||||
|
||||
|
||||
@PluginManager.registerTo("SiteStorage")
|
||||
class SiteStoragePlugin(object):
|
||||
# Also rebuild from merged sites
|
||||
def getDbFiles(self):
|
||||
merger_types = merger_db.get(self.site.address)
|
||||
|
||||
# First return the site's own db files
|
||||
for item in super(SiteStoragePlugin, self).getDbFiles():
|
||||
yield item
|
||||
|
||||
# Not a merger site, that's all
|
||||
if not merger_types:
|
||||
raise StopIteration
|
||||
|
||||
merged_sites = [
|
||||
site_manager.sites[address]
|
||||
for address, merged_type in merged_db.iteritems()
|
||||
if merged_type in merger_types
|
||||
]
|
||||
found = 0
|
||||
for merged_site in merged_sites:
|
||||
self.log.debug("Loading merged site: %s" % merged_site)
|
||||
merged_type = merged_db[merged_site.address]
|
||||
for content_inner_path, content in merged_site.content_manager.contents.iteritems():
|
||||
# content.json file itself
|
||||
if merged_site.storage.isFile(content_inner_path): # Missing content.json file
|
||||
merged_inner_path = "merged-%s/%s/%s" % (merged_type, merged_site.address, content_inner_path)
|
||||
yield merged_inner_path, merged_site.storage.getPath(content_inner_path)
|
||||
else:
|
||||
merged_site.log.error("[MISSING] %s" % content_inner_path)
|
||||
# Data files in content.json
|
||||
content_inner_path_dir = helper.getDirname(content_inner_path) # Content.json dir relative to site
|
||||
for file_relative_path in content.get("files", {}).keys() + content.get("files_optional", {}).keys():
|
||||
if not file_relative_path.endswith(".json"):
|
||||
continue # We only interesed in json files
|
||||
file_inner_path = content_inner_path_dir + file_relative_path # File Relative to site dir
|
||||
file_inner_path = file_inner_path.strip("/") # Strip leading /
|
||||
if merged_site.storage.isFile(file_inner_path):
|
||||
merged_inner_path = "merged-%s/%s/%s" % (merged_type, merged_site.address, file_inner_path)
|
||||
yield merged_inner_path, merged_site.storage.getPath(file_inner_path)
|
||||
else:
|
||||
merged_site.log.error("[MISSING] %s" % file_inner_path)
|
||||
found += 1
|
||||
if found % 100 == 0:
|
||||
time.sleep(0.000001) # Context switch to avoid UI block
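    # Example of what the generator above yields for a merged site (a sketch
    # with made-up values): the virtual inner_path keeps the
    # merged-<type>/<address>/ prefix so db rows can be traced back to their
    # source site:
    #   ("merged-ZeroMe/1MergedSiteAddress1234567890abc/data/users/content.json",
    #    "/home/user/ZeroNet/data/1MergedSiteAddress1234567890abc/data/users/content.json")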
    # Also notify merger sites on a merged site file change
    def onUpdated(self, inner_path, file=None):
        super(SiteStoragePlugin, self).onUpdated(inner_path, file)

        merged_type = merged_db.get(self.site.address)

        for merger_site in merged_to_merger.get(self.site.address, []):
            if merger_site.address == self.site.address:  # Avoid infinite loop
                continue
            virtual_path = "merged-%s/%s/%s" % (merged_type, self.site.address, inner_path)
            if inner_path.endswith(".json"):
                if file is not None:
                    merger_site.storage.onUpdated(virtual_path, file=file)
                else:
                    merger_site.storage.onUpdated(virtual_path, file=self.open(inner_path))
            else:
                merger_site.storage.onUpdated(virtual_path)


@PluginManager.registerTo("Site")
class SitePlugin(object):
    def fileDone(self, inner_path):
        super(SitePlugin, self).fileDone(inner_path)

        for merger_site in merged_to_merger.get(self.address, []):
            if merger_site.address == self.address:
                continue
            for ws in merger_site.websockets:
                ws.event("siteChanged", self, {"event": ["file_done", inner_path]})

    def fileFailed(self, inner_path):
        super(SitePlugin, self).fileFailed(inner_path)

        for merger_site in merged_to_merger.get(self.address, []):
            if merger_site.address == self.address:
                continue
            for ws in merger_site.websockets:
                ws.event("siteChanged", self, {"event": ["file_failed", inner_path]})


@PluginManager.registerTo("SiteManager")
class SiteManagerPlugin(object):
    # Update the merger site databases for all site types
    def updateMergerSites(self):
        global merger_db, merged_db, merged_to_merger, site_manager
        s = time.time()
        merger_db = {}
        merged_db = {}
        merged_to_merger = {}
        site_manager = self
        if not self.sites:
            return
        for site in self.sites.itervalues():
            # Update merged sites
            try:
                merged_type = site.content_manager.contents.get("content.json", {}).get("merged_type")
            except Exception as err:
                self.log.error("Error loading site %s: %s" % (site.address, Debug.formatException(err)))
                continue
            if merged_type:
                merged_db[site.address] = merged_type

            # Update merger sites
            for permission in site.settings["permissions"]:
                if not permission.startswith("Merger:"):
                    continue
                if merged_type:
                    self.log.error(
                        "Removing permission %s from %s: Merger and merged at the same time." %
                        (permission, site.address)
                    )
                    site.settings["permissions"].remove(permission)
                    continue
                merger_type = permission.replace("Merger:", "")
                if site.address not in merger_db:
                    merger_db[site.address] = []
                merger_db[site.address].append(merger_type)
                site_manager.sites[site.address] = site

            # Update merged to merger
            if merged_type:
                for merger_site in self.sites.itervalues():
                    if "Merger:" + merged_type in merger_site.settings["permissions"]:
                        if site.address not in merged_to_merger:
                            merged_to_merger[site.address] = []
                        merged_to_merger[site.address].append(merger_site)
        self.log.debug("Updated merger sites in %.3fs" % (time.time() - s))

    def load(self, *args, **kwargs):
        super(SiteManagerPlugin, self).load(*args, **kwargs)
        self.updateMergerSites()

    def save(self, *args, **kwargs):
        super(SiteManagerPlugin, self).save(*args, **kwargs)
        self.updateMergerSites()
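# How the two databases above get populated (a sketch; the type and layout
# are illustrative). A merged site declares its type in its content.json:
#
#   { ..., "merged_type": "ZeroMe" }
#
# and a merger site holds the matching permission in its site settings:
#
#   "permissions": ["Merger:ZeroMe"]
#
# updateMergerSites() then yields merger_db = {merger_address: ["ZeroMe"]}
# and merged_db = {merged_address: "ZeroMe"}.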
@ -1 +0,0 @@
import MergerSitePlugin
@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "¿Agregar <b>%s</b> nuevo sitio?",
    "Added <b>%s</b> new site": "Sitio <b>%s</b> agregado",
    "Site deleted: <b>%s</b>": "Sitio removido: <b>%s</b>"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "Ajouter le site <b>%s</b> ?",
    "Added <b>%s</b> new site": "Site <b>%s</b> ajouté",
    "Site deleted: <b>%s</b>": "Site <b>%s</b> supprimé"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "Új oldal hozzáadása: <b>%s</b>?",
    "Added <b>%s</b> new site": "Új oldal hozzáadva: <b>%s</b>",
    "Site deleted: <b>%s</b>": "Oldal törölve: <b>%s</b>"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "Aggiungere <b>%s</b> nuovo sito ?",
    "Added <b>%s</b> new site": "Sito <b>%s</b> aggiunto",
    "Site deleted: <b>%s</b>": "Sito <b>%s</b> eliminato"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "Adicionar <b>%s</b> novo site?",
    "Added <b>%s</b> new site": "Site <b>%s</b> adicionado",
    "Site deleted: <b>%s</b>": "Site removido: <b>%s</b>"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "<b>%s</b> sitesi eklensin mi?",
    "Added <b>%s</b> new site": "<b>%s</b> sitesi eklendi",
    "Site deleted: <b>%s</b>": "<b>%s</b> sitesi silindi"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "添加新網站: <b>%s</b>?",
    "Added <b>%s</b> new site": "已添加到新網站:<b>%s</b>",
    "Site deleted: <b>%s</b>": "網站已刪除:<b>%s</b>"
}

@ -1,5 +0,0 @@
{
    "Add <b>%s</b> new site?": "添加新站点: <b>%s</b>?",
    "Added <b>%s</b> new site": "已添加到新站点:<b>%s</b>",
    "Site deleted: <b>%s</b>": "站点已删除:<b>%s</b>"
}
@ -1,187 +0,0 @@
import time
import re

from Plugin import PluginManager
from Db import DbQuery
from Debug import Debug


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def formatSiteInfo(self, site, create_user=True):
        site_info = super(UiWebsocketPlugin, self).formatSiteInfo(site, create_user=create_user)
        feed_following = self.user.sites.get(site.address, {}).get("follow", None)
        if feed_following == None:
            site_info["feed_follow_num"] = None
        else:
            site_info["feed_follow_num"] = len(feed_following)
        return site_info

    def actionFeedFollow(self, to, feeds):
        self.user.setFeedFollow(self.site.address, feeds)
        self.user.save()
        self.response(to, "ok")

    def actionFeedListFollow(self, to):
        feeds = self.user.sites[self.site.address].get("follow", {})
        self.response(to, feeds)

    def actionFeedQuery(self, to, limit=10, day_limit=3):
        if "ADMIN" not in self.site.settings["permissions"]:
            return self.response(to, "FeedQuery not allowed")

        from Site import SiteManager
        rows = []
        stats = []

        total_s = time.time()
        num_sites = 0

        for address, site_data in self.user.sites.items():
            feeds = site_data.get("follow")
            if not feeds:
                continue
            if type(feeds) is not dict:
                self.log.debug("Invalid feed for site %s" % address)
                continue
            num_sites += 1
            for name, query_set in feeds.iteritems():
                site = SiteManager.site_manager.get(address)
                if not site or not site.storage.has_db:
                    continue

                s = time.time()
                try:
                    query_raw, params = query_set
                    query_parts = re.split(r"UNION(?:\s+ALL|)", query_raw)
                    for i, query_part in enumerate(query_parts):
                        db_query = DbQuery(query_part)
                        if day_limit:
                            where = " WHERE %s > strftime('%%s', 'now', '-%s day')" % (db_query.fields.get("date_added", "date_added"), day_limit)
                            if "WHERE" in query_part:
                                query_part = re.sub("WHERE (.*?)(?=$| GROUP BY)", where + " AND (\\1)", query_part)
                            else:
                                query_part += where
                        query_parts[i] = query_part
                    query = " UNION ".join(query_parts)

                    if ":params" in query:
                        query = query.replace(":params", ",".join(["?"] * len(params)))
                        res = site.storage.query(query + " ORDER BY date_added DESC LIMIT %s" % limit, params * query_raw.count(":params"))
                    else:
                        res = site.storage.query(query + " ORDER BY date_added DESC LIMIT %s" % limit)

                except Exception as err:  # Log error
                    self.log.error("%s feed query %s error: %s" % (address, name, Debug.formatException(err)))
                    stats.append({"site": site.address, "feed_name": name, "error": str(err), "query": query})
                    continue

                for row in res:
                    row = dict(row)
                    if "date_added" not in row or not isinstance(row["date_added"], (int, long, float, complex)):
                        self.log.debug("Invalid date_added from site %s: %r" % (address, row.get("date_added")))
                        continue
                    if row["date_added"] > 1000000000000:  # Formatted as milliseconds
                        row["date_added"] = row["date_added"] / 1000
                    if row["date_added"] > time.time() + 120:
                        self.log.debug("Newsfeed item from the future from site %s" % address)
                        continue  # Feed item is in the future, skip it
                    row["site"] = address
                    row["feed_name"] = name
                    rows.append(row)
                stats.append({"site": site.address, "feed_name": name, "taken": round(time.time() - s, 3)})
                time.sleep(0.0001)
        return self.response(to, {"rows": rows, "stats": stats, "num": len(rows), "sites": num_sites, "taken": round(time.time() - total_s, 3)})
    def parseSearch(self, search):
        parts = re.split("(site|type):", search)
        if len(parts) > 1:  # Found filter
            search_text = parts[0]
            parts = [part.strip() for part in parts]
            filters = dict(zip(parts[1::2], parts[2::2]))
        else:
            search_text = search
            filters = {}
        return [search_text, filters]
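    # Usage sketch:
    #   parseSearch("bitcoin site:ZeroTalk type:post")
    #   -> ["bitcoin ", {"site": "ZeroTalk", "type": "post"}]
    # (search_text keeps its trailing space because it is taken before the
    # parts are stripped)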
    def actionFeedSearch(self, to, search, limit=30, day_limit=30):
        if "ADMIN" not in self.site.settings["permissions"]:
            return self.response(to, "FeedSearch not allowed")

        from Site import SiteManager
        rows = []
        stats = []
        num_sites = 0
        total_s = time.time()

        search_text, filters = self.parseSearch(search)

        for address, site in SiteManager.site_manager.list().iteritems():
            if not site.storage.has_db:
                continue

            if "site" in filters:
                if filters["site"].lower() not in [site.address, site.content_manager.contents["content.json"].get("title").lower()]:
                    continue

            if site.storage.db:  # Database loaded
                feeds = site.storage.db.schema.get("feeds")
            else:
                try:
                    feeds = site.storage.loadJson("dbschema.json").get("feeds")
                except Exception:
                    continue

            if not feeds:
                continue

            num_sites += 1

            for name, query in feeds.iteritems():
                s = time.time()
                try:
                    db_query = DbQuery(query)

                    params = []
                    # Filters
                    if search_text:
                        db_query.wheres.append("(%s LIKE ? OR %s LIKE ?)" % (db_query.fields["body"], db_query.fields["title"]))
                        search_like = "%" + search_text.replace(" ", "%") + "%"
                        params.append(search_like)
                        params.append(search_like)
                    if filters.get("type") and filters["type"] not in query:
                        continue

                    if day_limit:
                        db_query.wheres.append(
                            "%s > strftime('%%s', 'now', '-%s day')" % (db_query.fields.get("date_added", "date_added"), day_limit)
                        )

                    # Order
                    db_query.parts["ORDER BY"] = "date_added DESC"
                    db_query.parts["LIMIT"] = str(limit)

                    res = site.storage.query(str(db_query), params)
                except Exception as err:
                    self.log.error("%s feed query %s error: %s" % (address, name, Debug.formatException(err)))
                    stats.append({"site": site.address, "feed_name": name, "error": str(err), "query": query})
                    continue
                for row in res:
                    row = dict(row)
                    if row["date_added"] > time.time() + 120:
                        continue  # Feed item is in the future, skip it
                    row["site"] = address
                    row["feed_name"] = name
                    rows.append(row)
                stats.append({"site": site.address, "feed_name": name, "taken": round(time.time() - s, 3)})
        return self.response(to, {"rows": rows, "num": len(rows), "sites": num_sites, "taken": round(time.time() - total_s, 3), "stats": stats})


@PluginManager.registerTo("User")
class UserPlugin(object):
    # Set the queries the user follows
    def setFeedFollow(self, address, feeds):
        site_data = self.getSiteData(address)
        site_data["follow"] = feeds
        self.save()
        return site_data
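# Client-side flow for the commands above (hypothetical ZeroFrame pseudo-calls
# for illustration only):
#   cmd("feedFollow", [feeds])    -> actionFeedFollow saves the queries
#   cmd("feedListFollow", [])     -> actionFeedListFollow returns them
#   cmd("feedQuery", [10, 3])     -> actionFeedQuery runs them on every followed site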
@ -1 +0,0 @@
import NewsfeedPlugin

@ -1,422 +0,0 @@
import time
import collections
import itertools
import re

import gevent

from util import helper
from Plugin import PluginManager
from Config import config
from Debug import Debug

if "content_db" not in locals().keys():  # To keep it between module reloads
    content_db = None


@PluginManager.registerTo("ContentDb")
class ContentDbPlugin(object):
    def __init__(self, *args, **kwargs):
        global content_db
        content_db = self
        self.filled = {}  # Site addresses that are already filled from content.json
        self.need_filling = False  # The file_optional table was just created; fill it from the content.json files
        self.time_peer_numbers_updated = 0
        self.my_optional_files = {}  # The last 50 site_address/inner_path values written by fileWrite (these files get auto-pinned)
        self.optional_files = collections.defaultdict(dict)
        self.optional_files_loading = False
        helper.timer(60 * 5, self.checkOptionalLimit)
        super(ContentDbPlugin, self).__init__(*args, **kwargs)

    def getSchema(self):
        schema = super(ContentDbPlugin, self).getSchema()

        # Need the file_optional table
        schema["tables"]["file_optional"] = {
            "cols": [
                ["file_id", "INTEGER PRIMARY KEY UNIQUE NOT NULL"],
                ["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
                ["inner_path", "TEXT"],
                ["hash_id", "INTEGER"],
                ["size", "INTEGER"],
                ["peer", "INTEGER DEFAULT 0"],
                ["uploaded", "INTEGER DEFAULT 0"],
                ["is_downloaded", "INTEGER DEFAULT 0"],
                ["is_pinned", "INTEGER DEFAULT 0"],
                ["time_added", "INTEGER DEFAULT 0"],
                ["time_downloaded", "INTEGER DEFAULT 0"],
                ["time_accessed", "INTEGER DEFAULT 0"]
            ],
            "indexes": [
                "CREATE UNIQUE INDEX file_optional_key ON file_optional (site_id, inner_path)",
                "CREATE INDEX is_downloaded ON file_optional (is_downloaded)"
            ],
            "schema_changed": 11
        }

        return schema
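    # Illustrative query against the table defined above (a sketch; site_id
    # values come from the site_ids mapping of the content db):
    #   res = content_db.execute(
    #       "SELECT inner_path, peer, is_pinned FROM file_optional"
    #       " WHERE site_id = :site_id AND is_downloaded = 1",
    #       {"site_id": 1}  # hypothetical id
    #   )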
    def initSite(self, site):
        super(ContentDbPlugin, self).initSite(site)
        if self.need_filling:
            self.fillTableFileOptional(site)
        if not self.optional_files_loading:
            gevent.spawn_later(1, self.loadFilesOptional)
            self.optional_files_loading = True

    def checkTables(self):
        changed_tables = super(ContentDbPlugin, self).checkTables()
        if "file_optional" in changed_tables:
            self.need_filling = True
        return changed_tables

    # Load the endings of the optional file paths
    def loadFilesOptional(self):
        s = time.time()
        num = 0
        total = 0
        total_downloaded = 0
        res = content_db.execute("SELECT site_id, inner_path, size, is_downloaded FROM file_optional")
        site_sizes = collections.defaultdict(lambda: collections.defaultdict(int))
        for row in res:
            self.optional_files[row["site_id"]][row["inner_path"][-8:]] = 1
            num += 1

            # Update site size stats
            site_sizes[row["site_id"]]["size_optional"] += row["size"]
            if row["is_downloaded"]:
                site_sizes[row["site_id"]]["optional_downloaded"] += row["size"]

        # Save the site size stats to sites.json settings
        site_ids_reverse = {val: key for key, val in self.site_ids.iteritems()}
        for site_id, stats in site_sizes.iteritems():
            site_address = site_ids_reverse.get(site_id)
            if not site_address:
                self.log.error("Site id not found: %s" % site_id)
                continue
            site = self.sites[site_address]
            site.settings["size_optional"] = stats["size_optional"]
            site.settings["optional_downloaded"] = stats["optional_downloaded"]
            total += stats["size_optional"]
            total_downloaded += stats["optional_downloaded"]

        self.log.debug(
            "Loaded %s optional files: %.2fMB, downloaded: %.2fMB in %.3fs" %
            (num, float(total) / 1024 / 1024, float(total_downloaded) / 1024 / 1024, time.time() - s)
        )

        if self.need_filling and self.getOptionalLimitBytes() >= 0 and self.getOptionalLimitBytes() < total_downloaded:
            limit_bytes = self.getOptionalLimitBytes()
            limit_new = round((float(total_downloaded) / 1024 / 1024 / 1024) * 1.1, 2)  # Downloaded size + 10%
            self.log.debug(
                "First startup after update and the limit is smaller than the downloaded files size (%.2fGB), increasing it from %.2fGB to %.2fGB" %
                (float(total_downloaded) / 1024 / 1024 / 1024, float(limit_bytes) / 1024 / 1024 / 1024, limit_new)
            )
            config.saveValue("optional_limit", limit_new)
            config.optional_limit = str(limit_new)

    # Predict whether a file is optional
    def isOptionalFile(self, site_id, inner_path):
        return self.optional_files[site_id].get(inner_path[-8:])
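    # The prediction above keys on the last 8 characters of the path: cheap,
    # and a false positive only costs an extra db lookup. Sketch with made-up
    # values:
    #   optional_files = {1: {"red.webm": 1}}
    #   "data/videos/sacred.webm"[-8:]  ->  "red.webm"  ->  predicted optional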
    # Fill the file_optional table with the optional files found in sites
    def fillTableFileOptional(self, site):
        s = time.time()
        site_id = self.site_ids.get(site.address)
        if not site_id:
            return False
        cur = self.getCursor()
        cur.execute("BEGIN")
        res = cur.execute("SELECT * FROM content WHERE size_files_optional > 0 AND site_id = %s" % site_id)
        num = 0
        for row in res.fetchall():
            content = site.content_manager.contents[row["inner_path"]]
            try:
                num += self.setContentFilesOptional(site, row["inner_path"], content, cur=cur)
            except Exception as err:
                self.log.error("Error loading %s into file_optional: %s" % (row["inner_path"], err))
        cur.execute("COMMIT")
        cur.close()

        # Set my own files to pinned
        from User import UserManager
        user = UserManager.user_manager.get()
        if not user:
            user = UserManager.user_manager.create()
        auth_address = user.getAuthAddress(site.address)
        self.execute(
            "UPDATE file_optional SET is_pinned = 1 WHERE site_id = :site_id AND inner_path LIKE :inner_path",
            {"site_id": site_id, "inner_path": "%%/%s/%%" % auth_address}
        )

        self.log.debug(
            "Filled file_optional table for %s in %.3fs (loaded: %s, is_pinned: %s)" %
            (site.address, time.time() - s, num, self.cur.cursor.rowcount)
        )
        self.filled[site.address] = True

    def setContentFilesOptional(self, site, content_inner_path, content, cur=None):
        if not cur:
            cur = self
            try:
                cur.execute("BEGIN")
            except Exception as err:
                self.log.warning("Transaction begin error %s %s: %s" % (site, content_inner_path, Debug.formatException(err)))

        num = 0
        site_id = self.site_ids[site.address]
        content_inner_dir = helper.getDirname(content_inner_path)
        for relative_inner_path, file in content.get("files_optional", {}).iteritems():
            file_inner_path = content_inner_dir + relative_inner_path
            hash_id = int(file["sha512"][0:4], 16)
            if hash_id in site.content_manager.hashfield:
                is_downloaded = 1
            else:
                is_downloaded = 0
            if site.address + "/" + content_inner_dir in self.my_optional_files:
                is_pinned = 1
            else:
                is_pinned = 0
            cur.insertOrUpdate("file_optional", {
                "hash_id": hash_id,
                "size": int(file["size"])
            }, {
                "site_id": site_id,
                "inner_path": file_inner_path
            }, oninsert={
                "time_added": int(time.time()),
                "time_downloaded": int(time.time()) if is_downloaded else 0,
                "is_downloaded": is_downloaded,
                "peer": is_downloaded,
                "is_pinned": is_pinned
            })
            self.optional_files[site_id][file_inner_path[-8:]] = 1
            num += 1

        if cur == self:
            try:
                cur.execute("END")
            except Exception as err:
                self.log.warning("Transaction end error %s %s: %s" % (site, content_inner_path, Debug.formatException(err)))
        return num
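    # hash_id sketch: only the first 4 hex digits of the file's sha512 are
    # kept, so it fits in a small integer (0..65535) that can be shared
    # cheaply in peer hashfields:
    #   int("aaaabbbbcccc"[0:4], 16)  ->  43690 (0xaaaa)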
    def setContent(self, site, inner_path, content, size=0):
        super(ContentDbPlugin, self).setContent(site, inner_path, content, size=size)
        old_content = site.content_manager.contents.get(inner_path, {})
        if (not self.need_filling or self.filled.get(site.address)) and ("files_optional" in content or "files_optional" in old_content):
            self.setContentFilesOptional(site, inner_path, content)
            # Check for deleted files
            if old_content:
                old_files = old_content.get("files_optional", {}).keys()
                new_files = content.get("files_optional", {}).keys()
                content_inner_dir = helper.getDirname(inner_path)
                deleted = [content_inner_dir + key for key in old_files if key not in new_files]
                if deleted:
                    site_id = self.site_ids[site.address]
                    self.execute("DELETE FROM file_optional WHERE ?", {"site_id": site_id, "inner_path": deleted})

    def deleteContent(self, site, inner_path):
        content = site.content_manager.contents.get(inner_path)
        if content and "files_optional" in content:
            site_id = self.site_ids[site.address]
            content_inner_dir = helper.getDirname(inner_path)
            optional_inner_paths = [
                content_inner_dir + relative_inner_path
                for relative_inner_path in content.get("files_optional", {}).keys()
            ]
            self.execute("DELETE FROM file_optional WHERE ?", {"site_id": site_id, "inner_path": optional_inner_paths})
        super(ContentDbPlugin, self).deleteContent(site, inner_path)

    def updatePeerNumbers(self):
        s = time.time()
        num_file = 0
        num_updated = 0
        num_site = 0
        for site in self.sites.values():
            if not site.content_manager.has_optional_files:
                continue
            if not site.settings["serving"]:
                continue
            has_updated_hashfield = next((
                peer
                for peer in site.peers.itervalues()
                if peer.has_hashfield and peer.hashfield.time_changed > self.time_peer_numbers_updated
            ), None)

            if not has_updated_hashfield and site.content_manager.hashfield.time_changed < self.time_peer_numbers_updated:
                continue

            hashfield_peers = itertools.chain.from_iterable(
                peer.hashfield.storage
                for peer in site.peers.itervalues()
                if peer.has_hashfield
            )
            peer_nums = collections.Counter(
                itertools.chain(
                    hashfield_peers,
                    site.content_manager.hashfield
                )
            )

            site_id = self.site_ids[site.address]
            if not site_id:
                continue

            res = self.execute("SELECT file_id, hash_id, peer FROM file_optional WHERE ?", {"site_id": site_id})
            updates = {}
            for row in res:
                peer_num = peer_nums.get(row["hash_id"], 0)
                if peer_num != row["peer"]:
                    updates[row["file_id"]] = peer_num

            self.execute("BEGIN")
            for file_id, peer_num in updates.iteritems():
                self.execute("UPDATE file_optional SET peer = ? WHERE file_id = ?", (peer_num, file_id))
            self.execute("END")

            num_updated += len(updates)
            num_file += len(peer_nums)
            num_site += 1

        self.time_peer_numbers_updated = time.time()
        self.log.debug("%s/%s peer numbers for %s sites updated in %.3fs" % (num_updated, num_file, num_site, time.time() - s))

    def queryDeletableFiles(self):
        # First return the files that have at least 10 seeders and have not been accessed recently
        query = """
            SELECT * FROM file_optional
            WHERE peer > 10 AND %s
            ORDER BY time_accessed < %s DESC, uploaded / size
        """ % (self.getOptionalUsedWhere(), int(time.time() - 60 * 60 * 7))
        limit_start = 0
        while 1:
            num = 0
            res = self.execute("%s LIMIT %s, 50" % (query, limit_start))
            for row in res:
                yield row
                num += 1
            if num < 50:
                break
            limit_start += 50

        self.log.debug("queryDeletableFiles returning less-seeded files")

        # Then return the files with fewer seeders, preferring the ones not accessed recently
        query = """
            SELECT * FROM file_optional
            WHERE peer <= 10 AND %s
            ORDER BY peer DESC, time_accessed < %s DESC, uploaded / size
        """ % (self.getOptionalUsedWhere(), int(time.time() - 60 * 60 * 7))
        limit_start = 0
        while 1:
            num = 0
            res = self.execute("%s LIMIT %s, 50" % (query, limit_start))
            for row in res:
                yield row
                num += 1
            if num < 50:
                break
            limit_start += 50

        self.log.debug("queryDeletableFiles returning everything")

        # At the end return all files
        query = """
            SELECT * FROM file_optional
            WHERE peer <= 10 AND %s
            ORDER BY peer DESC, time_accessed, uploaded / size
        """ % self.getOptionalUsedWhere()
        limit_start = 0
        while 1:
            num = 0
            res = self.execute("%s LIMIT %s, 50" % (query, limit_start))
            for row in res:
                yield row
                num += 1
            if num < 50:
                break
            limit_start += 50
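    # The generator above drains deletion candidates in three passes of
    # decreasing "safety": well-seeded files first (peer > 10), then poorly
    # seeded ones, then everything matching getOptionalUsedWhere(). Each pass
    # pages 50 rows at a time (LIMIT offset, 50), so the caller can stop as
    # soon as enough space has been freed.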
    def getOptionalLimitBytes(self):
        if config.optional_limit.endswith("%"):
            limit_percent = float(re.sub("[^0-9.]", "", config.optional_limit))
            limit_bytes = helper.getFreeSpace() * (limit_percent / 100)
        else:
            limit_bytes = float(re.sub("[^0-9.]", "", config.optional_limit)) * 1024 * 1024 * 1024
        return limit_bytes
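    # Worked example (hypothetical numbers): with --optional_limit "10%" and
    # 200 GB of free space reported by helper.getFreeSpace(), the limit is
    #   200 * 1024 ** 3 * (10.0 / 100) = 21474836480 bytes (20 GB).
    # With --optional_limit "5" (no "%") it is a fixed 5 * 1024 ** 3 bytes.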
    def getOptionalUsedWhere(self):
        maxsize = config.optional_limit_exclude_minsize * 1024 * 1024
        query = "is_downloaded = 1 AND is_pinned = 0 AND size < %s" % maxsize

        # Don't delete optional files from owned sites
        my_site_ids = []
        for address, site in self.sites.items():
            if site.settings["own"]:
                my_site_ids.append(str(self.site_ids[address]))

        if my_site_ids:
            query += " AND site_id NOT IN (%s)" % ", ".join(my_site_ids)
        return query

    def getOptionalUsedBytes(self):
        size = self.execute("SELECT SUM(size) FROM file_optional WHERE %s" % self.getOptionalUsedWhere()).fetchone()[0]
        if not size:
            size = 0
        return size

    def getOptionalNeedDelete(self, size):
        if config.optional_limit.endswith("%"):
            limit_percent = float(re.sub("[^0-9.]", "", config.optional_limit))
            need_delete = size - ((helper.getFreeSpace() + size) * (limit_percent / 100))
        else:
            need_delete = size - self.getOptionalLimitBytes()
        return need_delete
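    # need_delete sketch with a "10%" limit (hypothetical numbers): if
    # optional files currently use 30 GB while 170 GB is free, the allowance
    # is (170 + 30) * 0.10 = 20 GB, so need_delete = 30 - 20 = 10 GB. The used
    # size is added back to the free space first, so the percentage is taken
    # from the disk space that would be free with no optional files at all.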
    def checkOptionalLimit(self, limit=None):
        if not limit:
            limit = self.getOptionalLimitBytes()

        if limit < 0:
            self.log.debug("Invalid limit for optional files: %s" % limit)
            return False

        size = self.getOptionalUsedBytes()

        need_delete = self.getOptionalNeedDelete(size)

        self.log.debug(
            "Optional size: %.1fMB/%.1fMB, need to delete: %.1fMB" %
            (float(size) / 1024 / 1024, float(limit) / 1024 / 1024, float(need_delete) / 1024 / 1024)
        )
        if need_delete <= 0:
            return False

        self.updatePeerNumbers()

        site_ids_reverse = {val: key for key, val in self.site_ids.iteritems()}
        deleted_file_ids = []
        for row in self.queryDeletableFiles():
            site_address = site_ids_reverse.get(row["site_id"])
            site = self.sites.get(site_address)
            if not site:
                self.log.error("No site found for id: %s" % row["site_id"])
                continue
            site.log.debug("Deleting %s, %.3f MB left" % (row["inner_path"], float(need_delete) / 1024 / 1024))
            deleted_file_ids.append(row["file_id"])
            try:
                site.content_manager.optionalRemoved(row["inner_path"], row["hash_id"], row["size"])
                site.storage.delete(row["inner_path"])
                need_delete -= row["size"]
            except Exception as err:
                site.log.error("Error deleting %s: %s" % (row["inner_path"], err))

            if need_delete <= 0:
                break

        cur = self.getCursor()
        cur.execute("BEGIN")
        for file_id in deleted_file_ids:
            cur.execute("UPDATE file_optional SET is_downloaded = 0, is_pinned = 0, peer = peer - 1 WHERE ?", {"file_id": file_id})
        cur.execute("COMMIT")
        cur.close()
@ -1,229 +0,0 @@
import time
import re
import collections

import gevent

from util import helper
from Plugin import PluginManager
import ContentDbPlugin


# We can only import plugin host classes after the plugins are loaded
@PluginManager.afterLoad
def importPluginnedClasses():
    global config
    from Config import config


def processAccessLog():
    if access_log:
        content_db = ContentDbPlugin.content_db
        now = int(time.time())
        num = 0
        for site_id in access_log:
            content_db.execute(
                "UPDATE file_optional SET time_accessed = %s WHERE ?" % now,
                {"site_id": site_id, "inner_path": access_log[site_id].keys()}
            )
            num += len(access_log[site_id])
        access_log.clear()


def processRequestLog():
    if request_log:
        content_db = ContentDbPlugin.content_db
        cur = content_db.getCursor()
        num = 0
        cur.execute("BEGIN")
        for site_id in request_log:
            for inner_path, uploaded in request_log[site_id].iteritems():
                content_db.execute(
                    "UPDATE file_optional SET uploaded = uploaded + %s WHERE ?" % uploaded,
                    {"site_id": site_id, "inner_path": inner_path}
                )
                num += 1
        cur.execute("END")
        request_log.clear()


if "access_log" not in locals().keys():  # To keep them between module reloads
    access_log = collections.defaultdict(dict)  # {site_id: {inner_path1: 1, inner_path2: 1...}}
    request_log = collections.defaultdict(lambda: collections.defaultdict(int))  # {site_id: {inner_path1: bytes_sent, inner_path2: bytes_sent...}}
    helper.timer(61, processAccessLog)
    helper.timer(60, processRequestLog)
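# Sketch of how the in-memory logs fill up between the timer flushes (the id
# and paths are made up):
#   access_log[42]["data/users/content.json"] = 1      # file was served
#   request_log[42]["data/video.mp4"] += 512 * 1024    # bytes sent to a peer
# Roughly once a minute processAccessLog()/processRequestLog() batch these
# into the file_optional table and clear the dicts.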
@PluginManager.registerTo("ContentManager")
class ContentManagerPlugin(object):
    def __init__(self, *args, **kwargs):
        self.cache_is_pinned = {}
        super(ContentManagerPlugin, self).__init__(*args, **kwargs)

    def optionalDownloaded(self, inner_path, hash_id, size=None, own=False):
        if "|" in inner_path:  # Big file piece
            file_inner_path, file_range = inner_path.split("|")
        else:
            file_inner_path = inner_path

        self.contents.db.executeDelayed(
            "UPDATE file_optional SET time_downloaded = :now, is_downloaded = 1, peer = peer + 1 WHERE site_id = :site_id AND inner_path = :inner_path AND is_downloaded = 0",
            {"now": int(time.time()), "site_id": self.contents.db.site_ids[self.site.address], "inner_path": file_inner_path}
        )

        return super(ContentManagerPlugin, self).optionalDownloaded(inner_path, hash_id, size, own)

    def optionalRemoved(self, inner_path, hash_id, size=None):
        self.contents.db.execute(
            "UPDATE file_optional SET is_downloaded = 0, is_pinned = 0, peer = peer - 1 WHERE site_id = :site_id AND inner_path = :inner_path AND is_downloaded = 1",
            {"site_id": self.contents.db.site_ids[self.site.address], "inner_path": inner_path}
        )

        if self.contents.db.cur.cursor.rowcount > 0:
            back = super(ContentManagerPlugin, self).optionalRemoved(inner_path, hash_id, size)
            # Re-add to the hashfield if we have another file with the same hash_id
            if self.isDownloaded(hash_id=hash_id, force_check_db=True):
                self.hashfield.appendHashId(hash_id)
            return back

    def isDownloaded(self, inner_path=None, hash_id=None, force_check_db=False):
        if hash_id and not force_check_db and hash_id not in self.hashfield:
            return False

        if inner_path:
            res = self.contents.db.execute(
                "SELECT is_downloaded FROM file_optional WHERE site_id = :site_id AND inner_path = :inner_path LIMIT 1",
                {"site_id": self.contents.db.site_ids[self.site.address], "inner_path": inner_path}
            )
        else:
            res = self.contents.db.execute(
                "SELECT is_downloaded FROM file_optional WHERE site_id = :site_id AND hash_id = :hash_id AND is_downloaded = 1 LIMIT 1",
                {"site_id": self.contents.db.site_ids[self.site.address], "hash_id": hash_id}
            )
        row = res.fetchone()
        if row and row[0]:
            return True
        else:
            return False

    def isPinned(self, inner_path):
        if inner_path in self.cache_is_pinned:
            self.site.log.debug("Cached is pinned: %s" % inner_path)
            return self.cache_is_pinned[inner_path]

        res = self.contents.db.execute(
            "SELECT is_pinned FROM file_optional WHERE site_id = :site_id AND inner_path = :inner_path LIMIT 1",
            {"site_id": self.contents.db.site_ids[self.site.address], "inner_path": inner_path}
        )
        row = res.fetchone()

        if row and row[0]:
            is_pinned = True
        else:
            is_pinned = False

        self.cache_is_pinned[inner_path] = is_pinned
        self.site.log.debug("Cache set is pinned: %s %s" % (inner_path, is_pinned))

        return is_pinned

    def setPin(self, inner_path, is_pinned):
        content_db = self.contents.db
        site_id = content_db.site_ids[self.site.address]
        content_db.execute("UPDATE file_optional SET is_pinned = %d WHERE ?" % is_pinned, {"site_id": site_id, "inner_path": inner_path})
        self.cache_is_pinned = {}

    def optionalDelete(self, inner_path):
        if self.isPinned(inner_path):
            self.site.log.debug("Skip deleting pinned optional file: %s" % inner_path)
            return False
        else:
            return super(ContentManagerPlugin, self).optionalDelete(inner_path)


@PluginManager.registerTo("WorkerManager")
class WorkerManagerPlugin(object):
    def doneTask(self, task):
        super(WorkerManagerPlugin, self).doneTask(task)

        if task["optional_hash_id"] and not self.tasks:  # Execute the delayed queries immediately after the tasks are finished
            ContentDbPlugin.content_db.processDelayed()


@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    def parsePath(self, path):
        global access_log
        path_parts = super(UiRequestPlugin, self).parsePath(path)
        if path_parts:
            site_id = ContentDbPlugin.content_db.site_ids.get(path_parts["request_address"])
            if site_id:
                if ContentDbPlugin.content_db.isOptionalFile(site_id, path_parts["inner_path"]):
                    access_log[site_id][path_parts["inner_path"]] = 1
        return path_parts


@PluginManager.registerTo("FileRequest")
class FileRequestPlugin(object):
    def actionGetFile(self, params):
        stats = super(FileRequestPlugin, self).actionGetFile(params)
        self.recordFileRequest(params["site"], params["inner_path"], stats)
        return stats

    def actionStreamFile(self, params):
        stats = super(FileRequestPlugin, self).actionStreamFile(params)
        self.recordFileRequest(params["site"], params["inner_path"], stats)
        return stats

    def recordFileRequest(self, site_address, inner_path, stats):
        if not stats:
            # Only track the last request of each file
            return False
        site_id = ContentDbPlugin.content_db.site_ids[site_address]
        if site_id and ContentDbPlugin.content_db.isOptionalFile(site_id, inner_path):
            request_log[site_id][inner_path] += stats["bytes_sent"]


@PluginManager.registerTo("Site")
class SitePlugin(object):
    def isDownloadable(self, inner_path):
        is_downloadable = super(SitePlugin, self).isDownloadable(inner_path)
        if is_downloadable:
            return is_downloadable

        for path in self.settings.get("optional_help", {}).iterkeys():
            if inner_path.startswith(path):
                return True

        return False

    def fileForgot(self, inner_path):
        if "|" in inner_path and self.content_manager.isPinned(re.sub(r"\|.*", "", inner_path)):
            self.log.debug("File %s is pinned, no fileForgot" % inner_path)
            return False
        else:
            return super(SitePlugin, self).fileForgot(inner_path)

    def fileDone(self, inner_path):
        if "|" in inner_path and self.bad_files.get(inner_path, 0) > 5:  # Idle optional file piece done
            inner_path_file = re.sub(r"\|.*", "", inner_path)
            num_changed = 0
            for key, val in self.bad_files.items():
                if key.startswith(inner_path_file) and val > 1:
                    self.bad_files[key] = 1
                    num_changed += 1
            self.log.debug("Idle optional file piece done, changed retry count of %s pieces." % num_changed)
            if num_changed:
                gevent.spawn(self.retryBadFiles)

        return super(SitePlugin, self).fileDone(inner_path)


@PluginManager.registerTo("ConfigPlugin")
class ConfigPlugin(object):
    def createArguments(self):
        group = self.parser.add_argument_group("OptionalManager plugin")
        group.add_argument('--optional_limit', help='Limit the total size of optional files', default="10%", metavar="GB or free space %")
        group.add_argument('--optional_limit_exclude_minsize', help='Exclude files larger than this limit from the optional size limit calculation', default=20, metavar="MB", type=int)

        return super(ConfigPlugin, self).createArguments()
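    # Example invocation (a sketch using the flags defined above with
    # zeronet.py, the project's usual entry point):
    #   python zeronet.py --optional_limit 25% --optional_limit_exclude_minsize 50
    # keeps optional files under 25% of the free disk space and leaves files
    # larger than 50 MB out of both the size accounting and auto-deletion.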
@ -1,148 +0,0 @@
import hashlib
import os
import copy
import json
from cStringIO import StringIO

import pytest

from OptionalManager import OptionalManagerPlugin
from util import helper
from Crypt import CryptBitcoin


@pytest.mark.usefixtures("resetSettings")
class TestOptionalManager:
    def testDbFill(self, site):
        contents = site.content_manager.contents
        assert len(site.content_manager.hashfield) > 0
        assert contents.db.execute("SELECT COUNT(*) FROM file_optional WHERE is_downloaded = 1").fetchone()[0] == len(site.content_manager.hashfield)

    def testSetContent(self, site):
        contents = site.content_manager.contents

        # Add a new file
        new_content = copy.deepcopy(contents["content.json"])
        new_content["files_optional"]["testfile"] = {
            "size": 1234,
            "sha512": "aaaabbbbcccc"
        }
        num_optional_files_before = contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0]
        contents["content.json"] = new_content
        assert contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0] > num_optional_files_before

        # Remove the file
        new_content = copy.deepcopy(contents["content.json"])
        del new_content["files_optional"]["testfile"]
        num_optional_files_before = contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0]
        contents["content.json"] = new_content
        assert contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0] < num_optional_files_before

    def testDeleteContent(self, site):
        contents = site.content_manager.contents
        num_optional_files_before = contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0]
        del contents["content.json"]
        assert contents.db.execute("SELECT COUNT(*) FROM file_optional").fetchone()[0] < num_optional_files_before

    def testVerifyFiles(self, site):
        contents = site.content_manager.contents

        # Add a new file
        new_content = copy.deepcopy(contents["content.json"])
        new_content["files_optional"]["testfile"] = {
            "size": 1234,
            "sha512": "aaaabbbbcccc"
        }
        contents["content.json"] = new_content
        file_row = contents.db.execute("SELECT * FROM file_optional WHERE inner_path = 'testfile'").fetchone()
        assert not file_row["is_downloaded"]

        # Write the file from outside of ZeroNet
        site.storage.open("testfile", "wb").write("A" * 1234)  # For quick_check only the file size matters, not the hash

        hashfield_len_before = len(site.content_manager.hashfield)
        site.storage.verifyFiles(quick_check=True)
        assert len(site.content_manager.hashfield) == hashfield_len_before + 1

        file_row = contents.db.execute("SELECT * FROM file_optional WHERE inner_path = 'testfile'").fetchone()
        assert file_row["is_downloaded"]

        # Delete the file outside of ZeroNet
        site.storage.delete("testfile")
        site.storage.verifyFiles(quick_check=True)
        file_row = contents.db.execute("SELECT * FROM file_optional WHERE inner_path = 'testfile'").fetchone()
        assert not file_row["is_downloaded"]

    def testVerifyFilesSameHashId(self, site):
        contents = site.content_manager.contents

        new_content = copy.deepcopy(contents["content.json"])

        # Add two files with the same hash_id (first 4 characters)
        new_content["files_optional"]["testfile1"] = {
            "size": 1234,
            "sha512": "aaaabbbbcccc"
        }
        new_content["files_optional"]["testfile2"] = {
            "size": 2345,
            "sha512": "aaaabbbbdddd"
        }
        contents["content.json"] = new_content

        assert site.content_manager.hashfield.getHashId("aaaabbbbcccc") == site.content_manager.hashfield.getHashId("aaaabbbbdddd")

        # Write the files from outside of ZeroNet (for quick_check only the file size matters, not the hash)
        site.storage.open("testfile1", "wb").write("A" * 1234)
        site.storage.open("testfile2", "wb").write("B" * 2345)

        site.storage.verifyFiles(quick_check=True)

        # Make sure that both are downloaded
        assert site.content_manager.isDownloaded("testfile1")
        assert site.content_manager.isDownloaded("testfile2")
        assert site.content_manager.hashfield.getHashId("aaaabbbbcccc") in site.content_manager.hashfield

        # Delete one of the files
        site.storage.delete("testfile1")
        site.storage.verifyFiles(quick_check=True)
        assert not site.content_manager.isDownloaded("testfile1")
        assert site.content_manager.isDownloaded("testfile2")
        assert site.content_manager.hashfield.getHashId("aaaabbbbdddd") in site.content_manager.hashfield

    def testIsPinned(self, site):
        assert not site.content_manager.isPinned("data/img/zerotalk-upvote.png")
        site.content_manager.setPin("data/img/zerotalk-upvote.png", True)
        assert site.content_manager.isPinned("data/img/zerotalk-upvote.png")

        assert len(site.content_manager.cache_is_pinned) == 1
        site.content_manager.cache_is_pinned = {}
        assert site.content_manager.isPinned("data/img/zerotalk-upvote.png")

    def testBigfilePieceReset(self, site):
        site.bad_files = {
            "data/fake_bigfile.mp4|0-1024": 10,
            "data/fake_bigfile.mp4|1024-2048": 10,
            "data/fake_bigfile.mp4|2048-3064": 10
        }
        site.onFileDone("data/fake_bigfile.mp4|0-1024")
        assert site.bad_files["data/fake_bigfile.mp4|1024-2048"] == 1
        assert site.bad_files["data/fake_bigfile.mp4|2048-3064"] == 1

    def testOptionalDelete(self, site):
        privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"
        contents = site.content_manager.contents

        site.content_manager.setPin("data/img/zerotalk-upvote.png", True)
        site.content_manager.setPin("data/img/zeroid.png", False)
        new_content = copy.deepcopy(contents["content.json"])
        del new_content["files_optional"]["data/img/zerotalk-upvote.png"]
        del new_content["files_optional"]["data/img/zeroid.png"]

        assert site.storage.isFile("data/img/zerotalk-upvote.png")
        assert site.storage.isFile("data/img/zeroid.png")

        site.storage.writeJson("content.json", new_content)
        site.content_manager.loadContent("content.json", force=True)

        assert not site.storage.isFile("data/img/zeroid.png")
        assert site.storage.isFile("data/img/zerotalk-upvote.png")
@ -1 +0,0 @@
from src.Test.conftest import *

@ -1,5 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6
markers =
    webtest: mark a test as a webtest.
@ -1,379 +0,0 @@
import re
import time
import cgi

import gevent

from Plugin import PluginManager
from Config import config
from util import helper
from Translate import Translate

if "_" not in locals():
    _ = Translate("plugins/OptionalManager/languages/")

bigfile_sha512_cache = {}


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def __init__(self, *args, **kwargs):
        self.time_peer_numbers_updated = 0
        super(UiWebsocketPlugin, self).__init__(*args, **kwargs)

    def actionSiteSign(self, to, privatekey=None, inner_path="content.json", *args, **kwargs):
        # Add the file to content.db and set it as pinned
        content_db = self.site.content_manager.contents.db
        content_inner_dir = helper.getDirname(inner_path)
        content_db.my_optional_files[self.site.address + "/" + content_inner_dir] = time.time()
        if len(content_db.my_optional_files) > 50:  # Keep only the last 50
            oldest_key = min(
                content_db.my_optional_files.iterkeys(),
                key=(lambda key: content_db.my_optional_files[key])
            )
            del content_db.my_optional_files[oldest_key]

        return super(UiWebsocketPlugin, self).actionSiteSign(to, privatekey, inner_path, *args, **kwargs)

    def updatePeerNumbers(self):
        self.site.updateHashfield()
        content_db = self.site.content_manager.contents.db
        content_db.updatePeerNumbers()
        self.site.updateWebsocket(peernumber_updated=True)

    def addBigfileInfo(self, row):
        global bigfile_sha512_cache

        content_db = self.site.content_manager.contents.db
        site = content_db.sites[row["address"]]
        if not site.settings.get("has_bigfile"):
            return False

        file_key = row["address"] + "/" + row["inner_path"]
        sha512 = bigfile_sha512_cache.get(file_key)
        file_info = None
        if not sha512:
            file_info = site.content_manager.getFileInfo(row["inner_path"])
            if not file_info or not file_info.get("piece_size"):
                return False
            sha512 = file_info["sha512"]
            bigfile_sha512_cache[file_key] = sha512

        if sha512 in site.storage.piecefields:
            piecefield = site.storage.piecefields[sha512].tostring()
        else:
            piecefield = None

        if piecefield:
            row["pieces"] = len(piecefield)
            row["pieces_downloaded"] = piecefield.count("1")
            row["downloaded_percent"] = 100 * row["pieces_downloaded"] / row["pieces"]
            if row["pieces_downloaded"]:
                if not file_info:
                    file_info = site.content_manager.getFileInfo(row["inner_path"])
                row["bytes_downloaded"] = row["pieces_downloaded"] * file_info.get("piece_size", 0)
            else:
                row["bytes_downloaded"] = 0

            row["is_downloading"] = bool(next((inner_path for inner_path in site.bad_files if inner_path.startswith(row["inner_path"])), False))

        # Add leech / seed stats
        row["peer_seed"] = 0
        row["peer_leech"] = 0
        for peer in site.peers.itervalues():
            if not peer.time_piecefields_updated or sha512 not in peer.piecefields:
                continue
            peer_piecefield = peer.piecefields[sha512].tostring()
            if not peer_piecefield:
                continue
            if peer_piecefield == "1" * len(peer_piecefield):
                row["peer_seed"] += 1
            else:
                row["peer_leech"] += 1

        # Add myself
        if piecefield:
            if row["pieces_downloaded"] == row["pieces"]:
                row["peer_seed"] += 1
            else:
                row["peer_leech"] += 1

        return True

    # Optional file functions

    def actionOptionalFileList(self, to, address=None, orderby="time_downloaded DESC", limit=10, filter="downloaded"):
        if not address:
            address = self.site.address

        # Update the peer numbers if necessary
        content_db = self.site.content_manager.contents.db
        if time.time() - content_db.time_peer_numbers_updated > 60 * 1 and time.time() - self.time_peer_numbers_updated > 60 * 5:
            # Start in a new thread to avoid blocking
            self.time_peer_numbers_updated = time.time()
            gevent.spawn(self.updatePeerNumbers)

        if address == "all" and "ADMIN" not in self.permissions:
            return self.response(to, {"error": "Forbidden"})

        if not self.hasSitePermission(address):
            return self.response(to, {"error": "Forbidden"})

        if not all([re.match("^[a-z_*/+-]+( DESC| ASC|)$", part.strip()) for part in orderby.split(",")]):
            return self.response(to, "Invalid order_by")

        if type(limit) != int:
            return self.response(to, "Invalid limit")

        back = []
        content_db = self.site.content_manager.contents.db

        wheres = {}
        wheres_raw = []
        if "bigfile" in filter:
            wheres["size >"] = 1024 * 1024 * 10
        if "downloaded" in filter:
            wheres_raw.append("(is_downloaded = 1 OR is_pinned = 1)")
        if "pinned" in filter:
            wheres["is_pinned"] = 1

        if address == "all":
            join = "LEFT JOIN site USING (site_id)"
        else:
            wheres["site_id"] = content_db.site_ids[address]
            join = ""

        if wheres_raw:
            query_wheres_raw = "AND" + " AND ".join(wheres_raw)
        else:
            query_wheres_raw = ""

        query = "SELECT * FROM file_optional %s WHERE ? %s ORDER BY %s LIMIT %s" % (join, query_wheres_raw, orderby, limit)

        for row in content_db.execute(query, wheres):
            row = dict(row)
            if address != "all":
                row["address"] = address

            if row["size"] > 1024 * 1024:
                has_info = self.addBigfileInfo(row)
            else:
                has_info = False

            if not has_info:
                if row["is_downloaded"]:
                    row["bytes_downloaded"] = row["size"]
                    row["downloaded_percent"] = 100
                else:
                    row["bytes_downloaded"] = 0
                    row["downloaded_percent"] = 0

            back.append(row)
        self.response(to, back)

    def actionOptionalFileInfo(self, to, inner_path):
        content_db = self.site.content_manager.contents.db
        site_id = content_db.site_ids[self.site.address]

        # Update the peer numbers if necessary
        if time.time() - content_db.time_peer_numbers_updated > 60 * 1 and time.time() - self.time_peer_numbers_updated > 60 * 5:
if time.time() - content_db.time_peer_numbers_updated > 60 * 1 and time.time() - self.time_peer_numbers_updated > 60 * 5:
|
||||
# Start in new thread to avoid blocking
|
||||
self.time_peer_numbers_updated = time.time()
|
||||
gevent.spawn(self.updatePeerNumbers)
|
||||
|
||||
query = "SELECT * FROM file_optional WHERE site_id = :site_id AND inner_path = :inner_path LIMIT 1"
|
||||
res = content_db.execute(query, {"site_id": site_id, "inner_path": inner_path})
|
||||
row = next(res, None)
|
||||
if row:
|
||||
row = dict(row)
|
||||
if row["size"] > 1024 * 1024:
|
||||
row["address"] = self.site.address
|
||||
self.addBigfileInfo(row)
|
||||
self.response(to, row)
|
||||
else:
|
||||
self.response(to, None)
|
||||
|
||||
def setPin(self, inner_path, is_pinned, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return {"error": "Forbidden"}
|
||||
|
||||
site = self.server.sites[address]
|
||||
site.content_manager.setPin(inner_path, is_pinned)
|
||||
|
||||
return "ok"
|
||||
|
||||
def actionOptionalFilePin(self, to, inner_path, address=None):
|
||||
if type(inner_path) is not list:
|
||||
inner_path = [inner_path]
|
||||
back = self.setPin(inner_path, 1, address)
|
||||
num_file = len(inner_path)
|
||||
if back == "ok":
|
||||
if num_file == 1:
|
||||
self.cmd("notification", ["done", _["Pinned %s"] % cgi.escape(helper.getFilename(inner_path[0])), 5000])
|
||||
else:
|
||||
self.cmd("notification", ["done", _["Pinned %s files"] % num_file, 5000])
|
||||
self.response(to, back)
|
||||
|
||||
def actionOptionalFileUnpin(self, to, inner_path, address=None):
|
||||
if type(inner_path) is not list:
|
||||
inner_path = [inner_path]
|
||||
back = self.setPin(inner_path, 0, address)
|
||||
num_file = len(inner_path)
|
||||
if back == "ok":
|
||||
if num_file == 1:
|
||||
self.cmd("notification", ["done", _["Removed pin from %s"] % cgi.escape(helper.getFilename(inner_path[0])), 5000])
|
||||
else:
|
||||
self.cmd("notification", ["done", _["Removed pin from %s files"] % num_file, 5000])
|
||||
self.response(to, back)
|
||||
|
||||
def actionOptionalFileDelete(self, to, inner_path, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
|
||||
site = self.server.sites[address]
|
||||
|
||||
content_db = site.content_manager.contents.db
|
||||
site_id = content_db.site_ids[site.address]
|
||||
|
||||
res = content_db.execute("SELECT * FROM file_optional WHERE ? LIMIT 1", {"site_id": site_id, "inner_path": inner_path, "is_downloaded": 1})
|
||||
row = next(res, None)
|
||||
|
||||
if not row:
|
||||
return self.response(to, {"error": "Not found in content.db"})
|
||||
|
||||
removed = site.content_manager.optionalRemoved(inner_path, row["hash_id"], row["size"])
|
||||
# if not removed:
|
||||
# return self.response(to, {"error": "Not found in hash_id: %s" % row["hash_id"]})
|
||||
|
||||
content_db.execute("UPDATE file_optional SET is_downloaded = 0, is_pinned = 0, peer = peer - 1 WHERE ?", {"site_id": site_id, "inner_path": inner_path})
|
||||
|
||||
try:
|
||||
site.storage.delete(inner_path)
|
||||
except Exception as err:
|
||||
return self.response(to, {"error": "File delete error: %s" % err})
|
||||
site.updateWebsocket(file_delete=inner_path)
|
||||
|
||||
if inner_path in site.content_manager.cache_is_pinned:
|
||||
site.content_manager.cache_is_pinned = {}
|
||||
|
||||
self.response(to, "ok")
|
||||
|
||||
# Limit functions
|
||||
|
||||
def actionOptionalLimitStats(self, to):
|
||||
if "ADMIN" not in self.site.settings["permissions"]:
|
||||
return self.response(to, "Forbidden")
|
||||
|
||||
back = {}
|
||||
back["limit"] = config.optional_limit
|
||||
back["used"] = self.site.content_manager.contents.db.getOptionalUsedBytes()
|
||||
back["free"] = helper.getFreeSpace()
|
||||
|
||||
self.response(to, back)
|
||||
|
||||
def actionOptionalLimitSet(self, to, limit):
|
||||
if "ADMIN" not in self.site.settings["permissions"]:
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
config.optional_limit = re.sub("\.0+$", "", limit) # Remove unnecessary digits from end
|
||||
config.saveValue("optional_limit", limit)
|
||||
self.response(to, "ok")
|
||||
|
||||
# Distribute help functions
|
||||
|
||||
def actionOptionalHelpList(self, to, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
|
||||
site = self.server.sites[address]
|
||||
|
||||
self.response(to, site.settings.get("optional_help", {}))
|
||||
|
||||
def actionOptionalHelp(self, to, directory, title, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
|
||||
site = self.server.sites[address]
|
||||
content_db = site.content_manager.contents.db
|
||||
site_id = content_db.site_ids[address]
|
||||
|
||||
if "optional_help" not in site.settings:
|
||||
site.settings["optional_help"] = {}
|
||||
|
||||
stats = content_db.execute(
|
||||
"SELECT COUNT(*) AS num, SUM(size) AS size FROM file_optional WHERE site_id = :site_id AND inner_path LIKE :inner_path",
|
||||
{"site_id": site_id, "inner_path": directory + "%"}
|
||||
).fetchone()
|
||||
stats = dict(stats)
|
||||
|
||||
if not stats["size"]:
|
||||
stats["size"] = 0
|
||||
if not stats["num"]:
|
||||
stats["num"] = 0
|
||||
|
||||
self.cmd("notification", [
|
||||
"done",
|
||||
_["You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>"] %
|
||||
(cgi.escape(title), cgi.escape(directory)),
|
||||
10000
|
||||
])
|
||||
|
||||
site.settings["optional_help"][directory] = title
|
||||
|
||||
self.response(to, dict(stats))
|
||||
|
||||
def actionOptionalHelpRemove(self, to, directory, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
|
||||
site = self.server.sites[address]
|
||||
|
||||
try:
|
||||
del site.settings["optional_help"][directory]
|
||||
self.response(to, "ok")
|
||||
except Exception:
|
||||
self.response(to, {"error": "Not found"})
|
||||
|
||||
def cbOptionalHelpAll(self, to, site, value):
|
||||
site.settings["autodownloadoptional"] = value
|
||||
self.response(to, value)
|
||||
|
||||
def actionOptionalHelpAll(self, to, value, address=None):
|
||||
if not address:
|
||||
address = self.site.address
|
||||
|
||||
if not self.hasSitePermission(address):
|
||||
return self.response(to, {"error": "Forbidden"})
|
||||
|
||||
site = self.server.sites[address]
|
||||
|
||||
if value:
|
||||
if "ADMIN" in self.site.settings["permissions"]:
|
||||
self.cbOptionalHelpAll(to, site, True)
|
||||
else:
|
||||
site_title = site.content_manager.contents["content.json"].get("title", address)
|
||||
self.cmd(
|
||||
"confirm",
|
||||
[
|
||||
_["Help distribute all new optional files on site <b>%s</b>"] % cgi.escape(site_title),
|
||||
_["Yes, I want to help!"]
|
||||
],
|
||||
lambda (res): self.cbOptionalHelpAll(to, site, True)
|
||||
)
|
||||
else:
|
||||
site.settings["autodownloadoptional"] = False
|
||||
self.response(to, False)
|
|
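Note on the py3-latest side of this comparison: the file above is Python 2 only (dict.iterkeys(), dict.itervalues(), the tuple-parameter lambda in actionOptionalHelpAll, cgi.escape). A minimal runnable Python 3 sketch of the oldest-entry eviction from actionSiteSign, with stand-in data; the branch's actual replacement code may differ:

import time

# Stand-in for content_db.my_optional_files
my_optional_files = {"site%d/content.json" % i: time.time() + i for i in range(60)}

if len(my_optional_files) > 50:  # Keep only last 50
    # py2: min(my_optional_files.iterkeys(), key=...); in py3 iterating the
    # dict yields its keys, and dict.get serves directly as the sort key.
    oldest_key = min(my_optional_files, key=my_optional_files.get)
    del my_optional_files[oldest_key]

# (Other py3 equivalents: cgi.escape -> html.escape, "lambda (res):" -> "lambda res:")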
@ -1 +0,0 @@
import OptionalManagerPlugin
@ -1,7 +0,0 @@
{
    "Pinned %s files": "Archivos %s fijados",
    "Removed pin from %s files": "Archivos %s que no estan fijados",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "Tu empezaste a ayudar a distribuir <b>%s</b>.<br><small>Directorio: %s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "Ayude a distribuir todos los archivos opcionales en el sitio <b>%s</b>",
    "Yes, I want to help!": "¡Si, yo quiero ayudar!"
}
@ -1,7 +0,0 @@
{
    "Pinned %s files": "Fichiers %s épinglés",
    "Removed pin from %s files": "Fichiers %s ne sont plus épinglés",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "Vous avez commencé à aider à distribuer <b>%s</b>.<br><small>Dossier : %s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "Aider à distribuer tous les fichiers optionnels du site <b>%s</b>",
    "Yes, I want to help!": "Oui, je veux aider !"
}
@ -1,7 +0,0 @@
{
    "Pinned %s files": "%s fájl rögzítve",
    "Removed pin from %s files": "%s fájl rögzítés eltávolítva",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "Új segítség a terjesztésben: <b>%s</b>.<br><small>Könyvtár: %s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "Segítség az összes új opcionális fájl terjesztésében az <b>%s</b> oldalon",
    "Yes, I want to help!": "Igen, segíteni akarok!"
}
@ -1,7 +0,0 @@
{
    "Pinned %s files": "Arquivos %s fixados",
    "Removed pin from %s files": "Arquivos %s não estão fixados",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "Você começou a ajudar a distribuir <b>%s</b>.<br><small>Pasta: %s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "Ajude a distribuir todos os novos arquivos opcionais no site <b>%s</b>",
    "Yes, I want to help!": "Sim, eu quero ajudar!"
}
@ -1,7 +0,0 @@
{
    "Pinned %s files": "已固定 %s 個檔",
    "Removed pin from %s files": "已解除固定 %s 個檔",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "你已經開始幫助分發 <b>%s</b> 。<br><small>目錄:%s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "你想要幫助分發 <b>%s</b> 網站的所有檔嗎?",
    "Yes, I want to help!": "是,我想要幫助!"
}
@ -1,7 +0,0 @@
{
    "Pinned %s files": "已固定 %s 个文件",
    "Removed pin from %s files": "已解除固定 %s 个文件",
    "You started to help distribute <b>%s</b>.<br><small>Directory: %s</small>": "您已经开始帮助分发 <b>%s</b> 。<br><small>目录:%s</small>",
    "Help distribute all new optional files on site <b>%s</b>": "您想要帮助分发 <b>%s</b> 站点的所有文件吗?",
    "Yes, I want to help!": "是,我想要帮助!"
}
@ -1,103 +0,0 @@
import time
import sqlite3
import random
import atexit

import gevent
from Plugin import PluginManager


@PluginManager.registerTo("ContentDb")
class ContentDbPlugin(object):
    def __init__(self, *args, **kwargs):
        atexit.register(self.saveAllPeers)
        super(ContentDbPlugin, self).__init__(*args, **kwargs)

    def getSchema(self):
        schema = super(ContentDbPlugin, self).getSchema()

        schema["tables"]["peer"] = {
            "cols": [
                ["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
                ["address", "TEXT NOT NULL"],
                ["port", "INTEGER NOT NULL"],
                ["hashfield", "BLOB"],
                ["reputation", "INTEGER NOT NULL"],
                ["time_added", "INTEGER NOT NULL"],
                ["time_found", "INTEGER NOT NULL"]
            ],
            "indexes": [
                "CREATE UNIQUE INDEX peer_key ON peer (site_id, address, port)"
            ],
            "schema_changed": 2
        }

        return schema

    def loadPeers(self, site):
        s = time.time()
        site_id = self.site_ids.get(site.address)
        res = self.execute("SELECT * FROM peer WHERE site_id = :site_id", {"site_id": site_id})
        num = 0
        num_hashfield = 0
        for row in res:
            peer = site.addPeer(str(row["address"]), row["port"])
            if not peer:  # Already exist
                continue
            if row["hashfield"]:
                peer.hashfield.replaceFromString(row["hashfield"])
                num_hashfield += 1
            peer.time_added = row["time_added"]
            peer.time_found = row["time_found"]
            peer.reputation = row["reputation"]
            if row["address"].endswith(".onion"):
                peer.reputation = peer.reputation / 2 - 1  # Onion peers less likely working
            num += 1
        if num_hashfield:
            site.content_manager.has_optional_files = True
        site.log.debug("%s peers (%s with hashfield) loaded in %.3fs" % (num, num_hashfield, time.time() - s))

    def iteratePeers(self, site):
        site_id = self.site_ids.get(site.address)
        for key, peer in site.peers.iteritems():
            address, port = key.rsplit(":", 1)
            if peer.has_hashfield:
                hashfield = sqlite3.Binary(peer.hashfield.tostring())
            else:
                hashfield = ""
            yield (site_id, address, port, hashfield, peer.reputation, int(peer.time_added), int(peer.time_found))

    def savePeers(self, site, spawn=False):
        if spawn:
            # Save peers every hour (+random few secs to not update every site at the same time)
            gevent.spawn_later(60 * 60 + random.randint(0, 60), self.savePeers, site, spawn=True)
        if not site.peers:
            site.log.debug("Peers not saved: No peers found")
            return
        s = time.time()
        site_id = self.site_ids.get(site.address)
        cur = self.getCursor()
        cur.execute("BEGIN")
        try:
            cur.execute("DELETE FROM peer WHERE site_id = :site_id", {"site_id": site_id})
            cur.cursor.executemany(
                "INSERT INTO peer (site_id, address, port, hashfield, reputation, time_added, time_found) VALUES (?, ?, ?, ?, ?, ?, ?)",
                self.iteratePeers(site)
            )
        except Exception as err:
            site.log.error("Save peer error: %s" % err)
        finally:
            cur.execute("END")
        site.log.debug("Peers saved in %.3fs" % (time.time() - s))

    def initSite(self, site):
        super(ContentDbPlugin, self).initSite(site)
        gevent.spawn_later(0.5, self.loadPeers, site)
        gevent.spawn_later(60 * 60, self.savePeers, site, spawn=True)

    def saveAllPeers(self):
        for site in self.sites.values():
            try:
                self.savePeers(site)
            except Exception, err:
                site.log.error("Save peer error: %s" % err)
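Note on the py3-latest side: two constructs in this file are Python 2 only, "except Exception, err" and dict.iteritems(), and neither runs on Python 3. A minimal runnable sketch of the modern spellings, using stand-in data rather than the plugin's real objects:

peers = {"1.2.3.4:15441": "peer_a", "5.6.7.8:15441": "peer_b"}  # stand-in for site.peers

for key, peer in peers.items():  # py2: peers.iteritems()
    address, port = key.rsplit(":", 1)
    print(address, port, peer)

try:
    raise IOError("disk full")
except Exception as err:  # py2 also accepted: except Exception, err
    print("Save peer error: %s" % err)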
@ -1,2 +0,0 @@
import PeerDbPlugin
@ -1,760 +0,0 @@
import re
import os
import cgi
import sys
import math
import time
import json
try:
    import cStringIO as StringIO
except:
    import StringIO

import gevent

from Config import config
from Plugin import PluginManager
from Debug import Debug
from Translate import Translate
from util import helper
from ZipStream import ZipStream

plugin_dir = "plugins/Sidebar"
media_dir = plugin_dir + "/media"
sys.path.append(plugin_dir)  # To be able to load geoip lib

loc_cache = {}
if "_" not in locals():
    _ = Translate(plugin_dir + "/languages/")


@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    # Inject our resources to end of original file streams
    def actionUiMedia(self, path):
        if path == "/uimedia/all.js" or path == "/uimedia/all.css":
            # First yield the original file and header
            body_generator = super(UiRequestPlugin, self).actionUiMedia(path)
            for part in body_generator:
                yield part

            # Append our media file to the end
            ext = re.match(".*(js|css)$", path).group(1)
            plugin_media_file = "%s/all.%s" % (media_dir, ext)
            if config.debug:
                # If debugging merge *.css to all.css and *.js to all.js
                from Debug import DebugMedia
                DebugMedia.merge(plugin_media_file)
            if ext == "js":
                yield _.translateData(open(plugin_media_file).read())
            else:
                for part in self.actionFile(plugin_media_file, send_header=False):
                    yield part
        elif path.startswith("/uimedia/globe/"):  # Serve WebGL globe files
            file_name = re.match(".*/(.*)", path).group(1)
            plugin_media_file = "%s-globe/%s" % (media_dir, file_name)
            if config.debug and path.endswith("all.js"):
                # If debugging merge *.css to all.css and *.js to all.js
                from Debug import DebugMedia
                DebugMedia.merge(plugin_media_file)
            for part in self.actionFile(plugin_media_file):
                yield part
        else:
            for part in super(UiRequestPlugin, self).actionUiMedia(path):
                yield part

    def actionZip(self):
        address = self.get["address"]
        site = self.server.site_manager.get(address)
        if not site:
            return self.error404("Site not found")

        title = site.content_manager.contents.get("content.json", {}).get("title", "").encode('ascii', 'ignore')
        filename = "%s-backup-%s.zip" % (title, time.strftime("%Y-%m-%d_%H_%M"))
        self.sendHeader(content_type="application/zip", extra_headers={'Content-Disposition': 'attachment; filename="%s"' % filename})

        return self.streamZip(site.storage.getPath("."))

    def streamZip(self, file_path):
        zs = ZipStream(file_path)
        while 1:
            data = zs.read()
            if not data:
                break
            yield data


@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    def sidebarRenderPeerStats(self, body, site):
        connected = len([peer for peer in site.peers.values() if peer.connection and peer.connection.connected])
        connectable = len([peer_id for peer_id in site.peers.keys() if not peer_id.endswith(":0")])
        onion = len([peer_id for peer_id in site.peers.keys() if ".onion" in peer_id])
        local = len([peer for peer in site.peers.values() if helper.isPrivateIp(peer.ip)])
        peers_total = len(site.peers)

        # Add myself
        if site.settings["serving"]:
            peers_total += 1
            if any(site.connection_server.port_opened.values()):
                connectable += 1
            if site.connection_server.tor_manager.start_onions:
                onion += 1

        if peers_total:
            percent_connected = float(connected) / peers_total
            percent_connectable = float(connectable) / peers_total
            percent_onion = float(onion) / peers_total
        else:
            percent_connectable = percent_connected = percent_onion = 0

        if local:
            local_html = _(u"<li class='color-yellow'><span>{_[Local]}:</span><b>{local}</b></li>")
        else:
            local_html = ""

        peer_ips = [peer.key for peer in site.getConnectablePeers(20, allow_private=False)]
        peer_ips.sort(key=lambda peer_ip: ".onion:" in peer_ip)
        copy_link = "http://127.0.0.1:43110/%s/?zeronet_peers=%s" % (
            site.content_manager.contents["content.json"].get("domain", site.address),
            ",".join(peer_ips)
        )

        body.append(_(u"""
            <li>
             <label>
              {_[Peers]}
              <small class="label-right"><a href='{copy_link}' id='link-copypeers' class='link-right'>{_[Copy to clipboard]}</a></small>
             </label>
             <ul class='graph'>
              <li style='width: 100%' class='total back-black' title="{_[Total peers]}"></li>
              <li style='width: {percent_connectable:.0%}' class='connectable back-blue' title='{_[Connectable peers]}'></li>
              <li style='width: {percent_onion:.0%}' class='connected back-purple' title='{_[Onion]}'></li>
              <li style='width: {percent_connected:.0%}' class='connected back-green' title='{_[Connected peers]}'></li>
             </ul>
             <ul class='graph-legend'>
              <li class='color-green'><span>{_[Connected]}:</span><b>{connected}</b></li>
              <li class='color-blue'><span>{_[Connectable]}:</span><b>{connectable}</b></li>
              <li class='color-purple'><span>{_[Onion]}:</span><b>{onion}</b></li>
              {local_html}
              <li class='color-black'><span>{_[Total]}:</span><b>{peers_total}</b></li>
             </ul>
            </li>
        """.replace("{local_html}", local_html)))

    def sidebarRenderTransferStats(self, body, site):
        recv = float(site.settings.get("bytes_recv", 0)) / 1024 / 1024
        sent = float(site.settings.get("bytes_sent", 0)) / 1024 / 1024
        transfer_total = recv + sent
        if transfer_total:
            percent_recv = recv / transfer_total
            percent_sent = sent / transfer_total
        else:
            percent_recv = 0.5
            percent_sent = 0.5

        body.append(_(u"""
            <li>
             <label>{_[Data transfer]}</label>
             <ul class='graph graph-stacked'>
              <li style='width: {percent_recv:.0%}' class='received back-yellow' title="{_[Received bytes]}"></li>
              <li style='width: {percent_sent:.0%}' class='sent back-green' title="{_[Sent bytes]}"></li>
             </ul>
             <ul class='graph-legend'>
              <li class='color-yellow'><span>{_[Received]}:</span><b>{recv:.2f}MB</b></li>
              <li class='color-green'><span>{_[Sent]}:</span><b>{sent:.2f}MB</b></li>
             </ul>
            </li>
        """))

    def sidebarRenderFileStats(self, body, site):
        body.append(_(u"""
            <li>
             <label>
              {_[Files]}
              <small class="label-right"><a href='#Site+directory' id='link-directory' class='link-right'>{_[Open site directory]}</a>
              <a href='/ZeroNet-Internal/Zip?address={site.address}' id='link-zip' class='link-right' download='site.zip'>{_[Save as .zip]}</a></small>
             </label>
             <ul class='graph graph-stacked'>
        """))

        extensions = (
            ("html", "yellow"),
            ("css", "orange"),
            ("js", "purple"),
            ("Image", "green"),
            ("json", "darkblue"),
            ("User data", "blue"),
            ("Other", "white"),
            ("Total", "black")
        )
        # Collect stats
        size_filetypes = {}
        size_total = 0
        contents = site.content_manager.listContents()  # Without user files
        for inner_path in contents:
            content = site.content_manager.contents[inner_path]
            if "files" not in content or content["files"] is None:
                continue
            for file_name, file_details in content["files"].items():
                size_total += file_details["size"]
                ext = file_name.split(".")[-1]
                size_filetypes[ext] = size_filetypes.get(ext, 0) + file_details["size"]

        # Get user file sizes
        size_user_content = site.content_manager.contents.execute(
            "SELECT SUM(size) + SUM(size_files) AS size FROM content WHERE ?",
            {"not__inner_path": contents}
        ).fetchone()["size"]
        if not size_user_content:
            size_user_content = 0
        size_filetypes["User data"] = size_user_content
        size_total += size_user_content

        # The missing difference is content.json sizes
        if "json" in size_filetypes:
            size_filetypes["json"] += max(0, site.settings["size"] - size_total)
        size_total = size_other = site.settings["size"]

        # Bar
        for extension, color in extensions:
            if extension == "Total":
                continue
            if extension == "Other":
                size = max(0, size_other)
            elif extension == "Image":
                size = size_filetypes.get("jpg", 0) + size_filetypes.get("png", 0) + size_filetypes.get("gif", 0)
                size_other -= size
            else:
                size = size_filetypes.get(extension, 0)
                size_other -= size
            if size_total == 0:
                percent = 0
            else:
                percent = 100 * (float(size) / size_total)
            percent = math.floor(percent * 100) / 100  # Floor to 2 digits
            body.append(
                u"""<li style='width: %.2f%%' class='%s back-%s' title="%s"></li>""" %
                (percent, _[extension], color, _[extension])
            )

        # Legend
        body.append("</ul><ul class='graph-legend'>")
        for extension, color in extensions:
            if extension == "Other":
                size = max(0, size_other)
            elif extension == "Image":
                size = size_filetypes.get("jpg", 0) + size_filetypes.get("png", 0) + size_filetypes.get("gif", 0)
            elif extension == "Total":
                size = size_total
            else:
                size = size_filetypes.get(extension, 0)

            if extension == "js":
                title = "javascript"
            else:
                title = extension

            if size > 1024 * 1024 * 10:  # Format as MB if more than 10MB
                size_formatted = "%.0fMB" % (size / 1024 / 1024)
            else:
                size_formatted = "%.0fkB" % (size / 1024)

            body.append(u"<li class='color-%s'><span>%s:</span><b>%s</b></li>" % (color, _[title], size_formatted))

        body.append("</ul></li>")

    def sidebarRenderSizeLimit(self, body, site):
        free_space = helper.getFreeSpace() / 1024 / 1024
        size = float(site.settings["size"]) / 1024 / 1024
        size_limit = site.getSizeLimit()
        percent_used = size / size_limit

        body.append(_(u"""
            <li>
             <label>{_[Size limit]} <small>({_[limit used]}: {percent_used:.0%}, {_[free space]}: {free_space:,d}MB)</small></label>
             <input type='text' class='text text-num' value="{size_limit}" id='input-sitelimit'/><span class='text-post'>MB</span>
             <a href='#Set' class='button' id='button-sitelimit'>{_[Set]}</a>
            </li>
        """))

    def sidebarRenderOptionalFileStats(self, body, site):
        size_total = float(site.settings["size_optional"])
        size_downloaded = float(site.settings["optional_downloaded"])

        if not size_total:
            return False

        percent_downloaded = size_downloaded / size_total

        size_formatted_total = size_total / 1024 / 1024
        size_formatted_downloaded = size_downloaded / 1024 / 1024

        body.append(_(u"""
            <li>
             <label>{_[Optional files]}</label>
             <ul class='graph'>
              <li style='width: 100%' class='total back-black' title="{_[Total size]}"></li>
              <li style='width: {percent_downloaded:.0%}' class='connected back-green' title='{_[Downloaded files]}'></li>
             </ul>
             <ul class='graph-legend'>
              <li class='color-green'><span>{_[Downloaded]}:</span><b>{size_formatted_downloaded:.2f}MB</b></li>
              <li class='color-black'><span>{_[Total]}:</span><b>{size_formatted_total:.2f}MB</b></li>
             </ul>
            </li>
        """))

        return True

    def sidebarRenderOptionalFileSettings(self, body, site):
        if self.site.settings.get("autodownloadoptional"):
            checked = "checked='checked'"
        else:
            checked = ""

        body.append(_(u"""
            <li>
             <label>{_[Download and help distribute all files]}</label>
             <input type="checkbox" class="checkbox" id="checkbox-autodownloadoptional" {checked}/><div class="checkbox-skin"></div>
        """))

        autodownload_bigfile_size_limit = int(site.settings.get("autodownload_bigfile_size_limit", config.autodownload_bigfile_size_limit))
        body.append(_(u"""
            <div class='settings-autodownloadoptional'>
             <label>{_[Auto download big file size limit]}</label>
             <input type='text' class='text text-num' value="{autodownload_bigfile_size_limit}" id='input-autodownload_bigfile_size_limit'/><span class='text-post'>MB</span>
             <a href='#Set' class='button' id='button-autodownload_bigfile_size_limit'>{_[Set]}</a>
            </div>
        """))
        body.append("</li>")

    def sidebarRenderBadFiles(self, body, site):
        body.append(_(u"""
            <li>
             <label>{_[Needs to be updated]}:</label>
             <ul class='filelist'>
        """))

        i = 0
        for bad_file, tries in site.bad_files.iteritems():
            i += 1
            body.append(_(u"""<li class='color-red' title="{bad_file_path} ({tries})">{bad_filename}</li>""", {
                "bad_file_path": bad_file,
                "bad_filename": helper.getFilename(bad_file),
                "tries": _.pluralize(tries, "{} try", "{} tries")
            }))
            if i > 30:
                break

        if len(site.bad_files) > 30:
            num_bad_files = len(site.bad_files) - 30
            body.append(_(u"""<li class='color-red'>{_[+ {num_bad_files} more]}</li>""", nested=True))

        body.append("""
            </ul>
            </li>
        """)

    def sidebarRenderDbOptions(self, body, site):
        if site.storage.db:
            inner_path = site.storage.getInnerPath(site.storage.db.db_path)
            size = float(site.storage.getSize(inner_path)) / 1024
            feeds = len(site.storage.db.schema.get("feeds", {}))
        else:
            inner_path = _[u"No database found"]
            size = 0.0
            feeds = 0

        body.append(_(u"""
            <li>
             <label>{_[Database]} <small>({size:.2f}kB, {_[search feeds]}: {_[{feeds} query]})</small></label>
             <div class='flex'>
              <input type='text' class='text disabled' value="{inner_path}" disabled='disabled'/>
              <a href='#Reload' id="button-dbreload" class='button'>{_[Reload]}</a>
              <a href='#Rebuild' id="button-dbrebuild" class='button'>{_[Rebuild]}</a>
             </div>
            </li>
        """, nested=True))

    def sidebarRenderIdentity(self, body, site):
        auth_address = self.user.getAuthAddress(self.site.address, create=False)
        rules = self.site.content_manager.getRules("data/users/%s/content.json" % auth_address)
        if rules and rules.get("max_size"):
            quota = rules["max_size"] / 1024
            try:
                content = site.content_manager.contents["data/users/%s/content.json" % auth_address]
                used = len(json.dumps(content)) + sum([file["size"] for file in content["files"].values()])
            except:
                used = 0
            used = used / 1024
        else:
            quota = used = 0

        body.append(_(u"""
            <li>
             <label>{_[Identity address]} <small>({_[limit used]}: {used:.2f}kB / {quota:.2f}kB)</small></label>
             <div class='flex'>
              <span class='input text disabled'>{auth_address}</span>
              <a href='#Change' class='button' id='button-identity'>{_[Change]}</a>
             </div>
            </li>
        """))

    def sidebarRenderControls(self, body, site):
        auth_address = self.user.getAuthAddress(self.site.address, create=False)
        if self.site.settings["serving"]:
            class_pause = ""
            class_resume = "hidden"
        else:
            class_pause = "hidden"
            class_resume = ""

        body.append(_(u"""
            <li>
             <label>{_[Site control]}</label>
             <a href='#Update' class='button noupdate' id='button-update'>{_[Update]}</a>
             <a href='#Pause' class='button {class_pause}' id='button-pause'>{_[Pause]}</a>
             <a href='#Resume' class='button {class_resume}' id='button-resume'>{_[Resume]}</a>
             <a href='#Delete' class='button noupdate' id='button-delete'>{_[Delete]}</a>
            </li>
        """))

        donate_key = site.content_manager.contents.get("content.json", {}).get("donate", True)
        site_address = self.site.address
        body.append(_(u"""
            <li>
             <label>{_[Site address]}</label><br>
             <div class='flex'>
              <span class='input text disabled'>{site_address}</span>
        """))
        if donate_key == False or donate_key == "":
            pass
        elif (type(donate_key) == str or type(donate_key) == unicode) and len(donate_key) > 0:
            body.append(_(u"""
             </div>
            </li>
            <li>
             <label>{_[Donate]}</label><br>
             <div class='flex'>
              {donate_key}
        """))
        else:
            body.append(_(u"""
              <a href='bitcoin:{site_address}' class='button' id='button-donate'>{_[Donate]}</a>
        """))
        body.append(_(u"""
             </div>
            </li>
        """))

    def sidebarRenderOwnedCheckbox(self, body, site):
        if self.site.settings["own"]:
            checked = "checked='checked'"
        else:
            checked = ""

        body.append(_(u"""
            <h2 class='owned-title'>{_[This is my site]}</h2>
            <input type="checkbox" class="checkbox" id="checkbox-owned" {checked}/><div class="checkbox-skin"></div>
        """))

    def sidebarRenderOwnSettings(self, body, site):
        title = site.content_manager.contents.get("content.json", {}).get("title", "")
        description = site.content_manager.contents.get("content.json", {}).get("description", "")

        body.append(_(u"""
            <li>
             <label for='settings-title'>{_[Site title]}</label>
             <input type='text' class='text' value="{title}" id='settings-title'/>
            </li>

            <li>
             <label for='settings-description'>{_[Site description]}</label>
             <input type='text' class='text' value="{description}" id='settings-description'/>
            </li>

            <li>
             <a href='#Save' class='button' id='button-settings'>{_[Save site settings]}</a>
            </li>
        """))

    def sidebarRenderContents(self, body, site):
        has_privatekey = bool(self.user.getSiteData(site.address, create=False).get("privatekey"))
        if has_privatekey:
            tag_privatekey = _(u"{_[Private key saved.]} <a href='#Forgot+private+key' id='privatekey-forgot' class='link-right'>{_[Forgot]}</a>")
        else:
            tag_privatekey = _(u"<a href='#Add+private+key' id='privatekey-add' class='link-right'>{_[Add saved private key]}</a>")

        body.append(_(u"""
            <li>
             <label>{_[Content publishing]} <small class='label-right'>{tag_privatekey}</small></label>
        """.replace("{tag_privatekey}", tag_privatekey)))

        # Choose content you want to sign
        body.append(_(u"""
            <div class='flex'>
             <input type='text' class='text' value="content.json" id='input-contents'/>
             <a href='#Sign-and-Publish' id='button-sign-publish' class='button'>{_[Sign and publish]}</a>
             <a href='#Sign-or-Publish' id='menu-sign-publish'>\u22ee</a>
            </div>
        """))

        contents = ["content.json"]
        contents += site.content_manager.contents.get("content.json", {}).get("includes", {}).keys()
        body.append(_(u"<div class='contents'>{_[Choose]}: "))
        for content in contents:
            body.append(_("<a href='{content}' class='contents-content'>{content}</a> "))
        body.append("</div>")
        body.append("</li>")

    def actionSidebarGetHtmlTag(self, to):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        site = self.site

        body = []

        body.append("<div>")
        body.append("<a href='#Close' class='close'>×</a>")
        body.append("<h1>%s</h1>" % cgi.escape(site.content_manager.contents.get("content.json", {}).get("title", ""), True))

        body.append("<div class='globe loading'></div>")

        body.append("<ul class='fields'>")

        self.sidebarRenderPeerStats(body, site)
        self.sidebarRenderTransferStats(body, site)
        self.sidebarRenderFileStats(body, site)
        self.sidebarRenderSizeLimit(body, site)
        has_optional = self.sidebarRenderOptionalFileStats(body, site)
        if has_optional:
            self.sidebarRenderOptionalFileSettings(body, site)
        self.sidebarRenderDbOptions(body, site)
        self.sidebarRenderIdentity(body, site)
        self.sidebarRenderControls(body, site)
        if site.bad_files:
            self.sidebarRenderBadFiles(body, site)

        self.sidebarRenderOwnedCheckbox(body, site)
        body.append("<div class='settings-owned'>")
        self.sidebarRenderOwnSettings(body, site)
        self.sidebarRenderContents(body, site)
        body.append("</div>")
        body.append("</ul>")
        body.append("</div>")

        body.append("<div class='menu template'>")
        body.append("<a href='#' class='menu-item template'>Template</a>")
        body.append("</div>")

        self.response(to, "".join(body))

    def downloadGeoLiteDb(self, db_path):
        import urllib
        import gzip
        import shutil
        from util import helper

        self.log.info("Downloading GeoLite2 City database...")
        self.cmd("progress", ["geolite-info", _["Downloading GeoLite2 City database (one time only, ~20MB)..."], 0])
        db_urls = [
            "https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz",
            "https://raw.githubusercontent.com/texnikru/GeoLite2-Database/master/GeoLite2-City.mmdb.gz"
        ]
        for db_url in db_urls:
            try:
                # Download
                response = helper.httpRequest(db_url)
                data_size = response.getheader('content-length')
                data_recv = 0
                data = StringIO.StringIO()
                while True:
                    buff = response.read(1024 * 512)
                    if not buff:
                        break
                    data.write(buff)
                    data_recv += 1024 * 512
                    if data_size:
                        progress = int(float(data_recv) / int(data_size) * 100)
                        self.cmd("progress", ["geolite-info", _["Downloading GeoLite2 City database (one time only, ~20MB)..."], progress])
                self.log.info("GeoLite2 City database downloaded (%s bytes), unpacking..." % data.tell())
                data.seek(0)

                # Unpack
                with gzip.GzipFile(fileobj=data) as gzip_file:
                    shutil.copyfileobj(gzip_file, open(db_path, "wb"))

                self.cmd("progress", ["geolite-info", _["GeoLite2 City database downloaded!"], 100])
                time.sleep(2)  # Wait for notify animation
                return True
            except Exception as err:
                self.log.error("Error downloading %s: %s" % (db_url, err))
                pass
        self.cmd("progress", [
            "geolite-info",
            _["GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}"].format(err, db_urls[0]),
            -100
        ])

    def getLoc(self, geodb, ip):
        global loc_cache

        if ip in loc_cache:
            return loc_cache[ip]
        else:
            try:
                loc_data = geodb.get(ip)
            except:
                loc_data = None

        if not loc_data or "location" not in loc_data:
            loc_cache[ip] = None
            return None

        loc = {
            "lat": loc_data["location"]["latitude"],
            "lon": loc_data["location"]["longitude"],
        }
        if "city" in loc_data:
            loc["city"] = loc_data["city"]["names"]["en"]

        if "country" in loc_data:
            loc["country"] = loc_data["country"]["names"]["en"]

        loc_cache[ip] = loc
        return loc

    def getPeerLocations(self, peers):
        import maxminddb
        db_path = config.data_dir + '/GeoLite2-City.mmdb'
        if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0:
            if not self.downloadGeoLiteDb(db_path):
                return False
        geodb = maxminddb.open_database(db_path)

        peers = peers.values()
        # Place bars
        peer_locations = []
        placed = {}  # Already placed bars here
        for peer in peers:
            # Height of bar
            if peer.connection and peer.connection.last_ping_delay:
                ping = round(peer.connection.last_ping_delay * 1000)
            else:
                ping = None
            loc = self.getLoc(geodb, peer.ip)

            if not loc:
                continue
            # Create position array
            lat, lon = loc["lat"], loc["lon"]
            latlon = "%s,%s" % (lat, lon)
            if latlon in placed and helper.getIpType(peer.ip) == "ipv4":  # Don't place more than 1 bar to the same place, fake repos using ip address last two parts
                lat += float(128 - int(peer.ip.split(".")[-2])) / 50
                lon += float(128 - int(peer.ip.split(".")[-1])) / 50
                latlon = "%s,%s" % (lat, lon)
            placed[latlon] = True
            peer_location = {}
            peer_location.update(loc)
            peer_location["lat"] = lat
            peer_location["lon"] = lon
            peer_location["ping"] = ping

            peer_locations.append(peer_location)

        # Append myself
        for ip in self.site.connection_server.ip_external_list:
            my_loc = self.getLoc(geodb, ip)
            if my_loc:
                my_loc["ping"] = 0
                peer_locations.append(my_loc)

        return peer_locations

    def actionSidebarGetPeers(self, to):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")
        try:
            peer_locations = self.getPeerLocations(self.site.peers)
            globe_data = []
            ping_times = [
                peer_location["ping"]
                for peer_location in peer_locations
                if peer_location["ping"]
            ]
            if ping_times:
                ping_avg = sum(ping_times) / float(len(ping_times))
            else:
                ping_avg = 0

            for peer_location in peer_locations:
                if peer_location["ping"] == 0:  # Me
                    height = -0.135
                elif peer_location["ping"]:
                    height = min(0.20, math.log(1 + peer_location["ping"] / ping_avg, 300))
                else:
                    height = -0.03

                globe_data += [peer_location["lat"], peer_location["lon"], height]

            self.response(to, globe_data)
        except Exception, err:
            self.log.debug("sidebarGetPeers error: %s" % Debug.formatException(err))
            self.response(to, {"error": err})

    def actionSiteSetOwned(self, to, owned):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        if self.site.address == config.updatesite:
            return self.response(to, "You can't change the ownership of the updater site")

        self.site.settings["own"] = bool(owned)
        self.site.updateWebsocket(owned=owned)

    def actionUserSetSitePrivatekey(self, to, privatekey):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        site_data = self.user.sites[self.site.address]
        site_data["privatekey"] = privatekey
        self.site.updateWebsocket(set_privatekey=bool(privatekey))

        return "ok"

    def actionSiteSetAutodownloadoptional(self, to, owned):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        self.site.settings["autodownloadoptional"] = bool(owned)
        self.site.bad_files = {}
        gevent.spawn(self.site.update, check_files=True)
        self.site.worker_manager.removeSolvedFileTasks()

    def actionDbReload(self, to):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        self.site.storage.closeDb()
        self.site.storage.getDb()

        return self.response(to, "ok")

    def actionDbRebuild(self, to):
        permissions = self.getPermissions(to)
        if "ADMIN" not in permissions:
            return self.response(to, "You don't have permission to run this command")

        self.site.storage.rebuildDb()

        return self.response(to, "ok")
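Note on the py3-latest side: the cStringIO/StringIO fallback import at the top of this file has no Python 3 equivalent; binary buffers come from io.BytesIO instead. A minimal runnable sketch of the gzip unpack step from downloadGeoLiteDb under that assumption, with in-memory stand-in data and a hypothetical output path:

import gzip
import io
import shutil

data = io.BytesIO()  # py2: StringIO.StringIO()
data.write(gzip.compress(b"fake mmdb payload"))  # stand-in for the downloaded .gz body
data.seek(0)

with gzip.GzipFile(fileobj=data) as gzip_file:
    with open("GeoLite2-City.mmdb", "wb") as out:  # hypothetical local path
        shutil.copyfileobj(gzip_file, out)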
@ -1,43 +0,0 @@
import cStringIO as StringIO
import os
import zipfile


class ZipStream(file):
    def __init__(self, dir_path):
        self.dir_path = dir_path
        self.pos = 0
        self.zf = zipfile.ZipFile(self, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
        self.buff = StringIO.StringIO()
        self.file_list = self.getFileList()

    def getFileList(self):
        for root, dirs, files in os.walk(self.dir_path):
            for file in files:
                file_path = root + "/" + file
                relative_path = os.path.join(os.path.relpath(root, self.dir_path), file)
                yield file_path, relative_path
        self.zf.close()

    def read(self, size=60 * 1024):
        for file_path, relative_path in self.file_list:
            self.zf.write(file_path, relative_path)
            if self.buff.tell() >= size:
                break
        self.buff.seek(0)
        back = self.buff.read()
        self.buff.truncate(0)
        return back

    def write(self, data):
        self.pos += len(data)
        self.buff.write(data)

    def tell(self):
        return self.pos

    def seek(self, pos, type):
        pass

    def flush(self):
        pass
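Note on the py3-latest side: class ZipStream(file) cannot be ported directly, since Python 3 has no builtin file type to subclass. A plain object exposing write() is enough for zipfile, which detects that the target is not seekable and streams members with data descriptors. A minimal runnable sketch of that idea; StreamBuffer is a hypothetical name, not necessarily the branch's actual class:

import io
import zipfile

class StreamBuffer(object):
    def __init__(self):
        self.buff = io.BytesIO()

    def write(self, data):  # the only method zipfile needs on a non-seekable target
        self.buff.write(data)

stream = StreamBuffer()
with zipfile.ZipFile(stream, "w", zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
    zf.writestr("hello.txt", b"hello zip stream")
print("%d bytes of zip data" % len(stream.buff.getvalue()))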
@ -1 +0,0 @@
import SidebarPlugin
@ -1,81 +0,0 @@
{
    "Peers": "Klienter",
    "Connected": "Forbundet",
    "Connectable": "Mulige",
    "Connectable peers": "Mulige klienter",

    "Data transfer": "Data overførsel",
    "Received": "Modtaget",
    "Received bytes": "Bytes modtaget",
    "Sent": "Sendt",
    "Sent bytes": "Bytes sendt",

    "Files": "Filer",
    "Total": "I alt",
    "Image": "Image",
    "Other": "Andet",
    "User data": "Bruger data",

    "Size limit": "Side max størrelse",
    "limit used": "brugt",
    "free space": "fri",
    "Set": "Opdater",

    "Optional files": "Valgfri filer",
    "Downloaded": "Downloadet",
    "Download and help distribute all files": "Download og hjælp med at dele filer",
    "Total size": "Størrelse i alt",
    "Downloaded files": "Filer downloadet",

    "Database": "Database",
    "search feeds": "søgninger",
    "{feeds} query": "{feeds} søgninger",
    "Reload": "Genindlæs",
    "Rebuild": "Genopbyg",
    "No database found": "Ingen database fundet",

    "Identity address": "Autorisations ID",
    "Change": "Skift",

    "Update": "Opdater",
    "Pause": "Pause",
    "Resume": "Aktiv",
    "Delete": "Slet",
    "Are you sure?": "Er du sikker?",

    "Site address": "Side addresse",
    "Donate": "Doner penge",

    "Missing files": "Manglende filer",
    "{} try": "{} forsøg",
    "{} tries": "{} forsøg",
    "+ {num_bad_files} more": "+ {num_bad_files} mere",

    "This is my site": "Dette er min side",
    "Site title": "Side navn",
    "Site description": "Side beskrivelse",
    "Save site settings": "Gem side opsætning",

    "Content publishing": "Indhold offentliggøres",
    "Choose": "Vælg",
    "Sign": "Signer",
    "Publish": "Offentliggør",

    "This function is disabled on this proxy": "Denne funktion er slået fra på denne ZeroNet proxy",
    "GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}": "GeoLite2 City database kunne ikke downloades: {}!<br>Download venligst databasen manuelt og udpak i data folder:<br>{}",
    "Downloading GeoLite2 City database (one time only, ~20MB)...": "GeoLite2 város adatbázis letöltése (csak egyszer kell, kb 20MB)...",
    "GeoLite2 City database downloaded!": "GeoLite2 City database downloadet!",

    "Are you sure?": "Er du sikker?",
    "Site storage limit modified!": "Side max størrelse ændret!",
    "Database schema reloaded!": "Database definition genindlæst!",
    "Database rebuilding....": "Genopbygger database...",
    "Database rebuilt!": "Database genopbygget!",
    "Site updated!": "Side opdateret!",
    "Delete this site": "Slet denne side",
    "File write error: ": "Fejl ved skrivning af fil: ",
    "Site settings saved!": "Side opsætning gemt!",
    "Enter your private key:": "Indtast din private nøgle:",
    " Signed!": " Signeret!",
    "WebGL not supported": "WebGL er ikke supporteret"
}
@ -1,81 +0,0 @@
{
    "Peers": "Peers",
    "Connected": "Verbunden",
    "Connectable": "Verbindbar",
    "Connectable peers": "Verbindbare Peers",

    "Data transfer": "Datei Transfer",
    "Received": "Empfangen",
    "Received bytes": "Empfangene Bytes",
    "Sent": "Gesendet",
    "Sent bytes": "Gesendete Bytes",

    "Files": "Dateien",
    "Total": "Gesamt",
    "Image": "Bilder",
    "Other": "Sonstiges",
    "User data": "Nutzer Daten",

    "Size limit": "Speicher Limit",
    "limit used": "Limit benutzt",
    "free space": "freier Speicher",
    "Set": "Setzten",

    "Optional files": "Optionale Dateien",
    "Downloaded": "Heruntergeladen",
    "Download and help distribute all files": "Herunterladen und helfen alle Dateien zu verteilen",
    "Total size": "Gesamte Größe",
    "Downloaded files": "Heruntergeladene Dateien",

    "Database": "Datenbank",
    "search feeds": "Feeds durchsuchen",
    "{feeds} query": "{feeds} Abfrage",
    "Reload": "Neu laden",
    "Rebuild": "Neu bauen",
    "No database found": "Keine Datenbank gefunden",

    "Identity address": "Identitäts Adresse",
    "Change": "Ändern",

    "Update": "Aktualisieren",
    "Pause": "Pausieren",
    "Resume": "Fortsetzen",
    "Delete": "Löschen",
    "Are you sure?": "Bist du sicher?",

    "Site address": "Seiten Adresse",
    "Donate": "Spenden",

    "Missing files": "Fehlende Dateien",
    "{} try": "{} versuch",
    "{} tries": "{} versuche",
    "+ {num_bad_files} more": "+ {num_bad_files} mehr",

    "This is my site": "Das ist meine Seite",
    "Site title": "Seiten Titel",
    "Site description": "Seiten Beschreibung",
    "Save site settings": "Einstellungen der Seite speichern",

    "Content publishing": "Inhaltsveröffentlichung",
    "Choose": "Wähle",
    "Sign": "Signieren",
    "Publish": "Veröffentlichen",

    "This function is disabled on this proxy": "Diese Funktion ist auf dieser Proxy deaktiviert",
    "GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}": "GeoLite2 City Datenbank Download Fehler: {}!<br>Bitte manuell herunterladen und die Datei in das Datei Verzeichnis extrahieren:<br>{}",
    "Downloading GeoLite2 City database (one time only, ~20MB)...": "Herunterladen der GeoLite2 City Datenbank (einmalig, ~20MB)...",
    "GeoLite2 City database downloaded!": "GeoLite2 City Datenbank heruntergeladen!",

    "Are you sure?": "Bist du sicher?",
    "Site storage limit modified!": "Speicher Limit der Seite modifiziert!",
    "Database schema reloaded!": "Datebank Schema neu geladen!",
    "Database rebuilding....": "Datenbank neu bauen...",
    "Database rebuilt!": "Datenbank neu gebaut!",
    "Site updated!": "Seite aktualisiert!",
    "Delete this site": "Diese Seite löschen",
    "File write error: ": "Datei schreib fehler:",
    "Site settings saved!": "Seiten Einstellungen gespeichert!",
    "Enter your private key:": "Gib deinen privaten Schlüssel ein:",
    " Signed!": " Signiert!",
    "WebGL not supported": "WebGL nicht unterstützt"
}
@ -1,79 +0,0 @@
{
    "Peers": "Pares",
    "Connected": "Conectados",
    "Connectable": "Conectables",
    "Connectable peers": "Pares conectables",

    "Data transfer": "Transferencia de datos",
    "Received": "Recibidos",
    "Received bytes": "Bytes recibidos",
    "Sent": "Enviados",
    "Sent bytes": "Bytes envidados",

    "Files": "Ficheros",
    "Total": "Total",
    "Image": "Imagen",
    "Other": "Otro",
    "User data": "Datos del usuario",

    "Size limit": "Límite de tamaño",
    "limit used": "Límite utilizado",
    "free space": "Espacio libre",
    "Set": "Establecer",

    "Optional files": "Ficheros opcionales",
    "Downloaded": "Descargado",
    "Download and help distribute all files": "Descargar y ayudar a distribuir todos los ficheros",
    "Total size": "Tamaño total",
    "Downloaded files": "Ficheros descargados",

    "Database": "Base de datos",
    "search feeds": "Fuentes de búsqueda",
    "{feeds} query": "{feeds} consulta",
    "Reload": "Recargar",
    "Rebuild": "Reconstruir",
    "No database found": "No se ha encontrado la base de datos",

    "Identity address": "Dirección de la identidad",
    "Change": "Cambiar",

    "Update": "Actualizar",
    "Pause": "Pausar",
    "Resume": "Reanudar",
    "Delete": "Borrar",

    "Site address": "Dirección del sitio",
    "Donate": "Donar",

    "Missing files": "Ficheros perdidos",
    "{} try": "{} intento",
    "{} tries": "{} intentos",
    "+ {num_bad_files} more": "+ {num_bad_files} más",

    "This is my site": "Este es mi sitio",
    "Site title": "Título del sitio",
    "Site description": "Descripción del sitio",
    "Save site settings": "Guardar la configuración del sitio",

    "Content publishing": "Publicación del contenido",
    "Choose": "Elegir",
    "Sign": "Firmar",
    "Publish": "Publicar",
    "This function is disabled on this proxy": "Esta función está deshabilitada en este proxy",
    "GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}": "¡Error de la base de datos GeoLite2: {}!<br>Por favor, descárgalo manualmente y descomprime al directorio de datos:<br>{}",
    "Downloading GeoLite2 City database (one time only, ~20MB)...": "Descargando la base de datos de GeoLite2 (una única vez, ~20MB)...",
    "GeoLite2 City database downloaded!": "¡Base de datos de GeoLite2 descargada!",

    "Are you sure?": "¿Estás seguro?",
    "Site storage limit modified!": "¡Límite de almacenamiento del sitio modificado!",
    "Database schema reloaded!": "¡Esquema de la base de datos recargado!",
    "Database rebuilding....": "Reconstruyendo la base de datos...",
    "Database rebuilt!": "¡Base de datos reconstruida!",
    "Site updated!": "¡Sitio actualizado!",
    "Delete this site": "Borrar este sitio",
    "File write error: ": "Error de escritura de fichero:",
    "Site settings saved!": "¡Configuración del sitio guardada!",
    "Enter your private key:": "Introduce tu clave privada:",
    " Signed!": " ¡firmado!",
    "WebGL not supported": "WebGL no está soportado"
}
@@ -1,82 +0,0 @@
{
"Peers": "Pairs",
"Connected": "Connectés",
"Connectable": "Accessibles",
"Connectable peers": "Pairs accessibles",

"Data transfer": "Données transférées",
"Received": "Reçues",
"Received bytes": "Bytes reçus",
"Sent": "Envoyées",
"Sent bytes": "Bytes envoyés",

"Files": "Fichiers",
"Total": "Total",
"Image": "Image",
"Other": "Autre",
"User data": "Utilisateurs",

"Size limit": "Taille maximale",
"limit used": "utilisé",
"free space": "libre",
"Set": "Modifier",

"Optional files": "Fichiers optionnels",
"Downloaded": "Téléchargé",
"Download and help distribute all files": "Télécharger et distribuer tous les fichiers",
"Total size": "Taille totale",
"Downloaded files": "Fichiers téléchargés",

"Database": "Base de données",
"search feeds": "recherche",
"{feeds} query": "{feeds} requête",
"Reload": "Recharger",
"Rebuild": "Reconstruire",
"No database found": "Aucune base de données trouvée",

"Identity address": "Adresse d'identité",
"Change": "Modifier",

"Site control": "Opérations",
"Update": "Mettre à jour",
"Pause": "Suspendre",
"Resume": "Reprendre",
"Delete": "Supprimer",
"Are you sure?": "Êtes-vous certain ?",

"Site address": "Adresse du site",
"Donate": "Faire un don",

"Missing files": "Fichiers manquants",
"{} try": "{} essai",
"{} tries": "{} essais",
"+ {num_bad_files} more": "+ {num_bad_files} manquants",

"This is my site": "Ce site m'appartient",
"Site title": "Nom du site",
"Site description": "Description du site",
"Save site settings": "Enregistrer les paramètres",

"Content publishing": "Publication du contenu",
"Choose": "Sélectionner",
"Sign": "Signer",
"Publish": "Publier",

"This function is disabled on this proxy": "Cette fonction est désactivée sur ce proxy",
"GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}": "Erreur au téléchargement de la base de données GeoLite2: {}!<br>Téléchargez-la manuellement et décompressez-la dans le dossier data:<br>{}",
"Downloading GeoLite2 City database (one time only, ~20MB)...": "Téléchargement de la base de données GeoLite2 (une seule fois, ~20MB)...",
"GeoLite2 City database downloaded!": "Base de données GeoLite2 téléchargée!",

"Are you sure?": "Êtes-vous certain ?",
"Site storage limit modified!": "Taille maximale modifiée!",
"Database schema reloaded!": "Base de données rechargée!",
"Database rebuilding....": "Reconstruction de la base de données...",
"Database rebuilt!": "Base de données reconstruite!",
"Site updated!": "Site mis à jour!",
"Delete this site": "Supprimer ce site",
"File write error: ": "Erreur à l'écriture du fichier: ",
"Site settings saved!": "Paramètres du site enregistrés!",
"Enter your private key:": "Entrez votre clé privée:",
" Signed!": " Signé!",
"WebGL not supported": "WebGL n'est pas supporté"
}

@@ -1,82 +0,0 @@
{
"Peers": "Csatlakozási pontok",
"Connected": "Csatlakozva",
"Connectable": "Csatlakozható",
"Connectable peers": "Csatlakozható peer-ek",

"Data transfer": "Adatátvitel",
"Received": "Fogadott",
"Received bytes": "Fogadott byte-ok",
"Sent": "Küldött",
"Sent bytes": "Küldött byte-ok",

"Files": "Fájlok",
"Total": "Összesen",
"Image": "Kép",
"Other": "Egyéb",
"User data": "Felh. adat",

"Size limit": "Méretkorlát",
"limit used": "felhasznált",
"free space": "szabad hely",
"Set": "Beállít",

"Optional files": "Opcionális fájlok",
"Downloaded": "Letöltött",
"Download and help distribute all files": "Minden opcionális fájl letöltése",
"Total size": "Teljes méret",
"Downloaded files": "Letöltve",

"Database": "Adatbázis",
"search feeds": "Keresési források",
"{feeds} query": "{feeds} lekérdezés",
"Reload": "Újratöltés",
"Rebuild": "Újraépítés",
"No database found": "Adatbázis nem található",

"Identity address": "Azonosító cím",
"Change": "Módosít",

"Site control": "Oldal műveletek",
"Update": "Frissít",
"Pause": "Szüneteltet",
"Resume": "Folytat",
"Delete": "Töröl",
"Are you sure?": "Biztos vagy benne?",

"Site address": "Oldal címe",
"Donate": "Támogatás",

"Missing files": "Hiányzó fájlok",
"{} try": "{} próbálkozás",
"{} tries": "{} próbálkozás",
"+ {num_bad_files} more": "+ még {num_bad_files} darab",

"This is my site": "Ez az én oldalam",
"Site title": "Oldal neve",
"Site description": "Oldal leírása",
"Save site settings": "Oldal beállítások mentése",

"Content publishing": "Tartalom publikálás",
"Choose": "Válassz",
"Sign": "Aláírás",
"Publish": "Publikálás",

"This function is disabled on this proxy": "Ez a funkció ki van kapcsolva ezen a proxy-n",
"GeoLite2 City database download error: {}!<br>Please download manually and unpack to data dir:<br>{}": "GeoLite2 város adatbázis letöltési hiba: {}!<br>A térképhez töltsd le és csomagold ki a data könyvtárba:<br>{}",
"Downloading GeoLite2 City database (one time only, ~20MB)...": "GeoLite2 város adatbázis letöltése (csak egyszer kell, kb. 20MB)...",
"GeoLite2 City database downloaded!": "GeoLite2 város adatbázis letöltve!",

"Are you sure?": "Biztos vagy benne?",
"Site storage limit modified!": "Az oldal méretkorlátja módosítva!",
"Database schema reloaded!": "Adatbázis séma újratöltve!",
"Database rebuilding....": "Adatbázis újraépítés...",
"Database rebuilt!": "Adatbázis újraépítve!",
"Site updated!": "Az oldal frissítve!",
"Delete this site": "Az oldal törlése",
"File write error: ": "Fájl írási hiba: ",
"Site settings saved!": "Az oldal beállításai elmentve!",
"Enter your private key:": "Add meg a privát kulcsod:",
" Signed!": " Aláírva!",
"WebGL nem not supported": "WebGL nem támogatott"
}
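
Each of the deleted language files above is a flat JSON dictionary keyed by the English source string, with str.format-style placeholders ("{}", "{feeds}", "{num_bad_files}") that are filled in at runtime. A minimal sketch of how such a dictionary can be loaded and applied follows; the load_translation and tr helper names, the file path, and the fall-back-to-English behaviour are illustrative assumptions, not ZeroNet's actual Translate API:

import json

def load_translation(path):
    # Hypothetical loader: each file maps an English source string
    # to its translated counterpart, e.g. "Rebuild" -> "Reconstruir".
    with open(path, encoding="utf8") as f:
        return json.load(f)

def tr(translations, text, *args, **kwargs):
    # Look up the English key; fall back to the English text itself
    # when the language file has no entry (assumed behaviour).
    template = translations.get(text, text)
    # Placeholders are filled with str.format, so positional ("{} tries")
    # and named ("+ {num_bad_files} more") keys both work unchanged.
    return template.format(*args, **kwargs) if (args or kwargs) else template

# Usage sketch (path is hypothetical):
# hu = load_translation("languages/hu.json")
# tr(hu, "{} tries", 3)            # -> "3 próbálkozás"
# tr(hu, "Database rebuilt!")      # -> "Adatbázis újraépítve!"

Because the keys are the untranslated strings, a missing or deleted language file degrades gracefully: every lookup simply returns the English original.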
Some files were not shown because too many files have changed in this diff.