Compare commits

...

147 commits

Author SHA1 Message Date
canewsin
0aed438ae5 Merge branch 'py3-latest' into zeronet-enhanced 2022-01-12 15:04:17 +05:30
canewsin
ec8203fff7 Fix ConnServer GetFile exception
https://github.com/ZeroNetX/ZeroNet/issues/136
2022-01-12 02:33:22 +05:30
canewsin
45b92157cf More Plugin Changes 2022-01-12 01:13:19 +05:30
canewsin
5e61fe8b06 Catch Missing File Error 2022-01-08 11:48:30 +05:30
canewsin
90b6cb9cdf Fetch Plugins Repo 2022-01-07 03:45:50 +05:30
canewsin
1362b7e2e6 Bug Fixes 2022-01-07 03:24:35 +05:30
canewsin
00061c45a5 Bug Fixes 2022-01-07 03:00:48 +05:30
canewsin
7fef6dde0e More Bug Fixes 2022-01-07 02:56:14 +05:30
canewsin
c594e63512 Bug Fixes 2022-01-07 02:29:04 +05:30
canewsin
36adb63f61 Fix Tests 2022-01-06 15:07:17 +05:30
canewsin
59f7d3221f Merge branch 'pr/112' into zeronet-enhanced 2022-01-06 12:47:11 +05:30
canewsin
b39a1a5f1b Merge branch 'py3-internal' into zeronet-enhanced 2022-01-06 12:39:44 +05:30
canewsin
f2324b8eb4 Fetch Latest ZeroNet-plugins Repo 2022-01-06 12:32:43 +05:30
canewsin
912688dbbd Update ZeroNet Plugins 2022-01-06 12:28:53 +05:30
Zola Gonano
f2e1c2ad81
Clean up tor v3 patch (#115)
https://github.com/ZeroNetX/ZeroNet/pull/115
2021-12-27 00:24:13 +05:30
canewsin
62f88a11a7 Add More Plugins to Repo 2021-12-26 23:30:36 +05:30
canewsin
7dbf8da8cb Parallel Shutdown of file_server and ui_server
Shut down file_server and ui_server in parallel when ZeroNet shuts down.
2021-12-22 15:09:52 +05:30
canewsin
f79aca1dad Merge branches 'py3-internal' and 'py3-internal' of https://github.com/ZeroNetX/ZeroNet into py3-internal 2021-12-22 14:10:04 +05:30
canewsin
7575e2d455 Doubled Site Publish Limits 2021-12-22 14:05:01 +05:30
Zola Gonano
58b2d92351
Update ZeroNet Repo Urls (#103) 2021-12-20 20:25:45 +05:30
Zola Gonano
86a73e4665
UI/UX: Increases Size of Notifications Close Button(#106) 2021-12-20 20:24:30 +05:30
canewsin
18da16e8d4 Fix Github Actions 2021-12-16 20:03:47 +05:30
canewsin
523e399d8c Fix Github Actions 2021-12-16 20:00:50 +05:30
canewsin
643dd84a28 Fix Tests for Github Actions 2021-12-16 19:56:24 +05:30
canewsin
4448358247 0.7.6-internal 2
Moved Plugins to Separate Repo
2021-12-16 19:47:09 +05:30
canewsin
0e48004563 v 0.7.6-internal
- Added `access_key` variable to Config; it is used to access restricted plugins when the multiuser plugin is enabled. When MultiUserPlugin is enabled, some pages such as /Stats cannot be accessed; this key removes that restriction.
- Added `last_connection_id_current_version` to ConnectionServer, helpful for estimating the number of connections coming from the current client version (see the sketch below).
- Added the current-version connection count to the /Stats page; see the previous point.
2021-12-11 16:35:02 +05:30
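
A hypothetical sketch of the connection counting mentioned in the second point; the class and method names below are invented for illustration, while the real counter is kept on ConnectionServer and incremented when a connection is created:

# Hypothetical sketch, not the actual ZeroNet code: keep a separate counter for
# connections coming from peers that report the current client version, so the
# /Stats page can show how much of the network runs the newest release.
class ConnectionCounters:
    def __init__(self, current_version):
        self.current_version = current_version
        self.last_connection_id = 0
        self.last_connection_id_current_version = 0

    def onNewConnection(self, peer_version):
        self.last_connection_id += 1
        if peer_version == self.current_version:
            self.last_connection_id_current_version += 1
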
canewsin
0bbf19aab9 Update Donation Link 2021-12-11 16:26:27 +05:30
Vadim Ushakov
545fe9442c Merge branch 'HelloZeroNet-py3' into massive-rework
# Conflicts:
#	src/File/FileServer.py
2021-11-06 22:39:47 +07:00
Vadim Ushakov
5c8bbe5801 build-docker-images.sh: push to Docker Hub as well 2021-11-06 22:00:46 +07:00
Vadim Ushakov
348a4b0865 Add support for building ubuntu:20.04-based docker images + some automation 2021-11-06 21:19:42 +07:00
Vadim Ushakov
f7372fc393 Remove __slots__ from Peer, the slot system doesn't work well with classes extended by plugins anyway 2021-11-06 13:16:43 +07:00
Vadim Ushakov
b7a3aa37e1 Sidebar Plugin: display more detailed statistics about peers 2021-11-05 23:38:38 +07:00
Vadim Ushakov
b6b23d0e8e In Site.updater(), properly detect the case when peer has no file at all, not just an older version. 2021-11-03 17:15:43 +07:00
Vadim Ushakov
e000eae046 Add new configuration option: simultaneous_connection_throttle_threshold 2021-11-03 16:06:06 +07:00
Vadim Ushakov
1863043505 Update ChangeLog-0.8.0.md 2021-11-03 12:59:49 +07:00
Vadim Ushakov
d32d9f781b Move getIpType() from helper to ConnectionServer 2021-11-03 12:57:13 +07:00
Vadim Ushakov
168c436b73 Add new configuration variables and temporarily disable Site.persistent_peer_req
New configuration options:

site_announce_interval_min
site_announce_interval_max

site_peer_check_interval_min
site_peer_check_interval_max

site_update_check_interval_min
site_update_check_interval_max

site_connectable_peer_count_max
site_connectable_peer_count_min

Site.persistent_peer_req is temporarily disabled since it puts excessive pressure on the network when working over TOR and needs some reworking.
2021-10-27 20:57:44 +07:00
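
The min/max pairs above suggest an interval that scales with site activity: the most active sites use the minimum and idle sites drift toward the maximum. A minimal sketch of that idea, assuming a normalized activity score; the function and the scaling rule are illustrative, not the actual implementation:

# Hypothetical sketch: interpolate between a _min and _max setting based on
# how active a site currently is (1.0 = most active, 0.0 = idle).
def scaled_interval(interval_min, interval_max, activity):
    activity = max(0.0, min(1.0, activity))
    return interval_max - (interval_max - interval_min) * activity

# Example: with site_update_check_interval_min=5 and site_update_check_interval_max=45,
# an idle site would be checked every 45 minutes, a very active one every 5.
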
Vadim Ushakov
77e0bb3650 PeerDb plugin: save and restore fields time_response and connection_error 2021-10-27 18:54:58 +07:00
Vadim Ushakov
ef69dcd331 Implement Send Back LRU cache to reduce useless network transfers 2021-10-27 01:06:12 +07:00
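
The caching pattern behind a "send back" LRU can be sketched as follows. This is a hypothetical illustration (class and method names are invented); it only shows how remembering recently pushed-back files avoids repeating the same transfer, not the real wiring into the file server:

# Hypothetical sketch of a send-back LRU cache: remember which (peer, inner_path)
# pairs we recently pushed back, so rediscovering the same outdated peer does not
# trigger the same transfer again.
from collections import OrderedDict

class SendBackLRU:
    def __init__(self, size=5000):  # 5000 matches the --send_back_lru_size default added in this compare
        self.size = size
        self.items = OrderedDict()

    def isRecentlySent(self, peer_key, inner_path):
        key = (peer_key, inner_path)
        if key in self.items:
            self.items.move_to_end(key)  # refresh its LRU position
            return True
        self.items[key] = True
        if len(self.items) > self.size:
            self.items.popitem(last=False)  # evict the least recently used entry
        return False
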
Vadim Ushakov
32eb47c482 a small fix in Peer 2021-10-26 22:32:28 +07:00
Vadim Ushakov
f484c0a1b8 fine-tuning FileServer 2021-10-26 17:40:03 +07:00
Vadim Ushakov
645f3ba34a Reorganization of Peer class and peer-related Site's methods 2021-10-26 17:38:40 +07:00
Vadim Ushakov
93a95f511a Limit the pex request frequency, interval is 120 secs for each peer 2021-10-25 17:59:35 +07:00
Vadim Ushakov
dff52d691a Small improvements in FileServer 2021-10-25 17:09:16 +07:00
Vadim Ushakov
1a8d30146e Fix typos in comments 2021-10-25 16:18:35 +07:00
Vadim Ushakov
8f908c961d Fine-tuning PeerConnector 2021-10-25 16:12:00 +07:00
Vadim Ushakov
ce971ab738 Don't increment bad_file failed tries counter on rediscovering the same file on update()
Do increment it only on actual fileFailed() event.
2021-10-25 13:18:43 +07:00
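
The distinction drawn here, rediscovering an already known bad file versus an actual failed attempt, can be sketched like this (hypothetical helper names, not the real SiteStorage/Worker code):

# Hypothetical sketch: only fileFailed() increments the retry counter;
# update() re-listing an already known bad file keeps the existing count.
bad_files = {}  # inner_path -> number of failed tries

def markBadFromUpdate(inner_path):
    bad_files.setdefault(inner_path, 0)  # remember it, but don't count it as a failure

def fileFailed(inner_path):
    bad_files[inner_path] = bad_files.get(inner_path, 0) + 1  # a real failed attempt
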
Vadim Ushakov
1fd1f47a94 Fix detection of the broken Internet connection on the app start up 2021-10-25 12:53:37 +07:00
Vadim Ushakov
b512c54f75 Implement new logic for waiting for connected peers when updating or publishing a site 2021-10-24 23:53:08 +07:00
Vadim Ushakov
b4f94e5022 Make use of waitForPeers() when running FileServer-driven update() 2021-10-23 21:44:41 +07:00
Vadim Ushakov
e612f93631 Spawn message loops for outgoing connections in a separate pool managed by ConnectionServer 2021-10-23 21:41:14 +07:00
Vadim Ushakov
fe24e17baa Fix the prev commit 2021-10-23 21:40:41 +07:00
Vadim Ushakov
1b68182a76 FileServer: don't schedule multiple updates for the same site in parallel 2021-10-22 17:18:24 +07:00
Vadim Ushakov
1ef129bdf9 Fix "changed size during iteration" in verifyFiles() 2021-10-22 14:38:05 +07:00
Vadim Ushakov
19b840defd Implement new websocket command serverSetOfflineMode 2021-10-22 02:59:28 +07:00
Vadim Ushakov
e3daa09316 Improve the file server shutdown logic and display the shutdown progress bar in the UI 2021-10-22 00:30:42 +07:00
Vadim Ushakov
77d2d69376 Replace file_server.sites with file_server.getSites() in ChartCollector.py 2021-10-22 00:27:47 +07:00
Vadim Ushakov
c36cba7980 Implement new websocket command serverSetPassiveMode 2021-10-21 18:45:08 +07:00
Vadim Ushakov
ddc4861223 Site.py: code cleanup 2021-10-21 16:18:40 +07:00
Vadim Ushakov
cd3262a2a7 code style fix in Peer.py 2021-10-21 14:50:54 +07:00
Vadim Ushakov
b194eb0f33 Rename param to reflect its meaning: check_site_on_reconnect -> update_site_on_reconnect 2021-10-21 14:37:53 +07:00
Vadim Ushakov
5744e40505 Redesign the scheduling of site checking and verification
Save `check_files_timestamp` and `verify_files_timestamp` in sites.json and run checks only when the time interval is expired.
2021-10-21 13:19:10 +07:00
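
A minimal sketch of the timestamp gating described above, assuming the two fields live in the per-site settings that get serialized into sites.json; the interval constant is a placeholder, not the actual default:

# Hypothetical sketch: persist the time of the last check and skip the
# expensive work until the configured interval has expired.
import time

CHECK_FILES_INTERVAL = 24 * 60 * 60  # assumed: at most once a day

def shouldCheckFiles(site_settings):
    last = site_settings.get("check_files_timestamp", 0)
    return time.time() - last >= CHECK_FILES_INTERVAL

def markFilesChecked(site_settings):
    site_settings["check_files_timestamp"] = time.time()  # ends up in sites.json
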
Vadim Ushakov
5ec970adb8 Add option verify_files to Site.update() to allow the real content verification check, not just the simple file size-based one
Add more informative updateWebsocket() notification in Site.update() and SiteStorage.verifyFiles()
2021-10-21 02:24:16 +07:00
Vadim Ushakov
75bba6ca1a Be more verbose about starting/stopping FileServer threads 2021-10-20 22:40:34 +07:00
Vadim Ushakov
7e438a90e1 Don't spam in log.info() from getRecentPeers() 2021-10-20 21:10:04 +07:00
Vadim Ushakov
46bea95002 Small cleanup in TrackerSharePlugin.py 2021-10-20 20:43:52 +07:00
Vadim Ushakov
2a25d61b96 Fix https://github.com/HelloZeroNet/ZeroNet/issues/2757 2021-10-20 19:01:55 +07:00
Vadim Ushakov
ba6295f793 Add tests directly to SafeRe.py 2021-10-20 18:28:03 +07:00
Vadim Ushakov
23ef37374b Various fixes in ChangeLog-0.8.0.md 2021-10-20 17:39:39 +07:00
Vadim Ushakov
488fd4045e Update ChangeLog-0.8.0.md 2021-10-20 17:28:03 +07:00
Vadim Ushakov
769a2c08dd Fix possible infinite growing of the SafeRe regexp cache by limiting the cache size 2021-10-20 17:25:47 +07:00
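
The fix pattern, bounding a compiled-pattern cache so it cannot grow without limit, looks roughly like this; a hypothetical sketch, not the actual SafeRe.py code or its real limit:

# Hypothetical sketch: a compiled-regexp cache that is cleared once it grows
# past a fixed limit, so checking many distinct patterns cannot exhaust memory.
import re

CACHE_LIMIT = 1000  # assumed limit
cached_patterns = {}

def compilePattern(pattern):
    compiled = cached_patterns.get(pattern)
    if compiled is None:
        if len(cached_patterns) >= CACHE_LIMIT:
            cached_patterns.clear()  # crude, but keeps the cache bounded
        compiled = re.compile(pattern)
        cached_patterns[pattern] = compiled
    return compiled
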
Vadim Ushakov
d2b65c550c Minor fix in ChangeLog-0.8.0.md 2021-10-20 16:43:13 +07:00
Vadim Ushakov
d5652eaa51 Update ChangeLog-0.8.0.md 2021-10-20 16:42:21 +07:00
Vadim Ushakov
164f5199a9 Add default onion v3 tracker addresses 2021-10-20 16:28:28 +07:00
Vadim Ushakov
72e5d3df64 Fix typo in "All server stopped" 2021-10-20 14:28:22 +07:00
Vadim Ushakov
1c73d1a095 Merge TORv3 patch
From: http://127.0.0.1:43110/19HKdTAeBh5nRiKn791czY7TwRB1QNrf1Q/?:users/1HvNGwHKqhj3ZMEM53tz6jbdqe4LRpanEu:zn:dc17f896-bf3f-4962-bdd4-0a470040c9c5

Related issues:
https://github.com/HelloZeroNet/ZeroNet/issues/2351
https://github.com/HelloZeroNet/ZeroNet/issues/1292
2021-10-20 14:26:07 +07:00
Vadim Ushakov
b9ec7124f9 minor changes for debugging 2021-10-20 14:02:00 +07:00
Vadim Ushakov
abbd2c51f0 Update ChangeLog-0.8.0.md 2021-03-31 12:55:45 +07:00
Vadim Ushakov
be65ff2c40 Make more efforts of looking for peers when publishing a site 2021-03-31 11:27:19 +07:00
Vadim Ushakov
c772592c4a New default ZeroHello address 2021-03-31 11:20:37 +07:00
Vadim Ushakov
5d6fe6a631 Fix a bug introduced in c84b413f58 2021-03-31 11:16:06 +07:00
Vadim Ushakov
adaeedf4d8 Dockerfile: move to alpine:3.13 in order to use newer tor version 2021-03-31 10:51:17 +07:00
Vadim Ushakov
8474abc967 Fix building the docker image 2021-03-31 09:39:22 +07:00
Vadim Ushakov
986dedfa7f Trying to fix incomplete updates
Partially fixes https://github.com/HelloZeroNet/ZeroNet/issues/2476

Approx. every 10th update check is now performed with `since = 0`
2021-03-25 12:41:53 +07:00
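
The "occasionally ask for everything" behaviour described above can be sketched as follows (the function name and the random choice are illustrative assumptions):

# Hypothetical sketch: most update checks only request changes newer than the
# last known modification time, but roughly every 10th check passes since=0
# so silently missed updates are eventually picked up.
import random

def chooseSince(last_modified):
    if random.random() < 0.1:  # approx. every 10th check
        return 0  # full check: request all content.json updates
    return last_modified
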
Vadim Ushakov
0151546329 ContentManager.py: move duplicated code to new method serializeForSigning() 2021-03-24 09:51:07 +07:00
Vadim Ushakov
d0069471b8 Don't raise VerifyError with misleading message "Invalid old-style sign" when the file has no sign at all. 2021-03-24 08:04:23 +07:00
Vadim Ushakov
1144964062 Make the site block check usable from plugins and core modules
Fixes https://github.com/HelloZeroNet/ZeroNet/issues/1888
2021-03-23 23:16:35 +07:00
Vadim Ushakov
6c8849139f ZNE-ChangeLog/ChangeLog-0.8.0.md: typos 2021-03-23 19:05:51 +07:00
Vadim Ushakov
3677684971 Add ZNE-ChangeLog/ChangeLog-0.8.0.md 2021-03-23 19:02:43 +07:00
Vadim Ushakov
4e27e300e3 Delete plugin/AnnounceShare/ 2021-03-23 16:01:36 +07:00
Vadim Ushakov
697b12d808 Fix naming. verifyContentJson() should be verifyContentFile() 2020-11-04 22:13:21 +07:00
Vadim Ushakov
570f854485 Fix a typo in Site.py 2020-11-04 21:58:32 +07:00
Vadim Ushakov
5d5b3684cc ContentManager: split verifyFile() into 2 functions and always log the verify error at INFO level 2020-11-04 18:22:14 +07:00
Vadim Ushakov
7354d712e0 Be more persistent in delivering site updates. 2020-11-04 16:08:01 +07:00
Vadim Ushakov
6c8b059f57 FileServer: small fixes 2020-11-04 16:05:01 +07:00
Vadim Ushakov
27ce79f044 Fix a typo in FileServer 2020-11-04 09:56:29 +07:00
Vadim Ushakov
90d01e6004 Fix a tor issue introduced in the latest changes 2020-11-04 09:25:44 +07:00
Vadim Ushakov
325f071329 Fixes and refactoring in Connection.py, Peer.py 2020-11-03 23:25:29 +07:00
Vadim Ushakov
c84b413f58 Refactor ConnectionServer, FileServer; fix bugs introduced in previous commits 2020-11-03 21:21:33 +07:00
Vadim Ushakov
ba16fdcae9 Fix a typo 2020-11-02 09:04:38 +07:00
Vadim Ushakov
ea21b32b93 Add explicit invalidation and expiration of site update timestamps 2020-10-31 18:05:50 +07:00
Vadim Ushakov
e8358ee8f2 More fixes on the way to reliable site updates. 2020-10-31 03:59:54 +07:00
Vadim Ushakov
d1b9cc8261 Redesign the Internet outage detection. Improvements in FileServer threads. 2020-10-30 23:28:16 +07:00
Vadim Ushakov
829fd46781 Redesign the site updating strategy in Site.py, SiteAnnouncer.py, FileServer.py 2020-10-30 14:36:08 +07:00
Vadim Ushakov
adf40dbb6b Refactor SiteAnnouncer.announce 2020-10-29 12:57:16 +07:00
Vadim Ushakov
8fd88c50f9 Redesign cleanupSites() and all the related stuff and rename it to periodic maintenance. 2020-10-28 23:38:17 +07:00
Vadim Ushakov
511a90a5c5 Redesign Site.needConnections() 2020-10-28 20:31:48 +07:00
Vadim Ushakov
3ca323f8b0 FileServer: move loadTrackersFile() to a separate thread 2020-10-28 17:56:11 +07:00
Vadim Ushakov
112c778c28 Peer.py: allow overriding the log level
Not the best solution, but it requires minimal code changes.
2020-10-28 14:53:56 +07:00
Vadim Ushakov
f1d91989d5 SiteAnnouncer: make use of a separate logger instance, not the Site's logger 2020-10-28 14:51:17 +07:00
Vadim Ushakov
b6ae96db5a Implement overriding log levels for separate modules 2020-10-28 14:48:30 +07:00
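
Overriding the log level for a single module usually comes down to configuring that module's named logger; a minimal sketch with Python's standard logging (the override mapping and its format are assumptions, not ZeroNet's actual config syntax):

# Hypothetical sketch: apply per-module log level overrides on top of the
# global default level using the standard logging module.
import logging

def applyLogLevelOverrides(overrides, default_level="INFO"):
    logging.getLogger().setLevel(getattr(logging, default_level))
    for module_name, level_name in overrides.items():
        logging.getLogger(module_name).setLevel(getattr(logging, level_name))

# Example: keep Peer quiet while debugging the connection layer
applyLogLevelOverrides({"Peer": "WARNING", "ConnectionServer": "DEBUG"})
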
Vadim Ushakov
3d68a25e13 TrackerShare: refactor 2020-10-27 17:01:40 +07:00
Vadim Ushakov
d4239d16f9 TrackerShare: send my trackers in response to actionGetTrackers even if they don't seem working; add private field for hiding trackers 2020-10-27 12:08:54 +07:00
Vadim Ushakov
b0005026b4 TrackerShare: rename the shared key to trackers key in the json file
and delete the unused `type` parameter
2020-10-26 19:52:35 +07:00
Vadim Ushakov
1f34f477ef TrackerShare: add plugin_info.json 2020-10-26 13:33:25 +07:00
Vadim Ushakov
e33a54bc65 Merge branch 'py3' into plugins 2020-10-26 00:35:00 +07:00
Vadim Ushakov
920ddd944f Merge branch 'py3' into plugins 2020-03-09 19:28:09 +07:00
Vadim Ushakov
f9706e3dc4 TrackerZero: don't register the same onion addresses multiple times 2020-03-09 19:24:57 +07:00
Vadim Ushakov
822e53ebb8 Merge branch 'py3' into plugins 2019-07-18 00:14:06 +07:00
Vadim Ushakov
5f6589cfc2 TrackerShare: be more verbose in enoughWorkingTrackers() 2019-07-18 00:01:56 +07:00
Vadim Ushakov
95c8f0e97e TrackerShare: consider UDP disabled if config.tor == "always" 2019-07-17 23:48:53 +07:00
Vadim Ushakov
cb363d2f11 TrackerShare: move the tracker list cleanup code from getSupportedProtocols() to a separate method 2019-07-17 23:35:39 +07:00
Vadim Ushakov
0d02c3c4da TrackerShare: increase default limits: zero=10,other=5; total=20 2019-07-17 23:23:03 +07:00
Vadim Ushakov
2811d7c9d4 TrackerShare: replace self.site_announcer on every call from SiteAnnouncer 2019-07-17 23:21:11 +07:00
Vadim Ushakov
96e935300c TrackerZero: fix error in updateHashCache()
Reported to the upstream: https://github.com/HelloZeroNet/ZeroNet/issues/2095
2019-07-15 14:13:14 +07:00
Vadim Ushakov
2e1b0e093f TrackerZero: fix errors in actionStatsTrackerZero() 2019-07-14 18:26:38 +07:00
Vadim Ushakov
fee63a1ed2 TrackerZero: add the missing .onion suffix 2019-07-13 21:16:15 +07:00
Vadim Ushakov
142f5862df TrackerZero: ignore "announce" action if the plugin is disabled by its settings 2019-07-13 15:51:28 +07:00
Vadim Ushakov
de5a9ff67b TrackerShare: drop incomplete support of Bootstrapper, we now have TrackerZero, which is able to register its addresses by itself 2019-07-13 15:38:05 +07:00
Vadim Ushakov
d57deaa8e4 TrackerShare: ignore the udp:// protocol, when UDP is known to be disabled by the config 2019-07-13 15:36:05 +07:00
Vadim Ushakov
eb6d0c9644 TrackerZero: add missing @helper.encodeResponse 2019-07-13 15:35:16 +07:00
Vadim Ushakov
a36b2c9241 TrackerZero: add support of persistent onion addresses 2019-07-13 02:34:07 +07:00
Vadim Ushakov
9a8519b487 TrackerZero: read settings from tracker-zero.json; register listened addresses in TrackerShare/AnnounceShare 2019-07-13 00:51:31 +07:00
Vadim Ushakov
f4708d9781 TrackerZero: add a separate class so that complicated code does not run in overloaded methods 2019-07-12 15:44:23 +07:00
Vadim Ushakov
b7550474a5 TrackerZero: copy the Bootstrapper code to a new plugin TrackerZero 2019-07-12 01:48:52 +07:00
Vadim Ushakov
735061b79d Merge branch 'py3' into plugins 2019-07-12 01:28:32 +07:00
Vadim Ushakov
aa6d7a468d TrackerShare: store trackers in shared-trackers.json, since trackers.json is in use by AnnounceShare and may have an incompatible format in the future 2019-07-07 14:31:13 +07:00
Vadim Ushakov
f5b63a430c Merge branch 'py3' into plugins 2019-07-06 17:35:37 +07:00
Vadim Ushakov
6ee1db4197 TrackerList: fix a typo 2019-07-06 17:21:14 +07:00
Vadim Ushakov
37627822de TrackerList: make the plugin compatible with TrackerShare 2019-07-06 01:39:32 +07:00
Vadim Ushakov
d35a15d674 TrackerShare: don't delete "my" trackers on errors, but delete them on program restart; add "persistent" flag for manually added trackers 2019-07-06 01:15:37 +07:00
Vadim Ushakov
c8545ce054 TrackerShare: print the total number of discovered trackers at the end of discovery procedure 2019-07-05 23:41:23 +07:00
Vadim Ushakov
8f8e10a703 TrackerShare: Change the log level for several messages from debug to info
Increased the log level for messages that are not too noisy and that help keep track of events.
2019-07-05 23:17:13 +07:00
Vadim Ushakov
33c81a89e9 TrackerShare: rename the config arguments to avoid the name clash with AnnounceShare's arguments
--working_shared_trackers_limit -> --shared_trackers_limit
--working_shared_trackers_limit_per_protocol -> --shared_trackers_limit_per_protocol

Also modify the help messages so that they are more consistent with how the code really works.
2019-07-05 19:38:00 +07:00
Vadim Ushakov
84526a6657 TrackerShare: raise the default per-protocol limit from 2 to 4 2019-07-05 19:28:03 +07:00
Vadim Ushakov
3910338b28 Import plugin: TrackerList 2019-07-05 19:16:25 +07:00
Vadim Ushakov
b2e92b1d10 Import the redesigned AnnounceShare under the new name TrackerShare 2019-07-05 19:14:23 +07:00
45 changed files with 3095 additions and 756 deletions

32
build-docker-images.sh Executable file

@ -0,0 +1,32 @@
#!/bin/sh
set -e
arg_push=
case "$1" in
--push) arg_push=y ; shift ;;
esac
default_suffix=alpine
prefix="${1:-local/}"
for dokerfile in dockerfiles/Dockerfile.* ; do
suffix="`echo "$dokerfile" | sed 's/.*\/Dockerfile\.//'`"
image_name="${prefix}zeronet:$suffix"
latest=""
t_latest=""
if [ "$suffix" = "$default_suffix" ] ; then
latest="${prefix}zeronet:latest"
t_latest="-t ${latest}"
fi
echo "DOCKER BUILD $image_name"
docker build -f "$dokerfile" -t "$image_name" $t_latest .
if [ -n "$arg_push" ] ; then
docker push "$image_name"
if [ -n "$latest" ] ; then
docker push "$latest"
fi
fi
done


@ -0,0 +1 @@
Dockerfile.alpine3.13


@ -0,0 +1,44 @@
# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
FROM alpine:3.13
# Base settings
ENV HOME /root
# Install packages
# Install packages
COPY install-dep-packages.sh /root/install-dep-packages.sh
RUN /root/install-dep-packages.sh install
COPY requirements.txt /root/requirements.txt
RUN pip3 install -r /root/requirements.txt \
&& /root/install-dep-packages.sh remove-makedeps \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version
# Add Zeronet source
COPY . /root
VOLUME /root/data
# Control if Tor proxy is started
ENV ENABLE_TOR false
WORKDIR /root
# Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
# Expose ports
EXPOSE 43110 26552


@ -0,0 +1 @@
Dockerfile.ubuntu20.04


@ -0,0 +1,44 @@
# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
FROM ubuntu:20.04
# Base settings
ENV HOME /root
# Install packages
# Install packages
COPY install-dep-packages.sh /root/install-dep-packages.sh
RUN /root/install-dep-packages.sh install
COPY requirements.txt /root/requirements.txt
RUN pip3 install -r /root/requirements.txt \
&& /root/install-dep-packages.sh remove-makedeps \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version
# Add Zeronet source
COPY . /root
VOLUME /root/data
# Control if Tor proxy is started
ENV ENABLE_TOR false
WORKDIR /root
# Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
# Expose ports
EXPOSE 43110 26552

34
dockerfiles/gen-dockerfiles.sh Executable file

@ -0,0 +1,34 @@
#!/bin/sh
set -e
die() {
echo "$@" > /dev/stderr
exit 1
}
for os in alpine:3.13 ubuntu:20.04 ; do
prefix="`echo "$os" | sed -e 's/://'`"
short_prefix="`echo "$os" | sed -e 's/:.*//'`"
zeronet="zeronet-Dockerfile"
dockerfile="Dockerfile.$prefix"
dockerfile_short="Dockerfile.$short_prefix"
echo "GEN $dockerfile"
if ! test -f "$zeronet" ; then
die "No such file: $zeronet"
fi
echo "\
# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
# SEE $zeronet FOR THE SOURCE FILE.
FROM $os
`cat "$zeronet"`
" > "$dockerfile.tmp" && mv "$dockerfile.tmp" "$dockerfile" && ln -s -f "$dockerfile" "$dockerfile_short"
done


@ -1,14 +1,16 @@
FROM alpine:3.12
# Base settings
ENV HOME /root
# Install packages
COPY install-dep-packages.sh /root/install-dep-packages.sh
RUN /root/install-dep-packages.sh install
COPY requirements.txt /root/requirements.txt
#Install ZeroNet
RUN pip3 install -r /root/requirements.txt \
RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
&& /root/install-dep-packages.sh remove-makedeps \
&& pip3 install -r /root/requirements.txt \
&& apk del python3-dev gcc libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
@ -18,6 +20,7 @@ RUN python3 -V \
&& openssl version
# Add Zeronet source
COPY . /root
VOLUME /root/data
@ -31,4 +34,3 @@ CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver
# Expose ports
EXPOSE 43110 26552

49
install-dep-packages.sh Executable file

@ -0,0 +1,49 @@
#!/bin/sh
set -e
do_alpine() {
local deps="python3 py3-pip openssl tor"
local makedeps="python3-dev gcc g++ libffi-dev musl-dev make automake autoconf libtool"
case "$1" in
install)
apk --update --no-cache --no-progress add $deps $makedeps
;;
remove-makedeps)
apk del $makedeps
;;
esac
}
do_ubuntu() {
local deps="python3 python3-pip openssl tor"
local makedeps="python3-dev gcc g++ libffi-dev make automake autoconf libtool"
case "$1" in
install)
apt-get update && \
apt-get install --no-install-recommends -y $deps $makedeps && \
rm -rf /var/lib/apt/lists/*
;;
remove-makedeps)
apt-get remove -y $makedeps
;;
esac
}
if test -f /etc/os-release ; then
. /etc/os-release
elif test -f /usr/lib/os-release ; then
. /usr/lib/os-release
else
echo "No such file: /etc/os-release" > /dev/stderr
exit 1
fi
case "$ID" in
ubuntu) do_ubuntu "$@" ;;
alpine) do_alpine "$@" ;;
*)
echo "Unsupported OS ID: $ID" > /dev/stderr
exit 1
esac

@ -1 +0,0 @@
Subproject commit d3cbe172712951f43bb6589e92e9e9eeb86c3172


@ -82,6 +82,17 @@ class Config(object):
from Crypt import CryptHash
access_key_default = CryptHash.random(24, "base64") # Used to allow restrited plugins when multiuser plugin is enabled
trackers = [
# by zeroseed at http://127.0.0.1:43110/19HKdTAeBh5nRiKn791czY7TwRB1QNrf1Q/?:users/1HvNGwHKqhj3ZMEM53tz6jbdqe4LRpanEu:zn:dc17f896-bf3f-4962-bdd4-0a470040c9c5
"zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441",
"zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441",
"zero://my562dxpjropcd5hy3nd5pemsc4aavbiptci5amwxzbelmzgkkuxpvid.onion:15441",
"zero://pn4q2zzt2pw4nk7yidxvsxmydko7dfibuzxdswi6gu6ninjpofvqs2id.onion:15441",
"zero://6i54dd5th73oelv636ivix6sjnwfgk2qsltnyvswagwphub375t3xcad.onion:15441",
"zero://tl74auz4tyqv4bieeclmyoe4uwtoc2dj7fdqv4nc4gl5j2bwg2r26bqd.onion:15441",
"zero://wlxav3szbrdhest4j7dib2vgbrd7uj7u7rnuzg22cxbih7yxyg2hsmid.onion:15441",
"zero://zy7wttvjtsijt5uwmlar4yguvjc2gppzbdj4v6bujng6xwjmkdg7uvqd.onion:15441",
# ZeroNet 0.7.2 defaults:
"zero://boot3rdez4rzn36x.onion:15441",
"http://open.acgnxtracker.com:80/announce", # DE
"http://tracker.bt4g.com:2095/announce", # Cloudflare
@ -275,10 +286,29 @@ class Config(object):
self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=10, type=int, metavar='limit')
self.parser.add_argument('--file_size_limit', help='Maximum per file size limit in MB', default=10, type=int, metavar='limit')
self.parser.add_argument('--connected_limit', help='Max connected peer per site', default=8, type=int, metavar='connected_limit')
self.parser.add_argument('--connected_limit', help='Max number of connected peers per site. Soft limit.', default=10, type=int, metavar='connected_limit')
self.parser.add_argument('--global_connected_limit', help='Max connections', default=512, type=int, metavar='global_connected_limit')
self.parser.add_argument('--global_connected_limit', help='Max number of connections. Soft limit.', default=512, type=int, metavar='global_connected_limit')
self.parser.add_argument('--workers', help='Download workers per site', default=5, type=int, metavar='workers')
self.parser.add_argument('--site_announce_interval_min', help='Site announce interval for the most active sites, in minutes.', default=4, type=int, metavar='site_announce_interval_min')
self.parser.add_argument('--site_announce_interval_max', help='Site announce interval for inactive sites, in minutes.', default=30, type=int, metavar='site_announce_interval_max')
self.parser.add_argument('--site_peer_check_interval_min', help='Connectable peers check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_peer_check_interval_min')
self.parser.add_argument('--site_peer_check_interval_max', help='Connectable peers check interval for inactive sites, in minutes.', default=20, type=int, metavar='site_peer_check_interval_max')
self.parser.add_argument('--site_update_check_interval_min', help='Site update check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_update_check_interval_min')
self.parser.add_argument('--site_update_check_interval_max', help='Site update check interval for inactive sites, in minutes.', default=45, type=int, metavar='site_update_check_interval_max')
self.parser.add_argument('--site_connectable_peer_count_max', help='Search for as many connectable peers for the most active sites', default=10, type=int, metavar='site_connectable_peer_count_max')
self.parser.add_argument('--site_connectable_peer_count_min', help='Search for as many connectable peers for inactive sites', default=2, type=int, metavar='site_connectable_peer_count_min')
self.parser.add_argument('--send_back_lru_size', help='Size of the send back LRU cache', default=5000, type=int, metavar='send_back_lru_size')
self.parser.add_argument('--send_back_limit', help='Send no more than so many files at once back to peer, when we discovered that the peer held older file versions', default=3, type=int, metavar='send_back_limit')
self.parser.add_argument('--expose_no_ownership', help='By default, ZeroNet tries checking updates for own sites more frequently. This can be used by a third party for revealing the network addresses of a site owner. If this option is enabled, ZeroNet performs the checks in the same way for any sites.', type='bool', choices=[True, False], default=False)
self.parser.add_argument('--simultaneous_connection_throttle_threshold', help='Throttle opening new connections when the number of outgoing connections in not fully established state exceeds the threshold.', default=15, type=int, metavar='simultaneous_connection_throttle_threshold')
self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
self.parser.add_argument('--fileserver_port', help='FileServer bind port (0: randomize)', default=0, type=int, metavar='port')
self.parser.add_argument('--fileserver_port_range', help='FileServer randomization range', default="10000-40000", metavar='port')


@ -17,12 +17,13 @@ from util import helper
class Connection(object): class Connection(object):
__slots__ = ( __slots__ = (
"sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type", "sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
"handshake", "crypt", "connected", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection", "handshake", "crypt", "connected", "connecting", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
"last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock", "last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
"last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams" "last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
) )
def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False): def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
self.server = server
self.sock = sock self.sock = sock
self.cert_pin = None self.cert_pin = None
if "#" in ip: if "#" in ip:
@ -42,7 +43,6 @@ class Connection(object):
self.is_private_ip = False self.is_private_ip = False
self.is_tracker_connection = is_tracker_connection self.is_tracker_connection = is_tracker_connection
self.server = server
self.unpacker = None # Stream incoming socket messages here self.unpacker = None # Stream incoming socket messages here
self.unpacker_bytes = 0 # How many bytes the unpacker received self.unpacker_bytes = 0 # How many bytes the unpacker received
self.req_id = 0 # Last request id self.req_id = 0 # Last request id
@ -50,6 +50,7 @@ class Connection(object):
self.crypt = None # Connection encryption method self.crypt = None # Connection encryption method
self.sock_wrapped = False # Socket wrapped to encryption self.sock_wrapped = False # Socket wrapped to encryption
self.connecting = False
self.connected = False self.connected = False
self.event_connected = gevent.event.AsyncResult() # Solves on handshake received self.event_connected = gevent.event.AsyncResult() # Solves on handshake received
self.closed = False self.closed = False
@ -81,11 +82,11 @@ class Connection(object):
def setIp(self, ip): def setIp(self, ip):
self.ip = ip self.ip = ip
self.ip_type = helper.getIpType(ip) self.ip_type = self.server.getIpType(ip)
self.updateName() self.updateName()
def createSocket(self): def createSocket(self):
if helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"): if self.server.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
# Create IPv6 connection as IPv4 when using proxy # Create IPv6 connection as IPv4 when using proxy
return socket.socket(socket.AF_INET6, socket.SOCK_STREAM) return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else: else:
@ -118,13 +119,28 @@ class Connection(object):
# Open connection to peer and wait for handshake # Open connection to peer and wait for handshake
def connect(self): def connect(self):
self.connecting = True
try:
return self._connect()
except Exception as err:
self.connecting = False
self.connected = False
raise
def _connect(self):
self.updateOnlineStatus(outgoing_activity=True)
if not self.event_connected or self.event_connected.ready():
self.event_connected = gevent.event.AsyncResult()
self.type = "out" self.type = "out"
unreachability = self.server.getIpUnreachability(self.ip)
if unreachability:
raise Exception(unreachability)
if self.ip_type == "onion": if self.ip_type == "onion":
if not self.server.tor_manager or not self.server.tor_manager.enabled:
raise Exception("Can't connect to onion addresses, no Tor controller present")
self.sock = self.server.tor_manager.createSocket(self.ip, self.port) self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
elif config.tor == "always" and helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
raise Exception("Can't connect to local IPs in Tor: always mode")
elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection: elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
if config.trackers_proxy == "tor": if config.trackers_proxy == "tor":
self.sock = self.server.tor_manager.createSocket(self.ip, self.port) self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
@ -148,37 +164,56 @@ class Connection(object):
self.sock.connect(sock_address) self.sock.connect(sock_address)
# Implicit SSL if self.shouldEncrypt():
should_encrypt = not self.ip_type == "onion" and self.ip not in self.server.broken_ssl_ips and self.ip not in config.ip_local
if self.cert_pin:
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", cert_pin=self.cert_pin)
self.sock.do_handshake()
self.crypt = "tls-rsa"
self.sock_wrapped = True
elif should_encrypt and "tls-rsa" in CryptConnection.manager.crypt_supported:
try: try:
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa") self.wrapSocket()
self.sock.do_handshake()
self.crypt = "tls-rsa"
self.sock_wrapped = True
except Exception as err: except Exception as err:
if not config.force_encryption: if self.sock:
self.sock.close()
self.sock = None
if self.mustEncrypt():
raise
self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err))) self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
self.server.broken_ssl_ips[self.ip] = True self.server.broken_ssl_ips[self.ip] = True
self.sock.close() return self.connect()
self.crypt = None
self.sock = self.createSocket()
self.sock.settimeout(30)
self.sock.connect(sock_address)
# Detect protocol # Detect protocol
self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
event_connected = self.event_connected event_connected = self.event_connected
gevent.spawn(self.messageLoop) self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
self.server.outgoing_pool.spawn(self.messageLoop)
connect_res = event_connected.get() # Wait for handshake connect_res = event_connected.get() # Wait for handshake
if self.sock:
self.sock.settimeout(timeout_before) self.sock.settimeout(timeout_before)
return connect_res return connect_res
def mustEncrypt(self):
if self.cert_pin:
return True
if (not self.ip_type == "onion") and config.force_encryption:
return True
return False
def shouldEncrypt(self):
if self.mustEncrypt():
return True
return (
(not self.ip_type == "onion")
and
(self.ip not in self.server.broken_ssl_ips)
and
(self.ip not in config.ip_local)
and
("tls-rsa" in CryptConnection.manager.crypt_supported)
)
def wrapSocket(self, crypt="tls-rsa", do_handshake=True):
server = (self.type == "in")
sock = CryptConnection.manager.wrapSocket(self.sock, crypt, server=server, cert_pin=self.cert_pin)
sock.do_handshake()
self.crypt = crypt
self.sock_wrapped = True
self.sock = sock
# Handle incoming connection # Handle incoming connection
def handleIncomingConnection(self, sock): def handleIncomingConnection(self, sock):
self.log("Incoming connection...") self.log("Incoming connection...")
@ -192,9 +227,7 @@ class Connection(object):
first_byte = sock.recv(1, gevent.socket.MSG_PEEK) first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
if first_byte == b"\x16": if first_byte == b"\x16":
self.log("Crypt in connection using implicit SSL") self.log("Crypt in connection using implicit SSL")
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", True) self.wrapSocket(do_handshake=False)
self.sock_wrapped = True
self.crypt = "tls-rsa"
except Exception as err: except Exception as err:
self.log("Socket peek error: %s" % Debug.formatException(err)) self.log("Socket peek error: %s" % Debug.formatException(err))
self.messageLoop() self.messageLoop()
@ -213,6 +246,7 @@ class Connection(object):
self.protocol = "v2" self.protocol = "v2"
self.updateName() self.updateName()
self.connected = True self.connected = True
self.connecting = False
buff_len = 0 buff_len = 0
req_len = 0 req_len = 0
self.unpacker_bytes = 0 self.unpacker_bytes = 0
@ -435,13 +469,13 @@ class Connection(object):
self.updateName() self.updateName()
self.event_connected.set(True) # Mark handshake as done self.event_connected.set(True) # Mark handshake as done
self.event_connected = None
self.handshake_time = time.time() self.handshake_time = time.time()
# Handle incoming message # Handle incoming message
def handleMessage(self, message): def handleMessage(self, message):
cmd = message["cmd"] cmd = message["cmd"]
self.updateOnlineStatus(successful_activity=True)
self.last_message_time = time.time() self.last_message_time = time.time()
self.last_cmd_recv = cmd self.last_cmd_recv = cmd
if cmd == "response": # New style response if cmd == "response": # New style response
@ -458,12 +492,10 @@ class Connection(object):
self.last_ping_delay = ping self.last_ping_delay = ping
# Server switched to crypt, lets do it also if not crypted already # Server switched to crypt, lets do it also if not crypted already
if message.get("crypt") and not self.sock_wrapped: if message.get("crypt") and not self.sock_wrapped:
self.crypt = message["crypt"] crypt = message["crypt"]
server = (self.type == "in") server = (self.type == "in")
self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (self.crypt, server, ping)) self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (crypt, server, ping))
self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin) self.wrapSocket(crypt)
self.sock.do_handshake()
self.sock_wrapped = True
if not self.sock_wrapped and self.cert_pin: if not self.sock_wrapped and self.cert_pin:
self.close("Crypt connection error: Socket not encrypted, but certificate pin present") self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
@ -491,8 +523,7 @@ class Connection(object):
server = (self.type == "in") server = (self.type == "in")
self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server)) self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
try: try:
self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin) self.wrapSocket(self.crypt)
self.sock_wrapped = True
except Exception as err: except Exception as err:
if not config.force_encryption: if not config.force_encryption:
self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err))) self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
@ -504,6 +535,7 @@ class Connection(object):
# Send data to connection # Send data to connection
def send(self, message, streaming=False): def send(self, message, streaming=False):
self.updateOnlineStatus(outgoing_activity=True)
self.last_send_time = time.time() self.last_send_time = time.time()
if config.debug_socket: if config.debug_socket:
self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % ( self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
@ -543,6 +575,11 @@ class Connection(object):
message = None message = None
with self.send_lock: with self.send_lock:
self.sock.sendall(data) self.sock.sendall(data)
# XXX: Should not be used here:
# self.updateOnlineStatus(successful_activity=True)
# Looks like self.sock.sendall() returns normally, instead of
# raising an Exception (at least, some times).
# So the only way of detecting the network activity is self.handleMessage()
except Exception as err: except Exception as err:
self.close("Send error: %s (cmd: %s)" % (err, stat_key)) self.close("Send error: %s (cmd: %s)" % (err, stat_key))
return False return False
@ -554,7 +591,7 @@ class Connection(object):
buff = 64 * 1024 buff = 64 * 1024
bytes_left = read_bytes bytes_left = read_bytes
bytes_sent = 0 bytes_sent = 0
while True: while True and self.sock != None:
self.last_send_time = time.time() self.last_send_time = time.time()
data = file.read(min(bytes_left, buff)) data = file.read(min(bytes_left, buff))
bytes_sent += len(data) bytes_sent += len(data)
@ -584,7 +621,8 @@ class Connection(object):
self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd} self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
if stream_to: if stream_to:
self.waiting_streams[self.req_id] = stream_to self.waiting_streams[self.req_id] = stream_to
self.send(data) # Send request if not self.send(data): # Send request
return False
res = event.get() # Wait until event solves res = event.get() # Wait until event solves
return res return res
@ -608,6 +646,7 @@ class Connection(object):
return False # Already closed return False # Already closed
self.closed = True self.closed = True
self.connected = False self.connected = False
self.connecting = False
if self.event_connected: if self.event_connected:
self.event_connected.set(False) self.event_connected.set(False)
@ -633,3 +672,12 @@ class Connection(object):
self.sock = None self.sock = None
self.unpacker = None self.unpacker = None
self.event_connected = None self.event_connected = None
self.crypt = None
self.sock_wrapped = False
return True
def updateOnlineStatus(self, outgoing_activity=False, successful_activity=False):
self.server.updateOnlineStatus(self,
outgoing_activity=outgoing_activity,
successful_activity=successful_activity)


@ -1,4 +1,5 @@
import logging import logging
import re
import time import time
import sys import sys
import socket import socket
@ -8,6 +9,7 @@ import gevent
import msgpack import msgpack
from gevent.server import StreamServer from gevent.server import StreamServer
from gevent.pool import Pool from gevent.pool import Pool
import gevent.event
import util import util
from util import helper from util import helper
@ -36,20 +38,32 @@ class ConnectionServer(object):
self.port_opened = {} self.port_opened = {}
self.peer_blacklist = SiteManager.peer_blacklist self.peer_blacklist = SiteManager.peer_blacklist
self.managed_pools = {}
self.tor_manager = TorManager(self.ip, self.port) self.tor_manager = TorManager(self.ip, self.port)
self.connections = [] # Connections self.connections = [] # Connections
self.whitelist = config.ip_local # No flood protection on this ips self.whitelist = config.ip_local # No flood protection on this ips
self.ip_incoming = {} # Incoming connections from ip in the last minute to avoid connection flood self.ip_incoming = {} # Incoming connections from ip in the last minute to avoid connection flood
self.broken_ssl_ips = {} # Peerids of broken ssl connections self.broken_ssl_ips = {} # Peerids of broken ssl connections
self.ips = {} # Connection by ip self.ips = {} # Connection by ip
self.has_internet = True # Internet outage detection self.has_internet = True # Internet outage detection
self.internet_online_since = 0
self.internet_offline_since = 0
self.last_outgoing_internet_activity_time = 0 # Last time the application tried to send any data
self.last_successful_internet_activity_time = 0 # Last time the application successfully sent or received any data
self.internet_outage_threshold = 60 * 2
self.stream_server = None self.stream_server = None
self.stream_server_proxy = None self.stream_server_proxy = None
self.running = False self.running = False
self.stopping = False self.stopping = False
self.stopping_event = gevent.event.Event()
self.thread_checker = None self.thread_checker = None
self.thread_pool = Pool(None)
self.managed_pools["thread"] = self.thread_pool
self.stat_recv = defaultdict(lambda: defaultdict(int)) self.stat_recv = defaultdict(lambda: defaultdict(int))
self.stat_sent = defaultdict(lambda: defaultdict(int)) self.stat_sent = defaultdict(lambda: defaultdict(int))
self.bytes_recv = 0 self.bytes_recv = 0
@ -61,8 +75,14 @@ class ConnectionServer(object):
self.num_outgoing = 0 self.num_outgoing = 0
self.had_external_incoming = False self.had_external_incoming = False
self.timecorrection = 0.0 self.timecorrection = 0.0
self.pool = Pool(500) # do not accept more than 500 connections self.pool = Pool(500) # do not accept more than 500 connections
self.managed_pools["incoming"] = self.pool
self.outgoing_pool = Pool(None)
self.managed_pools["outgoing"] = self.outgoing_pool
# Bittorrent style peerid # Bittorrent style peerid
self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64") self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")
@ -83,10 +103,11 @@ class ConnectionServer(object):
return False return False
self.running = True self.running = True
if check_connections: if check_connections:
self.thread_checker = gevent.spawn(self.checkConnections) self.thread_checker = self.spawn(self.checkConnections)
CryptConnection.manager.loadCerts() CryptConnection.manager.loadCerts()
if config.tor != "disable": if config.tor != "disable":
self.tor_manager.start() self.tor_manager.start()
self.tor_manager.startOnions()
if not self.port: if not self.port:
self.log.info("No port found, not binding") self.log.info("No port found, not binding")
return False return False
@ -107,7 +128,7 @@ class ConnectionServer(object):
return None return None
if self.stream_server_proxy: if self.stream_server_proxy:
gevent.spawn(self.listenProxy) self.spawn(self.listenProxy)
try: try:
self.stream_server.serve_forever() self.stream_server.serve_forever()
except Exception as err: except Exception as err:
@ -115,22 +136,92 @@ class ConnectionServer(object):
return False return False
self.log.debug("Stopped.") self.log.debug("Stopped.")
def stop(self): def stop(self, ui_websocket=None):
self.log.debug("Stopping %s" % self.stream_server) self.log.debug("Stopping %s" % self.stream_server)
self.stopping = True self.stopping = True
self.running = False self.running = False
self.stopping_event.set()
self.onStop(ui_websocket=ui_websocket)
def onStop(self, ui_websocket=None):
timeout = 30
start_time = time.time()
join_quantum = 0.1
prev_msg = None
while True:
if time.time() >= start_time + timeout:
break
total_size = 0
sizes = {}
timestep = 0
for name, pool in list(self.managed_pools.items()):
timestep += join_quantum
pool.join(timeout=join_quantum)
size = len(pool)
if size:
sizes[name] = size
total_size += size
if len(sizes) == 0:
break
if timestep < 1:
time.sleep(1 - timestep)
# format message
s = ""
for name, size in sizes.items():
s += "%s pool: %s, " % (name, size)
msg = "Waiting for tasks in managed pools to stop: %s" % s
# Prevent flooding to log
if msg != prev_msg:
prev_msg = msg
self.log.info("%s", msg)
percent = 100 * (time.time() - start_time) / timeout
msg = "File Server: waiting for %s tasks to stop" % total_size
self.sendShutdownProgress(ui_websocket, msg, percent)
for name, pool in list(self.managed_pools.items()):
size = len(pool)
if size:
self.log.info("Killing %s tasks in %s pool", size, name)
pool.kill()
self.sendShutdownProgress(ui_websocket, "File Server stopped. Now to exit.", 100)
if self.thread_checker: if self.thread_checker:
gevent.kill(self.thread_checker) gevent.kill(self.thread_checker)
self.thread_checker = None
if self.stream_server: if self.stream_server:
self.stream_server.stop() self.stream_server.stop()
def sendShutdownProgress(self, ui_websocket, message, progress):
if not ui_websocket:
return
ui_websocket.cmd("progress", ["shutdown", message, progress])
time.sleep(0.01)
# Sleeps the specified amount of time or until ConnectionServer is stopped
def sleep(self, t):
if t:
self.stopping_event.wait(timeout=t)
else:
time.sleep(t)
# Spawns a thread that will be waited for on server being stopped (and killed after a timeout)
def spawn(self, *args, **kwargs):
thread = self.thread_pool.spawn(*args, **kwargs)
return thread
def closeConnections(self): def closeConnections(self):
self.log.debug("Closing all connection: %s" % len(self.connections)) self.log.debug("Closing all connection: %s" % len(self.connections))
for connection in self.connections[:]: for connection in self.connections[:]:
connection.close("Close all connections") connection.close("Close all connections")
def handleIncomingConnection(self, sock, addr): def handleIncomingConnection(self, sock, addr):
if config.offline: if not self.allowsAcceptingConnections():
sock.close() sock.close()
return False return False
@ -148,7 +239,7 @@ class ConnectionServer(object):
self.ip_incoming[ip] += 1 self.ip_incoming[ip] += 1
if self.ip_incoming[ip] > 6: # Allow 6 in 1 minute from same ip if self.ip_incoming[ip] > 6: # Allow 6 in 1 minute from same ip
self.log.debug("Connection flood detected from %s" % ip) self.log.debug("Connection flood detected from %s" % ip)
time.sleep(30) self.sleep(30)
sock.close() sock.close()
return False return False
else: else:
@ -167,7 +258,7 @@ class ConnectionServer(object):
pass pass
def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False): def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
ip_type = helper.getIpType(ip) ip_type = self.getIpType(ip)
has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
if has_per_site_onion: # Site-unique connection for Tor if has_per_site_onion: # Site-unique connection for Tor
if ip.endswith(".onion"): if ip.endswith(".onion"):
@ -203,7 +294,7 @@ class ConnectionServer(object):
return connection return connection
# No connection found # No connection found
if create and not config.offline: # Allow to create new connection if not found if create and self.allowsCreatingConnections():
if port == 0: if port == 0:
raise Exception("This peer is not connectable") raise Exception("This peer is not connectable")
@ -211,6 +302,7 @@ class ConnectionServer(object):
raise Exception("This peer is blacklisted") raise Exception("This peer is blacklisted")
try: try:
#self.log.info("Connection to: %s:%s", ip, port)
if has_per_site_onion: # Lock connection to site if has_per_site_onion: # Lock connection to site
connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection) connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
else: else:
@ -229,11 +321,12 @@ class ConnectionServer(object):
self.last_connection_id_current_version += 1 self.last_connection_id_current_version += 1
except Exception as err: except Exception as err:
#self.log.info("Connection error (%s, %s): %s", ip, port, Debug.formatException(err))
connection.close("%s Connect error: %s" % (ip, Debug.formatException(err))) connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
raise err raise err
if len(self.connections) > config.global_connected_limit: if len(self.connections) > config.global_connected_limit:
gevent.spawn(self.checkMaxConnections) self.spawn(self.checkMaxConnections)
return connection return connection
else: else:
@ -256,12 +349,12 @@ class ConnectionServer(object):
def checkConnections(self): def checkConnections(self):
run_i = 0 run_i = 0
time.sleep(15) self.sleep(15)
while self.running: while self.running:
run_i += 1 run_i += 1
self.ip_incoming = {} # Reset connected ips counter self.ip_incoming = {} # Reset connected ips counter
last_message_time = 0
s = time.time() s = time.time()
self.updateOnlineStatus(None)
for connection in self.connections[:]: # Make a copy for connection in self.connections[:]: # Make a copy
if connection.ip.endswith(".onion") or config.tor == "always": if connection.ip.endswith(".onion") or config.tor == "always":
timeout_multipler = 2 timeout_multipler = 2
@ -269,9 +362,6 @@ class ConnectionServer(object):
timeout_multipler = 1 timeout_multipler = 1
idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time) idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
if connection.last_message_time > last_message_time and not connection.is_private_ip:
# Message from local IPs does not means internet connection
last_message_time = connection.last_message_time
if connection.unpacker and idle > 30: if connection.unpacker and idle > 30:
# Delete the unpacker if not needed # Delete the unpacker if not needed
@ -319,24 +409,12 @@ class ConnectionServer(object):
# Reset bad action counter every 30 min # Reset bad action counter every 30 min
connection.bad_actions = 0 connection.bad_actions = 0
# Internet outage detection
if time.time() - last_message_time > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
# Offline: Last message more than 60-600sec depending on connection number
if self.has_internet and last_message_time:
self.has_internet = False
self.onInternetOffline()
else:
# Online
if not self.has_internet:
self.has_internet = True
self.onInternetOnline()
self.timecorrection = self.getTimecorrection() self.timecorrection = self.getTimecorrection()
if time.time() - s > 0.01: if time.time() - s > 0.01:
self.log.debug("Connection cleanup in %.3fs" % (time.time() - s)) self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))
time.sleep(15) self.sleep(15)
self.log.debug("Checkconnections ended") self.log.debug("Checkconnections ended")
@util.Noparallel(blocking=False) @util.Noparallel(blocking=False)
@ -361,6 +439,68 @@ class ConnectionServer(object):
)) ))
return num_closed return num_closed
# Returns True if we should slow down opening new connections as at the moment
# there are too many connections being established and not connected completely
# (not entered the message loop yet).
def shouldThrottleNewConnections(self):
threshold = config.simultaneous_connection_throttle_threshold
if len(self.connections) <= threshold:
return False
nr_connections_being_established = 0
for connection in self.connections[:]: # Make a copy
if connection.connecting and not connection.connected and connection.type == "out":
nr_connections_being_established += 1
if nr_connections_being_established > threshold:
return True
return False
# Internet outage detection
def updateOnlineStatus(self, connection, outgoing_activity=False, successful_activity=False):
now = time.time()
if connection and not connection.is_private_ip:
if outgoing_activity:
self.last_outgoing_internet_activity_time = now
if successful_activity:
self.last_successful_internet_activity_time = now
self.setInternetStatus(True)
return
if not self.last_outgoing_internet_activity_time:
return
if (
(self.last_successful_internet_activity_time < now - self.internet_outage_threshold)
and
(self.last_successful_internet_activity_time < self.last_outgoing_internet_activity_time)
):
self.setInternetStatus(False)
return
# This is the old algorithm just in case we missed something
idle = now - self.last_successful_internet_activity_time
if idle > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
# Offline: Last successful activity more than 60-600sec depending on connection number
self.setInternetStatus(False)
return
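As a worked example of the fallback threshold above (sample connection counts are assumptions, not from the source): the allowed silence shrinks from 600 seconds down to 60 seconds as the number of open connections grows.

    # Same formula as above, extracted for illustration only.
    def outage_threshold(num_connections):
        return max(60, 60 * 10 / max(1, float(num_connections) / 50))

    outage_threshold(10)    # -> 600.0  few connections: wait up to 10 minutes
    outage_threshold(100)   # -> 300.0  more connections: 5 minutes
    outage_threshold(1000)  # -> 60     many connections: only 1 minute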
def setInternetStatus(self, status):
if self.has_internet == status:
return
self.has_internet = status
if self.has_internet:
self.internet_online_since = time.time()
self.spawn(self.onInternetOnline)
else:
self.internet_offline_since = time.time()
self.spawn(self.onInternetOffline)
def isInternetOnline(self):
return self.has_internet
def onInternetOnline(self):
self.log.info("Internet online")
@ -368,6 +508,32 @@ class ConnectionServer(object):
self.had_external_incoming = False
self.log.info("Internet offline")
def setOfflineMode(self, offline_mode):
if config.offline == offline_mode:
return
config.offline = offline_mode # Yep, awkward
if offline_mode:
self.log.info("offline mode is ON")
else:
self.log.info("offline mode is OFF")
def isOfflineMode(self):
return config.offline
def allowsCreatingConnections(self):
if self.isOfflineMode():
return False
if self.stopping:
return False
return True
def allowsAcceptingConnections(self):
if self.isOfflineMode():
return False
if self.stopping:
return False
return True
def getTimecorrection(self):
corrections = sorted([
connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
@ -379,3 +545,48 @@ class ConnectionServer(object):
mid = int(len(corrections) / 2 - 1)
median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
return median
############################################################################
# Methods for handling network address types
# (ipv4, ipv6, onion etc... more to be implemented by plugins)
#
# All the functions handling network address types have "Ip" in the name.
# That's how it was in the initial codebase, and I keep the naming since
# I couldn't think of a better option.
# "IP" is short and clear enough to signal that a variable contains a peer
# address or some other transport-level address, not the address of
# a ZeroNet site.
#
# Returns type of the given network address.
# Since: 0.8.0
# Replaces helper.getIpType() in order to be extensible by plugins.
def getIpType(self, ip):
if ip.endswith(".onion"):
return "onion"
elif ":" in ip:
return "ipv6"
elif re.match(r"[0-9\.]+$", ip):
return "ipv4"
else:
return "unknown"
# Checks whether a network address is reachable in the current configuration
# and returns a string describing why it is not.
# If the network address is reachable, returns False.
# Since: 0.8.0
def getIpUnreachability(self, ip):
ip_type = self.getIpType(ip)
if ip_type == 'onion' and not self.tor_manager.enabled:
return "Can't connect to onion addresses, no Tor controller present"
if config.tor == "always" and helper.isPrivateIp(ip) and ip not in config.ip_local:
return "Can't connect to local IPs in Tor: always mode"
return False
# Returns True if ConnectionServer has the means to establish outgoing
# connections to the given address.
# Since: 0.8.0
def isIpReachable(self, ip):
return self.getIpUnreachability(ip) == False
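For illustration, a small sketch of the new helpers on sample addresses (the server instance and the addresses are assumptions, not from this changeset):

    server.getIpType("1.2.3.4")            # -> "ipv4"
    server.getIpType("2001:db8::1")        # -> "ipv6"
    server.getIpType("example.onion")      # -> "onion"
    server.getIpType("not-an-address")     # -> "unknown"
    server.isIpReachable("example.onion")  # False unless a Tor controller is available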


@ -239,7 +239,7 @@ class ContentManager(object):
if num_removed_bad_files > 0:
self.site.worker_manager.removeSolvedFileTasks(mark_as_good=False)
gevent.spawn(self.site.update, since=0)
self.site.spawn(self.site.update, since=0)
self.log.debug("Archived removed contents: %s, removed bad files: %s" % (num_removed_contents, num_removed_bad_files))
@ -651,6 +651,25 @@ class ContentManager(object):
)
return files_node, files_optional_node
def serializeForSigning(self, content):
if "sign" in content:
del(content["sign"]) # The file signed without the sign
if "signs" in content:
del(content["signs"]) # The file signed without the signs
sign_content = json.dumps(content, sort_keys=True) # Dump the json to string to remove whitespaces
# Fix float representation error on Android
modified = content["modified"]
if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
modified_fixed = "{:.6f}".format(modified).strip("0.")
sign_content = sign_content.replace(
'"modified": %s' % repr(modified),
'"modified": %s' % modified_fixed
)
return sign_content
# Create and sign a content.json
# Return: The new content if filewrite = False
def sign(self, inner_path="content.json", privatekey=None, filewrite=True, update_changed_files=False, extend=None, remove_missing_optional=False):
@ -756,12 +775,7 @@ class ContentManager(object):
self.log.info("Signing %s..." % inner_path)
if "signs" in new_content:
del(new_content["signs"]) # Delete old signs
if "sign" in new_content:
del(new_content["sign"]) # Delete old sign (backward compatibility)
sign_content = json.dumps(new_content, sort_keys=True)
sign_content = self.serializeForSigning(new_content)
sign = CryptBitcoin.sign(sign_content, privatekey)
# new_content["signs"] = content.get("signs", {}) # TODO: Multisig
if sign: # If signing is successful (not an old address)
@ -925,12 +939,9 @@ class ContentManager(object):
return True # All good
# Verify file validity
# Return: None = Same as before, False = Invalid, True = Valid
def verifyFile(self, inner_path, file, ignore_same=True):
if inner_path.endswith("content.json"): # content.json: Check using sign
def verifyContentFile(self, inner_path, file, ignore_same=True):
from Crypt import CryptBitcoin
try:
if type(file) is dict:
new_content = file
else:
@ -960,21 +971,7 @@ class ContentManager(object):
# Check sign
sign = new_content.get("sign")
signs = new_content.get("signs", {})
if "sign" in new_content:
del(new_content["sign"]) # The file signed without the sign
if "signs" in new_content:
del(new_content["signs"]) # The file signed without the signs
sign_content = json.dumps(new_content, sort_keys=True) # Dump the json to string to remove whitepsace
# Fix float representation error on Android
modified = new_content["modified"]
if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
modified_fixed = "{:.6f}".format(modified).strip("0.")
sign_content = sign_content.replace(
'"modified": %s' % repr(modified),
'"modified": %s' % modified_fixed
)
sign_content = self.serializeForSigning(new_content)
if signs: # New style signing
valid_signers = self.getValidSigners(inner_path, new_content)
@ -998,14 +995,12 @@ class ContentManager(object):
raise VerifyError("Valid signs: %s/%s" % (valid_signs, signs_required))
else:
return self.verifyContent(inner_path, new_content)
else: # Old style signing
elif sign: # Old style signing
raise VerifyError("Invalid old-style sign")
else:
raise VerifyError("Not signed")
except Exception as err:
self.log.warning("%s: verify sign error: %s" % (inner_path, Debug.formatException(err)))
raise err
else: # Check using sha512 hash
def verifyOrdinaryFile(self, inner_path, file, ignore_same=True):
file_info = self.getFileInfo(inner_path)
if file_info:
if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
@ -1022,6 +1017,18 @@ class ContentManager(object):
else: # File not in content.json
raise VerifyError("File not in content.json")
# Verify file validity
# Return: None = Same as before, False = Invalid, True = Valid
def verifyFile(self, inner_path, file, ignore_same=True):
try:
if inner_path.endswith("content.json"):
return self.verifyContentFile(inner_path, file, ignore_same)
else:
return self.verifyOrdinaryFile(inner_path, file, ignore_same)
except Exception as err:
self.log.info("%s: verify error: %s" % (inner_path, Debug.formatException(err)))
raise err
def optionalDelete(self, inner_path):
self.site.storage.delete(inner_path)


@ -10,21 +10,35 @@ from Config import config
from . import Debug
last_error = None
def shutdown(reason="Unknown"):
logging.info("Shutting down (reason: %s)..." % reason)
import main
if "file_server" in dir(main):
try:
gevent.spawn(main.file_server.stop)
if "ui_server" in dir(main):
gevent.spawn(main.ui_server.stop)
except Exception as err:
print("Proper shutdown error: %s" % err)
sys.exit(0)
else:
sys.exit(0)
thread_shutdown = None
def shutdownThread():
import main
try:
if "file_server" in dir(main):
thread = gevent.spawn(main.file_server.stop)
thread.join(timeout=60)
if "ui_server" in dir(main):
thread = gevent.spawn(main.ui_server.stop)
thread.join(timeout=10)
except Exception as err:
print("Error in shutdown thread: %s" % err)
sys.exit(0)
else:
sys.exit(0)
def shutdown(reason="Unknown"):
global thread_shutdown
logging.info("Shutting down (reason: %s)..." % reason)
try:
if not thread_shutdown:
thread_shutdown = gevent.spawn(shutdownThread)
except Exception as err:
print("Proper shutdown error: %s" % err)
sys.exit(0)
# Store last error, ignore notify, allow manual error logging
def handleError(*args, **kwargs):
global last_error


@ -33,7 +33,7 @@ class FileRequest(object):
self.connection = connection
self.req_id = None
self.sites = self.server.sites
self.sites = self.server.getSites()
self.log = server.log
self.responded = False # Responded to the request
@ -376,7 +376,7 @@ class FileRequest(object):
for hash_id, peers in found.items():
for peer in peers:
ip_type = helper.getIpType(peer.ip)
ip_type = self.server.getIpType(peer.ip)
if len(back[ip_type][hash_id]) < 20:
back[ip_type][hash_id].append(peer.packMyAddress())
return back
@ -430,7 +430,7 @@ class FileRequest(object):
# Check requested port of the other peer
def actionCheckport(self, params):
if helper.getIpType(self.connection.ip) == "ipv6":
if self.server.getIpType(self.connection.ip) == "ipv6":
sock_address = (self.connection.ip, params["port"], 0, 0)
else:
sock_address = (self.connection.ip, params["port"])


@ -3,6 +3,7 @@ import time
import random
import socket
import sys
import weakref
import gevent
import gevent.pool
@ -18,6 +19,13 @@ from Connection import ConnectionServer
from Plugin import PluginManager
from Debug import Debug
log = logging.getLogger("FileServer")
class FakeThread(object):
def __init__(self):
pass
def ready(self):
return False
@PluginManager.acceptPlugins
class FileServer(ConnectionServer):
@ -25,12 +33,31 @@ class FileServer(ConnectionServer):
def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
self.site_manager = SiteManager.site_manager
self.portchecker = PeerPortchecker.PeerPortchecker(self)
self.log = logging.getLogger("FileServer")
self.ip_type = ip_type
self.ip_external_list = []
# This is wrong:
# self.log = logging.getLogger("FileServer")
# The value of self.log will be overwritten in ConnectionServer.__init__()
self.recheck_port = True
self.active_mode_thread_pool = gevent.pool.Pool(None)
self.site_pool = gevent.pool.Pool(None)
self.update_pool = gevent.pool.Pool(10)
self.update_start_time = 0
self.update_sites_task_next_nr = 1
self.update_threads = weakref.WeakValueDictionary()
self.passive_mode = None
self.active_mode = None
self.active_mode_threads = {}
self.supported_ip_types = ["ipv4"] # Outgoing ip_type support
if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
if self.getIpType(ip) == "ipv6" or self.isIpv6Supported():
self.supported_ip_types.append("ipv6")
if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
@ -52,27 +79,50 @@ class FileServer(ConnectionServer):
config.arguments.fileserver_port = port
ConnectionServer.__init__(self, ip, port, self.handleRequest)
self.log.debug("Supported IP types: %s" % self.supported_ip_types)
log.debug("Supported IP types: %s" % self.supported_ip_types)
self.managed_pools["active_mode_thread"] = self.active_mode_thread_pool
self.managed_pools["update"] = self.update_pool
self.managed_pools["site"] = self.site_pool
if ip_type == "dual" and ip == "::": if ip_type == "dual" and ip == "::":
# Also bind to ipv4 addres in dual mode # Also bind to ipv4 addres in dual mode
try: try:
self.log.debug("Binding proxy to %s:%s" % ("::", self.port)) log.debug("Binding proxy to %s:%s" % ("::", self.port))
self.stream_server_proxy = StreamServer( self.stream_server_proxy = StreamServer(
("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100 ("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
) )
except Exception as err: except Exception as err:
self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err)) log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
self.port_opened = {} self.port_opened = {}
self.sites = self.site_manager.sites
self.last_request = time.time() self.last_request = time.time()
self.files_parsing = {} self.files_parsing = {}
self.ui_server = None self.ui_server = None
def getSites(self):
sites = self.site_manager.list()
# We need to keep self.sites for the backward compatibility with plugins.
# Never. Ever. Use it.
# TODO: fix plugins
self.sites = sites
return sites
def getSite(self, address):
return self.getSites().get(address, None)
def getSiteAddresses(self):
# Avoid saving the site list on the stack, since a site may be deleted
# from the original list while iterating.
# Use the list of addresses instead.
return [
site.address for site in
sorted(list(self.getSites().values()), key=lambda site: site.settings.get("modified", 0), reverse=True)
]
def getRandomPort(self, ip, port_range_from, port_range_to): def getRandomPort(self, ip, port_range_from, port_range_to):
self.log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to)) log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
tried = [] tried = []
for bind_retry in range(100): for bind_retry in range(100):
port = random.randint(port_range_from, port_range_to) port = random.randint(port_range_from, port_range_to)
@ -84,14 +134,14 @@ class FileServer(ConnectionServer):
sock.bind((ip, port)) sock.bind((ip, port))
success = True success = True
except Exception as err: except Exception as err:
self.log.warning("Error binding to port %s: %s" % (port, err)) log.warning("Error binding to port %s: %s" % (port, err))
success = False success = False
sock.close() sock.close()
if success: if success:
self.log.info("Found unused random port: %s" % port) log.info("Found unused random port: %s" % port)
return port return port
else: else:
time.sleep(0.1) self.sleep(0.1)
return False return False
def isIpv6Supported(self): def isIpv6Supported(self):
@ -104,16 +154,16 @@ class FileServer(ConnectionServer):
sock.connect((ipv6_testip, 80)) sock.connect((ipv6_testip, 80))
local_ipv6 = sock.getsockname()[0] local_ipv6 = sock.getsockname()[0]
if local_ipv6 == "::1": if local_ipv6 == "::1":
self.log.debug("IPv6 not supported, no local IPv6 address") log.debug("IPv6 not supported, no local IPv6 address")
return False return False
else: else:
self.log.debug("IPv6 supported on IP %s" % local_ipv6) log.debug("IPv6 supported on IP %s" % local_ipv6)
return True return True
except socket.error as err: except socket.error as err:
self.log.warning("IPv6 not supported: %s" % err) log.warning("IPv6 not supported: %s" % err)
return False return False
except Exception as err: except Exception as err:
self.log.error("IPv6 check error: %s" % err) log.error("IPv6 check error: %s" % err)
return False return False
def listenProxy(self): def listenProxy(self):
@ -121,29 +171,34 @@ class FileServer(ConnectionServer):
self.stream_server_proxy.serve_forever() self.stream_server_proxy.serve_forever()
except Exception as err: except Exception as err:
if err.errno == 98: # Address already in use error if err.errno == 98: # Address already in use error
self.log.debug("StreamServer proxy listen error: %s" % err) log.debug("StreamServer proxy listen error: %s" % err)
else: else:
self.log.info("StreamServer proxy listen error: %s" % err) log.info("StreamServer proxy listen error: %s" % err)
# Handle request to fileserver # Handle request to fileserver
def handleRequest(self, connection, message): def handleRequest(self, connection, message):
if config.verbose: if config.verbose:
if "params" in message: if "params" in message:
self.log.debug( log.debug(
"FileRequest: %s %s %s %s" % "FileRequest: %s %s %s %s" %
(str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path")) (str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
) )
else: else:
self.log.debug("FileRequest: %s %s" % (str(connection), message["cmd"])) log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
req = FileRequest(self, connection) req = FileRequest(self, connection)
req.route(message["cmd"], message.get("req_id"), message.get("params")) req.route(message["cmd"], message.get("req_id"), message.get("params"))
if not self.has_internet and not connection.is_private_ip:
self.has_internet = True
self.onInternetOnline()
if not connection.is_private_ip:
self.setInternetStatus(True)
def onInternetOnline(self):
self.log.info("Internet online")
log.info("Internet online")
gevent.spawn(self.checkSites, check_files=False, force_port_check=True)
invalid_interval=(
self.internet_offline_since - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
time.time()
)
self.invalidateUpdateTime(invalid_interval)
self.recheck_port = True
self.spawn(self.updateSites)
# Reload the FileRequest class to prevent restarts in debug mode
def reload(self):
@ -152,8 +207,8 @@ class FileServer(ConnectionServer):
FileRequest = imp.load_source("FileRequest", "src/File/FileRequest.py").FileRequest
def portCheck(self):
if config.offline:
if self.isOfflineMode():
self.log.info("Offline mode: port check disabled")
log.info("Offline mode: port check disabled")
res = {"ipv4": None, "ipv6": None}
self.port_opened = res
return res
@ -162,14 +217,14 @@ class FileServer(ConnectionServer):
for ip_external in config.ip_external: for ip_external in config.ip_external:
SiteManager.peer_blacklist.append((ip_external, self.port)) # Add myself to peer blacklist SiteManager.peer_blacklist.append((ip_external, self.port)) # Add myself to peer blacklist
ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external]) ip_external_types = set([self.getIpType(ip) for ip in config.ip_external])
res = { res = {
"ipv4": "ipv4" in ip_external_types, "ipv4": "ipv4" in ip_external_types,
"ipv6": "ipv6" in ip_external_types "ipv6": "ipv6" in ip_external_types
} }
self.ip_external_list = config.ip_external self.ip_external_list = config.ip_external
self.port_opened.update(res) self.port_opened.update(res)
self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"])) log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
return res return res
self.port_opened = {} self.port_opened = {}
@ -177,7 +232,7 @@ class FileServer(ConnectionServer):
self.ui_server.updateWebsocket() self.ui_server.updateWebsocket()
if "ipv6" in self.supported_ip_types: if "ipv6" in self.supported_ip_types:
res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6") res_ipv6_thread = self.spawn(self.portchecker.portCheck, self.port, "ipv6")
else: else:
res_ipv6_thread = None res_ipv6_thread = None
@ -190,8 +245,8 @@ class FileServer(ConnectionServer):
res_ipv6 = {"ip": None, "opened": None} res_ipv6 = {"ip": None, "opened": None}
else: else:
res_ipv6 = res_ipv6_thread.get() res_ipv6 = res_ipv6_thread.get()
if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6": if res_ipv6["opened"] and not self.getIpType(res_ipv6["ip"]) == "ipv6":
self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"]) log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
res_ipv6["opened"] = False res_ipv6["opened"] = False
self.ip_external_list = [] self.ip_external_list = []
@ -200,7 +255,7 @@ class FileServer(ConnectionServer):
self.ip_external_list.append(res_ip["ip"]) self.ip_external_list.append(res_ip["ip"])
SiteManager.peer_blacklist.append((res_ip["ip"], self.port)) SiteManager.peer_blacklist.append((res_ip["ip"], self.port))
self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"])) log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]} res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
@ -211,9 +266,9 @@ class FileServer(ConnectionServer):
for ip in interface_ips: for ip in interface_ips:
if not helper.isPrivateIp(ip) and ip not in self.ip_external_list: if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
self.ip_external_list.append(ip) self.ip_external_list.append(ip)
res[helper.getIpType(ip)] = True # We have opened port if we have external ip res[self.getIpType(ip)] = True # We have opened port if we have external ip
SiteManager.peer_blacklist.append((ip, self.port)) SiteManager.peer_blacklist.append((ip, self.port))
self.log.debug("External ip found on interfaces: %s" % ip) log.debug("External ip found on interfaces: %s" % ip)
self.port_opened.update(res) self.port_opened.update(res)
@ -222,131 +277,381 @@ class FileServer(ConnectionServer):
return res return res
# Check site file integrity
def checkSite(self, site, check_files=False):
if site.isServing():
site.announce(mode="startup") # Announce site to tracker
site.update(check_files=check_files) # Update site's content.json and download changed files
site.sendMyHashfield()
site.updateHashfield()
# Check sites integrity
@util.Noparallel()
def checkSites(self, check_files=False, force_port_check=False):
self.log.debug("Checking sites...")
s = time.time()
sites_checking = False
if not self.port_opened or force_port_check: # Test and open port if not tested yet
if len(self.sites) <= 2: # Don't wait port opening on first startup
sites_checking = True
for address, site in list(self.sites.items()):
gevent.spawn(self.checkSite, site, check_files)
self.portCheck()
if not self.port_opened["ipv4"]:
self.tor_manager.startOnions()
@util.Noparallel(queue=True)
def recheckPort(self):
if self.recheck_port:
self.portCheck()
self.recheck_port = False
# Returns False if Internet is immediately available
# Returns True if we've spent some time waiting for Internet
# Returns None if FileServer is stopping or the Offline mode is enabled
@util.Noparallel()
def waitForInternetOnline(self):
if self.isOfflineMode() or self.stopping:
return None
if self.isInternetOnline():
return False
while not self.isInternetOnline():
self.sleep(30)
if self.isOfflineMode() or self.stopping:
return None
if self.isInternetOnline():
break
if len(self.update_pool) == 0:
log.info("Internet connection seems to be broken. Running an update for a random site to check if we are able to connect to any peer.")
thread = self.thread_pool.spawn(self.updateRandomSite)
thread.join()
self.recheckPort()
return True
def updateRandomSite(self, site_addresses=None, force=False):
if not site_addresses:
site_addresses = self.getSiteAddresses()
site_addresses = random.sample(site_addresses, 1)
if len(site_addresses) < 1:
return
address = site_addresses[0]
site = self.getSite(address)
if not site:
return
log.info("Randomly chosen site: %s", site.address_short)
self.spawnUpdateSite(site).join()
def updateSite(self, site, check_files=False, verify_files=False):
if not site:
return
if verify_files:
mode = 'verify'
elif check_files:
mode = 'check'
else:
mode = 'update'
log.info("running <%s> for %s" % (mode, site.address_short))
site.update2(check_files=check_files, verify_files=verify_files)
def spawnUpdateSite(self, site, check_files=False, verify_files=False):
fake_thread = FakeThread()
self.update_threads[site.address] = fake_thread
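# (Presumably) the FakeThread placeholder keeps siteIsInUpdatePool() returning True
# while update_pool.spawn() below may block waiting for a free slot, so concurrent
# loops don't schedule the same site twice.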
thread = self.update_pool.spawn(self.updateSite, site,
check_files=check_files, verify_files=verify_files)
self.update_threads[site.address] = thread
return thread
def lookupInUpdatePool(self, site_address):
thread = self.update_threads.get(site_address, None)
if not thread or thread.ready():
return None
return thread
def siteIsInUpdatePool(self, site_address):
return self.lookupInUpdatePool(site_address) is not None
def invalidateUpdateTime(self, invalid_interval):
for address in self.getSiteAddresses():
site = self.getSite(address)
if site:
site.invalidateUpdateTime(invalid_interval)
def isSiteUpdateTimeValid(self, site_address):
site = self.getSite(site_address)
if not site:
return False
return site.isUpdateTimeValid()
def updateSites(self):
task_nr = self.update_sites_task_next_nr
self.update_sites_task_next_nr += 1
task_description = "updateSites [#%d]" % task_nr
log.info("%s: started", task_description)
# Don't wait port opening on first startup. Do the instant check now.
if len(self.getSites()) <= 2:
for address, site in list(self.getSites().items()):
self.updateSite(site, check_files=True)
self.recheckPort()
all_site_addresses = self.getSiteAddresses()
site_addresses = [
address for address in all_site_addresses
if not self.isSiteUpdateTimeValid(address)
]
log.info("%s: chosen %d sites (of %d)", task_description, len(site_addresses), len(all_site_addresses))
sites_processed = 0
sites_skipped = 0
start_time = time.time()
self.update_start_time = start_time
progress_print_time = time.time()
if not sites_checking:
check_pool = gevent.pool.Pool(5)
# Check sites integrity # Check sites integrity
for site in sorted(list(self.sites.values()), key=lambda site: site.settings.get("modified", 0), reverse=True): for site_address in site_addresses:
if not site.isServing(): site = None
continue self.sleep(1)
check_thread = check_pool.spawn(self.checkSite, site, check_files) # Check in new thread self.waitForInternetOnline()
time.sleep(2)
if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
check_thread.join(timeout=5)
self.log.debug("Checksites done in %.3fs" % (time.time() - s))
def cleanupSites(self): while self.isActiveMode() and self.shouldThrottleNewConnections():
import gc self.sleep(1)
startup = True
time.sleep(5 * 60) # Sites already cleaned up on startup if not self.isActiveMode():
peers_protected = set([]) break
while 1:
# Sites health care every 20 min site = self.getSite(site_address)
self.log.debug( if not site or site.isUpdateTimeValid() or self.siteIsInUpdatePool(site_address):
"Running site cleanup, connections: %s, internet: %s, protected peers: %s" % sites_skipped += 1
(len(self.connections), self.has_internet, len(peers_protected)) continue
sites_processed += 1
thread = self.spawnUpdateSite(site)
if not self.isActiveMode():
break
if time.time() - progress_print_time > 60:
progress_print_time = time.time()
time_spent = time.time() - start_time
time_per_site = time_spent / float(sites_processed)
sites_left = len(site_addresses) - sites_processed
time_left = time_per_site * sites_left
log.info("%s: DONE: %d sites in %.2fs (%.2fs per site); SKIPPED: %d sites; LEFT: %d sites in %.2fs",
task_description,
sites_processed,
time_spent,
time_per_site,
sites_skipped,
sites_left,
time_left
) )
for address, site in list(self.sites.items()): if not self.isActiveMode():
if not site.isServing(): log.info("%s: stopped", task_description)
else:
log.info("%s: finished in %.2fs", task_description, time.time() - start_time)
def peekSiteForVerification(self):
check_files_interval = 60 * 60 * 24
verify_files_interval = 60 * 60 * 24 * 10
site_addresses = self.getSiteAddresses()
random.shuffle(site_addresses)
for site_address in site_addresses:
site = self.getSite(site_address)
if not site:
continue continue
mode = site.isFileVerificationExpired(check_files_interval, verify_files_interval)
if mode:
return (site_address, mode)
return (None, None)
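A hedged sketch of the consumer side (sitesVerificationThread below follows this pattern): the peeked mode selects whether the scheduled update merely re-checks file presence or fully re-verifies hashes.

    site_address, mode = self.peekSiteForVerification()
    if mode == "check":
        self.spawnUpdateSite(self.getSite(site_address), check_files=True, verify_files=False)
    elif mode == "verify":
        self.spawnUpdateSite(self.getSite(site_address), check_files=False, verify_files=True)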
if not startup:
site.cleanupPeers(peers_protected)
time.sleep(1) # Prevent too quick request def sitesVerificationThread(self):
log.info("sitesVerificationThread started")
short_timeout = 20
long_timeout = 120
peers_protected = set([]) self.sleep(long_timeout)
for address, site in list(self.sites.items()):
if not site.isServing():
continue
if site.peers:
with gevent.Timeout(10, exception=False):
site.announcer.announcePex()
# Last check modification failed
if site.content_updated is False:
site.update()
elif site.bad_files:
site.retryBadFiles()
if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
# Keep active connections if site has been modified witin 7 days
connected_num = site.needConnections(check_site_on_reconnect=True)
if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing
peers_protected.update([peer.key for peer in site.getConnectedPeers()])
time.sleep(1) # Prevent too quick request
while self.isActiveMode():
site = None site = None
gc.collect() # Implicit garbage collection self.sleep(short_timeout)
startup = False self.waitForInternetOnline()
time.sleep(60 * 20)
def announceSite(self, site): while self.isActiveMode() and self.shouldThrottleNewConnections():
site.announce(mode="update", pex=False) self.sleep(1)
active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
if site.settings["own"] or active_site:
# Check connections more frequently on own and active sites to speed-up first connections
site.needConnections(check_site_on_reconnect=True)
site.sendMyHashfield(3)
site.updateHashfield(3)
# Announce sites every 20 min if not self.isActiveMode():
def announceSites(self): break
time.sleep(5 * 60) # Sites already announced on startup
while 1: site_address, mode = self.peekSiteForVerification()
config.loadTrackersFile() if not site_address:
s = time.time() self.sleep(long_timeout)
for address, site in list(self.sites.items()):
if not site.isServing():
continue continue
gevent.spawn(self.announceSite, site).join(timeout=10)
time.sleep(1)
taken = time.time() - s
# Query all trackers one-by-one in 20 minutes evenly distributed while self.siteIsInUpdatePool(site_address) and self.isActiveMode():
sleep = max(0, 60 * 20 / len(config.trackers) - taken) self.sleep(1)
self.log.debug("Site announce tracker done in %.3fs, sleeping for %.3fs..." % (taken, sleep)) if not self.isActiveMode():
time.sleep(sleep) break
site = self.getSite(site_address)
if not site:
continue
if mode == "verify":
check_files = False
verify_files = True
elif mode == "check":
check_files = True
verify_files = False
else:
continue
thread = self.spawnUpdateSite(site,
check_files=check_files, verify_files=verify_files)
log.info("sitesVerificationThread stopped")
def sitesMaintenanceThread(self, mode="full"):
log.info("sitesMaintenanceThread(%s) started" % mode)
startup = True
short_timeout = 2
min_long_timeout = 10
max_long_timeout = 60 * 10
long_timeout = min_long_timeout
short_cycle_time_limit = 60 * 2
while self.isActiveMode():
self.sleep(long_timeout)
while self.isActiveMode() and self.shouldThrottleNewConnections():
self.sleep(1)
if not self.isActiveMode():
break
start_time = time.time()
log.debug(
"Starting <%s> maintenance cycle: connections=%s, internet=%s",
mode,
len(self.connections), self.isInternetOnline()
)
start_time = time.time()
site_addresses = self.getSiteAddresses()
sites_processed = 0
for site_address in site_addresses:
if not self.isActiveMode():
break
site = self.getSite(site_address)
if not site:
continue
log.debug("Running maintenance for site: %s", site.address_short)
done = site.runPeriodicMaintenance(startup=startup)
site = None
if done:
sites_processed += 1
self.sleep(short_timeout)
# If we host hundreds of sites, the full maintenance cycle may take very
# long time, especially on startup ( > 1 hour).
# This means we are not able to run the maintenance procedure for active
# sites frequently enough using just a single maintenance thread.
# So we run 2 maintenance threads:
# * One running full cycles.
# * And one running short cycles for the most active sites.
# When the short cycle runs out of the time limit, it restarts
# from the beginning of the site list.
if mode == "short" and time.time() - start_time > short_cycle_time_limit:
break
log.debug("<%s> maintenance cycle finished in %.2fs. Total sites: %d. Processed sites: %d. Timeout: %d",
mode,
time.time() - start_time,
len(site_addresses),
sites_processed,
long_timeout
)
if sites_processed:
long_timeout = max(int(long_timeout / 2), min_long_timeout)
else:
long_timeout = min(long_timeout + 1, max_long_timeout)
site_addresses = None
startup = False
log.info("sitesMaintenanceThread(%s) stopped" % mode)
def keepAliveThread(self):
# This thread is mostly useless on a system under load, since it never does
# any work if there is active traffic.
#
# We should initiate some network activity to detect an Internet outage
# and avoid false positives. Normally there is network activity
# initiated by various parts of the application as well as by network peers,
# so that's not a problem.
#
# However, if it actually happens that we have no network traffic for
# some time (say, we host just a couple of inactive sites, and no peers
# are interested in connecting to them), we initiate some traffic by
# running an update for a random site. That's way better than just
# pinging a random peer for no benefit.
log.info("keepAliveThread started")
while self.isActiveMode():
self.waitForInternetOnline()
threshold = self.internet_outage_threshold / 2.0
self.sleep(threshold / 2.0)
while self.isActiveMode() and self.shouldThrottleNewConnections():
self.sleep(1)
if not self.isActiveMode():
break
last_activity_time = max(
self.last_successful_internet_activity_time,
self.last_outgoing_internet_activity_time)
now = time.time()
if not len(self.getSites()):
continue
if last_activity_time > now - threshold:
continue
if len(self.update_pool) != 0:
continue
log.info("No network activity for %.2fs. Running an update for a random site.",
now - last_activity_time
)
self.update_pool.spawn(self.updateRandomSite, force=True)
log.info("keepAliveThread stopped")
# Periodic reloading of tracker files
def reloadTrackerFilesThread(self):
# TODO:
# This should probably be more sophisticated.
# We should check if the files have actually changed,
# and do it more often.
log.info("reloadTrackerFilesThread started")
interval = 60 * 10
while self.isActiveMode():
self.sleep(interval)
if not self.isActiveMode():
break
config.loadTrackersFile()
log.info("reloadTrackerFilesThread stopped")
# Detects if computer back from wakeup
def wakeupWatcher(self):
def wakeupWatcherThread(self):
log.info("wakeupWatcherThread started")
last_time = time.time()
last_my_ips = socket.gethostbyname_ex('')[2]
while 1:
while self.isActiveMode():
time.sleep(30)
self.sleep(30)
if not self.isActiveMode():
break
is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
if is_time_changed:
# If taken more than 3 minute then the computer was in sleep mode
self.log.info(
log.info(
"Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
(last_time, time.time(), time.time() - last_time)
)
@ -354,50 +659,130 @@ class FileServer(ConnectionServer):
my_ips = socket.gethostbyname_ex('')[2]
is_ip_changed = my_ips != last_my_ips
if is_ip_changed:
self.log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
if is_time_changed or is_ip_changed:
self.checkSites(check_files=False, force_port_check=True)
invalid_interval=(
last_time - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
time.time()
)
self.invalidateUpdateTime(invalid_interval)
self.recheck_port = True
self.spawn(self.updateSites)
last_time = time.time()
last_my_ips = my_ips
log.info("wakeupWatcherThread stopped")
def setOfflineMode(self, offline_mode):
ConnectionServer.setOfflineMode(self, offline_mode)
self.setupActiveMode()
def setPassiveMode(self, passive_mode):
if self.passive_mode == passive_mode:
return
self.passive_mode = passive_mode
if self.passive_mode:
log.info("passive mode is ON");
else:
log.info("passive mode is OFF");
self.setupActiveMode()
def isPassiveMode(self):
return self.passive_mode
def setupActiveMode(self):
active_mode = (not self.passive_mode) and (not self.isOfflineMode())
if self.active_mode == active_mode:
return
self.active_mode = active_mode
if self.active_mode:
log.info("active mode is ON");
self.enterActiveMode();
else:
log.info("active mode is OFF");
self.leaveActiveMode();
def killActiveModeThreads(self):
for key, thread in list(self.active_mode_threads.items()):
if thread:
if not thread.ready():
log.info("killing %s" % key)
gevent.kill(thread)
del self.active_mode_threads[key]
def leaveActiveMode(self):
pass
def enterActiveMode(self):
self.killActiveModeThreads()
x = self.active_mode_threads
p = self.active_mode_thread_pool
x["thread_keep_alive"] = p.spawn(self.keepAliveThread)
x["thread_wakeup_watcher"] = p.spawn(self.wakeupWatcherThread)
x["thread_sites_verification"] = p.spawn(self.sitesVerificationThread)
x["thread_reload_tracker_files"] = p.spawn(self.reloadTrackerFilesThread)
x["thread_sites_maintenance_full"] = p.spawn(self.sitesMaintenanceThread, mode="full")
x["thread_sites_maintenance_short"] = p.spawn(self.sitesMaintenanceThread, mode="short")
x["thread_initial_site_updater"] = p.spawn(self.updateSites)
# Returns True if an active mode thread should keep going,
# i.e. active mode is enabled and the server is not about to shut down
def isActiveMode(self):
self.setupActiveMode()
if not self.active_mode:
return False
if not self.running:
return False
if self.stopping:
return False
return True
# Bind and start serving sites
def start(self, check_sites=True):
# If passive_mode is False, FileServer starts the full-featured file serving:
# * Checks for updates at startup.
# * Checks site's integrity.
# * Runs periodic update checks.
# * Watches for internet being up or down and for computer to wake up and runs update checks.
# If passive_mode is True, all the mentioned activity is disabled.
def start(self, passive_mode=False, check_sites=None, check_connections=True):
# Backward compatibility for a misnamed argument:
if check_sites is not None:
passive_mode = not check_sites
if self.stopping:
return False
ConnectionServer.start(self)
ConnectionServer.start(self, check_connections=check_connections)
try:
self.stream_server.start()
except Exception as err:
self.log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
self.sites = self.site_manager.list()
if config.debug:
# Auto reload FileRequest on change
from Debug import DebugReloader
DebugReloader.watcher.addCallback(self.reload)
if check_sites: # Open port, Update sites, Check files integrity
gevent.spawn(self.checkSites)
thread_announce_sites = gevent.spawn(self.announceSites)
thread_cleanup_sites = gevent.spawn(self.cleanupSites)
thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
# XXX: for initializing self.sites
# Remove this line when self.sites gets completely unused
self.getSites()
self.setPassiveMode(passive_mode)
ConnectionServer.listen(self)
self.log.debug("Stopped.")
log.info("Stopped.")
def stop(self):
def stop(self, ui_websocket=None):
if self.running and self.portchecker.upnp_port_opened:
self.log.debug('Closing port %d' % self.port)
log.debug('Closing port %d' % self.port)
try:
self.portchecker.portClose(self.port)
self.log.info('Closed port via upnp.')
log.info('Closed port via upnp.')
except Exception as err:
self.log.info("Failed at attempt to use upnp to close port: %s" % err)
log.info("Failed at attempt to use upnp to close port: %s" % err)
return ConnectionServer.stop(self)
return ConnectionServer.stop(self, ui_websocket=ui_websocket)
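For illustration, a hedged sketch of the new calling convention (the file_server instance is an assumption; start() still blocks in ConnectionServer.listen() as shown above, so callers typically run it in its own greenlet):

    file_server.start(passive_mode=False)  # full serving: startup update, maintenance, port re-check
    file_server.start(check_sites=False)   # old-style call, translated to passive_mode=True
    file_server.stop(ui_websocket=None)    # stop() now forwards ui_websocket to ConnectionServer.stop()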


@ -20,51 +20,135 @@ if config.use_tempfiles:
# Communicate remote peers # Communicate remote peers
@PluginManager.acceptPlugins @PluginManager.acceptPlugins
class Peer(object): class Peer(object):
__slots__ = (
"ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield",
"time_added", "has_hashfield", "is_tracker_connection", "time_my_hashfield_sent", "last_ping", "reputation",
"last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time"
)
def __init__(self, ip, port, site=None, connection_server=None): def __init__(self, ip, port, site=None, connection_server=None):
self.ip = ip self.ip = ip
self.port = port self.port = port
self.site = site self.site = site
self.key = "%s:%s" % (ip, port) self.key = "%s:%s" % (ip, port)
self.ip_type = None
self.removed = False
self.log_level = logging.DEBUG
self.connection_error_log_level = logging.DEBUG
self.connection = None self.connection = None
self.connection_server = connection_server self.connection_server = connection_server
self.has_hashfield = False # Lazy hashfield object not created yet self.has_hashfield = False # Lazy hashfield object not created yet
self.time_hashfield = None # Last time peer's hashfiled downloaded self.time_hashfield = None # Last time peer's hashfiled downloaded
self.time_my_hashfield_sent = None # Last time my hashfield sent to peer self.time_my_hashfield_sent = None # Last time my hashfield sent to peer
self.time_found = time.time() # Time of last found in the torrent tracker self.time_found = time.time() # Time of last found in the torrent tracker
self.time_response = None # Time of last successful response from peer self.time_response = 0 # Time of last successful response from peer
self.time_added = time.time() self.time_added = time.time()
self.last_ping = None # Last response time for ping self.last_ping = None # Last response time for ping
self.last_pex = 0 # Last query/response time for pex
self.is_tracker_connection = False # Tracker connection instead of normal peer self.is_tracker_connection = False # Tracker connection instead of normal peer
self.reputation = 0 # More likely to connect if larger self.reputation = 0 # More likely to connect if larger
self.last_content_json_update = 0.0 # Modify date of last received content.json self.last_content_json_update = 0.0 # Modify date of last received content.json
self.protected = 0
self.reachable = None
self.connection_error = 0 # Series of connection error self.connection_error = 0 # Series of connection error
self.hash_failed = 0 # Number of bad files from peer self.hash_failed = 0 # Number of bad files from peer
self.download_bytes = 0 # Bytes downloaded self.download_bytes = 0 # Bytes downloaded
self.download_time = 0 # Time spent to download self.download_time = 0 # Time spent to download
self.protectedRequests = ["getFile", "streamFile", "update", "listModified"]
def __getattr__(self, key):
if key == "hashfield":
self.has_hashfield = True
self.hashfield = PeerHashfield()
return self.hashfield
else:
return getattr(self, key)
# Raise appropriately formatted attribute error
return object.__getattribute__(self, key)
def log(self, text):
def log(self, text, log_level = None):
if log_level is None:
log_level = self.log_level
if log_level <= logging.DEBUG:
if not config.verbose:
return # Only log if we are in debug mode
logger = None
if self.site:
self.site.log.debug("%s:%s %s" % (self.ip, self.port, text))
logger = self.site.log
else:
logging.debug("%s:%s %s" % (self.ip, self.port, text))
logger = logging.getLogger()
logger.log(log_level, "%s:%s %s" % (self.ip, self.port, text))
# Protect connection from being closed by site.cleanupPeers()
def markProtected(self, interval=60*2):
self.protected = max(self.protected, time.time() + interval)
def isProtected(self):
if self.protected > 0:
if self.protected < time.time():
self.protected = 0
return self.protected > 0
def isTtlExpired(self, ttl):
last_activity = max(self.time_found, self.time_response)
return (time.time() - last_activity) > ttl
# Since 0.8.0
def isConnected(self):
if self.connection and not self.connection.connected:
self.connection = None
return self.connection and self.connection.connected
# Peer proved to be connectable recently
# Since 0.8.0
def isConnectable(self):
if self.connection_error >= 1: # The last connection attempt failed
return False
if time.time() - self.time_response > 60 * 60 * 2: # Last successful response more than 2 hours ago
return False
return self.isReachable()
# Since 0.8.0
def isReachable(self):
if self.reachable is None:
self.updateCachedState()
return self.reachable
# Since 0.8.0
def getIpType(self):
if not self.ip_type:
self.updateCachedState()
return self.ip_type
# We cache some ConnectionServer-related state for better performance.
# This kind of state currently doesn't change during a program session,
# and it's safe to read and cache it just once. But future versions
# may bring more pieces of dynamic configuration. So we update the state
# on each peer.found().
def updateCachedState(self):
connection_server = self.getConnectionServer()
if not self.port or self.port == 1: # Port 1 considered as "no open port"
self.reachable = False
else:
self.reachable = connection_server.isIpReachable(self.ip)
self.ip_type = connection_server.getIpType(self.ip)
# FIXME:
# This should probably be changed.
# When creating a peer object, the caller must provide either `connection_server`,
# or `site`, so Peer object is able to use `site.connection_server`.
def getConnectionServer(self):
if self.connection_server:
connection_server = self.connection_server
elif self.site:
connection_server = self.site.connection_server
else:
import main
connection_server = main.file_server
return connection_server
# Connect to host
def connect(self, connection=None):
@ -87,29 +171,30 @@ class Peer(object):
self.connection = None
try:
if self.connection_server:
connection_server = self.connection_server
elif self.site:
connection_server = self.site.connection_server
else:
import main
connection_server = main.file_server
connection_server = self.getConnectionServer()
self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
if self.connection and self.connection.connected:
self.reputation += 1
self.connection.sites += 1
except Exception as err:
self.onConnectionError("Getting connection error")
self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
(Debug.formatException(err), self.connection_error, self.hash_failed))
(Debug.formatException(err), self.connection_error, self.hash_failed),
log_level=self.connection_error_log_level)
self.connection = None
return self.connection
def disconnect(self, reason="Unknown"):
if self.connection:
self.connection.close(reason)
self.connection = None
# Check if we have connection to peer
def findConnection(self):
if self.connection and self.connection.connected: # We have connection to peer
return self.connection
else: # Try to find from other sites connections
self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site)
self.connection = self.getConnectionServer().getConnection(self.ip, self.port, create=False, site=self.site)
if self.connection:
self.connection.sites += 1
return self.connection
@ -143,9 +228,13 @@ class Peer(object):
if source in ("tracker", "local"): if source in ("tracker", "local"):
self.site.peers_recent.appendleft(self) self.site.peers_recent.appendleft(self)
self.time_found = time.time() self.time_found = time.time()
self.updateCachedState()
# Send a command to peer and return response value # Send a command to peer and return response value
def request(self, cmd, params={}, stream_to=None): def request(self, cmd, params={}, stream_to=None):
if self.removed:
return False
if not self.connection or self.connection.closed: if not self.connection or self.connection.closed:
self.connect() self.connect()
if not self.connection: if not self.connection:
@ -156,6 +245,8 @@ class Peer(object):
for retry in range(1, 4): # Retry 3 times for retry in range(1, 4): # Retry 3 times
try: try:
if cmd in self.protectedRequests:
self.markProtected()
if not self.connection: if not self.connection:
raise Exception("No connection found") raise Exception("No connection found")
res = self.connection.request(cmd, params, stream_to) res = self.connection.request(cmd, params, stream_to)
@ -188,6 +279,9 @@ class Peer(object):
# Get a file content from peer # Get a file content from peer
def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False): def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
if self.removed:
return False
if file_size and file_size > 5 * 1024 * 1024: if file_size and file_size > 5 * 1024 * 1024:
max_read_size = 1024 * 1024 max_read_size = 1024 * 1024
else: else:
@ -241,11 +335,14 @@ class Peer(object):
return buff
# Send a ping request
def ping(self):
def ping(self, timeout=10.0, tryes=3):
if self.removed:
return False
response_time = None
for retry in range(1, 3): # Retry 3 times
for retry in range(1, tryes): # Retry 3 times
s = time.time()
with gevent.Timeout(10.0, False): # 10 sec timeout, don't raise exception
with gevent.Timeout(timeout, False):
res = self.request("ping")
if res and "body" in res and res["body"] == b"Pong!":
@ -264,10 +361,18 @@ class Peer(object):
return response_time
# Request peer exchange from peer
def pex(self, site=None, need_num=5):
def pex(self, site=None, need_num=5, request_interval=60*2):
if self.removed:
return False
if not site:
site = self.site # If no site defined request peers for this site
if self.last_pex + request_interval >= time.time():
return False
self.last_pex = time.time()
# give back 5 connectible peers
packed_peers = helper.packPeers(self.site.getConnectablePeers(5, allow_private=False))
request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
@ -276,6 +381,7 @@ class Peer(object):
if packed_peers["ipv6"]:
request["peers_ipv6"] = packed_peers["ipv6"]
res = self.request("pex", request)
self.last_pex = time.time()
if not res or "error" in res: if not res or "error" in res:
return False return False
added = 0 added = 0
@ -307,9 +413,14 @@ class Peer(object):
# List modified files since the date # List modified files since the date
# Return: {inner_path: modification date,...} # Return: {inner_path: modification date,...}
def listModified(self, since): def listModified(self, since):
if self.removed:
return False
return self.request("listModified", {"since": since, "site": self.site.address}) return self.request("listModified", {"since": since, "site": self.site.address})
def updateHashfield(self, force=False): def updateHashfield(self, force=False):
if self.removed:
return False
# Don't update hashfield again in 5 min # Don't update hashfield again in 5 min
if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force: if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
return False return False
@ -325,6 +436,9 @@ class Peer(object):
# Find peers for hashids # Find peers for hashids
# Return: {hash1: ["ip:port", "ip:port",...],...} # Return: {hash1: ["ip:port", "ip:port",...],...}
def findHashIds(self, hash_ids): def findHashIds(self, hash_ids):
if self.removed:
return False
res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids}) res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
if not res or "error" in res or type(res) is not dict: if not res or "error" in res or type(res) is not dict:
return False return False
@ -368,6 +482,9 @@ class Peer(object):
return True return True
def publish(self, address, inner_path, body, modified, diffs=[]): def publish(self, address, inner_path, body, modified, diffs=[]):
if self.removed:
return False
if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095: if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
# To save bw we don't push big content.json to peers # To save bw we don't push big content.json to peers
body = b"" body = b""
@ -382,20 +499,22 @@ class Peer(object):
# Stop and remove from site # Stop and remove from site
def remove(self, reason="Removing"): def remove(self, reason="Removing"):
self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed)) self.removed = True
if self.site and self.key in self.site.peers: self.log("Removing peer with reason: <%s>. Connection error: %s, Hash failed: %s" % (reason, self.connection_error, self.hash_failed))
del(self.site.peers[self.key]) if self.site:
self.site.deregisterPeer(self)
# No way: self.site = None
# We don't assign None to self.site here because it leads to random exceptions in various threads,
# that hold references to the peer and still believe it belongs to the site.
if self.site and self in self.site.peers_recent: self.disconnect(reason)
self.site.peers_recent.remove(self)
if self.connection:
self.connection.close(reason)
# - EVENTS - # - EVENTS -
# On connection error # On connection error
def onConnectionError(self, reason="Unknown"): def onConnectionError(self, reason="Unknown"):
if not self.getConnectionServer().isInternetOnline():
return
self.connection_error += 1 self.connection_error += 1
if self.site and len(self.site.peers) > 200: if self.site and len(self.site.peers) > 200:
limit = 3 limit = 3
@ -403,7 +522,7 @@ class Peer(object):
limit = 6 limit = 6
self.reputation -= 1 self.reputation -= 1
if self.connection_error >= limit: # Dead peer if self.connection_error >= limit: # Dead peer
self.remove("Peer connection: %s" % reason) self.remove("Connection error limit reached: %s. Provided message: %s" % (limit, reason))
# Done working with peer # Done working with peer
def onWorkerDone(self): def onWorkerDone(self):

File diff suppressed because it is too large

View file

@ -1,6 +1,7 @@
import random import random
import time import time
import hashlib import hashlib
import logging
import re import re
import collections import collections
@ -12,6 +13,7 @@ from Debug import Debug
from util import helper from util import helper
from greenlet import GreenletExit from greenlet import GreenletExit
import util import util
from util import CircularIterator
class AnnounceError(Exception): class AnnounceError(Exception):
@ -24,11 +26,20 @@ global_stats = collections.defaultdict(lambda: collections.defaultdict(int))
class SiteAnnouncer(object): class SiteAnnouncer(object):
def __init__(self, site): def __init__(self, site):
self.site = site self.site = site
self.log = logging.getLogger("Site:%s SiteAnnouncer" % self.site.address_short)
self.stats = {} self.stats = {}
self.fileserver_port = config.fileserver_port self.fileserver_port = config.fileserver_port
self.peer_id = self.site.connection_server.peer_id self.peer_id = self.site.connection_server.peer_id
self.last_tracker_id = random.randint(0, 10) self.tracker_circular_iterator = CircularIterator()
self.time_last_announce = 0 self.time_last_announce = 0
self.supported_tracker_count = 0
# Returns connection_server rela
# Since 0.8.0
@property
def connection_server(self):
return self.site.connection_server
def getTrackers(self): def getTrackers(self):
return config.trackers return config.trackers
@ -36,25 +47,76 @@ class SiteAnnouncer(object):
def getSupportedTrackers(self): def getSupportedTrackers(self):
trackers = self.getTrackers() trackers = self.getTrackers()
if not self.site.connection_server.tor_manager.enabled: if not self.connection_server.tor_manager.enabled:
trackers = [tracker for tracker in trackers if ".onion" not in tracker] trackers = [tracker for tracker in trackers if ".onion" not in tracker]
trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)] # Remove trackers with unknown address trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)] # Remove trackers with unknown address
if "ipv6" not in self.site.connection_server.supported_ip_types: if "ipv6" not in self.connection_server.supported_ip_types:
trackers = [tracker for tracker in trackers if helper.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"] trackers = [tracker for tracker in trackers if self.connection_server.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]
return trackers return trackers
def getAnnouncingTrackers(self, mode): # Returns a cached value of len(self.getSupportedTrackers()), which can be
# inaccurate.
# To be used from Site for estimating available tracker count.
def getSupportedTrackerCount(self):
return self.supported_tracker_count
def shouldTrackerBeTemporarilyIgnored(self, tracker, mode, force):
if not tracker:
return True
if force:
return False
now = time.time()
# Throttle accessing unresponsive trackers
tracker_stats = global_stats[tracker]
delay = min(30 * tracker_stats["num_error"], 60 * 10)
time_announce_allowed = tracker_stats["time_request"] + delay
if now < time_announce_allowed:
return True
return False
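For illustration, a minimal sketch (not part of the diff) of the back-off that shouldTrackerBeTemporarilyIgnored() applies, using the same formula as above: 30 seconds per recorded error, capped at 10 minutes.

def tracker_backoff_delay(num_error):
    # Mirrors: delay = min(30 * tracker_stats["num_error"], 60 * 10)
    return min(30 * num_error, 60 * 10)

# 1 error -> 30 s, 5 errors -> 150 s, 20 or more errors -> capped at 600 s
assert [tracker_backoff_delay(n) for n in (1, 5, 20, 100)] == [30, 150, 600, 600]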
def getAnnouncingTrackers(self, mode, force):
trackers = self.getSupportedTrackers() trackers = self.getSupportedTrackers()
if trackers and (mode == "update" or mode == "more"): # Only announce on one tracker, increment the queried tracker id self.supported_tracker_count = len(trackers)
self.last_tracker_id += 1
self.last_tracker_id = self.last_tracker_id % len(trackers) if trackers and (mode == "update" or mode == "more"):
trackers_announcing = [trackers[self.last_tracker_id]] # We only going to use this one
# Choose just 2 trackers to announce to
trackers_announcing = []
# One is the next in sequence
self.tracker_circular_iterator.resetSuccessiveCount()
while 1:
tracker = self.tracker_circular_iterator.next(trackers)
if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
trackers_announcing.append(tracker)
break
if self.tracker_circular_iterator.isWrapped():
break
# And one is just random
shuffled_trackers = random.sample(trackers, len(trackers))
for tracker in shuffled_trackers:
if tracker in trackers_announcing:
continue
if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
trackers_announcing.append(tracker)
break
else: else:
trackers_announcing = trackers trackers_announcing = [
tracker for tracker in trackers
if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force)
]
return trackers_announcing return trackers_announcing
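A minimal sketch of the selection policy above for "update"/"more" mode, with next_index and is_ignored as hypothetical stand-ins for the circular iterator state and shouldTrackerBeTemporarilyIgnored(): one tracker is taken in rotation and one at random, both skipping temporarily ignored trackers.

import random

def choose_trackers(trackers, next_index, is_ignored):
    chosen = []
    for offset in range(len(trackers)):  # next tracker in rotation that is not ignored
        tracker = trackers[(next_index + offset) % len(trackers)]
        if not is_ignored(tracker):
            chosen.append(tracker)
            break
    for tracker in random.sample(trackers, len(trackers)):  # plus one picked at random
        if tracker not in chosen and not is_ignored(tracker):
            chosen.append(tracker)
            break
    return chosen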
@ -62,95 +124,33 @@ class SiteAnnouncer(object):
back = [] back = []
# Type of addresses they can reach me # Type of addresses they can reach me
if config.trackers_proxy == "disable" and config.tor != "always": if config.trackers_proxy == "disable" and config.tor != "always":
for ip_type, opened in list(self.site.connection_server.port_opened.items()): for ip_type, opened in list(self.connection_server.port_opened.items()):
if opened: if opened:
back.append(ip_type) back.append(ip_type)
if self.site.connection_server.tor_manager.start_onions: if self.connection_server.tor_manager.start_onions:
back.append("onion") back.append("onion")
return back return back
@util.Noparallel(blocking=False) @util.Noparallel()
def announce(self, force=False, mode="start", pex=True): def announce(self, force=False, mode="start", pex=True):
if not self.site.isServing():
return
if time.time() - self.time_last_announce < 30 and not force: if time.time() - self.time_last_announce < 30 and not force:
return # No reannouncing within 30 secs return # No reannouncing within 30 secs
if force:
self.site.log.debug("Force reannounce in mode %s" % mode) self.log.debug("announce: force=%s, mode=%s, pex=%s" % (force, mode, pex))
self.fileserver_port = config.fileserver_port self.fileserver_port = config.fileserver_port
self.time_last_announce = time.time() self.time_last_announce = time.time()
trackers = self.getAnnouncingTrackers(mode) trackers = self.getAnnouncingTrackers(mode, force)
self.log.debug("Chosen trackers: %s" % trackers)
if config.verbose: self.announceToTrackers(trackers, force=force, mode=mode)
self.site.log.debug("Tracker announcing, trackers: %s" % trackers)
errors = []
slow = []
s = time.time()
threads = []
num_announced = 0
for tracker in trackers: # Start announce threads
tracker_stats = global_stats[tracker]
# Reduce the announce time for trackers that looks unreliable
time_announce_allowed = time.time() - 60 * min(30, tracker_stats["num_error"])
if tracker_stats["num_error"] > 5 and tracker_stats["time_request"] > time_announce_allowed and not force:
if config.verbose:
self.site.log.debug("Tracker %s looks unreliable, announce skipped (error: %s)" % (tracker, tracker_stats["num_error"]))
continue
thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
threads.append(thread)
thread.tracker = tracker
time.sleep(0.01)
self.updateWebsocket(trackers="announcing")
gevent.joinall(threads, timeout=20) # Wait for announce finish
for thread in threads:
if thread.value is None:
continue
if thread.value is not False:
if thread.value > 1.0: # Takes more than 1 second to announce
slow.append("%.2fs %s" % (thread.value, thread.tracker))
num_announced += 1
else:
if thread.ready():
errors.append(thread.tracker)
else: # Still running
slow.append("30s+ %s" % thread.tracker)
# Save peers num
self.site.settings["peers"] = len(self.site.peers)
if len(errors) < len(threads): # At least one tracker finished
if len(trackers) == 1:
announced_to = trackers[0]
else:
announced_to = "%s/%s trackers" % (num_announced, len(threads))
if mode != "update" or config.verbose:
self.site.log.debug(
"Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
(mode, announced_to, time.time() - s, errors, slow)
)
else:
if len(threads) > 1:
self.site.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
if len(threads) == 1 and mode != "start": # Move to next tracker
self.site.log.debug("Tracker failed, skipping to next one...")
self.site.greenlet_manager.spawnLater(1.0, self.announce, force=force, mode=mode, pex=pex)
self.updateWebsocket(trackers="announced")
if pex: if pex:
self.updateWebsocket(pex="announcing")
if mode == "more": # Need more peers
self.announcePex(need_num=10)
else:
self.announcePex() self.announcePex()
self.updateWebsocket(pex="announced")
def getTrackerHandler(self, protocol): def getTrackerHandler(self, protocol):
return None return None
@ -177,7 +177,7 @@ class SiteAnnouncer(object):
s = time.time() s = time.time()
address_parts = self.getAddressParts(tracker) address_parts = self.getAddressParts(tracker)
if not address_parts: if not address_parts:
self.site.log.warning("Tracker %s error: Invalid address" % tracker) self.log.warning("Tracker %s error: Invalid address" % tracker)
return False return False
if tracker not in self.stats: if tracker not in self.stats:
@ -188,7 +188,7 @@ class SiteAnnouncer(object):
self.stats[tracker]["time_request"] = time.time() self.stats[tracker]["time_request"] = time.time()
global_stats[tracker]["time_request"] = time.time() global_stats[tracker]["time_request"] = time.time()
if config.verbose: if config.verbose:
self.site.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode)) self.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
if mode == "update": if mode == "update":
num_want = 10 num_want = 10
else: else:
@ -202,7 +202,7 @@ class SiteAnnouncer(object):
else: else:
raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"]) raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"])
except Exception as err: except Exception as err:
self.site.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode)) self.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
error = err error = err
if error: if error:
@ -210,11 +210,11 @@ class SiteAnnouncer(object):
self.stats[tracker]["time_status"] = time.time() self.stats[tracker]["time_status"] = time.time()
self.stats[tracker]["last_error"] = str(error) self.stats[tracker]["last_error"] = str(error)
self.stats[tracker]["time_last_error"] = time.time() self.stats[tracker]["time_last_error"] = time.time()
if self.site.connection_server.has_internet: if self.connection_server.has_internet:
self.stats[tracker]["num_error"] += 1 self.stats[tracker]["num_error"] += 1
self.stats[tracker]["num_request"] += 1 self.stats[tracker]["num_request"] += 1
global_stats[tracker]["num_request"] += 1 global_stats[tracker]["num_request"] += 1
if self.site.connection_server.has_internet: if self.connection_server.has_internet:
global_stats[tracker]["num_error"] += 1 global_stats[tracker]["num_error"] += 1
self.updateWebsocket(tracker="error") self.updateWebsocket(tracker="error")
return False return False
@ -249,27 +249,92 @@ class SiteAnnouncer(object):
self.site.updateWebsocket(peers_added=added) self.site.updateWebsocket(peers_added=added)
if config.verbose: if config.verbose:
self.site.log.debug( self.log.debug(
"Tracker result: %s://%s (found %s peers, new: %s, total: %s)" % "Tracker result: %s://%s (found %s peers, new: %s, total: %s)" %
(address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers)) (address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers))
) )
return time.time() - s return time.time() - s
@util.Noparallel(blocking=False) def announceToTrackers(self, trackers, force=False, mode="start"):
def announcePex(self, query_num=2, need_num=5): errors = []
peers = self.site.getConnectedPeers() slow = []
if len(peers) == 0: # Wait 3s for connections s = time.time()
time.sleep(3) threads = []
peers = self.site.getConnectedPeers() num_announced = 0
if len(peers) == 0: # Small number of connected peers for this site, connect to any for tracker in trackers: # Start announce threads
peers = list(self.site.getRecentPeers(20)) thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
need_num = 10 threads.append(thread)
thread.tracker = tracker
time.sleep(0.01)
self.updateWebsocket(trackers="announcing")
gevent.joinall(threads, timeout=20) # Wait for announce finish
for thread in threads:
if thread.value is None:
continue
if thread.value is not False:
if thread.value > 1.0: # Takes more than 1 second to announce
slow.append("%.2fs %s" % (thread.value, thread.tracker))
num_announced += 1
else:
if thread.ready():
errors.append(thread.tracker)
else: # Still running
slow.append("30s+ %s" % thread.tracker)
# Save peers num
self.site.settings["peers"] = len(self.site.peers)
if len(errors) < len(threads): # At least one tracker finished
if len(trackers) == 1:
announced_to = trackers[0]
else:
announced_to = "%s/%s trackers" % (num_announced, len(threads))
if mode != "update" or config.verbose:
self.log.debug(
"Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
(mode, announced_to, time.time() - s, errors, slow)
)
else:
if len(threads) > 1:
self.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
if len(threads) > 1 and mode != "start": # Move to next tracker
self.log.debug("Tracker failed, skipping to next one...")
self.site.greenlet_manager.spawnLater(5.0, self.announce, force=force, mode=mode, pex=False)
self.updateWebsocket(trackers="announced")
@util.Noparallel(blocking=False)
def announcePex(self, query_num=2, need_num=10, establish_connections=True):
peers = []
try:
peer_count = 20 + query_num * 2
# Wait for some peers to connect
for _ in range(5):
if not self.site.isServing():
return
peers = self.site.getConnectedPeers(only_fully_connected=True)
if len(peers) > 0:
break
time.sleep(2)
if len(peers) < peer_count and establish_connections:
# Small number of connected peers for this site, connect to any
peers = list(self.site.getRecentPeers(peer_count))
if len(peers) > 0:
self.updateWebsocket(pex="announcing")
random.shuffle(peers) random.shuffle(peers)
done = 0 done = 0
total_added = 0 total_added = 0
for peer in peers: for peer in peers:
if not establish_connections and not peer.isConnected():
continue
num_added = peer.pex(need_num=need_num) num_added = peer.pex(need_num=need_num)
if num_added is not False: if num_added is not False:
done += 1 done += 1
@ -277,11 +342,13 @@ class SiteAnnouncer(object):
if num_added: if num_added:
self.site.worker_manager.onPeers() self.site.worker_manager.onPeers()
self.site.updateWebsocket(peers_added=num_added) self.site.updateWebsocket(peers_added=num_added)
else:
time.sleep(0.1)
if done == query_num: if done == query_num:
break break
self.site.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added)) time.sleep(0.1)
self.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
finally:
if len(peers) > 0:
self.updateWebsocket(pex="announced")
def updateWebsocket(self, **kwargs): def updateWebsocket(self, **kwargs):
if kwargs: if kwargs:

256
src/Site/SiteHelpers.py Normal file
View file

@ -0,0 +1,256 @@
import time
import weakref
import gevent
class ConnectRequirement(object):
next_id = 1
def __init__(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
self.need_nr_peers = need_nr_peers # how many total peers we need
self.need_nr_connected_peers = need_nr_connected_peers # how many connected peers we need
self.result = gevent.event.AsyncResult() # resolves on need_nr_peers condition
self.result_connected = gevent.event.AsyncResult() # resolves on need_nr_connected_peers condition
self.expiration_interval = expiration_interval
self.expired = False
if expiration_interval:
self.expire_at = time.time() + expiration_interval
else:
self.expire_at = None
self.nr_peers = -1 # updated PeerConnector()
self.nr_connected_peers = -1 # updated PeerConnector()
self.heartbeat = gevent.event.AsyncResult()
self.id = type(self).next_id
type(self).next_id += 1
def fulfilled(self):
return self.result.ready() and self.result_connected.ready()
def ready(self):
return self.expired or self.fulfilled()
# Heartbeat sent when any of the following happens:
# * self.result is set
# * self.result_connected is set
# * self.nr_peers changed
# * self.nr_connected_peers changed
# * self.expired is set
def waitHeartbeat(self, timeout=None):
if self.heartbeat.ready():
self.heartbeat = gevent.event.AsyncResult()
return self.heartbeat.wait(timeout=timeout)
def sendHeartbeat(self):
self.heartbeat.set_result()
if self.heartbeat.ready():
self.heartbeat = gevent.event.AsyncResult()
class PeerConnector(object):
def __init__(self, site):
self.site = site
self.peer_reqs = weakref.WeakValueDictionary() # How many connected peers we need.
# Separate entry for each requirement.
# Objects of type ConnectRequirement.
self.peer_connector_controller = None # Thread doing the orchestration in background.
self.peer_connector_workers = dict() # Threads trying to connect to individual peers.
self.peer_connector_worker_limit = 5 # Max nr of workers.
self.peer_connector_announcer = None # Thread doing announces in background.
# Max effective values. Set by processReqs().
self.need_nr_peers = 0
self.need_nr_connected_peers = 0
self.nr_peers = 0 # set by processReqs()
self.nr_connected_peers = 0 # set by processReqs2()
# Connector Controller state
self.peers = list()
def addReq(self, req):
self.peer_reqs[req.id] = req
self.processReqs()
def newReq(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
req = ConnectRequirement(need_nr_peers, need_nr_connected_peers, expiration_interval=expiration_interval)
self.addReq(req)
return req
def processReqs(self, nr_connected_peers=None):
nr_peers = len(self.site.peers)
self.nr_peers = nr_peers
need_nr_peers = 0
need_nr_connected_peers = 0
items = list(self.peer_reqs.items())
for key, req in items:
send_heartbeat = False
if req.expire_at and req.expire_at < time.time():
req.expired = True
self.peer_reqs.pop(key, None)
send_heartbeat = True
elif req.result.ready() and req.result_connected.ready():
pass
else:
if nr_connected_peers is not None:
if req.need_nr_peers <= nr_peers and req.need_nr_connected_peers <= nr_connected_peers:
req.result.set_result(nr_peers)
req.result_connected.set_result(nr_connected_peers)
send_heartbeat = True
if req.nr_peers != nr_peers or req.nr_connected_peers != nr_connected_peers:
req.nr_peers = nr_peers
req.nr_connected_peers = nr_connected_peers
send_heartbeat = True
if not (req.result.ready() and req.result_connected.ready()):
need_nr_peers = max(need_nr_peers, req.need_nr_peers)
need_nr_connected_peers = max(need_nr_connected_peers, req.need_nr_connected_peers)
if send_heartbeat:
req.sendHeartbeat()
self.need_nr_peers = need_nr_peers
self.need_nr_connected_peers = need_nr_connected_peers
if nr_connected_peers is None:
nr_connected_peers = 0
if need_nr_peers > nr_peers:
self.spawnPeerConnectorAnnouncer()
if need_nr_connected_peers > nr_connected_peers:
self.spawnPeerConnectorController()
def processReqs2(self):
self.nr_connected_peers = len(self.site.getConnectedPeers(only_fully_connected=True))
self.processReqs(nr_connected_peers=self.nr_connected_peers)
# For adding new peers when ConnectorController is working.
# While it is iterating over a cached list of peers, there can be a significant lag
# for a newly discovered peer to get in sight of the controller.
# Suppose most previously known peers are dead and we've just got a few
# new peers from a tracker.
# So we mix the new peer into the cached list.
# When ConnectorController is stopped (self.peers is empty), we just do nothing here.
def addPeer(self, peer):
if not self.peers:
return
if peer not in self.peers:
self.peers.append(peer)
def deregisterPeer(self, peer):
try:
self.peers.remove(peer)
except:
pass
def sleep(self, t):
self.site.connection_server.sleep(t)
def keepGoing(self):
return self.site.isServing() and self.site.connection_server.allowsCreatingConnections()
def peerConnectorWorker(self, peer):
if not peer.isConnected():
peer.connect()
if peer.isConnected():
peer.ping()
self.processReqs2()
def peerConnectorController(self):
self.peers = list()
addendum = 20
while self.keepGoing():
no_peers_loop = 0
while len(self.site.peers) < 1:
# No peers at all.
# Waiting for the announcer to discover some peers.
self.sleep(10 + no_peers_loop)
no_peers_loop += 1
if not self.keepGoing() or no_peers_loop > 60:
break
self.processReqs2()
if self.need_nr_connected_peers <= self.nr_connected_peers:
# Ok, nobody waits for connected peers.
# Done.
break
if len(self.site.peers) < 1:
break
if len(self.peers) < 1:
# refill the peer list
self.peers = self.site.getRecentPeers(self.need_nr_connected_peers * 2 + self.nr_connected_peers + addendum)
addendum = min(addendum * 2 + 50, 10000)
if len(self.peers) <= self.nr_connected_peers:
# Looks like all known peers are connected.
# Waiting for the announcer to discover some peers.
self.site.announcer.announcePex(establish_connections=False)
self.sleep(10)
continue
added = 0
# try connecting to peers
while self.keepGoing() and len(self.peer_connector_workers) < self.peer_connector_worker_limit:
if len(self.peers) < 1:
break
peer = self.peers.pop(0)
if peer.isConnected():
continue
thread = self.peer_connector_workers.get(peer, None)
if thread:
continue
thread = self.site.spawn(self.peerConnectorWorker, peer)
self.peer_connector_workers[peer] = thread
thread.link(lambda thread, peer=peer: self.peer_connector_workers.pop(peer, None))
added += 1
if not self.keepGoing():
break
if not added:
# Looks like all known peers are either connected or being connected,
# so we weren't able to start connecting any peer in this iteration.
# Waiting for the announcer to discover some peers.
self.sleep(20)
# wait for more room in self.peer_connector_workers
while self.keepGoing() and len(self.peer_connector_workers) >= self.peer_connector_worker_limit:
self.sleep(2)
if not self.site.connection_server.isInternetOnline():
self.sleep(30)
self.peers = list()
self.peer_connector_controller = None
def peerConnectorAnnouncer(self):
while self.keepGoing():
if self.need_nr_peers <= self.nr_peers:
break
self.site.announce(mode="more")
self.processReqs2()
if self.need_nr_peers <= self.nr_peers:
break
self.sleep(10)
if not self.site.connection_server.isInternetOnline():
self.sleep(20)
self.peer_connector_announcer = None
def spawnPeerConnectorController(self):
if self.peer_connector_controller is None or self.peer_connector_controller.ready():
self.peer_connector_controller = self.site.spawn(self.peerConnectorController)
def spawnPeerConnectorAnnouncer(self):
if self.peer_connector_announcer is None or self.peer_connector_announcer.ready():
self.peer_connector_announcer = self.site.spawn(self.peerConnectorAnnouncer)
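A minimal sketch of how a caller might wait on a ConnectRequirement, assuming a PeerConnector instance named connector (hypothetical usage; the real call sites live in Site.py and are not shown in this diff excerpt):

import time

def waitForPeers(connector, need_nr_peers=5, need_nr_connected_peers=2, timeout=60):
    # One requirement object per wait; the background controller/announcer resolve it.
    req = connector.newReq(need_nr_peers, need_nr_connected_peers, expiration_interval=timeout)
    deadline = time.time() + timeout
    while not req.ready() and time.time() < deadline:
        req.waitHeartbeat(timeout=5)  # woken on progress, fulfilment or expiry
    return req.fulfilled()            # True only if both thresholds were reached in time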

View file

@ -4,6 +4,7 @@ import re
import os import os
import time import time
import atexit import atexit
import collections
import gevent import gevent
@ -27,6 +28,21 @@ class SiteManager(object):
gevent.spawn(self.saveTimer) gevent.spawn(self.saveTimer)
atexit.register(lambda: self.save(recalculate_size=True)) atexit.register(lambda: self.save(recalculate_size=True))
# ZeroNet has a bug causing a desync between:
# * time sent in a response of listModified
# and
# * time checked on receiving a file.
# This leads to the following scenario:
# * Request listModified.
# * Detect that the remote peer is missing an update
# * Send a newer version of the file back to the peer.
# * The peer responds "ok: File not changed"
# .....
# * Request listModified the next time and do all the same again.
# So we keep the list of sent back entries to prevent sending multiple useless updates:
# "{site.address} - {peer.key} - {inner_path}" -> mtime
self.send_back_lru = collections.OrderedDict()
# Load all sites from data/sites.json # Load all sites from data/sites.json
@util.Noparallel() @util.Noparallel()
def load(self, cleanup=True, startup=False): def load(self, cleanup=True, startup=False):
@ -155,6 +171,11 @@ class SiteManager(object):
def resolveDomainCached(self, domain): def resolveDomainCached(self, domain):
return self.resolveDomain(domain) return self.resolveDomain(domain)
# Checks if the address is blocked. To be implemented in content filter plugins.
# Since 0.8.0
def isAddressBlocked(self, address):
return False
# Return: Site object or None if not found # Return: Site object or None if not found
def get(self, address): def get(self, address):
if self.isDomainCached(address): if self.isDomainCached(address):
@ -216,6 +237,23 @@ class SiteManager(object):
self.load(startup=True) self.load(startup=True)
return self.sites return self.sites
# Return True if we have already sent <peer> a version of <inner_path>
# newer than <remote_modified>, i.e. the send back logic
# should be suppressed for <inner_path>.
# Return False if <inner_path> can still be sent back to <peer>.
def checkSendBackLRU(self, site, peer, inner_path, remote_modified):
key = site.address + ' - ' + peer.key + ' - ' + inner_path
sent_modified = self.send_back_lru.get(key, 0)
return remote_modified < sent_modified
def addToSendBackLRU(self, site, peer, inner_path, modified):
key = site.address + ' - ' + peer.key + ' - ' + inner_path
if self.send_back_lru.get(key, None) is None:
self.send_back_lru[key] = modified
while len(self.send_back_lru) > config.send_back_lru_size:
self.send_back_lru.popitem(last=False)
else:
self.send_back_lru.move_to_end(key, last=True)
site_manager = SiteManager() # Singletone site_manager = SiteManager() # Singletone
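A minimal sketch of the intended round trip with the send-back LRU above (hypothetical names; send_file stands in for whatever actually publishes the update to the peer):

def maybeSendBack(site, peer, inner_path, remote_modified, modified, send_file):
    # Skip if we already sent this peer a copy of inner_path newer than what it reports.
    if site_manager.checkSendBackLRU(site, peer, inner_path, remote_modified):
        return False
    send_file(peer, inner_path)  # caller-supplied callable doing the actual send back
    site_manager.addToSendBackLRU(site, peer, inner_path, modified)
    return True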

View file

@ -24,6 +24,25 @@ thread_pool_fs_read = ThreadPool.ThreadPool(config.threads_fs_read, name="FS rea
thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write") thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write")
thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch") thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")
class VerifyFiles_Notificator(object):
def __init__(self, site, quick_check):
self.site = site
self.quick_check = quick_check
self.scanned_files = 0
self.websocket_update_interval = 0.25
self.websocket_update_time = time.time()
def inc(self):
self.scanned_files += 1
if self.websocket_update_time + self.websocket_update_interval < time.time():
self.send()
def send(self):
self.websocket_update_time = time.time()
if self.quick_check:
self.site.updateWebsocket(checking=self.scanned_files)
else:
self.site.updateWebsocket(verifying=self.scanned_files)
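A minimal sketch of the notificator's throttling, assuming the class above and a stub site object (hypothetical; the real site comes from SiteStorage):

class _StubSite:
    def updateWebsocket(self, **kwargs):
        print(kwargs)  # stand-in for the real websocket push

notificator = VerifyFiles_Notificator(_StubSite(), quick_check=True)
for _ in range(1000):
    notificator.inc()    # sends at most one websocket update per 0.25 s interval
notificator.send()       # final update: {'checking': 1000}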
@PluginManager.acceptPlugins @PluginManager.acceptPlugins
class SiteStorage(object): class SiteStorage(object):
@ -356,7 +375,7 @@ class SiteStorage(object):
# Reopen DB to check changes # Reopen DB to check changes
if self.has_db: if self.has_db:
self.closeDb("New dbschema") self.closeDb("New dbschema")
gevent.spawn(self.getDb) self.site.spawn(self.getDb)
elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db
if config.verbose: if config.verbose:
self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file)) self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
@ -369,8 +388,12 @@ class SiteStorage(object):
# Load and parse json file # Load and parse json file
@thread_pool_fs_read.wrap @thread_pool_fs_read.wrap
def loadJson(self, inner_path): def loadJson(self, inner_path):
with self.open(inner_path, "r", encoding="utf8") as file: try :
with self.open(inner_path) as file:
return json.load(file) return json.load(file)
except Exception as err:
self.log.error("Json load error: %s" % Debug.formatException(err))
return None
# Write formatted json file # Write formatted json file
def writeJson(self, inner_path, data): def writeJson(self, inner_path, data):
@ -420,6 +443,8 @@ class SiteStorage(object):
return inner_path return inner_path
# Verify all files sha512sum using content.json # Verify all files sha512sum using content.json
# The result may not be accurate if self.site.isStopping().
# verifyFiles() returns immediately in that case.
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True): def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
bad_files = [] bad_files = []
back = defaultdict(int) back = defaultdict(int)
@ -431,17 +456,55 @@ class SiteStorage(object):
self.log.debug("VerifyFile content.json not exists") self.log.debug("VerifyFile content.json not exists")
self.site.needFile("content.json", update=True) # Force update to fix corrupt file self.site.needFile("content.json", update=True) # Force update to fix corrupt file
self.site.content_manager.loadContent() # Reload content.json self.site.content_manager.loadContent() # Reload content.json
for content_inner_path, content in list(self.site.content_manager.contents.items()):
# Trying to read self.site.content_manager.contents without being stuck
# on reading the long file list and also without getting
# "RuntimeError: dictionary changed size during iteration"
# We can't use just list(iteritems()) since it loads all the contents files
# at once and gets unresponsive.
contents = {}
notificator = None
tries = 0
max_tries = 40
stop = False
while not stop:
try:
contents = {}
notificator = VerifyFiles_Notificator(self.site, quick_check)
for content_inner_path, content in self.site.content_manager.contents.iteritems():
notificator.inc()
contents[content_inner_path] = content
if self.site.isStopping():
stop = True
break
stop = True
except RuntimeError as err:
if "changed size during iteration" in str(err):
tries += 1
if tries >= max_tries:
self.log.info("contents.json file list changed during iteration. %s tries done. Giving up.", tries)
stop = True
self.log.info("contents.json file list changed during iteration. Trying again... (%s)", tries)
time.sleep(2 * tries)
else:
stop = True
for content_inner_path, content in contents.items():
back["num_content"] += 1 back["num_content"] += 1
i += 1 i += 1
if i % 50 == 0: if i % 50 == 0:
time.sleep(0.001) # Context switch to avoid gevent hangs time.sleep(0.001) # Context switch to avoid gevent hangs
if self.site.isStopping():
break
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
back["num_content_missing"] += 1 back["num_content_missing"] += 1
self.log.debug("[MISSING] %s" % content_inner_path) self.log.debug("[MISSING] %s" % content_inner_path)
bad_files.append(content_inner_path) bad_files.append(content_inner_path)
for file_relative_path in list(content.get("files", {}).keys()): for file_relative_path in list(content.get("files", {}).keys()):
notificator.inc()
back["num_file"] += 1 back["num_file"] += 1
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading / file_inner_path = file_inner_path.strip("/") # Strip leading /
@ -452,14 +515,19 @@ class SiteStorage(object):
bad_files.append(file_inner_path) bad_files.append(file_inner_path)
continue continue
err = None
if quick_check: if quick_check:
ok = os.path.getsize(file_path) == content["files"][file_relative_path]["size"] file_size = os.path.getsize(file_path)
expected_size = content["files"][file_relative_path]["size"]
ok = file_size == expected_size
if not ok: if not ok:
err = "Invalid size" err = "Invalid size: %s - actual, %s - expected" % (file_size, expected_size)
else: else:
try: try:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb")) ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
except Exception as err: except Exception as err2:
err = err2
ok = False ok = False
if not ok: if not ok:
@ -472,6 +540,7 @@ class SiteStorage(object):
optional_added = 0 optional_added = 0
optional_removed = 0 optional_removed = 0
for file_relative_path in list(content.get("files_optional", {}).keys()): for file_relative_path in list(content.get("files_optional", {}).keys()):
notificator.inc()
back["num_optional"] += 1 back["num_optional"] += 1
file_node = content["files_optional"][file_relative_path] file_node = content["files_optional"][file_relative_path]
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
@ -516,6 +585,8 @@ class SiteStorage(object):
(content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed) (content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
) )
notificator.send()
self.site.content_manager.contents.db.processDelayed() self.site.content_manager.contents.db.processDelayed()
time.sleep(0.001) # Context switch to avoid gevent hangs time.sleep(0.001) # Context switch to avoid gevent hangs
return back return back

View file

@ -16,7 +16,7 @@ class TestFileRequest:
client = ConnectionServer(file_server.ip, 1545) client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544) connection = client.getConnection(file_server.ip, 1544)
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Normal request # Normal request
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0}) response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
@ -61,7 +61,7 @@ class TestFileRequest:
file_server.ip_incoming = {} # Reset flood protection file_server.ip_incoming = {} # Reset flood protection
client = ConnectionServer(file_server.ip, 1545) client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544) connection = client.getConnection(file_server.ip, 1544)
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
buff = io.BytesIO() buff = io.BytesIO()
response = connection.request("streamFile", {"site": site.address, "inner_path": "content.json", "location": 0}, buff) response = connection.request("streamFile", {"site": site.address, "inner_path": "content.json", "location": 0}, buff)
@ -89,7 +89,7 @@ class TestFileRequest:
client.stop() client.stop()
def testPex(self, file_server, site, site_temp): def testPex(self, file_server, site, site_temp):
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp} client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client site_temp.connection_server = client

View file

@ -13,7 +13,7 @@ from . import Spy
@pytest.mark.usefixtures("resetTempSettings") @pytest.mark.usefixtures("resetTempSettings")
class TestPeer: class TestPeer:
def testPing(self, file_server, site, site_temp): def testPing(self, file_server, site, site_temp):
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp} client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client site_temp.connection_server = client
@ -32,7 +32,7 @@ class TestPeer:
client.stop() client.stop()
def testDownloadFile(self, file_server, site, site_temp): def testDownloadFile(self, file_server, site, site_temp):
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp} client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client site_temp.connection_server = client
@ -77,11 +77,11 @@ class TestPeer:
def testHashfieldExchange(self, file_server, site, site_temp): def testHashfieldExchange(self, file_server, site, site_temp):
server1 = file_server server1 = file_server
server1.sites[site.address] = site server1.getSites()[site.address] = site
site.connection_server = server1 site.connection_server = server1
server2 = FileServer(file_server.ip, 1545) server2 = FileServer(file_server.ip, 1545)
server2.sites[site_temp.address] = site_temp server2.getSites()[site_temp.address] = site_temp
site_temp.connection_server = server2 site_temp.connection_server = server2
site.storage.verifyFiles(quick_check=True) # Find what optional files we have site.storage.verifyFiles(quick_check=True) # Find what optional files we have
@ -127,7 +127,7 @@ class TestPeer:
server2.stop() server2.stop()
def testFindHash(self, file_server, site, site_temp): def testFindHash(self, file_server, site, site_temp):
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp} client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client site_temp.connection_server = client

View file

@ -23,7 +23,7 @@ class TestSiteDownload:
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -74,7 +74,7 @@ class TestSiteDownload:
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -130,7 +130,7 @@ class TestSiteDownload:
def testArchivedDownload(self, file_server, site, site_temp): def testArchivedDownload(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -178,7 +178,7 @@ class TestSiteDownload:
def testArchivedBeforeDownload(self, file_server, site, site_temp): def testArchivedBeforeDownload(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -229,7 +229,7 @@ class TestSiteDownload:
def testOptionalDownload(self, file_server, site, site_temp): def testOptionalDownload(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = ConnectionServer(file_server.ip, 1545) client = ConnectionServer(file_server.ip, 1545)
@ -271,7 +271,7 @@ class TestSiteDownload:
def testFindOptional(self, file_server, site, site_temp): def testFindOptional(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init full source server (has optional files) # Init full source server (has optional files)
site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT") site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
@ -284,7 +284,7 @@ class TestSiteDownload:
gevent.spawn(listen) gevent.spawn(listen)
time.sleep(0.001) # Port opening time.sleep(0.001) # Port opening
file_server_full.sites[site_full.address] = site_full # Add site file_server_full.getSites()[site_full.address] = site_full # Add site
site_full.storage.verifyFiles(quick_check=True) # Check optional files site_full.storage.verifyFiles(quick_check=True) # Check optional files
site_full_peer = site.addPeer(file_server.ip, 1546) # Add it to source server site_full_peer = site.addPeer(file_server.ip, 1546) # Add it to source server
hashfield = site_full_peer.updateHashfield() # Update hashfield hashfield = site_full_peer.updateHashfield() # Update hashfield
@ -342,7 +342,7 @@ class TestSiteDownload:
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -423,7 +423,7 @@ class TestSiteDownload:
def testBigUpdate(self, file_server, site, site_temp): def testBigUpdate(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -476,7 +476,7 @@ class TestSiteDownload:
def testHugeContentSiteUpdate(self, file_server, site, site_temp): def testHugeContentSiteUpdate(self, file_server, site, site_temp):
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)
@ -524,7 +524,7 @@ class TestSiteDownload:
# Init source server # Init source server
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Init client server # Init client server
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)

View file

@ -75,7 +75,7 @@ class TestTor:
assert file_server.getConnection(address + ".onion", 1544, site=site) != file_server.getConnection(address + ".onion", 1544, site=site_temp) assert file_server.getConnection(address + ".onion", 1544, site=site) != file_server.getConnection(address + ".onion", 1544, site=site_temp)
# Only allow to query from the locked site # Only allow to query from the locked site
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
connection_locked = file_server.getConnection(address + ".onion", 1544, site=site) connection_locked = file_server.getConnection(address + ".onion", 1544, site=site)
assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0}) assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site" assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site"
@ -83,11 +83,11 @@ class TestTor:
def testPex(self, file_server, site, site_temp): def testPex(self, file_server, site, site_temp):
# Register site to currently running fileserver # Register site to currently running fileserver
site.connection_server = file_server site.connection_server = file_server
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
# Create a new file server to emulate new peer connecting to our peer # Create a new file server to emulate new peer connecting to our peer
file_server_temp = FileServer(file_server.ip, 1545) file_server_temp = FileServer(file_server.ip, 1545)
site_temp.connection_server = file_server_temp site_temp.connection_server = file_server_temp
file_server_temp.sites[site_temp.address] = site_temp file_server_temp.getSites()[site_temp.address] = site_temp
# We will request peers from this # We will request peers from this
peer_source = site_temp.addPeer(file_server.ip, 1544) peer_source = site_temp.addPeer(file_server.ip, 1544)
@ -113,7 +113,7 @@ class TestTor:
def testFindHash(self, tor_manager, file_server, site, site_temp): def testFindHash(self, tor_manager, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection file_server.ip_incoming = {} # Reset flood protection
file_server.sites[site.address] = site file_server.getSites()[site.address] = site
file_server.tor_manager = tor_manager file_server.tor_manager = tor_manager
client = FileServer(file_server.ip, 1545) client = FileServer(file_server.ip, 1545)

BIN
src/Test/testdata/chart.db-shm vendored Normal file

Binary file not shown.

BIN
src/Test/testdata/chart.db-wal vendored Normal file

Binary file not shown.

BIN
src/Test/testdata/content.db-shm vendored Normal file

Binary file not shown.

BIN
src/Test/testdata/content.db-wal vendored Normal file

Binary file not shown.

1
src/Test/testdata/filters.json vendored Normal file
View file

@ -0,0 +1 @@
{}

58
src/Test/testdata/openssl.cnf vendored Normal file
View file

@ -0,0 +1,58 @@
[ req ]
default_bits = 2048
default_keyfile = server-key.pem
distinguished_name = subject
req_extensions = req_ext
x509_extensions = x509_ext
string_mask = utf8only
# The Subject DN can be formed using X501 or RFC 4514 (see RFC 4519 for a description).
# Its sort of a mashup. For example, RFC 4514 does not provide emailAddress.
[ subject ]
countryName = US
stateOrProvinceName = NY
localityName = New York
organizationName = Example, LLC
# Use a friendly name here because its presented to the user. The server's DNS
# names are placed in Subject Alternate Names. Plus, DNS names here is deprecated
# by both IETF and CA/Browser Forums. If you place a DNS name here, then you
# must include the DNS name in the SAN too (otherwise, Chrome and others that
# strictly follow the CA/Browser Baseline Requirements will fail).
commonName = Example Company
emailAddress = test@example.com
# Section x509_ext is used when generating a self-signed certificate. I.e., openssl req -x509 ...
[ x509_ext ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = @alternate_names
# RFC 5280, Section 4.2.1.12 makes EKU optional
# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
# extendedKeyUsage = serverAuth, clientAuth
# Section req_ext is used when generating a certificate signing request. I.e., openssl req ...
[ req_ext ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = @alternate_names
# RFC 5280, Section 4.2.1.12 makes EKU optional
# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
# extendedKeyUsage = serverAuth, clientAuth
[ alternate_names ]
DNS.1 = nazwa.pl
DNS.2 = www.nazwa.pl

1
src/Test/testdata/sites.json vendored Normal file
View file

@ -0,0 +1 @@
{}

1
src/Test/testdata/trackers.json vendored Normal file
View file

@ -0,0 +1 @@
{}

9
src/Test/testdata/users.json vendored Normal file
View file

@ -0,0 +1,9 @@
{
"15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}

View file

@ -167,7 +167,7 @@ class UiServer:
self.log.error("Web interface bind error, must be running already, exiting.... %s" % err) self.log.error("Web interface bind error, must be running already, exiting.... %s" % err)
import main import main
main.file_server.stop() main.file_server.stop()
self.log.debug("Stopped.") self.log.info("Stopped.")
def stop(self): def stop(self):
self.log.debug("Stopping...") self.log.debug("Stopping...")

View file

@ -318,6 +318,7 @@ class UiWebsocket(object):
back["updatesite"] = config.updatesite back["updatesite"] = config.updatesite
back["dist_type"] = config.dist_type back["dist_type"] = config.dist_type
back["lib_verify_best"] = CryptBitcoin.lib_verify_best back["lib_verify_best"] = CryptBitcoin.lib_verify_best
back["passive_mode"] = file_server.passive_mode
return back return back
def formatAnnouncerInfo(self, site): def formatAnnouncerInfo(self, site):
@ -912,9 +913,9 @@ class UiWebsocket(object):
self.response(to, "ok") self.response(to, "ok")
# Update site content.json # Update site content.json
def actionSiteUpdate(self, to, address, check_files=False, since=None, announce=False): def actionSiteUpdate(self, to, address, check_files=False, verify_files=False, since=None, announce=False):
def updateThread(): def updateThread():
site.update(announce=announce, check_files=check_files, since=since) site.update(announce=announce, check_files=check_files, verify_files=verify_files, since=since)
self.response(to, "Updated") self.response(to, "Updated")
site = self.server.sites.get(address) site = self.server.sites.get(address)
@ -1164,6 +1165,32 @@ class UiWebsocket(object):
file_server.portCheck() file_server.portCheck()
self.response(to, file_server.port_opened) self.response(to, file_server.port_opened)
@flag.admin
@flag.no_multiuser
def actionServerSetPassiveMode(self, to, passive_mode=False):
import main
file_server = main.file_server
if file_server.isPassiveMode() != passive_mode:
file_server.setPassiveMode(passive_mode)
if file_server.isPassiveMode():
self.cmd("notification", ["info", _["Passive mode enabled"], 5000])
else:
self.cmd("notification", ["info", _["Passive mode disabled"], 5000])
self.server.updateWebsocket()
@flag.admin
@flag.no_multiuser
def actionServerSetOfflineMode(self, to, offline_mode=False):
import main
file_server = main.file_server
if file_server.isOfflineMode() != offline_mode:
file_server.setOfflineMode(offline_mode)
if file_server.isOfflineMode():
self.cmd("notification", ["info", _["Offline mode enabled"], 5000])
else:
self.cmd("notification", ["info", _["Offline mode disabled"], 5000])
self.server.updateWebsocket()
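For illustration, hypothetical websocket payloads that the new handlers above (and the extended actionSiteUpdate) would accept; the address is a placeholder and real clients go through the ZeroFrame API:

site_address = "1SiteAddressPlaceholder"
messages = [
    {"cmd": "serverSetPassiveMode", "params": {"passive_mode": True}, "id": 1},
    {"cmd": "serverSetOfflineMode", "params": {"offline_mode": True}, "id": 2},
    {"cmd": "siteUpdate", "params": {"address": site_address, "verify_files": True}, "id": 3},
]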
@flag.admin @flag.admin
@flag.no_multiuser @flag.no_multiuser
def actionServerShutdown(self, to, restart=False): def actionServerShutdown(self, to, restart=False):
@ -1174,7 +1201,7 @@ class UiWebsocket(object):
return False return False
if restart: if restart:
main.restart_after_shutdown = True main.restart_after_shutdown = True
main.file_server.stop() main.file_server.stop(ui_websocket=self)
main.ui_server.stop() main.ui_server.stop()
if restart: if restart:

View file

@ -0,0 +1,9 @@
# This file is for adding rules for selectively enabling debug logging
# when working on the code.
# Add your rules here and skip this file when committing changes.
#import re
#from util import SelectiveLogger
#
#SelectiveLogger.addLogLevelRaisingRule("ConnServer")
#SelectiveLogger.addLogLevelRaisingRule(re.compile(r'^Site:'))

View file

@ -4,6 +4,7 @@ import sys
import stat import stat
import time import time
import logging import logging
import loglevel_overrides
startup_errors = [] startup_errors = []
def startupError(msg): def startupError(msg):
@ -154,7 +155,7 @@ class Actions(object):
logging.info("Starting servers....") logging.info("Starting servers....")
gevent.joinall([gevent.spawn(ui_server.start), gevent.spawn(file_server.start)]) gevent.joinall([gevent.spawn(ui_server.start), gevent.spawn(file_server.start)])
logging.info("All server stopped") logging.info("All servers stopped")
# Site commands # Site commands

View file

@ -0,0 +1,34 @@
import random
class CircularIterator:
def __init__(self):
self.successive_count = 0
self.last_size = 0
self.index = -1
def next(self, items):
self.last_size = len(items)
if self.last_size == 0:
return None
if self.index < 0:
self.index = random.randint(0, self.last_size)
else:
self.index += 1
self.index = self.index % self.last_size
self.successive_count += 1
return items[self.index]
def resetSuccessiveCount(self):
self.successive_count = 0
def getSuccessiveCount(self):
return self.successive_count
def isWrapped(self):
return self.successive_count >= self.last_size
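A minimal sketch of CircularIterator in use, mirroring how getAnnouncingTrackers() walks the tracker list earlier in this diff:

it = CircularIterator()
trackers = ["udp://a", "udp://b", "udp://c"]
it.resetSuccessiveCount()
while True:
    tracker = it.next(trackers)  # starts at a random position, then advances one by one
    print(tracker)
    if it.isWrapped():           # True once every item has been visited since the reset
        break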

View file

@@ -3,17 +3,37 @@ from Debug import Debug


 class GreenletManager:
-    def __init__(self):
+    # pool is either gevent.pool.Pool or GreenletManager.
+    # if pool is None, new gevent.pool.Pool() is created.
+    def __init__(self, pool=None):
         self.greenlets = set()
+        if not pool:
+            pool = gevent.pool.Pool(None)
+        self.pool = pool
+
+    def _spawn_later(self, seconds, *args, **kwargs):
+        # If pool is another GreenletManager, delegate to it.
+        if hasattr(self.pool, 'spawnLater'):
+            return self.pool.spawnLater(seconds, *args, **kwargs)
+
+        # There's gevent.spawn_later(), but there isn't gevent.pool.Pool.spawn_later().
+        # Doing manually.
+        greenlet = self.pool.greenlet_class(*args, **kwargs)
+        self.pool.add(greenlet)
+        greenlet.start_later(seconds)
+        return greenlet
+
+    def _spawn(self, *args, **kwargs):
+        return self.pool.spawn(*args, **kwargs)

     def spawnLater(self, *args, **kwargs):
-        greenlet = gevent.spawn_later(*args, **kwargs)
+        greenlet = self._spawn_later(*args, **kwargs)
         greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
         self.greenlets.add(greenlet)
         return greenlet

     def spawn(self, *args, **kwargs):
-        greenlet = gevent.spawn(*args, **kwargs)
+        greenlet = self._spawn(*args, **kwargs)
         greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
         self.greenlets.add(greenlet)
         return greenlet
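A minimal sketch of how the new pool argument could be wired up (the names, pool size and module path are assumptions, not taken from the diff): a manager built on a plain gevent pool caps concurrency, and a manager built on another GreenletManager delegates spawn()/spawnLater() to it, so both managers track the resulting greenlet.

    import gevent
    import gevent.pool
    from util.GreenletManager import GreenletManager  # assumed module path

    shared_pool = gevent.pool.Pool(50)                 # hypothetical concurrency cap

    parent = GreenletManager(pool=shared_pool)
    child = GreenletManager(pool=parent)               # child delegates spawning to parent

    parent.spawn(lambda: print("update check"))
    child.spawnLater(1, lambda: print("re-announce"))  # delegated to parent.spawnLater()

    gevent.sleep(1.5)                                  # let the delayed greenlet run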

View file

@@ -1,10 +1,16 @@
 import re
+import logging
+
+log = logging.getLogger("SafeRe")
+

 class UnsafePatternError(Exception):
     pass

+max_cache_size = 1000
 cached_patterns = {}
+old_cached_patterns = {}


 def isSafePattern(pattern):
@@ -15,18 +21,78 @@ def isSafePattern(pattern):
     if unsafe_pattern_match:
         raise UnsafePatternError("Potentially unsafe part of the pattern: %s in %s" % (unsafe_pattern_match.group(0), pattern))

-    repetitions = re.findall(r"\.[\*\{\+]", pattern)
-    if len(repetitions) >= 10:
-        raise UnsafePatternError("More than 10 repetitions of %s in %s" % (repetitions[0], pattern))
+    repetitions1 = re.findall(r"\.[\*\{\+]", pattern)
+    repetitions2 = re.findall(r"[^(][?]", pattern)
+    if len(repetitions1) + len(repetitions2) >= 10:
+        raise UnsafePatternError("More than 10 repetitions in %s" % pattern)

     return True


-def match(pattern, *args, **kwargs):
+def compilePattern(pattern):
+    global cached_patterns
+    global old_cached_patterns
+
     cached_pattern = cached_patterns.get(pattern)
     if cached_pattern:
-        return cached_pattern.match(*args, **kwargs)
-    else:
-        if isSafePattern(pattern):
-            cached_patterns[pattern] = re.compile(pattern)
-            return cached_patterns[pattern].match(*args, **kwargs)
+        return cached_pattern
+
+    cached_pattern = old_cached_patterns.get(pattern)
+    if cached_pattern:
+        del old_cached_patterns[pattern]
+        cached_patterns[pattern] = cached_pattern
+        return cached_pattern
+
+    if isSafePattern(pattern):
+        cached_pattern = re.compile(pattern)
+        cached_patterns[pattern] = cached_pattern
+        log.debug("Compiled new pattern: %s" % pattern)
+        log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
+
+        if len(cached_patterns) > max_cache_size:
+            old_cached_patterns = cached_patterns
+            cached_patterns = {}
+            log.debug("Size limit reached. Rotating cache.")
+            log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
+
+    return cached_pattern
+
+
+def match(pattern, *args, **kwargs):
+    cached_pattern = compilePattern(pattern)
+    return cached_pattern.match(*args, **kwargs)
+
+
+################################################################################
+# TESTS
+
+def testSafePattern(pattern):
+    try:
+        return isSafePattern(pattern)
+    except UnsafePatternError as err:
+        return False
+
+
+# Some real examples to make sure it works as expected
+assert testSafePattern('(data/mp4/.*|updater/.*)')
+assert testSafePattern('((js|css)/(?!all.(js|css)))|.git')
+
+
+# Unsafe cases:
+
+# ((?!json).)*$ not allowed, because of ) before the * character. Possible fix: .*(?!json)$
+assert not testSafePattern('((?!json).)*$')
+assert testSafePattern('.*(?!json)$')
+
+# (.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot) not allowed,
+# because it has 12 .* repetition patterns. Possible fix: .*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)
+assert not testSafePattern('(.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot)')
+assert testSafePattern('.*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)')
+
+# https://github.com/HelloZeroNet/ZeroNet/issues/2757
+assert not testSafePattern('a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+assert not testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}x.{0,1}')
+assert testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}')
+assert not testSafePattern('a?a?a?a?a?a?a?x.*x.*x.*')
+assert testSafePattern('a?a?a?a?a?a?a?x.*x.*')
+
+################################################################################
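A quick, standalone sketch of the new behaviour (assuming the module is importable as util.SafeRe; the paths are arbitrary examples): match() now goes through compilePattern(), which reuses compiled patterns from cached_patterns, promotes hits from old_cached_patterns back into the active dict, and rotates the two dicts once more than max_cache_size distinct patterns have been compiled.

    from util import SafeRe

    # First call compiles and caches the pattern; later calls reuse the compiled object.
    assert SafeRe.match(r"data/users/.*json", "data/users/content.json")
    assert SafeRe.match(r"data/users/.*json", "data/users/example/content.json")

    # Unsafe patterns are rejected by isSafePattern() before anything is compiled.
    try:
        SafeRe.match(r"((?!json).)*$", "content.json")
    except SafeRe.UnsafePatternError as err:
        print("rejected:", err)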

View file

@@ -0,0 +1,43 @@
+import logging
+import re
+
+log_level_raising_rules = []
+
+
+def addLogLevelRaisingRule(rule, level=None):
+    if level is None:
+        level = logging.INFO
+    log_level_raising_rules.append({
+        "rule": rule,
+        "level": level
+    })
+
+
+def matchLogLevelRaisingRule(name):
+    for rule in log_level_raising_rules:
+        if isinstance(rule["rule"], re.Pattern):
+            if rule["rule"].search(name):
+                return rule["level"]
+        else:
+            if rule["rule"] == name:
+                return rule["level"]
+    return None
+
+
+class SelectiveLogger(logging.getLoggerClass()):
+    def __init__(self, name, level=logging.NOTSET):
+        return super().__init__(name, level)
+
+    def raiseLevel(self, level):
+        raised_level = matchLogLevelRaisingRule(self.name)
+        if raised_level is not None:
+            if level < raised_level:
+                level = raised_level
+        return level
+
+    def isEnabledFor(self, level):
+        level = self.raiseLevel(level)
+        return super().isEnabledFor(level)
+
+    def _log(self, level, msg, args, **kwargs):
+        level = self.raiseLevel(level)
+        return super()._log(level, msg, args, **kwargs)
+
+
+logging.setLoggerClass(SelectiveLogger)
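A minimal usage sketch (the logger names here are hypothetical): because setLoggerClass() runs at import time, rules only affect loggers created after this module has been imported, and a matching rule lifts records below the rule's level up to that level so they pass thresholds that would otherwise filter them out.

    import logging
    import re
    from util import SelectiveLogger

    # Lift everything from loggers whose name starts with "Site:" to WARNING.
    SelectiveLogger.addLogLevelRaisingRule(re.compile(r"^Site:"), level=logging.WARNING)

    logging.basicConfig(level=logging.WARNING)

    site_log = logging.getLogger("Site:example")   # created after setLoggerClass(), so a SelectiveLogger
    site_log.debug("peer list refreshed")          # raised to WARNING by the rule, so it is shown
    logging.getLogger("Other").debug("hidden")     # no rule matches, stays at DEBUG and is filtered out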

View file

@@ -1,4 +1,5 @@
 from .Cached import Cached
+from .CircularIterator import CircularIterator
 from .Event import Event
 from .Noparallel import Noparallel
 from .Pooled import Pooled

View file

@@ -290,7 +290,8 @@ local_ip_pattern = re.compile(r"^127\.|192\.168\.|10\.|172\.1[6-9]\.|172\.2[0-9]
 def isPrivateIp(ip):
     return local_ip_pattern.match(ip)


+# XXX: Deprecated. Use ConnectionServer.getIpType() instead.
+# To be removed in 0.9.0
 def getIpType(ip):
     if ip.endswith(".onion"):
         return "onion"