Compare commits: py3-latest...zeronet-en (147 commits)
SHA1
---
0aed438ae5
ec8203fff7
45b92157cf
5e61fe8b06
90b6cb9cdf
1362b7e2e6
00061c45a5
7fef6dde0e
c594e63512
36adb63f61
59f7d3221f
b39a1a5f1b
f2324b8eb4
912688dbbd
f2e1c2ad81
62f88a11a7
7dbf8da8cb
f79aca1dad
7575e2d455
58b2d92351
86a73e4665
18da16e8d4
523e399d8c
643dd84a28
4448358247
0e48004563
0bbf19aab9
545fe9442c
5c8bbe5801
348a4b0865
f7372fc393
b7a3aa37e1
b6b23d0e8e
e000eae046
1863043505
d32d9f781b
168c436b73
77e0bb3650
ef69dcd331
32eb47c482
f484c0a1b8
645f3ba34a
93a95f511a
dff52d691a
1a8d30146e
8f908c961d
ce971ab738
1fd1f47a94
b512c54f75
b4f94e5022
e612f93631
fe24e17baa
1b68182a76
1ef129bdf9
19b840defd
e3daa09316
77d2d69376
c36cba7980
ddc4861223
cd3262a2a7
b194eb0f33
5744e40505
5ec970adb8
75bba6ca1a
7e438a90e1
46bea95002
2a25d61b96
ba6295f793
23ef37374b
488fd4045e
769a2c08dd
d2b65c550c
d5652eaa51
164f5199a9
72e5d3df64
1c73d1a095
b9ec7124f9
abbd2c51f0
be65ff2c40
c772592c4a
5d6fe6a631
adaeedf4d8
8474abc967
986dedfa7f
0151546329
d0069471b8
1144964062
6c8849139f
3677684971
4e27e300e3
697b12d808
570f854485
5d5b3684cc
7354d712e0
6c8b059f57
27ce79f044
90d01e6004
325f071329
c84b413f58
ba16fdcae9
ea21b32b93
e8358ee8f2
d1b9cc8261
829fd46781
adf40dbb6b
8fd88c50f9
511a90a5c5
3ca323f8b0
112c778c28
f1d91989d5
b6ae96db5a
3d68a25e13
d4239d16f9
b0005026b4
1f34f477ef
e33a54bc65
920ddd944f
f9706e3dc4
822e53ebb8
5f6589cfc2
95c8f0e97e
cb363d2f11
0d02c3c4da
2811d7c9d4
96e935300c
2e1b0e093f
fee63a1ed2
142f5862df
de5a9ff67b
d57deaa8e4
eb6d0c9644
a36b2c9241
9a8519b487
f4708d9781
b7550474a5
735061b79d
aa6d7a468d
f5b63a430c
6ee1db4197
37627822de
d35a15d674
c8545ce054
8f8e10a703
33c81a89e9
84526a6657
3910338b28
b2e92b1d10
61 changed files with 3365 additions and 1409 deletions
@@ -1,40 +0,0 @@
-name: Build Docker Image on Commit
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - '!' # Exclude tags
-
-jobs:
-  build-and-publish:
-    runs-on: docker-builder
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set REPO_VARS
-        id: repo-url
-        run: |
-          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
-          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
-
-      - name: Login to OCI registry
-        run: |
-          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
-
-      - name: Build and push Docker images
-        run: |
-          # Build Docker image with commit SHA
-          docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
-          docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}
-
-          # Build Docker image with nightly tag
-          docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
-          docker push $REPO_HOST/$REPO_PATH:nightly
-
-          # Remove local images to save storage
-          docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
-          docker rmi $REPO_HOST/$REPO_PATH:nightly
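The `Set REPO_VARS` step above derives the registry host by stripping the URL scheme with `sed`. A quick way to sanity-check that expression locally; the URL here is an illustrative stand-in for `${{ github.server_url }}`:

```sh
# Strip the http:// or https:// scheme, leaving only the host part.
echo "https://git.example.com" | sed 's~http[s]*://~~g'
# prints: git.example.com
```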
@@ -1,37 +0,0 @@
-name: Build and Publish Docker Image on Tag
-
-on:
-  push:
-    tags:
-      - '*'
-
-jobs:
-  build-and-publish:
-    runs-on: docker-builder
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set REPO_VARS
-        id: repo-url
-        run: |
-          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
-          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
-
-      - name: Login to OCI registry
-        run: |
-          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
-
-      - name: Build and push Docker image
-        run: |
-          TAG=${{ github.ref_name }} # Get the tag name from the context
-          # Build and push multi-platform Docker images
-          docker build -t $REPO_HOST/$REPO_PATH:$TAG --push .
-          # Tag and push latest
-          docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
-          docker push $REPO_HOST/$REPO_PATH:latest
-
-          # Remove the local image to save storage
-          docker rmi $REPO_HOST/$REPO_PATH:$TAG
-          docker rmi $REPO_HOST/$REPO_PATH:latest
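One caveat in the removed step above: `--push` is a BuildKit/buildx flag, and the classic `docker build` does not accept it, so this line presumably relied on buildx being the default builder. A minimal sketch of the two equivalent forms, reusing the workflow's variables:

```sh
# With buildx (BuildKit) available, build and push in one step:
docker buildx build -t "$REPO_HOST/$REPO_PATH:$TAG" --push .

# With the classic builder, build first and push separately:
docker build -t "$REPO_HOST/$REPO_PATH:$TAG" .
docker push "$REPO_HOST/$REPO_PATH:$TAG"
```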
.github/FUNDING.yml (vendored): 11 changed lines
@@ -1,10 +1 @@
-github: canewsin
-patreon: # Replace with a single Patreon username e.g., user1
-open_collective: # Replace with a single Open Collective username e.g., user1
-ko_fi: canewsin
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: canewsin
-issuehunt: # Replace with a single IssueHunt username e.g., user1
-otechie: # Replace with a single Otechie username e.g., user1
-custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']
+custom: https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/
.github/workflows/codeql-analysis.yml (vendored): 72 changed lines
@@ -1,72 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ py3-latest ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ py3-latest ]
-  schedule:
-    - cron: '32 19 * * 2'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'javascript', 'python' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
-        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-
-        # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
-        # queries: security-extended,security-and-quality
-
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
-
-    # If the Autobuild fails above, remove it and uncomment the following three lines.
-    # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
-
-    # - run: |
-    #     echo "Run, Build Application using script"
-    #     ./location_of_script_within_repo/buildscript.sh
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
.github/workflows/tests.yml (vendored): 71 changed lines
@@ -4,48 +4,49 @@ on: [push, pull_request]

 jobs:
   test:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     strategy:
       max-parallel: 16
       matrix:
-        python-version: ["3.7", "3.8", "3.9"]
+        python-version: [3.6, 3.7, 3.8, 3.9]

     steps:
       - name: Checkout ZeroNet
         uses: actions/checkout@v2
         with:
-          submodules: "true"
+          submodules: 'true'

       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v1
         with:
           python-version: ${{ matrix.python-version }}

       - name: Prepare for installation
         run: |
           python3 -m pip install setuptools
           python3 -m pip install --upgrade pip wheel
           python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium

       - name: Install
         run: |
           python3 -m pip install --upgrade -r requirements.txt
           python3 -m pip list

       - name: Prepare for tests
         run: |
           openssl version -a
           echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6

       - name: Test
         run: |
           catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
           export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
           export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
           export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
           export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
           export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
           export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
           find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
           find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
           flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
.gitignore (vendored): 1 changed line
@@ -7,7 +7,6 @@ __pycache__/

 # Hidden files
 .*
-!/.forgejo
 !/.github
 !/.gitignore
 !/.travis.yml
CHANGELOG.md: 81 changed lines
@@ -1,85 +1,6 @@
-### ZeroNet 0.9.0 (2023-07-12) Rev4630
-- Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
-- Add trackers to Config.py for failsafety incase missing trackers.txt
-- Added Proxy links
-- Fix pysha3 dep installation issue
-- FileRequest -> Remove Unnecessary check, Fix error wording
-- Fix Response when site is missing for `actionAs`
-
-
-### ZeroNet 0.8.5 (2023-02-12) Rev4625
-- Fix(https://github.com/ZeroNetX/ZeroNet/pull/202) for SSL cert gen failed on Windows.
-- default theme-class for missing value in `users.json`.
-- Fetch Stats Plugin changes.
-
-### ZeroNet 0.8.4 (2022-12-12) Rev4620
-- Increase Minimum Site size to 25MB.
-
-### ZeroNet 0.8.3 (2022-12-11) Rev4611
-- main.py -> Fix accessing unassigned varible
-- ContentManager -> Support for multiSig
-- SiteStrorage.py -> Fix accessing unassigned varible
-- ContentManager.py Improve Logging of Valid Signers
-
-### ZeroNet 0.8.2 (2022-11-01) Rev4610
-- Fix Startup Error when plugins dir missing
-- Move trackers to seperate file & Add more trackers
-- Config:: Skip loading missing tracker files
-- Added documentation for getRandomPort fn
-
-### ZeroNet 0.8.1 (2022-10-01) Rev4600
-- fix readdress loop (cherry-pick previously added commit from conservancy)
-- Remove Patreon badge
-- Update README-ru.md (#177)
-- Include inner_path of failed request for signing in error msg and response
-- Don't Fail Silently When Cert is Not Selected
-- Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
-- Update FUNDING.yml
-
-### ZeroNet 0.8.0 (2022-05-27) Rev4591
-- Revert File Open to catch File Access Errors.
-
-### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
-- Use xescape(s) from zeronet-conservancy
-- actionUpdate response Optimisation
-- Fetch Plugins Repo Updates
-- Fix Unhandled File Access Errors
-- Create codeql-analysis.yml
-
-### ZeroNet 0.7.9 (2022-05-26) Rev4585
-- Rust Version Compatibility for update Protocol msg
-- Removed Non Working Trakers.
-- Dynamically Load Trackers from Dashboard Site.
-- Tracker Supply Improvements.
-- Fix Repo Url for Bug Report
-- First Party Tracker Update Service using Dashboard Site.
-- remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
-
-### ZeroNet 0.7.8 (2022-03-02) Rev4580
-- Update Plugins with some bug fixes and Improvements
-
-### ZeroNet 0.7.6 (2022-01-12) Rev4565
-- Sync Plugin Updates
-- Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
-- Add More Default Plugins to Repo
-- Doubled Site Publish Limits
-- Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
-- UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
-- Moved Plugins to Seperate Repo
-- Added `access_key` variable in Config, this used to access restrited plugins when multiuser plugin is enabled. When MultiUserPlugin is enabled we cannot access some pages like /Stats, this key will remove such restriction with access key.
-- Added `last_connection_id_current_version` to ConnectionServer, helpful to estimate no of connection from current client version.
-- Added current version: connections to /Stats page. see the previous point.
-
-### ZeroNet 0.7.5 (2021-11-28) Rev4560
-- Add more default trackers
-- Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
-- Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
-
-### ZeroNet 0.7.3 (2021-11-28) Rev4555
-- Fix xrange is undefined error
-- Fix Incorrect viewport on mobile while loading
-- Tor-V3 Patch by anonymoose
-
+### ZeroNet 0.7.2 (2020-09-?) Rev4206?
+
 ### ZeroNet 0.7.1 (2019-07-01) Rev4206
 ### Added
README-ru.md: 243 changed lines
@@ -3,131 +3,206 @@
 [简体中文](./README-zh-cn.md)
 [English](./README.md)

-Децентрализованные вебсайты, использующие криптографию Bitcoin и протокол BitTorrent — https://zeronet.dev ([Зеркало в ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). В отличии от Bitcoin, ZeroNet'у не требуется блокчейн для работы, однако он использует ту же криптографию, чтобы обеспечить сохранность и проверку данных.
+Децентрализованные вебсайты использующие Bitcoin криптографию и BitTorrent сеть - https://zeronet.dev

 ## Зачем?

-- Мы верим в открытую, свободную, и неподдающуюся цензуре сеть и связь.
-- Нет единой точки отказа: Сайт остаётся онлайн, пока его обслуживает хотя бы 1 пир.
-- Нет затрат на хостинг: Сайты обслуживаются посетителями.
-- Невозможно отключить: Он нигде, потому что он везде.
-- Скорость и возможность работать без Интернета: Вы сможете получить доступ к сайту, потому что его копия хранится на вашем компьютере и у ваших пиров.
+* Мы верим в открытую, свободную, и не отцензуренную сеть и коммуникацию.
+* Нет единой точки отказа: Сайт онлайн пока по крайней мере 1 пир обслуживает его.
+* Никаких затрат на хостинг: Сайты обслуживаются посетителями.
+* Невозможно отключить: Он нигде, потому что он везде.
+* Быстр и работает оффлайн: Вы можете получить доступ к сайту, даже если Интернет недоступен.

 ## Особенности

-- Обновление сайтов в реальном времени
-- Поддержка доменов `.bit` ([Namecoin](https://www.namecoin.org))
-- Легкая установка: просто распакуйте и запустите
-- Клонирование сайтов "в один клик"
-- Беспарольная [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
-  авторизация: Ваша учетная запись защищена той же криптографией, что и ваш Bitcoin-кошелек
-- Встроенный SQL-сервер с синхронизацией данных P2P: Позволяет упростить разработку сайта и ускорить загрузку страницы
-- Анонимность: Полная поддержка сети Tor, используя скрытые службы `.onion` вместо адресов IPv4
-- Зашифрованное TLS подключение
-- Автоматическое открытие UPnP–порта
-- Плагин для поддержки нескольких пользователей (openproxy)
-- Работа с любыми браузерами и операционными системами
-
-## Текущие ограничения
-
-- Файловые транзакции не сжаты
-- Нет приватных сайтов
+* Обновляемые в реальном времени сайты
+* Поддержка Namecoin .bit доменов
+* Лёгок в установке: распаковал & запустил
+* Клонирование вебсайтов в один клик
+* Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+  based authorization: Ваша учетная запись защищена той же криптографией, что и ваш Bitcoin-кошелек
+* Встроенный SQL-сервер с синхронизацией данных P2P: Позволяет упростить разработку сайта и ускорить загрузку страницы
+* Анонимность: Полная поддержка сети Tor с помощью скрытых служб .onion вместо адресов IPv4
+* TLS зашифрованные связи
+* Автоматическое открытие uPnP порта
+* Плагин для поддержки многопользовательской (openproxy)
+* Работает с любыми браузерами и операционными системами

 ## Как это работает?

-- После запуска `zeronet.py` вы сможете посещать сайты в ZeroNet, используя адрес
-  `http://127.0.0.1:43110/{zeronet_адрес}`
-  (Например: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
-- Когда вы посещаете новый сайт в ZeroNet, он пытается найти пиров с помощью протокола BitTorrent,
-  чтобы скачать у них файлы сайта (HTML, CSS, JS и т.д.).
-- После посещения сайта вы тоже становитесь его пиром.
-- Каждый сайт содержит файл `content.json`, который содержит SHA512 хеши всех остальные файлы
-  и подпись, созданную с помощью закрытого ключа сайта.
-- Если владелец сайта (тот, кто владеет закрытым ключом для адреса сайта) изменяет сайт, он
-  подписывает новый `content.json` и публикует его для пиров. После этого пиры проверяют целостность `content.json`
-  (используя подпись), скачвают изменённые файлы и распространяют новый контент для других пиров.
+* После запуска `zeronet.py` вы сможете посетить зайты (zeronet сайты) используя адрес
+  `http://127.0.0.1:43110/{zeronet_address}`
+  (например. `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
+* Когда вы посещаете новый сайт zeronet, он пытается найти пиров с помощью BitTorrent
+  чтобы загрузить файлы сайтов (html, css, js ...) из них.
+* Каждый посещенный зайт также обслуживается вами. (Т.е хранится у вас на компьютере)
+* Каждый сайт содержит файл `content.json`, который содержит все остальные файлы в хэше sha512
+  и подпись, созданную с использованием частного ключа сайта.
+* Если владелец сайта (у которого есть закрытый ключ для адреса сайта) изменяет сайт, то он/она
+  подписывает новый `content.json` и публикует его для пиров. После этого пиры проверяют целостность `content.json`
+  (используя подпись), они загружают измененные файлы и публикуют новый контент для других пиров.

-[Презентация о криптографии ZeroNet, обновлениях сайтов, многопользовательских сайтах »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
-[Часто задаваемые вопросы »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
-[Документация разработчика ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+#### [Слайд-шоу о криптографии ZeroNet, обновлениях сайтов, многопользовательских сайтах »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+#### [Часто задаваемые вопросы »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+
+#### [Документация разработчика ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)

 ## Скриншоты

 
 

-[Больше скриншотов в документации ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
-
-## Как присоединиться?
-
-### Windows
-
-- Скачайте и распакуйте архив [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26МБ)
-- Запустите `ZeroNet.exe`
-
-### macOS
-
-- Скачайте и распакуйте архив [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14МБ)
-- Запустите `ZeroNet.app`
-
-### Linux (64 бит)
-
-- Скачайте и распакуйте архив [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) (14МБ)
-- Запустите `./ZeroNet.sh`
-
-> **Note**
-> Запустите таким образом: `./ZeroNet.sh --ui_ip '*' --ui_restrict ваш_ip_адрес`, чтобы разрешить удалённое подключение к веб–интерфейсу.
-
-### Docker
-
-Официальный образ находится здесь: https://hub.docker.com/r/canewsin/zeronet/
-
-### Android (arm, arm64, x86)
-
-- Для работы требуется Android как минимум версии 5.0 Lollipop
-- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
-  alt="Download from Google Play"
-  height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
-- Скачать APK: https://github.com/canewsin/zeronet_mobile/releases
-
-### Android (arm, arm64, x86) Облегчённый клиент только для просмотра (1МБ)
-
-- Для работы требуется Android как минимум версии 4.1 Jelly Bean
-- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
-  alt="Download from Google Play"
-  height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
-
-### Установка из исходного кода
-
-```sh
-wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
-unzip ZeroNet-src.zip
-cd ZeroNet
-sudo apt-get update
-sudo apt-get install python3-pip
-sudo python3 -m pip install -r requirements.txt
-```
-- Запустите `python3 zeronet.py`
-
-Откройте приветственную страницу ZeroHello в вашем браузере по ссылке http://127.0.0.1:43110/
-
-## Как мне создать сайт в ZeroNet?
-
-- Кликните на **⋮** > **"Create new, empty site"** в меню на сайте [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d).
-- Вы будете **перенаправлены** на совершенно новый сайт, который может быть изменён только вами!
-- Вы можете найти и изменить контент вашего сайта в каталоге **data/[адрес_вашего_сайта]**
-- После изменений откройте ваш сайт, переключите влево кнопку "0" в правом верхнем углу, затем нажмите кнопки **sign** и **publish** внизу
-
-Следующие шаги: [Документация разработчика ZeroNet](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+#### [Больше скриншотов в ZeroNet документации »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+
+## Как вступить
+
+* Скачайте ZeroBundle пакет:
+  * [Microsoft Windows](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip)
+  * [Apple macOS](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip)
+  * [Linux 64-bit](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip)
+  * [Linux 32-bit](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip)
+* Распакуйте где угодно
+* Запустите `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)
+
+### Linux терминал
+
+* `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
+* `unzip ZeroNet-linux.zip`
+* `cd ZeroNet-linux`
+* Запустите с помощью `./ZeroNet.sh`
+
+Он загружает последнюю версию ZeroNet, затем запускает её автоматически.
+
+#### Ручная установка для Debian Linux
+
+* `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
+* `unzip ZeroNet-src.zip`
+* `cd ZeroNet`
+* `sudo apt-get update`
+* `sudo apt-get install python3-pip`
+* `sudo python3 -m pip install -r requirements.txt`
+* Запустите с помощью `python3 zeronet.py`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Arch Linux](https://www.archlinux.org)
+
+* `git clone https://aur.archlinux.org/zeronet.git`
+* `cd zeronet`
+* `makepkg -srci`
+* `systemctl start zeronet`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+Смотрите [ArchWiki](https://wiki.archlinux.org)'s [ZeroNet
+article](https://wiki.archlinux.org/index.php/ZeroNet) для дальнейшей помощи.
+
+### [Gentoo Linux](https://www.gentoo.org)
+
+* [`layman -a raiagent`](https://github.com/leycec/raiagent)
+* `echo '>=net-vpn/zeronet-0.5.4' >> /etc/portage/package.accept_keywords`
+* *(Опционально)* Включить поддержку Tor: `echo 'net-vpn/zeronet tor' >> /etc/portage/package.use`
+* `emerge zeronet`
+* `rc-service zeronet start`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+Смотрите `/usr/share/doc/zeronet-*/README.gentoo.bz2` для дальнейшей помощи.
+
+### [FreeBSD](https://www.freebsd.org/)
+
+* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
+* `sysrc zeronet_enable="YES"`
+* `service zeronet start`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Vagrant](https://www.vagrantup.com/)
+
+* `vagrant up`
+* Подключитесь к VM с помощью `vagrant ssh`
+* `cd /vagrant`
+* Запустите `python3 zeronet.py --ui_ip 0.0.0.0`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Docker](https://www.docker.com/)
+
+* `docker run -d -v <local_data_folder>:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 canewsin/zeronet`
+* Это изображение Docker включает в себя прокси-сервер Tor, который по умолчанию отключён.
+  Остерегайтесь что некоторые хостинг-провайдеры могут не позволить вам запускать Tor на своих серверах.
+  Если вы хотите включить его,установите переменную среды `ENABLE_TOR` в `true` (по умолчанию: `false`) Например:
+  `docker run -d -e "ENABLE_TOR=true" -v <local_data_folder>:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 canewsin/zeronet`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)
+
+* `virtualenv env`
+* `source env/bin/activate`
+* `pip install msgpack gevent`
+* `python3 zeronet.py`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+## Текущие ограничения
+
+* Файловые транзакции не сжаты
+* Нет приватных сайтов
+
+## Как я могу создать сайт в Zeronet?
+
+Завершите работу zeronet, если он запущен
+
+```bash
+$ zeronet.py siteCreate
+...
+- Site private key (Приватный ключ сайта): 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
+- Site address (Адрес сайта): 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+- Site created! (Сайт создан)
+$ zeronet.py
+...
+```
+
+Поздравляем, вы закончили! Теперь каждый может получить доступ к вашему зайту используя
+`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
+
+Следующие шаги: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+## Как я могу модифицировать Zeronet сайт?
+
+* Измените файлы расположенные в data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 директории.
+  Когда закончите с изменением:
+
+```bash
+$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+- Signing site (Подпись сайта): 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
+Private key (Приватный ключ) (input hidden):
+```
+
+* Введите секретный ключ, который вы получили при создании сайта, потом:
+
+```bash
+$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+Site:13DNDk..bhC2 Publishing to 3/10 peers...
+Site:13DNDk..bhC2 Successfuly published to 3 peers
+- Serving files....
+```
+
+* Вот и всё! Вы успешно подписали и опубликовали свои изменения.

 ## Поддержите проект

-- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Рекомендуем)
+- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
 - LiberaPay: https://liberapay.com/PramUkesh
 - Paypal: https://paypal.me/PramUkesh
-- Другие способы: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+- Others: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)

 #### Спасибо!

-- Здесь вы можете получить больше информации, помощь, прочитать список изменений и исследовать ZeroNet сайты: https://www.reddit.com/r/zeronetx/
-- Общение происходит на канале [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) или в [Gitter](https://gitter.im/canewsin/ZeroNet)
-- Электронная почта: canews.in@gmail.com
+* Больше информации, помощь, журнал изменений, zeronet сайты: https://www.reddit.com/r/zeronetx/
+* Приходите, пообщайтесь с нами: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) или на [gitter](https://gitter.im/canewsin/ZeroNet)
+* Email: canews.in@gmail.com
README.md: 19 changed lines
@@ -1,4 +1,5 @@
 # ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+
 <!--TODO: Update Onion Site -->
 Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/), Unlike Bitcoin, ZeroNet Doesn't need a blockchain to run, But uses cryptography used by BTC, to ensure data integrity and validation.

@@ -99,24 +100,6 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/
 #### Docker
 There is an official image, built from source at: https://hub.docker.com/r/canewsin/zeronet/
-
-### Online Proxies
-Proxies are like seed boxes for sites(i.e ZNX runs on a cloud vps), you can try zeronet experience from proxies. Add your proxy below if you have one.
-
-#### Official ZNX Proxy :
-
-https://proxy.zeronet.dev/
-
-https://zeronet.dev/
-
-#### From Community
-
-https://0net-preview.com/
-
-https://portal.ngnoid.tv/
-
-https://zeronet.ipfsscan.io/
-
-
 ### Install from source
 - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
build-docker-images.sh (new executable file): 32 added lines
@@ -0,0 +1,32 @@
+#!/bin/sh
+set -e
+
+arg_push=
+
+case "$1" in
+    --push) arg_push=y ; shift ;;
+esac
+
+default_suffix=alpine
+prefix="${1:-local/}"
+
+for dokerfile in dockerfiles/Dockerfile.* ; do
+    suffix="`echo "$dokerfile" | sed 's/.*\/Dockerfile\.//'`"
+    image_name="${prefix}zeronet:$suffix"
+
+    latest=""
+    t_latest=""
+    if [ "$suffix" = "$default_suffix" ] ; then
+        latest="${prefix}zeronet:latest"
+        t_latest="-t ${latest}"
+    fi
+
+    echo "DOCKER BUILD $image_name"
+    docker build -f "$dokerfile" -t "$image_name" $t_latest .
+    if [ -n "$arg_push" ] ; then
+        docker push "$image_name"
+        if [ -n "$latest" ] ; then
+            docker push "$latest"
+        fi
+    fi
+done
dockerfiles/Dockerfile.alpine (new symbolic link): 1 added line
@@ -0,0 +1 @@
+Dockerfile.alpine3.13
dockerfiles/Dockerfile.alpine3.13 (new file): 44 added lines
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
+# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
+
+FROM alpine:3.13
+
+# Base settings
+ENV HOME /root
+
+# Install packages
+
+# Install packages
+
+COPY install-dep-packages.sh /root/install-dep-packages.sh
+
+RUN /root/install-dep-packages.sh install
+
+COPY requirements.txt /root/requirements.txt
+
+RUN pip3 install -r /root/requirements.txt \
+ && /root/install-dep-packages.sh remove-makedeps \
+ && echo "ControlPort 9051" >> /etc/tor/torrc \
+ && echo "CookieAuthentication 1" >> /etc/tor/torrc
+
+RUN python3 -V \
+ && python3 -m pip list \
+ && tor --version \
+ && openssl version
+
+# Add Zeronet source
+
+COPY . /root
+VOLUME /root/data
+
+# Control if Tor proxy is started
+ENV ENABLE_TOR false
+
+WORKDIR /root
+
+# Set upstart command
+CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
+
+# Expose ports
+EXPOSE 43110 26552
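A hedged sketch of building and running this image locally; the `zeronet-data` volume name is illustrative, and the ports follow the `EXPOSE` line above:

```sh
# Build via the Dockerfile.alpine symlink from the repository root.
docker build -f dockerfiles/Dockerfile.alpine -t local/zeronet:alpine .

# Run with persistent site data and the bundled Tor proxy enabled.
docker run -d -e ENABLE_TOR=true \
    -v zeronet-data:/root/data \
    -p 26552:26552 -p 127.0.0.1:43110:43110 \
    local/zeronet:alpine
```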
dockerfiles/Dockerfile.ubuntu (new symbolic link): 1 added line
@@ -0,0 +1 @@
+Dockerfile.ubuntu20.04
dockerfiles/Dockerfile.ubuntu20.04 (new file): 44 added lines
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
+# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
+
+FROM ubuntu:20.04
+
+# Base settings
+ENV HOME /root
+
+# Install packages
+
+# Install packages
+
+COPY install-dep-packages.sh /root/install-dep-packages.sh
+
+RUN /root/install-dep-packages.sh install
+
+COPY requirements.txt /root/requirements.txt
+
+RUN pip3 install -r /root/requirements.txt \
+ && /root/install-dep-packages.sh remove-makedeps \
+ && echo "ControlPort 9051" >> /etc/tor/torrc \
+ && echo "CookieAuthentication 1" >> /etc/tor/torrc
+
+RUN python3 -V \
+ && python3 -m pip list \
+ && tor --version \
+ && openssl version
+
+# Add Zeronet source
+
+COPY . /root
+VOLUME /root/data
+
+# Control if Tor proxy is started
+ENV ENABLE_TOR false
+
+WORKDIR /root
+
+# Set upstart command
+CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
+
+# Expose ports
+EXPOSE 43110 26552
dockerfiles/gen-dockerfiles.sh (new executable file): 34 added lines
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+set -e
+
+die() {
+    echo "$@" > /dev/stderr
+    exit 1
+}
+
+for os in alpine:3.13 ubuntu:20.04 ; do
+    prefix="`echo "$os" | sed -e 's/://'`"
+    short_prefix="`echo "$os" | sed -e 's/:.*//'`"
+
+    zeronet="zeronet-Dockerfile"
+
+    dockerfile="Dockerfile.$prefix"
+    dockerfile_short="Dockerfile.$short_prefix"
+
+    echo "GEN $dockerfile"
+
+    if ! test -f "$zeronet" ; then
+        die "No such file: $zeronet"
+    fi
+
+    echo "\
+# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
+# SEE $zeronet FOR THE SOURCE FILE.
+
+FROM $os
+
+`cat "$zeronet"`
+" > "$dockerfile.tmp" && mv "$dockerfile.tmp" "$dockerfile" && ln -s -f "$dockerfile" "$dockerfile_short"
+done
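The generator is presumably run from inside `dockerfiles/`, since it resolves `zeronet-Dockerfile` relative to the current directory; a sketch under that assumption:

```sh
# Regenerate Dockerfile.alpine3.13 and Dockerfile.ubuntu20.04 from the
# shared zeronet-Dockerfile, refreshing the Dockerfile.alpine and
# Dockerfile.ubuntu symlinks as a side effect.
cd dockerfiles
./gen-dockerfiles.sh
```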
@@ -1,14 +1,16 @@
-FROM alpine:3.12
-
-#Base settings
+# Base settings
+
 ENV HOME /root
+
+# Install packages
+
+COPY install-dep-packages.sh /root/install-dep-packages.sh
+
+RUN /root/install-dep-packages.sh install
+
 COPY requirements.txt /root/requirements.txt

-#Install ZeroNet
-RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
- && pip3 install -r /root/requirements.txt \
- && apk del python3-dev gcc libffi-dev musl-dev make \
+RUN pip3 install -r /root/requirements.txt \
+ && /root/install-dep-packages.sh remove-makedeps \
  && echo "ControlPort 9051" >> /etc/tor/torrc \
  && echo "CookieAuthentication 1" >> /etc/tor/torrc

@@ -17,18 +19,18 @@ RUN python3 -V \
  && tor --version \
  && openssl version

-#Add Zeronet source
+# Add Zeronet source

 COPY . /root
 VOLUME /root/data

-#Control if Tor proxy is started
+# Control if Tor proxy is started
 ENV ENABLE_TOR false

 WORKDIR /root

-#Set upstart command
+# Set upstart command
 CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552

-#Expose ports
+# Expose ports
 EXPOSE 43110 26552
install-dep-packages.sh (new executable file): 49 added lines
@@ -0,0 +1,49 @@
+#!/bin/sh
+set -e
+
+do_alpine() {
+    local deps="python3 py3-pip openssl tor"
+    local makedeps="python3-dev gcc g++ libffi-dev musl-dev make automake autoconf libtool"
+
+    case "$1" in
+        install)
+            apk --update --no-cache --no-progress add $deps $makedeps
+            ;;
+        remove-makedeps)
+            apk del $makedeps
+            ;;
+    esac
+}
+
+do_ubuntu() {
+    local deps="python3 python3-pip openssl tor"
+    local makedeps="python3-dev gcc g++ libffi-dev make automake autoconf libtool"
+
+    case "$1" in
+        install)
+            apt-get update && \
+            apt-get install --no-install-recommends -y $deps $makedeps && \
+            rm -rf /var/lib/apt/lists/*
+            ;;
+        remove-makedeps)
+            apt-get remove -y $makedeps
+            ;;
+    esac
+}
+
+if test -f /etc/os-release ; then
+    . /etc/os-release
+elif test -f /usr/lib/os-release ; then
+    . /usr/lib/os-release
+else
+    echo "No such file: /etc/os-release" > /dev/stderr
+    exit 1
+fi
+
+case "$ID" in
+    ubuntu) do_ubuntu "$@" ;;
+    alpine) do_alpine "$@" ;;
+    *)
+        echo "Unsupported OS ID: $ID" > /dev/stderr
+        exit 1
+esac
plugins (submodule): 1 removed line
@@ -1 +0,0 @@
-Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
@ -3,7 +3,7 @@ greenlet==0.4.16; python_version <= "3.6"
|
||||||
gevent>=20.9.0; python_version >= "3.7"
|
gevent>=20.9.0; python_version >= "3.7"
|
||||||
msgpack>=0.4.4
|
msgpack>=0.4.4
|
||||||
base58
|
base58
|
||||||
merkletools @ git+https://github.com/ZeroNetX/pymerkletools.git@dev
|
merkletools
|
||||||
rsa
|
rsa
|
||||||
PySocks>=1.6.8
|
PySocks>=1.6.8
|
||||||
pyasn1
|
pyasn1
|
||||||
|
|
|
@@ -13,8 +13,8 @@ import time
 class Config(object):

     def __init__(self, argv):
-        self.version = "0.9.0"
-        self.rev = 4630
+        self.version = "0.7.6"
+        self.rev = 4565
         self.argv = argv
         self.action = None
         self.test_parser = None
@@ -82,12 +82,45 @@ class Config(object):
         from Crypt import CryptHash
         access_key_default = CryptHash.random(24, "base64")  # Used to allow restrited plugins when multiuser plugin is enabled
         trackers = [
+            # by zeroseed at http://127.0.0.1:43110/19HKdTAeBh5nRiKn791czY7TwRB1QNrf1Q/?:users/1HvNGwHKqhj3ZMEM53tz6jbdqe4LRpanEu:zn:dc17f896-bf3f-4962-bdd4-0a470040c9c5
+            "zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441",
+            "zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441",
+            "zero://my562dxpjropcd5hy3nd5pemsc4aavbiptci5amwxzbelmzgkkuxpvid.onion:15441",
+            "zero://pn4q2zzt2pw4nk7yidxvsxmydko7dfibuzxdswi6gu6ninjpofvqs2id.onion:15441",
+            "zero://6i54dd5th73oelv636ivix6sjnwfgk2qsltnyvswagwphub375t3xcad.onion:15441",
+            "zero://tl74auz4tyqv4bieeclmyoe4uwtoc2dj7fdqv4nc4gl5j2bwg2r26bqd.onion:15441",
+            "zero://wlxav3szbrdhest4j7dib2vgbrd7uj7u7rnuzg22cxbih7yxyg2hsmid.onion:15441",
+            "zero://zy7wttvjtsijt5uwmlar4yguvjc2gppzbdj4v6bujng6xwjmkdg7uvqd.onion:15441",
+
+            # ZeroNet 0.7.2 defaults:
+            "zero://boot3rdez4rzn36x.onion:15441",
             "http://open.acgnxtracker.com:80/announce",  # DE
             "http://tracker.bt4g.com:2095/announce",  # Cloudflare
-            "http://tracker.files.fm:6969/announce",
-            "http://t.publictracker.xyz:6969/announce",
+            "zero://2602:ffc5::c5b2:5360:26312",  # US/ATL
+            "zero://145.239.95.38:15441",
+            "zero://188.116.183.41:26552",
+            "zero://145.239.95.38:15441",
+            "zero://211.125.90.79:22234",
+            "zero://216.189.144.82:26312",
+            "zero://45.77.23.92:15555",
+            "zero://51.15.54.182:21041",
             "https://tracker.lilithraws.cf:443/announce",
-            "https://tracker.babico.name.tr:443/announce",
+            "udp://code2chicken.nl:6969/announce",
+            "udp://abufinzio.monocul.us:6969/announce",
+            "udp://tracker.0x.tf:6969/announce",
+            "udp://tracker.zerobytes.xyz:1337/announce",
+            "udp://vibe.sleepyinternetfun.xyz:1738/announce",
+            "udp://www.torrent.eu.org:451/announce",
+            "zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441",
+            "zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441",
+            "zero://gugt43coc5tkyrhrc3esf6t6aeycvcqzw7qafxrjpqbwt4ssz5czgzyd.onion:15441",
+            "zero://hb6ozikfiaafeuqvgseiik4r46szbpjfu66l67wjinnyv6dtopuwhtqd.onion:15445",
+            "zero://75pmmcbp4vvo2zndmjnrkandvbg6jyptygvvpwsf2zguj7urq7t4jzyd.onion:7777",
+            "zero://dw4f4sckg2ultdj5qu7vtkf3jsfxsah3mz6pivwfd6nv3quji3vfvhyd.onion:6969",
+            "zero://5vczpwawviukvd7grfhsfxp7a6huz77hlis4fstjkym5kmf4pu7i7myd.onion:15441",
+            "zero://ow7in4ftwsix5klcbdfqvfqjvimqshbm2o75rhtpdnsderrcbx74wbad.onion:15441",
+            "zero://agufghdtniyfwty3wk55drxxwj2zxgzzo7dbrtje73gmvcpxy4ngs4ad.onion:15441",
+            "zero://qn65si4gtcwdiliq7vzrwu62qrweoxb6tx2cchwslaervj6szuje66qd.onion:26117",
         ]
         # Platform specific
         if sys.platform.startswith("win"):
@@ -251,12 +284,31 @@ class Config(object):
         self.parser.add_argument('--access_key', help='Plugin access key default: Random key generated at startup', default=access_key_default, metavar='key')
         self.parser.add_argument('--dist_type', help='Type of installed distribution', default='source')

-        self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=25, type=int, metavar='limit')
+        self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=10, type=int, metavar='limit')
         self.parser.add_argument('--file_size_limit', help='Maximum per file size limit in MB', default=10, type=int, metavar='limit')
-        self.parser.add_argument('--connected_limit', help='Max connected peer per site', default=8, type=int, metavar='connected_limit')
-        self.parser.add_argument('--global_connected_limit', help='Max connections', default=512, type=int, metavar='global_connected_limit')
+        self.parser.add_argument('--connected_limit', help='Max number of connected peers per site. Soft limit.', default=10, type=int, metavar='connected_limit')
+        self.parser.add_argument('--global_connected_limit', help='Max number of connections. Soft limit.', default=512, type=int, metavar='global_connected_limit')
         self.parser.add_argument('--workers', help='Download workers per site', default=5, type=int, metavar='workers')

+        self.parser.add_argument('--site_announce_interval_min', help='Site announce interval for the most active sites, in minutes.', default=4, type=int, metavar='site_announce_interval_min')
+        self.parser.add_argument('--site_announce_interval_max', help='Site announce interval for inactive sites, in minutes.', default=30, type=int, metavar='site_announce_interval_max')
+
+        self.parser.add_argument('--site_peer_check_interval_min', help='Connectable peers check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_peer_check_interval_min')
+        self.parser.add_argument('--site_peer_check_interval_max', help='Connectable peers check interval for inactive sites, in minutes.', default=20, type=int, metavar='site_peer_check_interval_max')
+
+        self.parser.add_argument('--site_update_check_interval_min', help='Site update check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_update_check_interval_min')
+        self.parser.add_argument('--site_update_check_interval_max', help='Site update check interval for inactive sites, in minutes.', default=45, type=int, metavar='site_update_check_interval_max')
+
+        self.parser.add_argument('--site_connectable_peer_count_max', help='Search for as many connectable peers for the most active sites', default=10, type=int, metavar='site_connectable_peer_count_max')
+        self.parser.add_argument('--site_connectable_peer_count_min', help='Search for as many connectable peers for inactive sites', default=2, type=int, metavar='site_connectable_peer_count_min')
+
+        self.parser.add_argument('--send_back_lru_size', help='Size of the send back LRU cache', default=5000, type=int, metavar='send_back_lru_size')
+        self.parser.add_argument('--send_back_limit', help='Send no more than so many files at once back to peer, when we discovered that the peer held older file versions', default=3, type=int, metavar='send_back_limit')
+
+        self.parser.add_argument('--expose_no_ownership', help='By default, ZeroNet tries checking updates for own sites more frequently. This can be used by a third party for revealing the network addresses of a site owner. If this option is enabled, ZeroNet performs the checks in the same way for any sites.', type='bool', choices=[True, False], default=False)
+
+        self.parser.add_argument('--simultaneous_connection_throttle_threshold', help='Throttle opening new connections when the number of outgoing connections in not fully established state exceeds the threshold.', default=15, type=int, metavar='simultaneous_connection_throttle_threshold')
+
         self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
         self.parser.add_argument('--fileserver_port', help='FileServer bind port (0: randomize)', default=0, type=int, metavar='port')
         self.parser.add_argument('--fileserver_port_range', help='FileServer randomization range', default="10000-40000", metavar='port')
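The new `*_min`/`*_max` option pairs define per-site tuning ranges: the most active sites are announced and checked at the `*_min` interval, while inactive sites drift toward `*_max`. A minimal sketch of how a scheduler could interpolate between the two bounds; this is not from the patch, and the function name and the 0.0-1.0 activity score are hypothetical:

```python
# Hypothetical interpolation between the min/max bounds added above:
# activity 1.0 (most active site) -> site_announce_interval_min (4 min)
# activity 0.0 (inactive site)    -> site_announce_interval_max (30 min)
def announce_interval_minutes(activity, interval_min=4, interval_max=30):
    activity = max(0.0, min(1.0, activity))  # clamp the score
    return interval_max - (interval_max - interval_min) * activity

assert announce_interval_minutes(1.0) == 4
assert announce_interval_minutes(0.0) == 30
```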
@@ -319,7 +371,8 @@ class Config(object):

     def loadTrackersFile(self):
         if not self.trackers_file:
-            self.trackers_file = ["trackers.txt", "{data_dir}/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d/trackers.txt"]
+            return None
+
         self.trackers = self.arguments.trackers[:]

         for trackers_file in self.trackers_file:
@@ -331,9 +384,6 @@ class Config(object):
             else:  # Relative to zeronet.py
                 trackers_file_path = self.start_dir + "/" + trackers_file

-            if not os.path.exists(trackers_file_path):
-                continue
-
             for line in open(trackers_file_path):
                 tracker = line.strip()
                 if "://" in tracker and tracker not in self.trackers:
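For reference, the loop kept above is a plain line filter: anything in a trackers file that does not look like a URL is skipped and duplicates are dropped. A self-contained rendition of that behavior (sample lines are illustrative):

```python
# What the tracker-loading loop accepts: one URL per line; lines without
# "://" (comments, blanks) are ignored, and duplicates are not re-added.
trackers = []
lines = ["udp://tracker.example:6969/announce", "# comment", "",
         "udp://tracker.example:6969/announce"]
for line in lines:
    tracker = line.strip()
    if "://" in tracker and tracker not in trackers:
        trackers.append(tracker)

assert trackers == ["udp://tracker.example:6969/announce"]
```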
@@ -17,12 +17,13 @@ from util import helper
 class Connection(object):
     __slots__ = (
         "sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
-        "handshake", "crypt", "connected", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
+        "handshake", "crypt", "connected", "connecting", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
         "last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
         "last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
     )

     def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
+        self.server = server
         self.sock = sock
         self.cert_pin = None
         if "#" in ip:
@@ -42,7 +43,6 @@ class Connection(object):
         self.is_private_ip = False
         self.is_tracker_connection = is_tracker_connection

-        self.server = server
         self.unpacker = None  # Stream incoming socket messages here
         self.unpacker_bytes = 0  # How many bytes the unpacker received
         self.req_id = 0  # Last request id
@@ -50,6 +50,7 @@ class Connection(object):
         self.crypt = None  # Connection encryption method
         self.sock_wrapped = False  # Socket wrapped to encryption

+        self.connecting = False
         self.connected = False
         self.event_connected = gevent.event.AsyncResult()  # Solves on handshake received
         self.closed = False
@@ -81,11 +82,11 @@ class Connection(object):

     def setIp(self, ip):
         self.ip = ip
-        self.ip_type = helper.getIpType(ip)
+        self.ip_type = self.server.getIpType(ip)
         self.updateName()

     def createSocket(self):
-        if helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
+        if self.server.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
             # Create IPv6 connection as IPv4 when using proxy
             return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
         else:
@ -118,13 +119,28 @@ class Connection(object):
|
||||||
|
|
||||||
# Open connection to peer and wait for handshake
|
# Open connection to peer and wait for handshake
|
||||||
def connect(self):
|
def connect(self):
|
||||||
|
self.connecting = True
|
||||||
|
try:
|
||||||
|
return self._connect()
|
||||||
|
except Exception as err:
|
||||||
|
self.connecting = False
|
||||||
|
self.connected = False
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _connect(self):
|
||||||
|
self.updateOnlineStatus(outgoing_activity=True)
|
||||||
|
|
||||||
|
if not self.event_connected or self.event_connected.ready():
|
||||||
|
self.event_connected = gevent.event.AsyncResult()
|
||||||
|
|
||||||
self.type = "out"
|
self.type = "out"
|
||||||
|
|
||||||
|
unreachability = self.server.getIpUnreachability(self.ip)
|
||||||
|
if unreachability:
|
||||||
|
raise Exception(unreachability)
|
||||||
|
|
||||||
if self.ip_type == "onion":
|
if self.ip_type == "onion":
|
||||||
if not self.server.tor_manager or not self.server.tor_manager.enabled:
|
|
||||||
raise Exception("Can't connect to onion addresses, no Tor controller present")
|
|
||||||
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
|
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
|
||||||
elif config.tor == "always" and helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
|
|
||||||
raise Exception("Can't connect to local IPs in Tor: always mode")
|
|
||||||
elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
|
elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
|
||||||
if config.trackers_proxy == "tor":
|
if config.trackers_proxy == "tor":
|
||||||
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
|
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
|
||||||
|
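The refactor above turns `connect()` into a thin wrapper whose only job is maintaining the new `connecting` flag; the throttling check added later in this diff counts connections where `connecting` is set but `connected` is not. A minimal self-contained sketch of that flag protocol (this is a demo class, not the real `Connection`):

```python
# Flag lifecycle implied by the wrapper above:
#   connecting=True                   -> _connect() running, handshake pending
#   connected=True, connecting=False  -> handshake done (set in the message loop)
#   both False                        -> failed or closed
class FlagDemo:
    def __init__(self):
        self.connecting = False
        self.connected = False

    def connect(self):
        self.connecting = True
        try:
            return self._connect()
        except Exception:
            self.connecting = False
            self.connected = False
            raise

    def _connect(self):
        # ... socket setup and handshake would happen here ...
        self.connected = True
        self.connecting = False

demo = FlagDemo()
demo.connect()
assert demo.connected and not demo.connecting
```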
@@ -148,37 +164,56 @@ class Connection(object):

         self.sock.connect(sock_address)

-        # Implicit SSL
-        should_encrypt = not self.ip_type == "onion" and self.ip not in self.server.broken_ssl_ips and self.ip not in config.ip_local
-        if self.cert_pin:
-            self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", cert_pin=self.cert_pin)
-            self.sock.do_handshake()
-            self.crypt = "tls-rsa"
-            self.sock_wrapped = True
-        elif should_encrypt and "tls-rsa" in CryptConnection.manager.crypt_supported:
+        if self.shouldEncrypt():
             try:
-                self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa")
-                self.sock.do_handshake()
-                self.crypt = "tls-rsa"
-                self.sock_wrapped = True
+                self.wrapSocket()
             except Exception as err:
-                if not config.force_encryption:
-                    self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
-                    self.server.broken_ssl_ips[self.ip] = True
-                self.sock.close()
-                self.crypt = None
-                self.sock = self.createSocket()
-                self.sock.settimeout(30)
-                self.sock.connect(sock_address)
+                if self.sock:
+                    self.sock.close()
+                    self.sock = None
+                if self.mustEncrypt():
+                    raise
+                self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
+                self.server.broken_ssl_ips[self.ip] = True
+                return self.connect()

         # Detect protocol
-        self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
         event_connected = self.event_connected
-        gevent.spawn(self.messageLoop)
+        self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
+        self.server.outgoing_pool.spawn(self.messageLoop)
         connect_res = event_connected.get()  # Wait for handshake
-        self.sock.settimeout(timeout_before)
+        if self.sock:
+            self.sock.settimeout(timeout_before)
         return connect_res

+    def mustEncrypt(self):
+        if self.cert_pin:
+            return True
+        if (not self.ip_type == "onion") and config.force_encryption:
+            return True
+        return False
+
+    def shouldEncrypt(self):
+        if self.mustEncrypt():
+            return True
+        return (
+            (not self.ip_type == "onion")
+            and
+            (self.ip not in self.server.broken_ssl_ips)
+            and
+            (self.ip not in config.ip_local)
+            and
+            ("tls-rsa" in CryptConnection.manager.crypt_supported)
+        )
+
+    def wrapSocket(self, crypt="tls-rsa", do_handshake=True):
+        server = (self.type == "in")
+        sock = CryptConnection.manager.wrapSocket(self.sock, crypt, server=server, cert_pin=self.cert_pin)
+        sock.do_handshake()
+        self.crypt = crypt
+        self.sock_wrapped = True
+        self.sock = sock
+
     # Handle incoming connection
     def handleIncomingConnection(self, sock):
         self.log("Incoming connection...")
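The old inline `should_encrypt` expression is factored into two policy methods: `mustEncrypt()` (encryption is mandatory, so a TLS failure aborts) and `shouldEncrypt()` (encryption is attempted, but a failure downgrades to plaintext and retries). A hedged summary as plain functions, not the real methods:

```python
# Decision policy implied by the refactor above, as standalone functions:
# - must encrypt:   a cert pin is set, or force_encryption applies to a clearnet peer
# - should encrypt: additionally skip onion peers (Tor already encrypts),
#   peers with previously broken TLS, local IPs, and builds without tls-rsa support.
def must_encrypt(cert_pin, ip_type, force_encryption):
    return bool(cert_pin) or (ip_type != "onion" and force_encryption)

def should_encrypt(cert_pin, ip_type, force_encryption, broken_ssl, is_local, tls_supported):
    if must_encrypt(cert_pin, ip_type, force_encryption):
        return True
    return ip_type != "onion" and not broken_ssl and not is_local and tls_supported

assert should_encrypt(None, "onion", False, False, False, True) is False   # Tor peer: skip TLS
assert should_encrypt("pin", "onion", False, False, False, True) is True   # pinned cert: always
```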
@@ -192,9 +227,7 @@ class Connection(object):
             first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
             if first_byte == b"\x16":
                 self.log("Crypt in connection using implicit SSL")
-                self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", True)
-                self.sock_wrapped = True
-                self.crypt = "tls-rsa"
+                self.wrapSocket(do_handshake=False)
         except Exception as err:
             self.log("Socket peek error: %s" % Debug.formatException(err))
         self.messageLoop()
@@ -213,6 +246,7 @@ class Connection(object):
         self.protocol = "v2"
         self.updateName()
         self.connected = True
+        self.connecting = False
         buff_len = 0
         req_len = 0
         self.unpacker_bytes = 0
@@ -435,13 +469,13 @@ class Connection(object):
         self.updateName()

         self.event_connected.set(True)  # Mark handshake as done
-        self.event_connected = None
         self.handshake_time = time.time()

     # Handle incoming message
     def handleMessage(self, message):
         cmd = message["cmd"]

+        self.updateOnlineStatus(successful_activity=True)
         self.last_message_time = time.time()
         self.last_cmd_recv = cmd
         if cmd == "response":  # New style response
@@ -458,12 +492,10 @@ class Connection(object):
                 self.last_ping_delay = ping
             # Server switched to crypt, lets do it also if not crypted already
             if message.get("crypt") and not self.sock_wrapped:
-                self.crypt = message["crypt"]
+                crypt = message["crypt"]
                 server = (self.type == "in")
-                self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (self.crypt, server, ping))
-                self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
-                self.sock.do_handshake()
-                self.sock_wrapped = True
+                self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (crypt, server, ping))
+                self.wrapSocket(crypt)

         if not self.sock_wrapped and self.cert_pin:
             self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
@@ -491,8 +523,7 @@ class Connection(object):
             server = (self.type == "in")
             self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
             try:
-                self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
-                self.sock_wrapped = True
+                self.wrapSocket(self.crypt)
             except Exception as err:
                 if not config.force_encryption:
                     self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
@@ -504,6 +535,7 @@ class Connection(object):

     # Send data to connection
     def send(self, message, streaming=False):
+        self.updateOnlineStatus(outgoing_activity=True)
         self.last_send_time = time.time()
         if config.debug_socket:
             self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
@@ -543,6 +575,11 @@ class Connection(object):
                 message = None
                 with self.send_lock:
                     self.sock.sendall(data)
+                # XXX: Should not be used here:
+                # self.updateOnlineStatus(successful_activity=True)
+                # Looks like self.sock.sendall() returns normally, instead of
+                # raising an Exception (at least, some times).
+                # So the only way of detecting the network activity is self.handleMessage()
         except Exception as err:
             self.close("Send error: %s (cmd: %s)" % (err, stat_key))
             return False
@@ -554,7 +591,7 @@ class Connection(object):
         buff = 64 * 1024
         bytes_left = read_bytes
         bytes_sent = 0
-        while True:
+        while True and self.sock != None:
             self.last_send_time = time.time()
             data = file.read(min(bytes_left, buff))
             bytes_sent += len(data)
@@ -584,7 +621,8 @@ class Connection(object):
         self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
         if stream_to:
             self.waiting_streams[self.req_id] = stream_to
-        self.send(data)  # Send request
+        if not self.send(data):  # Send request
+            return False
         res = event.get()  # Wait until event solves
         return res
@@ -608,6 +646,7 @@ class Connection(object):
             return False  # Already closed
         self.closed = True
         self.connected = False
+        self.connecting = False
         if self.event_connected:
             self.event_connected.set(False)

@@ -633,3 +672,12 @@ class Connection(object):
         self.sock = None
         self.unpacker = None
         self.event_connected = None
+        self.crypt = None
+        self.sock_wrapped = False
+
+        return True
+
+    def updateOnlineStatus(self, outgoing_activity=False, successful_activity=False):
+        self.server.updateOnlineStatus(self,
+                                       outgoing_activity=outgoing_activity,
+                                       successful_activity=successful_activity)
@@ -1,4 +1,5 @@
 import logging
+import re
 import time
 import sys
 import socket
@@ -8,6 +9,7 @@ import gevent
 import msgpack
 from gevent.server import StreamServer
 from gevent.pool import Pool
+import gevent.event

 import util
 from util import helper
@@ -32,25 +34,36 @@ class ConnectionServer(object):
         self.port = port
         self.last_connection_id = 0  # Connection id incrementer
         self.last_connection_id_current_version = 0  # Connection id incrementer for current client version
-        self.last_connection_id_supported_version = 0  # Connection id incrementer for last supported version
         self.log = logging.getLogger("ConnServer")
         self.port_opened = {}
         self.peer_blacklist = SiteManager.peer_blacklist

+        self.managed_pools = {}
+
         self.tor_manager = TorManager(self.ip, self.port)
         self.connections = []  # Connections
         self.whitelist = config.ip_local  # No flood protection on this ips
         self.ip_incoming = {}  # Incoming connections from ip in the last minute to avoid connection flood
         self.broken_ssl_ips = {}  # Peerids of broken ssl connections
         self.ips = {}  # Connection by ip

         self.has_internet = True  # Internet outage detection
+        self.internet_online_since = 0
+        self.internet_offline_since = 0
+        self.last_outgoing_internet_activity_time = 0  # Last time the application tried to send any data
+        self.last_successful_internet_activity_time = 0  # Last time the application successfully sent or received any data
+        self.internet_outage_threshold = 60 * 2
+
         self.stream_server = None
         self.stream_server_proxy = None
         self.running = False
         self.stopping = False
+        self.stopping_event = gevent.event.Event()
         self.thread_checker = None

+        self.thread_pool = Pool(None)
+        self.managed_pools["thread"] = self.thread_pool
+
         self.stat_recv = defaultdict(lambda: defaultdict(int))
         self.stat_sent = defaultdict(lambda: defaultdict(int))
         self.bytes_recv = 0
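`managed_pools` is the bookkeeping structure behind the new shutdown logic: every gevent pool the server owns is registered under a name, so `onStop()` (later in this diff) can join and then kill them uniformly. A minimal self-contained sketch of that join-then-kill pattern, assuming gevent is installed:

```python
# Register each gevent Pool under a name, join them with a deadline on
# shutdown, then force-kill whatever ignored the deadline.
import gevent
from gevent.pool import Pool

managed_pools = {}
managed_pools["thread"] = Pool(None)

def shutdown(timeout=1):
    for name, pool in managed_pools.items():
        pool.join(timeout=timeout)   # cooperative wait
    for name, pool in managed_pools.items():
        if len(pool):                # tasks still alive after the deadline
            pool.kill()

managed_pools["thread"].spawn(gevent.sleep, 0.1)
shutdown(timeout=1)  # the short task finishes inside the join window
```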
@@ -62,8 +75,14 @@ class ConnectionServer(object):
         self.num_outgoing = 0
         self.had_external_incoming = False

+
+
         self.timecorrection = 0.0
         self.pool = Pool(500)  # do not accept more than 500 connections
+        self.managed_pools["incoming"] = self.pool
+
+        self.outgoing_pool = Pool(None)
+        self.managed_pools["outgoing"] = self.outgoing_pool

         # Bittorrent style peerid
         self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")
@@ -84,10 +103,11 @@ class ConnectionServer(object):
             return False
         self.running = True
         if check_connections:
-            self.thread_checker = gevent.spawn(self.checkConnections)
+            self.thread_checker = self.spawn(self.checkConnections)
         CryptConnection.manager.loadCerts()
         if config.tor != "disable":
             self.tor_manager.start()
+            self.tor_manager.startOnions()
         if not self.port:
             self.log.info("No port found, not binding")
             return False
@@ -108,7 +128,7 @@ class ConnectionServer(object):
             return None

         if self.stream_server_proxy:
-            gevent.spawn(self.listenProxy)
+            self.spawn(self.listenProxy)
         try:
             self.stream_server.serve_forever()
         except Exception as err:
@@ -116,22 +136,92 @@ class ConnectionServer(object):
             return False
         self.log.debug("Stopped.")

-    def stop(self):
+    def stop(self, ui_websocket=None):
         self.log.debug("Stopping %s" % self.stream_server)
         self.stopping = True
         self.running = False
+        self.stopping_event.set()
+        self.onStop(ui_websocket=ui_websocket)
+
+    def onStop(self, ui_websocket=None):
+        timeout = 30
+        start_time = time.time()
+        join_quantum = 0.1
+        prev_msg = None
+        while True:
+            if time.time() >= start_time + timeout:
+                break
+
+            total_size = 0
+            sizes = {}
+            timestep = 0
+            for name, pool in list(self.managed_pools.items()):
+                timestep += join_quantum
+                pool.join(timeout=join_quantum)
+                size = len(pool)
+                if size:
+                    sizes[name] = size
+                    total_size += size
+
+            if len(sizes) == 0:
+                break
+
+            if timestep < 1:
+                time.sleep(1 - timestep)
+
+            # format message
+            s = ""
+            for name, size in sizes.items():
+                s += "%s pool: %s, " % (name, size)
+            msg = "Waiting for tasks in managed pools to stop: %s" % s
+            # Prevent flooding to log
+            if msg != prev_msg:
+                prev_msg = msg
+                self.log.info("%s", msg)
+
+            percent = 100 * (time.time() - start_time) / timeout
+            msg = "File Server: waiting for %s tasks to stop" % total_size
+            self.sendShutdownProgress(ui_websocket, msg, percent)
+
+        for name, pool in list(self.managed_pools.items()):
+            size = len(pool)
+            if size:
+                self.log.info("Killing %s tasks in %s pool", size, name)
+                pool.kill()
+
+        self.sendShutdownProgress(ui_websocket, "File Server stopped. Now to exit.", 100)
+
         if self.thread_checker:
             gevent.kill(self.thread_checker)
+            self.thread_checker = None
         if self.stream_server:
             self.stream_server.stop()

+    def sendShutdownProgress(self, ui_websocket, message, progress):
+        if not ui_websocket:
+            return
+        ui_websocket.cmd("progress", ["shutdown", message, progress])
+        time.sleep(0.01)
+
+    # Sleeps the specified amount of time or until ConnectionServer is stopped
+    def sleep(self, t):
+        if t:
+            self.stopping_event.wait(timeout=t)
+        else:
+            time.sleep(t)
+
+    # Spawns a thread that will be waited for on server being stopped (and killed after a timeout)
+    def spawn(self, *args, **kwargs):
+        thread = self.thread_pool.spawn(*args, **kwargs)
+        return thread
+
     def closeConnections(self):
         self.log.debug("Closing all connection: %s" % len(self.connections))
         for connection in self.connections[:]:
             connection.close("Close all connections")

     def handleIncomingConnection(self, sock, addr):
-        if config.offline:
+        if not self.allowsAcceptingConnections():
             sock.close()
             return False
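The new `sleep()` waits on `stopping_event` rather than calling `time.sleep()`, so a worker stuck in a long 15-second pause wakes up the moment `stop()` is called. A minimal self-contained sketch of that interruptible-sleep pattern, assuming gevent is installed:

```python
# A shared Event doubles as both the sleep timer and the stop signal:
# wait(timeout=t) returns after t seconds, or immediately once set() fires.
import gevent
import gevent.event

stopping_event = gevent.event.Event()

def interruptible_sleep(t):
    if t:
        stopping_event.wait(timeout=t)

def worker():
    while not stopping_event.is_set():
        # ... periodic maintenance would run here ...
        interruptible_sleep(15)

g = gevent.spawn(worker)
gevent.sleep(0.1)
stopping_event.set()  # stop(): the worker wakes immediately and exits
g.join()
```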
@@ -149,7 +239,7 @@ class ConnectionServer(object):
             self.ip_incoming[ip] += 1
             if self.ip_incoming[ip] > 6:  # Allow 6 in 1 minute from same ip
                 self.log.debug("Connection flood detected from %s" % ip)
-                time.sleep(30)
+                self.sleep(30)
                 sock.close()
                 return False
         else:
@@ -158,10 +248,8 @@ class ConnectionServer(object):
         connection = Connection(self, ip, port, sock)
         self.connections.append(connection)
         rev = connection.handshake.get("rev", 0)
-        if rev >= 4560:
-            self.last_connection_id_supported_version += 1
-        if rev == config.rev:
+        if rev > 0 and rev == config.rev:
             self.last_connection_id_current_version += 1
         if ip not in config.ip_local:
             self.ips[ip] = connection
         connection.handleIncomingConnection(sock)
@@ -170,7 +258,7 @@ class ConnectionServer(object):
             pass

     def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
-        ip_type = helper.getIpType(ip)
+        ip_type = self.getIpType(ip)
         has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
         if has_per_site_onion:  # Site-unique connection for Tor
             if ip.endswith(".onion"):
@@ -206,7 +294,7 @@ class ConnectionServer(object):
                 return connection

         # No connection found
-        if create and not config.offline:  # Allow to create new connection if not found
+        if create and self.allowsCreatingConnections():
             if port == 0:
                 raise Exception("This peer is not connectable")
@@ -214,6 +302,7 @@ class ConnectionServer(object):
                 raise Exception("This peer is blacklisted")

             try:
+                #self.log.info("Connection to: %s:%s", ip, port)
                 if has_per_site_onion:  # Lock connection to site
                     connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
                 else:
@@ -228,17 +317,16 @@ class ConnectionServer(object):
                     raise Exception("Connection event return error")
                 else:
                     rev = connection.handshake.get("rev", 0)
-                    if rev >= 4560:
-                        self.last_connection_id_supported_version += 1
-                    if rev == config.rev:
+                    if rev > 0 and rev == config.rev:
                         self.last_connection_id_current_version += 1

             except Exception as err:
+                #self.log.info("Connection error (%s, %s): %s", ip, port, Debug.formatException(err))
                 connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
                 raise err

             if len(self.connections) > config.global_connected_limit:
-                gevent.spawn(self.checkMaxConnections)
+                self.spawn(self.checkMaxConnections)

             return connection
         else:
@@ -261,12 +349,12 @@ class ConnectionServer(object):

     def checkConnections(self):
         run_i = 0
-        time.sleep(15)
+        self.sleep(15)
         while self.running:
             run_i += 1
             self.ip_incoming = {}  # Reset connected ips counter
-            last_message_time = 0
             s = time.time()
+            self.updateOnlineStatus(None)
             for connection in self.connections[:]:  # Make a copy
                 if connection.ip.endswith(".onion") or config.tor == "always":
                     timeout_multipler = 2
@@ -274,9 +362,6 @@ class ConnectionServer(object):
                     timeout_multipler = 1

                 idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
-                if connection.last_message_time > last_message_time and not connection.is_private_ip:
-                    # Message from local IPs does not means internet connection
-                    last_message_time = connection.last_message_time

                 if connection.unpacker and idle > 30:
                     # Delete the unpacker if not needed
@@ -324,24 +409,12 @@ class ConnectionServer(object):
                     # Reset bad action counter every 30 min
                     connection.bad_actions = 0

-            # Internet outage detection
-            if time.time() - last_message_time > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
-                # Offline: Last message more than 60-600sec depending on connection number
-                if self.has_internet and last_message_time:
-                    self.has_internet = False
-                    self.onInternetOffline()
-            else:
-                # Online
-                if not self.has_internet:
-                    self.has_internet = True
-                    self.onInternetOnline()
-
             self.timecorrection = self.getTimecorrection()

             if time.time() - s > 0.01:
                 self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))

-            time.sleep(15)
+            self.sleep(15)
         self.log.debug("Checkconnections ended")

     @util.Noparallel(blocking=False)
@@ -366,6 +439,68 @@ class ConnectionServer(object):
             ))
         return num_closed

+    # Returns True if we should slow down opening new connections as at the moment
+    # there are too many connections being established and not connected completely
+    # (not entered the message loop yet).
+    def shouldThrottleNewConnections(self):
+        threshold = config.simultaneous_connection_throttle_threshold
+        if len(self.connections) <= threshold:
+            return False
+        nr_connections_being_established = 0
+        for connection in self.connections[:]:  # Make a copy
+            if connection.connecting and not connection.connected and connection.type == "out":
+                nr_connections_being_established += 1
+                if nr_connections_being_established > threshold:
+                    return True
+        return False
+
+    # Internet outage detection
+    def updateOnlineStatus(self, connection, outgoing_activity=False, successful_activity=False):
+
+        now = time.time()
+
+        if connection and not connection.is_private_ip:
+            if outgoing_activity:
+                self.last_outgoing_internet_activity_time = now
+            if successful_activity:
+                self.last_successful_internet_activity_time = now
+                self.setInternetStatus(True)
+            return
+
+        if not self.last_outgoing_internet_activity_time:
+            return
+
+        if (
+            (self.last_successful_internet_activity_time < now - self.internet_outage_threshold)
+            and
+            (self.last_successful_internet_activity_time < self.last_outgoing_internet_activity_time)
+        ):
+            self.setInternetStatus(False)
+            return
+
+        # This is the old algorithm just in case we missed something
+        idle = now - self.last_successful_internet_activity_time
+        if idle > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
+            # Offline: Last successful activity more than 60-600sec depending on connection number
+            self.setInternetStatus(False)
+            return
+
+    def setInternetStatus(self, status):
+        if self.has_internet == status:
+            return
+
+        self.has_internet = status
+
+        if self.has_internet:
+            self.internet_online_since = time.time()
+            self.spawn(self.onInternetOnline)
+        else:
+            self.internet_offline_since = time.time()
+            self.spawn(self.onInternetOffline)
+
+    def isInternetOnline(self):
+        return self.has_internet
+
     def onInternetOnline(self):
         self.log.info("Internet online")
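The rewritten outage detector declares the internet offline only when the last successful activity is both older than the two-minute threshold and older than the last outgoing attempt, so silence while the node is simply idle no longer counts as an outage. A worked example of just that rule (standalone function, timestamps are invented):

```python
# The primary rule from updateOnlineStatus() above, isolated:
internet_outage_threshold = 60 * 2

def is_offline(now, last_outgoing, last_successful):
    if not last_outgoing:
        return False  # never tried to send anything yet
    return (last_successful < now - internet_outage_threshold
            and last_successful < last_outgoing)

now = 1000.0
assert is_offline(now, last_outgoing=990.0, last_successful=995.0) is False  # recent success
assert is_offline(now, last_outgoing=990.0, last_successful=700.0) is True   # tried after last success, silent > 2 min
assert is_offline(now, last_outgoing=600.0, last_successful=700.0) is False  # nothing attempted since last success
```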
@@ -373,6 +508,32 @@ class ConnectionServer(object):
         self.had_external_incoming = False
         self.log.info("Internet offline")

+    def setOfflineMode(self, offline_mode):
+        if config.offline == offline_mode:
+            return
+        config.offline = offline_mode  # Yep, awkward
+        if offline_mode:
+            self.log.info("offline mode is ON")
+        else:
+            self.log.info("offline mode is OFF")
+
+    def isOfflineMode(self):
+        return config.offline
+
+    def allowsCreatingConnections(self):
+        if self.isOfflineMode():
+            return False
+        if self.stopping:
+            return False
+        return True
+
+    def allowsAcceptingConnections(self):
+        if self.isOfflineMode():
+            return False
+        if self.stopping:
+            return False
+        return True
+
     def getTimecorrection(self):
         corrections = sorted([
             connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
@@ -384,3 +545,48 @@ class ConnectionServer(object):
         mid = int(len(corrections) / 2 - 1)
         median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
         return median
+
+
+    ############################################################################
+
+    # Methods for handling network address types
+    # (ipv4, ipv6, onion etc... more to be implemented by plugins)
+    #
+    # All the functions handling network address types have "Ip" in the name.
+    # So it was in the initial codebase, and I keep the naming, since I couldn't
+    # think of a better option.
+    # "IP" is short and quite clear and lets you understand that a variable
+    # contains a peer address or other transport-level address and not
+    # an address of ZeroNet site.
+    #
+
+    # Returns type of the given network address.
+    # Since: 0.8.0
+    # Replaces helper.getIpType() in order to be extensible by plugins.
+    def getIpType(self, ip):
+        if ip.endswith(".onion"):
+            return "onion"
+        elif ":" in ip:
+            return "ipv6"
+        elif re.match(r"[0-9\.]+$", ip):
+            return "ipv4"
+        else:
+            return "unknown"
+
+    # Checks if a network address can be reachable in the current configuration
+    # and returns a string describing why it cannot.
+    # If the network address can be reachable, returns False.
+    # Since: 0.8.0
+    def getIpUnreachability(self, ip):
+        ip_type = self.getIpType(ip)
+        if ip_type == 'onion' and not self.tor_manager.enabled:
+            return "Can't connect to onion addresses, no Tor controller present"
+        if config.tor == "always" and helper.isPrivateIp(ip) and ip not in config.ip_local:
+            return "Can't connect to local IPs in Tor: always mode"
+        return False
+
+    # Returns True if ConnectionServer has means for establishing outgoing
+    # connections to the given address.
+    # Since: 0.8.0
+    def isIpReachable(self, ip):
+        return self.getIpUnreachability(ip) == False
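Moving address classification onto the server object is what lets plugins extend it (e.g. with new transport types); the regex logic itself is simple. A standalone copy of the classifier with sample inputs (the example addresses are illustrative, not from the patch):

```python
import re

def get_ip_type(ip):
    # Same decision order as ConnectionServer.getIpType() above
    if ip.endswith(".onion"):
        return "onion"
    elif ":" in ip:
        return "ipv6"
    elif re.match(r"[0-9\.]+$", ip):
        return "ipv4"
    else:
        return "unknown"

assert get_ip_type("1.2.3.4") == "ipv4"
assert get_ip_type("2001:db8::1") == "ipv6"
assert get_ip_type("exampleonionaddress.onion") == "onion"
assert get_ip_type("example.com") == "unknown"
```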
@@ -239,7 +239,7 @@ class ContentManager(object):

         if num_removed_bad_files > 0:
             self.site.worker_manager.removeSolvedFileTasks(mark_as_good=False)
-            gevent.spawn(self.site.update, since=0)
+            self.site.spawn(self.site.update, since=0)

         self.log.debug("Archived removed contents: %s, removed bad files: %s" % (num_removed_contents, num_removed_bad_files))

@@ -651,6 +651,25 @@ class ContentManager(object):
         )
         return files_node, files_optional_node

+    def serializeForSigning(self, content):
+        if "sign" in content:
+            del(content["sign"])  # The file signed without the sign
+        if "signs" in content:
+            del(content["signs"])  # The file signed without the signs
+
+        sign_content = json.dumps(content, sort_keys=True)  # Dump the json to string to remove whitespaces
+
+        # Fix float representation error on Android
+        modified = content["modified"]
+        if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
+            modified_fixed = "{:.6f}".format(modified).strip("0.")
+            sign_content = sign_content.replace(
+                '"modified": %s' % repr(modified),
+                '"modified": %s' % modified_fixed
+            )
+
+        return sign_content
+
     # Create and sign a content.json
     # Return: The new content if filewrite = False
     def sign(self, inner_path="content.json", privatekey=None, filewrite=True, update_changed_files=False, extend=None, remove_missing_optional=False):
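`serializeForSigning()` extracts the canonical byte string that both the signer and the verifier must compute identically: signatures are stripped, keys sorted, and the JSON dumped in one compact pass. A small illustration of that canonical form (sample content is invented):

```python
# Signer and verifier must serialize the same bytes, so the signature
# fields are removed and the key order is fixed before dumping.
import json

content = {"modified": 1700000000, "files": {}, "signs": {"1Addr": "sig"}}
content.pop("sign", None)
content.pop("signs", None)
sign_content = json.dumps(content, sort_keys=True)
print(sign_content)  # {"files": {}, "modified": 1700000000}
```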
@@ -727,6 +746,7 @@ class ContentManager(object):
         elif "files_optional" in new_content:
             del new_content["files_optional"]

+        new_content["modified"] = int(time.time())  # Add timestamp
         if inner_path == "content.json":
             new_content["zeronet_version"] = config.version
             new_content["signs_required"] = content.get("signs_required", 1)
@@ -746,44 +766,20 @@ class ContentManager(object):
             )
             self.log.info("Correct %s in valid signers: %s" % (privatekey_address, valid_signers))

-        signs_required = 1
         if inner_path == "content.json" and privatekey_address == self.site.address:
             # If signing using the root key, then sign the valid signers
-            signs_required = new_content["signs_required"]
-            signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
+            signers_data = "%s:%s" % (new_content["signs_required"], ",".join(valid_signers))
             new_content["signers_sign"] = CryptBitcoin.sign(str(signers_data), privatekey)
             if not new_content["signers_sign"]:
                 self.log.info("Old style address, signers_sign is none")

         self.log.info("Signing %s..." % inner_path)

-        if "signs" in new_content:
-            # del(new_content["signs"]) # Delete old signs
-            old_signs_content = new_content["signs"]
-            del(new_content["signs"])
-        else:
-            old_signs_content = None
-        if "sign" in new_content:
-            del(new_content["sign"])  # Delete old sign (backward compatibility)
-
-        if signs_required > 1:
-            has_valid_sign = False
-            sign_content = json.dumps(new_content, sort_keys=True)
-            for signer in valid_signers:
-                res = CryptBitcoin.verify(sign_content, signer, old_signs_content[signer]);
-                print(res)
-                if res:
-                    has_valid_sign = has_valid_sign or res
-            if has_valid_sign:
-                new_content["modified"] = content["modified"]
-                sign_content = json.dumps(new_content, sort_keys=True)
-            else:
-                new_content["modified"] = int(time.time())  # Add timestamp
-                sign_content = json.dumps(new_content, sort_keys=True)
+        sign_content = self.serializeForSigning(new_content)
         sign = CryptBitcoin.sign(sign_content, privatekey)
         # new_content["signs"] = content.get("signs", {}) # TODO: Multisig
         if sign:  # If signing is successful (not an old address)
-            new_content["signs"] = old_signs_content or {}
+            new_content["signs"] = {}
             new_content["signs"][privatekey_address] = sign

         self.verifyContent(inner_path, new_content)
@@ -818,9 +814,7 @@ class ContentManager(object):

     # Return: The required number of valid signs for the content.json
     def getSignsRequired(self, inner_path, content=None):
-        if not content:
-            return 1
-        return content.get("signs_required", 1)
+        return 1  # Todo: Multisig

     def verifyCertSign(self, user_address, user_auth_type, user_name, issuer_address, sign):
         from Crypt import CryptBitcoin
@ -945,104 +939,95 @@ class ContentManager(object):
|
||||||
|
|
||||||
return True # All good
|
return True # All good
|
||||||
|
|
||||||
|
def verifyContentFile(self, inner_path, file, ignore_same=True):
|
||||||
|
from Crypt import CryptBitcoin
|
||||||
|
|
||||||
|
if type(file) is dict:
|
||||||
|
new_content = file
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
if sys.version_info.major == 3 and sys.version_info.minor < 6:
|
||||||
|
new_content = json.loads(file.read().decode("utf8"))
|
||||||
|
else:
|
||||||
|
new_content = json.load(file)
|
||||||
|
except Exception as err:
|
||||||
|
raise VerifyError("Invalid json file: %s" % err)
|
||||||
|
if inner_path in self.contents:
|
||||||
|
old_content = self.contents.get(inner_path, {"modified": 0})
|
||||||
|
# Checks if its newer the ours
|
||||||
|
if old_content["modified"] == new_content["modified"] and ignore_same: # Ignore, have the same content.json
|
||||||
|
return None
|
||||||
|
elif old_content["modified"] > new_content["modified"]: # We have newer
|
||||||
|
raise VerifyError(
|
||||||
|
"We have newer (Our: %s, Sent: %s)" %
|
||||||
|
(old_content["modified"], new_content["modified"])
|
||||||
|
)
|
||||||
|
if new_content["modified"] > time.time() + 60 * 60 * 24: # Content modified in the far future (allow 1 day+)
|
||||||
|
raise VerifyError("Modify timestamp is in the far future!")
|
||||||
|
if self.isArchived(inner_path, new_content["modified"]):
|
||||||
|
if inner_path in self.site.bad_files:
|
||||||
|
del self.site.bad_files[inner_path]
|
||||||
|
raise VerifyError("This file is archived!")
|
||||||
|
# Check sign
|
||||||
|
sign = new_content.get("sign")
|
||||||
|
signs = new_content.get("signs", {})
|
||||||
|
sign_content = self.serializeForSigning(new_content)
|
||||||
|
|
||||||
|
if signs: # New style signing
|
||||||
|
valid_signers = self.getValidSigners(inner_path, new_content)
|
||||||
|
signs_required = self.getSignsRequired(inner_path, new_content)
|
||||||
|
|
||||||
|
if inner_path == "content.json" and len(valid_signers) > 1: # Check signers_sign on root content.json
|
||||||
|
signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
|
||||||
|
if not CryptBitcoin.verify(signers_data, self.site.address, new_content["signers_sign"]):
|
||||||
|
raise VerifyError("Invalid signers_sign!")
|
||||||
|
|
||||||
|
if inner_path != "content.json" and not self.verifyCert(inner_path, new_content): # Check if cert valid
|
||||||
|
raise VerifyError("Invalid cert!")
|
||||||
|
|
||||||
|
valid_signs = 0
|
||||||
|
for address in valid_signers:
|
||||||
|
if address in signs:
|
||||||
|
valid_signs += CryptBitcoin.verify(sign_content, address, signs[address])
|
||||||
|
if valid_signs >= signs_required:
|
||||||
|
break # Break if we has enough signs
|
||||||
|
if valid_signs < signs_required:
|
||||||
|
raise VerifyError("Valid signs: %s/%s" % (valid_signs, signs_required))
|
||||||
|
else:
|
||||||
|
return self.verifyContent(inner_path, new_content)
|
||||||
|
elif sign: # Old style signing
|
||||||
|
raise VerifyError("Invalid old-style sign")
|
||||||
|
else:
|
||||||
|
raise VerifyError("Not signed")
|
||||||
|
|
||||||
|
def verifyOrdinaryFile(self, inner_path, file, ignore_same=True):
|
||||||
|
file_info = self.getFileInfo(inner_path)
|
||||||
|
if file_info:
|
||||||
|
if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
|
||||||
|
raise VerifyError("Invalid hash")
|
||||||
|
|
||||||
|
if file_info.get("size", 0) != file.tell():
|
||||||
|
raise VerifyError(
|
||||||
|
"File size does not match %s <> %s" %
|
||||||
|
(inner_path, file.tell(), file_info.get("size", 0))
|
||||||
|
)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
else: # File not in content.json
|
||||||
|
raise VerifyError("File not in content.json")
|
||||||
|
|
     # Verify file validity
     # Return: None = Same as before, False = Invalid, True = Valid
     def verifyFile(self, inner_path, file, ignore_same=True):
-        if inner_path.endswith("content.json"):  # content.json: Check using sign
-            from Crypt import CryptBitcoin
-            try:
-                if type(file) is dict:
-                    new_content = file
-                else:
-                    try:
-                        if sys.version_info.major == 3 and sys.version_info.minor < 6:
-                            new_content = json.loads(file.read().decode("utf8"))
-                        else:
-                            new_content = json.load(file)
-                    except Exception as err:
-                        raise VerifyError("Invalid json file: %s" % err)
-                if inner_path in self.contents:
-                    old_content = self.contents.get(inner_path, {"modified": 0})
-                    # Checks if its newer the ours
-                    if old_content["modified"] == new_content["modified"] and ignore_same:  # Ignore, have the same content.json
-                        return None
-                    elif old_content["modified"] > new_content["modified"]:  # We have newer
-                        raise VerifyError(
-                            "We have newer (Our: %s, Sent: %s)" %
-                            (old_content["modified"], new_content["modified"])
-                        )
-                if new_content["modified"] > time.time() + 60 * 60 * 24:  # Content modified in the far future (allow 1 day+)
-                    raise VerifyError("Modify timestamp is in the far future!")
-                if self.isArchived(inner_path, new_content["modified"]):
-                    if inner_path in self.site.bad_files:
-                        del self.site.bad_files[inner_path]
-                    raise VerifyError("This file is archived!")
-                # Check sign
-                sign = new_content.get("sign")
-                signs = new_content.get("signs", {})
-                if "sign" in new_content:
-                    del(new_content["sign"])  # The file signed without the sign
-                if "signs" in new_content:
-                    del(new_content["signs"])  # The file signed without the signs
-
-                sign_content = json.dumps(new_content, sort_keys=True)  # Dump the json to string to remove whitepsace
-
-                # Fix float representation error on Android
-                modified = new_content["modified"]
-                if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
-                    modified_fixed = "{:.6f}".format(modified).strip("0.")
-                    sign_content = sign_content.replace(
-                        '"modified": %s' % repr(modified),
-                        '"modified": %s' % modified_fixed
-                    )
-
-                if signs:  # New style signing
-                    valid_signers = self.getValidSigners(inner_path, new_content)
-                    signs_required = self.getSignsRequired(inner_path, new_content)
-
-                    if inner_path == "content.json" and len(valid_signers) > 1:  # Check signers_sign on root content.json
-                        signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
-                        if not CryptBitcoin.verify(signers_data, self.site.address, new_content["signers_sign"]):
-                            raise VerifyError("Invalid signers_sign!")
-
-                    if inner_path != "content.json" and not self.verifyCert(inner_path, new_content):  # Check if cert valid
-                        raise VerifyError("Invalid cert!")
-
-                    valid_signs = []
-                    for address in valid_signers:
-                        if address in signs:
-                            result = CryptBitcoin.verify(sign_content, address, signs[address])
-                            if result:
-                                valid_signs.append(address)
-                        if len(valid_signs) >= signs_required:
-                            break  # Break if we has enough signs
-                    if len(valid_signs) < signs_required:
-                        raise VerifyError("Valid signs: %s/%s, Valid Signers : %s" % (len(valid_signs), signs_required, valid_signs))
-                    else:
-                        return self.verifyContent(inner_path, new_content)
-                else:  # Old style signing
-                    raise VerifyError("Invalid old-style sign")
-
-            except Exception as err:
-                self.log.warning("%s: verify sign error: %s" % (inner_path, Debug.formatException(err)))
-                raise err
-
-        else:  # Check using sha512 hash
-            file_info = self.getFileInfo(inner_path)
-            if file_info:
-                if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
-                    raise VerifyError("Invalid hash")
-
-                if file_info.get("size", 0) != file.tell():
-                    raise VerifyError(
-                        "File size does not match %s <> %s" %
-                        (inner_path, file.tell(), file_info.get("size", 0))
-                    )
-
-                return True
-
-            else:  # File not in content.json
-                raise VerifyError("File not in content.json")
+        try:
+            if inner_path.endswith("content.json"):
+                return self.verifyContentFile(inner_path, file, ignore_same)
+            else:
+                return self.verifyOrdinaryFile(inner_path, file, ignore_same)
+        except Exception as err:
+            self.log.info("%s: verify error: %s" % (inner_path, Debug.formatException(err)))
+            raise err
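After this refactor, verifyFile is only a dispatcher: content.json goes to signature verification, everything else to the hash/size check, and callers keep the same contract (None/True on success, VerifyError on failure). A self-contained sketch of a hypothetical call site; `StubContentManager`, `check_downloaded_file` and the local `VerifyError` are illustrative stand-ins, not names from the repo:

```python
import io

class VerifyError(Exception):
    """Stand-in for ZeroNet's VerifyError exception class."""

class StubContentManager:
    # Minimal stand-in mimicking the dispatcher above
    def verifyFile(self, inner_path, file, ignore_same=True):
        if inner_path.endswith("content.json"):
            raise VerifyError("Invalid sign")  # pretend the signature check failed
        return True

def check_downloaded_file(content_manager, inner_path, raw_bytes):
    try:
        result = content_manager.verifyFile(inner_path, io.BytesIO(raw_bytes))
    except VerifyError as err:
        print("Rejected %s: %s" % (inner_path, err))
        return False
    return result is not False  # None ("same as before") also counts as accepted

cm = StubContentManager()
assert check_downloaded_file(cm, "img/logo.png", b"...") is True
assert check_downloaded_file(cm, "content.json", b"{}") is False
```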
     def optionalDelete(self, inner_path):
         self.site.storage.delete(inner_path)
@@ -127,10 +127,6 @@ class CryptConnectionManager:
             "/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Domain Validation Secure Server CA"
         ]
         self.openssl_env['CN'] = random.choice(self.fakedomains)
-        environ = os.environ
-        environ['OPENSSL_CONF'] = self.openssl_env['OPENSSL_CONF']
-        environ['RANDFILE'] = self.openssl_env['RANDFILE']
-        environ['CN'] = self.openssl_env['CN']

         if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
             self.createSslContexts()

@@ -156,7 +152,7 @@ class CryptConnectionManager:
         self.log.debug("Running: %s" % cmd)
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()

@@ -179,7 +175,7 @@ class CryptConnectionManager:
         self.log.debug("Generating certificate key and signing request...")
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()

@@ -198,7 +194,7 @@ class CryptConnectionManager:
         self.log.debug("Generating RSA cert...")
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()
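The change above stops mutating the process-global `os.environ` and instead hands the manager's own `self.openssl_env` dict straight to `subprocess.Popen`. Passing `env=` replaces the child's entire environment, so CN/OPENSSL_CONF/RANDFILE stay scoped to the one openssl invocation instead of leaking into every later child process. A minimal sketch of the pattern (paths and the `echo` command are illustrative; a POSIX shell is assumed):

```python
import subprocess

# Build a self-contained environment for one child process instead of
# mutating os.environ (which would affect every subsequently spawned child).
openssl_env = {
    "OPENSSL_CONF": "/tmp/openssl.cnf",    # illustrative paths
    "RANDFILE": "/tmp/openssl-rand.tmp",
    "CN": "example.com",
}

proc = subprocess.Popen(
    "echo $CN", shell=True, stderr=subprocess.STDOUT,
    stdout=subprocess.PIPE, env=openssl_env
)
out = proc.stdout.read().strip().decode(errors="replace")
proc.wait()
print(out)  # -> example.com
```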
@@ -10,19 +10,33 @@ from Config import config
 from . import Debug

 last_error = None

+thread_shutdown = None
+
+
+def shutdownThread():
+    import main
+    try:
+        if "file_server" in dir(main):
+            thread = gevent.spawn(main.file_server.stop)
+            thread.join(timeout=60)
+        if "ui_server" in dir(main):
+            thread = gevent.spawn(main.ui_server.stop)
+            thread.join(timeout=10)
+    except Exception as err:
+        print("Error in shutdown thread: %s" % err)
+        sys.exit(0)
+    else:
+        sys.exit(0)
+

 def shutdown(reason="Unknown"):
+    global thread_shutdown
     logging.info("Shutting down (reason: %s)..." % reason)
-    import main
-    if "file_server" in dir(main):
-        try:
-            gevent.spawn(main.file_server.stop)
-            if "ui_server" in dir(main):
-                gevent.spawn(main.ui_server.stop)
-        except Exception as err:
-            print("Proper shutdown error: %s" % err)
-            sys.exit(0)
-    else:
-        sys.exit(0)
+    if not thread_shutdown:
+        try:
+            thread_shutdown = gevent.spawn(shutdownThread)
+        except Exception as err:
+            print("Proper shutdown error: %s" % err)
+            sys.exit(0)

 # Store last error, ignore notify, allow manual error logging
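The rewrite moves the actual stop calls into a dedicated greenlet and, unlike the old version, joins each stop with a timeout, so a wedged server cannot block exit forever; the `thread_shutdown` guard also makes repeated shutdown() calls idempotent. A reduced, runnable sketch of that pattern (the `Server` class and timeouts are stand-ins):

```python
import gevent

class Server:
    def stop(self):
        gevent.sleep(0.1)  # pretend cleanup work

thread_shutdown = None

def shutdownThread(servers):
    for server, timeout in servers:
        thread = gevent.spawn(server.stop)
        thread.join(timeout=timeout)  # bounded wait per component

def shutdown(servers):
    global thread_shutdown
    if not thread_shutdown:  # idempotent: only the first call spawns the worker
        thread_shutdown = gevent.spawn(shutdownThread, servers)
    return thread_shutdown

shutdown([(Server(), 60), (Server(), 10)]).join()
```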
@@ -33,7 +33,7 @@ class FileRequest(object):
         self.connection = connection

         self.req_id = None
-        self.sites = self.server.sites
+        self.sites = self.server.getSites()
         self.log = server.log
         self.responded = False  # Responded to the request
@@ -109,35 +109,31 @@ class FileRequest(object):
             return False

         inner_path = params.get("inner_path", "")
+        current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
+        body = params["body"]

         if not inner_path.endswith("content.json"):
             self.response({"error": "Only content.json update allowed"})
             self.connection.badAction(5)
             return

-        current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
         should_validate_content = True
         if "modified" in params and params["modified"] <= current_content_modified:
             should_validate_content = False
             valid = None  # Same or earlier content as we have
-
-        body = params["body"]
-        if not body:  # No body sent, we have to download it first
+        elif not body:  # No body sent, we have to download it first
             site.log.debug("Missing body from update for file %s, downloading ..." % inner_path)
             peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update")  # Add or get peer
             try:
                 body = peer.getFile(site.address, inner_path).read()
             except Exception as err:
                 site.log.debug("Can't download updated file %s: %s" % (inner_path, err))
-                self.response({"error": "Invalid File update: Failed to download updated file content"})
+                self.response({"error": "File invalid update: Can't download updated file"})
                 self.connection.badAction(5)
                 return

         if should_validate_content:
             try:
-                if type(body) is str:
-                    body = body.encode()
-                # elif type(body) is list:
-                #     content = json.loads(bytes(list).decode())
                 content = json.loads(body.decode())
             except Exception as err:
                 site.log.debug("Update for %s is invalid JSON: %s" % (inner_path, err))
@@ -165,19 +161,21 @@ class FileRequest(object):

             site.onFileDone(inner_path)  # Trigger filedone

-            # Download every changed file from peer
+        if inner_path.endswith("content.json"):  # Download every changed file from peer
             peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update")  # Add or get peer
             # On complete publish to other peers
             diffs = params.get("diffs", {})
             site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs, limit=6), "publish_%s" % inner_path)

             # Load new content file and download changed files in new thread
             def downloader():
                 site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
+                del self.server.files_parsing[file_uri]
+
+            gevent.spawn(downloader)
+        else:
             del self.server.files_parsing[file_uri]

-            gevent.spawn(downloader)
-
         self.response({"ok": "Thanks, file %s updated!" % inner_path})
         self.connection.goodAction()
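The `site.onComplete.once(..., "publish_%s" % inner_path)` call registers the publish callback at most once per inner_path, so repeated update requests for the same file do not queue duplicate publishes. A tiny runnable model of a keyed once-event; `OnceEvent` here is a stand-in for illustration, not ZeroNet's actual event helper:

```python
class OnceEvent:
    def __init__(self):
        self.listeners = {}  # key -> callback, at most one per key

    def once(self, callback, key):
        self.listeners.setdefault(key, callback)  # first registration wins

    def fire(self):
        listeners, self.listeners = self.listeners, {}
        for callback in listeners.values():
            callback()

on_complete = OnceEvent()
for _ in range(3):  # three updates for the same file...
    on_complete.once(lambda: print("publish content.json"), "publish_content.json")
on_complete.fire()  # ...but only one publish fires
```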
@@ -378,7 +376,7 @@ class FileRequest(object):

         for hash_id, peers in found.items():
             for peer in peers:
-                ip_type = helper.getIpType(peer.ip)
+                ip_type = self.server.getIpType(peer.ip)
                 if len(back[ip_type][hash_id]) < 20:
                     back[ip_type][hash_id].append(peer.packMyAddress())
         return back
@@ -432,7 +430,7 @@ class FileRequest(object):

     # Check requested port of the other peer
     def actionCheckport(self, params):
-        if helper.getIpType(self.connection.ip) == "ipv6":
+        if self.server.getIpType(self.connection.ip) == "ipv6":
             sock_address = (self.connection.ip, params["port"], 0, 0)
         else:
             sock_address = (self.connection.ip, params["port"])
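The IPv4/IPv6 split here matters because AF_INET6 socket addresses are 4-tuples `(host, port, flowinfo, scope_id)` while AF_INET uses plain `(host, port)` 2-tuples; building the wrong shape fails at connect time. A standalone illustration (this stand-in detects the family with `inet_pton`; ZeroNet's own getIpType does its own classification, including onion addresses):

```python
import socket

def make_sock_address(ip, port):
    # AF_INET6 sockaddr is (host, port, flowinfo, scope_id); AF_INET is (host, port)
    try:
        socket.inet_pton(socket.AF_INET6, ip)
        return (ip, port, 0, 0)
    except OSError:
        return (ip, port)

assert make_sock_address("127.0.0.1", 15441) == ("127.0.0.1", 15441)
assert make_sock_address("::1", 15441) == ("::1", 15441, 0, 0)
```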
@@ -3,6 +3,7 @@ import time
 import random
 import socket
 import sys
+import weakref

 import gevent
 import gevent.pool
@@ -18,6 +19,13 @@ from Connection import ConnectionServer
 from Plugin import PluginManager
 from Debug import Debug

+log = logging.getLogger("FileServer")
+
+
+class FakeThread(object):
+    def __init__(self):
+        pass
+
+    def ready(self):
+        return False
+

 @PluginManager.acceptPlugins
 class FileServer(ConnectionServer):
@@ -25,12 +33,31 @@ class FileServer(ConnectionServer):
     def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
         self.site_manager = SiteManager.site_manager
         self.portchecker = PeerPortchecker.PeerPortchecker(self)
-        self.log = logging.getLogger("FileServer")
         self.ip_type = ip_type
         self.ip_external_list = []

+        # This is wrong:
+        # self.log = logging.getLogger("FileServer")
+        # The value of self.log will be overwritten in ConnectionServer.__init__()
+
+        self.recheck_port = True
+
+        self.active_mode_thread_pool = gevent.pool.Pool(None)
+        self.site_pool = gevent.pool.Pool(None)
+
+        self.update_pool = gevent.pool.Pool(10)
+        self.update_start_time = 0
+        self.update_sites_task_next_nr = 1
+
+        self.update_threads = weakref.WeakValueDictionary()
+
+        self.passive_mode = None
+        self.active_mode = None
+        self.active_mode_threads = {}
+
         self.supported_ip_types = ["ipv4"]  # Outgoing ip_type support
-        if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
+        if self.getIpType(ip) == "ipv6" or self.isIpv6Supported():
             self.supported_ip_types.append("ipv6")

         if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
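`update_threads` is a `weakref.WeakValueDictionary` mapping site address to its update greenlet, so finished, collected greenlets drop out of the map on their own; the `FakeThread` placeholder (whose `ready()` is always False) is presumably registered first so a lookup landing between registration and the actual spawn never reports "no update running". A runnable sketch of the weak-value behavior:

```python
import gc
import weakref

class FakeThread(object):
    def ready(self):
        return False  # placeholder: "an update is pending but not finished"

threads = weakref.WeakValueDictionary()

placeholder = FakeThread()
threads["1SiteAddr"] = placeholder
assert not threads["1SiteAddr"].ready()  # looks like a running update

del placeholder
gc.collect()
# Once nothing else references the value, the entry disappears on its own,
# so finished/collected updates never need manual cleanup.
assert "1SiteAddr" not in threads
```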
@@ -52,33 +79,50 @@ class FileServer(ConnectionServer):
         config.arguments.fileserver_port = port

         ConnectionServer.__init__(self, ip, port, self.handleRequest)
-        self.log.debug("Supported IP types: %s" % self.supported_ip_types)
+        log.debug("Supported IP types: %s" % self.supported_ip_types)
+
+        self.managed_pools["active_mode_thread"] = self.active_mode_thread_pool
+        self.managed_pools["update"] = self.update_pool
+        self.managed_pools["site"] = self.site_pool

         if ip_type == "dual" and ip == "::":
             # Also bind to ipv4 addres in dual mode
             try:
-                self.log.debug("Binding proxy to %s:%s" % ("::", self.port))
+                log.debug("Binding proxy to %s:%s" % ("::", self.port))
                 self.stream_server_proxy = StreamServer(
                     ("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
                 )
             except Exception as err:
-                self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
+                log.info("StreamServer proxy create error: %s" % Debug.formatException(err))

         self.port_opened = {}
-        self.sites = self.site_manager.sites
         self.last_request = time.time()
         self.files_parsing = {}
         self.ui_server = None

+    def getSites(self):
+        sites = self.site_manager.list()
+        # We need to keep self.sites for the backward compatibility with plugins.
+        # Never. Ever. Use it.
+        # TODO: fix plugins
+        self.sites = sites
+        return sites
+
+    def getSite(self, address):
+        return self.getSites().get(address, None)
+
+    def getSiteAddresses(self):
+        # Avoid saving the site list on the stack, since a site may be deleted
+        # from the original list while iterating.
+        # Use the list of addresses instead.
+        return [
+            site.address for site in
+            sorted(list(self.getSites().values()), key=lambda site: site.settings.get("modified", 0), reverse=True)
+        ]
+
     def getRandomPort(self, ip, port_range_from, port_range_to):
-        """Generates Random Port from given range
-        Args:
-            ip: IP Address
-            port_range_from: From Range
-            port_range_to: to Range
-        """
-        self.log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
+        log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
         tried = []
         for bind_retry in range(100):
             port = random.randint(port_range_from, port_range_to)
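getSiteAddresses deliberately returns plain address strings, newest-modified first, instead of Site objects: a long-running loop then re-resolves each site by address, naturally skips ones deleted mid-iteration, and holds no references that would pin deleted Site objects in memory. A reduced, runnable model of the pattern (the dict contents are illustrative):

```python
sites = {"addr1": {"modified": 10}, "addr2": {"modified": 99}}

def getSiteAddresses():
    # Snapshot of keys, newest first -- holds no references to the values
    return sorted(sites, key=lambda a: sites[a]["modified"], reverse=True)

for address in getSiteAddresses():
    site = sites.get(address)   # re-resolve; the site may be gone by now
    if not site:
        continue
    print(address, site["modified"])

# Deleting a site while iterating is now safe: del sites["addr1"] inside the
# loop would simply make the next lookup miss.
```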
@@ -90,14 +134,14 @@ class FileServer(ConnectionServer):
                 sock.bind((ip, port))
                 success = True
             except Exception as err:
-                self.log.warning("Error binding to port %s: %s" % (port, err))
+                log.warning("Error binding to port %s: %s" % (port, err))
                 success = False
             sock.close()
             if success:
-                self.log.info("Found unused random port: %s" % port)
+                log.info("Found unused random port: %s" % port)
                 return port
             else:
-                time.sleep(0.1)
+                self.sleep(0.1)
         return False

     def isIpv6Supported(self):
@@ -110,16 +154,16 @@ class FileServer(ConnectionServer):
             sock.connect((ipv6_testip, 80))
             local_ipv6 = sock.getsockname()[0]
             if local_ipv6 == "::1":
-                self.log.debug("IPv6 not supported, no local IPv6 address")
+                log.debug("IPv6 not supported, no local IPv6 address")
                 return False
             else:
-                self.log.debug("IPv6 supported on IP %s" % local_ipv6)
+                log.debug("IPv6 supported on IP %s" % local_ipv6)
                 return True
         except socket.error as err:
-            self.log.warning("IPv6 not supported: %s" % err)
+            log.warning("IPv6 not supported: %s" % err)
             return False
         except Exception as err:
-            self.log.error("IPv6 check error: %s" % err)
+            log.error("IPv6 check error: %s" % err)
             return False

     def listenProxy(self):
@@ -127,29 +171,34 @@ class FileServer(ConnectionServer):
             self.stream_server_proxy.serve_forever()
         except Exception as err:
             if err.errno == 98:  # Address already in use error
-                self.log.debug("StreamServer proxy listen error: %s" % err)
+                log.debug("StreamServer proxy listen error: %s" % err)
             else:
-                self.log.info("StreamServer proxy listen error: %s" % err)
+                log.info("StreamServer proxy listen error: %s" % err)

     # Handle request to fileserver
     def handleRequest(self, connection, message):
         if config.verbose:
             if "params" in message:
-                self.log.debug(
+                log.debug(
                     "FileRequest: %s %s %s %s" %
                     (str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
                 )
             else:
-                self.log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
+                log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
         req = FileRequest(self, connection)
         req.route(message["cmd"], message.get("req_id"), message.get("params"))
-        if not self.has_internet and not connection.is_private_ip:
-            self.has_internet = True
-            self.onInternetOnline()
+        if not connection.is_private_ip:
+            self.setInternetStatus(True)

     def onInternetOnline(self):
-        self.log.info("Internet online")
-        gevent.spawn(self.checkSites, check_files=False, force_port_check=True)
+        log.info("Internet online")
+        invalid_interval = (
+            self.internet_offline_since - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
+            time.time()
+        )
+        self.invalidateUpdateTime(invalid_interval)
+        self.recheck_port = True
+        self.spawn(self.updateSites)

     # Reload the FileRequest class to prevent restarts in debug mode
     def reload(self):
@@ -158,8 +207,8 @@ class FileServer(ConnectionServer):
         FileRequest = imp.load_source("FileRequest", "src/File/FileRequest.py").FileRequest

     def portCheck(self):
-        if config.offline:
-            self.log.info("Offline mode: port check disabled")
+        if self.isOfflineMode():
+            log.info("Offline mode: port check disabled")
             res = {"ipv4": None, "ipv6": None}
             self.port_opened = res
             return res
@@ -168,14 +217,14 @@ class FileServer(ConnectionServer):
             for ip_external in config.ip_external:
                 SiteManager.peer_blacklist.append((ip_external, self.port))  # Add myself to peer blacklist

-            ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
+            ip_external_types = set([self.getIpType(ip) for ip in config.ip_external])
             res = {
                 "ipv4": "ipv4" in ip_external_types,
                 "ipv6": "ipv6" in ip_external_types
             }
             self.ip_external_list = config.ip_external
             self.port_opened.update(res)
-            self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
+            log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
             return res

         self.port_opened = {}
@@ -183,7 +232,7 @@ class FileServer(ConnectionServer):
             self.ui_server.updateWebsocket()

         if "ipv6" in self.supported_ip_types:
-            res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
+            res_ipv6_thread = self.spawn(self.portchecker.portCheck, self.port, "ipv6")
         else:
             res_ipv6_thread = None
@@ -196,8 +245,8 @@ class FileServer(ConnectionServer):
             res_ipv6 = {"ip": None, "opened": None}
         else:
             res_ipv6 = res_ipv6_thread.get()
-            if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
-                self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
+            if res_ipv6["opened"] and not self.getIpType(res_ipv6["ip"]) == "ipv6":
+                log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
                 res_ipv6["opened"] = False

         self.ip_external_list = []
@@ -206,7 +255,7 @@ class FileServer(ConnectionServer):
                 self.ip_external_list.append(res_ip["ip"])
                 SiteManager.peer_blacklist.append((res_ip["ip"], self.port))

-        self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
+        log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))

         res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
@@ -217,9 +266,9 @@ class FileServer(ConnectionServer):
             for ip in interface_ips:
                 if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
                     self.ip_external_list.append(ip)
-                    res[helper.getIpType(ip)] = True  # We have opened port if we have external ip
+                    res[self.getIpType(ip)] = True  # We have opened port if we have external ip
                     SiteManager.peer_blacklist.append((ip, self.port))
-                    self.log.debug("External ip found on interfaces: %s" % ip)
+                    log.debug("External ip found on interfaces: %s" % ip)

         self.port_opened.update(res)
@@ -228,131 +277,381 @@ class FileServer(ConnectionServer):

         return res

-    # Check site file integrity
-    def checkSite(self, site, check_files=False):
-        if site.isServing():
-            site.announce(mode="startup")  # Announce site to tracker
-            site.update(check_files=check_files)  # Update site's content.json and download changed files
-            site.sendMyHashfield()
-            site.updateHashfield()
-
-    # Check sites integrity
-    @util.Noparallel()
-    def checkSites(self, check_files=False, force_port_check=False):
-        self.log.debug("Checking sites...")
-        s = time.time()
-        sites_checking = False
-        if not self.port_opened or force_port_check:  # Test and open port if not tested yet
-            if len(self.sites) <= 2:  # Don't wait port opening on first startup
-                sites_checking = True
-                for address, site in list(self.sites.items()):
-                    gevent.spawn(self.checkSite, site, check_files)
-
-            self.portCheck()
-
-            if not self.port_opened["ipv4"]:
-                self.tor_manager.startOnions()
-
-        if not sites_checking:
-            check_pool = gevent.pool.Pool(5)
-            # Check sites integrity
-            for site in sorted(list(self.sites.values()), key=lambda site: site.settings.get("modified", 0), reverse=True):
-                if not site.isServing():
-                    continue
-                check_thread = check_pool.spawn(self.checkSite, site, check_files)  # Check in new thread
-                time.sleep(2)
-                if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24:  # Not so active site, wait some sec to finish
-                    check_thread.join(timeout=5)
-        self.log.debug("Checksites done in %.3fs" % (time.time() - s))
+    @util.Noparallel(queue=True)
+    def recheckPort(self):
+        if self.recheck_port:
+            self.portCheck()
+            self.recheck_port = False
+
+    # Returns False if Internet is immediately available
+    # Returns True if we've spent some time waiting for Internet
+    # Returns None if FileServer is stopping or the Offline mode is enabled
+    @util.Noparallel()
+    def waitForInternetOnline(self):
+        if self.isOfflineMode() or self.stopping:
+            return None
+
+        if self.isInternetOnline():
+            return False
+
+        while not self.isInternetOnline():
+            self.sleep(30)
+            if self.isOfflineMode() or self.stopping:
+                return None
+            if self.isInternetOnline():
+                break
+            if len(self.update_pool) == 0:
+                log.info("Internet connection seems to be broken. Running an update for a random site to check if we are able to connect to any peer.")
+                thread = self.thread_pool.spawn(self.updateRandomSite)
+                thread.join()
+
+        self.recheckPort()
+        return True
def updateRandomSite(self, site_addresses=None, force=False):
|
||||||
|
if not site_addresses:
|
||||||
|
site_addresses = self.getSiteAddresses()
|
||||||
|
|
||||||
|
site_addresses = random.sample(site_addresses, 1)
|
||||||
|
if len(site_addresses) < 1:
|
||||||
|
return
|
||||||
|
|
||||||
|
address = site_addresses[0]
|
||||||
|
site = self.getSite(address)
|
||||||
|
|
||||||
|
if not site:
|
||||||
|
return
|
||||||
|
|
||||||
|
log.info("Randomly chosen site: %s", site.address_short)
|
||||||
|
|
||||||
|
self.spawnUpdateSite(site).join()
|
||||||
|
|
||||||
|
def updateSite(self, site, check_files=False, verify_files=False):
|
||||||
|
if not site:
|
||||||
|
return
|
||||||
|
if verify_files:
|
||||||
|
mode = 'verify'
|
||||||
|
elif check_files:
|
||||||
|
mode = 'check'
|
||||||
|
else:
|
||||||
|
mode = 'update'
|
||||||
|
log.info("running <%s> for %s" % (mode, site.address_short))
|
||||||
|
site.update2(check_files=check_files, verify_files=verify_files)
|
||||||
|
|
||||||
|
def spawnUpdateSite(self, site, check_files=False, verify_files=False):
|
||||||
|
fake_thread = FakeThread()
|
||||||
|
self.update_threads[site.address] = fake_thread
|
||||||
|
thread = self.update_pool.spawn(self.updateSite, site,
|
||||||
|
check_files=check_files, verify_files=verify_files)
|
||||||
|
self.update_threads[site.address] = thread
|
||||||
|
return thread
|
||||||
|
|
||||||
|
def lookupInUpdatePool(self, site_address):
|
||||||
|
thread = self.update_threads.get(site_address, None)
|
||||||
|
if not thread or thread.ready():
|
||||||
|
return None
|
||||||
|
return thread
|
||||||
|
|
||||||
|
def siteIsInUpdatePool(self, site_address):
|
||||||
|
return self.lookupInUpdatePool(site_address) is not None
|
||||||
|
|
||||||
|
def invalidateUpdateTime(self, invalid_interval):
|
||||||
|
for address in self.getSiteAddresses():
|
||||||
|
site = self.getSite(address)
|
||||||
|
if site:
|
||||||
|
site.invalidateUpdateTime(invalid_interval)
|
||||||
|
|
||||||
|
def isSiteUpdateTimeValid(self, site_address):
|
||||||
|
site = self.getSite(site_address)
|
||||||
|
if not site:
|
||||||
|
return False
|
||||||
|
return site.isUpdateTimeValid()
|
||||||
|
|
||||||
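spawnUpdateSite funnels every update through `update_pool` (capacity 10), so at most ten site updates run concurrently, while `lookupInUpdatePool`/`siteIsInUpdatePool` let other threads cheaply ask whether a site is already being updated. A runnable sketch of the bounded-pool idea under gevent; the helper names and the plain dict are illustrative simplifications:

```python
import gevent
import gevent.pool

update_pool = gevent.pool.Pool(10)  # at most 10 concurrent updates
update_threads = {}

def update_site(address):
    gevent.sleep(0.01)  # stand-in for the real update work

def spawn_update(address):
    # Pool.spawn blocks the caller until a slot is free, giving backpressure
    thread = update_pool.spawn(update_site, address)
    update_threads[address] = thread
    return thread

def is_updating(address):
    thread = update_threads.get(address)
    return bool(thread) and not thread.ready()

threads = [spawn_update("site-%d" % i) for i in range(25)]  # queues beyond 10
gevent.joinall(threads)
assert not any(is_updating("site-%d" % i) for i in range(25))
```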
+    def updateSites(self):
+        task_nr = self.update_sites_task_next_nr
+        self.update_sites_task_next_nr += 1
+
+        task_description = "updateSites [#%d]" % task_nr
+        log.info("%s: started", task_description)
+
+        # Don't wait port opening on first startup. Do the instant check now.
+        if len(self.getSites()) <= 2:
+            for address, site in list(self.getSites().items()):
+                self.updateSite(site, check_files=True)
+
+        self.recheckPort()
+
+        all_site_addresses = self.getSiteAddresses()
+        site_addresses = [
+            address for address in all_site_addresses
+            if not self.isSiteUpdateTimeValid(address)
+        ]
+
+        log.info("%s: chosen %d sites (of %d)", task_description, len(site_addresses), len(all_site_addresses))
+
+        sites_processed = 0
+        sites_skipped = 0
+        start_time = time.time()
+        self.update_start_time = start_time
+        progress_print_time = time.time()
+
+        # Check sites integrity
+        for site_address in site_addresses:
+            site = None
+            self.sleep(1)
+            self.waitForInternetOnline()
+
+            while self.isActiveMode() and self.shouldThrottleNewConnections():
+                self.sleep(1)
+
+            if not self.isActiveMode():
+                break
+
+            site = self.getSite(site_address)
+            if not site or site.isUpdateTimeValid() or self.siteIsInUpdatePool(site_address):
+                sites_skipped += 1
+                continue
+
+            sites_processed += 1
+            thread = self.spawnUpdateSite(site)
+
+            if not self.isActiveMode():
+                break
+
+            if time.time() - progress_print_time > 60:
+                progress_print_time = time.time()
+                time_spent = time.time() - start_time
+                time_per_site = time_spent / float(sites_processed)
+                sites_left = len(site_addresses) - sites_processed
+                time_left = time_per_site * sites_left
+                log.info("%s: DONE: %d sites in %.2fs (%.2fs per site); SKIPPED: %d sites; LEFT: %d sites in %.2fs",
+                    task_description,
+                    sites_processed,
+                    time_spent,
+                    time_per_site,
+                    sites_skipped,
+                    sites_left,
+                    time_left
+                )
+
+        if not self.isActiveMode():
+            log.info("%s: stopped", task_description)
+        else:
+            log.info("%s: finished in %.2fs", task_description, time.time() - start_time)
+
+    def peekSiteForVerification(self):
+        check_files_interval = 60 * 60 * 24
+        verify_files_interval = 60 * 60 * 24 * 10
+        site_addresses = self.getSiteAddresses()
+        random.shuffle(site_addresses)
+        for site_address in site_addresses:
+            site = self.getSite(site_address)
+            if not site:
+                continue
+            mode = site.isFileVerificationExpired(check_files_interval, verify_files_interval)
+            if mode:
+                return (site_address, mode)
+        return (None, None)
+
+    def sitesVerificationThread(self):
+        log.info("sitesVerificationThread started")
+        short_timeout = 20
+        long_timeout = 120
+
+        self.sleep(long_timeout)
+
+        while self.isActiveMode():
+            site = None
+            self.sleep(short_timeout)
+            self.waitForInternetOnline()
+
+            while self.isActiveMode() and self.shouldThrottleNewConnections():
+                self.sleep(1)
+
+            if not self.isActiveMode():
+                break
+
+            site_address, mode = self.peekSiteForVerification()
+            if not site_address:
+                self.sleep(long_timeout)
+                continue
+
+            while self.siteIsInUpdatePool(site_address) and self.isActiveMode():
+                self.sleep(1)
+
+            if not self.isActiveMode():
+                break
+
+            site = self.getSite(site_address)
+            if not site:
+                continue
+
+            if mode == "verify":
+                check_files = False
+                verify_files = True
+            elif mode == "check":
+                check_files = True
+                verify_files = False
+            else:
+                continue
+
+            thread = self.spawnUpdateSite(site,
+                check_files=check_files, verify_files=verify_files)
+
+        log.info("sitesVerificationThread stopped")
def sitesMaintenanceThread(self, mode="full"):
|
||||||
|
log.info("sitesMaintenanceThread(%s) started" % mode)
|
||||||
|
|
||||||
def cleanupSites(self):
|
|
||||||
import gc
|
|
||||||
startup = True
|
startup = True
|
||||||
time.sleep(5 * 60) # Sites already cleaned up on startup
|
|
||||||
peers_protected = set([])
|
short_timeout = 2
|
||||||
while 1:
|
min_long_timeout = 10
|
||||||
# Sites health care every 20 min
|
max_long_timeout = 60 * 10
|
||||||
self.log.debug(
|
long_timeout = min_long_timeout
|
||||||
"Running site cleanup, connections: %s, internet: %s, protected peers: %s" %
|
short_cycle_time_limit = 60 * 2
|
||||||
(len(self.connections), self.has_internet, len(peers_protected))
|
|
||||||
|
while self.isActiveMode():
|
||||||
|
self.sleep(long_timeout)
|
||||||
|
|
||||||
|
while self.isActiveMode() and self.shouldThrottleNewConnections():
|
||||||
|
self.sleep(1)
|
||||||
|
|
||||||
|
if not self.isActiveMode():
|
||||||
|
break
|
||||||
|
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
|
log.debug(
|
||||||
|
"Starting <%s> maintenance cycle: connections=%s, internet=%s",
|
||||||
|
mode,
|
||||||
|
len(self.connections), self.isInternetOnline()
|
||||||
|
)
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
|
site_addresses = self.getSiteAddresses()
|
||||||
|
|
||||||
|
sites_processed = 0
|
||||||
|
|
||||||
|
for site_address in site_addresses:
|
||||||
|
if not self.isActiveMode():
|
||||||
|
break
|
||||||
|
|
||||||
|
site = self.getSite(site_address)
|
||||||
|
if not site:
|
||||||
|
continue
|
||||||
|
|
||||||
|
log.debug("Running maintenance for site: %s", site.address_short)
|
||||||
|
|
||||||
|
done = site.runPeriodicMaintenance(startup=startup)
|
||||||
|
site = None
|
||||||
|
if done:
|
||||||
|
sites_processed += 1
|
||||||
|
self.sleep(short_timeout)
|
||||||
|
|
||||||
|
# If we host hundreds of sites, the full maintenance cycle may take very
|
||||||
|
# long time, especially on startup ( > 1 hour).
|
||||||
|
# This means we are not able to run the maintenance procedure for active
|
||||||
|
# sites frequently enough using just a single maintenance thread.
|
||||||
|
# So we run 2 maintenance threads:
|
||||||
|
# * One running full cycles.
|
||||||
|
# * And one running short cycles for the most active sites.
|
||||||
|
# When the short cycle runs out of the time limit, it restarts
|
||||||
|
# from the beginning of the site list.
|
||||||
|
if mode == "short" and time.time() - start_time > short_cycle_time_limit:
|
||||||
|
break
|
||||||
|
|
||||||
|
log.debug("<%s> maintenance cycle finished in %.2fs. Total sites: %d. Processed sites: %d. Timeout: %d",
|
||||||
|
mode,
|
||||||
|
time.time() - start_time,
|
||||||
|
len(site_addresses),
|
||||||
|
sites_processed,
|
||||||
|
long_timeout
|
||||||
)
|
)
|
||||||
|
|
||||||
for address, site in list(self.sites.items()):
|
if sites_processed:
|
||||||
if not site.isServing():
|
long_timeout = max(int(long_timeout / 2), min_long_timeout)
|
||||||
continue
|
else:
|
||||||
|
long_timeout = min(long_timeout + 1, max_long_timeout)
|
||||||
|
|
||||||
if not startup:
|
site_addresses = None
|
||||||
site.cleanupPeers(peers_protected)
|
|
||||||
|
|
||||||
time.sleep(1) # Prevent too quick request
|
|
||||||
|
|
||||||
peers_protected = set([])
|
|
||||||
for address, site in list(self.sites.items()):
|
|
||||||
if not site.isServing():
|
|
||||||
continue
|
|
||||||
|
|
||||||
if site.peers:
|
|
||||||
with gevent.Timeout(10, exception=False):
|
|
||||||
site.announcer.announcePex()
|
|
||||||
|
|
||||||
# Last check modification failed
|
|
||||||
if site.content_updated is False:
|
|
||||||
site.update()
|
|
||||||
elif site.bad_files:
|
|
||||||
site.retryBadFiles()
|
|
||||||
|
|
||||||
if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
|
|
||||||
# Keep active connections if site has been modified witin 7 days
|
|
||||||
connected_num = site.needConnections(check_site_on_reconnect=True)
|
|
||||||
|
|
||||||
if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing
|
|
||||||
peers_protected.update([peer.key for peer in site.getConnectedPeers()])
|
|
||||||
|
|
||||||
time.sleep(1) # Prevent too quick request
|
|
||||||
|
|
||||||
site = None
|
|
||||||
gc.collect() # Implicit garbage collection
|
|
||||||
startup = False
|
startup = False
|
||||||
time.sleep(60 * 20)
|
log.info("sitesMaintenanceThread(%s) stopped" % mode)
|
||||||
|
|
||||||
def announceSite(self, site):
|
def keepAliveThread(self):
|
||||||
site.announce(mode="update", pex=False)
|
# This thread is mostly useless on a system under load, since it never does
|
||||||
active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
|
# any works, if we have active traffic.
|
||||||
if site.settings["own"] or active_site:
|
#
|
||||||
# Check connections more frequently on own and active sites to speed-up first connections
|
# We should initiate some network activity to detect the Internet outage
|
||||||
site.needConnections(check_site_on_reconnect=True)
|
# and avoid false positives. We normally have some network activity
|
||||||
site.sendMyHashfield(3)
|
# initiated by various parts on the application as well as network peers.
|
||||||
site.updateHashfield(3)
|
# So it's not a problem.
|
||||||
|
#
|
||||||
|
# However, if it actually happens that we have no network traffic for
|
||||||
|
# some time (say, we host just a couple of inactive sites, and no peers
|
||||||
|
# are interested in connecting to them), we initiate some traffic by
|
||||||
|
# performing the update for a random site. It's way better than just
|
||||||
|
# silly pinging a random peer for no profit.
|
||||||
|
log.info("keepAliveThread started")
|
||||||
|
while self.isActiveMode():
|
||||||
|
self.waitForInternetOnline()
|
||||||
|
|
||||||
# Announce sites every 20 min
|
threshold = self.internet_outage_threshold / 2.0
|
||||||
def announceSites(self):
|
|
||||||
time.sleep(5 * 60) # Sites already announced on startup
|
self.sleep(threshold / 2.0)
|
||||||
while 1:
|
|
||||||
|
while self.isActiveMode() and self.shouldThrottleNewConnections():
|
||||||
|
self.sleep(1)
|
||||||
|
|
||||||
|
if not self.isActiveMode():
|
||||||
|
break
|
||||||
|
|
||||||
|
last_activity_time = max(
|
||||||
|
self.last_successful_internet_activity_time,
|
||||||
|
self.last_outgoing_internet_activity_time)
|
||||||
|
now = time.time()
|
||||||
|
if not len(self.getSites()):
|
||||||
|
continue
|
||||||
|
if last_activity_time > now - threshold:
|
||||||
|
continue
|
||||||
|
if len(self.update_pool) != 0:
|
||||||
|
continue
|
||||||
|
|
||||||
|
log.info("No network activity for %.2fs. Running an update for a random site.",
|
||||||
|
now - last_activity_time
|
||||||
|
)
|
||||||
|
self.update_pool.spawn(self.updateRandomSite, force=True)
|
||||||
|
log.info("keepAliveThread stopped")
|
||||||
|
|
||||||
|
# Periodic reloading of tracker files
|
||||||
|
def reloadTrackerFilesThread(self):
|
||||||
|
# TODO:
|
||||||
|
# This should probably be more sophisticated.
|
||||||
|
# We should check if the files have actually changed,
|
||||||
|
# and do it more often.
|
||||||
|
log.info("reloadTrackerFilesThread started")
|
||||||
|
interval = 60 * 10
|
||||||
|
while self.isActiveMode():
|
||||||
|
self.sleep(interval)
|
||||||
|
if not self.isActiveMode():
|
||||||
|
break
|
||||||
config.loadTrackersFile()
|
config.loadTrackersFile()
|
||||||
s = time.time()
|
log.info("reloadTrackerFilesThread stopped")
|
||||||
for address, site in list(self.sites.items()):
|
|
||||||
if not site.isServing():
|
|
||||||
continue
|
|
||||||
gevent.spawn(self.announceSite, site).join(timeout=10)
|
|
||||||
time.sleep(1)
|
|
||||||
taken = time.time() - s
|
|
||||||
|
|
||||||
# Query all trackers one-by-one in 20 minutes evenly distributed
|
|
||||||
sleep = max(0, 60 * 20 / len(config.trackers) - taken)
|
|
||||||
|
|
||||||
self.log.debug("Site announce tracker done in %.3fs, sleeping for %.3fs..." % (taken, sleep))
|
|
||||||
time.sleep(sleep)
|
|
||||||
|
|
||||||
# Detects if computer back from wakeup
|
# Detects if computer back from wakeup
|
||||||
def wakeupWatcher(self):
|
def wakeupWatcherThread(self):
|
||||||
|
log.info("wakeupWatcherThread started")
|
||||||
last_time = time.time()
|
last_time = time.time()
|
||||||
last_my_ips = socket.gethostbyname_ex('')[2]
|
last_my_ips = socket.gethostbyname_ex('')[2]
|
||||||
while 1:
|
while self.isActiveMode():
|
||||||
time.sleep(30)
|
self.sleep(30)
|
||||||
|
if not self.isActiveMode():
|
||||||
|
break
|
||||||
is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
|
is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
|
||||||
if is_time_changed:
|
if is_time_changed:
|
||||||
# If taken more than 3 minute then the computer was in sleep mode
|
# If taken more than 3 minute then the computer was in sleep mode
|
||||||
self.log.info(
|
log.info(
|
||||||
"Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
|
"Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
|
||||||
(last_time, time.time(), time.time() - last_time)
|
(last_time, time.time(), time.time() - last_time)
|
||||||
)
|
)
|
||||||
|
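The maintenance loop's `long_timeout` is adaptive: halved (down to 10 s) after a cycle that actually processed sites, incremented by one second (up to 10 min) after an idle one, so busy nodes poll faster while idle ones back off. A tiny runnable model of just that control rule:

```python
MIN_TIMEOUT = 10
MAX_TIMEOUT = 60 * 10

def next_timeout(current, sites_processed):
    if sites_processed:
        return max(int(current / 2), MIN_TIMEOUT)   # speed up while there is work
    return min(current + 1, MAX_TIMEOUT)            # creep back up when idle

timeout = MIN_TIMEOUT
for processed in [5, 0, 0, 3]:
    timeout = next_timeout(timeout, processed)
    print(timeout)   # 10, 11, 12, 10
```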
@@ -360,50 +659,130 @@ class FileServer(ConnectionServer):
             my_ips = socket.gethostbyname_ex('')[2]
             is_ip_changed = my_ips != last_my_ips
             if is_ip_changed:
-                self.log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
+                log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))

             if is_time_changed or is_ip_changed:
-                self.checkSites(check_files=False, force_port_check=True)
+                invalid_interval = (
+                    last_time - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
+                    time.time()
+                )
+                self.invalidateUpdateTime(invalid_interval)
+                self.recheck_port = True
+                self.spawn(self.updateSites)

             last_time = time.time()
             last_my_ips = my_ips
+        log.info("wakeupWatcherThread stopped")
+
+    def setOfflineMode(self, offline_mode):
+        ConnectionServer.setOfflineMode(self, offline_mode)
+        self.setupActiveMode()
+
+    def setPassiveMode(self, passive_mode):
+        if self.passive_mode == passive_mode:
+            return
+        self.passive_mode = passive_mode
+        if self.passive_mode:
+            log.info("passive mode is ON")
+        else:
+            log.info("passive mode is OFF")
+        self.setupActiveMode()
+
+    def isPassiveMode(self):
+        return self.passive_mode
+
+    def setupActiveMode(self):
+        active_mode = (not self.passive_mode) and (not self.isOfflineMode())
+        if self.active_mode == active_mode:
+            return
+        self.active_mode = active_mode
+        if self.active_mode:
+            log.info("active mode is ON")
+            self.enterActiveMode()
+        else:
+            log.info("active mode is OFF")
+            self.leaveActiveMode()
+
+    def killActiveModeThreads(self):
+        for key, thread in list(self.active_mode_threads.items()):
+            if thread:
+                if not thread.ready():
+                    log.info("killing %s" % key)
+                    gevent.kill(thread)
+                del self.active_mode_threads[key]
+
+    def leaveActiveMode(self):
+        pass
+
+    def enterActiveMode(self):
+        self.killActiveModeThreads()
+        x = self.active_mode_threads
+        p = self.active_mode_thread_pool
+        x["thread_keep_alive"] = p.spawn(self.keepAliveThread)
+        x["thread_wakeup_watcher"] = p.spawn(self.wakeupWatcherThread)
+        x["thread_sites_verification"] = p.spawn(self.sitesVerificationThread)
+        x["thread_reload_tracker_files"] = p.spawn(self.reloadTrackerFilesThread)
+        x["thread_sites_maintenance_full"] = p.spawn(self.sitesMaintenanceThread, mode="full")
+        x["thread_sites_maintenance_short"] = p.spawn(self.sitesMaintenanceThread, mode="short")
+        x["thread_initial_site_updater"] = p.spawn(self.updateSites)
+
+    # Returns True, if an active mode thread should keep going,
+    # i.e active mode is enabled and the server not going to shutdown
+    def isActiveMode(self):
+        self.setupActiveMode()
+        if not self.active_mode:
+            return False
+        if not self.running:
+            return False
+        if self.stopping:
+            return False
+        return True

     # Bind and start serving sites
-    def start(self, check_sites=True):
+    # If passive_mode is False, FileServer starts the full-featured file serving:
+    # * Checks for updates at startup.
+    # * Checks site's integrity.
+    # * Runs periodic update checks.
+    # * Watches for internet being up or down and for computer to wake up and runs update checks.
+    # If passive_mode is True, all the mentioned activity is disabled.
+    def start(self, passive_mode=False, check_sites=None, check_connections=True):
+
+        # Backward compatibility for a misnamed argument:
+        if check_sites is not None:
+            passive_mode = not check_sites
+
         if self.stopping:
             return False

-        ConnectionServer.start(self)
+        ConnectionServer.start(self, check_connections=check_connections)
+
         try:
             self.stream_server.start()
         except Exception as err:
-            self.log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
+            log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))

-        self.sites = self.site_manager.list()
         if config.debug:
             # Auto reload FileRequest on change
             from Debug import DebugReloader
             DebugReloader.watcher.addCallback(self.reload)

-        if check_sites:  # Open port, Update sites, Check files integrity
-            gevent.spawn(self.checkSites)
+        # XXX: for initializing self.sites
+        # Remove this line when self.sites gets completely unused
+        self.getSites()

-        thread_announce_sites = gevent.spawn(self.announceSites)
-        thread_cleanup_sites = gevent.spawn(self.cleanupSites)
-        thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
+        self.setPassiveMode(passive_mode)

         ConnectionServer.listen(self)

-        self.log.debug("Stopped.")
+        log.info("Stopped.")

-    def stop(self):
+    def stop(self, ui_websocket=None):
         if self.running and self.portchecker.upnp_port_opened:
-            self.log.debug('Closing port %d' % self.port)
+            log.debug('Closing port %d' % self.port)
             try:
                 self.portchecker.portClose(self.port)
-                self.log.info('Closed port via upnp.')
+                log.info('Closed port via upnp.')
             except Exception as err:
-                self.log.info("Failed at attempt to use upnp to close port: %s" % err)
+                log.info("Failed at attempt to use upnp to close port: %s" % err)

-        return ConnectionServer.stop(self)
+        return ConnectionServer.stop(self, ui_websocket=ui_websocket)
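start() now takes `passive_mode` and maps the legacy `check_sites` flag onto it, so old callers keep working unchanged. A stripped-down, runnable model of just the argument translation (the real method of course does far more than return the flag):

```python
def start(passive_mode=False, check_sites=None):
    # Backward compatibility: the old check_sites flag is the inverse of passive_mode
    if check_sites is not None:
        passive_mode = not check_sites
    return passive_mode

assert start() is False                   # default: active mode
assert start(check_sites=False) is True   # legacy caller gets passive mode
assert start(check_sites=True) is False
assert start(passive_mode=True) is True
```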
src/Peer/Peer.py (193 changed lines)
@@ -20,51 +20,135 @@ if config.use_tempfiles:
 # Communicate remote peers
 @PluginManager.acceptPlugins
 class Peer(object):
-    __slots__ = (
-        "ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield",
-        "time_added", "has_hashfield", "is_tracker_connection", "time_my_hashfield_sent", "last_ping", "reputation",
-        "last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time"
-    )

     def __init__(self, ip, port, site=None, connection_server=None):
         self.ip = ip
         self.port = port
         self.site = site
         self.key = "%s:%s" % (ip, port)

+        self.ip_type = None
+
+        self.removed = False
+
+        self.log_level = logging.DEBUG
+        self.connection_error_log_level = logging.DEBUG
+
         self.connection = None
         self.connection_server = connection_server
         self.has_hashfield = False  # Lazy hashfield object not created yet
         self.time_hashfield = None  # Last time peer's hashfiled downloaded
         self.time_my_hashfield_sent = None  # Last time my hashfield sent to peer
         self.time_found = time.time()  # Time of last found in the torrent tracker
-        self.time_response = None  # Time of last successful response from peer
+        self.time_response = 0  # Time of last successful response from peer
         self.time_added = time.time()
         self.last_ping = None  # Last response time for ping
+        self.last_pex = 0  # Last query/response time for pex
         self.is_tracker_connection = False  # Tracker connection instead of normal peer
         self.reputation = 0  # More likely to connect if larger
         self.last_content_json_update = 0.0  # Modify date of last received content.json
+        self.protected = 0
+        self.reachable = None

         self.connection_error = 0  # Series of connection error
         self.hash_failed = 0  # Number of bad files from peer
         self.download_bytes = 0  # Bytes downloaded
         self.download_time = 0  # Time spent to download

+        self.protectedRequests = ["getFile", "streamFile", "update", "listModified"]
+
     def __getattr__(self, key):
         if key == "hashfield":
             self.has_hashfield = True
             self.hashfield = PeerHashfield()
             return self.hashfield
         else:
-            return getattr(self, key)
+            # Raise appropriately formatted attribute error
+            return object.__getattribute__(self, key)

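The replaced fallback is a real bug fix: `getattr(self, key)` inside `__getattr__` re-enters `__getattr__` for any attribute that genuinely doesn't exist, so a simple typo produced a `RecursionError` instead of an `AttributeError`. `object.__getattribute__` bypasses the instance's `__getattr__` hook. A minimal sketch of the difference (class names hypothetical):

class Broken:
    def __getattr__(self, key):
        return getattr(self, key)  # re-enters __getattr__ -> RecursionError

class Fixed:
    def __getattr__(self, key):
        # Bypasses the __getattr__ hook, so a genuinely missing attribute
        # raises a normal, well-formatted AttributeError instead of recursing.
        return object.__getattribute__(self, key)

try:
    Fixed().nope
except AttributeError as err:
    print(err)  # 'Fixed' object has no attribute 'nope'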
+    def log(self, text, log_level = None):
+        if log_level is None:
+            log_level = self.log_level
+        if log_level <= logging.DEBUG:
+            if not config.verbose:
+                return  # Only log if we are in debug mode
+
+        logger = None

-    def log(self, text):
-        if not config.verbose:
-            return  # Only log if we are in debug mode
         if self.site:
-            self.site.log.debug("%s:%s %s" % (self.ip, self.port, text))
+            logger = self.site.log
         else:
-            logging.debug("%s:%s %s" % (self.ip, self.port, text))
+            logger = logging.getLogger()
+
+        logger.log(log_level, "%s:%s %s" % (self.ip, self.port, text))

+    # Protect connection from being closed by site.cleanupPeers()
+    def markProtected(self, interval=60*2):
+        self.protected = max(self.protected, time.time() + interval)
+
+    def isProtected(self):
+        if self.protected > 0:
+            if self.protected < time.time():
+                self.protected = 0
+        return self.protected > 0
+
+    def isTtlExpired(self, ttl):
+        last_activity = max(self.time_found, self.time_response)
+        return (time.time() - last_activity) > ttl
+
+    # Since 0.8.0
+    def isConnected(self):
+        if self.connection and not self.connection.connected:
+            self.connection = None
+        return self.connection and self.connection.connected
+
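Note that `protected` stores an absolute expiry timestamp rather than a boolean: repeated `markProtected()` calls can only ever extend the window (via `max`), and `isProtected()` lazily resets the field once the deadline passes, so no background timer is needed. The same pattern in isolation (sketch, names hypothetical):

import time

class ProtectionWindow:
    def __init__(self):
        self.protected_until = 0

    def mark(self, interval=120):
        # max() so an earlier, longer protection is never shortened
        self.protected_until = max(self.protected_until, time.time() + interval)

    def active(self):
        if self.protected_until and self.protected_until < time.time():
            self.protected_until = 0  # lazy reset once expired
        return self.protected_until > 0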
+    # Peer proved to to be connectable recently
+    # Since 0.8.0
+    def isConnectable(self):
+        if self.connection_error >= 1:  # The last connection attempt failed
+            return False
+        if time.time() - self.time_response > 60 * 60 * 2:  # Last successful response more than 2 hours ago
+            return False
+        return self.isReachable()
+
+    # Since 0.8.0
+    def isReachable(self):
+        if self.reachable is None:
+            self.updateCachedState()
+        return self.reachable
+
+    # Since 0.8.0
+    def getIpType(self):
+        if not self.ip_type:
+            self.updateCachedState()
+        return self.ip_type
+
+    # We cache some ConnectionServer-related state for better performance.
+    # This kind of state currently doesn't change during a program session,
+    # and it's safe to read and cache it just once. But future versions
+    # may bring more pieces of dynamic configuration. So we update the state
+    # on each peer.found().
+    def updateCachedState(self):
+        connection_server = self.getConnectionServer()
+        if not self.port or self.port == 1:  # Port 1 considered as "no open port"
+            self.reachable = False
+        else:
+            self.reachable = connection_server.isIpReachable(self.ip)
+        self.ip_type = connection_server.getIpType(self.ip)
+
+    # FIXME:
+    # This should probably be changed.
+    # When creating a peer object, the caller must provide either `connection_server`,
+    # or `site`, so Peer object is able to use `site.connection_server`.
+    def getConnectionServer(self):
+        if self.connection_server:
+            connection_server = self.connection_server
+        elif self.site:
+            connection_server = self.site.connection_server
+        else:
+            import main
+            connection_server = main.file_server
+        return connection_server
+
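The reachability/IP-type cache is resolved lazily on first use and refreshed on every `found()` call, so the hot path is a plain attribute read. Roughly how a caller sees it (usage sketch; `file_server` stands in for the running ConnectionServer and is an assumption):

# First call resolves via connection_server.isIpReachable() and caches the
# result; subsequent calls read self.reachable until found() refreshes it.
peer = Peer("1.2.3.4", 15441, connection_server=file_server)
peer.isReachable()  # resolves and caches
peer.isReachable()  # cheap cached read, no ConnectionServer round trip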
     # Connect to host
     def connect(self, connection=None):

@@ -87,29 +171,30 @@ class Peer(object):
         self.connection = None

         try:
-            if self.connection_server:
-                connection_server = self.connection_server
-            elif self.site:
-                connection_server = self.site.connection_server
-            else:
-                import main
-                connection_server = main.file_server
+            connection_server = self.getConnectionServer()
             self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
-            self.reputation += 1
-            self.connection.sites += 1
+            if self.connection and self.connection.connected:
+                self.reputation += 1
+                self.connection.sites += 1
         except Exception as err:
             self.onConnectionError("Getting connection error")
             self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
-                     (Debug.formatException(err), self.connection_error, self.hash_failed))
+                     (Debug.formatException(err), self.connection_error, self.hash_failed),
+                     log_level=self.connection_error_log_level)
             self.connection = None
         return self.connection

+    def disconnect(self, reason="Unknown"):
+        if self.connection:
+            self.connection.close(reason)
+            self.connection = None

     # Check if we have connection to peer
     def findConnection(self):
         if self.connection and self.connection.connected:  # We have connection to peer
             return self.connection
         else:  # Try to find from other sites connections
-            self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site)
+            self.connection = self.getConnectionServer().getConnection(self.ip, self.port, create=False, site=self.site)
             if self.connection:
                 self.connection.sites += 1
         return self.connection
@@ -143,9 +228,13 @@ class Peer(object):
         if source in ("tracker", "local"):
             self.site.peers_recent.appendleft(self)
         self.time_found = time.time()
+        self.updateCachedState()

     # Send a command to peer and return response value
     def request(self, cmd, params={}, stream_to=None):
+        if self.removed:
+            return False
+
         if not self.connection or self.connection.closed:
             self.connect()
             if not self.connection:

@@ -156,6 +245,8 @@ class Peer(object):

         for retry in range(1, 4):  # Retry 3 times
             try:
+                if cmd in self.protectedRequests:
+                    self.markProtected()
                 if not self.connection:
                     raise Exception("No connection found")
                 res = self.connection.request(cmd, params, stream_to)

@@ -188,6 +279,9 @@ class Peer(object):

     # Get a file content from peer
     def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
+        if self.removed:
+            return False
+
         if file_size and file_size > 5 * 1024 * 1024:
             max_read_size = 1024 * 1024
         else:

@@ -241,11 +335,14 @@ class Peer(object):
         return buff

     # Send a ping request
-    def ping(self):
+    def ping(self, timeout=10.0, tryes=3):
+        if self.removed:
+            return False
+
         response_time = None
-        for retry in range(1, 3):  # Retry 3 times
+        for retry in range(1, tryes):  # Retry 3 times
             s = time.time()
-            with gevent.Timeout(10.0, False):  # 10 sec timeout, don't raise exception
+            with gevent.Timeout(timeout, False):
                 res = self.request("ping")

             if res and "body" in res and res["body"] == b"Pong!":

@@ -264,10 +361,18 @@ class Peer(object):
         return response_time

     # Request peer exchange from peer
-    def pex(self, site=None, need_num=5):
+    def pex(self, site=None, need_num=5, request_interval=60*2):
+        if self.removed:
+            return False
+
         if not site:
             site = self.site  # If no site defined request peers for this site

+        if self.last_pex + request_interval >= time.time():
+            return False
+
+        self.last_pex = time.time()
+
         # give back 5 connectible peers
         packed_peers = helper.packPeers(self.site.getConnectablePeers(5, allow_private=False))
         request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
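The new `request_interval` guard makes `pex()` self-throttling: `last_pex` is stamped before the request is even issued (and refreshed again when the response arrives, in the next hunk), so a failing or flapping peer cannot be queried on every announce cycle. The pattern on its own (sketch):

import time

last_pex = 0

def pex_allowed(request_interval=60 * 2):
    global last_pex
    if last_pex + request_interval >= time.time():
        return False           # asked this peer too recently, skip
    last_pex = time.time()     # stamp first, so failures are throttled too
    return True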
@@ -276,6 +381,7 @@ class Peer(object):
         if packed_peers["ipv6"]:
             request["peers_ipv6"] = packed_peers["ipv6"]
         res = self.request("pex", request)
+        self.last_pex = time.time()
         if not res or "error" in res:
             return False
         added = 0

@@ -307,9 +413,14 @@ class Peer(object):
     # List modified files since the date
     # Return: {inner_path: modification date,...}
     def listModified(self, since):
+        if self.removed:
+            return False
         return self.request("listModified", {"since": since, "site": self.site.address})

     def updateHashfield(self, force=False):
+        if self.removed:
+            return False
+
         # Don't update hashfield again in 5 min
         if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
             return False

@@ -325,6 +436,9 @@ class Peer(object):
     # Find peers for hashids
     # Return: {hash1: ["ip:port", "ip:port",...],...}
     def findHashIds(self, hash_ids):
+        if self.removed:
+            return False
+
         res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
         if not res or "error" in res or type(res) is not dict:
             return False

@@ -368,6 +482,9 @@ class Peer(object):
         return True

     def publish(self, address, inner_path, body, modified, diffs=[]):
+        if self.removed:
+            return False
+
         if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
             # To save bw we don't push big content.json to peers
             body = b""

@@ -382,20 +499,22 @@ class Peer(object):

     # Stop and remove from site
     def remove(self, reason="Removing"):
-        self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed))
-        if self.site and self.key in self.site.peers:
-            del(self.site.peers[self.key])
-
-        if self.site and self in self.site.peers_recent:
-            self.site.peers_recent.remove(self)
-
-        if self.connection:
-            self.connection.close(reason)
+        self.removed = True
+        self.log("Removing peer with reason: <%s>. Connection error: %s, Hash failed: %s" % (reason, self.connection_error, self.hash_failed))
+        if self.site:
+            self.site.deregisterPeer(self)
+            # No way: self.site = None
+            # We don't assign None to self.site here because it leads to random exceptions in various threads,
+            # that hold references to the peer and still believe it belongs to the site.
+
+        self.disconnect(reason)

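Taken together, `remove()` now only marks the peer as a tombstone and every networked entry point (`request`, `getFile`, `ping`, `pex`, `listModified`, `updateHashfield`, `findHashIds`, `publish`) checks `self.removed` and bails out with `False`. Since other greenlets may still hold a reference to the peer after it is deregistered from the site, a flag is safer than tearing the object down. The guard pattern in isolation (sketch):

class Removable:
    # Minimal sketch of the tombstone pattern used above.
    def __init__(self):
        self.removed = False

    def remove(self):
        self.removed = True  # mark only; other greenlets may still hold references

    def request(self, cmd):
        if self.removed:     # every entry point refuses work after removal
            return False
        return "response to %s" % cmd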
     # - EVENTS -

     # On connection error
     def onConnectionError(self, reason="Unknown"):
+        if not self.getConnectionServer().isInternetOnline():
+            return
         self.connection_error += 1
         if self.site and len(self.site.peers) > 200:
             limit = 3

@@ -403,7 +522,7 @@ class Peer(object):
             limit = 6
         self.reputation -= 1
         if self.connection_error >= limit:  # Dead peer
-            self.remove("Peer connection: %s" % reason)
+            self.remove("Connection error limit reached: %s. Provided message: %s" % (limit, reason))

     # Done working with peer
     def onWorkerDone(self):
src/Plugin/PluginManager.py

@@ -16,9 +16,7 @@ import plugins
 class PluginManager:
     def __init__(self):
         self.log = logging.getLogger("PluginManager")
-        self.path_plugins = None
-        if plugins.__file__:
-            self.path_plugins = os.path.dirname(os.path.abspath(plugins.__file__));
+        self.path_plugins = os.path.abspath(os.path.dirname(plugins.__file__))
         self.path_installed_plugins = config.data_dir + "/__plugins__"
         self.plugins = defaultdict(list)  # Registered plugins (key: class name, value: list of plugins for class)
         self.subclass_order = {}  # Record the load order of the plugins, to keep it after reload

@@ -34,8 +32,7 @@ class PluginManager:

         self.config.setdefault("builtin", {})

-        if self.path_plugins:
-            sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
+        sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
         self.migratePlugins()

         if config.debug:  # Auto reload Plugins on file change

@@ -130,8 +127,6 @@ class PluginManager:
     def loadPlugins(self):
         all_loaded = True
         s = time.time()
-        if self.path_plugins is None:
-            return
         for plugin in self.listPlugins():
             self.log.debug("Loading plugin: %s (%s)" % (plugin["name"], plugin["source"]))
             if plugin["source"] != "builtin":
src/Site/Site.py (1023 changes)
File diff suppressed because it is too large.
src/Site/SiteAnnouncer.py

@@ -1,6 +1,7 @@
 import random
 import time
 import hashlib
+import logging
 import re
 import collections

@@ -12,6 +13,7 @@ from Debug import Debug
 from util import helper
 from greenlet import GreenletExit
 import util
+from util import CircularIterator


 class AnnounceError(Exception):

@@ -24,11 +26,20 @@ global_stats = collections.defaultdict(lambda: collections.defaultdict(int))
 class SiteAnnouncer(object):
     def __init__(self, site):
         self.site = site
+        self.log = logging.getLogger("Site:%s SiteAnnouncer" % self.site.address_short)
+
         self.stats = {}
         self.fileserver_port = config.fileserver_port
         self.peer_id = self.site.connection_server.peer_id
-        self.last_tracker_id = random.randint(0, 10)
+        self.tracker_circular_iterator = CircularIterator()
         self.time_last_announce = 0
+        self.supported_tracker_count = 0
+
+    # Returns connection_server rela
+    # Since 0.8.0
+    @property
+    def connection_server(self):
+        return self.site.connection_server
+
     def getTrackers(self):
         return config.trackers

@@ -36,25 +47,76 @@ class SiteAnnouncer(object):
     def getSupportedTrackers(self):
         trackers = self.getTrackers()

-        if not self.site.connection_server.tor_manager.enabled:
+        if not self.connection_server.tor_manager.enabled:
             trackers = [tracker for tracker in trackers if ".onion" not in tracker]

         trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)]  # Remove trackers with unknown address

-        if "ipv6" not in self.site.connection_server.supported_ip_types:
-            trackers = [tracker for tracker in trackers if helper.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]
+        if "ipv6" not in self.connection_server.supported_ip_types:
+            trackers = [tracker for tracker in trackers if self.connection_server.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]

         return trackers

-    def getAnnouncingTrackers(self, mode):
+    # Returns a cached value of len(self.getSupportedTrackers()), which can be
+    # inacurate.
+    # To be used from Site for estimating available tracker count.
+    def getSupportedTrackerCount(self):
+        return self.supported_tracker_count
+
+    def shouldTrackerBeTemporarilyIgnored(self, tracker, mode, force):
+        if not tracker:
+            return True
+
+        if force:
+            return False
+
+        now = time.time()
+
+        # Throttle accessing unresponsive trackers
+        tracker_stats = global_stats[tracker]
+        delay = min(30 * tracker_stats["num_error"], 60 * 10)
+        time_announce_allowed = tracker_stats["time_request"] + delay
+        if now < time_announce_allowed:
+            return True
+
+        return False

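The back-off is linear with a cap: each recorded error adds 30 seconds of delay after the tracker's last request, up to 10 minutes, and `force` bypasses the throttle entirely. Worked numbers, assuming the formula above:

def announce_delay(num_error):
    # 30 s per recorded error, capped at 10 minutes
    return min(30 * num_error, 60 * 10)

assert announce_delay(0) == 0
assert announce_delay(4) == 120   # 4 errors -> retry no sooner than 2 minutes
assert announce_delay(25) == 600  # capped at 10 minutes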
+    def getAnnouncingTrackers(self, mode, force):
         trackers = self.getSupportedTrackers()

-        if trackers and (mode == "update" or mode == "more"):  # Only announce on one tracker, increment the queried tracker id
-            self.last_tracker_id += 1
-            self.last_tracker_id = self.last_tracker_id % len(trackers)
-            trackers_announcing = [trackers[self.last_tracker_id]]  # We only going to use this one
+        self.supported_tracker_count = len(trackers)
+
+        if trackers and (mode == "update" or mode == "more"):
+
+            # Choose just 2 trackers to announce to
+
+            trackers_announcing = []
+
+            # One is the next in sequence
+
+            self.tracker_circular_iterator.resetSuccessiveCount()
+            while 1:
+                tracker = self.tracker_circular_iterator.next(trackers)
+                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
+                    trackers_announcing.append(tracker)
+                    break
+                if self.tracker_circular_iterator.isWrapped():
+                    break
+
+            # And one is just random
+
+            shuffled_trackers = random.sample(trackers, len(trackers))
+            for tracker in shuffled_trackers:
+                if tracker in trackers_announcing:
+                    continue
+                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
+                    trackers_announcing.append(tracker)
+                    break
         else:
-            trackers_announcing = trackers
+            trackers_announcing = [
+                tracker for tracker in trackers
+                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force)
+            ]

         return trackers_announcing

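For `update`/`more` announces, the old single incrementing index is replaced by two picks: one deterministic (the next non-throttled tracker in a circular walk over the list, so every tracker is eventually visited) and one random (so a single slow tracker cannot pin the rotation). A self-contained sketch of the same selection, with a plain index standing in for the project's `CircularIterator` helper, whose implementation is not shown in this diff:

import random

def pick_two(trackers, ignored, cursor):
    # cursor: persistent rotation index; ignored: set of throttled trackers
    chosen = []
    # 1) next non-ignored tracker in circular order
    for _ in range(len(trackers)):
        cursor = (cursor + 1) % len(trackers)
        if trackers[cursor] not in ignored:
            chosen.append(trackers[cursor])
            break
    # 2) plus one random non-ignored tracker, distinct from the first
    for tracker in random.sample(trackers, len(trackers)):
        if tracker not in chosen and tracker not in ignored:
            chosen.append(tracker)
            break
    return chosen, cursor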
@@ -62,94 +124,32 @@ class SiteAnnouncer(object):
         back = []
         # Type of addresses they can reach me
         if config.trackers_proxy == "disable" and config.tor != "always":
-            for ip_type, opened in list(self.site.connection_server.port_opened.items()):
+            for ip_type, opened in list(self.connection_server.port_opened.items()):
                 if opened:
                     back.append(ip_type)
-        if self.site.connection_server.tor_manager.start_onions:
+        if self.connection_server.tor_manager.start_onions:
             back.append("onion")
         return back

-    @util.Noparallel(blocking=False)
+    @util.Noparallel()
     def announce(self, force=False, mode="start", pex=True):
+        if not self.site.isServing():
+            return
+
         if time.time() - self.time_last_announce < 30 and not force:
             return  # No reannouncing within 30 secs
-        if force:
-            self.site.log.debug("Force reannounce in mode %s" % mode)
+        self.log.debug("announce: force=%s, mode=%s, pex=%s" % (force, mode, pex))

         self.fileserver_port = config.fileserver_port
         self.time_last_announce = time.time()

-        trackers = self.getAnnouncingTrackers(mode)
-        if config.verbose:
-            self.site.log.debug("Tracker announcing, trackers: %s" % trackers)
-
-        errors = []
-        slow = []
-        s = time.time()
-        threads = []
-        num_announced = 0
-
-        for tracker in trackers:  # Start announce threads
-            tracker_stats = global_stats[tracker]
-            # Reduce the announce time for trackers that looks unreliable
-            time_announce_allowed = time.time() - 60 * min(30, tracker_stats["num_error"])
-            if tracker_stats["num_error"] > 5 and tracker_stats["time_request"] > time_announce_allowed and not force:
-                if config.verbose:
-                    self.site.log.debug("Tracker %s looks unreliable, announce skipped (error: %s)" % (tracker, tracker_stats["num_error"]))
-                continue
-            thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
-            threads.append(thread)
-            thread.tracker = tracker
-
-        time.sleep(0.01)
-        self.updateWebsocket(trackers="announcing")
-
-        gevent.joinall(threads, timeout=20)  # Wait for announce finish
-
-        for thread in threads:
-            if thread.value is None:
-                continue
-            if thread.value is not False:
-                if thread.value > 1.0:  # Takes more than 1 second to announce
-                    slow.append("%.2fs %s" % (thread.value, thread.tracker))
-                num_announced += 1
-            else:
-                if thread.ready():
-                    errors.append(thread.tracker)
-                else:  # Still running
-                    slow.append("30s+ %s" % thread.tracker)
-
-        # Save peers num
-        self.site.settings["peers"] = len(self.site.peers)
-
-        if len(errors) < len(threads):  # At least one tracker finished
-            if len(trackers) == 1:
-                announced_to = trackers[0]
-            else:
-                announced_to = "%s/%s trackers" % (num_announced, len(threads))
-            if mode != "update" or config.verbose:
-                self.site.log.debug(
-                    "Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
-                    (mode, announced_to, time.time() - s, errors, slow)
-                )
-        else:
-            if len(threads) > 1:
-                self.site.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
-            if len(threads) == 1 and mode != "start":  # Move to next tracker
-                self.site.log.debug("Tracker failed, skipping to next one...")
-                self.site.greenlet_manager.spawnLater(1.0, self.announce, force=force, mode=mode, pex=pex)
-
-        self.updateWebsocket(trackers="announced")
+        trackers = self.getAnnouncingTrackers(mode, force)
+        self.log.debug("Chosen trackers: %s" % trackers)
+        self.announceToTrackers(trackers, force=force, mode=mode)

         if pex:
-            self.updateWebsocket(pex="announcing")
-            if mode == "more":  # Need more peers
-                self.announcePex(need_num=10)
-            else:
-                self.announcePex()
-
-            self.updateWebsocket(pex="announced")
+            self.announcePex()

     def getTrackerHandler(self, protocol):
         return None
@@ -177,7 +177,7 @@ class SiteAnnouncer(object):
         s = time.time()
         address_parts = self.getAddressParts(tracker)
         if not address_parts:
-            self.site.log.warning("Tracker %s error: Invalid address" % tracker)
+            self.log.warning("Tracker %s error: Invalid address" % tracker)
             return False

         if tracker not in self.stats:

@@ -188,7 +188,7 @@ class SiteAnnouncer(object):
         self.stats[tracker]["time_request"] = time.time()
         global_stats[tracker]["time_request"] = time.time()
         if config.verbose:
-            self.site.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
+            self.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
         if mode == "update":
             num_want = 10
         else:

@@ -202,7 +202,7 @@ class SiteAnnouncer(object):
             else:
                 raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"])
         except Exception as err:
-            self.site.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
+            self.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
             error = err

         if error:

@@ -210,11 +210,11 @@ class SiteAnnouncer(object):
             self.stats[tracker]["time_status"] = time.time()
             self.stats[tracker]["last_error"] = str(error)
             self.stats[tracker]["time_last_error"] = time.time()
-            if self.site.connection_server.has_internet:
+            if self.connection_server.has_internet:
                 self.stats[tracker]["num_error"] += 1
         self.stats[tracker]["num_request"] += 1
         global_stats[tracker]["num_request"] += 1
-        if self.site.connection_server.has_internet:
+        if self.connection_server.has_internet:
             global_stats[tracker]["num_error"] += 1
         self.updateWebsocket(tracker="error")
         return False

@@ -249,39 +249,106 @@ class SiteAnnouncer(object):
             self.site.updateWebsocket(peers_added=added)

         if config.verbose:
-            self.site.log.debug(
+            self.log.debug(
                 "Tracker result: %s://%s (found %s peers, new: %s, total: %s)" %
                 (address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers))
             )
         return time.time() - s

-    @util.Noparallel(blocking=False)
-    def announcePex(self, query_num=2, need_num=5):
-        peers = self.site.getConnectedPeers()
-        if len(peers) == 0:  # Wait 3s for connections
-            time.sleep(3)
-            peers = self.site.getConnectedPeers()
-
-        if len(peers) == 0:  # Small number of connected peers for this site, connect to any
-            peers = list(self.site.getRecentPeers(20))
-            need_num = 10
-
-        random.shuffle(peers)
-        done = 0
-        total_added = 0
-        for peer in peers:
-            num_added = peer.pex(need_num=need_num)
-            if num_added is not False:
-                done += 1
-                total_added += num_added
-                if num_added:
-                    self.site.worker_manager.onPeers()
-                    self.site.updateWebsocket(peers_added=num_added)
-            time.sleep(0.1)
-            if done == query_num:
-                break
-        self.site.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
+    def announceToTrackers(self, trackers, force=False, mode="start"):
+        errors = []
+        slow = []
+        s = time.time()
+        threads = []
+        num_announced = 0
+
+        for tracker in trackers:  # Start announce threads
+            thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
+            threads.append(thread)
+            thread.tracker = tracker
+
+        time.sleep(0.01)
+        self.updateWebsocket(trackers="announcing")
+
+        gevent.joinall(threads, timeout=20)  # Wait for announce finish
+
+        for thread in threads:
+            if thread.value is None:
+                continue
+            if thread.value is not False:
+                if thread.value > 1.0:  # Takes more than 1 second to announce
+                    slow.append("%.2fs %s" % (thread.value, thread.tracker))
+                num_announced += 1
+            else:
+                if thread.ready():
+                    errors.append(thread.tracker)
+                else:  # Still running
+                    slow.append("30s+ %s" % thread.tracker)
+
+        # Save peers num
+        self.site.settings["peers"] = len(self.site.peers)
+
+        if len(errors) < len(threads):  # At least one tracker finished
+            if len(trackers) == 1:
+                announced_to = trackers[0]
+            else:
+                announced_to = "%s/%s trackers" % (num_announced, len(threads))
+            if mode != "update" or config.verbose:
+                self.log.debug(
+                    "Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
+                    (mode, announced_to, time.time() - s, errors, slow)
+                )
+        else:
+            if len(threads) > 1:
+                self.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
+            if len(threads) > 1 and mode != "start":  # Move to next tracker
+                self.log.debug("Tracker failed, skipping to next one...")
+                self.site.greenlet_manager.spawnLater(5.0, self.announce, force=force, mode=mode, pex=False)
+
+        self.updateWebsocket(trackers="announced")
+
+    @util.Noparallel(blocking=False)
+    def announcePex(self, query_num=2, need_num=10, establish_connections=True):
+        peers = []
+        try:
+            peer_count = 20 + query_num * 2
+
+            # Wait for some peers to connect
+            for _ in range(5):
+                if not self.site.isServing():
+                    return
+                peers = self.site.getConnectedPeers(only_fully_connected=True)
+                if len(peers) > 0:
+                    break
+                time.sleep(2)
+
+            if len(peers) < peer_count and establish_connections:
+                # Small number of connected peers for this site, connect to any
+                peers = list(self.site.getRecentPeers(peer_count))
+
+            if len(peers) > 0:
+                self.updateWebsocket(pex="announcing")
+
+            random.shuffle(peers)
+            done = 0
+            total_added = 0
+            for peer in peers:
+                if not establish_connections and not peer.isConnected():
+                    continue
+                num_added = peer.pex(need_num=need_num)
+                if num_added is not False:
+                    done += 1
+                    total_added += num_added
+                    if num_added:
+                        self.site.worker_manager.onPeers()
+                        self.site.updateWebsocket(peers_added=num_added)
+                if done == query_num:
+                    break
+                time.sleep(0.1)
+
+            self.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
+        finally:
+            if len(peers) > 0:
+                self.updateWebsocket(pex="announced")

     def updateWebsocket(self, **kwargs):
         if kwargs:
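The reworked `announcePex()` polls up to five times (2 s apart) for fully connected peers before falling back to recent peers, raises the default `need_num` from 5 to 10, and reports the websocket "announced" state from a `finally:` block so the UI cannot be left stuck in "announcing" if the method returns early or raises. The new `establish_connections=False` mode only queries peers that are already connected; the `PeerConnector` added below in src/Site/SiteHelpers.py uses it to fish for fresh addresses without opening new sockets. Hypothetical call sites, per the signature above:

site.announcer.announcePex()                             # normal: may dial new peers
site.announcer.announcePex(establish_connections=False)  # passive: connected peers only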
src/Site/SiteHelpers.py (new file, 256 lines)
@@ -0,0 +1,256 @@
+import time
+import weakref
+import gevent
+
+
+class ConnectRequirement(object):
+    next_id = 1
+
+    def __init__(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
+        self.need_nr_peers = need_nr_peers  # how many total peers we need
+        self.need_nr_connected_peers = need_nr_connected_peers  # how many connected peers we need
+        self.result = gevent.event.AsyncResult()  # resolves on need_nr_peers condition
+        self.result_connected = gevent.event.AsyncResult()  # resolves on need_nr_connected_peers condition
+
+        self.expiration_interval = expiration_interval
+        self.expired = False
+        if expiration_interval:
+            self.expire_at = time.time() + expiration_interval
+        else:
+            self.expire_at = None
+
+        self.nr_peers = -1  # updated PeerConnector()
+        self.nr_connected_peers = -1  # updated PeerConnector()
+
+        self.heartbeat = gevent.event.AsyncResult()
+
+        self.id = type(self).next_id
+        type(self).next_id += 1
+
+    def fulfilled(self):
+        return self.result.ready() and self.result_connected.ready()
+
+    def ready(self):
+        return self.expired or self.fulfilled()
+
+    # Heartbeat sent when any of the following happens:
+    # * self.result is set
+    # * self.result_connected is set
+    # * self.nr_peers changed
+    # * self.nr_peers_connected changed
+    # * self.expired is set
+    def waitHeartbeat(self, timeout=None):
+        if self.heartbeat.ready():
+            self.heartbeat = gevent.event.AsyncResult()
+        return self.heartbeat.wait(timeout=timeout)
+
+    def sendHeartbeat(self):
+        self.heartbeat.set_result()
+        if self.heartbeat.ready():
+            self.heartbeat = gevent.event.AsyncResult()
+
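The heartbeat is a reusable one-shot: `sendHeartbeat()` fires the current `AsyncResult` to wake every greenlet blocked in `waitHeartbeat()`, then a fresh `AsyncResult` is swapped in so the next wait blocks again. A consumer typically loops until the requirement is `ready()`. A sketch of the intended wait loop (assuming a `req` produced by `PeerConnector.newReq()` below):

# waitHeartbeat() wakes on every progress event, so the loop re-checks
# ready() instead of polling on a fixed timer.
while not req.ready():
    req.waitHeartbeat(timeout=10)  # returns early on progress, else after 10 s
if req.fulfilled():
    print("got %s peers (%s connected)" % (req.nr_peers, req.nr_connected_peers))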
+
+class PeerConnector(object):
+
+    def __init__(self, site):
+        self.site = site
+
+        self.peer_reqs = weakref.WeakValueDictionary()  # How many connected peers we need.
+        # Separate entry for each requirement.
+        # Objects of type ConnectRequirement.
+        self.peer_connector_controller = None  # Thread doing the orchestration in background.
+        self.peer_connector_workers = dict()  # Threads trying to connect to individual peers.
+        self.peer_connector_worker_limit = 5  # Max nr of workers.
+        self.peer_connector_announcer = None  # Thread doing announces in background.
+
+        # Max effective values. Set by processReqs().
+        self.need_nr_peers = 0
+        self.need_nr_connected_peers = 0
+        self.nr_peers = 0  # set by processReqs()
+        self.nr_connected_peers = 0  # set by processReqs2()
+
+        # Connector Controller state
+        self.peers = list()
+
+    def addReq(self, req):
+        self.peer_reqs[req.id] = req
+        self.processReqs()
+
+    def newReq(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
+        req = ConnectRequirement(need_nr_peers, need_nr_connected_peers, expiration_interval=expiration_interval)
+        self.addReq(req)
+        return req
+
+    def processReqs(self, nr_connected_peers=None):
+        nr_peers = len(self.site.peers)
+        self.nr_peers = nr_peers
+
+        need_nr_peers = 0
+        need_nr_connected_peers = 0
+
+        items = list(self.peer_reqs.items())
+        for key, req in items:
+            send_heartbeat = False
+
+            if req.expire_at and req.expire_at < time.time():
+                req.expired = True
+                self.peer_reqs.pop(key, None)
+                send_heartbeat = True
+            elif req.result.ready() and req.result_connected.ready():
+                pass
+            else:
+                if nr_connected_peers is not None:
+                    if req.need_nr_peers <= nr_peers and req.need_nr_connected_peers <= nr_connected_peers:
+                        req.result.set_result(nr_peers)
+                        req.result_connected.set_result(nr_connected_peers)
+                        send_heartbeat = True
+                if req.nr_peers != nr_peers or req.nr_connected_peers != nr_connected_peers:
+                    req.nr_peers = nr_peers
+                    req.nr_connected_peers = nr_connected_peers
+                    send_heartbeat = True
+
+            if not (req.result.ready() and req.result_connected.ready()):
+                need_nr_peers = max(need_nr_peers, req.need_nr_peers)
+                need_nr_connected_peers = max(need_nr_connected_peers, req.need_nr_connected_peers)
+
+            if send_heartbeat:
+                req.sendHeartbeat()
+
+        self.need_nr_peers = need_nr_peers
+        self.need_nr_connected_peers = need_nr_connected_peers
+
+        if nr_connected_peers is None:
+            nr_connected_peers = 0
+        if need_nr_peers > nr_peers:
+            self.spawnPeerConnectorAnnouncer();
+        if need_nr_connected_peers > nr_connected_peers:
+            self.spawnPeerConnectorController();
+
+    def processReqs2(self):
+        self.nr_connected_peers = len(self.site.getConnectedPeers(only_fully_connected=True))
+        self.processReqs(nr_connected_peers=self.nr_connected_peers)
+
+    # For adding new peers when ConnectorController is working.
+    # While it is iterating over a cached list of peers, there can be a significant lag
+    # for a newly discovered peer to get in sight of the controller.
+    # Suppose most previously known peers are dead and we've just get a few
+    # new peers from a tracker.
+    # So we mix the new peer to the cached list.
+    # When ConnectorController is stopped (self.peers is empty), we just do nothing here.
+    def addPeer(self, peer):
+        if not self.peers:
+            return
+        if peer not in self.peers:
+            self.peers.append(peer)
+
+    def deregisterPeer(self, peer):
+        try:
+            self.peers.remove(peer)
+        except:
+            pass
+
+    def sleep(self, t):
+        self.site.connection_server.sleep(t)
+
+    def keepGoing(self):
+        return self.site.isServing() and self.site.connection_server.allowsCreatingConnections()
+
+    def peerConnectorWorker(self, peer):
+        if not peer.isConnected():
+            peer.connect()
+        if peer.isConnected():
+            peer.ping()
+        self.processReqs2()
+
+    def peerConnectorController(self):
+        self.peers = list()
+        addendum = 20
+        while self.keepGoing():
+
+            no_peers_loop = 0
+            while len(self.site.peers) < 1:
+                # No peers at all.
+                # Waiting for the announcer to discover some peers.
+                self.sleep(10 + no_peers_loop)
+                no_peers_loop += 1
+                if not self.keepGoing() or no_peers_loop > 60:
+                    break
+
+            self.processReqs2()
+
+            if self.need_nr_connected_peers <= self.nr_connected_peers:
+                # Ok, nobody waits for connected peers.
+                # Done.
+                break
+
+            if len(self.site.peers) < 1:
+                break
+
+            if len(self.peers) < 1:
+                # refill the peer list
+                self.peers = self.site.getRecentPeers(self.need_nr_connected_peers * 2 + self.nr_connected_peers + addendum)
+                addendum = min(addendum * 2 + 50, 10000)
+                if len(self.peers) <= self.nr_connected_peers:
+                    # Looks like all known peers are connected.
+                    # Waiting for the announcer to discover some peers.
+                    self.site.announcer.announcePex(establish_connections=False)
+                    self.sleep(10)
+                    continue
+
+            added = 0
+
+            # try connecting to peers
+            while self.keepGoing() and len(self.peer_connector_workers) < self.peer_connector_worker_limit:
+                if len(self.peers) < 1:
+                    break
+
+                peer = self.peers.pop(0)
+
+                if peer.isConnected():
+                    continue
+
+                thread = self.peer_connector_workers.get(peer, None)
+                if thread:
+                    continue
+
+                thread = self.site.spawn(self.peerConnectorWorker, peer)
+                self.peer_connector_workers[peer] = thread
+                thread.link(lambda thread, peer=peer: self.peer_connector_workers.pop(peer, None))
+                added += 1
+
+            if not self.keepGoing():
+                break
+
+            if not added:
+                # Looks like all known peers are either connected or being connected,
+                # so we weren't able to start connecting any peer in this iteration.
+                # Waiting for the announcer to discover some peers.
+                self.sleep(20)
+
+            # wait for more room in self.peer_connector_workers
+            while self.keepGoing() and len(self.peer_connector_workers) >= self.peer_connector_worker_limit:
+                self.sleep(2)
+
+            if not self.site.connection_server.isInternetOnline():
+                self.sleep(30)
+
+        self.peers = list()
+        self.peer_connector_controller = None
+
+    def peerConnectorAnnouncer(self):
+        while self.keepGoing():
+            if self.need_nr_peers <= self.nr_peers:
+                break
+            self.site.announce(mode="more")
+            self.processReqs2()
+            if self.need_nr_peers <= self.nr_peers:
+                break
+            self.sleep(10)
+            if not self.site.connection_server.isInternetOnline():
+                self.sleep(20)
+        self.peer_connector_announcer = None
+
+    def spawnPeerConnectorController(self):
+        if self.peer_connector_controller is None or self.peer_connector_controller.ready():
+            self.peer_connector_controller = self.site.spawn(self.peerConnectorController)
+
+    def spawnPeerConnectorAnnouncer(self):
+        if self.peer_connector_announcer is None or self.peer_connector_announcer.ready():
+            self.peer_connector_announcer = self.site.spawn(self.peerConnectorAnnouncer)
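Putting the pieces together: a caller that needs, say, 10 known peers with 3 of them connected registers a requirement and waits; `processReqs()` resolves the two `AsyncResult`s once the thresholds are met and spawns the background controller/announcer greenlets while they are not. A hedged usage sketch (the `peer_connector` variable is an assumption; the diff shows only the class itself, not where Site instantiates it):

# Sketch: ask for 10 known peers, 3 connected, give up after 60 s.
req = peer_connector.newReq(
    need_nr_peers=10, need_nr_connected_peers=3, expiration_interval=60
)
while not req.ready():           # True once fulfilled or expired
    req.waitHeartbeat(timeout=10)
if req.fulfilled():
    pass  # both result and result_connected are set; proceed with the download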
src/Site/SiteManager.py

@@ -4,6 +4,7 @@ import re
 import os
 import time
 import atexit
+import collections

 import gevent

@@ -27,6 +28,21 @@ class SiteManager(object):
         gevent.spawn(self.saveTimer)
         atexit.register(lambda: self.save(recalculate_size=True))

+        # ZeroNet has a bug of desyncing between:
+        # * time sent in a response of listModified
+        # and
+        # * time checked on receiving a file.
+        # This leads to the following scenario:
+        # * Request listModified.
+        # * Detect that the remote peer missing an update
+        # * Send a newer version of the file back to the peer.
+        # * The peer responses "ok: File not changed"
+        # .....
+        # * Request listModified the next time and do all the same again.
+        # So we keep the list of sent back entries to prevent sending multiple useless updates:
+        # "{site.address} - {peer.key} - {inner_path}" -> mtime
+        self.send_back_lru = collections.OrderedDict()
+
     # Load all sites from data/sites.json
     @util.Noparallel()
     def load(self, cleanup=True, startup=False):

@@ -155,6 +171,11 @@ class SiteManager(object):
     def resolveDomainCached(self, domain):
         return self.resolveDomain(domain)

+    # Checks if the address is blocked. To be implemented in content filter plugins.
+    # Since 0.8.0
+    def isAddressBlocked(self, address):
+        return False
+
     # Return: Site object or None if not found
     def get(self, address):
         if self.isDomainCached(address):

@@ -216,6 +237,23 @@ class SiteManager(object):
         self.load(startup=True)
         return self.sites

+    # Return False if we never sent <inner_path> to <peer>
+    # or if the file that was sent was older than <remote_modified>
+    # so that send back logic is suppressed for <inner_path>.
+    # True if <inner_path> can be sent back to <peer>.
+    def checkSendBackLRU(self, site, peer, inner_path, remote_modified):
+        key = site.address + ' - ' + peer.key + ' - ' + inner_path
+        sent_modified = self.send_back_lru.get(key, 0)
+        return remote_modified < sent_modified
+
+    def addToSendBackLRU(self, site, peer, inner_path, modified):
+        key = site.address + ' - ' + peer.key + ' - ' + inner_path
+        if self.send_back_lru.get(key, None) is None:
+            self.send_back_lru[key] = modified
+            while len(self.send_back_lru) > config.send_back_lru_size:
+                self.send_back_lru.popitem(last=False)
+        else:
+            self.send_back_lru.move_to_end(key, last=True)
+
 site_manager = SiteManager()  # Singletone
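The LRU is a plain `OrderedDict` capped at `config.send_back_lru_size`: new keys are appended and the oldest entry is evicted from the front with `popitem(last=False)`, while re-touched keys are refreshed with `move_to_end()`. A compact sketch of the same eviction behavior:

import collections

lru = collections.OrderedDict()
LRU_SIZE = 3  # stands in for config.send_back_lru_size

def touch(key, value):
    if key not in lru:
        lru[key] = value
        while len(lru) > LRU_SIZE:
            lru.popitem(last=False)  # evict least recently used (front)
    else:
        lru.move_to_end(key)         # mark as most recently used

for k in "abcd":
    touch(k, 1)
assert list(lru) == ["b", "c", "d"]  # "a" was evicted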
src/Site/SiteStorage.py

@@ -24,6 +24,25 @@ thread_pool_fs_read = ThreadPool.ThreadPool(config.threads_fs_read, name="FS read")
 thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write")
 thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")

+
+class VerifyFiles_Notificator(object):
+    def __init__(self, site, quick_check):
+        self.site = site
+        self.quick_check = quick_check
+        self.scanned_files = 0
+        self.websocket_update_interval = 0.25
+        self.websocket_update_time = time.time()
+
+    def inc(self):
+        self.scanned_files += 1
+        if self.websocket_update_time + self.websocket_update_interval < time.time():
+            self.send()
+
+    def send(self):
+        self.websocket_update_time = time.time()
+        if self.quick_check:
+            self.site.updateWebsocket(checking=self.scanned_files)
+        else:
+            self.site.updateWebsocket(verifying=self.scanned_files)
+
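The notificator throttles UI updates rather than work: `inc()` is called once per scanned file, but a websocket message goes out at most every 0.25 s, so verifying tens of thousands of files doesn't flood the browser with one message per file. The rate-limiting idiom on its own (sketch):

import time

class Throttled:
    def __init__(self, interval=0.25):
        self.interval = interval
        self.last_sent = time.time()
        self.count = 0

    def inc(self):
        self.count += 1
        # cheap counter bump on every event; the expensive send happens
        # at most once per interval
        if self.last_sent + self.interval < time.time():
            self.last_sent = time.time()
            print("progress:", self.count)  # stands in for updateWebsocket()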
@PluginManager.acceptPlugins
|
@PluginManager.acceptPlugins
|
||||||
class SiteStorage(object):
|
class SiteStorage(object):
|
||||||
|
@ -260,7 +279,7 @@ class SiteStorage(object):
|
||||||
# Open file object
|
# Open file object
|
||||||
@thread_pool_fs_read.wrap
|
@thread_pool_fs_read.wrap
|
||||||
def read(self, inner_path, mode="rb"):
|
def read(self, inner_path, mode="rb"):
|
||||||
return self.open(inner_path, mode).read()
|
return open(self.getPath(inner_path), mode).read()
|
||||||
|
|
||||||
@thread_pool_fs_write.wrap
|
@thread_pool_fs_write.wrap
|
||||||
def writeThread(self, inner_path, content):
|
def writeThread(self, inner_path, content):
|
||||||
|
@ -356,7 +375,7 @@ class SiteStorage(object):
|
||||||
# Reopen DB to check changes
|
# Reopen DB to check changes
|
||||||
if self.has_db:
|
if self.has_db:
|
||||||
self.closeDb("New dbschema")
|
self.closeDb("New dbschema")
|
||||||
gevent.spawn(self.getDb)
|
self.site.spawn(self.getDb)
|
||||||
elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db
|
elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db
|
||||||
if config.verbose:
|
if config.verbose:
|
||||||
self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
|
self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
|
||||||
|
@ -369,11 +388,11 @@ class SiteStorage(object):
|
||||||
# Load and parse json file
|
# Load and parse json file
|
||||||
@thread_pool_fs_read.wrap
|
@thread_pool_fs_read.wrap
|
||||||
def loadJson(self, inner_path):
|
def loadJson(self, inner_path):
|
||||||
try:
|
try :
|
||||||
with self.open(inner_path, "r", encoding="utf8") as file:
|
with self.open(inner_path) as file:
|
||||||
return json.load(file)
|
return json.load(file)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
self.log.warning("Json load error: %s" % Debug.formatException(err))
|
self.log.error("Json load error: %s" % Debug.formatException(err))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Write formatted json file
|
# Write formatted json file
|
||||||
|
@ -424,6 +443,8 @@ class SiteStorage(object):
|
||||||
return inner_path
|
return inner_path
|
||||||
|
|
||||||
# Verify all files sha512sum using content.json
|
# Verify all files sha512sum using content.json
|
||||||
|
# The result may not be accurate if self.site.isStopping().
|
||||||
|
# verifyFiles() return immediately in that case.
|
||||||
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
|
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
|
||||||
bad_files = []
|
bad_files = []
|
||||||
back = defaultdict(int)
|
back = defaultdict(int)
|
||||||
|
@ -435,17 +456,55 @@ class SiteStorage(object):
|
||||||
self.log.debug("VerifyFile content.json not exists")
|
self.log.debug("VerifyFile content.json not exists")
|
||||||
self.site.needFile("content.json", update=True) # Force update to fix corrupt file
|
self.site.needFile("content.json", update=True) # Force update to fix corrupt file
|
||||||
self.site.content_manager.loadContent() # Reload content.json
|
self.site.content_manager.loadContent() # Reload content.json
|
||||||
for content_inner_path, content in list(self.site.content_manager.contents.items()):
|
|
||||||
|
# Trying to read self.site.content_manager.contents without being stuck
|
||||||
|
# on reading the long file list and also without getting
|
||||||
|
# "RuntimeError: dictionary changed size during iteration"
|
||||||
|
# We can't use just list(iteritems()) since it loads all the contents files
|
||||||
|
# at once and gets unresponsive.
|
||||||
|
contents = {}
|
||||||
|
notificator = None
|
||||||
|
tries = 0
|
||||||
|
max_tries = 40
|
||||||
|
stop = False
|
||||||
|
while not stop:
|
||||||
|
try:
|
||||||
|
contents = {}
|
||||||
|
notificator = VerifyFiles_Notificator(self.site, quick_check)
|
||||||
|
for content_inner_path, content in self.site.content_manager.contents.iteritems():
|
||||||
|
notificator.inc()
|
||||||
|
contents[content_inner_path] = content
|
||||||
|
if self.site.isStopping():
|
||||||
|
stop = True
|
||||||
|
break
|
||||||
|
stop = True
|
||||||
|
except RuntimeError as err:
|
||||||
|
if "changed size during iteration" in str(err):
|
||||||
|
tries += 1
|
||||||
|
if tries >= max_tries:
|
||||||
|
self.log.info("contents.json file list changed during iteration. %s tries done. Giving up.", tries)
|
||||||
|
stop = True
|
||||||
|
self.log.info("contents.json file list changed during iteration. Trying again... (%s)", tries)
|
||||||
|
time.sleep(2 * tries)
|
||||||
|
else:
|
||||||
|
stop = True
|
||||||
|
|
||||||
|
for content_inner_path, content in contents.items():
|
||||||
back["num_content"] += 1
|
back["num_content"] += 1
|
||||||
i += 1
|
i += 1
|
||||||
if i % 50 == 0:
|
if i % 50 == 0:
|
||||||
time.sleep(0.001) # Context switch to avoid gevent hangs
|
time.sleep(0.001) # Context switch to avoid gevent hangs
|
||||||
|
|
||||||
|
if self.site.isStopping():
|
||||||
|
break
|
||||||
|
|
||||||
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
|
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
|
||||||
back["num_content_missing"] += 1
|
back["num_content_missing"] += 1
|
||||||
self.log.debug("[MISSING] %s" % content_inner_path)
|
self.log.debug("[MISSING] %s" % content_inner_path)
|
||||||
bad_files.append(content_inner_path)
|
bad_files.append(content_inner_path)
|
||||||
|
|
||||||
for file_relative_path in list(content.get("files", {}).keys()):
|
for file_relative_path in list(content.get("files", {}).keys()):
|
||||||
|
notificator.inc()
|
||||||
back["num_file"] += 1
|
back["num_file"] += 1
|
||||||
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
|
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
|
||||||
file_inner_path = file_inner_path.strip("/") # Strip leading /
|
file_inner_path = file_inner_path.strip("/") # Strip leading /
|
||||||
|
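The hunk above snapshots self.site.content_manager.contents before the heavy per-file loop: iterate item by item to stay responsive, retry with a growing back-off when a concurrent writer triggers "RuntimeError: dictionary changed size during iteration", and give up after a bounded number of attempts. The same pattern, reduced to a standalone sketch (function and names are illustrative, not from the codebase):

    import time

    def snapshotDict(live_dict, max_tries=40):
        # Copy a dict that other greenlets/threads may mutate while we read it.
        # Retrying on RuntimeError is cheaper than locking around every writer;
        # the growing back-off gives writers time to finish.
        snapshot = {}
        for tries in range(1, max_tries + 1):
            snapshot = {}
            try:
                for key, value in live_dict.items():
                    snapshot[key] = value
                return snapshot
            except RuntimeError as err:
                if "changed size during iteration" not in str(err):
                    raise
                time.sleep(2 * tries)
        return snapshot  # best-effort (possibly partial) after max_tries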
@@ -456,15 +515,19 @@ class SiteStorage(object):
                     bad_files.append(file_inner_path)
                     continue

+                err = None
+
                 if quick_check:
-                    ok = os.path.getsize(file_path) == content["files"][file_relative_path]["size"]
+                    file_size = os.path.getsize(file_path)
+                    expected_size = content["files"][file_relative_path]["size"]
+                    ok = file_size == expected_size
                     if not ok:
-                        err = "Invalid size"
+                        err = "Invalid size: %s - actual, %s - expected" % (file_size, expected_size)
                 else:
                     try:
                         ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
-                    except Exception as _err:
-                        err = _err
+                    except Exception as err2:
+                        err = err2
                         ok = False

                 if not ok:

@@ -477,6 +540,7 @@ class SiteStorage(object):
             optional_added = 0
             optional_removed = 0
             for file_relative_path in list(content.get("files_optional", {}).keys()):
+                notificator.inc()
                 back["num_optional"] += 1
                 file_node = content["files_optional"][file_relative_path]
                 file_inner_path = helper.getDirname(content_inner_path) + file_relative_path  # Relative to site dir

@@ -521,6 +585,8 @@ class SiteStorage(object):
                     (content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
                 )

+        notificator.send()
+
         self.site.content_manager.contents.db.processDelayed()
         time.sleep(0.001)  # Context switch to avoid gevent hangs
         return back
@@ -16,7 +16,7 @@ class TestFileRequest:
         client = ConnectionServer(file_server.ip, 1545)

         connection = client.getConnection(file_server.ip, 1544)
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Normal request
         response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})

@@ -61,7 +61,7 @@ class TestFileRequest:
         file_server.ip_incoming = {}  # Reset flood protection
         client = ConnectionServer(file_server.ip, 1545)
         connection = client.getConnection(file_server.ip, 1544)
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         buff = io.BytesIO()
         response = connection.request("streamFile", {"site": site.address, "inner_path": "content.json", "location": 0}, buff)

@@ -89,7 +89,7 @@ class TestFileRequest:
         client.stop()

     def testPex(self, file_server, site, site_temp):
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         client = FileServer(file_server.ip, 1545)
         client.sites = {site_temp.address: site_temp}
         site_temp.connection_server = client
@@ -13,7 +13,7 @@ from . import Spy
 @pytest.mark.usefixtures("resetTempSettings")
 class TestPeer:
     def testPing(self, file_server, site, site_temp):
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         client = FileServer(file_server.ip, 1545)
         client.sites = {site_temp.address: site_temp}
         site_temp.connection_server = client

@@ -32,7 +32,7 @@ class TestPeer:
         client.stop()

     def testDownloadFile(self, file_server, site, site_temp):
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         client = FileServer(file_server.ip, 1545)
         client.sites = {site_temp.address: site_temp}
         site_temp.connection_server = client

@@ -77,11 +77,11 @@ class TestPeer:

     def testHashfieldExchange(self, file_server, site, site_temp):
         server1 = file_server
-        server1.sites[site.address] = site
+        server1.getSites()[site.address] = site
         site.connection_server = server1

         server2 = FileServer(file_server.ip, 1545)
-        server2.sites[site_temp.address] = site_temp
+        server2.getSites()[site_temp.address] = site_temp
         site_temp.connection_server = server2
         site.storage.verifyFiles(quick_check=True)  # Find what optional files we have

@@ -127,7 +127,7 @@ class TestPeer:
         server2.stop()

     def testFindHash(self, file_server, site, site_temp):
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         client = FileServer(file_server.ip, 1545)
         client.sites = {site_temp.address: site_temp}
         site_temp.connection_server = client
@@ -23,7 +23,7 @@ class TestSiteDownload:

         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -74,7 +74,7 @@ class TestSiteDownload:

         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -130,7 +130,7 @@ class TestSiteDownload:
     def testArchivedDownload(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -178,7 +178,7 @@ class TestSiteDownload:
     def testArchivedBeforeDownload(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -229,7 +229,7 @@ class TestSiteDownload:
     def testOptionalDownload(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = ConnectionServer(file_server.ip, 1545)

@@ -271,7 +271,7 @@ class TestSiteDownload:
     def testFindOptional(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init full source server (has optional files)
         site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")

@@ -284,7 +284,7 @@ class TestSiteDownload:

         gevent.spawn(listen)
         time.sleep(0.001)  # Port opening
-        file_server_full.sites[site_full.address] = site_full  # Add site
+        file_server_full.getSites()[site_full.address] = site_full  # Add site
         site_full.storage.verifyFiles(quick_check=True)  # Check optional files
         site_full_peer = site.addPeer(file_server.ip, 1546)  # Add it to source server
         hashfield = site_full_peer.updateHashfield()  # Update hashfield

@@ -342,7 +342,7 @@ class TestSiteDownload:

         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -423,7 +423,7 @@ class TestSiteDownload:
     def testBigUpdate(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -476,7 +476,7 @@ class TestSiteDownload:
     def testHugeContentSiteUpdate(self, file_server, site, site_temp):
         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)

@@ -524,7 +524,7 @@ class TestSiteDownload:

         # Init source server
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site

         # Init client server
         client = FileServer(file_server.ip, 1545)
@@ -75,7 +75,7 @@ class TestTor:
         assert file_server.getConnection(address + ".onion", 1544, site=site) != file_server.getConnection(address + ".onion", 1544, site=site_temp)

         # Only allow to query from the locked site
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         connection_locked = file_server.getConnection(address + ".onion", 1544, site=site)
         assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
         assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site"

@@ -83,11 +83,11 @@ class TestTor:
     def testPex(self, file_server, site, site_temp):
         # Register site to currently running fileserver
         site.connection_server = file_server
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         # Create a new file server to emulate new peer connecting to our peer
         file_server_temp = FileServer(file_server.ip, 1545)
         site_temp.connection_server = file_server_temp
-        file_server_temp.sites[site_temp.address] = site_temp
+        file_server_temp.getSites()[site_temp.address] = site_temp

         # We will request peers from this
         peer_source = site_temp.addPeer(file_server.ip, 1544)

@@ -113,7 +113,7 @@ class TestTor:

     def testFindHash(self, tor_manager, file_server, site, site_temp):
         file_server.ip_incoming = {}  # Reset flood protection
-        file_server.sites[site.address] = site
+        file_server.getSites()[site.address] = site
         file_server.tor_manager = tor_manager

         client = FileServer(file_server.ip, 1545)
BIN  src/Test/testdata/chart.db-shm (vendored, new file): binary file not shown
BIN  src/Test/testdata/chart.db-wal (vendored, new file): binary file not shown
BIN  src/Test/testdata/content.db-shm (vendored, new file): binary file not shown
BIN  src/Test/testdata/content.db-wal (vendored, new file): binary file not shown
src/Test/testdata/filters.json (1 line, vendored, new file)
@@ -0,0 +1 @@
+{}
src/Test/testdata/openssl.cnf (58 lines, vendored, new file)
@@ -0,0 +1,58 @@
+[ req ]
+default_bits = 2048
+default_keyfile = server-key.pem
+distinguished_name = subject
+req_extensions = req_ext
+x509_extensions = x509_ext
+string_mask = utf8only
+
+# The Subject DN can be formed using X501 or RFC 4514 (see RFC 4519 for a description).
+# Its sort of a mashup. For example, RFC 4514 does not provide emailAddress.
+[ subject ]
+countryName = US
+stateOrProvinceName = NY
+localityName = New York
+organizationName = Example, LLC
+
+# Use a friendly name here because its presented to the user. The server's DNS
+# names are placed in Subject Alternate Names. Plus, DNS names here is deprecated
+# by both IETF and CA/Browser Forums. If you place a DNS name here, then you
+# must include the DNS name in the SAN too (otherwise, Chrome and others that
+# strictly follow the CA/Browser Baseline Requirements will fail).
+commonName = Example Company
+
+emailAddress = test@example.com
+
+# Section x509_ext is used when generating a self-signed certificate. I.e., openssl req -x509 ...
+[ x509_ext ]
+
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+
+basicConstraints = CA:FALSE
+keyUsage = digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth
+subjectAltName = @alternate_names
+
+# RFC 5280, Section 4.2.1.12 makes EKU optional
+# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
+# extendedKeyUsage = serverAuth, clientAuth
+
+# Section req_ext is used when generating a certificate signing request. I.e., openssl req ...
+[ req_ext ]
+
+subjectKeyIdentifier = hash
+
+basicConstraints = CA:FALSE
+keyUsage = digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth
+subjectAltName = @alternate_names
+
+# RFC 5280, Section 4.2.1.12 makes EKU optional
+# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
+# extendedKeyUsage = serverAuth, clientAuth
+
+[ alternate_names ]
+
+DNS.1 = nazwa.pl
+DNS.2 = www.nazwa.pl
src/Test/testdata/sites.json (1 line, vendored, new file)
@@ -0,0 +1 @@
+{}
src/Test/testdata/trackers.json (1 line, vendored, new file)
@@ -0,0 +1 @@
+{}
src/Test/testdata/users.json (9 lines, vendored, new file)
@@ -0,0 +1,9 @@
+
+{
+    "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
+        "certs": {},
+        "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
+        "sites": {}
+    }
+}
+
@@ -544,36 +544,17 @@ class UiRequest(object):
         if show_loadingscreen:
             meta_tags += '<meta name="viewport" id="viewport" content="width=device-width, initial-scale=0.8">';

-        def xescape(s):
-            '''combines parts from re.escape & html.escape'''
-            # https://github.com/python/cpython/blob/3.10/Lib/re.py#L267
-            # '&' is handled otherwise
-            re_chars = {i: '\\' + chr(i) for i in b'()[]{}*+-|^$\\.~# \t\n\r\v\f'}
-            # https://github.com/python/cpython/blob/3.10/Lib/html/__init__.py#L12
-            html_chars = {
-                '<': '&lt;',
-                '>': '&gt;',
-                '"': '&quot;',
-                "'": '&#x27;',
-            }
-            # we can't replace '&' because it makes certain zites work incorrectly
-            # it should however in no way interfere with re.sub in render
-            repl = {}
-            repl.update(re_chars)
-            repl.update(html_chars)
-            return s.translate(repl)
-
         return self.render(
             "src/Ui/template/wrapper.html",
             server_url=server_url,
             inner_path=inner_path,
-            file_url=xescape(file_url),
-            file_inner_path=xescape(file_inner_path),
+            file_url=re.escape(file_url),
+            file_inner_path=re.escape(file_inner_path),
             address=site.address,
-            title=xescape(title),
+            title=html.escape(title),
             body_style=body_style,
             meta_tags=meta_tags,
-            query_string=xescape(inner_query_string),
+            query_string=re.escape(inner_query_string),
             wrapper_key=site.settings["wrapper_key"],
             ajax_key=site.settings["ajax_key"],
             wrapper_nonce=wrapper_nonce,
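For reference, the two stdlib escapers this hunk switches between behave quite differently: re.escape() backslashes regex metacharacters (which is what a later re.sub() substitution needs), while html.escape() turns HTML-sensitive characters into entities. Both calls below are standard library; the sample title is illustrative:

    import html
    import re

    title = '<b>Site "news" & updates</b>'

    print(re.escape(title))   # <b>Site\ "news"\ \&\ updates</b> (escaped set varies by Python version)
    print(html.escape(title)) # &lt;b&gt;Site &quot;news&quot; &amp; updates&lt;/b&gt;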
@@ -749,10 +730,7 @@ class UiRequest(object):

     def replaceHtmlVariables(self, block, path_parts):
         user = self.getCurrentUser()
-        if user and user.settings:
-            themeclass = "theme-%-6s" % re.sub("[^a-z]", "", user.settings.get("theme", "light"))
-        else:
-            themeclass = "theme-light"
+        themeclass = "theme-%-6s" % re.sub("[^a-z]", "", user.settings.get("theme", "light"))
         block = block.replace(b"{themeclass}", themeclass.encode("utf8"))

         if path_parts:
@@ -167,7 +167,7 @@ class UiServer:
             self.log.error("Web interface bind error, must be running already, exiting.... %s" % err)
             import main
             main.file_server.stop()
-        self.log.debug("Stopped.")
+        self.log.info("Stopped.")

     def stop(self):
         self.log.debug("Stopping...")
@@ -318,6 +318,7 @@ class UiWebsocket(object):
         back["updatesite"] = config.updatesite
         back["dist_type"] = config.dist_type
         back["lib_verify_best"] = CryptBitcoin.lib_verify_best
+        back["passive_mode"] = file_server.passive_mode
         return back

     def formatAnnouncerInfo(self, site):

@@ -327,10 +328,7 @@ class UiWebsocket(object):

     def actionAs(self, to, address, cmd, params=[]):
         if not self.hasSitePermission(address, cmd=cmd):
-            #TODO! Return this as error ?
             return self.response(to, "No permission for site %s" % address)
-        if not self.server.sites.get(address):
-            return self.response(to, {"error": "Site Does Not Exist: %s" % address})
         req_self = copy.copy(self)
         req_self.site = self.server.sites.get(address)
         req_self.hasCmdPermission = self.hasCmdPermission  # Use the same permissions as current site

@@ -422,15 +420,10 @@ class UiWebsocket(object):
         is_user_content = file_info and ("cert_signers" in file_info or "cert_signers_pattern" in file_info)
         if is_user_content and privatekey is None:
             cert = self.user.getCert(self.site.address)
-            if not cert:
-                error = "Site sign failed: No certificate selected for Site: %s, Hence Signing inner_path: %s Failed, Try Adding/Selecting User Cert via Site Login" % (self.site.address, inner_path)
-                self.log.error(error)
-                return self.response(to, {"error": error})
-            else:
-                extend["cert_auth_type"] = cert["auth_type"]
-                extend["cert_user_id"] = self.user.getCertUserId(site.address)
-                extend["cert_sign"] = cert["cert_sign"]
-                self.log.debug("Extending content.json with cert %s" % extend["cert_user_id"])
+            extend["cert_auth_type"] = cert["auth_type"]
+            extend["cert_user_id"] = self.user.getCertUserId(site.address)
+            extend["cert_sign"] = cert["cert_sign"]
+            self.log.debug("Extending content.json with cert %s" % extend["cert_user_id"])

         if not self.hasFilePermission(inner_path):
             self.log.error("SiteSign error: you don't own this site & site owner doesn't allow you to do so.")

@@ -920,9 +913,9 @@ class UiWebsocket(object):
         self.response(to, "ok")

     # Update site content.json
-    def actionSiteUpdate(self, to, address, check_files=False, since=None, announce=False):
+    def actionSiteUpdate(self, to, address, check_files=False, verify_files=False, since=None, announce=False):
         def updateThread():
-            site.update(announce=announce, check_files=check_files, since=since)
+            site.update(announce=announce, check_files=check_files, verify_files=verify_files, since=since)
             self.response(to, "Updated")

         site = self.server.sites.get(address)

@@ -1172,6 +1165,32 @@ class UiWebsocket(object):
         file_server.portCheck()
         self.response(to, file_server.port_opened)

+    @flag.admin
+    @flag.no_multiuser
+    def actionServerSetPassiveMode(self, to, passive_mode=False):
+        import main
+        file_server = main.file_server
+        if file_server.isPassiveMode() != passive_mode:
+            file_server.setPassiveMode(passive_mode)
+            if file_server.isPassiveMode():
+                self.cmd("notification", ["info", _["Passive mode enabled"], 5000])
+            else:
+                self.cmd("notification", ["info", _["Passive mode disabled"], 5000])
+        self.server.updateWebsocket()
+
+    @flag.admin
+    @flag.no_multiuser
+    def actionServerSetOfflineMode(self, to, offline_mode=False):
+        import main
+        file_server = main.file_server
+        if file_server.isOfflineMode() != offline_mode:
+            file_server.setOfflineMode(offline_mode)
+            if file_server.isOfflineMode():
+                self.cmd("notification", ["info", _["Offline mode enabled"], 5000])
+            else:
+                self.cmd("notification", ["info", _["Offline mode disabled"], 5000])
+        self.server.updateWebsocket()
+
     @flag.admin
     @flag.no_multiuser
     def actionServerShutdown(self, to, restart=False):
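Both new admin actions share one toggle shape: mutate the flag only on a real state transition, toast a translated notification, then call updateWebsocket() so every connected UI re-reads serverInfo (which now carries the passive_mode key). A condensed sketch of that shape; toggleFlag and its parameters are illustrative helpers, not part of the codebase:

    def toggleFlag(ws, requested, is_on, set_on, label):
        # is_on/set_on wrap pairs like file_server.isPassiveMode / setPassiveMode.
        if is_on() != requested:
            set_on(requested)
            state = "enabled" if is_on() else "disabled"
            ws.cmd("notification", ["info", "%s %s" % (label, state), 5000])
        ws.server.updateWebsocket()  # push fresh server state to all open UIs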
@@ -1182,7 +1201,7 @@ class UiWebsocket(object):
             return False
         if restart:
             main.restart_after_shutdown = True
-        main.file_server.stop()
+        main.file_server.stop(ui_websocket=self)
         main.ui_server.stop()

         if restart:
src/loglevel_overrides.py (9 lines, new file)
@@ -0,0 +1,9 @@
+# This file is for adding rules for selectively enabling debug logging
+# when working on the code.
+# Add your rules here and skip this file when committing changes.
+
+#import re
+#from util import SelectiveLogger
+#
+#SelectiveLogger.addLogLevelRaisingRule("ConnServer")
+#SelectiveLogger.addLogLevelRaisingRule(re.compile(r'^Site:'))
@@ -4,6 +4,7 @@ import sys
 import stat
 import time
 import logging
+import loglevel_overrides

 startup_errors = []
 def startupError(msg):

@@ -154,7 +155,7 @@ class Actions(object):

         logging.info("Starting servers....")
         gevent.joinall([gevent.spawn(ui_server.start), gevent.spawn(file_server.start)])
-        logging.info("All server stopped")
+        logging.info("All servers stopped")

         # Site commands

@@ -254,9 +255,8 @@ class Actions(object):
             file_correct = site.content_manager.verifyFile(
                 content_inner_path, site.storage.open(content_inner_path, "rb"), ignore_same=False
             )
-        except Exception as exp:
+        except Exception as err:
             file_correct = False
-            err = exp

         if file_correct is True:
             logging.info("[OK] %s (Done in %.3fs)" % (content_inner_path, time.time() - s))
src/util/CircularIterator.py (34 lines, new file)
@@ -0,0 +1,34 @@
+import random
+
+
+class CircularIterator:
+    def __init__(self):
+        self.successive_count = 0
+        self.last_size = 0
+        self.index = -1
+
+    def next(self, items):
+        self.last_size = len(items)
+
+        if self.last_size == 0:
+            return None
+
+        if self.index < 0:
+            self.index = random.randint(0, self.last_size)
+        else:
+            self.index += 1
+
+        self.index = self.index % self.last_size
+
+        self.successive_count += 1
+
+        return items[self.index]
+
+    def resetSuccessiveCount(self):
+        self.successive_count = 0
+
+    def getSuccessiveCount(self):
+        return self.successive_count
+
+    def isWrapped(self):
+        return self.successive_count >= self.last_size
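CircularIterator keeps its position between calls, starts from a random offset to spread load across items, and isWrapped() turns true once as many successive next() calls have happened as there are items. A short usage sketch (the peer list is illustrative):

    from CircularIterator import CircularIterator

    peers = ["peer-a", "peer-b", "peer-c"]
    iterator = CircularIterator()

    while True:
        peer = iterator.next(peers)   # round-robin, starting at a random index
        print(peer)
        if iterator.isWrapped():      # every current item visited once
            break

Note that isWrapped() must be checked after the first next() call; before any iteration both counters are zero, so it already reports true.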
|
@ -42,8 +42,6 @@ def patch(old_f, actions):
|
||||||
continue
|
continue
|
||||||
elif action == "+": # Add lines
|
elif action == "+": # Add lines
|
||||||
for add_line in param:
|
for add_line in param:
|
||||||
if type(add_line) is str:
|
|
||||||
add_line = add_line.encode()
|
|
||||||
new_f.write(add_line)
|
new_f.write(add_line)
|
||||||
else:
|
else:
|
||||||
raise "Unknown action: %s" % action
|
raise "Unknown action: %s" % action
|
||||||
|
|
|
@@ -3,17 +3,37 @@ from Debug import Debug


 class GreenletManager:
-    def __init__(self):
+    # pool is either gevent.pool.Pool or GreenletManager.
+    # if pool is None, new gevent.pool.Pool() is created.
+    def __init__(self, pool=None):
         self.greenlets = set()
+        if not pool:
+            pool = gevent.pool.Pool(None)
+        self.pool = pool
+
+    def _spawn_later(self, seconds, *args, **kwargs):
+        # If pool is another GreenletManager, delegate to it.
+        if hasattr(self.pool, 'spawnLater'):
+            return self.pool.spawnLater(seconds, *args, **kwargs)
+
+        # There's gevent.spawn_later(), but there isn't gevent.pool.Pool.spawn_later().
+        # Doing manually.
+        greenlet = self.pool.greenlet_class(*args, **kwargs)
+        self.pool.add(greenlet)
+        greenlet.start_later(seconds)
+        return greenlet
+
+    def _spawn(self, *args, **kwargs):
+        return self.pool.spawn(*args, **kwargs)

     def spawnLater(self, *args, **kwargs):
-        greenlet = gevent.spawn_later(*args, **kwargs)
+        greenlet = self._spawn_later(*args, **kwargs)
         greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
         self.greenlets.add(greenlet)
         return greenlet

     def spawn(self, *args, **kwargs):
-        greenlet = gevent.spawn(*args, **kwargs)
+        greenlet = self._spawn(*args, **kwargs)
         greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
         self.greenlets.add(greenlet)
         return greenlet
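With the new pool parameter, a GreenletManager can cap its own concurrency with a gevent.pool.Pool, or chain to another manager so both track the same greenlets. The _spawn_later() workaround is needed because gevent ships spawn_later() only at module level, not on Pool; constructing the pool's greenlet_class by hand, adding it, and calling start_later() reproduces it. A brief usage sketch (the import path is an assumption; in the repo the class lives in src/util):

    import gevent
    import gevent.pool
    from GreenletManager import GreenletManager  # assumed import path

    manager = GreenletManager(gevent.pool.Pool(10))  # at most 10 live greenlets

    def task(name):
        print("running", name)

    manager.spawn(task, "now")               # runs through the pool
    manager.spawnLater(0.5, task, "later")   # scheduled via greenlet_class/start_later
    gevent.sleep(1)  # give both greenlets time to finish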
@@ -1,10 +1,16 @@
 import re
+import logging
+
+log = logging.getLogger("SafeRe")
+


 class UnsafePatternError(Exception):
     pass

+max_cache_size = 1000
 cached_patterns = {}
+old_cached_patterns = {}


 def isSafePattern(pattern):

@@ -15,18 +21,78 @@ def isSafePattern(pattern):
     if unsafe_pattern_match:
         raise UnsafePatternError("Potentially unsafe part of the pattern: %s in %s" % (unsafe_pattern_match.group(0), pattern))

-    repetitions = re.findall(r"\.[\*\{\+]", pattern)
-    if len(repetitions) >= 10:
-        raise UnsafePatternError("More than 10 repetitions of %s in %s" % (repetitions[0], pattern))
+    repetitions1 = re.findall(r"\.[\*\{\+]", pattern)
+    repetitions2 = re.findall(r"[^(][?]", pattern)
+    if len(repetitions1) + len(repetitions2) >= 10:
+        raise UnsafePatternError("More than 10 repetitions in %s" % pattern)

     return True


-def match(pattern, *args, **kwargs):
+def compilePattern(pattern):
+    global cached_patterns
+    global old_cached_patterns
+
     cached_pattern = cached_patterns.get(pattern)
     if cached_pattern:
-        return cached_pattern.match(*args, **kwargs)
-    else:
-        if isSafePattern(pattern):
-            cached_patterns[pattern] = re.compile(pattern)
-            return cached_patterns[pattern].match(*args, **kwargs)
+        return cached_pattern
+
+    cached_pattern = old_cached_patterns.get(pattern)
+    if cached_pattern:
+        del old_cached_patterns[pattern]
+        cached_patterns[pattern] = cached_pattern
+        return cached_pattern
+
+    if isSafePattern(pattern):
+        cached_pattern = re.compile(pattern)
+        cached_patterns[pattern] = cached_pattern
+        log.debug("Compiled new pattern: %s" % pattern)
+        log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
+
+        if len(cached_patterns) > max_cache_size:
+            old_cached_patterns = cached_patterns
+            cached_patterns = {}
+            log.debug("Size limit reached. Rotating cache.")
+            log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
+
+        return cached_pattern
+
+
+def match(pattern, *args, **kwargs):
+    cached_pattern = compilePattern(pattern)
+    return cached_pattern.match(*args, **kwargs)
+
+
+################################################################################
+
+# TESTS
+
+def testSafePattern(pattern):
+    try:
+        return isSafePattern(pattern)
+    except UnsafePatternError as err:
+        return False
+
+
+# Some real examples to make sure it works as expected
+assert testSafePattern('(data/mp4/.*|updater/.*)')
+assert testSafePattern('((js|css)/(?!all.(js|css)))|.git')
+
+
+# Unsafe cases:
+
+# ((?!json).)*$ not allowed, because of ) before the * character. Possible fix: .*(?!json)$
+assert not testSafePattern('((?!json).)*$')
+assert testSafePattern('.*(?!json)$')
+
+# (.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot) not allowed, because it has 12 .* repetition patterns. Possible fix: .*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)
+assert not testSafePattern('(.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot)')
+assert testSafePattern('.*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)')
+
+# https://github.com/HelloZeroNet/ZeroNet/issues/2757
+assert not testSafePattern('a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+assert not testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}x.{0,1}')
+assert testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}')
+assert not testSafePattern('a?a?a?a?a?a?a?x.*x.*x.*')
+assert testSafePattern('a?a?a?a?a?a?a?x.*x.*')
+
+################################################################################
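compilePattern() above is a two-generation cache: when the hot dict exceeds max_cache_size it is demoted wholesale to the old generation, and a hit there promotes the entry back, so eviction costs O(1) with no per-entry bookkeeping. The same idea reduced to a standalone sketch (class and names are illustrative):

    class TwoGenerationCache:
        # When "hot" overflows it is demoted wholesale to "cold"; a hit in
        # "cold" promotes the entry back. Eviction is O(1) and needs no
        # per-entry bookkeeping, at the cost of coarse-grained recency.
        def __init__(self, max_size=1000):
            self.max_size = max_size
            self.hot = {}
            self.cold = {}

        def get(self, key, compute):
            value = self.hot.get(key)
            if value is not None:
                return value
            value = self.cold.pop(key, None)
            if value is None:
                value = compute(key)  # e.g. re.compile in SafeRe
            self.hot[key] = value
            if len(self.hot) > self.max_size:
                self.cold = self.hot
                self.hot = {}
            return value

compilePattern() is essentially this get() with compute = re.compile, guarded by the isSafePattern() check.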
src/util/SelectiveLogger.py (43 lines, new file)
@@ -0,0 +1,43 @@
+import logging
+import re
+
+log_level_raising_rules = []
+
+def addLogLevelRaisingRule(rule, level=None):
+    if level is None:
+        level = logging.INFO
+    log_level_raising_rules.append({
+        "rule": rule,
+        "level": level
+    })
+
+def matchLogLevelRaisingRule(name):
+    for rule in log_level_raising_rules:
+        if isinstance(rule["rule"], re.Pattern):
+            if rule["rule"].search(name):
+                return rule["level"]
+        else:
+            if rule["rule"] == name:
+                return rule["level"]
+    return None
+
+class SelectiveLogger(logging.getLoggerClass()):
+    def __init__(self, name, level=logging.NOTSET):
+        return super().__init__(name, level)
+
+    def raiseLevel(self, level):
+        raised_level = matchLogLevelRaisingRule(self.name)
+        if raised_level is not None:
+            if level < raised_level:
+                level = raised_level
+        return level
+
+    def isEnabledFor(self, level):
+        level = self.raiseLevel(level)
+        return super().isEnabledFor(level)
+
+    def _log(self, level, msg, args, **kwargs):
+        level = self.raiseLevel(level)
+        return super()._log(level, msg, args, **kwargs)
+
+logging.setLoggerClass(SelectiveLogger)
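The net effect: a DEBUG record on a logger whose name matches a rule is promoted to the rule's level (INFO by default), so it clears handlers that would normally drop it, while every other logger is untouched. A minimal usage sketch (the logger names are illustrative; the import mirrors loglevel_overrides.py above):

    import logging
    import re
    from util import SelectiveLogger  # importing registers the logger class

    logging.basicConfig(level=logging.INFO)  # DEBUG records are normally dropped

    SelectiveLogger.addLogLevelRaisingRule("ConnServer")           # exact name match
    SelectiveLogger.addLogLevelRaisingRule(re.compile(r"^Site:"))  # pattern match

    logging.getLogger("ConnServer").debug("visible: promoted to INFO")
    logging.getLogger("Other").debug("still hidden")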
@@ -1,4 +1,5 @@
 from .Cached import Cached
+from .CircularIterator import CircularIterator
 from .Event import Event
 from .Noparallel import Noparallel
 from .Pooled import Pooled
@@ -290,7 +290,8 @@ local_ip_pattern = re.compile(r"^127\.|192\.168\.|10\.|172\.1[6-9]\.|172\.2[0-9]
 def isPrivateIp(ip):
     return local_ip_pattern.match(ip)

+# XXX: Deprecated. Use ConnectionServer.getIpType() instead.
+# To be removed in 0.9.0
 def getIpType(ip):
     if ip.endswith(".onion"):
         return "onion"
trackers.txt (142 lines removed)
@@ -1,142 +0,0 @@
-udp://tracker.opentrackr.org:1337/announce
-udp://explodie.org:6969/announce
-udp://open.stealth.si:80/announce
-http://tracker.ipv6tracker.ru:80/announce
-udp://tracker.birkenwald.de:6969/announce
-udp://tracker.moeking.me:6969/announce
-http://tracker.bt4g.com:2095/announce
-https://tracker.nanoha.org:443/announce
-http://tracker.files.fm:6969/announce
-http://open.acgnxtracker.com:80/announce
-udp://tracker.army:6969/announce
-udp://fe.dealclub.de:6969/announce
-udp://tracker.leech.ie:1337/announce
-udp://tracker.altrosky.nl:6969/announce
-https://tracker.cyber-hub.net:443/announce
-https://tracker.lilithraws.cf:443/announce
-http://bt.okmp3.ru:2710/announce
-udp://vibe.sleepyinternetfun.xyz:1738/announce
-udp://open.publictracker.xyz:6969/announce
-udp://tracker.bitsearch.to:1337/announce
-udp://tracker.pomf.se:80/announce
-https://tr.burnabyhighstar.com:443/announce
-https://tr.abiir.top:443/announce
-udp://open.free-tracker.ga:6969/announce
-http://i-p-v-6.tk:6969/announce
-http://open-v6.demonoid.ch:6969/announce
-udp://aarsen.me:6969/announce
-udp://htz3.noho.st:6969/announce
-udp://uploads.gamecoast.net:6969/announce
-udp://mail.zasaonsk.ga:6969/announce
-udp://tracker.joybomb.tw:6969/announce
-udp://tracker.jonaslsa.com:6969/announce
-udp://leefafa.tk:6969/announce
-udp://carr.codes:6969/announce
-https://tr.fuckbitcoin.xyz:443/announce
-udp://tracker.cubonegro.xyz:6969/announce
-udp://tracker.skynetcloud.site:6969/announce
-http://tracker4.itzmx.com:2710/announce
-https://tracker.lilithraws.org:443/announce
-udp://tracker.novaopcj.eu.org:6969/announce
-udp://exodus.desync.com:6969/announce
-http://t.acg.rip:6699/announce
-udp://tracker2.dler.com:80/announce
-udp://6ahddutb1ucc3cp.ru:6969/announce
-udp://tracker.blacksparrowmedia.net:6969/announce
-http://fxtt.ru:80/announce
-udp://tracker.auctor.tv:6969/announce
-udp://torrentclub.space:6969/announce
-udp://zecircle.xyz:6969/announce
-udp://psyco.fr:6969/announce
-udp://fh2.cmp-gaming.com:6969/announce
-udp://new-line.net:6969/announce
-udp://torrents.artixlinux.org:6969/announce
-udp://bt.ktrackers.com:6666/announce
-udp://static.54.161.216.95.clients.your-server.de:6969/announce
-udp://cpe-104-34-3-152.socal.res.rr.com:6969/announce
-http://t.overflow.biz:6969/announce
-udp://tracker1.myporn.club:9337/announce
-udp://moonburrow.club:6969/announce
-udp://tracker.artixlinux.org:6969/announce
-https://t1.hloli.org:443/announce
-udp://bt1.archive.org:6969/announce
-udp://tracker.theoks.net:6969/announce
-udp://tracker.4.babico.name.tr:3131/announce
-udp://buddyfly.top:6969/announce
-udp://ipv6.tracker.harry.lu:80/announce
-udp://public.publictracker.xyz:6969/announce
-udp://mail.artixlinux.org:6969/announce
-udp://v1046920.hosted-by-vdsina.ru:6969/announce
-udp://tracker.cyberia.is:6969/announce
-udp://tracker.beeimg.com:6969/announce
-udp://creative.7o7.cx:6969/announce
-udp://open.dstud.io:6969/announce
-udp://laze.cc:6969/announce
-udp://download.nerocloud.me:6969/announce
-udp://cutscloud.duckdns.org:6969/announce
-https://tracker.jiesen.life:8443/announce
-udp://jutone.com:6969/announce
-udp://wepzone.net:6969/announce
-udp://ipv4.tracker.harry.lu:80/announce
-udp://tracker.tcp.exchange:6969/announce
-udp://f1sh.de:6969/announce
-udp://movies.zsw.ca:6969/announce
-https://tracker1.ctix.cn:443/announce
-udp://sanincode.com:6969/announce
-udp://www.torrent.eu.org:451/announce
-udp://open.4ever.tk:6969/announce
-https://tracker2.ctix.cn:443/announce
-udp://bt2.archive.org:6969/announce
-http://t.nyaatracker.com:80/announce
-udp://yahor.ftp.sh:6969/announce
-udp://tracker.openbtba.com:6969/announce
-udp://tracker.dler.com:6969/announce
-udp://tracker-udp.gbitt.info:80/announce
-udp://tracker.srv00.com:6969/announce
-udp://tracker.pimpmyworld.to:6969/announce
-http://tracker.gbitt.info:80/announce
-udp://tracker6.lelux.fi:6969/announce
-http://tracker.vrpnet.org:6969/announce
-http://00.xxtor.com:443/announce
-http://vps02.net.orel.ru:80/announce
-udp://tracker.yangxiaoguozi.cn:6969/announce
-udp://rep-art.ynh.fr:6969/announce
-https://tracker.imgoingto.icu:443/announce
-udp://mirror.aptus.co.tz:6969/announce
-udp://tracker.lelux.fi:6969/announce
-udp://tracker.torrent.eu.org:451/announce
-udp://admin.52ywp.com:6969/announce
-udp://thouvenin.cloud:6969/announce
-http://vps-dd0a0715.vps.ovh.net:6969/announce
-udp://bubu.mapfactor.com:6969/announce
-udp://94-227-232-84.access.telenet.be:6969/announce
-udp://epider.me:6969/announce
-udp://camera.lei001.com:6969/announce
-udp://tamas3.ynh.fr:6969/announce
-https://tracker.tamersunion.org:443/announce
-udp://ftp.pet:2710/announce
-udp://p4p.arenabg.com:1337/announce
-http://tracker.mywaifu.best:6969/announce
-udp://tracker.monitorit4.me:6969/announce
-udp://ipv6.tracker.monitorit4.me:6969/announce
-zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441
-zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441
-zero://5vczpwawviukvd7grfhsfxp7a6huz77hlis4fstjkym5kmf4pu7i7myd.onion:15441
-zero://pn4q2zzt2pw4nk7yidxvsxmydko7dfibuzxdswi6gu6ninjpofvqs2id.onion:15441
-zero://6i54dd5th73oelv636ivix6sjnwfgk2qsltnyvswagwphub375t3xcad.onion:15441
-zero://tl74auz4tyqv4bieeclmyoe4uwtoc2dj7fdqv4nc4gl5j2bwg2r26bqd.onion:15441
-zero://wlxav3szbrdhest4j7dib2vgbrd7uj7u7rnuzg22cxbih7yxyg2hsmid.onion:15441
-zero://zy7wttvjtsijt5uwmlar4yguvjc2gppzbdj4v6bujng6xwjmkdg7uvqd.onion:15441
-zero://rlcjomszyitxpwv7kzopmqgzk3bdpsxeull4c3s6goszkk6h2sotfoad.onion:15441
-zero://gugt43coc5tkyrhrc3esf6t6aeycvcqzw7qafxrjpqbwt4ssz5czgzyd.onion:15441
-zero://ow7in4ftwsix5klcbdfqvfqjvimqshbm2o75rhtpdnsderrcbx74wbad.onion:15441
-zero://57hzgtu62yzxqgbvgxs7g3lfck3za4zrda7qkskar3tlak5recxcebyd.onion:15445
-zero://hb6ozikfiaafeuqvgseiik4r46szbpjfu66l67wjinnyv6dtopuwhtqd.onion:15445
-zero://qn65si4gtcwdiliq7vzrwu62qrweoxb6tx2cchwslaervj6szuje66qd.onion:26117
-zero://s3j2s5pjdfesbsmaqx6alsumaxxdxibmhv4eukmqpv3vqj6f627qx5yd.onion:15441
-zero://agufghdtniyfwty3wk55drxxwj2zxgzzo7dbrtje73gmvcpxy4ngs4ad.onion:15441
-zero://kgsvasoakvj4gnjiy7zemu34l3hq46dn5eauqkn76jpowmilci5t2vqd.onion:15445
-zero://dslesoe72bdfwfu4cfqa2wpd4hr3fhlu4zv6mfsjju5xlpmssouv36qd.onion:15441
-zero://f2hnjbggc3c2u2apvxdugirnk6bral54ibdoul3hhvu7pd4fso5fq3yd.onion:15441
-zero://skdeywpgm5xncpxbbr4cuiip6ey4dkambpanog6nruvmef4f3e7o47qd.onion:15441
-zero://tqmo2nffqo4qc5jgmz3me5eri3zpgf3v2zciufzmhnvznjve5c3argad.onion:15441
@@ -66,7 +66,7 @@ def displayErrorMessage(err, error_log_path):
     res = ctypes.windll.user32.MessageBoxW(0, err_title, "ZeroNet error", MB_YESNOCANCEL | MB_ICONEXCLAIMATION)
     if res == ID_YES:
         import webbrowser
-        report_url = "https://github.com/ZeroNetX/ZeroNet/issues/new?assignees=&labels=&template=bug-report.md&title=%s"
+        report_url = "https://github.com/HelloZeroNet/ZeroNet/issues/new?assignees=&labels=&template=bug-report.md&title=%s"
         webbrowser.open(report_url % urllib.parse.quote("Unhandled exception: %s" % err_message))
     if res in [ID_YES, ID_NO]:
         subprocess.Popen(['notepad.exe', error_log_path])