Compare commits


No commits in common. "py3-latest" and "py3-patches" have entirely different histories.

261 changed files with 62 additions and 36250 deletions
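For readers who want to reproduce this comparison locally, the following is a minimal sketch (it assumes a local clone with a remote named `origin` that carries both branches):

```sh
# Fetch both branches from the remote (assumed to be named "origin").
git fetch origin

# git merge-base prints the best common ancestor; with completely
# unrelated histories it prints nothing and exits non-zero.
git merge-base origin/py3-latest origin/py3-patches || echo "no common ancestor"

# Reproduce the "changed files / additions / deletions" summary.
git diff --shortstat origin/py3-latest origin/py3-patches

# Per-file breakdown of the changes listed below.
git diff --stat origin/py3-latest origin/py3-patches
```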


@@ -1,40 +0,0 @@
name: Build Docker Image on Commit
on:
  push:
    branches:
      - main
    tags:
      - '!'  # Exclude tags
jobs:
  build-and-publish:
    runs-on: docker-builder
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set REPO_VARS
        id: repo-url
        run: |
          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV

      - name: Login to OCI registry
        run: |
          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin

      - name: Build and push Docker images
        run: |
          # Build Docker image with commit SHA
          docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
          docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}

          # Build Docker image with nightly tag
          docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
          docker push $REPO_HOST/$REPO_PATH:nightly

          # Remove local images to save storage
          docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
          docker rmi $REPO_HOST/$REPO_PATH:nightly


@@ -1,37 +0,0 @@
name: Build and Publish Docker Image on Tag
on:
  push:
    tags:
      - '*'
jobs:
  build-and-publish:
    runs-on: docker-builder
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set REPO_VARS
        id: repo-url
        run: |
          echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
          echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV

      - name: Login to OCI registry
        run: |
          echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin

      - name: Build and push Docker image
        run: |
          TAG=${{ github.ref_name }}  # Get the tag name from the context

          # Build and push multi-platform Docker images
          docker build -t $REPO_HOST/$REPO_PATH:$TAG --push .

          # Tag and push latest
          docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
          docker push $REPO_HOST/$REPO_PATH:latest

          # Remove the local image to save storage
          docker rmi $REPO_HOST/$REPO_PATH:$TAG
          docker rmi $REPO_HOST/$REPO_PATH:latest

.github/FUNDING.yml vendored

@@ -1,10 +0,0 @@
github: canewsin
patreon: # Replace with a single Patreon username e.g., user1
open_collective: # Replace with a single Open Collective username e.g., user1
ko_fi: canewsin
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: canewsin
issuehunt: # Replace with a single IssueHunt username e.g., user1
otechie: # Replace with a single Otechie username e.g., user1
custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']


@@ -1,33 +0,0 @@
---
name: Bug report
about: Create a report to help us improve ZeroNet
title: ''
labels: ''
assignees: ''
---
### Step 1: Please describe your environment
* ZeroNet version: _____
* Operating system: _____
* Web browser: _____
* Tor status: not available/always/disabled
* Opened port: yes/no
* Special configuration: ____
### Step 2: Describe the problem:
#### Steps to reproduce:
1. _____
2. _____
3. _____
#### Observed Results:
* What happened? This could be a screenshot, a description, log output (you can send log/debug.log file to hello@zeronet.io if necessary), etc.
#### Expected Results:
* What did you expect to happen?


@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for ZeroNet
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -1,72 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ py3-latest ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ py3-latest ]
  schedule:
    - cron: '32 19 * * 2'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      # If the Autobuild fails above, remove it and uncomment the following three lines.
      # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.

      # - run: |
      #     echo "Run, Build Application using script"
      #     ./location_of_script_within_repo/buildscript.sh

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2


@@ -1,51 +0,0 @@
name: tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-20.04
    strategy:
      max-parallel: 16
      matrix:
        python-version: ["3.7", "3.8", "3.9"]

    steps:
      - name: Checkout ZeroNet
        uses: actions/checkout@v2
        with:
          submodules: "true"

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Prepare for installation
        run: |
          python3 -m pip install setuptools
          python3 -m pip install --upgrade pip wheel
          python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium

      - name: Install
        run: |
          python3 -m pip install --upgrade -r requirements.txt
          python3 -m pip list

      - name: Prepare for tests
        run: |
          openssl version -a
          echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6

      - name: Test
        run: |
          catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
          export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
          export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
          export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
          export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
          export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
          export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
          find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
          find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
          flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
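The same checks can be run outside CI with roughly the commands below (a sketch mirroring the workflow above; it assumes a Python 3 checkout of the repository with submodules initialized):

```sh
# Tooling installed by the "Prepare for installation" and "Install" steps
python3 -m pip install --upgrade pip wheel setuptools
python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
python3 -m pip install --upgrade -r requirements.txt

# Core test suite with coverage, as in the "Test" step
python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini

# Same flake8 error selection as the workflow
flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
```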

.gitignore vendored

@@ -1,36 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# Log files
**/*.log
# Hidden files
.*
!/.forgejo
!/.github
!/.gitignore
!/.travis.yml
!/.gitlab-ci.yml
# Temporary files
*.bak
# Data dir
data/*
*.db
# Virtualenv
env/*
# Tor data
tools/tor/data
# PhantomJS, downloaded manually for unit tests
tools/phantomjs
# ZeroNet config file
zeronet.conf
# ZeroNet log files
log/*


@@ -1,48 +0,0 @@
stages:
  - test

.test_template: &test_template
  stage: test
  before_script:
    - pip install --upgrade pip wheel
    # Selenium and requests can't be installed without a requests hint on Python 3.4
    - pip install --upgrade requests>=2.22.0
    - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
    - pip install --upgrade -r requirements.txt
  script:
    - pip list
    - openssl version -a
    - python -m pytest -x plugins/CryptMessage/Test --color=yes
    - python -m pytest -x plugins/Bigfile/Test --color=yes
    - python -m pytest -x plugins/AnnounceLocal/Test --color=yes
    - python -m pytest -x plugins/OptionalManager/Test --color=yes
    - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini --color=yes
    - mv plugins/disabled-Multiuser plugins/Multiuser
    - python -m pytest -x plugins/Multiuser/Test --color=yes
    - mv plugins/disabled-Bootstrapper plugins/Bootstrapper
    - python -m pytest -x plugins/Bootstrapper/Test --color=yes
    - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/

test:py3.4:
  image: python:3.4.3
  <<: *test_template

test:py3.5:
  image: python:3.5.7
  <<: *test_template

test:py3.6:
  image: python:3.6.9
  <<: *test_template

test:py3.7-openssl1.1.0:
  image: python:3.7.0b5
  <<: *test_template

test:py3.7-openssl1.1.1:
  image: python:3.7.4
  <<: *test_template

test:py3.8:
  image: python:3.8.0b3
  <<: *test_template

.gitmodules vendored

@@ -1,3 +0,0 @@
[submodule "plugins"]
	path = plugins
	url = https://github.com/ZeroNetX/ZeroNet-Plugins.git
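This is the submodule that the CI checkouts pull in with `submodules: "true"`; the local equivalent (a minimal sketch) is:

```sh
# Fetch the plugins submodule defined above into ./plugins
git submodule update --init plugins
```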


@@ -1,47 +0,0 @@
language: python
python:
  - 3.4
  - 3.5
  - 3.6
  - 3.7
  - 3.8
services:
  - docker
cache: pip
before_install:
  - pip install --upgrade pip wheel
  - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
  # - docker build -t zeronet .
  # - docker run -d -v $PWD:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 zeronet
install:
  - pip install --upgrade -r requirements.txt
  - pip list
before_script:
  - openssl version -a
  # Add an IPv6 config - see the corresponding Travis issue
  # https://github.com/travis-ci/travis-ci/issues/8361
  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
      sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6';
    fi
script:
  - catchsegv python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
  - export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python -m pytest -x plugins/CryptMessage/Test
  - export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python -m pytest -x plugins/Bigfile/Test
  - export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python -m pytest -x plugins/AnnounceLocal/Test
  - export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python -m pytest -x plugins/OptionalManager/Test
  - export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
  - export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
  - find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
  - find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
  - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
after_failure:
  - zip -r log.zip log/
  - curl --upload-file ./log.zip https://transfer.sh/log.zip
after_success:
  - codecov
  - coveralls --rcfile=src/Test/coverage.ini
notifications:
  email:
    recipients:
      hello@zeronet.io
    on_success: change


@@ -1,649 +0,0 @@
### ZeroNet 0.9.0 (2023-07-12) Rev4630
- Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
- Add trackers to Config.py as a failsafe in case trackers.txt is missing
- Added Proxy links
- Fix pysha3 dep installation issue
- FileRequest -> Remove Unnecessary check, Fix error wording
- Fix Response when site is missing for `actionAs`
### ZeroNet 0.8.5 (2023-02-12) Rev4625
- Fix (https://github.com/ZeroNetX/ZeroNet/pull/202) for SSL cert generation failing on Windows.
- Default theme-class for missing value in `users.json`.
- Fetch Stats Plugin changes.
### ZeroNet 0.8.4 (2022-12-12) Rev4620
- Increase Minimum Site size to 25MB.
### ZeroNet 0.8.3 (2022-12-11) Rev4611
- main.py -> Fix accessing unassigned variable
- ContentManager -> Support for multiSig
- SiteStorage.py -> Fix accessing unassigned variable
- ContentManager.py Improve Logging of Valid Signers
### ZeroNet 0.8.2 (2022-11-01) Rev4610
- Fix Startup Error when plugins dir missing
- Move trackers to separate file & Add more trackers
- Config:: Skip loading missing tracker files
- Added documentation for getRandomPort fn
### ZeroNet 0.8.1 (2022-10-01) Rev4600
- fix readdress loop (cherry-pick previously added commit from conservancy)
- Remove Patreon badge
- Update README-ru.md (#177)
- Include inner_path of failed request for signing in error msg and response
- Don't Fail Silently When Cert is Not Selected
- Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
- Update FUNDING.yml
### ZeroNet 0.8.0 (2022-05-27) Rev4591
- Revert File Open to catch File Access Errors.
### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
- Use xescape(s) from zeronet-conservancy
- actionUpdate response Optimisation
- Fetch Plugins Repo Updates
- Fix Unhandled File Access Errors
- Create codeql-analysis.yml
### ZeroNet 0.7.9 (2022-05-26) Rev4585
- Rust Version Compatibility for update Protocol msg
- Removed non-working trackers.
- Dynamically Load Trackers from Dashboard Site.
- Tracker Supply Improvements.
- Fix Repo Url for Bug Report
- First Party Tracker Update Service using Dashboard Site.
- remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
### ZeroNet 0.7.8 (2022-03-02) Rev4580
- Update Plugins with some bug fixes and Improvements
### ZeroNet 0.7.6 (2022-01-12) Rev4565
- Sync Plugin Updates
- Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
- Add More Default Plugins to Repo
- Doubled Site Publish Limits
- Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
- UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
- Moved Plugins to Separate Repo
- Added `access_key` variable in Config; it is used to access restricted plugins when the Multiuser plugin is enabled. When the Multiuser plugin is enabled, some pages such as /Stats cannot be accessed; this key removes that restriction.
- Added `last_connection_id_current_version` to ConnectionServer, helpful to estimate the number of connections from the current client version.
- Added current version: connections to the /Stats page (see the previous point).
### ZeroNet 0.7.5 (2021-11-28) Rev4560
- Add more default trackers
- Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
- Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
### ZeroNet 0.7.3 (2021-11-28) Rev4555
- Fix xrange is undefined error
- Fix Incorrect viewport on mobile while loading
- Tor-V3 Patch by anonymoose
### ZeroNet 0.7.1 (2019-07-01) Rev4206
### Added
- Built-in logging console in the web UI to see what's happening in the background. (pull down top-right 0 button to see it)
- Display database rebuild errors [Thanks to Lola]
- New plugin system that allows installing and managing built-in/third-party extensions to the ZeroNet client using the web interface.
- Support multiple trackers_file
- Add OpenSSL 1.1 support to CryptMessage plugin based on Bitmessage modifications [Thanks to radfish]
- Display visual error message on startup errors
- Fix max opened files changing on Windows platform
- Display TLS1.3 compatibility on /Stats page
- Add fake SNI and ALPN to peer connections to make it more like standard https connections
- Hide and ignore tracker_proxy setting in Tor: Always mode as it's going to use Tor anyway.
- Deny websocket connections from unknown origins
- Restrict open_browser values to avoid RCE on sandbox escape
- Offer access web interface by IP address in case of unknown host
- Link to site's sidebar with "#ZeroNet:OpenSidebar" hash
### Changed
- Allow .. in file names [Thanks to imachug]
- Change unstable trackers
- More clean errors on sites.json/users.json load error
- Various tweaks for tracker rating on unstable connections
- Use OpenSSL 1.1 dlls from default Python Windows distribution if possible
- Re-factor domain resolving for easier domain plugins
- Disable UDP connections if --proxy is used
- New, decorator-based Websocket API permission system to avoid future typo mistakes
### Fixed
- Fix parsing config lines that have no value
- Fix start.py [Thanks to imachug]
- Allow multiple values of the same key in the config file [Thanks ssdifnskdjfnsdjk for reporting]
- Fix parsing config file lines that have % in the value [Thanks slrslr for reporting]
- Fix bootstrapper plugin hash reloads [Thanks geekless for reporting]
- Fix CryptMessage plugin OpenSSL dll loading on Windows (ZeroMail errors) [Thanks cxgreat2014 for reporting]
- Fix startup error when using OpenSSL 1.1 [Thanks to imachug]
- Fix a bug where merged site data was not loaded for 5 sec after the merged site was added
- Fix typo that allowed adding new plugins in public proxy mode. [Thanks styromaniac for reporting]
- Fix loading non-big files with "|all" postfix [Thanks to krzotr]
- Fix OpenSSL cert generation error crash by changing Windows console encoding to utf8
#### Wrapper html injection vulnerability [Reported by ivanq]
In ZeroNet before rev4188 the wrapper template variables were rendered incorrectly.
Result: The opened site was able to gain a WebSocket connection with unrestricted ADMIN/NOSANDBOX access, change configuration values, and potentially achieve RCE on the client's machine.
Fix: Fixed the template rendering code, disallowed WebSocket connections from unknown locations, restricted open_browser configuration values to avoid possible RCE in case of sandbox escape.
Note: The fix is also back ported to ZeroNet Py 2.x version (Rev3870)
### ZeroNet 0.7.0 (2019-06-12) Rev4106 (First release targeting Python 3.4+)
### Added
- 5-10x faster signature verification by using libsecp256k1 (Thanks to ZeroMux)
- Generated SSL certificate randomization to avoid protocol filters (Thanks to ValdikSS)
- Offline mode
- P2P source code update using ZeroNet protocol
- ecdsaSign/Verify commands to CryptMessage plugin (Thanks to imachug)
- Efficient file rename: change file names instead of re-downloading the file.
- Make redirect optional on site cloning (Thanks to Lola)
- EccPrivToPub / EccPubToPriv functions (Thanks to imachug)
- Detect and change dark/light theme based on OS setting (Thanks to filips123)
### Changed
- Re-factored code to Python3 runtime (compatible with Python 3.4-3.8)
- More safe database sync mode
- Removed bundled third-party libraries where it's possible
- Use lang=en instead of lang={lang} in urls to avoid url encode problems
- Remove environment details from error page
- Don't push content.json updates larger than 10kb to significantly reduce bw usage for site with many files
### Fixed
- Fix sending files with \0 characters
- Security fix: Escape error detail to avoid XSS (reported by krzotr)
- Fix signature verification using libsecp256k1 for compressed addresses (mostly certificates generated in the browser)
- Fix newsfeed if you have more than 1000 followed topic/post on one site.
- Fix site download as zip file
- Fix displaying sites with utf8 title
- Error message if dbRebuild fails (Thanks to Lola)
- Fix browser reopen if executing start.py again. (Thanks to imachug)
### ZeroNet 0.6.5 (2019-02-16) Rev3851 (Last release targeting Python 2.7.x)
### Added
- IPv6 support in peer exchange, bigfiles, optional file finding, tracker sharing, socket listening and connecting (based on tangdou1 modifications)
- New tracker database format with IPv6 support
- Display notification if there is an unpublished modification for your site
- Listen and shut down normally for SIGTERM (Thanks to blurHY)
- Support tilde `~` in filenames (by d14na)
- Support map for Namecoin subdomain names (Thanks to lola)
- Add log level to config page
- Support `{data}` for data dir variable in trackers_file value
- Quick check content.db on startup and rebuild if necessary
- Don't show meek proxy option if the tor client does not support it
### Changed
- Refactored port open checking with IPv6 support
- Consider non-local IPs as external even if the open port check fails (for CJDNS and Yggdrasil support)
- Add IPv6 tracker and change unstable tracker
- Don't correct sent local time with the calculated time correction
- Disable CSP for Edge
- Only support CREATE commands in dbschema indexes node and SELECT from storage.query
### Fixed
- Check the length of master seed when executing cryptGetPrivatekey CLI command
- Only reload source code on file modification / creation
- Detect and issue a warning for the latest no-script plugin
- Fix atomic write of a non-existent file
- Fix sql queries with lots of variables and sites with lots of content.json
- Fix multi-line parsing of zeronet.conf
- Fix site deletion from users.json
- Fix site cloning before site downloaded (Reported by unsystemizer)
- Fix queryJson for non-list nodes (Reported by MingchenZhang)
## ZeroNet 0.6.4 (2018-10-20) Rev3660
### Added
- New plugin: UiConfig. A web interface that allows changing ZeroNet settings.
- New plugin: AnnounceShare. Share trackers between users, automatically announce client's ip as tracker if Bootstrapper plugin is enabled.
- Global tracker stats on ZeroHello: Include statistics from all served sites instead of displaying request statistics only for one site.
- Support custom proxy for trackers. (Configurable with /Config)
- Adding peers to sites manually using zeronet_peers get parameter
- Copy site address with peers link on the sidebar.
- Zip file listing and streaming support for Bigfiles.
- Tracker statistics on /Stats page
- Peer reputation save/restore to speed up sync time after startup.
- Full support fileGet, fileList, dirList calls on tar.gz/zip files.
- Archived_before support to user content rules to allow deletion of all user files before the specified date
- Show and manage "Connecting" sites on ZeroHello
- Add theme support to ZeroNet sites
- Dark theme for ZeroHello, ZeroBlog, ZeroTalk
### Changed
- Dynamic big file allocation: More efficient storage usage by not pre-allocating the whole file at the beginning, but expanding the size as the content downloads.
- Reduce the request frequency to unreliable trackers.
- Only allow 5 concurrent checkSites to run in parallel to reduce load under Tor/slow connection.
- Stop site downloading if it reached 95% of site limit to avoid download loop for sites out of limit
- Pinned optional files won't be removed from the download queue after 30 retries and won't be deleted even if the site owner removes them.
- Don't remove incomplete (downloading) sites on startup
- Remove --pin_bigfile argument as big files are automatically excluded from optional files limit.
### Fixed
- Trayicon compatibility with latest gevent
- Request number counting for zero:// trackers
- Peer reputation boost for zero:// trackers.
- Blocklist of peers loaded from peerdb (Thanks tangdou1 for report)
- Sidebar map loading on foreign languages (Thx tangdou1 for report)
- FileGet on non-existent files (Thanks mcdev for reporting)
- Peer connecting bug for sites with low amount of peers
#### "The Vacation" Sandbox escape bug [Reported by GitCenter / Krixano / ZeroLSTN]
In ZeroNet 0.6.3 Rev3615 and earlier as a result of invalid file type detection, a malicious site could escape the iframe sandbox.
Result: Browser iframe sandbox escape
Applied fix: Replaced the previous, file extension based file type identification with a proper one.
Affected versions: All versions before ZeroNet Rev3616
## ZeroNet 0.6.3 (2018-06-26)
### Added
- New plugin: ContentFilter that allows to have shared site and user block list.
- Support Tor meek proxies to avoid tracker blocking of GFW
- Detect network level tracker blocking and easy setting meek proxy for tracker connections.
- Support downloading 2GB+ sites as .zip (Thx to Radtoo)
- Support ZeroNet as a transparent proxy (Thx to JeremyRand)
- Allow fileQuery as CORS command (Thx to imachug)
- Windows distribution includes Tor and meek client by default
- Download sites as zip link to sidebar
- File server port randomization
- Implicit SSL for all connections
- fileList API command for zip files
- Auto download bigfiles size limit on sidebar
- Local peer number to the sidebar
- Open site directory button in sidebar
### Changed
- Switched to Azure Tor meek proxy as Amazon one became unavailable
- Refactored/rewritten tracker connection manager
- Improved peer discovery for optional files without opened port
- Also delete Bigfile's piecemap on deletion
### Fixed
- Important security issue: Iframe sandbox escape [Reported by Ivanq / gitcenter]
- Local peer discovery when running multiple clients on the same machine
- Uploading small files with Bigfile plugin
- Ctrl-c shutdown when running CLI commands
- High CPU/IO usage when Multiuser plugin enabled
- Firefox back button
- Peer discovery on older Linux kernels
- Optional file handling when multiple files have the same hash_id (first 4 chars of the hash)
- Msgpack 0.5.5 and 0.5.6 compatibility
## ZeroNet 0.6.2 (2018-02-18)
### Added
- New plugin: AnnounceLocal to make ZeroNet work without an internet connection on the local network.
- Allow dbQuery and userGetSettings using the `as` API command on different sites with Cors permission
- New config option: `--log_level` to reduce log verbosity and IO load
- Prefer to connect to recent peers from trackers first
- Mark peers with port 1 as also unconnectable, as a future fix for trackers that do not support port 0 announce
### Changed
- Don't keep connection for sites that have not been modified in the last week
- Change unreliable trackers to new ones
- Send maximum 10 findhash request in one find optional files round (15sec)
- Change "Unique to site" to "No certificate" for default option in cert selection dialog.
- Don't print warnings if not in debug mode
- Generalized tracker logging format
- Only recover sites from sites.json if they had peers
- A message from local peers does not imply an internet connection
- Removed `--debug_gevent` and turned on Gevent block logging by default
### Fixed
- Limit connections to 512 to avoid reaching 1024 limit on windows
- Exception when logging foreign operating system socket errors
- Don't send private (local) IPs on pex
- Don't connect to private IPs in tor always mode
- Properly recover data from msgpack unpacker on file stream start
- Symlinked data directory deletion when deleting site using Windows
- De-duplicate peers before publishing
- Bigfile info for non-existing files
## ZeroNet 0.6.1 (2018-01-25)
### Added
- New plugin: Chart
- Collect and display charts about your contribution to ZeroNet network
- Allow list as argument replacement in sql queries. (Thanks to imachug)
- Newsfeed query time statistics (Click on "From XX sites in X.Xs" on ZeroHello)
- New UiWebsocket API command: As to run commands as other site
- Ranged ajax queries for big files
- Filter feed by type and site address
- FileNeed, Bigfile upload command compatibility with merger sites
- Send event on port open / tor status change
- More description on permission request
### Changed
- Reduce memory usage of sidebar geoip database cache
- Change unreliable tracker to new one
- Don't display Cors permission ask if it already granted
- Avoid UI blocking when rebuilding a merger site
- Skip listing ignored directories on signing
- In Multiuser mode show the seed welcome message when adding new certificate instead of first visit
- Faster async port opening on multiple network interfaces
- Allow javascript modals
- Only zoom sidebar globe if mouse button is pressed down
### Fixed
- Open port checking error reporting (Thanks to imachug)
- Out-of-range big file requests
- Don't output errors happened on gevent greenlets twice
- Newsfeed skip sites with no database
- Newsfeed queries with multiple params
- Newsfeed queries with UNION and UNION ALL
- Fix site clone with sites larger than 10MB
- Unreliable Websocket connection when requesting files from different sites at the same time
## ZeroNet 0.6.0 (2017-10-17)
### Added
- New plugin: Big file support
- Automatic pinning on Big file download
- Enable TCP_NODELAY for supporting sockets
- actionOptionalFileList API command arguments to list non-downloaded files or only big files
- serverShowdirectory API command arguments to allow displaying the site's directory in the OS file browser
- fileNeed API command to initialize optional file downloading
- wrapperGetAjaxKey API command to request nonce for AJAX request
- Json.gz support for database files
- P2P port checking (Thanks to grez911)
- `--download_optional auto` argument to enable automatic optional file downloading for newly added site
- Statistics for big files and protocol command requests on /Stats
- Allow to set user limitation based on auth_address
### Changed
- More aggressive and frequent connection timeout checking
- Use out of msgpack context file streaming for files larger than 512KB
- Allow optional files workers over the worker limit
- Automatic redirection to wrapper on nonce_error
- Send websocket event on optional file deletion
- Optimize sites.json saving
- Enable faster C-based msgpack packer by default
- Major optimization on Bootstrapper plugin SQL queries
- Don't reset bad file counter on restart, to allow easier give up on unreachable files
- Incoming connection limit changed from 1000 to 500 to avoid reaching socket limit on Windows
- Changed tracker boot.zeronet.io domain, because zeronet.io got banned in some countries
#### Fixed
- Sub-directories in user directories
## ZeroNet 0.5.7 (2017-07-19)
### Added
- New plugin: CORS to request read permission to other site's content
- New API command: userSetSettings/userGetSettings to store site's settings in users.json
- Avoid file download if the file size does not match with the requested one
- JavaScript and wrapper less file access using /raw/ prefix ([Example](http://127.0.0.1:43110/raw/1AsRLpuRxr3pb9p3TKoMXPSWHzh6i7fMGi/en.tar.gz/index.html))
- --silent command line option to disable logging to stdout
### Changed
- Better error reporting on sign/verification errors
- More test for sign and verification process
- Update to OpenSSL v1.0.2l
- Limit compressed files to 6MB to avoid zip/tar.gz bomb
- Allow space, [], () characters in filenames
- Disable cross-site resource loading to improve privacy. [Reported by Beardog108]
- Download directly accessed Pdf/Svg/Swf files instead of displaying them to avoid wrapper escape using in JS in SVG file. [Reported by Beardog108]
- Disallow potentially unsafe regular expressions to avoid ReDoS [Reported by MuxZeroNet]
### Fixed
- Detecting data directory when running Windows distribution exe [Reported by Plasmmer]
- OpenSSL loading under Android 6+
- Error on exiting when no connection server started
## ZeroNet 0.5.6 (2017-06-15)
### Added
- Callback for certSelect API command
- More compact list formatting in json
### Changed
- Remove obsolete auth_key_sha512 and signature format
- Improved Spanish translation (Thanks to Pupiloho)
### Fixed
- Opened port checking (Thanks l5h5t7 & saber28 for reporting)
- Standalone update.py argument parsing (Thanks Zalex for reporting)
- uPnP crash on startup (Thanks Vertux for reporting)
- CoffeeScript 1.12.6 compatibility (Thanks kavamaken & imachug)
- Multi value argument parsing
- Database error when running from directory that contains special characters (Thanks Pupiloho for reporting)
- Site lock violation logging
#### Proxy bypass during source upgrade [Reported by ZeroMux]
In ZeroNet before 0.5.6 during the client's built-in source code upgrade mechanism,
ZeroNet did not respect Tor and/or proxy settings.
Result: ZeroNet downloaded the update without using the Tor network and potentially leaked the connections.
Fix: Removed the problematic code line from the updater that removed the proxy settings from the socket library.
Affected versions: ZeroNet 0.5.5 and earlier, Fixed in: ZeroNet 0.5.6
#### XSS vulnerability using DNS rebinding. [Reported by Beardog108]
In ZeroNet before 0.5.6 the web interface did not validate the request's Host parameter.
Result: An attacker using a specially crafted DNS entry could have bypassed the browser's cross-site-scripting protection
and potentially gained access to user's private data stored on site.
Fix: By default ZeroNet only accepts connections from the 127.0.0.1 and localhost hosts.
If you bind the ui server to an external interface, then it also adds the first http request's host to the allowed host list
or you can define it manually using --ui_host.
Affected versions: ZeroNet 0.5.5 and earlier, Fixed in: ZeroNet 0.5.6
## ZeroNet 0.5.5 (2017-05-18)
### Added
- Outgoing socket binding by --bind parameter
- Database rebuilding progress bar
- Protect low traffic site's peers from cleanup closing
- Local site blacklisting
- Cloned site source code upgrade from parent
- Input placeholder support for displayPrompt
- Alternative interaction for wrapperConfirm
### Changed
- New file priorities for faster site display on first visit
- Don't add ? to url if push/replaceState url starts with #
### Fixed
- PermissionAdd/Remove admin command requirement
- Multi-line confirmation dialog
## ZeroNet 0.5.4 (2017-04-14)
### Added
- Major speed and CPU usage enhancements in Tor always mode
- Send skipped modifications to outdated clients
### Changed
- Upgrade libs to latest version
- Faster port opening and closing
- Deny site limit modification in MultiUser mode
### Fixed
- Filling database from optional files
- OpenSSL detection on systems with OpenSSL 1.1
- Users.json corruption on systems with slow hdd
- Fix leaking files in data directory by webui
## ZeroNet 0.5.3 (2017-02-27)
### Added
- Tar.gz/zip packed site support
- Utf8 filenames in archive files
- Experimental --db_mode secure database mode to prevent data loss on systems with unreliable power source.
- Admin user support in MultiUser mode
- Optional deny adding new sites in MultiUser mode
### Changed
- Faster update and publish times by new socket sharing algorithm
### Fixed
- Fix missing json_row errors when using Mute plugin
## ZeroNet 0.5.2 (2017-02-09)
### Added
- User muting
- Win/Mac signed exe/.app
- Signed commits
### Changed
- Faster site updates after startup
- New macOS package for 10.10 compatibility
### Fixed
- Fix "New version just released" popup on page first visit
- Fix disappearing optional files bug (Thanks l5h5t7 for reporting)
- Fix skipped updates on unreliable connections (Thanks P2P for reporting)
- Sandbox escape security fix (Thanks Firebox for reporting)
- Fix error reporting on async websocket functions
## ZeroNet 0.5.1 (2016-11-18)
### Added
- Multi language interface
- New plugin: Translation helper for site html and js files
- Per-site favicon
### Fixed
- Parallel optional file downloading
## ZeroNet 0.5.0 (2016-11-08)
### Added
- New Plugin: Allow list/delete/pin/manage files on ZeroHello
- New API commands to follow user's optional files, and query stats for optional files
- Set total size limit on optional files.
- New Plugin: Save peers to database and keep them between restarts to allow faster optional file search and make it work without trackers
- Rewritten uPnP port opener + close port on exit (Thanks to sirMackk!)
- Lower memory usage by lazy PeerHashfield creation
- Loaded json files statistics and database info at /Stats page
### Changed
- Separate lock file for better Windows compatibility
- When executing start.py open browser even if ZeroNet is already running
- Keep plugin order after reload to allow plugins to extend another plugin
- Only save sites.json if fully loaded to avoid data loss
- Change aletorrenty tracker to a more reliable one
- Much lower findhashid CPU usage
- Pooled downloading of large amount of optional files
- Lots of other optional file changes to make it better
- If we have 1000 peers for a site make cleanup more aggressive
- Use warning instead of error on verification errors
- Push updates to newer clients first
- Bad file reset improvements
### Fixed
- Fix site deletion errors on startup
- Delay websocket messages until it's connected
- Fix database import if data file contains extra data
- Fix big site download
- Fix diff sending bug (been chasing it for a long time)
- Fix random publish errors when json file contained [] characters
- Fix site delete and siteCreate bug
- Fix file write confirmation dialog
## ZeroNet 0.4.1 (2016-09-05)
### Added
- Major core changes to allow fast startup and lower memory usage
- Try to reconnect to Tor on lost connection
- Sidebar fade-in
- Try to avoid incomplete data files overwrite
- Faster database open
- Display user file sizes in sidebar
- Concurrent worker number depends on --connection_limit
### Changed
- Close databases after 5 min idle time
- Better site size calculation
- Allow "-" character in domains
- Always try to keep connections for sites
- Remove merger permission from merged sites
- Newsfeed scans only last 3 days to speed up database queries
- Updated ZeroBundle-win to Python 2.7.12
### Fixed
- Fix for an important security problem, which allowed anyone to publish new content without a valid certificate from the ID provider. Thanks Kaffie for pointing it out!
- Fix sidebar error when no certificate provider selected
- Skip invalid files on database rebuilding
- Fix random websocket connection error popups
- Fix new siteCreate command
- Fix site size calculation
- Fix port open checking after computer wake up
- Fix --size_limit parsing from command line
## ZeroNet 0.4.0 (2016-08-11)
### Added
- Merger site plugin
- Live source code reloading: Faster core development by allowing me to make changes in ZeroNet source code without restarting it.
- New json table format for merger sites
- Database rebuild from sidebar.
- Allow to store custom data directly in json table: Much simpler and faster SQL queries.
- User file archiving: Allows the site owner to archive inactive user's content into single file. (Reducing initial sync time/cpu/memory usage)
- Also trigger onUpdated/update database on file delete.
- Permission request from ZeroFrame API.
- Allow to store extra data in content.json using fileWrite API command.
- Faster optional files downloading
- Use alternative sources (Gogs, Gitlab) to download updates
- Track provided sites/connection and prefer to keep the ones with more sites to reduce connection number
### Changed
- Keep at least 5 connection per site
- Changed target connection for sites to 10 from 15
- ZeroHello search function stability/speed improvements
- Improvements for clients with slower HDD
### Fixed
- Fix IE11 wrapper nonce errors
- Fix sidebar on mobile devices
- Fix site size calculation
- Fix IE10 compatibility
- Windows XP ZeroBundle compatibility (THX to people of China)
## ZeroNet 0.3.7 (2016-05-27)
### Changed
- Patch command to reduce bandwidth usage by transfer only the changed lines
- Other cpu/memory optimizations
## ZeroNet 0.3.6 (2016-05-27)
### Added
- New ZeroHello
- Newsfeed function
### Fixed
- Security fixes
## ZeroNet 0.3.5 (2016-02-02)
### Added
- Full Tor support with .onion hidden services
- Bootstrap using ZeroNet protocol
### Fixed
- Fix Gevent 1.0.2 compatibility
## ZeroNet 0.3.4 (2015-12-28)
### Added
- AES, ECIES API function support
- PushState and ReplaceState url manipulation support in API
- Multiuser localstorage

COPYING

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

src/Crypt/CryptConnection.py

@ -11,12 +11,13 @@ from util import helper
 class CryptConnectionManager:
     def __init__(self):
-        if config.openssl_bin_file:
-            self.openssl_bin = config.openssl_bin_file
-        elif sys.platform.startswith("win"):
+        this_file = os.path.abspath(__file__).replace("\\", "/").rstrip("cd")
+        if sys.platform.startswith("win"):
             self.openssl_bin = "tools\\openssl\\openssl.exe"
         elif config.dist_type.startswith("bundle_linux"):
             self.openssl_bin = "../runtime/bin/openssl"
+        elif "in.canews.zeronet" in this_file:
+            self.openssl_bin = "../usr/bin/openssl"
         else:
             self.openssl_bin = "openssl"
@ -90,13 +91,17 @@ class CryptConnectionManager:
     def wrapSocket(self, sock, crypt, server=False, cert_pin=None):
         if crypt == "tls-rsa":
             if server:
-                sock_wrapped = self.context_server.wrap_socket(sock, server_side=True)
+                sock_wrapped = self.context_server.wrap_socket(
+                    sock, server_side=True)
             else:
-                sock_wrapped = self.context_client.wrap_socket(sock, server_hostname=random.choice(self.fakedomains))
+                sock_wrapped = self.context_client.wrap_socket(
+                    sock, server_hostname=random.choice(self.fakedomains))
             if cert_pin:
-                cert_hash = hashlib.sha256(sock_wrapped.getpeercert(True)).hexdigest()
+                cert_hash = hashlib.sha256(
+                    sock_wrapped.getpeercert(True)).hexdigest()
                 if cert_hash != cert_pin:
-                    raise Exception("Socket certificate does not match (%s != %s)" % (cert_hash, cert_pin))
+                    raise Exception(
+                        "Socket certificate does not match (%s != %s)" % (cert_hash, cert_pin))
             return sock_wrapped
         else:
             return sock
@ -127,10 +132,6 @@ class CryptConnectionManager:
             "/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Domain Validation Secure Server CA"
         ]
         self.openssl_env['CN'] = random.choice(self.fakedomains)
-        environ = os.environ
-        environ['OPENSSL_CONF'] = self.openssl_env['OPENSSL_CONF']
-        environ['RANDFILE'] = self.openssl_env['RANDFILE']
-        environ['CN'] = self.openssl_env['CN']
         if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
             self.createSslContexts()
@ -140,7 +141,8 @@ class CryptConnectionManager:
         # Replace variables in config template
         conf_template = open(self.openssl_conf_template).read()
-        conf_template = conf_template.replace("$ENV::CN", self.openssl_env['CN'])
+        conf_template = conf_template.replace(
+            "$ENV::CN", self.openssl_env['CN'])
         open(self.openssl_conf, "w").write(conf_template)
         # Generate CAcert and CAkey
@ -156,13 +158,14 @@ class CryptConnectionManager:
         self.log.debug("Running: %s" % cmd)
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()
         if not (os.path.isfile(self.cacert_pem) and os.path.isfile(self.cakey_pem)):
-            self.log.error("RSA ECC SSL CAcert generation failed, CAcert or CAkey files not exist. (%s)" % back)
+            self.log.error(
+                "RSA ECC SSL CAcert generation failed, CAcert or CAkey files not exist. (%s)" % back)
             return False
         else:
             self.log.debug("Result: %s" % back)
@ -179,7 +182,7 @@ class CryptConnectionManager:
         self.log.debug("Generating certificate key and signing request...")
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()
@ -198,7 +201,7 @@ class CryptConnectionManager:
         self.log.debug("Generating RSA cert...")
         proc = subprocess.Popen(
             cmd, shell=True, stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE, env=environ
+            stdout=subprocess.PIPE, env=self.openssl_env
         )
         back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
         proc.wait()
@ -215,7 +218,8 @@ class CryptConnectionManager:
             return True
         else:
-            self.log.error("RSA ECC SSL cert generation failed, cert or key files not exist.")
+            self.log.error(
+                "RSA ECC SSL cert generation failed, cert or key files not exist.")
 manager = CryptConnectionManager()
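The `cert_pin` branch shown above hashes the peer's DER-encoded certificate with SHA-256 and compares it to the expected pin. As a standalone sketch (not part of the repository; the host below is a placeholder), a pin value for a server could be computed with only the Python standard library:

```python
import hashlib
import socket
import ssl

def get_cert_pin(host, port=443):
    # Fetch the server's certificate in DER form without validating it,
    # then hash it the same way the wrapSocket() cert_pin check does.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    with socket.create_connection((host, port)) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls_sock:
            der_cert = tls_sock.getpeercert(binary_form=True)
    return hashlib.sha256(der_cert).hexdigest()

if __name__ == "__main__":
    print(get_cert_pin("example.com"))  # placeholder host
```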


@ -1,33 +0,0 @@
FROM alpine:3.15
#Base settings
ENV HOME /root
COPY requirements.txt /root/requirements.txt
#Install ZeroNet
RUN apk --update --no-cache --no-progress add python3 python3-dev py3-pip gcc g++ autoconf automake libtool libffi-dev musl-dev make tor openssl \
&& pip3 install -r /root/requirements.txt \
&& apk del python3-dev gcc g++ autoconf automake libtool libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version
#Add Zeronet source
COPY . /root
VOLUME /root/data
#Control if Tor proxy is started
ENV ENABLE_TOR true
WORKDIR /root
#Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26117
#Expose ports
EXPOSE 43110 26117
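A rough usage sketch, not taken from the repository: assuming the image above has been built and tagged locally (e.g. `docker build -t zeronet:local .`, a hypothetical tag) and the Docker SDK for Python is installed, a container publishing the two exposed ports and persisting `/root/data` could be started like this:

```python
import docker  # Docker SDK for Python: pip install docker

client = docker.from_env()

# Start a container from the hypothetical locally built image, publishing the
# web UI (43110) and fileserver (26117) ports and keeping /root/data on the host.
container = client.containers.run(
    "zeronet:local",
    detach=True,
    environment={"ENABLE_TOR": "true"},
    ports={"43110/tcp": 43110, "26117/tcp": 26117},
    volumes={"/srv/zeronet-data": {"bind": "/root/data", "mode": "rw"}},
)
print("Started container:", container.short_id)
```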


@ -1,34 +0,0 @@
FROM alpine:3.12
#Base settings
ENV HOME /root
COPY requirements.txt /root/requirements.txt
#Install ZeroNet
RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
&& pip3 install -r /root/requirements.txt \
&& apk del python3-dev gcc libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version
#Add Zeronet source
COPY . /root
VOLUME /root/data
#Control if Tor proxy is started
ENV ENABLE_TOR false
WORKDIR /root
#Set upstart command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
#Expose ports
EXPOSE 43110 26552

LICENSE

@ -1,27 +0,0 @@
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Additional Conditions:

Contributing to this repo:
This repo is governed by GPLv3, which is located at the root of the ZeroNet git repo;
unless specified separately, all code is governed by that license. Contributions to this repo
are divided into two key types: key contributions and non-key contributions. Key contributions
are those which directly affect the performance, quality and features of the software;
non-key contributions include things like translation datasets, image, graphic or video
contributions that do not affect the main usability of the software but improve the existing
usability of a certain thing or feature. These also include tests written with code, since their
purpose is to check whether or not something is working as intended. All non-key contributions
are governed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/); unless specified
above, a contribution is ruled by its type of contribution if there is a conflict between two
contributing parties of the repo in any case.

README-ru.md

@ -1,133 +0,0 @@
# ZeroNet [![tests](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml/badge.svg)](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [![Documentation](https://img.shields.io/badge/docs-faq-brightgreen.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [![Help](https://img.shields.io/badge/keep_this_project_alive-donate-yellow.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [![Docker Pulls](https://img.shields.io/docker/pulls/canewsin/zeronet)](https://hub.docker.com/r/canewsin/zeronet)
[简体中文](./README-zh-cn.md)
[English](./README.md)
Децентрализованные вебсайты, использующие криптографию Bitcoin и протокол BitTorrent — https://zeronet.dev ([Зеркало в ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). В отличии от Bitcoin, ZeroNet'у не требуется блокчейн для работы, однако он использует ту же криптографию, чтобы обеспечить сохранность и проверку данных.
## Зачем?
- Мы верим в открытую, свободную, и неподдающуюся цензуре сеть и связь.
- Нет единой точки отказа: Сайт остаётся онлайн, пока его обслуживает хотя бы 1 пир.
- Нет затрат на хостинг: Сайты обслуживаются посетителями.
- Невозможно отключить: Он нигде, потому что он везде.
- Скорость и возможность работать без Интернета: Вы сможете получить доступ к сайту, потому что его копия хранится на вашем компьютере и у ваших пиров.
## Особенности
- Обновление сайтов в реальном времени
- Поддержка доменов `.bit` ([Namecoin](https://www.namecoin.org))
- Легкая установка: просто распакуйте и запустите
- Клонирование сайтов "в один клик"
- Беспарольная [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
авторизация: Ваша учетная запись защищена той же криптографией, что и ваш Bitcoin-кошелек
- Встроенный SQL-сервер с синхронизацией данных P2P: Позволяет упростить разработку сайта и ускорить загрузку страницы
- Анонимность: Полная поддержка сети Tor, используя скрытые службы `.onion` вместо адресов IPv4
- Зашифрованное TLS подключение
- Автоматическое открытие UPnPпорта
- Плагин для поддержки нескольких пользователей (openproxy)
- Работа с любыми браузерами и операционными системами
## Текущие ограничения
- Файловые транзакции не сжаты
- Нет приватных сайтов
## Как это работает?
- После запуска `zeronet.py` вы сможете посещать сайты в ZeroNet, используя адрес
`http://127.0.0.1:43110/{zeronet_адрес}`
(Например: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
- Когда вы посещаете новый сайт в ZeroNet, он пытается найти пиров с помощью протокола BitTorrent,
чтобы скачать у них файлы сайта (HTML, CSS, JS и т.д.).
- После посещения сайта вы тоже становитесь его пиром.
- Каждый сайт содержит файл `content.json`, который содержит SHA512-хеши всех остальных файлов
и подпись, созданную с помощью закрытого ключа сайта.
- Если владелец сайта (тот, кто владеет закрытым ключом для адреса сайта) изменяет сайт, он
подписывает новый `content.json` и публикует его для пиров. После этого пиры проверяют целостность `content.json`
(используя подпись), скачивают изменённые файлы и распространяют новый контент для других пиров.
[Презентация о криптографии ZeroNet, обновлениях сайтов, многопользовательских сайтах »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
[Часто задаваемые вопросы »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
[Документация разработчика ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Скриншоты
![Screenshot](https://i.imgur.com/H60OAHY.png)
![ZeroTalk](https://zeronet.io/docs/img/zerotalk.png)
[Больше скриншотов в документации ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
## Как присоединиться?
### Windows
- Скачайте и распакуйте архив [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26МБ)
- Запустите `ZeroNet.exe`
### macOS
- Скачайте и распакуйте архив [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14МБ)
- Запустите `ZeroNet.app`
### Linux (64 бит)
- Скачайте и распакуйте архив [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) (14МБ)
- Запустите `./ZeroNet.sh`
> **Note**
> Запустите таким образом: `./ZeroNet.sh --ui_ip '*' --ui_restrict ваш_ip_адрес`, чтобы разрешить удалённое подключение к веб–интерфейсу.
### Docker
Официальный образ находится здесь: https://hub.docker.com/r/canewsin/zeronet/
### Android (arm, arm64, x86)
- Для работы требуется Android как минимум версии 5.0 Lollipop
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- Скачать APK: https://github.com/canewsin/zeronet_mobile/releases
### Android (arm, arm64, x86) Облегчённый клиент только для просмотра (1МБ)
- Для работы требуется Android как минимум версии 4.1 Jelly Bean
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
### Установка из исходного кода
```sh
wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
unzip ZeroNet-src.zip
cd ZeroNet
sudo apt-get update
sudo apt-get install python3-pip
sudo python3 -m pip install -r requirements.txt
```
- Запустите `python3 zeronet.py`
Откройте приветственную страницу ZeroHello в вашем браузере по ссылке http://127.0.0.1:43110/
## Как мне создать сайт в ZeroNet?
- Кликните на **⋮** > **"Create new, empty site"** в меню на сайте [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d).
- Вы будете **перенаправлены** на совершенно новый сайт, который может быть изменён только вами!
- Вы можете найти и изменить контент вашего сайта в каталоге **data/[адрес_вашего_сайта]**
- После изменений откройте ваш сайт, переключите влево кнопку "0" в правом верхнем углу, затем нажмите кнопки **sign** и **publish** внизу
Следующие шаги: [Документация разработчика ZeroNet](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Поддержите проект
- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Рекомендуем)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Другие способы: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
#### Спасибо!
- Здесь вы можете получить больше информации, помощь, прочитать список изменений и исследовать ZeroNet сайты: https://www.reddit.com/r/zeronetx/
- Общение происходит на канале [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) или в [Gitter](https://gitter.im/canewsin/ZeroNet)
- Электронная почта: canews.in@gmail.com

README-zh-cn.md

@ -1,132 +0,0 @@
# ZeroNet [![tests](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml/badge.svg)](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [![Documentation](https://img.shields.io/badge/docs-faq-brightgreen.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [![Help](https://img.shields.io/badge/keep_this_project_alive-donate-yellow.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [![Docker Pulls](https://img.shields.io/docker/pulls/canewsin/zeronet)](https://hub.docker.com/r/canewsin/zeronet)
[English](./README.md)
使用 Bitcoin 加密和 BitTorrent 网络的去中心化网络 - https://zeronet.dev
## 为什么?
* 我们相信开放,自由,无审查的网络和通讯
* 不会受单点故障影响:只要有在线的节点,站点就会保持在线
* 无托管费用:站点由访问者托管
* 无法关闭:因为节点无处不在
* 快速并可离线运行:即使没有互联网连接也可以使用
## 功能
* 实时站点更新
* 支持 Namecoin 的 .bit 域名
* 安装方便:只需解压并运行
* 一键克隆存在的站点
* 无需密码、基于 [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
的认证:您的账户被与比特币钱包相同的加密方法保护
* 内建 SQL 服务器和 P2P 数据同步:让开发更简单并提升加载速度
* 匿名性:完整的 Tor 网络支持,支持通过 .onion 隐藏服务相互连接而不是通过 IPv4 地址连接
* TLS 加密连接
* 自动打开 uPnP 端口
* 多用户(openproxy)支持的插件
* 适用于任何浏览器 / 操作系统
## 原理
* 在运行 `zeronet.py` 后,您将可以通过
`http://127.0.0.1:43110/{zeronet_address}`(例如:
`http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`)访问 zeronet 中的站点
* 在您浏览 zeronet 站点时,客户端会尝试通过 BitTorrent 网络来寻找可用的节点,从而下载需要的文件(html、css、js...)
* 您将会储存每一个浏览过的站点
* 每个站点都包含一个名为 `content.json` 的文件,它储存了其他所有文件的 sha512 散列值以及一个通过站点私钥生成的签名
* 如果站点的所有者(拥有站点地址的私钥)修改了站点,并且他 / 她签名了新的 `content.json` 然后推送至其他节点,
那么这些节点将会在使用签名验证 `content.json` 的真实性后,下载修改后的文件并将新内容推送至另外的节点
#### [关于 ZeroNet 加密,站点更新,多用户站点的幻灯片 »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
#### [常见问题 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
#### [ZeroNet 开发者文档 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## 屏幕截图
![Screenshot](https://i.imgur.com/H60OAHY.png)
![ZeroTalk](https://zeronet.io/docs/img/zerotalk.png)
#### [ZeroNet 文档中的更多屏幕截图 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
## 如何加入
### Windows
- 下载 [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- 在任意位置解压缩
- 运行 `ZeroNet.exe`
### macOS
- 下载 [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- 在任意位置解压缩
- 运行 `ZeroNet.app`
### Linux (x86-64bit)
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- `unzip ZeroNet-linux.zip`
- `cd ZeroNet-linux`
- 使用以下命令启动 `./ZeroNet.sh`
- 在浏览器打开 http://127.0.0.1:43110/ 即可访问 ZeroHello 页面
__提示__ 若要允许在 Web 界面上的远程连接,使用以下命令启动 `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address`
### 从源代码安装
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- `unzip ZeroNet-src.zip`
- `cd ZeroNet`
- `sudo apt-get update`
- `sudo apt-get install python3-pip`
- `sudo python3 -m pip install -r requirements.txt`
- 使用以下命令启动 `python3 zeronet.py`
- 在浏览器打开 http://127.0.0.1:43110/ 即可访问 ZeroHello 页面
### Android (arm, arm64, x86)
- minimum Android version supported 21 (Android 5.0 Lollipop)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- APK download: https://github.com/canewsin/zeronet_mobile/releases
### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
- minimum Android version supported 16 (JellyBean)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
## 现有限制
* 传输文件时没有压缩
* 不支持私有站点
## 如何创建一个 ZeroNet 站点?
* 点击 [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) 站点的 **⋮** > **「新建空站点」** 菜单项
* 您将被**重定向**到一个全新的站点,该站点只能由您修改
* 您可以在 **data/[您的站点地址]** 目录中找到并修改网站的内容
* 修改后打开您的网站,将右上角的「0」按钮拖到左侧,然后点击底部的**签名**并**发布**按钮
接下来的步骤:[ZeroNet 开发者文档](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## 帮助这个项目
- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
#### 感谢您!
* 更多信息,帮助,变更记录和 zeronet 站点https://www.reddit.com/r/zeronetx/
* 前往 [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) 或 [gitter](https://gitter.im/canewsin/ZeroNet) 和我们聊天
* [这里](https://gitter.im/canewsin/ZeroNet)是一个 gitter 上的中文聊天室
* Email: canews.in@gmail.com

README.md

@ -1,156 +0,0 @@
# ZeroNet [![tests](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml/badge.svg)](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [![Documentation](https://img.shields.io/badge/docs-faq-brightgreen.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [![Help](https://img.shields.io/badge/keep_this_project_alive-donate-yellow.svg)](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [![Docker Pulls](https://img.shields.io/docker/pulls/canewsin/zeronet)](https://hub.docker.com/r/canewsin/zeronet)
<!--TODO: Update Onion Site -->
Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/). Unlike Bitcoin, ZeroNet doesn't need a blockchain to run; it uses the same cryptography as BTC to ensure data integrity and validation.
## Why?
* We believe in open, free, and uncensored network and communication.
* No single point of failure: Site remains online so long as at least 1 peer is
serving it.
* No hosting costs: Sites are served by visitors.
* Impossible to shut down: It's nowhere because it's everywhere.
* Fast and works offline: You can access the site even if Internet is
unavailable.
## Features
* Real-time updated sites
* Namecoin .bit domains support
* Easy to setup: unpack & run
* Clone websites in one click
* Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
based authorization: Your account is protected by the same cryptography as your Bitcoin wallet
* Built-in SQL server with P2P data synchronization: Allows easier site development and faster page load times
* Anonymity: Full Tor network support with .onion hidden services instead of IPv4 addresses
* TLS encrypted connections
* Automatic uPnP port opening
* Plugin for multiuser (openproxy) support
* Works with any browser/OS
## How does it work?
* After starting `zeronet.py` you will be able to visit zeronet sites using
`http://127.0.0.1:43110/{zeronet_address}` (eg.
`http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
* When you visit a new zeronet site, it tries to find peers using the BitTorrent
network so it can download the site files (html, css, js...) from them.
* Each visited site is also served by you.
* Every site contains a `content.json` file which holds all other files in a sha512 hash
and a signature generated using the site's private key.
* If the site owner (who has the private key for the site address) modifies the
site, they sign the new `content.json` and publish it to the peers.
Afterwards, the peers verify the `content.json` integrity (using the
signature), download the modified files and publish the new content to
other peers, as sketched below.
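A minimal illustration of that hash check (illustrative only, not ZeroNet's actual implementation; the exact `content.json` layout and any digest truncation may differ):

```python
import hashlib
import json
import os

def verify_site_files(site_dir):
    # Recompute the sha512 digest of every file listed in content.json and
    # compare it with the recorded value. Verifying the signature of
    # content.json itself against the site address is omitted here.
    with open(os.path.join(site_dir, "content.json")) as f:
        content = json.load(f)
    for inner_path, info in content.get("files", {}).items():
        with open(os.path.join(site_dir, inner_path), "rb") as f:
            digest = hashlib.sha512(f.read()).hexdigest()
        if not digest.startswith(info["sha512"]):  # recorded digests may be truncated
            return False
    return True
```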
#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
#### [Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
#### [ZeroNet Developer Documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Screenshots
![Screenshot](https://i.imgur.com/H60OAHY.png)
![ZeroTalk](https://zeronet.io/docs/img/zerotalk.png)
#### [More screenshots in ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
## How to join
### Windows
- Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- Unpack anywhere
- Run `ZeroNet.exe`
### macOS
- Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- Unpack anywhere
- Run `ZeroNet.app`
### Linux (x86-64bit)
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- `unzip ZeroNet-linux.zip`
- `cd ZeroNet-linux`
- Start with: `./ZeroNet.sh`
- Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
__Tip:__ Start with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address` to allow remote connections on the web interface.
### Android (arm, arm64, x86)
- minimum Android version supported 21 (Android 5.0 Lollipop)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- APK download: https://github.com/canewsin/zeronet_mobile/releases
### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
- minimum Android version supported 16 (JellyBean)
- [<img src="https://play.google.com/intl/en_us/badges/images/generic/en_badge_web_generic.png"
alt="Download from Google Play"
height="80">](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
#### Docker
There is an official image, built from source at: https://hub.docker.com/r/canewsin/zeronet/
### Online Proxies
Proxies are like seed boxes for sites (i.e. ZNX runs on a cloud VPS); you can try the ZeroNet experience through a proxy. Add your proxy below if you have one.
#### Official ZNX Proxy :
https://proxy.zeronet.dev/
https://zeronet.dev/
#### From Community
https://0net-preview.com/
https://portal.ngnoid.tv/
https://zeronet.ipfsscan.io/
### Install from source
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- `unzip ZeroNet-src.zip`
- `cd ZeroNet`
- `sudo apt-get update`
- `sudo apt-get install python3-pip`
- `sudo python3 -m pip install -r requirements.txt`
- Start with: `python3 zeronet.py`
- Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
## Current limitations
* File transactions are not compressed
* No private sites
## How can I create a ZeroNet site?
* Click on **⋮** > **"Create new, empty site"** menu item on the site [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d).
* You will be **redirected** to a completely new site that is only modifiable by you!
* You can find and modify your site's content in the **data/[yoursiteaddress]** directory
* After the modifications, open your site, drag the top-right "0" button to the left, then press the **sign** and **publish** buttons at the bottom
Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Help keep this project alive
- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
#### Thank you!
* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronetx/
* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
* Email: canews.in@gmail.com

Vagrantfile

@ -1,45 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
#Set box
config.vm.box = "ubuntu/trusty64"
#Do not check for updates
config.vm.box_check_update = false
#Add private network
config.vm.network "private_network", type: "dhcp"
#Redirect ports
config.vm.network "forwarded_port", guest: 43110, host: 43110
config.vm.network "forwarded_port", guest: 15441, host: 15441
#Sync folder using NFS if not windows
config.vm.synced_folder ".", "/vagrant",
:nfs => !Vagrant::Util::Platform.windows?
#VirtualBox settings
config.vm.provider "virtualbox" do |vb|
# Don't boot with headless mode
#vb.gui = true
# Set VM settings
vb.customize ["modifyvm", :id, "--memory", "512"]
vb.customize ["modifyvm", :id, "--cpus", 1]
end
#Update system
config.vm.provision "shell",
inline: "sudo apt-get update -y && sudo apt-get upgrade -y"
#Install deps
config.vm.provision "shell",
inline: "sudo apt-get install msgpack-python python-gevent python-pip python-dev -y"
config.vm.provision "shell",
inline: "sudo pip install msgpack --upgrade"
end

md5.hashes

@ -0,0 +1,34 @@
[
"794f5ac0675f66310963163e62527196",
"f4fdc4ef9fcf3db65ea91fb46b3982ca",
"4bdd9cc3fd3629a7e177bf37df5326c6",
"3df0aae9c0f30941a3893f02b0533d65",
"25001a7ef26550ec1fbb2ae7fbfff6a1",
"634647a7ea916b29f3a8fe5f140341a8",
"e09fab4484cf10d5bc29901f5c17df78",
"11af969820fdc72db9d9c41abd98e4c9",
"371da38ccd0dcdc49b71edd0872be41e",
"a23aeb4308119a2e34e33c109d4ee496",
"0386c7231f8af2706f3b8ca71bb30a82",
"0f408bbceb7572631b0e1dcd97b257e1",
"d4cfb19351a761ae1252934357772f1e",
"7656733d355d0a31ee57ba3901374de8",
"b522f9ad4d17d8962bba7fc1c6880d1a",
"3e8dab64ea8c23463f83de1c68bc2342",
"b5ebbd8c4a7fa865095e95853d5bee35",
"0e7b811892a6abc0cbcf66161ac82bc5",
"d2ba546cd3eae258b10c7fdbaafe9434",
"f558010cc964e206eb03eafd90731e0b",
"4cfcd90b9206701d96c7757222072e5c",
"063cd806f972b6d0f0226d8c04474270",
"c7d737758baf1d516cf3a0ed45176f6e",
"b6cfb932d1499cbc2fba10c06efe9567",
"30865832830c3bb1d67aeb48b0572774",
"4908d51ff8f2daa35a209db0c86dc535",
"336b451616f620743e6aecb30900b822",
"98c9109d618094a9775866c1838d4666",
"11e86b9a2aae72f854bf1f181946d78b",
"28d0faceb156ad1e5f1befa770dce3cd",
"93191cea5d81f6c2b2f5a4a547e2bdfd",
"6b1f09c95720e730ef27970b7f9f3e5c"
]

patches.json

@ -0,0 +1,7 @@
[
{
"filename": "CryptConnection.py",
"patchDir": "src/Crypt",
"patchUrl": "https://raw.githubusercontent.com/canewsin/ZeroNet/py3-patches/CryptConnection.py"
}
]
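Neither file documents how it is consumed, but taken together, `patches.json` (which file to fetch and where it belongs) and `md5.hashes` above (apparently a whitelist of acceptable digests for that file) suggest a small self-update step. A purely hypothetical sketch of such a step; every name and the verification policy here are assumptions, not taken from this diff:

```python
import hashlib
import json
import os
import urllib.request

def apply_patches(patches_file="patches.json", hashes_file="md5.hashes"):
    # Load the whitelist of accepted MD5 digests and the list of patches.
    with open(hashes_file) as f:
        known_hashes = set(json.load(f))
    with open(patches_file) as f:
        patches = json.load(f)
    for patch in patches:
        target = os.path.join(patch["patchDir"], patch["filename"])
        tmp_path = target + ".new"
        urllib.request.urlretrieve(patch["patchUrl"], tmp_path)
        with open(tmp_path, "rb") as f:
            digest = hashlib.md5(f.read()).hexdigest()
        if digest in known_hashes:
            os.replace(tmp_path, target)  # only swap in whitelisted downloads
        else:
            os.remove(tmp_path)
```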

@ -1 +0,0 @@
Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb

requirements.txt

@ -1,13 +0,0 @@
gevent==1.4.0; python_version <= "3.6"
greenlet==0.4.16; python_version <= "3.6"
gevent>=20.9.0; python_version >= "3.7"
msgpack>=0.4.4
base58
merkletools @ git+https://github.com/ZeroNetX/pymerkletools.git@dev
rsa
PySocks>=1.6.8
pyasn1
websocket_client
gevent-ws
coincurve
maxminddb

src/Config.py

@ -1,675 +0,0 @@
import argparse
import sys
import os
import locale
import re
import configparser
import logging
import logging.handlers
import stat
import time
class Config(object):
def __init__(self, argv):
self.version = "0.9.0"
self.rev = 4630
self.argv = argv
self.action = None
self.test_parser = None
self.pending_changes = {}
self.need_restart = False
self.keys_api_change_allowed = set([
"tor", "fileserver_port", "language", "tor_use_bridges", "trackers_proxy", "trackers",
"trackers_file", "open_browser", "log_level", "fileserver_ip_type", "ip_external", "offline",
"threads_fs_read", "threads_fs_write", "threads_crypt", "threads_db"
])
self.keys_restart_need = set([
"tor", "fileserver_port", "fileserver_ip_type", "threads_fs_read", "threads_fs_write", "threads_crypt", "threads_db"
])
self.start_dir = self.getStartDir()
self.config_file = self.start_dir + "/zeronet.conf"
self.data_dir = self.start_dir + "/data"
self.log_dir = self.start_dir + "/log"
self.openssl_lib_file = None
self.openssl_bin_file = None
self.trackers_file = False
self.createParser()
self.createArguments()
def createParser(self):
# Create parser
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser.register('type', 'bool', self.strToBool)
self.subparsers = self.parser.add_subparsers(title="Action to perform", dest="action")
def __str__(self):
return str(self.arguments).replace("Namespace", "Config") # Using argparse str output
# Convert string to bool
def strToBool(self, v):
return v.lower() in ("yes", "true", "t", "1")
def getStartDir(self):
this_file = os.path.abspath(__file__).replace("\\", "/").rstrip("cd")
if "--start_dir" in self.argv:
start_dir = self.argv[self.argv.index("--start_dir") + 1]
elif this_file.endswith("/Contents/Resources/core/src/Config.py"):
# Running as ZeroNet.app
if this_file.startswith("/Application") or this_file.startswith("/private") or this_file.startswith(os.path.expanduser("~/Library")):
# Running from a non-writeable directory, put data in Application Support
start_dir = os.path.expanduser("~/Library/Application Support/ZeroNet")
else:
# Running from writeable directory put data next to .app
start_dir = re.sub("/[^/]+/Contents/Resources/core/src/Config.py", "", this_file)
elif this_file.endswith("/core/src/Config.py"):
# Running as exe or source is at Application Support directory, put var files to outside of core dir
start_dir = this_file.replace("/core/src/Config.py", "")
elif this_file.endswith("usr/share/zeronet/src/Config.py"):
# Running from non-writeable location, e.g., AppImage
start_dir = os.path.expanduser("~/ZeroNet")
else:
start_dir = "."
return start_dir
# Create command line arguments
def createArguments(self):
from Crypt import CryptHash
access_key_default = CryptHash.random(24, "base64")  # Used to allow restricted plugins when multiuser plugin is enabled
trackers = [
"http://open.acgnxtracker.com:80/announce", # DE
"http://tracker.bt4g.com:2095/announce", # Cloudflare
"http://tracker.files.fm:6969/announce",
"http://t.publictracker.xyz:6969/announce",
"https://tracker.lilithraws.cf:443/announce",
"https://tracker.babico.name.tr:443/announce",
]
# Platform specific
if sys.platform.startswith("win"):
coffeescript = "type %s | tools\\coffee\\coffee.cmd"
else:
coffeescript = None
try:
language, enc = locale.getdefaultlocale()
language = language.lower().replace("_", "-")
if language not in ["pt-br", "zh-tw"]:
language = language.split("-")[0]
except Exception:
language = "en"
use_openssl = True
if repr(1483108852.565) != "1483108852.565": # Fix for weird Android issue
fix_float_decimals = True
else:
fix_float_decimals = False
config_file = self.start_dir + "/zeronet.conf"
data_dir = self.start_dir + "/data"
log_dir = self.start_dir + "/log"
ip_local = ["127.0.0.1", "::1"]
# Main
action = self.subparsers.add_parser("main", help='Start UiServer and FileServer (default)')
# SiteCreate
action = self.subparsers.add_parser("siteCreate", help='Create a new site')
action.register('type', 'bool', self.strToBool)
action.add_argument('--use_master_seed', help="Allow created site's private key to be recovered using the master seed in users.json (default: True)", type="bool", choices=[True, False], default=True)
# SiteNeedFile
action = self.subparsers.add_parser("siteNeedFile", help='Get a file from site')
action.add_argument('address', help='Site address')
action.add_argument('inner_path', help='File inner path')
# SiteDownload
action = self.subparsers.add_parser("siteDownload", help='Download a new site')
action.add_argument('address', help='Site address')
# SiteSign
action = self.subparsers.add_parser("siteSign", help='Update and sign content.json: address [privatekey]')
action.add_argument('address', help='Site to sign')
action.add_argument('privatekey', help='Private key (default: ask on execute)', nargs='?')
action.add_argument('--inner_path', help='File you want to sign (default: content.json)',
default="content.json", metavar="inner_path")
action.add_argument('--remove_missing_optional', help='Remove optional files that are not present in the directory', action='store_true')
action.add_argument('--publish', help='Publish site after the signing', action='store_true')
# SitePublish
action = self.subparsers.add_parser("sitePublish", help='Publish site to other peers: address')
action.add_argument('address', help='Site to publish')
action.add_argument('peer_ip', help='Peer ip to publish (default: random peers ip from tracker)',
default=None, nargs='?')
action.add_argument('peer_port', help='Peer port to publish (default: random peer port from tracker)',
default=15441, nargs='?')
action.add_argument('--inner_path', help='Content.json you want to publish (default: content.json)',
default="content.json", metavar="inner_path")
# SiteVerify
action = self.subparsers.add_parser("siteVerify", help='Verify site files using sha512: address')
action.add_argument('address', help='Site to verify')
# SiteCmd
action = self.subparsers.add_parser("siteCmd", help='Execute a ZeroFrame API command on a site')
action.add_argument('address', help='Site address')
action.add_argument('cmd', help='API command name')
action.add_argument('parameters', help='Parameters of the command', nargs='?')
# dbRebuild
action = self.subparsers.add_parser("dbRebuild", help='Rebuild site database cache')
action.add_argument('address', help='Site to rebuild')
# dbQuery
action = self.subparsers.add_parser("dbQuery", help='Query site sql cache')
action.add_argument('address', help='Site to query')
action.add_argument('query', help='Sql query')
# PeerPing
action = self.subparsers.add_parser("peerPing", help='Send Ping command to peer')
action.add_argument('peer_ip', help='Peer ip')
action.add_argument('peer_port', help='Peer port', nargs='?')
# PeerGetFile
action = self.subparsers.add_parser("peerGetFile", help='Request and print a file content from peer')
action.add_argument('peer_ip', help='Peer ip')
action.add_argument('peer_port', help='Peer port')
action.add_argument('site', help='Site address')
action.add_argument('filename', help='File name to request')
action.add_argument('--benchmark', help='Request file 10x then displays the total time', action='store_true')
# PeerCmd
action = self.subparsers.add_parser("peerCmd", help='Request and print a file content from peer')
action.add_argument('peer_ip', help='Peer ip')
action.add_argument('peer_port', help='Peer port')
action.add_argument('cmd', help='Command to execute')
action.add_argument('parameters', help='Parameters to command', nargs='?')
# CryptSign
action = self.subparsers.add_parser("cryptSign", help='Sign message using Bitcoin private key')
action.add_argument('message', help='Message to sign')
action.add_argument('privatekey', help='Private key')
# Crypt Verify
action = self.subparsers.add_parser("cryptVerify", help='Verify message using Bitcoin public address')
action.add_argument('message', help='Message to verify')
action.add_argument('sign', help='Signature for the message')
action.add_argument('address', help='Signer\'s address')
# Crypt GetPrivatekey
action = self.subparsers.add_parser("cryptGetPrivatekey", help='Generate a privatekey from master seed')
action.add_argument('master_seed', help='Source master seed')
action.add_argument('site_address_index', help='Site address index', type=int)
action = self.subparsers.add_parser("getConfig", help='Return json-encoded info')
action = self.subparsers.add_parser("testConnection", help='Testing')
action = self.subparsers.add_parser("testAnnounce", help='Testing')
self.test_parser = self.subparsers.add_parser("test", help='Run a test')
self.test_parser.add_argument('test_name', help='Test name', nargs="?")
# self.test_parser.add_argument('--benchmark', help='Run the tests multiple times to measure the performance', action='store_true')
# Config parameters
self.parser.add_argument('--verbose', help='More detailed logging', action='store_true')
self.parser.add_argument('--debug', help='Debug mode', action='store_true')
self.parser.add_argument('--silent', help='Only log errors to terminal output', action='store_true')
self.parser.add_argument('--debug_socket', help='Debug socket connections', action='store_true')
self.parser.add_argument('--merge_media', help='Merge all.js and all.css', action='store_true')
self.parser.add_argument('--batch', help="Batch mode (No interactive input for commands)", action='store_true')
self.parser.add_argument('--start_dir', help='Path of working dir for variable content (data, log, .conf)', default=self.start_dir, metavar="path")
self.parser.add_argument('--config_file', help='Path of config file', default=config_file, metavar="path")
self.parser.add_argument('--data_dir', help='Path of data directory', default=data_dir, metavar="path")
self.parser.add_argument('--console_log_level', help='Level of logging to console', default="default", choices=["default", "DEBUG", "INFO", "ERROR", "off"])
self.parser.add_argument('--log_dir', help='Path of logging directory', default=log_dir, metavar="path")
self.parser.add_argument('--log_level', help='Level of logging to file', default="DEBUG", choices=["DEBUG", "INFO", "ERROR", "off"])
self.parser.add_argument('--log_rotate', help='Log rotate interval', default="daily", choices=["hourly", "daily", "weekly", "off"])
self.parser.add_argument('--log_rotate_backup_count', help='Log rotate backup count', default=5, type=int)
self.parser.add_argument('--language', help='Web interface language', default=language, metavar='language')
self.parser.add_argument('--ui_ip', help='Web interface bind address', default="127.0.0.1", metavar='ip')
self.parser.add_argument('--ui_port', help='Web interface bind port', default=43110, type=int, metavar='port')
self.parser.add_argument('--ui_restrict', help='Restrict web access', default=False, metavar='ip', nargs='*')
self.parser.add_argument('--ui_host', help='Allow access using these hosts', metavar='host', nargs='*')
self.parser.add_argument('--ui_trans_proxy', help='Allow access using a transparent proxy', action='store_true')
self.parser.add_argument('--open_browser', help='Open homepage in web browser automatically',
nargs='?', const="default_browser", metavar='browser_name')
self.parser.add_argument('--homepage', help='Web interface Homepage', default='1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d',
metavar='address')
self.parser.add_argument('--updatesite', help='Source code update site', default='1Update8crprmciJHwp2WXqkx2c4iYp18',
metavar='address')
self.parser.add_argument('--access_key', help='Plugin access key default: Random key generated at startup', default=access_key_default, metavar='key')
self.parser.add_argument('--dist_type', help='Type of installed distribution', default='source')
self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=25, type=int, metavar='limit')
self.parser.add_argument('--file_size_limit', help='Maximum per file size limit in MB', default=10, type=int, metavar='limit')
self.parser.add_argument('--connected_limit', help='Max connected peers per site', default=8, type=int, metavar='connected_limit')
self.parser.add_argument('--global_connected_limit', help='Max connections', default=512, type=int, metavar='global_connected_limit')
self.parser.add_argument('--workers', help='Download workers per site', default=5, type=int, metavar='workers')
self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
self.parser.add_argument('--fileserver_port', help='FileServer bind port (0: randomize)', default=0, type=int, metavar='port')
self.parser.add_argument('--fileserver_port_range', help='FileServer randomization range', default="10000-40000", metavar='port')
self.parser.add_argument('--fileserver_ip_type', help='FileServer ip type', default="dual", choices=["ipv4", "ipv6", "dual"])
self.parser.add_argument('--ip_local', help='My local ips', default=ip_local, type=int, metavar='ip', nargs='*')
self.parser.add_argument('--ip_external', help='Set reported external ip (tested on start if None)', metavar='ip', nargs='*')
self.parser.add_argument('--offline', help='Disable network communication', action='store_true')
self.parser.add_argument('--disable_udp', help='Disable UDP connections', action='store_true')
self.parser.add_argument('--proxy', help='SOCKS proxy address', metavar='ip:port')
self.parser.add_argument('--bind', help='Bind outgoing sockets to this address', metavar='ip')
self.parser.add_argument('--trackers', help='Bootstrapping torrent trackers', default=trackers, metavar='protocol://address', nargs='*')
self.parser.add_argument('--trackers_file', help='Load torrent trackers dynamically from a file', metavar='path', nargs='*')
self.parser.add_argument('--trackers_proxy', help='Force using a proxy to connect to trackers (disable, tor, ip:port)', default="disable")
self.parser.add_argument('--use_libsecp256k1', help='Use the libsecp256k1 library for speedup', type='bool', choices=[True, False], default=True)
self.parser.add_argument('--use_openssl', help='Use the OpenSSL library for speedup', type='bool', choices=[True, False], default=True)
self.parser.add_argument('--openssl_lib_file', help='Path for OpenSSL library file (default: detect)', default=argparse.SUPPRESS, metavar="path")
self.parser.add_argument('--openssl_bin_file', help='Path for OpenSSL binary file (default: detect)', default=argparse.SUPPRESS, metavar="path")
self.parser.add_argument('--disable_db', help='Disable database updating', action='store_true')
self.parser.add_argument('--disable_encryption', help='Disable connection encryption', action='store_true')
self.parser.add_argument('--force_encryption', help="Enforce encryption to all peer connections", action='store_true')
self.parser.add_argument('--disable_sslcompression', help='Disable SSL compression to save memory',
type='bool', choices=[True, False], default=True)
self.parser.add_argument('--keep_ssl_cert', help='Disable new SSL cert generation on startup', action='store_true')
self.parser.add_argument('--max_files_opened', help='Change maximum opened files allowed by OS to this value on startup',
default=2048, type=int, metavar='limit')
self.parser.add_argument('--stack_size', help='Change thread stack size', default=None, type=int, metavar='thread_stack_size')
self.parser.add_argument('--use_tempfiles', help='Use temporary files when downloading (experimental)',
type='bool', choices=[True, False], default=False)
self.parser.add_argument('--stream_downloads', help='Stream download directly to files (experimental)',
type='bool', choices=[True, False], default=False)
self.parser.add_argument("--msgpack_purepython", help='Use less memory, but a bit more CPU power',
type='bool', choices=[True, False], default=False)
self.parser.add_argument("--fix_float_decimals", help='Fix content.json modification date float precision on verification',
type='bool', choices=[True, False], default=fix_float_decimals)
self.parser.add_argument("--db_mode", choices=["speed", "security"], default="speed")
self.parser.add_argument('--threads_fs_read', help='Number of threads for file read operations', default=1, type=int)
self.parser.add_argument('--threads_fs_write', help='Number of threads for file write operations', default=1, type=int)
self.parser.add_argument('--threads_crypt', help='Number of threads for cryptographic operations', default=2, type=int)
self.parser.add_argument('--threads_db', help='Number of threads for database operations', default=1, type=int)
self.parser.add_argument("--download_optional", choices=["manual", "auto"], default="manual")
self.parser.add_argument('--coffeescript_compiler', help='Coffeescript compiler for development', default=coffeescript,
metavar='executable_path')
self.parser.add_argument('--tor', help='enable: Use only for Tor peers, always: Use Tor for every connection', choices=["disable", "enable", "always"], default='enable')
self.parser.add_argument('--tor_controller', help='Tor controller address', metavar='ip:port', default='127.0.0.1:9051')
self.parser.add_argument('--tor_proxy', help='Tor proxy address', metavar='ip:port', default='127.0.0.1:9050')
self.parser.add_argument('--tor_password', help='Tor controller password', metavar='password')
self.parser.add_argument('--tor_use_bridges', help='Use obfuscated bridge relays to avoid Tor block', action='store_true')
self.parser.add_argument('--tor_hs_limit', help='Maximum number of hidden services in Tor always mode', metavar='limit', type=int, default=10)
self.parser.add_argument('--tor_hs_port', help='Hidden service port in Tor always mode', metavar='port', type=int, default=15441)
self.parser.add_argument('--version', action='version', version='ZeroNet %s r%s' % (self.version, self.rev))
self.parser.add_argument('--end', help='Stop multi-value argument parsing', action='store_true')
return self.parser
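# Illustrative invocations of the parser built above (the site address, tracker
# and peer values are placeholders, not real ones):
#   python3 zeronet.py --ui_port 43111 --tor always
#   python3 zeronet.py siteSign 1SitePLACEHOLDERxxxxxxxxxxxxxxxxx --publish
#   python3 zeronet.py peerPing 192.0.2.10 15441
#   python3 zeronet.py --trackers zero://boot.example.org:15441 --end main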
def loadTrackersFile(self):
if not self.trackers_file:
self.trackers_file = ["trackers.txt", "{data_dir}/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d/trackers.txt"]
self.trackers = self.arguments.trackers[:]
for trackers_file in self.trackers_file:
try:
if trackers_file.startswith("/"): # Absolute
trackers_file_path = trackers_file
elif trackers_file.startswith("{data_dir}"): # Relative to data_dir
trackers_file_path = trackers_file.replace("{data_dir}", self.data_dir)
else: # Relative to zeronet.py
trackers_file_path = self.start_dir + "/" + trackers_file
if not os.path.exists(trackers_file_path):
continue
for line in open(trackers_file_path):
tracker = line.strip()
if "://" in tracker and tracker not in self.trackers:
self.trackers.append(tracker)
except Exception as err:
print("Error loading trackers file: %s" % err)
# Find arguments specified for current action
def getActionArguments(self):
back = {}
arguments = self.parser._subparsers._group_actions[0].choices[self.action]._actions[1:] # First is --version
for argument in arguments:
back[argument.dest] = getattr(self, argument.dest)
return back
# Try to find action from argv
def getAction(self, argv):
actions = [list(action.choices.keys()) for action in self.parser._actions if action.dest == "action"][0] # Valid actions
found_action = False
for action in actions: # See if any in argv
if action in argv:
found_action = action
break
return found_action
# Move plugin parameters to end of argument list
def moveUnknownToEnd(self, argv, default_action):
valid_actions = sum([action.option_strings for action in self.parser._actions], [])
valid_parameters = []
plugin_parameters = []
plugin = False
for arg in argv:
if arg.startswith("--"):
if arg not in valid_actions:
plugin = True
else:
plugin = False
elif arg == default_action:
plugin = False
if plugin:
plugin_parameters.append(arg)
else:
valid_parameters.append(arg)
return valid_parameters + plugin_parameters
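# Example of the reordering above, with a hypothetical plugin flag unknown to
# the core parser (it is shifted behind the recognised arguments so argparse
# does not abort on it):
#   in : ["zeronet.py", "--myplugin_flag", "1", "--ui_port", "43110", "main"]
#   out: ["zeronet.py", "--ui_port", "43110", "main", "--myplugin_flag", "1"]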
def getParser(self, argv):
action = self.getAction(argv)
if not action:
return self.parser
else:
return self.subparsers.choices[action]
# Parse arguments from config file and command line
def parse(self, silent=False, parse_config=True):
argv = self.argv[:] # Copy command line arguments
current_parser = self.getParser(argv)
if silent: # Don't display messages or quit on unknown parameter
original_print_message = self.parser._print_message
original_exit = self.parser.exit
def silencer(parser, function_name):
parser.exited = True
return None
current_parser.exited = False
current_parser._print_message = lambda *args, **kwargs: silencer(current_parser, "_print_message")
current_parser.exit = lambda *args, **kwargs: silencer(current_parser, "exit")
self.parseCommandline(argv, silent) # Parse argv
self.setAttributes()
if parse_config:
argv = self.parseConfig(argv) # Add arguments from config file
self.parseCommandline(argv, silent) # Parse argv
self.setAttributes()
if not silent:
if self.fileserver_ip != "*" and self.fileserver_ip not in self.ip_local:
self.ip_local.append(self.fileserver_ip)
if silent: # Restore original functions
if current_parser.exited and self.action == "main": # Argument parsing halted, don't start ZeroNet with main action
self.action = None
current_parser._print_message = original_print_message
current_parser.exit = original_exit
self.loadTrackersFile()
# Parse command line arguments
def parseCommandline(self, argv, silent=False):
# Find out if action is specified on start
action = self.getAction(argv)
if not action:
argv.append("--end")
argv.append("main")
action = "main"
argv = self.moveUnknownToEnd(argv, action)
if silent:
res = self.parser.parse_known_args(argv[1:])
if res:
self.arguments = res[0]
else:
self.arguments = {}
else:
self.arguments = self.parser.parse_args(argv[1:])
# Parse config file
def parseConfig(self, argv):
# Find config file path from parameters
if "--config_file" in argv:
self.config_file = argv[argv.index("--config_file") + 1]
# Load config file
if os.path.isfile(self.config_file):
config = configparser.RawConfigParser(allow_no_value=True, strict=False)
config.read(self.config_file)
for section in config.sections():
for key, val in config.items(section):
if val == "True":
val = None
if section != "global": # If not global prefix key with section
key = section + "_" + key
if key == "open_browser": # Prefer config file value over cli argument
while "--%s" % key in argv:
pos = argv.index("--open_browser")
del argv[pos:pos + 2]
argv_extend = ["--%s" % key]
if val:
for line in val.strip().split("\n"): # Allow multi-line values
argv_extend.append(line)
if "\n" in val:
argv_extend.append("--end")
argv = argv[:1] + argv_extend + argv[1:]
return argv
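# A hypothetical zeronet.conf handled by the loop above: "True" values become
# bare flags, keys under a non-[global] section get the section name as prefix,
# and multi-line values expand into repeated values closed with --end:
#   [global]
#   ui_port = 43111
#   debug = True
#   trackers =
#    zero://boot.example.org:15441
#    zero://boot2.example.org:15441
# ...which is injected into argv as:
#   --ui_port 43111 --debug --trackers zero://boot.example.org:15441 zero://boot2.example.org:15441 --end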
# Return command line value of given argument
def getCmdlineValue(self, key):
if key not in self.argv:
return None
argv_index = self.argv.index(key)
if argv_index == len(self.argv) - 1: # last arg, value not specified
return None
return self.argv[argv_index + 1]
# Expose arguments as class attributes
def setAttributes(self):
# Set attributes from arguments
if self.arguments:
args = vars(self.arguments)
for key, val in args.items():
if type(val) is list:
val = val[:]
if key in ("data_dir", "log_dir", "start_dir", "openssl_bin_file", "openssl_lib_file"):
if val:
val = val.replace("\\", "/")
setattr(self, key, val)
def loadPlugins(self):
from Plugin import PluginManager
@PluginManager.acceptPlugins
class ConfigPlugin(object):
def __init__(self, config):
self.argv = config.argv
self.parser = config.parser
self.subparsers = config.subparsers
self.test_parser = config.test_parser
self.getCmdlineValue = config.getCmdlineValue
self.createArguments()
def createArguments(self):
pass
ConfigPlugin(self)
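# Sketch of how a bundled plugin typically hooks into the ConfigPlugin class
# above to register its own options (plugin and flag names are placeholders):
#   from Plugin import PluginManager
#   @PluginManager.registerTo("ConfigPlugin")
#   class ConfigPlugin(object):
#       def createArguments(self):
#           group = self.parser.add_argument_group("Myplugin plugin")
#           group.add_argument("--myplugin_enabled", action='store_true')
#           return super(ConfigPlugin, self).createArguments()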
def saveValue(self, key, value):
if not os.path.isfile(self.config_file):
content = ""
else:
content = open(self.config_file).read()
lines = content.splitlines()
global_line_i = None
key_line_i = None
i = 0
for line in lines:
if line.strip() == "[global]":
global_line_i = i
if line.startswith(key + " =") or line == key:
key_line_i = i
i += 1
if key_line_i and len(lines) > key_line_i + 1:
while True: # Delete previous multiline values
is_value_line = lines[key_line_i + 1].startswith(" ") or lines[key_line_i + 1].startswith("\t")
if not is_value_line:
break
del lines[key_line_i + 1]
if value is None: # Delete line
if key_line_i:
del lines[key_line_i]
else: # Add / update
if type(value) is list:
value_lines = [""] + [str(line).replace("\n", "").replace("\r", "") for line in value]
else:
value_lines = [str(value).replace("\n", "").replace("\r", "")]
new_line = "%s = %s" % (key, "\n ".join(value_lines))
if key_line_i: # Already in the config, change the line
lines[key_line_i] = new_line
elif global_line_i is None: # No global section yet, append to end of file
lines.append("[global]")
lines.append(new_line)
else: # Has global section, append the line after it
lines.insert(global_line_i + 1, new_line)
open(self.config_file, "w").write("\n".join(lines))
def getServerInfo(self):
from Plugin import PluginManager
import main
info = {
"platform": sys.platform,
"fileserver_ip": self.fileserver_ip,
"fileserver_port": self.fileserver_port,
"ui_ip": self.ui_ip,
"ui_port": self.ui_port,
"version": self.version,
"rev": self.rev,
"language": self.language,
"debug": self.debug,
"plugins": PluginManager.plugin_manager.plugin_names,
"log_dir": os.path.abspath(self.log_dir),
"data_dir": os.path.abspath(self.data_dir),
"src_dir": os.path.dirname(os.path.abspath(__file__))
}
try:
info["ip_external"] = main.file_server.port_opened
info["tor_enabled"] = main.file_server.tor_manager.enabled
info["tor_status"] = main.file_server.tor_manager.status
except Exception:
pass
return info
def initConsoleLogger(self):
if self.action == "main":
format = '[%(asctime)s] %(name)s %(message)s'
else:
format = '%(name)s %(message)s'
if self.console_log_level == "default":
if self.silent:
level = logging.ERROR
elif self.debug:
level = logging.DEBUG
else:
level = logging.INFO
else:
level = logging.getLevelName(self.console_log_level)
console_logger = logging.StreamHandler()
console_logger.setFormatter(logging.Formatter(format, "%H:%M:%S"))
console_logger.setLevel(level)
logging.getLogger('').addHandler(console_logger)
def initFileLogger(self):
if self.action == "main":
log_file_path = "%s/debug.log" % self.log_dir
else:
log_file_path = "%s/cmd.log" % self.log_dir
if self.log_rotate == "off":
file_logger = logging.FileHandler(log_file_path, "w", "utf-8")
else:
when_names = {"weekly": "w", "daily": "d", "hourly": "h"}
file_logger = logging.handlers.TimedRotatingFileHandler(
log_file_path, when=when_names[self.log_rotate], interval=1, backupCount=self.log_rotate_backup_count,
encoding="utf8"
)
if os.path.isfile(log_file_path):
file_logger.doRollover() # Always start with empty log file
file_logger.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)-8s %(name)s %(message)s'))
file_logger.setLevel(logging.getLevelName(self.log_level))
logging.getLogger('').setLevel(logging.getLevelName(self.log_level))
logging.getLogger('').addHandler(file_logger)
def initLogging(self, console_logging=None, file_logging=None):
if console_logging is None:
console_logging = self.console_log_level != "off"
if file_logging is None:
file_logging = self.log_level != "off"
# Create necessary files and dirs
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
try:
os.chmod(self.log_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception as err:
print("Can't change permission of %s: %s" % (self.log_dir, err))
# Make warning hidden from console
logging.WARNING = 15 # Don't display warnings if not in debug mode
logging.addLevelName(15, "WARNING")
logging.getLogger('').name = "-" # Remove root prefix
self.error_logger = ErrorLogHandler()
self.error_logger.setLevel(logging.getLevelName("ERROR"))
logging.getLogger('').addHandler(self.error_logger)
if console_logging:
self.initConsoleLogger()
if file_logging:
self.initFileLogger()
class ErrorLogHandler(logging.StreamHandler):
def __init__(self):
self.lines = []
return super().__init__()
def emit(self, record):
self.lines.append([time.time(), record.levelname, self.format(record)])
def onNewRecord(self, record):
pass
config = Config(sys.argv)
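A minimal sketch of driving the module-level `config` object defined above, assuming `src/` is on `sys.path` as the launcher script arranges; the printed attributes are only examples:

from Config import config

config.parse(silent=True)   # first pass, so plugins can already read settings
config.loadPlugins()        # let plugins register their own arguments
config.parse()              # final pass: command line merged with the config file
config.initLogging()
print(config.version, config.ui_ip, config.ui_port)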

View file

@ -1,635 +0,0 @@
import socket
import time
import gevent
try:
from gevent.coros import RLock
except ImportError:
from gevent.lock import RLock
from Config import config
from Debug import Debug
from util import Msgpack
from Crypt import CryptConnection
from util import helper
class Connection(object):
__slots__ = (
"sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
"handshake", "crypt", "connected", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
"last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
"last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
)
def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
self.sock = sock
self.cert_pin = None
if "#" in ip:
ip, self.cert_pin = ip.split("#")
self.target_onion = target_onion # Requested onion address
self.id = server.last_connection_id
server.last_connection_id += 1
self.protocol = "?"
self.type = "?"
self.ip_type = "?"
self.port = int(port)
self.setIp(ip)
if helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
self.is_private_ip = True
else:
self.is_private_ip = False
self.is_tracker_connection = is_tracker_connection
self.server = server
self.unpacker = None # Stream incoming socket messages here
self.unpacker_bytes = 0 # How many bytes the unpacker received
self.req_id = 0 # Last request id
self.handshake = {} # Handshake info got from peer
self.crypt = None # Connection encryption method
self.sock_wrapped = False # Socket wrapped to encryption
self.connected = False
self.event_connected = gevent.event.AsyncResult() # Resolved when the handshake is received
self.closed = False
# Stats
self.start_time = time.time()
self.handshake_time = 0
self.last_recv_time = 0
self.last_message_time = 0
self.last_send_time = 0
self.last_sent_time = 0
self.incomplete_buff_recv = 0
self.bytes_recv = 0
self.bytes_sent = 0
self.last_ping_delay = None
self.last_req_time = 0
self.last_cmd_sent = None
self.last_cmd_recv = None
self.bad_actions = 0
self.sites = 0
self.cpu_time = 0.0
self.send_lock = RLock()
self.name = None
self.updateName()
self.waiting_requests = {} # Waiting sent requests
self.waiting_streams = {} # Waiting response file streams
def setIp(self, ip):
self.ip = ip
self.ip_type = helper.getIpType(ip)
self.updateName()
def createSocket(self):
if helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
# Create IPv6 connection as IPv4 when using proxy
return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def updateName(self):
self.name = "Conn#%2s %-12s [%s]" % (self.id, self.ip, self.protocol)
def __str__(self):
return self.name
def __repr__(self):
return "<%s>" % self.__str__()
def log(self, text):
self.server.log.debug("%s > %s" % (self.name, text))
def getValidSites(self):
return [key for key, val in self.server.tor_manager.site_onions.items() if val == self.target_onion]
def badAction(self, weight=1):
self.bad_actions += weight
if self.bad_actions > 40:
self.close("Too many bad actions")
elif self.bad_actions > 20:
time.sleep(5)
def goodAction(self):
self.bad_actions = 0
# Open connection to peer and wait for handshake
def connect(self):
self.type = "out"
if self.ip_type == "onion":
if not self.server.tor_manager or not self.server.tor_manager.enabled:
raise Exception("Can't connect to onion addresses, no Tor controller present")
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
elif config.tor == "always" and helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
raise Exception("Can't connect to local IPs in Tor: always mode")
elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
if config.trackers_proxy == "tor":
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
else:
import socks
self.sock = socks.socksocket()
proxy_ip, proxy_port = config.trackers_proxy.split(":")
self.sock.set_proxy(socks.PROXY_TYPE_SOCKS5, proxy_ip, int(proxy_port))
else:
self.sock = self.createSocket()
if "TCP_NODELAY" in dir(socket):
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
timeout_before = self.sock.gettimeout()
self.sock.settimeout(30)
if self.ip_type == "ipv6" and not hasattr(self.sock, "proxy"):
sock_address = (self.ip, self.port, 1, 1)
else:
sock_address = (self.ip, self.port)
self.sock.connect(sock_address)
# Implicit SSL
should_encrypt = not self.ip_type == "onion" and self.ip not in self.server.broken_ssl_ips and self.ip not in config.ip_local
if self.cert_pin:
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", cert_pin=self.cert_pin)
self.sock.do_handshake()
self.crypt = "tls-rsa"
self.sock_wrapped = True
elif should_encrypt and "tls-rsa" in CryptConnection.manager.crypt_supported:
try:
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa")
self.sock.do_handshake()
self.crypt = "tls-rsa"
self.sock_wrapped = True
except Exception as err:
if not config.force_encryption:
self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
self.server.broken_ssl_ips[self.ip] = True
self.sock.close()
self.crypt = None
self.sock = self.createSocket()
self.sock.settimeout(30)
self.sock.connect(sock_address)
# Detect protocol
self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
event_connected = self.event_connected
gevent.spawn(self.messageLoop)
connect_res = event_connected.get() # Wait for handshake
self.sock.settimeout(timeout_before)
return connect_res
# Handle incoming connection
def handleIncomingConnection(self, sock):
self.log("Incoming connection...")
if "TCP_NODELAY" in dir(socket):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.type = "in"
if self.ip not in config.ip_local: # Clearnet: Check implicit SSL
try:
first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
if first_byte == b"\x16":
self.log("Crypt in connection using implicit SSL")
self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", True)
self.sock_wrapped = True
self.crypt = "tls-rsa"
except Exception as err:
self.log("Socket peek error: %s" % Debug.formatException(err))
self.messageLoop()
def getMsgpackUnpacker(self):
if self.handshake and self.handshake.get("use_bin_type"):
return Msgpack.getUnpacker(fallback=True, decode=False)
else: # Backward compatibility for <0.7.0
return Msgpack.getUnpacker(fallback=True, decode=True)
# Message loop for connection
def messageLoop(self):
if not self.sock:
self.log("Socket error: No socket found")
return False
self.protocol = "v2"
self.updateName()
self.connected = True
buff_len = 0
req_len = 0
self.unpacker_bytes = 0
try:
while not self.closed:
buff = self.sock.recv(64 * 1024)
if not buff:
break # Connection closed
buff_len = len(buff)
# Statistics
self.last_recv_time = time.time()
self.incomplete_buff_recv += 1
self.bytes_recv += buff_len
self.server.bytes_recv += buff_len
req_len += buff_len
if not self.unpacker:
self.unpacker = self.getMsgpackUnpacker()
self.unpacker_bytes = 0
self.unpacker.feed(buff)
self.unpacker_bytes += buff_len
while True:
try:
message = next(self.unpacker)
except StopIteration:
break
if not type(message) is dict:
if config.debug_socket:
self.log("Invalid message type: %s, content: %r, buffer: %r" % (type(message), message, buff[0:16]))
raise Exception("Invalid message type: %s" % type(message))
# Stats
self.incomplete_buff_recv = 0
stat_key = message.get("cmd", "unknown")
if stat_key == "response" and "to" in message:
cmd_sent = self.waiting_requests.get(message["to"], {"cmd": "unknown"})["cmd"]
stat_key = "response: %s" % cmd_sent
if stat_key == "update":
stat_key = "update: %s" % message["params"]["site"]
self.server.stat_recv[stat_key]["bytes"] += req_len
self.server.stat_recv[stat_key]["num"] += 1
if "stream_bytes" in message:
self.server.stat_recv[stat_key]["bytes"] += message["stream_bytes"]
req_len = 0
# Handle message
if "stream_bytes" in message:
buff_left = self.handleStream(message, buff)
self.unpacker = self.getMsgpackUnpacker()
self.unpacker.feed(buff_left)
self.unpacker_bytes = len(buff_left)
if config.debug_socket:
self.log("Start new unpacker with buff_left: %r" % buff_left)
else:
self.handleMessage(message)
message = None
except Exception as err:
if not self.closed:
self.log("Socket error: %s" % Debug.formatException(err))
self.server.stat_recv["error: %s" % err]["bytes"] += req_len
self.server.stat_recv["error: %s" % err]["num"] += 1
self.close("MessageLoop ended (closed: %s)" % self.closed) # MessageLoop ended, close connection
def getUnpackerUnprocessedBytesNum(self):
if "tell" in dir(self.unpacker):
bytes_num = self.unpacker_bytes - self.unpacker.tell()
else:
bytes_num = self.unpacker._fb_buf_n - self.unpacker._fb_buf_o
return bytes_num
# Stream socket directly to a file
def handleStream(self, message, buff):
stream_bytes_left = message["stream_bytes"]
file = self.waiting_streams[message["to"]]
unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()
if unprocessed_bytes_num: # Found stream bytes in unpacker
unpacker_stream_bytes = min(unprocessed_bytes_num, stream_bytes_left)
buff_stream_start = len(buff) - unprocessed_bytes_num
file.write(buff[buff_stream_start:buff_stream_start + unpacker_stream_bytes])
stream_bytes_left -= unpacker_stream_bytes
else:
unpacker_stream_bytes = 0
if config.debug_socket:
self.log(
"Starting stream %s: %s bytes (%s from unpacker, buff size: %s, unprocessed: %s)" %
(message["to"], message["stream_bytes"], unpacker_stream_bytes, len(buff), unprocessed_bytes_num)
)
try:
while 1:
if stream_bytes_left <= 0:
break
stream_buff = self.sock.recv(min(64 * 1024, stream_bytes_left))
if not stream_buff:
break
buff_len = len(stream_buff)
stream_bytes_left -= buff_len
file.write(stream_buff)
# Statistics
self.last_recv_time = time.time()
self.incomplete_buff_recv += 1
self.bytes_recv += buff_len
self.server.bytes_recv += buff_len
except Exception as err:
self.log("Stream read error: %s" % Debug.formatException(err))
if config.debug_socket:
self.log("End stream %s, file pos: %s" % (message["to"], file.tell()))
self.incomplete_buff_recv = 0
self.waiting_requests[message["to"]]["evt"].set(message) # Set the response to event
del self.waiting_streams[message["to"]]
del self.waiting_requests[message["to"]]
if unpacker_stream_bytes:
return buff[buff_stream_start + unpacker_stream_bytes:]
else:
return b""
# My handshake info
def getHandshakeInfo(self):
# No TLS for onion connections
if self.ip_type == "onion":
crypt_supported = []
elif self.ip in self.server.broken_ssl_ips:
crypt_supported = []
else:
crypt_supported = CryptConnection.manager.crypt_supported
# No peer id for onion connections
if self.ip_type == "onion" or self.ip in config.ip_local:
peer_id = ""
else:
peer_id = self.server.peer_id
# Setup peer lock from requested onion address
if self.handshake and self.handshake.get("target_ip", "").endswith(".onion") and self.server.tor_manager.start_onions:
self.target_onion = self.handshake.get("target_ip").replace(".onion", "") # My onion address
if not self.server.tor_manager.site_onions.values():
self.server.log.warning("Unknown target onion address: %s" % self.target_onion)
handshake = {
"version": config.version,
"protocol": "v2",
"use_bin_type": True,
"peer_id": peer_id,
"fileserver_port": self.server.port,
"port_opened": self.server.port_opened.get(self.ip_type, None),
"target_ip": self.ip,
"rev": config.rev,
"crypt_supported": crypt_supported,
"crypt": self.crypt,
"time": int(time.time())
}
if self.target_onion:
handshake["onion"] = self.target_onion
elif self.ip_type == "onion":
handshake["onion"] = self.server.tor_manager.getOnion("global")
if self.is_tracker_connection:
handshake["tracker_connection"] = True
if config.debug_socket:
self.log("My Handshake: %s" % handshake)
return handshake
def setHandshake(self, handshake):
if config.debug_socket:
self.log("Remote Handshake: %s" % handshake)
if handshake.get("peer_id") == self.server.peer_id and not handshake.get("tracker_connection") and not self.is_tracker_connection:
self.close("Same peer id, can't connect to myself")
self.server.peer_blacklist.append((handshake["target_ip"], handshake["fileserver_port"]))
return False
self.handshake = handshake
if handshake.get("port_opened", None) is False and "onion" not in handshake and not self.is_private_ip: # Not connectable
self.port = 0
else:
self.port = int(handshake["fileserver_port"]) # Set peer fileserver port
if handshake.get("use_bin_type") and self.unpacker:
unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()
self.log("Changing unpacker to bin type (unprocessed bytes: %s)" % unprocessed_bytes_num)
unprocessed_bytes = self.unpacker.read_bytes(unprocessed_bytes_num)
self.unpacker = self.getMsgpackUnpacker() # Create new unpacker for different msgpack type
self.unpacker_bytes = 0
if unprocessed_bytes:
self.unpacker.feed(unprocessed_bytes)
# Check if we can encrypt the connection
if handshake.get("crypt_supported") and self.ip not in self.server.broken_ssl_ips:
if type(handshake["crypt_supported"][0]) is bytes:
handshake["crypt_supported"] = [item.decode() for item in handshake["crypt_supported"]] # Backward compatibility
if self.ip_type == "onion" or self.ip in config.ip_local:
crypt = None
elif handshake.get("crypt"): # Recommended crypt by server
crypt = handshake["crypt"]
else: # Select the best supported on both sides
crypt = CryptConnection.manager.selectCrypt(handshake["crypt_supported"])
if crypt:
self.crypt = crypt
if self.type == "in" and handshake.get("onion") and not self.ip_type == "onion": # Set incoming connection's onion address
if self.server.ips.get(self.ip) == self:
del self.server.ips[self.ip]
self.setIp(handshake["onion"] + ".onion")
self.log("Changing ip to %s" % self.ip)
self.server.ips[self.ip] = self
self.updateName()
self.event_connected.set(True) # Mark handshake as done
self.event_connected = None
self.handshake_time = time.time()
# Handle incoming message
def handleMessage(self, message):
cmd = message["cmd"]
self.last_message_time = time.time()
self.last_cmd_recv = cmd
if cmd == "response": # New style response
if message["to"] in self.waiting_requests:
if self.last_send_time and len(self.waiting_requests) == 1:
ping = time.time() - self.last_send_time
self.last_ping_delay = ping
self.waiting_requests[message["to"]]["evt"].set(message) # Set the response to event
del self.waiting_requests[message["to"]]
elif message["to"] == 0: # Other peers handshake
ping = time.time() - self.start_time
if config.debug_socket:
self.log("Handshake response: %s, ping: %s" % (message, ping))
self.last_ping_delay = ping
# Server switched to crypt, let's do it too if not already encrypted
if message.get("crypt") and not self.sock_wrapped:
self.crypt = message["crypt"]
server = (self.type == "in")
self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (self.crypt, server, ping))
self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
self.sock.do_handshake()
self.sock_wrapped = True
if not self.sock_wrapped and self.cert_pin:
self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
return
self.setHandshake(message)
else:
self.log("Unknown response: %s" % message)
elif cmd:
self.server.num_recv += 1
if cmd == "handshake":
self.handleHandshake(message)
else:
self.server.handleRequest(self, message)
# Incoming handshake set request
def handleHandshake(self, message):
self.setHandshake(message["params"])
data = self.getHandshakeInfo()
data["cmd"] = "response"
data["to"] = message["req_id"]
self.send(data) # Send response to handshake
# Sent crypt request to client
if self.crypt and not self.sock_wrapped:
server = (self.type == "in")
self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
try:
self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
self.sock_wrapped = True
except Exception as err:
if not config.force_encryption:
self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
self.server.broken_ssl_ips[self.ip] = True
self.close("Broken ssl")
if not self.sock_wrapped and self.cert_pin:
self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
# Send data to connection
def send(self, message, streaming=False):
self.last_send_time = time.time()
if config.debug_socket:
self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
message.get("cmd"), message.get("to"), streaming,
message.get("params", {}).get("site"), message.get("params", {}).get("inner_path"),
message.get("req_id"))
)
if not self.sock:
self.log("Send error: missing socket")
return False
if not self.connected and message.get("cmd") != "handshake":
self.log("Wait for handshake before send request")
self.event_connected.get()
try:
stat_key = message.get("cmd", "unknown")
if stat_key == "response":
stat_key = "response: %s" % self.last_cmd_recv
else:
self.server.num_sent += 1
self.server.stat_sent[stat_key]["num"] += 1
if streaming:
with self.send_lock:
bytes_sent = Msgpack.stream(message, self.sock.sendall)
self.bytes_sent += bytes_sent
self.server.bytes_sent += bytes_sent
self.server.stat_sent[stat_key]["bytes"] += bytes_sent
message = None
else:
data = Msgpack.pack(message)
self.bytes_sent += len(data)
self.server.bytes_sent += len(data)
self.server.stat_sent[stat_key]["bytes"] += len(data)
message = None
with self.send_lock:
self.sock.sendall(data)
except Exception as err:
self.close("Send error: %s (cmd: %s)" % (err, stat_key))
return False
self.last_sent_time = time.time()
return True
# Stream file to connection without msgpacking
def sendRawfile(self, file, read_bytes):
buff = 64 * 1024
bytes_left = read_bytes
bytes_sent = 0
while True:
self.last_send_time = time.time()
data = file.read(min(bytes_left, buff))
bytes_sent += len(data)
with self.send_lock:
self.sock.sendall(data)
bytes_left -= buff
if bytes_left <= 0:
break
self.bytes_sent += bytes_sent
self.server.bytes_sent += bytes_sent
self.server.stat_sent["raw_file"]["num"] += 1
self.server.stat_sent["raw_file"]["bytes"] += bytes_sent
return True
# Create and send a request to peer
def request(self, cmd, params={}, stream_to=None):
# Last command sent more than 10 sec ago, timeout
if self.waiting_requests and self.protocol == "v2" and time.time() - max(self.last_req_time, self.last_recv_time) > 10:
self.close("Request %s timeout: %.3fs" % (self.last_cmd_sent, time.time() - self.last_send_time))
return False
self.last_req_time = time.time()
self.last_cmd_sent = cmd
self.req_id += 1
data = {"cmd": cmd, "req_id": self.req_id, "params": params}
event = gevent.event.AsyncResult() # Create new event for response
self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
if stream_to:
self.waiting_streams[self.req_id] = stream_to
self.send(data) # Send request
res = event.get() # Wait until the event resolves
return res
def ping(self):
s = time.time()
response = None
with gevent.Timeout(10.0, False):
try:
response = self.request("ping")
except Exception as err:
self.log("Ping error: %s" % Debug.formatException(err))
if response and "body" in response and response["body"] == b"Pong!":
self.last_ping_delay = time.time() - s
return True
else:
return False
# Close connection
def close(self, reason="Unknown"):
if self.closed:
return False # Already closed
self.closed = True
self.connected = False
if self.event_connected:
self.event_connected.set(False)
self.log(
"Closing connection: %s, waiting_requests: %s, sites: %s, buff: %s..." %
(reason, len(self.waiting_requests), self.sites, self.incomplete_buff_recv)
)
for request in self.waiting_requests.values(): # Mark pending requests failed
request["evt"].set(False)
self.waiting_requests = {}
self.waiting_streams = {}
self.sites = 0
self.server.removeConnection(self) # Remove connection from server registry
try:
if self.sock:
self.sock.shutdown(gevent.socket.SHUT_WR)
self.sock.close()
except Exception as err:
if config.debug_socket:
self.log("Close error: %s" % err)
# Little cleanup
self.sock = None
self.unpacker = None
self.event_connected = None
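A hedged sketch of how the rest of the code base drives this class; connections are normally obtained from ConnectionServer.getConnection() (defined in the next file) rather than constructed directly, and the peer address below is a placeholder:

from Connection import ConnectionServer

def ping_peer(server: ConnectionServer, ip="203.0.113.7", port=15441):
    connection = server.getConnection(ip, port)  # reuse or open an outgoing connection
    ok = connection.ping()                       # sends {"cmd": "ping"}, expects body b"Pong!"
    if ok:
        print("Peer reachable, delay: %.3fs" % connection.last_ping_delay)
    connection.close("Ping check done")
    return ok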

View file

@ -1,386 +0,0 @@
import logging
import time
import sys
import socket
from collections import defaultdict
import gevent
import msgpack
from gevent.server import StreamServer
from gevent.pool import Pool
import util
from util import helper
from Debug import Debug
from .Connection import Connection
from Config import config
from Crypt import CryptConnection
from Crypt import CryptHash
from Tor import TorManager
from Site import SiteManager
class ConnectionServer(object):
def __init__(self, ip=None, port=None, request_handler=None):
if not ip:
if config.fileserver_ip_type == "ipv6":
ip = "::1"
else:
ip = "127.0.0.1"
port = 15441
self.ip = ip
self.port = port
self.last_connection_id = 0 # Connection id incrementer
self.last_connection_id_current_version = 0 # Connection id incrementer for current client version
self.last_connection_id_supported_version = 0 # Connection id incrementer for last supported version
self.log = logging.getLogger("ConnServer")
self.port_opened = {}
self.peer_blacklist = SiteManager.peer_blacklist
self.tor_manager = TorManager(self.ip, self.port)
self.connections = [] # Connections
self.whitelist = config.ip_local # No flood protection on these ips
self.ip_incoming = {} # Incoming connections from ip in the last minute to avoid connection flood
self.broken_ssl_ips = {} # Ips with broken ssl connections
self.ips = {} # Connection by ip
self.has_internet = True # Internet outage detection
self.stream_server = None
self.stream_server_proxy = None
self.running = False
self.stopping = False
self.thread_checker = None
self.stat_recv = defaultdict(lambda: defaultdict(int))
self.stat_sent = defaultdict(lambda: defaultdict(int))
self.bytes_recv = 0
self.bytes_sent = 0
self.num_recv = 0
self.num_sent = 0
self.num_incoming = 0
self.num_outgoing = 0
self.had_external_incoming = False
self.timecorrection = 0.0
self.pool = Pool(500) # do not accept more than 500 connections
# Bittorrent style peerid
self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")
# Check msgpack version
if msgpack.version[0] == 0 and msgpack.version[1] < 4:
self.log.error(
"Error: Unsupported msgpack version: %s (<0.4.0), please run `sudo apt-get install python-pip; sudo pip install msgpack --upgrade`" %
str(msgpack.version)
)
sys.exit(0)
if request_handler:
self.handleRequest = request_handler
def start(self, check_connections=True):
if self.stopping:
return False
self.running = True
if check_connections:
self.thread_checker = gevent.spawn(self.checkConnections)
CryptConnection.manager.loadCerts()
if config.tor != "disable":
self.tor_manager.start()
if not self.port:
self.log.info("No port found, not binding")
return False
self.log.debug("Binding to: %s:%s, (msgpack: %s), supported crypt: %s" % (
self.ip, self.port, ".".join(map(str, msgpack.version)),
CryptConnection.manager.crypt_supported
))
try:
self.stream_server = StreamServer(
(self.ip, self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
)
except Exception as err:
self.log.info("StreamServer create error: %s" % Debug.formatException(err))
def listen(self):
if not self.running:
return None
if self.stream_server_proxy:
gevent.spawn(self.listenProxy)
try:
self.stream_server.serve_forever()
except Exception as err:
self.log.info("StreamServer listen error: %s" % err)
return False
self.log.debug("Stopped.")
def stop(self):
self.log.debug("Stopping %s" % self.stream_server)
self.stopping = True
self.running = False
if self.thread_checker:
gevent.kill(self.thread_checker)
if self.stream_server:
self.stream_server.stop()
def closeConnections(self):
self.log.debug("Closing all connection: %s" % len(self.connections))
for connection in self.connections[:]:
connection.close("Close all connections")
def handleIncomingConnection(self, sock, addr):
if config.offline:
sock.close()
return False
ip, port = addr[0:2]
ip = ip.lower()
if ip.startswith("::ffff:"): # IPv6 to IPv4 mapping
ip = ip.replace("::ffff:", "", 1)
self.num_incoming += 1
if not self.had_external_incoming and not helper.isPrivateIp(ip):
self.had_external_incoming = True
# Connection flood protection
if ip in self.ip_incoming and ip not in self.whitelist:
self.ip_incoming[ip] += 1
if self.ip_incoming[ip] > 6: # Allow 6 in 1 minute from same ip
self.log.debug("Connection flood detected from %s" % ip)
time.sleep(30)
sock.close()
return False
else:
self.ip_incoming[ip] = 1
connection = Connection(self, ip, port, sock)
self.connections.append(connection)
rev = connection.handshake.get("rev", 0)
if rev >= 4560:
self.last_connection_id_supported_version += 1
if rev == config.rev:
self.last_connection_id_current_version += 1
if ip not in config.ip_local:
self.ips[ip] = connection
connection.handleIncomingConnection(sock)
def handleMessage(self, *args, **kwargs):
pass
def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
ip_type = helper.getIpType(ip)
has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
if has_per_site_onion: # Site-unique connection for Tor
if ip.endswith(".onion"):
site_onion = self.tor_manager.getOnion(site.address)
else:
site_onion = self.tor_manager.getOnion("global")
key = ip + site_onion
else:
key = ip
# Find connection by ip
if key in self.ips:
connection = self.ips[key]
if not peer_id or connection.handshake.get("peer_id") == peer_id: # Filter by peer_id
if not connection.connected and create:
succ = connection.event_connected.get() # Wait for connection
if not succ:
raise Exception("Connection event return error")
return connection
# Recover from connection pool
for connection in self.connections:
if connection.ip == ip:
if peer_id and connection.handshake.get("peer_id") != peer_id: # Does not match
continue
if ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion:
# For different site
continue
if not connection.connected and create:
succ = connection.event_connected.get() # Wait for connection
if not succ:
raise Exception("Connection event return error")
return connection
# No connection found
if create and not config.offline: # Allow creating a new connection if not found
if port == 0:
raise Exception("This peer is not connectable")
if (ip, port) in self.peer_blacklist and not is_tracker_connection:
raise Exception("This peer is blacklisted")
try:
if has_per_site_onion: # Lock connection to site
connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
else:
connection = Connection(self, ip, port, is_tracker_connection=is_tracker_connection)
self.num_outgoing += 1
self.ips[key] = connection
self.connections.append(connection)
connection.log("Connecting... (site: %s)" % site)
succ = connection.connect()
if not succ:
connection.close("Connection event return error")
raise Exception("Connection event return error")
else:
rev = connection.handshake.get("rev", 0)
if rev >= 4560:
self.last_connection_id_supported_version += 1
if rev == config.rev:
self.last_connection_id_current_version += 1
except Exception as err:
connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
raise err
if len(self.connections) > config.global_connected_limit:
gevent.spawn(self.checkMaxConnections)
return connection
else:
return None
def removeConnection(self, connection):
# Delete if same as in registry
if self.ips.get(connection.ip) == connection:
del self.ips[connection.ip]
# Site locked connection
if connection.target_onion:
if self.ips.get(connection.ip + connection.target_onion) == connection:
del self.ips[connection.ip + connection.target_onion]
# Cert pinned connection
if connection.cert_pin and self.ips.get(connection.ip + "#" + connection.cert_pin) == connection:
del self.ips[connection.ip + "#" + connection.cert_pin]
if connection in self.connections:
self.connections.remove(connection)
def checkConnections(self):
run_i = 0
time.sleep(15)
while self.running:
run_i += 1
self.ip_incoming = {} # Reset connected ips counter
last_message_time = 0
s = time.time()
for connection in self.connections[:]: # Make a copy
if connection.ip.endswith(".onion") or config.tor == "always":
timeout_multipler = 2
else:
timeout_multipler = 1
idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
if connection.last_message_time > last_message_time and not connection.is_private_ip:
# A message from local IPs does not mean internet connection
last_message_time = connection.last_message_time
if connection.unpacker and idle > 30:
# Delete the unpacker if not needed
del connection.unpacker
connection.unpacker = None
elif connection.last_cmd_sent == "announce" and idle > 20: # Bootstrapper connection close after 20 sec
connection.close("[Cleanup] Tracker connection, idle: %.3fs" % idle)
if idle > 60 * 60:
# Wake up after 1h
connection.close("[Cleanup] After wakeup, idle: %.3fs" % idle)
elif idle > 20 * 60 and connection.last_send_time < time.time() - 10:
# Idle more than 20 min and we have not sent request in last 10 sec
if not connection.ping():
connection.close("[Cleanup] Ping timeout")
elif idle > 10 * timeout_multipler and connection.incomplete_buff_recv > 0:
# Incomplete data with more than 10 sec idle
connection.close("[Cleanup] Connection buff stalled")
elif idle > 10 * timeout_multipler and connection.protocol == "?": # No connection after 10 sec
connection.close(
"[Cleanup] Connect timeout: %.3fs" % idle
)
elif idle > 10 * timeout_multipler and connection.waiting_requests and time.time() - connection.last_send_time > 10 * timeout_multipler:
# Sent command and no response in 10 sec
connection.close(
"[Cleanup] Command %s timeout: %.3fs" % (connection.last_cmd_sent, time.time() - connection.last_send_time)
)
elif idle < 60 and connection.bad_actions > 40:
connection.close(
"[Cleanup] Too many bad actions: %s" % connection.bad_actions
)
elif idle > 5 * 60 and connection.sites == 0:
connection.close(
"[Cleanup] No site for connection"
)
elif run_i % 90 == 0:
# Reset bad action counter every 30 min
connection.bad_actions = 0
# Internet outage detection
if time.time() - last_message_time > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
# Offline: Last message more than 60-600sec depending on connection number
if self.has_internet and last_message_time:
self.has_internet = False
self.onInternetOffline()
else:
# Online
if not self.has_internet:
self.has_internet = True
self.onInternetOnline()
self.timecorrection = self.getTimecorrection()
if time.time() - s > 0.01:
self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))
time.sleep(15)
self.log.debug("Checkconnections ended")
@util.Noparallel(blocking=False)
def checkMaxConnections(self):
if len(self.connections) < config.global_connected_limit:
return 0
s = time.time()
num_connected_before = len(self.connections)
self.connections.sort(key=lambda connection: connection.sites)
num_closed = 0
for connection in self.connections:
idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
if idle > 60:
connection.close("Connection limit reached")
num_closed += 1
if num_closed > config.global_connected_limit * 0.1:
break
self.log.debug("Closed %s connections of %s after reached limit %s in %.3fs" % (
num_closed, num_connected_before, config.global_connected_limit, time.time() - s
))
return num_closed
def onInternetOnline(self):
self.log.info("Internet online")
def onInternetOffline(self):
self.had_external_incoming = False
self.log.info("Internet offline")
def getTimecorrection(self):
corrections = sorted([
connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
for connection in self.connections
if connection.handshake.get("time") and connection.last_ping_delay
])
if len(corrections) < 9:
return 0.0
mid = int(len(corrections) / 2 - 1)
median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
return median
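A minimal startup sketch for the server above; the request handler is a placeholder for the file-server dispatcher that ZeroNet normally plugs in:

import gevent
from Connection import ConnectionServer

def handle_request(connection, message):
    # Placeholder dispatcher; the real one answers commands such as ping and getFile
    connection.log("Unhandled request: %s" % message.get("cmd"))

server = ConnectionServer("127.0.0.1", 15441, request_handler=handle_request)
server.start()               # spawns the connection checker and loads TLS certs
gevent.spawn(server.listen)  # serve_forever() without blocking the caller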

View file

@ -1,2 +0,0 @@
from .ConnectionServer import ConnectionServer
from .Connection import Connection

View file

@ -1,162 +0,0 @@
import os
from Db.Db import Db, DbTableError
from Config import config
from Plugin import PluginManager
from Debug import Debug
@PluginManager.acceptPlugins
class ContentDb(Db):
def __init__(self, path):
Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, path)
self.foreign_keys = True
def init(self):
try:
self.schema = self.getSchema()
try:
self.checkTables()
except DbTableError:
pass
self.log.debug("Checking foreign keys...")
foreign_key_error = self.execute("PRAGMA foreign_key_check").fetchone()
if foreign_key_error:
raise Exception("Database foreign key error: %s" % foreign_key_error)
except Exception as err:
self.log.error("Error loading content.db: %s, rebuilding..." % Debug.formatException(err))
self.close()
os.unlink(self.db_path) # Remove and try again
Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, self.db_path)
self.foreign_keys = True
self.schema = self.getSchema()
try:
self.checkTables()
except DbTableError:
pass
self.site_ids = {}
self.sites = {}
def getSchema(self):
schema = {}
schema["db_name"] = "ContentDb"
schema["version"] = 3
schema["tables"] = {}
if not self.getTableVersion("site"):
self.log.debug("Migrating from table version-less content.db")
version = int(self.execute("PRAGMA user_version").fetchone()[0])
if version > 0:
self.checkTables()
self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.site.version", "value": 1})
self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.content.version", "value": 1})
schema["tables"]["site"] = {
"cols": [
["site_id", "INTEGER PRIMARY KEY ASC NOT NULL UNIQUE"],
["address", "TEXT NOT NULL"]
],
"indexes": [
"CREATE UNIQUE INDEX site_address ON site (address)"
],
"schema_changed": 1
}
schema["tables"]["content"] = {
"cols": [
["content_id", "INTEGER PRIMARY KEY UNIQUE NOT NULL"],
["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
["inner_path", "TEXT"],
["size", "INTEGER"],
["size_files", "INTEGER"],
["size_files_optional", "INTEGER"],
["modified", "INTEGER"]
],
"indexes": [
"CREATE UNIQUE INDEX content_key ON content (site_id, inner_path)",
"CREATE INDEX content_modified ON content (site_id, modified)"
],
"schema_changed": 1
}
return schema
def initSite(self, site):
self.sites[site.address] = site
def needSite(self, site):
if site.address not in self.site_ids:
self.execute("INSERT OR IGNORE INTO site ?", {"address": site.address})
self.site_ids = {}
for row in self.execute("SELECT * FROM site"):
self.site_ids[row["address"]] = row["site_id"]
return self.site_ids[site.address]
def deleteSite(self, site):
site_id = self.site_ids.get(site.address, 0)
if site_id:
self.execute("DELETE FROM site WHERE site_id = :site_id", {"site_id": site_id})
del self.site_ids[site.address]
del self.sites[site.address]
def setContent(self, site, inner_path, content, size=0):
self.insertOrUpdate("content", {
"size": size,
"size_files": sum([val["size"] for key, val in content.get("files", {}).items()]),
"size_files_optional": sum([val["size"] for key, val in content.get("files_optional", {}).items()]),
"modified": int(content.get("modified", 0))
}, {
"site_id": self.site_ids.get(site.address, 0),
"inner_path": inner_path
})
def deleteContent(self, site, inner_path):
self.execute("DELETE FROM content WHERE ?", {"site_id": self.site_ids.get(site.address, 0), "inner_path": inner_path})
def loadDbDict(self, site):
res = self.execute(
"SELECT GROUP_CONCAT(inner_path, '|') AS inner_paths FROM content WHERE ?",
{"site_id": self.site_ids.get(site.address, 0)}
)
row = res.fetchone()
if row and row["inner_paths"]:
inner_paths = row["inner_paths"].split("|")
return dict.fromkeys(inner_paths, False)
else:
return {}
def getTotalSize(self, site, ignore=None):
params = {"site_id": self.site_ids.get(site.address, 0)}
if ignore:
params["not__inner_path"] = ignore
res = self.execute("SELECT SUM(size) + SUM(size_files) AS size, SUM(size_files_optional) AS size_optional FROM content WHERE ?", params)
row = dict(res.fetchone())
if not row["size"]:
row["size"] = 0
if not row["size_optional"]:
row["size_optional"] = 0
return row["size"], row["size_optional"]
def listModified(self, site, after=None, before=None):
params = {"site_id": self.site_ids.get(site.address, 0)}
if after:
params["modified>"] = after
if before:
params["modified<"] = before
res = self.execute("SELECT inner_path, modified FROM content WHERE ?", params)
return {row["inner_path"]: row["modified"] for row in res}
content_dbs = {}
def getContentDb(path=None):
if not path:
path = "%s/content.db" % config.data_dir
if path not in content_dbs:
content_dbs[path] = ContentDb(path)
content_dbs[path].init()
return content_dbs[path]
getContentDb() # Pre-connect to default one
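For orientation, a minimal usage sketch of the ContentDb API above; the site object and its address are hypothetical stand-ins, and the snippet assumes a normal ZeroNet config/data_dir so getContentDb() can connect:
# Hypothetical usage of the module above; FakeSite only mimics the .address attribute
class FakeSite:
    address = "1ExampleSiteAddressPlaceholder"

site = FakeSite()
db = getContentDb()            # Shared, pre-connected instance from above
db.initSite(site)
site_id = db.needSite(site)    # Creates the site row on first use, returns its id
db.setContent(site, "content.json", {"modified": 1600000000, "files": {"index.html": {"size": 1234}}}, size=500)
print(db.getTotalSize(site))   # (1734, 0): size + size_files, size_files_optional
print(db.listModified(site, after=0))  # {"content.json": 1600000000}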

View file

@ -1,155 +0,0 @@
import time
import os
from . import ContentDb
from Debug import Debug
from Config import config
class ContentDbDict(dict):
def __init__(self, site, *args, **kwargs):
s = time.time()
self.site = site
self.cached_keys = []
self.log = self.site.log
self.db = ContentDb.getContentDb()
self.db_id = self.db.needSite(site)
self.num_loaded = 0
super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database
self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))
def loadItem(self, key):
try:
self.num_loaded += 1
if self.num_loaded % 100 == 0:
if config.verbose:
self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
else:
self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
content = self.site.storage.loadJson(key)
dict.__setitem__(self, key, content)
except IOError:
if dict.get(self, key):
self.__delitem__(key) # File no longer exists
raise KeyError(key)
self.addCachedKey(key)
self.checkLimit()
return content
def getItemSize(self, key):
return self.site.storage.getSize(key)
# Only keep the last 10 accessed json files in memory
def checkLimit(self):
if len(self.cached_keys) > 10:
key_deleted = self.cached_keys.pop(0)
dict.__setitem__(self, key_deleted, False)
def addCachedKey(self, key):
if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Keys shorter than 40 chars are always kept in memory
self.cached_keys.append(key)
def __getitem__(self, key):
val = dict.get(self, key)
if val: # Already loaded
return val
elif val is None: # Unknown key
raise KeyError(key)
elif val is False: # Loaded before, but purged from cache
return self.loadItem(key)
def __setitem__(self, key, val):
self.addCachedKey(key)
self.checkLimit()
size = self.getItemSize(key)
self.db.setContent(self.site, key, val, size)
dict.__setitem__(self, key, val)
def __delitem__(self, key):
self.db.deleteContent(self.site, key)
dict.__delitem__(self, key)
try:
self.cached_keys.remove(key)
except ValueError:
pass
def iteritems(self):
for key in dict.keys(self):
try:
val = self[key]
except Exception as err:
self.log.warning("Error loading %s: %s" % (key, err))
continue
yield key, val
def items(self):
back = []
for key in dict.keys(self):
try:
val = self[key]
except Exception as err:
self.log.warning("Error loading %s: %s" % (key, err))
continue
back.append((key, val))
return back
def values(self):
back = []
for key, val in dict.items(self): # Iterate raw stored values without triggering lazy load
if not val:
try:
val = self.loadItem(key)
except Exception:
continue
back.append(val)
return back
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
except Exception as err:
self.site.bad_files[key] = self.site.bad_files.get(key, 1)
dict.__delitem__(self, key)
self.log.warning("Error loading %s: %s" % (key, err))
return default
def execute(self, query, params={}):
params["site_id"] = self.db_id
return self.db.execute(query, params)
if __name__ == "__main__":
import psutil
process = psutil.Process(os.getpid())
s_mem = process.memory_info()[0] / float(2 ** 20)
root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27"
contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root)
print("Init len", len(contents))
s = time.time()
for dir_name in os.listdir(root + "/data/users/")[0:8000]:
contents["data/users/%s/content.json" % dir_name]
print("Load: %.3fs" % (time.time() - s))
s = time.time()
found = 0
for key, val in contents.items():
found += 1
assert key
assert val
print("Found:", found)
print("Iteritem: %.3fs" % (time.time() - s))
s = time.time()
found = 0
for key in list(contents.keys()):
found += 1
assert key in contents
print("In: %.3fs" % (time.time() - s))
print("Len:", len(list(contents.values())), len(list(contents.keys())))
print("Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem)

File diff suppressed because it is too large

View file

@ -1 +0,0 @@
from .ContentManager import ContentManager

View file

@ -1,4 +0,0 @@
from Config import config
from util import ThreadPool
thread_pool_crypt = ThreadPool.ThreadPool(config.threads_crypt)

View file

@ -1,101 +0,0 @@
import logging
import base64
import binascii
import time
import hashlib
from util.Electrum import dbl_format
from Config import config
import util.OpensslFindPatch
lib_verify_best = "sslcrypto"
from lib import sslcrypto
sslcurve_native = sslcrypto.ecc.get_curve("secp256k1")
sslcurve_fallback = sslcrypto.fallback.ecc.get_curve("secp256k1")
sslcurve = sslcurve_native
def loadLib(lib_name, silent=False):
global sslcurve, libsecp256k1message, lib_verify_best
if lib_name == "libsecp256k1":
s = time.time()
from lib import libsecp256k1message
import coincurve
lib_verify_best = "libsecp256k1"
if not silent:
logging.info(
"Libsecpk256k1 loaded: %s in %.3fs" %
(type(coincurve._libsecp256k1.lib).__name__, time.time() - s)
)
elif lib_name == "sslcrypto":
sslcurve = sslcurve_native
if sslcurve_native == sslcurve_fallback:
logging.warning("SSLCurve fallback loaded instead of native")
elif lib_name == "sslcrypto_fallback":
sslcurve = sslcurve_fallback
try:
if not config.use_libsecp256k1:
raise Exception("Disabled by config")
loadLib("libsecp256k1")
lib_verify_best = "libsecp256k1"
except Exception as err:
logging.info("Libsecp256k1 load failed: %s" % err)
def newPrivatekey(): # Return new private key
return sslcurve.private_to_wif(sslcurve.new_private_key()).decode()
def newSeed():
return binascii.hexlify(sslcurve.new_private_key()).decode()
def hdPrivatekey(seed, child):
# Too large a child id could cause problems
privatekey_bin = sslcurve.derive_child(seed.encode(), child % 100000000)
return sslcurve.private_to_wif(privatekey_bin).decode()
def privatekeyToAddress(privatekey): # Return address from private key
try:
if len(privatekey) == 64:
privatekey_bin = bytes.fromhex(privatekey)
else:
privatekey_bin = sslcurve.wif_to_private(privatekey.encode())
return sslcurve.private_to_address(privatekey_bin).decode()
except Exception: # Invalid privatekey
return False
def sign(data, privatekey): # Return signature for data using private key
if privatekey.startswith("23") and len(privatekey) > 52:
return None # Old style private key not supported
return base64.b64encode(sslcurve.sign(
data.encode(),
sslcurve.wif_to_private(privatekey.encode()),
recoverable=True,
hash=dbl_format
)).decode()
def verify(data, valid_address, sign, lib_verify=None): # Verify data using address and signature
if not lib_verify:
lib_verify = lib_verify_best
if not sign:
return False
if lib_verify == "libsecp256k1":
sign_address = libsecp256k1message.recover_address(data.encode("utf8"), sign).decode("utf8")
elif lib_verify in ("sslcrypto", "sslcrypto_fallback"):
publickey = sslcurve.recover(base64.b64decode(sign), data.encode(), hash=dbl_format)
sign_address = sslcurve.public_to_address(publickey).decode()
else:
raise Exception("No library enabled for signature verification")
if type(valid_address) is list: # Any address in the list
return sign_address in valid_address
else: # One possible address
return sign_address == valid_address
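A quick round-trip sketch of the signing helpers above; it generates a throwaway key with the functions in this module, so no external key material is assumed:
# Round-trip using only the helpers defined above
privatekey = newPrivatekey()                   # WIF-encoded secp256k1 private key
address = privatekeyToAddress(privatekey)      # Matching bitcoin-style address
signature = sign("hello zeronet", privatekey)  # base64, recoverable signature
assert verify("hello zeronet", address, signature) is True
assert verify("tampered data", address, signature) is False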

View file

@ -1,56 +0,0 @@
import hashlib
import os
import base64
def sha512sum(file, blocksize=65536, format="hexdigest"):
if type(file) is str: # Filename specified
file = open(file, "rb")
hash = hashlib.sha512()
for block in iter(lambda: file.read(blocksize), b""):
hash.update(block)
# Truncating to 256 bits is good enough
if format == "hexdigest":
return hash.hexdigest()[0:64]
else:
return hash.digest()[0:32]
def sha256sum(file, blocksize=65536):
if type(file) is str: # Filename specified
file = open(file, "rb")
hash = hashlib.sha256()
for block in iter(lambda: file.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
def random(length=64, encoding="hex"):
if encoding == "base64": # Characters: A-Za-z0-9
hash = hashlib.sha512(os.urandom(256)).digest()
return base64.b64encode(hash).decode("ascii").replace("+", "").replace("/", "").replace("=", "")[0:length]
else: # Characters: a-f0-9 (faster)
return hashlib.sha512(os.urandom(256)).hexdigest()[0:length]
# Sha512 truncated to 256 bits
class Sha512t:
def __init__(self, data):
if data:
self.sha512 = hashlib.sha512(data)
else:
self.sha512 = hashlib.sha512()
def hexdigest(self):
return self.sha512.hexdigest()[0:64]
def digest(self):
return self.sha512.digest()[0:32]
def update(self, data):
return self.sha512.update(data)
def sha512t(data=None):
return Sha512t(data)
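For illustration, the truncated sha512 helper above can be used like a hashlib object, both streaming and one-shot:
# Streaming and one-shot usage of sha512t / random defined above
h = sha512t(b"zero")
h.update(b"net")
assert h.hexdigest() == sha512t(b"zeronet").hexdigest()
assert len(h.hexdigest()) == 64   # 256 bits rendered as hex
assert len(random(16)) == 16      # hex-encoded random string of the requested length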

View file

@ -1,85 +0,0 @@
import base64
import hashlib
def sign(data, privatekey):
import rsa
from rsa import pkcs1
from lib import Ed25519
## Onion Service V3
if len(privatekey) == 88:
prv_key = base64.b64decode(privatekey)
pub_key = Ed25519.publickey_unsafe(prv_key)
sign = Ed25519.signature_unsafe(data, prv_key, pub_key)
return sign
## Onion Service V2
if "BEGIN RSA PRIVATE KEY" not in privatekey:
privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey
priv = rsa.PrivateKey.load_pkcs1(privatekey)
sign = rsa.pkcs1.sign(data, priv, 'SHA-256')
return sign
def verify(data, publickey, sign):
import rsa
from rsa import pkcs1
from lib import Ed25519
## Onion Service V3
if len(publickey) == 32:
try:
valid = Ed25519.checkvalid(sign, data, publickey)
valid = 'SHA-256' # Mirror rsa.pkcs1.verify's return value so callers get a truthy result
except Exception as err:
print(err)
valid = False
return valid
## Onion Service V2
pub = rsa.PublicKey.load_pkcs1(publickey, format="DER")
try:
valid = rsa.pkcs1.verify(data, sign, pub)
except pkcs1.VerificationError:
valid = False
return valid
def privatekeyToPublickey(privatekey):
import rsa
from rsa import pkcs1
from lib import Ed25519
## Onion Service V3
if len(privatekey) == 88:
prv_key = base64.b64decode(privatekey)
pub_key = Ed25519.publickey_unsafe(prv_key)
return pub_key
## Onion Service V2
if "BEGIN RSA PRIVATE KEY" not in privatekey:
privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey
priv = rsa.PrivateKey.load_pkcs1(privatekey)
pub = rsa.PublicKey(priv.n, priv.e)
return pub.save_pkcs1("DER")
def publickeyToOnion(publickey):
from lib import Ed25519
## Onion Service V3
if len(publickey) == 32:
addr = Ed25519.publickey_to_onionaddress(publickey)[:-6]
return addr
## Onion Service V2
return base64.b32encode(hashlib.sha1(publickey).digest()[:10]).lower().decode("ascii")
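A small Onion-Service-V2-style round trip through the helpers above; it assumes the third-party rsa package imported by this module is available and uses a throwaway key (the tiny key size is only to keep the sketch fast, not a recommendation):
# Hypothetical V2 round trip with a freshly generated RSA key
import rsa as _rsa
_pub, _priv = _rsa.newkeys(512)               # Throwaway key, illustration only
privatekey_pem = _priv.save_pkcs1().decode()  # Includes the BEGIN RSA PRIVATE KEY header
publickey_der = privatekeyToPublickey(privatekey_pem)
signature = sign(b"hello onion", privatekey_pem)
assert verify(b"hello onion", publickey_der, signature)   # Truthy on success
print(publickeyToOnion(publickey_der))        # 16-char v2 onion name (without ".onion")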

View file

View file

@ -1,519 +0,0 @@
import sqlite3
import json
import time
import logging
import re
import os
import atexit
import threading
import sys
import weakref
import errno
import gevent
from Debug import Debug
from .DbCursor import DbCursor
from util import SafeRe
from util import helper
from util import ThreadPool
from Config import config
thread_pool_db = ThreadPool.ThreadPool(config.threads_db)
next_db_id = 0
opened_dbs = []
# Close idle databases to save some memory
def dbCleanup():
while 1:
time.sleep(60 * 5)
for db in opened_dbs[:]:
idle = time.time() - db.last_query_time
if idle > 60 * 5 and db.close_idle:
db.close("Cleanup")
def dbCommitCheck():
while 1:
time.sleep(5)
for db in opened_dbs[:]:
if not db.need_commit:
continue
success = db.commit("Interval")
if success:
db.need_commit = False
time.sleep(0.1)
def dbCloseAll():
for db in opened_dbs[:]:
db.close("Close all")
gevent.spawn(dbCleanup)
gevent.spawn(dbCommitCheck)
atexit.register(dbCloseAll)
class DbTableError(Exception):
def __init__(self, message, table):
super().__init__(message)
self.table = table
class Db(object):
def __init__(self, schema, db_path, close_idle=False):
global next_db_id
self.db_path = db_path
self.db_dir = os.path.dirname(db_path) + "/"
self.schema = schema
self.schema["version"] = self.schema.get("version", 1)
self.conn = None
self.cur = None
self.cursors = weakref.WeakSet()
self.id = next_db_id
next_db_id += 1
self.progress_sleeping = False
self.commiting = False
self.log = logging.getLogger("Db#%s:%s" % (self.id, schema["db_name"]))
self.table_names = None
self.collect_stats = False
self.foreign_keys = False
self.need_commit = False
self.query_stats = {}
self.db_keyvalues = {}
self.delayed_queue = []
self.delayed_queue_thread = None
self.close_idle = close_idle
self.last_query_time = time.time()
self.last_sleep_time = time.time()
self.num_execute_since_sleep = 0
self.lock = ThreadPool.Lock()
self.connect_lock = ThreadPool.Lock()
def __repr__(self):
return "<Db#%s:%s close_idle:%s>" % (id(self), self.db_path, self.close_idle)
def connect(self):
self.connect_lock.acquire(True)
try:
if self.conn:
self.log.debug("Already connected, connection ignored")
return
if self not in opened_dbs:
opened_dbs.append(self)
s = time.time()
try: # Directory may not exist yet
os.makedirs(self.db_dir)
self.log.debug("Created Db path: %s" % self.db_dir)
except OSError as err:
if err.errno != errno.EEXIST:
raise err
if not os.path.isfile(self.db_path):
self.log.debug("Db file not exist yet: %s" % self.db_path)
self.conn = sqlite3.connect(self.db_path, isolation_level="DEFERRED", check_same_thread=False)
self.conn.row_factory = sqlite3.Row
self.conn.set_progress_handler(self.progress, 5000000)
self.conn.execute('PRAGMA journal_mode=WAL')
if self.foreign_keys:
self.conn.execute("PRAGMA foreign_keys = ON")
self.cur = self.getCursor()
self.log.debug(
"Connected to %s in %.3fs (opened: %s, sqlite version: %s)..." %
(self.db_path, time.time() - s, len(opened_dbs), sqlite3.version)
)
self.log.debug("Connect by thread: %s" % threading.current_thread().ident)
self.log.debug("Connect called by %s" % Debug.formatStack())
finally:
self.connect_lock.release()
def getConn(self):
if not self.conn:
self.connect()
return self.conn
def progress(self, *args, **kwargs):
self.progress_sleeping = True
time.sleep(0.001)
self.progress_sleeping = False
# Execute query using dbcursor
def execute(self, query, params=None):
if not self.conn:
self.connect()
return self.cur.execute(query, params)
@thread_pool_db.wrap
def commit(self, reason="Unknown"):
if self.progress_sleeping:
self.log.debug("Commit ignored: Progress sleeping")
return False
if not self.conn:
self.log.debug("Commit ignored: No connection")
return False
if self.commiting:
self.log.debug("Commit ignored: Already commiting")
return False
try:
s = time.time()
self.commiting = True
self.conn.commit()
self.log.debug("Commited in %.3fs (reason: %s)" % (time.time() - s, reason))
return True
except Exception as err:
if "SQL statements in progress" in str(err):
self.log.warning("Commit delayed: %s (reason: %s)" % (Debug.formatException(err), reason))
else:
self.log.error("Commit error: %s (reason: %s)" % (Debug.formatException(err), reason))
return False
finally:
self.commiting = False
def insertOrUpdate(self, *args, **kwargs):
if not self.conn:
self.connect()
return self.cur.insertOrUpdate(*args, **kwargs)
def executeDelayed(self, *args, **kwargs):
if not self.delayed_queue_thread:
self.delayed_queue_thread = gevent.spawn_later(1, self.processDelayed)
self.delayed_queue.append(("execute", (args, kwargs)))
def insertOrUpdateDelayed(self, *args, **kwargs):
if not self.delayed_queue:
gevent.spawn_later(1, self.processDelayed)
self.delayed_queue.append(("insertOrUpdate", (args, kwargs)))
def processDelayed(self):
if not self.delayed_queue:
self.log.debug("processDelayed aborted")
return
if not self.conn:
self.connect()
s = time.time()
cur = self.getCursor()
for command, params in self.delayed_queue:
if command == "insertOrUpdate":
cur.insertOrUpdate(*params[0], **params[1])
else:
cur.execute(*params[0], **params[1])
if len(self.delayed_queue) > 10:
self.log.debug("Processed %s delayed queue in %.3fs" % (len(self.delayed_queue), time.time() - s))
self.delayed_queue = []
self.delayed_queue_thread = None
def close(self, reason="Unknown"):
if not self.conn:
return False
self.connect_lock.acquire()
s = time.time()
if self.delayed_queue:
self.processDelayed()
if self in opened_dbs:
opened_dbs.remove(self)
self.need_commit = False
self.commit("Closing: %s" % reason)
self.log.debug("Close called by %s" % Debug.formatStack())
for i in range(5):
if len(self.cursors) == 0:
break
self.log.debug("Pending cursors: %s" % len(self.cursors))
time.sleep(0.1 * i)
if len(self.cursors):
self.log.debug("Killing cursors: %s" % len(self.cursors))
self.conn.interrupt()
if self.cur:
self.cur.close()
if self.conn:
ThreadPool.main_loop.call(self.conn.close)
self.conn = None
self.cur = None
self.log.debug("%s closed (reason: %s) in %.3fs, opened: %s" % (self.db_path, reason, time.time() - s, len(opened_dbs)))
self.connect_lock.release()
return True
# Gets a cursor object to database
# Return: Cursor class
def getCursor(self):
if not self.conn:
self.connect()
cur = DbCursor(self)
return cur
def getSharedCursor(self):
if not self.conn:
self.connect()
return self.cur
# Get the table version
# Return: Table version, 0 if the table has no version yet, or False if the keyvalue table does not exist
def getTableVersion(self, table_name):
if not self.db_keyvalues: # Get db keyvalues
try:
res = self.execute("SELECT * FROM keyvalue WHERE json_id=0") # json_id = 0 is internal keyvalues
except sqlite3.OperationalError as err: # Table not exist
self.log.debug("Query table version error: %s" % err)
return False
for row in res:
self.db_keyvalues[row["key"]] = row["value"]
return self.db_keyvalues.get("table.%s.version" % table_name, 0)
# Check Db tables
# Return: <list> Changed table names
def checkTables(self):
s = time.time()
changed_tables = []
cur = self.getSharedCursor()
# Check internal tables
# Check keyvalue table
changed = cur.needTable("keyvalue", [
["keyvalue_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
["key", "TEXT"],
["value", "INTEGER"],
["json_id", "INTEGER"],
], [
"CREATE UNIQUE INDEX key_id ON keyvalue(json_id, key)"
], version=self.schema["version"])
if changed:
changed_tables.append("keyvalue")
# Create json table if no custom one defined
if "json" not in self.schema.get("tables", {}):
if self.schema["version"] == 1:
changed = cur.needTable("json", [
["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
["path", "VARCHAR(255)"]
], [
"CREATE UNIQUE INDEX path ON json(path)"
], version=self.schema["version"])
elif self.schema["version"] == 2:
changed = cur.needTable("json", [
["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
["directory", "VARCHAR(255)"],
["file_name", "VARCHAR(255)"]
], [
"CREATE UNIQUE INDEX path ON json(directory, file_name)"
], version=self.schema["version"])
elif self.schema["version"] == 3:
changed = cur.needTable("json", [
["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
["site", "VARCHAR(255)"],
["directory", "VARCHAR(255)"],
["file_name", "VARCHAR(255)"]
], [
"CREATE UNIQUE INDEX path ON json(directory, site, file_name)"
], version=self.schema["version"])
if changed:
changed_tables.append("json")
# Check schema tables
for table_name, table_settings in self.schema.get("tables", {}).items():
try:
indexes = table_settings.get("indexes", [])
version = table_settings.get("schema_changed", 0)
changed = cur.needTable(
table_name, table_settings["cols"],
indexes, version=version
)
if changed:
changed_tables.append(table_name)
except Exception as err:
self.log.error("Error creating table %s: %s" % (table_name, Debug.formatException(err)))
raise DbTableError(err, table_name)
self.log.debug("Db check done in %.3fs, changed tables: %s" % (time.time() - s, changed_tables))
if changed_tables:
self.db_keyvalues = {} # Refresh table version cache
return changed_tables
# Update json file to db
# Return: True if matched
def updateJson(self, file_path, file=None, cur=None):
if not file_path.startswith(self.db_dir):
return False # Not from the db dir: Skipping
relative_path = file_path[len(self.db_dir):] # File path relative to the db dir
# Check if filename matches any of mappings in schema
matched_maps = []
for match, map_settings in self.schema["maps"].items():
try:
if SafeRe.match(match, relative_path):
matched_maps.append(map_settings)
except SafeRe.UnsafePatternError as err:
self.log.error(err)
# No match found for the file
if not matched_maps:
return False
# Load the json file
try:
if file is None: # Open the file if no file object was passed
file = open(file_path, "rb")
if file is False: # File deleted
data = {}
else:
if file_path.endswith("json.gz"):
file = helper.limitedGzipFile(fileobj=file)
if sys.version_info.major == 3 and sys.version_info.minor < 6:
data = json.loads(file.read().decode("utf8"))
else:
data = json.load(file)
except Exception as err:
self.log.debug("Json file %s load error: %s" % (file_path, err))
data = {}
# No cursor specified
if not cur:
cur = self.getSharedCursor()
cur.logging = False
# Row for current json file if required
if not data or [dbmap for dbmap in matched_maps if "to_keyvalue" in dbmap or "to_table" in dbmap]:
json_row = cur.getJsonRow(relative_path)
# Check matched mappings in schema
for dbmap in matched_maps:
# Insert non-relational key values
if dbmap.get("to_keyvalue"):
# Get current values
res = cur.execute("SELECT * FROM keyvalue WHERE json_id = ?", (json_row["json_id"],))
current_keyvalue = {}
current_keyvalue_id = {}
for row in res:
current_keyvalue[row["key"]] = row["value"]
current_keyvalue_id[row["key"]] = row["keyvalue_id"]
for key in dbmap["to_keyvalue"]:
if key not in current_keyvalue: # Keyvalue not exist yet in the db
cur.execute(
"INSERT INTO keyvalue ?",
{"key": key, "value": data.get(key), "json_id": json_row["json_id"]}
)
elif data.get(key) != current_keyvalue[key]: # Keyvalue different value
cur.execute(
"UPDATE keyvalue SET value = ? WHERE keyvalue_id = ?",
(data.get(key), current_keyvalue_id[key])
)
# Insert data to json table for easier joins
if dbmap.get("to_json_table"):
directory, file_name = re.match("^(.*?)/*([^/]*)$", relative_path).groups()
data_json_row = dict(cur.getJsonRow(directory + "/" + dbmap.get("file_name", file_name)))
changed = False
for key in dbmap["to_json_table"]:
if data.get(key) != data_json_row.get(key):
changed = True
if changed:
# Add the custom col values
data_json_row.update({key: val for key, val in data.items() if key in dbmap["to_json_table"]})
cur.execute("INSERT OR REPLACE INTO json ?", data_json_row)
# Insert data to tables
for table_settings in dbmap.get("to_table", []):
if isinstance(table_settings, dict): # Custom settings
table_name = table_settings["table"] # Table name to insert datas
node = table_settings.get("node", table_name) # Node keyname in data json file
key_col = table_settings.get("key_col") # Map dict key as this col
val_col = table_settings.get("val_col") # Map dict value as this col
import_cols = table_settings.get("import_cols")
replaces = table_settings.get("replaces")
else: # Simple settings
table_name = table_settings
node = table_settings
key_col = None
val_col = None
import_cols = None
replaces = None
# Fill import cols from table cols
if not import_cols:
import_cols = set([item[0] for item in self.schema["tables"][table_name]["cols"]])
cur.execute("DELETE FROM %s WHERE json_id = ?" % table_name, (json_row["json_id"],))
if node not in data:
continue
if key_col: # Map as dict
for key, val in data[node].items():
if val_col: # Single value
cur.execute(
"INSERT OR REPLACE INTO %s ?" % table_name,
{key_col: key, val_col: val, "json_id": json_row["json_id"]}
)
else: # Multi value
if type(val) is dict: # Single row
row = val
if import_cols:
row = {key: row[key] for key in row if key in import_cols} # Filter row by import_cols
row[key_col] = key
# Replace in value if necessary
if replaces:
for replace_key, replace in replaces.items():
if replace_key in row:
for replace_from, replace_to in replace.items():
row[replace_key] = row[replace_key].replace(replace_from, replace_to)
row["json_id"] = json_row["json_id"]
cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
elif type(val) is list: # Multi row
for row in val:
row[key_col] = key
row["json_id"] = json_row["json_id"]
cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
else: # Map as list
for row in data[node]:
row["json_id"] = json_row["json_id"]
if import_cols:
row = {key: row[key] for key in row if key in import_cols} # Filter row by import_cols
cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
# Cleanup json row
if not data:
self.log.debug("Cleanup json row for %s" % file_path)
cur.execute("DELETE FROM json WHERE json_id = %s" % json_row["json_id"])
return True
if __name__ == "__main__":
s = time.time()
console_log = logging.StreamHandler()
logging.getLogger('').setLevel(logging.DEBUG)
logging.getLogger('').addHandler(console_log)
console_log.setLevel(logging.DEBUG)
dbjson = Db(json.load(open("zerotalk.schema.json")), "data/users/zerotalk.db")
dbjson.collect_stats = True
dbjson.checkTables()
cur = dbjson.getCursor()
cur.logging = False
dbjson.updateJson("data/users/content.json", cur=cur)
for user_dir in os.listdir("data/users"):
if os.path.isdir("data/users/%s" % user_dir):
dbjson.updateJson("data/users/%s/data.json" % user_dir, cur=cur)
# print ".",
cur.logging = True
print("Done in %.3fs" % (time.time() - s))
for query, stats in sorted(dbjson.query_stats.items()):
print("-", query, stats)

View file

@ -1,246 +0,0 @@
import time
import re
from util import helper
# Special sqlite cursor
class DbCursor:
def __init__(self, db):
self.db = db
self.logging = False
def quoteValue(self, value):
if type(value) is int:
return str(value)
else:
return "'%s'" % value.replace("'", "''")
def parseQuery(self, query, params):
query_type = query.split(" ", 1)[0].upper()
if isinstance(params, dict) and "?" in query: # Make easier select and insert by allowing dict params
if query_type in ("SELECT", "DELETE", "UPDATE"):
# Convert param dict to SELECT * FROM table WHERE key = ? AND key2 = ? format
query_wheres = []
values = []
for key, value in params.items():
if type(value) is list:
if key.startswith("not__"):
field = key.replace("not__", "")
operator = "NOT IN"
else:
field = key
operator = "IN"
if len(value) > 100:
# Embed values in query to avoid "too many SQL variables" error
query_values = ",".join(map(helper.sqlquote, value))
else:
query_values = ",".join(["?"] * len(value))
values += value
query_wheres.append(
"%s %s (%s)" %
(field, operator, query_values)
)
else:
if key.startswith("not__"):
query_wheres.append(key.replace("not__", "") + " != ?")
elif key.endswith("__like"):
query_wheres.append(key.replace("__like", "") + " LIKE ?")
elif key.endswith(">"):
query_wheres.append(key.replace(">", "") + " > ?")
elif key.endswith("<"):
query_wheres.append(key.replace("<", "") + " < ?")
else:
query_wheres.append(key + " = ?")
values.append(value)
wheres = " AND ".join(query_wheres)
if wheres == "":
wheres = "1"
query = re.sub("(.*)[?]", "\\1 %s" % wheres, query) # Replace the last ?
params = values
else:
# Convert param dict to INSERT INTO table (key, key2) VALUES (?, ?) format
keys = ", ".join(params.keys())
values = ", ".join(['?' for key in params.keys()])
keysvalues = "(%s) VALUES (%s)" % (keys, values)
query = re.sub("(.*)[?]", "\\1%s" % keysvalues, query) # Replace the last ?
params = tuple(params.values())
elif isinstance(params, dict) and ":" in query:
new_params = dict()
values = []
for key, value in params.items():
if type(value) is list:
for idx, val in enumerate(value):
new_params[key + "__" + str(idx)] = val
new_names = [":" + key + "__" + str(idx) for idx in range(len(value))]
query = re.sub(r":" + re.escape(key) + r"([)\s]|$)", "(%s)%s" % (", ".join(new_names), r"\1"), query)
else:
new_params[key] = value
params = new_params
return query, params
def execute(self, query, params=None):
query = query.strip()
while self.db.progress_sleeping or self.db.commiting:
time.sleep(0.1)
self.db.last_query_time = time.time()
query, params = self.parseQuery(query, params)
cursor = self.db.getConn().cursor()
self.db.cursors.add(cursor)
if self.db.lock.locked():
self.db.log.debug("Locked for %.3fs" % (time.time() - self.db.lock.time_lock))
try:
s = time.time()
self.db.lock.acquire(True)
if query.upper().strip("; ") == "VACUUM":
self.db.commit("vacuum called")
if params:
res = cursor.execute(query, params)
else:
res = cursor.execute(query)
finally:
self.db.lock.release()
taken_query = time.time() - s
if self.logging or taken_query > 1:
if params: # Query has parameters
self.db.log.debug("Query: " + query + " " + str(params) + " (Done in %.4f)" % (time.time() - s))
else:
self.db.log.debug("Query: " + query + " (Done in %.4f)" % (time.time() - s))
# Log query stats
if self.db.collect_stats:
if query not in self.db.query_stats:
self.db.query_stats[query] = {"call": 0, "time": 0.0}
self.db.query_stats[query]["call"] += 1
self.db.query_stats[query]["time"] += time.time() - s
query_type = query.split(" ", 1)[0].upper()
is_update_query = query_type in ["UPDATE", "DELETE", "INSERT", "CREATE"]
if not self.db.need_commit and is_update_query:
self.db.need_commit = True
if is_update_query:
return cursor
else:
return res
def executemany(self, query, params):
while self.db.progress_sleeping or self.db.commiting:
time.sleep(0.1)
self.db.last_query_time = time.time()
s = time.time()
cursor = self.db.getConn().cursor()
self.db.cursors.add(cursor)
try:
self.db.lock.acquire(True)
cursor.executemany(query, params)
finally:
self.db.lock.release()
taken_query = time.time() - s
if self.logging or taken_query > 0.1:
self.db.log.debug("Execute many: %s (Done in %.4f)" % (query, taken_query))
self.db.need_commit = True
return cursor
# Creates or updates a database row without incrementing the rowid
def insertOrUpdate(self, table, query_sets, query_wheres, oninsert={}):
sql_sets = ["%s = :%s" % (key, key) for key in query_sets.keys()]
sql_wheres = ["%s = :%s" % (key, key) for key in query_wheres.keys()]
params = query_sets
params.update(query_wheres)
res = self.execute(
"UPDATE %s SET %s WHERE %s" % (table, ", ".join(sql_sets), " AND ".join(sql_wheres)),
params
)
if res.rowcount == 0:
params.update(oninsert) # Add insert-only fields
self.execute("INSERT INTO %s ?" % table, params)
# Create new table
# Return: True on success
def createTable(self, table, cols):
# TODO: Check current structure
self.execute("DROP TABLE IF EXISTS %s" % table)
col_definitions = []
for col_name, col_type in cols:
col_definitions.append("%s %s" % (col_name, col_type))
self.execute("CREATE TABLE %s (%s)" % (table, ",".join(col_definitions)))
return True
# Create indexes on table
# Return: True on success
def createIndexes(self, table, indexes):
for index in indexes:
if not index.strip().upper().startswith("CREATE"):
self.db.log.error("Index command should start with CREATE: %s" % index)
continue
self.execute(index)
# Create table if it does not exist
# Return: True if updated
def needTable(self, table, cols, indexes=None, version=1):
current_version = self.db.getTableVersion(table)
if int(current_version) < int(version): # Table needs update or does not exist
self.db.log.debug("Table %s outdated...version: %s need: %s, rebuilding..." % (table, current_version, version))
self.createTable(table, cols)
if indexes:
self.createIndexes(table, indexes)
self.execute(
"INSERT OR REPLACE INTO keyvalue ?",
{"json_id": 0, "key": "table.%s.version" % table, "value": version}
)
return True
else: # Not changed
return False
# Get or create a row for json file
# Return: The database row
def getJsonRow(self, file_path):
directory, file_name = re.match("^(.*?)/*([^/]*)$", file_path).groups()
if self.db.schema["version"] == 1:
# One path field
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
row = res.fetchone()
if not row: # No row yet, create it
self.execute("INSERT INTO json ?", {"path": file_path})
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
row = res.fetchone()
elif self.db.schema["version"] == 2:
# Separate directory, file_name (easier join)
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
row = res.fetchone()
if not row: # No row yet, create it
self.execute("INSERT INTO json ?", {"directory": directory, "file_name": file_name})
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
row = res.fetchone()
elif self.db.schema["version"] == 3:
# Separate site, directory, file_name (for merger sites)
site_address, directory = re.match("^([^/]*)/(.*)$", directory).groups()
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
row = res.fetchone()
if not row: # No row yet, create it
self.execute("INSERT INTO json ?", {"site": site_address, "directory": directory, "file_name": file_name})
res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
row = res.fetchone()
else:
raise Exception("Dbschema version %s not supported" % self.db.schema.get("version"))
return row
def close(self):
pass
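A few examples of the dict-parameter query convention that parseQuery() above rewrites before handing the statement to sqlite (table and column names are placeholders, db is any Db instance):
# How the "?" dict-parameter convention expands (illustrative placeholders)
cur = db.getCursor()
cur.execute("INSERT INTO site ?", {"address": "1Example"})
#   becomes: INSERT INTO site (address) VALUES (?)
cur.execute("SELECT * FROM content WHERE ?", {"site_id": 1, "modified>": 1600000000})
#   becomes: SELECT * FROM content WHERE site_id = ? AND modified > ?
cur.execute("SELECT * FROM content WHERE ?", {"not__inner_path": ["a.json", "b.json"]})
#   becomes: SELECT * FROM content WHERE inner_path NOT IN (?,?)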

View file

@ -1,46 +0,0 @@
import re
# Parse and modify sql queries
class DbQuery:
def __init__(self, query):
self.setQuery(query.strip())
# Split main parts of query
def parseParts(self, query):
parts = re.split("(SELECT|FROM|WHERE|ORDER BY|LIMIT)", query)
parts = [_f for _f in parts if _f] # Remove empty parts
parts = [s.strip() for s in parts] # Remove whitespace
return dict(list(zip(parts[0::2], parts[1::2])))
# Parse selected fields SELECT ... FROM
def parseFields(self, query_select):
fields = re.findall("([^,]+) AS ([^,]+)", query_select)
return {key: val.strip() for val, key in fields}
# Parse query conditions WHERE ...
def parseWheres(self, query_where):
if " AND " in query_where:
return query_where.split(" AND ")
elif query_where:
return [query_where]
else:
return []
# Set the query
def setQuery(self, query):
self.parts = self.parseParts(query)
self.fields = self.parseFields(self.parts["SELECT"])
self.wheres = self.parseWheres(self.parts.get("WHERE", ""))
# Convert query back to string
def __str__(self):
query_parts = []
for part_name in ["SELECT", "FROM", "WHERE", "ORDER BY", "LIMIT"]:
if part_name == "WHERE" and self.wheres:
query_parts.append("WHERE")
query_parts.append(" AND ".join(self.wheres))
elif part_name in self.parts:
query_parts.append(part_name)
query_parts.append(self.parts[part_name])
return "\n".join(query_parts)

View file

View file

@ -1,186 +0,0 @@
import sys
import os
import re
from Config import config
# Non-fatal exception
class Notify(Exception):
def __init__(self, message=None):
if message:
self.message = message
def __str__(self):
return self.message
# Gevent greenlet.kill accepts an Exception type
def createNotifyType(message):
return type("Notify", (Notify, ), {"message": message})
def formatExceptionMessage(err):
err_type = err.__class__.__name__
if err.args:
err_message = err.args[-1]
else:
err_message = err.__str__()
return "%s: %s" % (err_type, err_message)
python_lib_dirs = [path.replace("\\", "/") for path in sys.path if re.sub(r".*[\\/]", "", path) in ("site-packages", "dist-packages")]
python_lib_dirs.append(os.path.dirname(os.__file__).replace("\\", "/")) # TODO: check if returns the correct path for PyPy
root_dir = os.path.realpath(os.path.dirname(__file__) + "/../../")
root_dir = root_dir.replace("\\", "/")
def formatTraceback(items, limit=None, fold_builtin=True):
back = []
i = 0
prev_file_title = ""
is_prev_builtin = False
for path, line in items:
i += 1
is_last = i == len(items)
path = path.replace("\\", "/")
if path.startswith("src/gevent/"):
file_title = "<gevent>/" + path[len("src/gevent/"):]
is_builtin = True
is_skippable_builtin = False
elif path in ("<frozen importlib._bootstrap>", "<frozen importlib._bootstrap_external>"):
file_title = "(importlib)"
is_builtin = True
is_skippable_builtin = True
else:
is_skippable_builtin = False
for base in python_lib_dirs:
if path.startswith(base + "/"):
file_title = path[len(base + "/"):]
module_name, *tail = file_title.split("/")
if module_name.endswith(".py"):
module_name = module_name[:-3]
file_title = "/".join(["<%s>" % module_name] + tail)
is_builtin = True
break
else:
is_builtin = False
for base in (root_dir + "/src", root_dir + "/plugins", root_dir):
if path.startswith(base + "/"):
file_title = path[len(base + "/"):]
break
else:
# For unknown paths, do our best to hide absolute path
file_title = path
for needle in ("/zeronet/", "/core/"):
if needle in file_title.lower():
file_title = "?/" + file_title[file_title.lower().rindex(needle) + len(needle):]
# Path compression: A/AB/ABC/X/Y.py -> ABC/X/Y.py
# E.g.: in 'Db/DbCursor.py' the directory part is unnecessary
if not file_title.startswith("/"):
prev_part = ""
for i, part in enumerate(file_title.split("/") + [""]):
if not part.startswith(prev_part):
break
prev_part = part
file_title = "/".join(file_title.split("/")[i - 1:])
if is_skippable_builtin and fold_builtin:
pass
elif is_builtin and is_prev_builtin and not is_last and fold_builtin:
if back[-1] != "...":
back.append("...")
else:
if file_title == prev_file_title:
back.append("%s" % line)
else:
back.append("%s line %s" % (file_title, line))
prev_file_title = file_title
is_prev_builtin = is_builtin
if limit and i >= limit:
back.append("...")
break
return back
def formatException(err=None, format="text"):
import traceback
if type(err) == Notify:
return err
elif type(err) == tuple and err and err[0] is not None: # Passed traceback info
exc_type, exc_obj, exc_tb = err
err = None
else: # No traceback info passed, get the latest
exc_type, exc_obj, exc_tb = sys.exc_info()
if not err:
if hasattr(err, "message"):
err = exc_obj.message
else:
err = exc_obj
tb = formatTraceback([[frame[0], frame[1]] for frame in traceback.extract_tb(exc_tb)])
if format == "html":
return "%s: %s<br><small class='multiline'>%s</small>" % (repr(err), err, " > ".join(tb))
else:
return "%s: %s in %s" % (exc_type.__name__, err, " > ".join(tb))
def formatStack(limit=None):
import inspect
tb = formatTraceback([[frame[1], frame[2]] for frame in inspect.stack()[1:]], limit=limit)
return " > ".join(tb)
# Test if gevent eventloop blocks
import logging
import gevent
import time
num_block = 0
def testBlock():
global num_block
logging.debug("Gevent block checker started")
last_time = time.time()
while 1:
time.sleep(1)
if time.time() - last_time > 1.1:
logging.debug("Gevent block detected: %.3fs" % (time.time() - last_time - 1))
num_block += 1
last_time = time.time()
gevent.spawn(testBlock)
if __name__ == "__main__":
try:
print(1 / 0)
except Exception as err:
print(type(err).__name__)
print("1/0 error: %s" % formatException(err))
def loadJson():
json.loads("Errr")
import json
try:
loadJson()
except Exception as err:
print(err)
print("Json load error: %s" % formatException(err))
try:
raise Notify("nothing...")
except Exception as err:
print("Notify: %s" % formatException(err))
loadJson()
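A tiny illustration of formatTraceback() above with made-up frames; the exact shortening depends on the environment, but project files are trimmed and repeated files collapse to bare line numbers:
# Made-up frames, illustration only
frames = [["src/gevent/hub.py", 12], ["Db/DbCursor.py", 34], ["Db/DbCursor.py", 56]]
print(" > ".join(formatTraceback(frames)))  # e.g. <gevent>/hub.py line 12 > DbCursor.py line 34 > 56
print(formatStack(limit=3))                 # Caller stack in the same compact format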

View file

@ -1,115 +0,0 @@
import sys
import logging
import signal
import importlib
import gevent
import gevent.hub
from Config import config
from . import Debug
last_error = None
def shutdown(reason="Unknown"):
logging.info("Shutting down (reason: %s)..." % reason)
import main
if "file_server" in dir(main):
try:
gevent.spawn(main.file_server.stop)
if "ui_server" in dir(main):
gevent.spawn(main.ui_server.stop)
except Exception as err:
print("Proper shutdown error: %s" % err)
sys.exit(0)
else:
sys.exit(0)
# Store last error, ignore notify, allow manual error logging
def handleError(*args, **kwargs):
global last_error
if not args: # Called manually
args = sys.exc_info()
silent = True
else:
silent = False
if args[0].__name__ != "Notify":
last_error = args
if args[0].__name__ == "KeyboardInterrupt":
shutdown("Keyboard interrupt")
elif not silent and args[0].__name__ != "Notify":
logging.exception("Unhandled exception")
if "greenlet.py" not in args[2].tb_frame.f_code.co_filename: # Don't display error twice
sys.__excepthook__(*args, **kwargs)
# Ignore notify errors
def handleErrorNotify(*args, **kwargs):
err = args[0]
if err.__name__ == "KeyboardInterrupt":
shutdown("Keyboard interrupt")
elif err.__name__ != "Notify":
logging.error("Unhandled exception: %s" % Debug.formatException(args))
sys.__excepthook__(*args, **kwargs)
if config.debug: # Keep last error for /Debug
sys.excepthook = handleError
else:
sys.excepthook = handleErrorNotify
# Override default error handler to allow silent killing / custom logging
if "handle_error" in dir(gevent.hub.Hub):
gevent.hub.Hub._original_handle_error = gevent.hub.Hub.handle_error
else:
logging.debug("gevent.hub.Hub.handle_error not found using old gevent hooks")
OriginalGreenlet = gevent.Greenlet
class ErrorhookedGreenlet(OriginalGreenlet):
def _report_error(self, exc_info):
sys.excepthook(exc_info[0], exc_info[1], exc_info[2])
gevent.Greenlet = gevent.greenlet.Greenlet = ErrorhookedGreenlet
importlib.reload(gevent)
def handleGreenletError(context, type, value, tb):
if context.__class__ is tuple and context[0].__class__.__name__ == "ThreadPool":
# Exceptions in ThreadPool will be handled in the main Thread
return None
if isinstance(value, str):
# Cython can raise errors where the value is a plain string
# e.g., AttributeError, "_semaphore.Semaphore has no attr", <traceback>
value = type(value)
if not issubclass(type, gevent.get_hub().NOT_ERROR):
sys.excepthook(type, value, tb)
gevent.get_hub().handle_error = handleGreenletError
try:
signal.signal(signal.SIGTERM, lambda signum, stack_frame: shutdown("SIGTERM"))
except Exception as err:
logging.debug("Error setting up SIGTERM watcher: %s" % err)
if __name__ == "__main__":
import time
from gevent import monkey
monkey.patch_all(thread=False, ssl=False)
from . import Debug
def sleeper(num):
print("started", num)
time.sleep(3)
raise Exception("Error")
print("stopped", num)
thread1 = gevent.spawn(sleeper, 1)
thread2 = gevent.spawn(sleeper, 2)
time.sleep(1)
print("killing...")
thread1.kill(exception=Debug.Notify("Worker stopped"))
#thread2.throw(Debug.Notify("Throw"))
print("killed")
gevent.joinall([thread1,thread2])

View file

@ -1,24 +0,0 @@
import time
import logging
import gevent.lock
from Debug import Debug
class DebugLock:
def __init__(self, log_after=0.01, name="Lock"):
self.name = name
self.log_after = log_after
self.lock = gevent.lock.Semaphore(1)
self.release = self.lock.release
def acquire(self, *args, **kwargs):
s = time.time()
res = self.lock.acquire(*args, **kwargs)
time_taken = time.time() - s
if time_taken >= self.log_after:
logging.debug("%s: Waited %.3fs after called by %s" %
(self.name, time_taken, Debug.formatStack())
)
return res
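Minimal usage sketch of DebugLock above; it behaves like a one-slot gevent semaphore that logs acquisitions slower than log_after:
# DebugLock as a drop-in lock with slow-acquire logging
lock = DebugLock(log_after=0.05, name="DemoLock")
lock.acquire()
try:
    pass  # critical section
finally:
    lock.release()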

View file

@ -1,135 +0,0 @@
import os
import subprocess
import re
import logging
import time
import functools
from Config import config
from util import helper
# Find files with extension in path
def findfiles(path, find_ext):
def sorter(f1, f2):
f1 = f1[0].replace(path, "")
f2 = f2[0].replace(path, "")
if f1 == "":
return 1
elif f2 == "":
return -1
else:
return helper.cmp(f1.lower(), f2.lower())
for root, dirs, files in sorted(os.walk(path, topdown=False), key=functools.cmp_to_key(sorter)):
for file in sorted(files):
file_path = root + "/" + file
file_ext = file.split(".")[-1]
if file_ext in find_ext and not file.startswith("all."):
yield file_path.replace("\\", "/")
# Try to find coffeescript compiler in path
def findCoffeescriptCompiler():
coffeescript_compiler = None
try:
import distutils.spawn
coffeescript_compiler = helper.shellquote(distutils.spawn.find_executable("coffee")) + " --no-header -p"
except Exception:
pass
if coffeescript_compiler:
return coffeescript_compiler
else:
return False
# Generates all.js (merge *.js, compile coffeescript) and all.css (merge *.css, add vendor prefixes)
def merge(merged_path):
merged_path = merged_path.replace("\\", "/")
merge_dir = os.path.dirname(merged_path)
s = time.time()
ext = merged_path.split(".")[-1]
if ext == "js": # If merging .js find .coffee too
find_ext = ["js", "coffee"]
else:
find_ext = [ext]
# If the merged file exists, check the other files' modification dates
if os.path.isfile(merged_path):
merged_mtime = os.path.getmtime(merged_path)
else:
merged_mtime = 0
changed = {}
for file_path in findfiles(merge_dir, find_ext):
if os.path.getmtime(file_path) > merged_mtime + 1:
changed[file_path] = True
if not changed:
return # Assets not changed, nothing to do
old_parts = {}
if os.path.isfile(merged_path): # Find old parts to avoid unnecessary recompile
merged_old = open(merged_path, "rb").read()
for match in re.findall(rb"(/\* ---- (.*?) ---- \*/(.*?)(?=/\* ----|$))", merged_old, re.DOTALL):
old_parts[match[1].decode()] = match[2].strip(b"\n\r")
logging.debug("Merging %s (changed: %s, old parts: %s)" % (merged_path, changed, len(old_parts)))
# Merge files
parts = []
s_total = time.time()
for file_path in findfiles(merge_dir, find_ext):
file_relative_path = file_path.replace(merge_dir + "/", "")
parts.append(b"\n/* ---- %s ---- */\n\n" % file_relative_path.encode("utf8"))
if file_path.endswith(".coffee"): # Compile coffee script
if file_path in changed or file_relative_path not in old_parts: # Only recompile if changed or it has not been compiled before
if config.coffeescript_compiler is None:
config.coffeescript_compiler = findCoffeescriptCompiler()
if not config.coffeescript_compiler:
logging.error("No coffeescript compiler defined, skipping compiling %s" % merged_path)
return False # No coffeescript compiler, skip this file
# Replace / with os separators and escape it
file_path_escaped = helper.shellquote(file_path.replace("/", os.path.sep))
if "%s" in config.coffeescript_compiler: # Replace %s with coffeescript file
command = config.coffeescript_compiler.replace("%s", file_path_escaped)
else: # Put coffeescript file to end
command = config.coffeescript_compiler + " " + file_path_escaped
# Start compiling
s = time.time()
compiler = subprocess.Popen(command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
out = compiler.stdout.read()
compiler.wait()
logging.debug("Running: %s (Done in %.2fs)" % (command, time.time() - s))
# Check errors
if out and out.startswith(b"("): # No error found
parts.append(out)
else: # Put error message in place of source code
error = out
logging.error("%s Compile error: %s" % (file_relative_path, error))
error_escaped = re.escape(error).replace(b"\n", b"\\n").replace(br"\\n", br"\n")
parts.append(
b"alert('%s compile error: %s');" %
(file_relative_path.encode(), error_escaped)
)
else: # Not changed, use the old part
parts.append(old_parts[file_relative_path])
else: # Add to parts
parts.append(open(file_path, "rb").read())
merged = b"\n".join(parts)
if ext == "css": # Vendor prefix css
from lib.cssvendor import cssvendor
merged = cssvendor.prefix(merged)
merged = merged.replace(b"\r", b"")
open(merged_path, "wb").write(merged)
logging.debug("Merged %s (%.2fs)" % (merged_path, time.time() - s_total))
if __name__ == "__main__":
logging.getLogger().setLevel(logging.DEBUG)
os.chdir("..")
config.coffeescript_compiler = r'type "%s" | tools\coffee-node\bin\node.exe tools\coffee-node\bin\coffee --no-header -s -p'
merge("data/12Hw8rTgzrNo4DSh2AkqwPRqDyTticwJyH/js/all.js")

View file

@ -1,69 +0,0 @@
import logging
import time
import os
from Config import config
if config.debug and config.action == "main":
try:
import watchdog
import watchdog.observers
import watchdog.events
logging.debug("Watchdog fs listener detected, source code autoreload enabled")
enabled = True
except Exception as err:
logging.debug("Watchdog fs listener could not be loaded: %s" % err)
enabled = False
else:
enabled = False
class DebugReloader:
def __init__(self, paths=None):
if not paths:
paths = ["src", "plugins", config.data_dir + "/__plugins__"]
self.log = logging.getLogger("DebugReloader")
self.last_changed = 0
self.callbacks = []
if enabled:
self.observer = watchdog.observers.Observer()
event_handler = watchdog.events.FileSystemEventHandler()
event_handler.on_modified = event_handler.on_deleted = self.onChanged
event_handler.on_created = event_handler.on_moved = self.onChanged
for path in paths:
if not os.path.isdir(path):
continue
self.log.debug("Adding autoreload: %s" % path)
self.observer.schedule(event_handler, path, recursive=True)
self.observer.start()
def addCallback(self, f):
self.callbacks.append(f)
def onChanged(self, evt):
path = evt.src_path
ext = path.rsplit(".", 1)[-1]
if ext not in ["py", "json"] or "Test" in path or time.time() - self.last_chaged < 1.0:
return False
self.last_changed = time.time()
if os.path.isfile(path):
time_modified = os.path.getmtime(path)
else:
time_modified = 0
self.log.debug("File changed: %s reloading source code (modified %.3fs ago)" % (evt, time.time() - time_modified))
if time.time() - time_modified > 5: # Probably it's just an attribute change, ignore it
return False
time.sleep(0.1) # Wait for lock release
for callback in self.callbacks:
try:
callback()
except Exception as err:
self.log.exception(err)
def stop(self):
if enabled:
self.observer.stop()
self.log.debug("Stopped autoreload observer")
watcher = DebugReloader()
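Typical wiring of the module-level watcher above; the callback name is a hypothetical reload hook:
# Register a hypothetical callback run whenever a watched .py/.json file changes
def onSourceChanged():
    print("Source changed, plugins could be reloaded here")

watcher.addCallback(onSourceChanged)
# ... and on shutdown:
watcher.stop()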

View file

View file

@ -1,450 +0,0 @@
# Included modules
import os
import time
import json
import collections
import itertools
# Third party modules
import gevent
from Debug import Debug
from Config import config
from util import RateLimit
from util import Msgpack
from util import helper
from Plugin import PluginManager
from contextlib import closing
FILE_BUFF = 1024 * 512
class RequestError(Exception):
pass
# Incoming requests
@PluginManager.acceptPlugins
class FileRequest(object):
__slots__ = ("server", "connection", "req_id", "sites", "log", "responded")
def __init__(self, server, connection):
self.server = server
self.connection = connection
self.req_id = None
self.sites = self.server.sites
self.log = server.log
self.responded = False # Responded to the request
def send(self, msg, streaming=False):
if not self.connection.closed:
self.connection.send(msg, streaming)
def sendRawfile(self, file, read_bytes):
if not self.connection.closed:
self.connection.sendRawfile(file, read_bytes)
def response(self, msg, streaming=False):
if self.responded:
if config.verbose:
self.log.debug("Req id %s already responded" % self.req_id)
return
if not isinstance(msg, dict): # If msg not a dict create a {"body": msg}
msg = {"body": msg}
msg["cmd"] = "response"
msg["to"] = self.req_id
self.responded = True
self.send(msg, streaming=streaming)
# Route file requests
def route(self, cmd, req_id, params):
self.req_id = req_id
# Don't allow sites other than the locked ones
if "site" in params and self.connection.target_onion:
valid_sites = self.connection.getValidSites()
if params["site"] not in valid_sites and valid_sites != ["global"]:
self.response({"error": "Invalid site"})
self.connection.log(
"Site lock violation: %s not in %s, target onion: %s" %
(params["site"], valid_sites, self.connection.target_onion)
)
self.connection.badAction(5)
return False
if cmd == "update":
event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
# If called more than once within 15 sec only keep the last update
RateLimit.callAsync(event, max(self.connection.bad_actions, 15), self.actionUpdate, params)
else:
func_name = "action" + cmd[0].upper() + cmd[1:]
func = getattr(self, func_name, None)
if cmd not in ["getFile", "streamFile"]: # Skip IO bound functions
if self.connection.cpu_time > 0.5:
self.log.debug(
"Delay %s %s, cpu_time used by connection: %.3fs" %
(self.connection.ip, cmd, self.connection.cpu_time)
)
time.sleep(self.connection.cpu_time)
if self.connection.cpu_time > 5:
self.connection.close("Cpu time: %.3fs" % self.connection.cpu_time)
s = time.time()
if func:
func(params)
else:
self.actionUnknown(cmd, params)
if cmd not in ["getFile", "streamFile"]:
taken = time.time() - s
taken_sent = self.connection.last_sent_time - self.connection.last_send_time
self.connection.cpu_time += taken - taken_sent
# Update a site file request
def actionUpdate(self, params):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(1)
self.connection.badAction(5)
return False
inner_path = params.get("inner_path", "")
if not inner_path.endswith("content.json"):
self.response({"error": "Only content.json update allowed"})
self.connection.badAction(5)
return
current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
should_validate_content = True
if "modified" in params and params["modified"] <= current_content_modified:
should_validate_content = False
valid = None # Same or earlier content as we have
body = params["body"]
if not body: # No body sent, we have to download it first
site.log.debug("Missing body from update for file %s, downloading ..." % inner_path)
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
try:
body = peer.getFile(site.address, inner_path).read()
except Exception as err:
site.log.debug("Can't download updated file %s: %s" % (inner_path, err))
self.response({"error": "Invalid File update: Failed to download updated file content"})
self.connection.badAction(5)
return
if should_validate_content:
try:
if type(body) is str:
body = body.encode()
# elif type(body) is list:
# content = json.loads(bytes(list).decode())
content = json.loads(body.decode())
except Exception as err:
site.log.debug("Update for %s is invalid JSON: %s" % (inner_path, err))
self.response({"error": "File invalid JSON"})
self.connection.badAction(5)
return
file_uri = "%s/%s:%s" % (site.address, inner_path, content["modified"])
if self.server.files_parsing.get(file_uri): # Check if we are already working on it
valid = None # Same file
else:
try:
valid = site.content_manager.verifyFile(inner_path, content)
except Exception as err:
site.log.debug("Update for %s is invalid: %s" % (inner_path, err))
error = err
valid = False
if valid is True: # Valid and changed
site.log.info("Update for %s looks valid, saving..." % inner_path)
self.server.files_parsing[file_uri] = True
site.storage.write(inner_path, body)
del params["body"]
site.onFileDone(inner_path) # Trigger filedone
# Download every changed file from peer
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
# On complete publish to other peers
diffs = params.get("diffs", {})
site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs, limit=6), "publish_%s" % inner_path)
# Load new content file and download changed files in new thread
def downloader():
site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
del self.server.files_parsing[file_uri]
gevent.spawn(downloader)
self.response({"ok": "Thanks, file %s updated!" % inner_path})
self.connection.goodAction()
elif valid is None: # Not changed
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update old") # Add or get peer
if peer:
if not peer.connection:
peer.connect(self.connection) # Assign current connection to peer
if inner_path in site.content_manager.contents:
peer.last_content_json_update = site.content_manager.contents[inner_path]["modified"]
if config.verbose:
site.log.debug(
"Same version, adding new peer for locked files: %s, tasks: %s" %
(peer.key, len(site.worker_manager.tasks))
)
for task in site.worker_manager.tasks: # New peer add to every ongoing task
if task["peers"] and not task["optional_hash_id"]:
# Download the file from this peer too if it's peer-locked
site.needFile(task["inner_path"], peer=peer, update=True, blocking=False)
self.response({"ok": "File not changed"})
self.connection.badAction()
else: # Invalid sign or sha hash
self.response({"error": "File %s invalid: %s" % (inner_path, error)})
self.connection.badAction(5)
def isReadable(self, site, inner_path, file, pos):
return True
# Send file content request
def handleGetFile(self, params, streaming=False):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
try:
file_path = site.storage.getPath(params["inner_path"])
if streaming:
file_obj = site.storage.open(params["inner_path"])
else:
file_obj = Msgpack.FilePart(file_path, "rb")
with file_obj as file:
file.seek(params["location"])
read_bytes = params.get("read_bytes", FILE_BUFF)
file_size = os.fstat(file.fileno()).st_size
if file_size > read_bytes: # Check if file is readable at current position (for big files)
if not self.isReadable(site, params["inner_path"], file, params["location"]):
raise RequestError("File not readable at position: %s" % params["location"])
else:
if params.get("file_size") and params["file_size"] != file_size:
self.connection.badAction(2)
raise RequestError("File size does not match: %sB != %sB" % (params["file_size"], file_size))
if not streaming:
file.read_bytes = read_bytes
if params["location"] > file_size:
self.connection.badAction(5)
raise RequestError("Bad file location")
if streaming:
back = {
"size": file_size,
"location": min(file.tell() + read_bytes, file_size),
"stream_bytes": min(read_bytes, file_size - params["location"])
}
self.response(back)
self.sendRawfile(file, read_bytes=read_bytes)
else:
back = {
"body": file,
"size": file_size,
"location": min(file.tell() + file.read_bytes, file_size)
}
self.response(back, streaming=True)
bytes_sent = min(read_bytes, file_size - params["location"]) # Number of bytes we going to send
site.settings["bytes_sent"] = site.settings.get("bytes_sent", 0) + bytes_sent
if config.debug_socket:
self.log.debug("File %s at position %s sent %s bytes" % (file_path, params["location"], bytes_sent))
# Add peer to site if not added before
connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
if connected_peer: # Just added
connected_peer.connect(self.connection) # Assign current connection to peer
return {"bytes_sent": bytes_sent, "file_size": file_size, "location": params["location"]}
except RequestError as err:
self.log.debug("GetFile %s %s %s request error: %s" % (self.connection, params["site"], params["inner_path"], Debug.formatException(err)))
self.response({"error": "File read error: %s" % err})
except OSError as err:
if config.verbose:
self.log.debug("GetFile read error: %s" % Debug.formatException(err))
self.response({"error": "File read error"})
return False
except Exception as err:
self.log.error("GetFile exception: %s" % Debug.formatException(err))
self.response({"error": "File read exception"})
return False
def actionGetFile(self, params):
return self.handleGetFile(params)
def actionStreamFile(self, params):
return self.handleGetFile(params, streaming=True)
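# Editor's sketch (not part of the original file): the request and response shapes handled by
# handleGetFile() above, reconstructed from the code. All concrete values are placeholders.
example_getfile_request = {
    "site": "1SiteAddressPlaceholder",   # site address
    "inner_path": "content.json",        # path of the file inside the site
    "location": 0,                       # byte offset to start reading from
    "read_bytes": 512 * 1024,            # optional, defaults to FILE_BUFF
    "file_size": 1024                    # optional, used to detect size mismatches
}
example_getfile_response = {"body": b"...", "size": 1024, "location": 1024}
# streamFile answers with metadata only and then streams the raw bytes over the connection:
example_streamfile_response = {"size": 1024, "location": 1024, "stream_bytes": 1024}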
# Peer exchange request
def actionPex(self, params):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
got_peer_keys = []
added = 0
# Add requester peer to site
connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
if connected_peer: # It was not registered before
added += 1
connected_peer.connect(self.connection) # Assign current connection to peer
# Add sent peers to site
for packed_address in itertools.chain(params.get("peers", []), params.get("peers_ipv6", [])):
address = helper.unpackAddress(packed_address)
got_peer_keys.append("%s:%s" % address)
if site.addPeer(*address, source="pex"):
added += 1
# Add sent onion peers to site
for packed_address in params.get("peers_onion", []):
address = helper.unpackOnionAddress(packed_address)
got_peer_keys.append("%s:%s" % address)
if site.addPeer(*address, source="pex"):
added += 1
# Send back peers that are not in the sent list and are connectable (not port 0)
packed_peers = helper.packPeers(site.getConnectablePeers(params["need"], ignore=got_peer_keys, allow_private=False))
if added:
site.worker_manager.onPeers()
if config.verbose:
self.log.debug(
"Added %s peers to %s using pex, sending back %s" %
(added, site, {key: len(val) for key, val in packed_peers.items()})
)
back = {
"peers": packed_peers["ipv4"],
"peers_ipv6": packed_peers["ipv6"],
"peers_onion": packed_peers["onion"]
}
self.response(back)
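# Editor's sketch (not part of the original file): a pex exchange as handled above. Peer
# addresses are packed with helper.packAddress()/packOnionAddress(); the bytes shown here
# are placeholders only.
example_pex_request = {
    "site": "1SiteAddressPlaceholder",
    "peers": [b"<packed ipv4:port>"],     # packed IPv4 addresses the requester already knows
    "peers_onion": [],                     # optional packed .onion addresses
    "need": 5                              # how many connectable peers to send back
}
example_pex_response = {
    "peers": [b"<packed ipv4:port>"],
    "peers_ipv6": [],
    "peers_onion": []
}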
# Get modified content.json files since
def actionListModified(self, params):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
modified_files = site.content_manager.listModified(params["since"])
# Add peer to site if not added before
connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
if connected_peer: # Just added
connected_peer.connect(self.connection) # Assign current connection to peer
self.response({"modified_files": modified_files})
def actionGetHashfield(self, params):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
# Add peer to site if not added before
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="request")
if not peer.connection: # Just added
peer.connect(self.connection) # Assign current connection to peer
peer.time_my_hashfield_sent = time.time() # Don't send again if not changed
self.response({"hashfield_raw": site.content_manager.hashfield.tobytes()})
def findHashIds(self, site, hash_ids, limit=100):
back = collections.defaultdict(lambda: collections.defaultdict(list))
found = site.worker_manager.findOptionalHashIds(hash_ids, limit=limit)
for hash_id, peers in found.items():
for peer in peers:
ip_type = helper.getIpType(peer.ip)
if len(back[ip_type][hash_id]) < 20:
back[ip_type][hash_id].append(peer.packMyAddress())
return back
def actionFindHashIds(self, params):
site = self.sites.get(params["site"])
s = time.time()
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
time.sleep(0.1)
back = self.findHashIds(site, params["hash_ids"], limit=10)
else:
back = self.findHashIds(site, params["hash_ids"])
RateLimit.called(event_key)
my_hashes = []
my_hashfield_set = set(site.content_manager.hashfield)
for hash_id in params["hash_ids"]:
if hash_id in my_hashfield_set:
my_hashes.append(hash_id)
if config.verbose:
self.log.debug(
"Found: %s for %s hashids in %.3fs" %
({key: len(val) for key, val in back.items()}, len(params["hash_ids"]), time.time() - s)
)
self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_ipv6": back["ipv6"], "my": my_hashes})
def actionSetHashfield(self, params):
site = self.sites.get(params["site"])
if not site or not site.isServing(): # Site unknown or not serving
self.response({"error": "Unknown site"})
self.connection.badAction(5)
return False
# Add or get peer
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection, source="request")
if not peer.connection:
peer.connect(self.connection)
peer.hashfield.replaceFromBytes(params["hashfield_raw"])
self.response({"ok": "Updated"})
# Send a simple Pong! answer
def actionPing(self, params):
self.response(b"Pong!")
# Check requested port of the other peer
def actionCheckport(self, params):
if helper.getIpType(self.connection.ip) == "ipv6":
sock_address = (self.connection.ip, params["port"], 0, 0)
else:
sock_address = (self.connection.ip, params["port"])
with closing(helper.createSocket(self.connection.ip)) as sock:
sock.settimeout(5)
if sock.connect_ex(sock_address) == 0:
self.response({"status": "open", "ip_external": self.connection.ip})
else:
self.response({"status": "closed", "ip_external": self.connection.ip})
# Unknown command
def actionUnknown(self, cmd, params):
self.response({"error": "Unknown command: %s" % cmd})
self.connection.badAction(5)

View file

@ -1,409 +0,0 @@
import logging
import time
import random
import socket
import sys
import gevent
import gevent.pool
from gevent.server import StreamServer
import util
from util import helper
from Config import config
from .FileRequest import FileRequest
from Peer import PeerPortchecker
from Site import SiteManager
from Connection import ConnectionServer
from Plugin import PluginManager
from Debug import Debug
@PluginManager.acceptPlugins
class FileServer(ConnectionServer):
def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
self.site_manager = SiteManager.site_manager
self.portchecker = PeerPortchecker.PeerPortchecker(self)
self.log = logging.getLogger("FileServer")
self.ip_type = ip_type
self.ip_external_list = []
self.supported_ip_types = ["ipv4"] # Outgoing ip_type support
if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
self.supported_ip_types.append("ipv6")
if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
ip = ip.replace("*", "::")
else:
ip = ip.replace("*", "0.0.0.0")
if config.tor == "always":
port = config.tor_hs_port
config.fileserver_port = port
elif port == 0: # Use random port
port_range_from, port_range_to = list(map(int, config.fileserver_port_range.split("-")))
port = self.getRandomPort(ip, port_range_from, port_range_to)
config.fileserver_port = port
if not port:
raise Exception("Can't find bindable port")
if not config.tor == "always":
config.saveValue("fileserver_port", port) # Save random port value for next restart
config.arguments.fileserver_port = port
ConnectionServer.__init__(self, ip, port, self.handleRequest)
self.log.debug("Supported IP types: %s" % self.supported_ip_types)
if ip_type == "dual" and ip == "::":
# Also bind to an IPv4 address in dual mode
try:
self.log.debug("Binding proxy to %s:%s" % ("::", self.port))
self.stream_server_proxy = StreamServer(
("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
)
except Exception as err:
self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
self.port_opened = {}
self.sites = self.site_manager.sites
self.last_request = time.time()
self.files_parsing = {}
self.ui_server = None
def getRandomPort(self, ip, port_range_from, port_range_to):
"""Generates Random Port from given range
Args:
ip: IP Address
port_range_from: From Range
port_range_to: to Range
"""
self.log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
tried = []
for bind_retry in range(100):
port = random.randint(port_range_from, port_range_to)
if port in tried:
continue
tried.append(port)
sock = helper.createSocket(ip)
try:
sock.bind((ip, port))
success = True
except Exception as err:
self.log.warning("Error binding to port %s: %s" % (port, err))
success = False
sock.close()
if success:
self.log.info("Found unused random port: %s" % port)
return port
else:
time.sleep(0.1)
return False
def isIpv6Supported(self):
if config.tor == "always":
return True
# Test if we can connect to ipv6 address
ipv6_testip = "fcec:ae97:8902:d810:6c92:ec67:efb2:3ec5"
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.connect((ipv6_testip, 80))
local_ipv6 = sock.getsockname()[0]
if local_ipv6 == "::1":
self.log.debug("IPv6 not supported, no local IPv6 address")
return False
else:
self.log.debug("IPv6 supported on IP %s" % local_ipv6)
return True
except socket.error as err:
self.log.warning("IPv6 not supported: %s" % err)
return False
except Exception as err:
self.log.error("IPv6 check error: %s" % err)
return False
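# Editor's sketch (not part of the original file): the "connect a UDP socket and inspect the
# chosen source address" trick used by isIpv6Supported() above, as a standalone function.
# connect() on a datagram socket sends nothing; it only selects a route and local address.
# The test address below is an assumption (any routable IPv6 address works).
def has_routable_ipv6(test_ip="2001:4860:4860::8888"):
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        try:
            sock.connect((test_ip, 80))
            return sock.getsockname()[0] != "::1"
        finally:
            sock.close()
    except OSError:
        return False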
def listenProxy(self):
try:
self.stream_server_proxy.serve_forever()
except Exception as err:
if err.errno == 98: # Address already in use error
self.log.debug("StreamServer proxy listen error: %s" % err)
else:
self.log.info("StreamServer proxy listen error: %s" % err)
# Handle request to fileserver
def handleRequest(self, connection, message):
if config.verbose:
if "params" in message:
self.log.debug(
"FileRequest: %s %s %s %s" %
(str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
)
else:
self.log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
req = FileRequest(self, connection)
req.route(message["cmd"], message.get("req_id"), message.get("params"))
if not self.has_internet and not connection.is_private_ip:
self.has_internet = True
self.onInternetOnline()
def onInternetOnline(self):
self.log.info("Internet online")
gevent.spawn(self.checkSites, check_files=False, force_port_check=True)
# Reload the FileRequest class to prevent restarts in debug mode
def reload(self):
global FileRequest
import imp
FileRequest = imp.load_source("FileRequest", "src/File/FileRequest.py").FileRequest
def portCheck(self):
if config.offline:
self.log.info("Offline mode: port check disabled")
res = {"ipv4": None, "ipv6": None}
self.port_opened = res
return res
if config.ip_external:
for ip_external in config.ip_external:
SiteManager.peer_blacklist.append((ip_external, self.port)) # Add myself to peer blacklist
ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
res = {
"ipv4": "ipv4" in ip_external_types,
"ipv6": "ipv6" in ip_external_types
}
self.ip_external_list = config.ip_external
self.port_opened.update(res)
self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
return res
self.port_opened = {}
if self.ui_server:
self.ui_server.updateWebsocket()
if "ipv6" in self.supported_ip_types:
res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
else:
res_ipv6_thread = None
res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
if not res_ipv4["opened"] and config.tor != "always":
if self.portchecker.portOpen(self.port):
res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
if res_ipv6_thread is None:
res_ipv6 = {"ip": None, "opened": None}
else:
res_ipv6 = res_ipv6_thread.get()
if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
res_ipv6["opened"] = False
self.ip_external_list = []
for res_ip in [res_ipv4, res_ipv6]:
if res_ip["ip"] and res_ip["ip"] not in self.ip_external_list:
self.ip_external_list.append(res_ip["ip"])
SiteManager.peer_blacklist.append((res_ip["ip"], self.port))
self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
# Add external IPs from local interfaces
interface_ips = helper.getInterfaceIps("ipv4")
if "ipv6" in self.supported_ip_types:
interface_ips += helper.getInterfaceIps("ipv6")
for ip in interface_ips:
if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
self.ip_external_list.append(ip)
res[helper.getIpType(ip)] = True  # Assume the port is open if we have an external interface IP
SiteManager.peer_blacklist.append((ip, self.port))
self.log.debug("External ip found on interfaces: %s" % ip)
self.port_opened.update(res)
if self.ui_server:
self.ui_server.updateWebsocket()
return res
# Check site file integrity
def checkSite(self, site, check_files=False):
if site.isServing():
site.announce(mode="startup") # Announce site to tracker
site.update(check_files=check_files) # Update site's content.json and download changed files
site.sendMyHashfield()
site.updateHashfield()
# Check sites integrity
@util.Noparallel()
def checkSites(self, check_files=False, force_port_check=False):
self.log.debug("Checking sites...")
s = time.time()
sites_checking = False
if not self.port_opened or force_port_check: # Test and open port if not tested yet
if len(self.sites) <= 2:  # Don't wait for port opening on first startup
sites_checking = True
for address, site in list(self.sites.items()):
gevent.spawn(self.checkSite, site, check_files)
self.portCheck()
if not self.port_opened["ipv4"]:
self.tor_manager.startOnions()
if not sites_checking:
check_pool = gevent.pool.Pool(5)
# Check sites integrity
for site in sorted(list(self.sites.values()), key=lambda site: site.settings.get("modified", 0), reverse=True):
if not site.isServing():
continue
check_thread = check_pool.spawn(self.checkSite, site, check_files) # Check in new thread
time.sleep(2)
if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
check_thread.join(timeout=5)
self.log.debug("Checksites done in %.3fs" % (time.time() - s))
def cleanupSites(self):
import gc
startup = True
time.sleep(5 * 60) # Sites already cleaned up on startup
peers_protected = set([])
while 1:
# Site health maintenance every 20 min
self.log.debug(
"Running site cleanup, connections: %s, internet: %s, protected peers: %s" %
(len(self.connections), self.has_internet, len(peers_protected))
)
for address, site in list(self.sites.items()):
if not site.isServing():
continue
if not startup:
site.cleanupPeers(peers_protected)
time.sleep(1) # Prevent too quick request
peers_protected = set([])
for address, site in list(self.sites.items()):
if not site.isServing():
continue
if site.peers:
with gevent.Timeout(10, exception=False):
site.announcer.announcePex()
# The last modification check failed, retry the update
if site.content_updated is False:
site.update()
elif site.bad_files:
site.retryBadFiles()
if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
# Keep active connections if the site has been modified within the last 7 days
connected_num = site.needConnections(check_site_on_reconnect=True)
if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing
peers_protected.update([peer.key for peer in site.getConnectedPeers()])
time.sleep(1) # Prevent too quick request
site = None
gc.collect() # Implicit garbage collection
startup = False
time.sleep(60 * 20)
def announceSite(self, site):
site.announce(mode="update", pex=False)
active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
if site.settings["own"] or active_site:
# Check connections more frequently on own and active sites to speed-up first connections
site.needConnections(check_site_on_reconnect=True)
site.sendMyHashfield(3)
site.updateHashfield(3)
# Announce sites every 20 min
def announceSites(self):
time.sleep(5 * 60) # Sites already announced on startup
while 1:
config.loadTrackersFile()
s = time.time()
for address, site in list(self.sites.items()):
if not site.isServing():
continue
gevent.spawn(self.announceSite, site).join(timeout=10)
time.sleep(1)
taken = time.time() - s
# Query all trackers one by one, evenly distributed over 20 minutes
sleep = max(0, 60 * 20 / len(config.trackers) - taken)
self.log.debug("Site announce tracker done in %.3fs, sleeping for %.3fs..." % (taken, sleep))
time.sleep(sleep)
# Detect if the computer has just woken up from sleep
def wakeupWatcher(self):
last_time = time.time()
last_my_ips = socket.gethostbyname_ex('')[2]
while 1:
time.sleep(30)
is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
if is_time_changed:
# If more than 3 minutes passed, the computer was probably in sleep mode
self.log.info(
"Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
(last_time, time.time(), time.time() - last_time)
)
my_ips = socket.gethostbyname_ex('')[2]
is_ip_changed = my_ips != last_my_ips
if is_ip_changed:
self.log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
if is_time_changed or is_ip_changed:
self.checkSites(check_files=False, force_port_check=True)
last_time = time.time()
last_my_ips = my_ips
# Bind and start serving sites
def start(self, check_sites=True):
if self.stopping:
return False
ConnectionServer.start(self)
try:
self.stream_server.start()
except Exception as err:
self.log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
self.sites = self.site_manager.list()
if config.debug:
# Auto reload FileRequest on change
from Debug import DebugReloader
DebugReloader.watcher.addCallback(self.reload)
if check_sites: # Open port, Update sites, Check files integrity
gevent.spawn(self.checkSites)
thread_announce_sites = gevent.spawn(self.announceSites)
thread_cleanup_sites = gevent.spawn(self.cleanupSites)
thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
ConnectionServer.listen(self)
self.log.debug("Stopped.")
def stop(self):
if self.running and self.portchecker.upnp_port_opened:
self.log.debug('Closing port %d' % self.port)
try:
self.portchecker.portClose(self.port)
self.log.info('Closed port via upnp.')
except Exception as err:
self.log.info("Failed at attempt to use upnp to close port: %s" % err)
return ConnectionServer.stop(self)
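# Editor's sketch (not part of the original file): minimal standalone startup, assuming the
# usual ZeroNet environment (parsed Config, plugins and SiteManager) is already initialised.
if __name__ == "__main__":
    file_server = FileServer()              # binds using config.fileserver_ip / fileserver_port
    file_server.portCheck()                 # probe (and try to open) the fileserver port
    file_server.start(check_sites=True)     # blocking: announces, checks and serves sites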

View file

@ -1,2 +0,0 @@
from .FileServer import FileServer
from .FileRequest import FileRequest

View file

@ -1,410 +0,0 @@
import logging
import time
import sys
import itertools
import collections
import gevent
import io
from Debug import Debug
from Config import config
from util import helper
from .PeerHashfield import PeerHashfield
from Plugin import PluginManager
if config.use_tempfiles:
import tempfile
# Communicate with remote peers
@PluginManager.acceptPlugins
class Peer(object):
__slots__ = (
"ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield",
"time_added", "has_hashfield", "is_tracker_connection", "time_my_hashfield_sent", "last_ping", "reputation",
"last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time"
)
def __init__(self, ip, port, site=None, connection_server=None):
self.ip = ip
self.port = port
self.site = site
self.key = "%s:%s" % (ip, port)
self.connection = None
self.connection_server = connection_server
self.has_hashfield = False # Lazy hashfield object not created yet
self.time_hashfield = None  # Last time the peer's hashfield was downloaded
self.time_my_hashfield_sent = None # Last time my hashfield sent to peer
self.time_found = time.time() # Time of last found in the torrent tracker
self.time_response = None # Time of last successful response from peer
self.time_added = time.time()
self.last_ping = None # Last response time for ping
self.is_tracker_connection = False # Tracker connection instead of normal peer
self.reputation = 0 # More likely to connect if larger
self.last_content_json_update = 0.0 # Modify date of last received content.json
self.connection_error = 0 # Series of connection error
self.hash_failed = 0 # Number of bad files from peer
self.download_bytes = 0 # Bytes downloaded
self.download_time = 0 # Time spent to download
def __getattr__(self, key):
if key == "hashfield":
self.has_hashfield = True
self.hashfield = PeerHashfield()
return self.hashfield
else:
# Raise directly; calling getattr(self, key) here would recurse back into __getattr__
raise AttributeError("%s instance has no attribute %r" % (type(self).__name__, key))
def log(self, text):
if not config.verbose:
return  # Only log in verbose mode
if self.site:
self.site.log.debug("%s:%s %s" % (self.ip, self.port, text))
else:
logging.debug("%s:%s %s" % (self.ip, self.port, text))
# Connect to host
def connect(self, connection=None):
if self.reputation < -10:
self.reputation = -10
if self.reputation > 10:
self.reputation = 10
if self.connection:
self.log("Getting connection (Closing %s)..." % self.connection)
self.connection.close("Connection change")
else:
self.log("Getting connection (reputation: %s)..." % self.reputation)
if connection: # Connection specified
self.log("Assigning connection %s" % connection)
self.connection = connection
self.connection.sites += 1
else: # Try to find from connection pool or create new connection
self.connection = None
try:
if self.connection_server:
connection_server = self.connection_server
elif self.site:
connection_server = self.site.connection_server
else:
import main
connection_server = main.file_server
self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
self.reputation += 1
self.connection.sites += 1
except Exception as err:
self.onConnectionError("Getting connection error")
self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
(Debug.formatException(err), self.connection_error, self.hash_failed))
self.connection = None
return self.connection
# Check if we have connection to peer
def findConnection(self):
if self.connection and self.connection.connected: # We have connection to peer
return self.connection
else: # Try to find from other sites connections
self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site)
if self.connection:
self.connection.sites += 1
return self.connection
def __str__(self):
if self.site:
return "Peer:%-12s of %s" % (self.ip, self.site.address_short)
else:
return "Peer:%-12s" % self.ip
def __repr__(self):
return "<%s>" % self.__str__()
def packMyAddress(self):
if self.ip.endswith(".onion"):
return helper.packOnionAddress(self.ip, self.port)
else:
return helper.packAddress(self.ip, self.port)
# Found a peer from a source
def found(self, source="other"):
if self.reputation < 5:
if source == "tracker":
if self.ip.endswith(".onion"):
self.reputation += 1
else:
self.reputation += 2
elif source == "local":
self.reputation += 20
if source in ("tracker", "local"):
self.site.peers_recent.appendleft(self)
self.time_found = time.time()
# Send a command to peer and return response value
def request(self, cmd, params={}, stream_to=None):
if not self.connection or self.connection.closed:
self.connect()
if not self.connection:
self.onConnectionError("Reconnect error")
return None # Connection failed
self.log("Send request: %s %s %s %s" % (params.get("site", ""), cmd, params.get("inner_path", ""), params.get("location", "")))
for retry in range(1, 4): # Retry 3 times
try:
if not self.connection:
raise Exception("No connection found")
res = self.connection.request(cmd, params, stream_to)
if not res:
raise Exception("Send error")
if "error" in res:
self.log("%s error: %s" % (cmd, res["error"]))
self.onConnectionError("Response error")
break
else: # Successful request, reset connection error num
self.connection_error = 0
self.time_response = time.time()
if res:
return res
else:
raise Exception("Invalid response: %s" % res)
except Exception as err:
if type(err).__name__ == "Notify": # Greenlet killed by worker
self.log("Peer worker got killed: %s, aborting cmd: %s" % (err.message, cmd))
break
else:
self.onConnectionError("Request error")
self.log(
"%s (connection_error: %s, hash_failed: %s, retry: %s)" %
(Debug.formatException(err), self.connection_error, self.hash_failed, retry)
)
time.sleep(1 * retry)
self.connect()
return None  # Failed after 3 retries
# Get a file content from peer
def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
if file_size and file_size > 5 * 1024 * 1024:
max_read_size = 1024 * 1024
else:
max_read_size = 512 * 1024
if pos_to:
read_bytes = min(max_read_size, pos_to - pos_from)
else:
read_bytes = max_read_size
location = pos_from
if config.use_tempfiles:
buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
else:
buff = io.BytesIO()
s = time.time()
while True: # Read in smaller parts
if config.stream_downloads or read_bytes > 256 * 1024 or streaming:
res = self.request("streamFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size}, stream_to=buff)
if not res or "location" not in res: # Error
return False
else:
self.log("Send: %s" % inner_path)
res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size})
if not res or "location" not in res: # Error
return False
self.log("Recv: %s" % inner_path)
buff.write(res["body"])
res["body"] = None # Save memory
if res["location"] == res["size"] or res["location"] == pos_to: # End of file
break
else:
location = res["location"]
if pos_to:
read_bytes = min(max_read_size, pos_to - location)
if pos_to:
recv = pos_to - pos_from
else:
recv = res["location"]
self.download_bytes += recv
self.download_time += (time.time() - s)
if self.site:
self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + recv
self.log("Downloaded: %s, pos: %s, read_bytes: %s" % (inner_path, buff.tell(), read_bytes))
buff.seek(0)
return buff
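# Editor's sketch (not part of the original file): fetching a single file from one peer.
# `site` is assumed to be a loaded Site object; the address and source label are placeholders.
# peer = site.addPeer("1.2.3.4", 15441, return_peer=True, source="manual")
# buff = peer.getFile(site.address, "content.json")
# if buff:
#     data = buff.read()   # getFile() returns the buffer seeked back to position 0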
# Send a ping request
def ping(self):
response_time = None
for retry in range(1, 3):  # Retry 2 times
s = time.time()
with gevent.Timeout(10.0, False): # 10 sec timeout, don't raise exception
res = self.request("ping")
if res and "body" in res and res["body"] == b"Pong!":
response_time = time.time() - s
break # All fine, exit from for loop
# Timeout reached or bad response
self.onConnectionError("Ping timeout")
self.connect()
time.sleep(1)
if response_time:
self.log("Ping: %.3f" % response_time)
else:
self.log("Ping failed")
self.last_ping = response_time
return response_time
# Request peer exchange from peer
def pex(self, site=None, need_num=5):
if not site:
site = self.site # If no site defined request peers for this site
# Give back 5 connectable peers
packed_peers = helper.packPeers(self.site.getConnectablePeers(5, allow_private=False))
request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
if packed_peers["onion"]:
request["peers_onion"] = packed_peers["onion"]
if packed_peers["ipv6"]:
request["peers_ipv6"] = packed_peers["ipv6"]
res = self.request("pex", request)
if not res or "error" in res:
return False
added = 0
# Remove unsupported peer types
if "peers_ipv6" in res and self.connection and "ipv6" not in self.connection.server.supported_ip_types:
del res["peers_ipv6"]
if "peers_onion" in res and self.connection and "onion" not in self.connection.server.supported_ip_types:
del res["peers_onion"]
# Add IPv4 + IPv6
for peer in itertools.chain(res.get("peers", []), res.get("peers_ipv6", [])):
address = helper.unpackAddress(peer)
if site.addPeer(*address, source="pex"):
added += 1
# Add Onion
for peer in res.get("peers_onion", []):
address = helper.unpackOnionAddress(peer)
if site.addPeer(*address, source="pex"):
added += 1
if added:
self.log("Added peers using pex: %s" % added)
return added
# List modified files since the date
# Return: {inner_path: modification date,...}
def listModified(self, since):
return self.request("listModified", {"since": since, "site": self.site.address})
def updateHashfield(self, force=False):
# Don't update hashfield again in 5 min
if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
return False
self.time_hashfield = time.time()
res = self.request("getHashfield", {"site": self.site.address})
if not res or "error" in res or "hashfield_raw" not in res:
return False
self.hashfield.replaceFromBytes(res["hashfield_raw"])
return self.hashfield
# Find peers for hashids
# Return: {hash1: ["ip:port", "ip:port",...],...}
def findHashIds(self, hash_ids):
res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
if not res or "error" in res or type(res) is not dict:
return False
back = collections.defaultdict(list)
for ip_type in ["ipv4", "ipv6", "onion"]:
if ip_type == "ipv4":
key = "peers"
else:
key = "peers_%s" % ip_type
for hash, peers in list(res.get(key, {}).items())[0:30]:
if ip_type == "onion":
unpacker_func = helper.unpackOnionAddress
else:
unpacker_func = helper.unpackAddress
back[hash] += list(map(unpacker_func, peers))
for hash in res.get("my", []):
if self.connection:
back[hash].append((self.connection.ip, self.connection.port))
else:
back[hash].append((self.ip, self.port))
return back
# Send my hashfield to peer
# Return: True if sent
def sendMyHashfield(self):
if self.connection and self.connection.handshake.get("rev", 0) < 510:
return False # Not supported
if self.time_my_hashfield_sent and self.site.content_manager.hashfield.time_changed <= self.time_my_hashfield_sent:
return False # Peer already has the latest hashfield
res = self.request("setHashfield", {"site": self.site.address, "hashfield_raw": self.site.content_manager.hashfield.tobytes()})
if not res or "error" in res:
return False
else:
self.time_my_hashfield_sent = time.time()
return True
def publish(self, address, inner_path, body, modified, diffs=[]):
if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
# To save bandwidth we don't push big content.json files to peers
body = b""
return self.request("update", {
"site": address,
"inner_path": inner_path,
"body": body,
"modified": modified,
"diffs": diffs
})
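# Editor's sketch (not part of the original file): the "update" push built by publish() above;
# the receiving peer's FileRequest update handler validates and saves it. Values are placeholders.
example_update_request = {
    "site": "1SiteAddressPlaceholder",
    "inner_path": "content.json",
    "body": b"{...}",          # signed content.json (sent empty for big files on rev >= 4095)
    "modified": 1234567890,    # modification timestamp from content.json
    "diffs": {}                # optional per-file diffs so peers can patch instead of re-download
}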
# Stop and remove from site
def remove(self, reason="Removing"):
self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed))
if self.site and self.key in self.site.peers:
del(self.site.peers[self.key])
if self.site and self in self.site.peers_recent:
self.site.peers_recent.remove(self)
if self.connection:
self.connection.close(reason)
# - EVENTS -
# On connection error
def onConnectionError(self, reason="Unknown"):
self.connection_error += 1
if self.site and len(self.site.peers) > 200:
limit = 3
else:
limit = 6
self.reputation -= 1
if self.connection_error >= limit: # Dead peer
self.remove("Peer connection: %s" % reason)
# Done working with peer
def onWorkerDone(self):
pass

View file

@ -1,75 +0,0 @@
import array
import time
class PeerHashfield(object):
__slots__ = ("storage", "time_changed", "append", "remove", "tobytes", "frombytes", "__len__", "__iter__")
def __init__(self):
self.storage = self.createStorage()
self.time_changed = time.time()
def createStorage(self):
storage = array.array("H")
self.append = storage.append
self.remove = storage.remove
self.tobytes = storage.tobytes
self.frombytes = storage.frombytes
self.__len__ = storage.__len__
self.__iter__ = storage.__iter__
return storage
def appendHash(self, hash):
hash_id = int(hash[0:4], 16)
if hash_id not in self.storage:
self.storage.append(hash_id)
self.time_changed = time.time()
return True
else:
return False
def appendHashId(self, hash_id):
if hash_id not in self.storage:
self.storage.append(hash_id)
self.time_changed = time.time()
return True
else:
return False
def removeHash(self, hash):
hash_id = int(hash[0:4], 16)
if hash_id in self.storage:
self.storage.remove(hash_id)
self.time_changed = time.time()
return True
else:
return False
def removeHashId(self, hash_id):
if hash_id in self.storage:
self.storage.remove(hash_id)
self.time_changed = time.time()
return True
else:
return False
def getHashId(self, hash):
return int(hash[0:4], 16)
def hasHash(self, hash):
return int(hash[0:4], 16) in self.storage
def replaceFromBytes(self, hashfield_raw):
self.storage = self.createStorage()
self.storage.frombytes(hashfield_raw)
self.time_changed = time.time()
if __name__ == "__main__":
field = PeerHashfield()
s = time.time()
for i in range(10000):
field.appendHashId(i)
print(time.time()-s)
s = time.time()
for i in range(10000):
field.hasHash("AABB")
print(time.time()-s)
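# Editor's note (not part of the original file): a hash id is only the first 4 hex characters
# of a hash (a 16-bit value stored in array("H")), so hasHash() is a compact membership hint
# that can report false positives for different hashes sharing the same prefix.
field2 = PeerHashfield()
field2.appendHash("aabbccdd" * 8)           # stores int("aabb", 16) == 43707
print(field2.hasHash("aabb" + "00" * 30))   # True: same 16-bit prefix, different hash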

View file

@ -1,189 +0,0 @@
import logging
import urllib.request
import urllib.parse
import re
import time
from Debug import Debug
from util import UpnpPunch
class PeerPortchecker(object):
checker_functions = {
"ipv4": ["checkIpfingerprints", "checkCanyouseeme"],
"ipv6": ["checkMyaddr", "checkIpv6scanner"]
}
def __init__(self, file_server):
self.log = logging.getLogger("PeerPortchecker")
self.upnp_port_opened = False
self.file_server = file_server
def requestUrl(self, url, post_data=None):
if type(post_data) is dict:
post_data = urllib.parse.urlencode(post_data).encode("utf8")
req = urllib.request.Request(url, post_data)
req.add_header("Referer", url)
req.add_header("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11")
req.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
return urllib.request.urlopen(req, timeout=20.0)
def portOpen(self, port):
self.log.info("Trying to open port using UpnpPunch...")
try:
UpnpPunch.ask_to_open_port(port, 'ZeroNet', retries=3, protos=["TCP"])
self.upnp_port_opened = True
except Exception as err:
self.log.warning("UpnpPunch run error: %s" % Debug.formatException(err))
return False
return True
def portClose(self, port):
return UpnpPunch.ask_to_close_port(port, protos=["TCP"])
def portCheck(self, port, ip_type="ipv4"):
checker_functions = self.checker_functions[ip_type]
for func_name in checker_functions:
func = getattr(self, func_name)
s = time.time()
try:
res = func(port)
if res:
self.log.info(
"Checked port %s (%s) using %s result: %s in %.3fs" %
(port, ip_type, func_name, res, time.time() - s)
)
time.sleep(0.1)
if res["opened"] and not self.file_server.had_external_incoming:
res["opened"] = False
self.log.warning("Port %s:%s looks opened, but no incoming connection" % (res["ip"], port))
break
except Exception as err:
self.log.warning(
"%s check error: %s in %.3fs" %
(func_name, Debug.formatException(err), time.time() - s)
)
res = {"ip": None, "opened": False}
return res
def checkCanyouseeme(self, port):
data = urllib.request.urlopen("https://www.canyouseeme.org/", b"ip=1.1.1.1&port=%s" % str(port).encode("ascii"), timeout=20.0).read().decode("utf8")
message = re.match(r'.*<p style="padding-left:15px">(.*?)</p>', data, re.DOTALL).group(1)
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace("&nbsp;", " ")) # Strip http tags
match = re.match(r".*service on (.*?) on", message)
if match:
ip = match.group(1)
else:
raise Exception("Invalid response: %s" % message)
if "Success" in message:
return {"ip": ip, "opened": True}
elif "Error" in message:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message)
def checkIpfingerprints(self, port):
data = self.requestUrl("https://www.ipfingerprints.com/portscan.php").read().decode("utf8")
ip = re.match(r'.*name="remoteHost".*?value="(.*?)"', data, re.DOTALL).group(1)
post_data = {
"remoteHost": ip, "start_port": port, "end_port": port,
"normalScan": "Yes", "scan_type": "connect2", "ping_type": "none"
}
message = self.requestUrl("https://www.ipfingerprints.com/scripts/getPortsInfo.php", post_data).read().decode("utf8")
if "open" in message:
return {"ip": ip, "opened": True}
elif "filtered" in message or "closed" in message:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message)
def checkMyaddr(self, port):
url = "http://ipv6.my-addr.com/online-ipv6-port-scan.php"
data = self.requestUrl(url).read().decode("utf8")
ip = re.match(r'.*Your IP address is:[ ]*([0-9\.:a-z]+)', data.replace("&nbsp;", ""), re.DOTALL).group(1)
post_data = {"addr": ip, "ports_selected": "", "ports_list": port}
data = self.requestUrl(url, post_data).read().decode("utf8")
message = re.match(r".*<table class='table_font_16'>(.*?)</table>", data, re.DOTALL).group(1)
if "ok.png" in message:
return {"ip": ip, "opened": True}
elif "fail.png" in message:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message)
def checkIpv6scanner(self, port):
url = "http://www.ipv6scanner.com/cgi-bin/main.py"
data = self.requestUrl(url).read().decode("utf8")
ip = re.match(r'.*Your IP address is[ ]*([0-9\.:a-z]+)', data.replace("&nbsp;", ""), re.DOTALL).group(1)
post_data = {"host": ip, "scanType": "1", "port": port, "protocol": "tcp", "authorized": "yes"}
data = self.requestUrl(url, post_data).read().decode("utf8")
message = re.match(r".*<table id='scantable'>(.*?)</table>", data, re.DOTALL).group(1)
message_text = re.sub("<.*?>", " ", message.replace("<br>", " ").replace("&nbsp;", " ").strip()) # Strip http tags
if "OPEN" in message_text:
return {"ip": ip, "opened": True}
elif "CLOSED" in message_text or "FILTERED" in message_text:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message_text)
def checkPortchecker(self, port): # Not working: Forbidden
data = self.requestUrl("https://portchecker.co").read().decode("utf8")
csrf = re.match(r'.*name="_csrf" value="(.*?)"', data, re.DOTALL).group(1)
data = self.requestUrl("https://portchecker.co", {"port": port, "_csrf": csrf}).read().decode("utf8")
message = re.match(r'.*<div id="results-wrapper">(.*?)</div>', data, re.DOTALL).group(1)
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace("&nbsp;", " ").strip()) # Strip http tags
match = re.match(r".*targetIP.*?value=\"(.*?)\"", data, re.DOTALL)
if match:
ip = match.group(1)
else:
raise Exception("Invalid response: %s" % message)
if "open" in message:
return {"ip": ip, "opened": True}
elif "closed" in message:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message)
def checkSubnetonline(self, port): # Not working: Invalid response
url = "https://www.subnetonline.com/pages/ipv6-network-tools/online-ipv6-port-scanner.php"
data = self.requestUrl(url).read().decode("utf8")
ip = re.match(r'.*Your IP is.*?name="host".*?value="(.*?)"', data, re.DOTALL).group(1)
token = re.match(r'.*name="token".*?value="(.*?)"', data, re.DOTALL).group(1)
post_data = {"host": ip, "port": port, "allow": "on", "token": token, "submit": "Scanning.."}
data = self.requestUrl(url, post_data).read().decode("utf8")
print(post_data, data)
message = re.match(r".*<div class='formfield'>(.*?)</div>", data, re.DOTALL).group(1)
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace("&nbsp;", " ").strip()) # Strip http tags
if "online" in message:
return {"ip": ip, "opened": True}
elif "closed" in message:
return {"ip": ip, "opened": False}
else:
raise Exception("Invalid response: %s" % message)

View file

@ -1,2 +0,0 @@
from .Peer import Peer
from .PeerHashfield import PeerHashfield

View file

@ -1,292 +0,0 @@
import logging
import os
import sys
import shutil
import time
from collections import defaultdict
import importlib
import json
from Debug import Debug
from Config import config
import plugins
class PluginManager:
def __init__(self):
self.log = logging.getLogger("PluginManager")
self.path_plugins = None
if plugins.__file__:
self.path_plugins = os.path.dirname(os.path.abspath(plugins.__file__))
self.path_installed_plugins = config.data_dir + "/__plugins__"
self.plugins = defaultdict(list) # Registered plugins (key: class name, value: list of plugins for class)
self.subclass_order = {} # Record the load order of the plugins, to keep it after reload
self.pluggable = {}
self.plugin_names = [] # Loaded plugin names
self.plugins_updated = {} # List of updated plugins since restart
self.plugins_rev = {} # Installed plugins revision numbers
self.after_load = [] # Execute functions after loaded plugins
self.function_flags = {} # Flag function for permissions
self.reloading = False
self.config_path = config.data_dir + "/plugins.json"
self.loadConfig()
self.config.setdefault("builtin", {})
if self.path_plugins:
sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
self.migratePlugins()
if config.debug: # Auto reload Plugins on file change
from Debug import DebugReloader
DebugReloader.watcher.addCallback(self.reloadPlugins)
def loadConfig(self):
if os.path.isfile(self.config_path):
try:
self.config = json.load(open(self.config_path, encoding="utf8"))
except Exception as err:
self.log.error("Error loading %s: %s" % (self.config_path, err))
self.config = {}
else:
self.config = {}
def saveConfig(self):
f = open(self.config_path, "w", encoding="utf8")
json.dump(self.config, f, ensure_ascii=False, sort_keys=True, indent=2)
def migratePlugins(self):
for dir_name in os.listdir(self.path_plugins):
if dir_name == "Mute":
self.log.info("Deleting deprecated/renamed plugin: %s" % dir_name)
shutil.rmtree("%s/%s" % (self.path_plugins, dir_name))
# -- Load / Unload --
def listPlugins(self, list_disabled=False):
plugins = []
for dir_name in sorted(os.listdir(self.path_plugins)):
dir_path = os.path.join(self.path_plugins, dir_name)
plugin_name = dir_name.replace("disabled-", "")
if dir_name.startswith("disabled"):
is_enabled = False
else:
is_enabled = True
plugin_config = self.config["builtin"].get(plugin_name, {})
if "enabled" in plugin_config:
is_enabled = plugin_config["enabled"]
if dir_name == "__pycache__" or not os.path.isdir(dir_path):
continue # skip
if dir_name.startswith("Debug") and not config.debug:
continue # Only load in debug mode if module name starts with Debug
if not is_enabled and not list_disabled:
continue  # Don't load if disabled
plugin = {}
plugin["source"] = "builtin"
plugin["name"] = plugin_name
plugin["dir_name"] = dir_name
plugin["dir_path"] = dir_path
plugin["inner_path"] = plugin_name
plugin["enabled"] = is_enabled
plugin["rev"] = config.rev
plugin["loaded"] = plugin_name in self.plugin_names
plugins.append(plugin)
plugins += self.listInstalledPlugins(list_disabled)
return plugins
def listInstalledPlugins(self, list_disabled=False):
plugins = []
for address, site_plugins in sorted(self.config.items()):
if address == "builtin":
continue
for plugin_inner_path, plugin_config in sorted(site_plugins.items()):
is_enabled = plugin_config.get("enabled", False)
if not is_enabled and not list_disabled:
continue
plugin_name = os.path.basename(plugin_inner_path)
dir_path = "%s/%s/%s" % (self.path_installed_plugins, address, plugin_inner_path)
plugin = {}
plugin["source"] = address
plugin["name"] = plugin_name
plugin["dir_name"] = plugin_name
plugin["dir_path"] = dir_path
plugin["inner_path"] = plugin_inner_path
plugin["enabled"] = is_enabled
plugin["rev"] = plugin_config.get("rev", 0)
plugin["loaded"] = plugin_name in self.plugin_names
plugins.append(plugin)
return plugins
# Load all plugins
def loadPlugins(self):
all_loaded = True
s = time.time()
if self.path_plugins is None:
return
for plugin in self.listPlugins():
self.log.debug("Loading plugin: %s (%s)" % (plugin["name"], plugin["source"]))
if plugin["source"] != "builtin":
self.plugins_rev[plugin["name"]] = plugin["rev"]
site_plugin_dir = os.path.dirname(plugin["dir_path"])
if site_plugin_dir not in sys.path:
sys.path.append(site_plugin_dir)
try:
sys.modules[plugin["name"]] = __import__(plugin["dir_name"])
except Exception as err:
self.log.error("Plugin %s load error: %s" % (plugin["name"], Debug.formatException(err)))
all_loaded = False
if plugin["name"] not in self.plugin_names:
self.plugin_names.append(plugin["name"])
self.log.debug("Plugins loaded in %.3fs" % (time.time() - s))
for func in self.after_load:
func()
return all_loaded
# Reload all plugins
def reloadPlugins(self):
self.reloading = True
self.after_load = []
self.plugins_before = self.plugins
self.plugins = defaultdict(list) # Reset registered plugins
for module_name, module in list(sys.modules.items()):
if not module or not getattr(module, "__file__", None):
continue
if self.path_plugins not in module.__file__ and self.path_installed_plugins not in module.__file__:
continue
if "allow_reload" in dir(module) and not module.allow_reload: # Reload disabled
# Re-add non-reloadable plugins
for class_name, classes in self.plugins_before.items():
for c in classes:
if c.__module__ != module.__name__:
continue
self.plugins[class_name].append(c)
else:
try:
importlib.reload(module)
except Exception as err:
self.log.error("Plugin %s reload error: %s" % (module_name, Debug.formatException(err)))
self.loadPlugins() # Load new plugins
# Change current classes in memory
import gc
patched = {}
for class_name, classes in self.plugins.items():
classes = classes[:] # Copy the current plugins
classes.reverse()
base_class = self.pluggable[class_name] # Original class
classes.append(base_class)  # Add the base class itself to the end of the inheritance line
plugined_class = type(class_name, tuple(classes), dict()) # Create the plugined class
for obj in gc.get_objects():
if type(obj).__name__ == class_name:
obj.__class__ = plugined_class
patched[class_name] = patched.get(class_name, 0) + 1
self.log.debug("Patched objects: %s" % patched)
# Change classes in modules
patched = {}
for class_name, classes in self.plugins.items():
for module_name, module in list(sys.modules.items()):
if class_name in dir(module):
if "__class__" not in dir(getattr(module, class_name)): # Not a class
continue
base_class = self.pluggable[class_name]
classes = self.plugins[class_name][:]
classes.reverse()
classes.append(base_class)
plugined_class = type(class_name, tuple(classes), dict())
setattr(module, class_name, plugined_class)
patched[class_name] = patched.get(class_name, 0) + 1
self.log.debug("Patched modules: %s" % patched)
self.reloading = False
plugin_manager = PluginManager()  # Singleton
# -- Decorators --
# Accept plugin to class decorator
def acceptPlugins(base_class):
class_name = base_class.__name__
plugin_manager.pluggable[class_name] = base_class
if class_name in plugin_manager.plugins: # Has plugins
classes = plugin_manager.plugins[class_name][:] # Copy the current plugins
# Restore the subclass order after reload
if class_name in plugin_manager.subclass_order:
classes = sorted(
classes,
key=lambda key:
plugin_manager.subclass_order[class_name].index(str(key))
if str(key) in plugin_manager.subclass_order[class_name]
else 9999
)
plugin_manager.subclass_order[class_name] = list(map(str, classes))
classes.reverse()
classes.append(base_class)  # Add the base class itself to the end of the inheritance line
plugined_class = type(class_name, tuple(classes), dict()) # Create the plugined class
plugin_manager.log.debug("New class accepts plugins: %s (Loaded plugins: %s)" % (class_name, classes))
else: # No plugins just use the original
plugined_class = base_class
return plugined_class
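# Editor's note (not part of the original file): with plugins registered for "Request" in the
# order [P1, P2], the class built above is equivalent to `class Request(P2, P1, OriginalRequest)`,
# so the most recently registered plugin is earliest in the MRO and overrides first.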
# Register plugin to class name decorator
def registerTo(class_name):
if config.debug and not plugin_manager.reloading:
import gc
for obj in gc.get_objects():
if type(obj).__name__ == class_name:
raise Exception("Class %s instances already present in memory" % class_name)
plugin_manager.log.debug("New plugin registered to: %s" % class_name)
if class_name not in plugin_manager.plugins:
plugin_manager.plugins[class_name] = []
def classDecorator(self):
plugin_manager.plugins[class_name].append(self)
return self
return classDecorator
def afterLoad(func):
plugin_manager.after_load.append(func)
return func
# - Example usage -
if __name__ == "__main__":
@registerTo("Request")
class RequestPlugin(object):
def actionMainPage(self, path):
return "Hello MainPage!"
@acceptPlugins
class Request(object):
def route(self, path):
func = getattr(self, "action" + path, None)
if func:
return func(path)
else:
return "Can't route to", path
print(Request().route("MainPage"))

View file

File diff suppressed because it is too large

View file

@ -1,293 +0,0 @@
import random
import time
import hashlib
import re
import collections
import gevent
from Plugin import PluginManager
from Config import config
from Debug import Debug
from util import helper
from greenlet import GreenletExit
import util
class AnnounceError(Exception):
pass
global_stats = collections.defaultdict(lambda: collections.defaultdict(int))
@PluginManager.acceptPlugins
class SiteAnnouncer(object):
def __init__(self, site):
self.site = site
self.stats = {}
self.fileserver_port = config.fileserver_port
self.peer_id = self.site.connection_server.peer_id
self.last_tracker_id = random.randint(0, 10)
self.time_last_announce = 0
def getTrackers(self):
return config.trackers
def getSupportedTrackers(self):
trackers = self.getTrackers()
if not self.site.connection_server.tor_manager.enabled:
trackers = [tracker for tracker in trackers if ".onion" not in tracker]
trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)] # Remove trackers with unknown address
if "ipv6" not in self.site.connection_server.supported_ip_types:
trackers = [tracker for tracker in trackers if helper.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]
return trackers
def getAnnouncingTrackers(self, mode):
trackers = self.getSupportedTrackers()
if trackers and (mode == "update" or mode == "more"): # Only announce on one tracker, increment the queried tracker id
self.last_tracker_id += 1
self.last_tracker_id = self.last_tracker_id % len(trackers)
trackers_announcing = [trackers[self.last_tracker_id]]  # We are only going to use this one
else:
trackers_announcing = trackers
return trackers_announcing
def getOpenedServiceTypes(self):
back = []
# Types of address on which peers can reach me
if config.trackers_proxy == "disable" and config.tor != "always":
for ip_type, opened in list(self.site.connection_server.port_opened.items()):
if opened:
back.append(ip_type)
if self.site.connection_server.tor_manager.start_onions:
back.append("onion")
return back
@util.Noparallel(blocking=False)
def announce(self, force=False, mode="start", pex=True):
if time.time() - self.time_last_announce < 30 and not force:
return # No reannouncing within 30 secs
if force:
self.site.log.debug("Force reannounce in mode %s" % mode)
self.fileserver_port = config.fileserver_port
self.time_last_announce = time.time()
trackers = self.getAnnouncingTrackers(mode)
if config.verbose:
self.site.log.debug("Tracker announcing, trackers: %s" % trackers)
errors = []
slow = []
s = time.time()
threads = []
num_announced = 0
for tracker in trackers: # Start announce threads
tracker_stats = global_stats[tracker]
# Reduce the announce frequency for trackers that look unreliable
time_announce_allowed = time.time() - 60 * min(30, tracker_stats["num_error"])
if tracker_stats["num_error"] > 5 and tracker_stats["time_request"] > time_announce_allowed and not force:
if config.verbose:
self.site.log.debug("Tracker %s looks unreliable, announce skipped (error: %s)" % (tracker, tracker_stats["num_error"]))
continue
thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
threads.append(thread)
thread.tracker = tracker
time.sleep(0.01)
self.updateWebsocket(trackers="announcing")
gevent.joinall(threads, timeout=20) # Wait for announce finish
for thread in threads:
if thread.value is None:
continue
if thread.value is not False:
if thread.value > 1.0: # Takes more than 1 second to announce
slow.append("%.2fs %s" % (thread.value, thread.tracker))
num_announced += 1
else:
if thread.ready():
errors.append(thread.tracker)
else: # Still running
slow.append("30s+ %s" % thread.tracker)
# Save peers num
self.site.settings["peers"] = len(self.site.peers)
if len(errors) < len(threads): # At least one tracker finished
if len(trackers) == 1:
announced_to = trackers[0]
else:
announced_to = "%s/%s trackers" % (num_announced, len(threads))
if mode != "update" or config.verbose:
self.site.log.debug(
"Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
(mode, announced_to, time.time() - s, errors, slow)
)
else:
if len(threads) > 1:
self.site.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
if len(threads) == 1 and mode != "start": # Move to next tracker
self.site.log.debug("Tracker failed, skipping to next one...")
self.site.greenlet_manager.spawnLater(1.0, self.announce, force=force, mode=mode, pex=pex)
self.updateWebsocket(trackers="announced")
if pex:
self.updateWebsocket(pex="announcing")
if mode == "more": # Need more peers
self.announcePex(need_num=10)
else:
self.announcePex()
self.updateWebsocket(pex="announced")
def getTrackerHandler(self, protocol):
return None
def getAddressParts(self, tracker):
if "://" not in tracker or not re.match("^[A-Za-z0-9:/\\.#-]+$", tracker):
return None
protocol, address = tracker.split("://", 1)
if ":" in address:
ip, port = address.rsplit(":", 1)
else:
ip = address
if protocol.startswith("https"):
port = 443
else:
port = 80
back = {}
back["protocol"] = protocol
back["address"] = address
back["ip"] = ip
back["port"] = port
return back
def announceTracker(self, tracker, mode="start", num_want=10):
s = time.time()
address_parts = self.getAddressParts(tracker)
if not address_parts:
self.site.log.warning("Tracker %s error: Invalid address" % tracker)
return False
if tracker not in self.stats:
self.stats[tracker] = {"status": "", "num_request": 0, "num_success": 0, "num_error": 0, "time_request": 0, "time_last_error": 0}
last_status = self.stats[tracker]["status"]
self.stats[tracker]["status"] = "announcing"
self.stats[tracker]["time_request"] = time.time()
global_stats[tracker]["time_request"] = time.time()
if config.verbose:
self.site.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
if mode == "update":
num_want = 10
else:
num_want = 30
handler = self.getTrackerHandler(address_parts["protocol"])
error = None
try:
if handler:
peers = handler(address_parts["address"], mode=mode, num_want=num_want)
else:
raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"])
except Exception as err:
self.site.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
error = err
if error:
self.stats[tracker]["status"] = "error"
self.stats[tracker]["time_status"] = time.time()
self.stats[tracker]["last_error"] = str(error)
self.stats[tracker]["time_last_error"] = time.time()
if self.site.connection_server.has_internet:
self.stats[tracker]["num_error"] += 1
self.stats[tracker]["num_request"] += 1
global_stats[tracker]["num_request"] += 1
if self.site.connection_server.has_internet:
global_stats[tracker]["num_error"] += 1
self.updateWebsocket(tracker="error")
return False
if peers is None: # Announce skipped
self.stats[tracker]["time_status"] = time.time()
self.stats[tracker]["status"] = last_status
return None
self.stats[tracker]["status"] = "announced"
self.stats[tracker]["time_status"] = time.time()
self.stats[tracker]["num_success"] += 1
self.stats[tracker]["num_request"] += 1
global_stats[tracker]["num_request"] += 1
global_stats[tracker]["num_error"] = 0
if peers is True: # Announce success, but no peers returned
return time.time() - s
# Adding peers
added = 0
for peer in peers:
if peer["port"] == 1: # Some trackers does not accept port 0, so we send port 1 as not-connectable
peer["port"] = 0
if not peer["port"]:
continue # Don't add peers with port 0
if self.site.addPeer(peer["addr"], peer["port"], source="tracker"):
added += 1
if added:
self.site.worker_manager.onPeers()
self.site.updateWebsocket(peers_added=added)
if config.verbose:
self.site.log.debug(
"Tracker result: %s://%s (found %s peers, new: %s, total: %s)" %
(address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers))
)
return time.time() - s
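# Query connected peers for more peers using peer exchange (PEX)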
@util.Noparallel(blocking=False)
def announcePex(self, query_num=2, need_num=5):
peers = self.site.getConnectedPeers()
if len(peers) == 0: # Wait 3s for connections
time.sleep(3)
peers = self.site.getConnectedPeers()
if len(peers) == 0: # Small number of connected peers for this site, connect to any
peers = list(self.site.getRecentPeers(20))
need_num = 10
random.shuffle(peers)
done = 0
total_added = 0
for peer in peers:
num_added = peer.pex(need_num=need_num)
if num_added is not False:
done += 1
total_added += num_added
if num_added:
self.site.worker_manager.onPeers()
self.site.updateWebsocket(peers_added=num_added)
else:
time.sleep(0.1)
if done == query_num:
break
self.site.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
def updateWebsocket(self, **kwargs):
if kwargs:
param = {"event": list(kwargs.items())[0]}
else:
param = None
for ws in self.site.websockets:
ws.event("announcerChanged", self.site, param)

View file

@ -1,226 +0,0 @@
import json
import logging
import re
import os
import time
import atexit
import gevent
import util
from Plugin import PluginManager
from Content import ContentDb
from Config import config
from util import helper
from util import RateLimit
from util import Cached
@PluginManager.acceptPlugins
class SiteManager(object):
def __init__(self):
self.log = logging.getLogger("SiteManager")
self.log.debug("SiteManager created.")
self.sites = {}
self.sites_changed = int(time.time())
self.loaded = False
gevent.spawn(self.saveTimer)
atexit.register(lambda: self.save(recalculate_size=True))
# Load all sites from data/sites.json
@util.Noparallel()
def load(self, cleanup=True, startup=False):
from Debug import Debug
self.log.info("Loading sites... (cleanup: %s, startup: %s)" % (cleanup, startup))
self.loaded = False
from .Site import Site
address_found = []
added = 0
load_s = time.time()
# Load new addresses
try:
json_path = "%s/sites.json" % config.data_dir
data = json.load(open(json_path))
except Exception as err:
raise Exception("Unable to load %s: %s" % (json_path, err))
sites_need = []
for address, settings in data.items():
if address not in self.sites:
if os.path.isfile("%s/%s/content.json" % (config.data_dir, address)):
# Root content.json exists, try to load the site
s = time.time()
try:
site = Site(address, settings=settings)
site.content_manager.contents.get("content.json")
except Exception as err:
self.log.debug("Error loading site %s: %s" % (address, err))
continue
self.sites[address] = site
self.log.debug("Loaded site %s in %.3fs" % (address, time.time() - s))
added += 1
elif startup:
# No site directory, start download
self.log.debug("Found new site in sites.json: %s" % address)
sites_need.append([address, settings])
added += 1
address_found.append(address)
# Remove deleted addresses
if cleanup:
for address in list(self.sites.keys()):
if address not in address_found:
del(self.sites[address])
self.log.debug("Removed site: %s" % address)
# Remove orphan sites from content.db
content_db = ContentDb.getContentDb()
for row in content_db.execute("SELECT * FROM site").fetchall():
address = row["address"]
if address not in self.sites and address not in address_found:
self.log.info("Deleting orphan site from content.db: %s" % address)
try:
content_db.execute("DELETE FROM site WHERE ?", {"address": address})
except Exception as err:
self.log.error("Can't delete site %s from content_db: %s" % (address, err))
if address in content_db.site_ids:
del content_db.site_ids[address]
if address in content_db.sites:
del content_db.sites[address]
self.loaded = True
for address, settings in sites_need:
gevent.spawn(self.need, address, settings=settings)
if added:
self.log.info("Added %s sites in %.3fs" % (added, time.time() - load_s))
def saveDelayed(self):
RateLimit.callAsync("Save sites.json", allowed_again=5, func=self.save)
def save(self, recalculate_size=False):
if not self.sites:
self.log.debug("Save skipped: No sites found")
return
if not self.loaded:
self.log.debug("Save skipped: Not loaded")
return
s = time.time()
data = {}
# Generate data file
s = time.time()
for address, site in list(self.list().items()):
if recalculate_size:
site.settings["size"], site.settings["size_optional"] = site.content_manager.getTotalSize() # Update site size
data[address] = site.settings
data[address]["cache"] = site.getSettingsCache()
time_generate = time.time() - s
s = time.time()
if data:
helper.atomicWrite("%s/sites.json" % config.data_dir, helper.jsonDumps(data).encode("utf8"))
else:
self.log.debug("Save error: No data")
time_write = time.time() - s
# Remove cache from site settings
for address, site in self.list().items():
site.settings["cache"] = {}
self.log.debug("Saved sites in %.2fs (generate: %.2fs, write: %.2fs)" % (time.time() - s, time_generate, time_write))
def saveTimer(self):
while 1:
time.sleep(60 * 10)
self.save(recalculate_size=True)
# Checks if it's a valid address
def isAddress(self, address):
return re.match("^[A-Za-z0-9]{26,35}$", address)
def isDomain(self, address):
return False
@Cached(timeout=10)
def isDomainCached(self, address):
return self.isDomain(address)
def resolveDomain(self, domain):
return False
@Cached(timeout=10)
def resolveDomainCached(self, domain):
return self.resolveDomain(domain)
# Return: Site object or None if not found
def get(self, address):
if self.isDomainCached(address):
address_resolved = self.resolveDomainCached(address)
if address_resolved:
address = address_resolved
if not self.loaded: # Not loaded yet
self.log.debug("Loading site: %s)..." % address)
self.load()
site = self.sites.get(address)
return site
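# Add a new site by address and start downloading it; return the Site object or False if the address is invalid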
def add(self, address, all_file=True, settings=None, **kwargs):
from .Site import Site
self.sites_changed = int(time.time())
# Try to find site with different case
for recover_address, recover_site in list(self.sites.items()):
if recover_address.lower() == address.lower():
return recover_site
if not self.isAddress(address):
return False # Not a valid address
self.log.debug("Added new site: %s" % address)
config.loadTrackersFile()
site = Site(address, settings=settings)
self.sites[address] = site
if not site.settings["serving"]: # Maybe it was deleted before
site.settings["serving"] = True
site.saveSettings()
if all_file: # Also download user files on first sync
site.download(check_size=True, blind_includes=True)
return site
# Return or create site and start downloading site files
def need(self, address, *args, **kwargs):
if self.isDomainCached(address):
address_resolved = self.resolveDomainCached(address)
if address_resolved:
address = address_resolved
site = self.get(address)
if not site: # Site does not exist yet
site = self.add(address, *args, **kwargs)
return site
def delete(self, address):
self.sites_changed = int(time.time())
self.log.debug("Deleted site: %s" % address)
del(self.sites[address])
# Delete from sites.json
self.save()
# Lazy load sites
def list(self):
if not self.loaded: # Not loaded yet
self.log.debug("Sites not loaded yet...")
self.load(startup=True)
return self.sites
site_manager = SiteManager() # Singleton
if config.action == "main": # Don't connect / add myself to peerlist
peer_blacklist = [("127.0.0.1", config.fileserver_port), ("::1", config.fileserver_port)]
else:
peer_blacklist = []

View file

@ -1,636 +0,0 @@
import os
import re
import shutil
import json
import time
import errno
from collections import defaultdict
import sqlite3
import gevent.event
import util
from util import SafeRe
from Db.Db import Db
from Debug import Debug
from Config import config
from util import helper
from util import ThreadPool
from Plugin import PluginManager
from Translate import translate as _
thread_pool_fs_read = ThreadPool.ThreadPool(config.threads_fs_read, name="FS read")
thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write")
thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")
@PluginManager.acceptPlugins
class SiteStorage(object):
def __init__(self, site, allow_create=True):
self.site = site
self.directory = "%s/%s" % (config.data_dir, self.site.address) # Site data directory
self.allowed_dir = os.path.abspath(self.directory) # Only serve file within this dir
self.log = site.log
self.db = None # Db class
self.db_checked = False # Checked db tables since startup
self.event_db_busy = None # Gevent AsyncResult if db is working on rebuild
self.has_db = self.isFile("dbschema.json") # The site has schema
if not os.path.isdir(self.directory):
if allow_create:
os.mkdir(self.directory) # Create directory if not found
else:
raise Exception("Directory not exists: %s" % self.directory)
def getDbFile(self):
if self.db:
return self.db.schema["db_file"]
else:
if self.isFile("dbschema.json"):
schema = self.loadJson("dbschema.json")
return schema["db_file"]
else:
return False
# Create a new database object with the site's schema
def openDb(self, close_idle=False):
schema = self.getDbSchema()
db_path = self.getPath(schema["db_file"])
return Db(schema, db_path, close_idle=close_idle)
def closeDb(self, reason="Unknown (SiteStorage)"):
if self.db:
self.db.close(reason)
self.event_db_busy = None
self.db = None
def getDbSchema(self):
try:
self.site.needFile("dbschema.json")
schema = self.loadJson("dbschema.json")
except Exception as err:
raise Exception("dbschema.json is not a valid JSON: %s" % err)
return schema
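# Download dbschema.json if needed, open the database and rebuild it if it is missing or its tables changed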
def loadDb(self):
self.log.debug("No database, waiting for dbschema.json...")
self.site.needFile("dbschema.json", priority=3)
self.log.debug("Got dbschema.json")
self.has_db = self.isFile("dbschema.json") # Recheck if dbschema exist
if self.has_db:
schema = self.getDbSchema()
db_path = self.getPath(schema["db_file"])
if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0:
try:
self.rebuildDb(reason="Missing database")
except Exception as err:
self.log.error(err)
pass
if self.db:
self.db.close("Gettig new db for SiteStorage")
self.db = self.openDb(close_idle=True)
try:
changed_tables = self.db.checkTables()
if changed_tables:
self.rebuildDb(delete_db=False, reason="Changed tables") # TODO: only update the changed tables' data
except sqlite3.OperationalError:
pass
# Return db class
@util.Noparallel()
def getDb(self):
if self.event_db_busy: # Db not ready for queries
self.log.debug("Wating for db...")
self.event_db_busy.get() # Wait for event
if not self.db:
self.loadDb()
return self.db
def updateDbFile(self, inner_path, file=None, cur=None):
path = self.getPath(inner_path)
if cur:
db = cur.db
else:
db = self.getDb()
return db.updateJson(path, file, cur)
# Return possible db files for the site
@thread_pool_fs_read.wrap
def getDbFiles(self):
found = 0
for content_inner_path, content in self.site.content_manager.contents.items():
# content.json file itself
if self.isFile(content_inner_path):
yield content_inner_path, self.getPath(content_inner_path)
else:
self.log.debug("[MISSING] %s" % content_inner_path)
# Data files in content.json
content_inner_path_dir = helper.getDirname(content_inner_path) # Content.json dir relative to site
for file_relative_path in list(content.get("files", {}).keys()) + list(content.get("files_optional", {}).keys()):
if not file_relative_path.endswith(".json") and not file_relative_path.endswith("json.gz"):
continue # We are only interested in json files
file_inner_path = content_inner_path_dir + file_relative_path # File relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
if self.isFile(file_inner_path):
yield file_inner_path, self.getPath(file_inner_path)
else:
self.log.debug("[MISSING] %s" % file_inner_path)
found += 1
if found % 100 == 0:
time.sleep(0.001) # Context switch to avoid UI block
# Rebuild sql cache
@util.Noparallel()
@thread_pool_fs_batch.wrap
def rebuildDb(self, delete_db=True, reason="Unknown"):
self.log.info("Rebuilding db (reason: %s)..." % reason)
self.has_db = self.isFile("dbschema.json")
if not self.has_db:
return False
schema = self.loadJson("dbschema.json")
db_path = self.getPath(schema["db_file"])
if os.path.isfile(db_path) and delete_db:
if self.db:
self.closeDb("rebuilding") # Close db if open
time.sleep(0.5)
self.log.info("Deleting %s" % db_path)
try:
os.unlink(db_path)
except Exception as err:
self.log.error("Delete error: %s" % err)
if not self.db:
self.db = self.openDb()
self.event_db_busy = gevent.event.AsyncResult()
self.log.info("Rebuild: Creating tables...")
# raise DbTableError if not valid
self.db.checkTables()
cur = self.db.getCursor()
cur.logging = False
s = time.time()
self.log.info("Rebuild: Getting db files...")
db_files = list(self.getDbFiles())
num_imported = 0
num_total = len(db_files)
num_error = 0
self.log.info("Rebuild: Importing data...")
try:
if num_total > 100:
self.site.messageWebsocket(
_["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
"0000", num_total, num_error
), "rebuild", 0
)
for file_inner_path, file_path in db_files:
try:
if self.updateDbFile(file_inner_path, file=open(file_path, "rb"), cur=cur):
num_imported += 1
except Exception as err:
self.log.error("Error importing %s: %s" % (file_inner_path, Debug.formatException(err)))
num_error += 1
if num_imported and num_imported % 100 == 0:
self.site.messageWebsocket(
_["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
num_imported, num_total, num_error
),
"rebuild", int(float(num_imported) / num_total * 100)
)
time.sleep(0.001) # Context switch to avoid UI block
finally:
cur.close()
if num_total > 100:
self.site.messageWebsocket(
_["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
num_imported, num_total, num_error
), "rebuild", 100
)
self.log.info("Rebuild: Imported %s data file in %.3fs" % (num_imported, time.time() - s))
self.event_db_busy.set(True) # Event done, notify waiters
self.event_db_busy = None # Clear event
self.db.commit("Rebuilt")
return True
# Execute sql query or rebuild on dberror
def query(self, query, params=None):
if not query.strip().upper().startswith("SELECT"):
raise Exception("Only SELECT query supported")
try:
res = self.getDb().execute(query, params)
except sqlite3.DatabaseError as err:
if err.__class__.__name__ == "DatabaseError":
self.log.error("Database error: %s, query: %s, try to rebuilding it..." % (err, query))
try:
self.rebuildDb(reason="Query error")
except sqlite3.OperationalError:
pass
res = self.db.cur.execute(query, params)
else:
raise err
return res
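# Create the directory for inner_path if it does not exist yet; return True if it was created, False if it already existed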
def ensureDir(self, inner_path):
try:
os.makedirs(self.getPath(inner_path))
except OSError as err:
if err.errno == errno.EEXIST:
return False
else:
raise err
return True
# Open file object
def open(self, inner_path, mode="rb", create_dirs=False, **kwargs):
file_path = self.getPath(inner_path)
if create_dirs:
file_inner_dir = os.path.dirname(inner_path)
self.ensureDir(file_inner_dir)
return open(file_path, mode, **kwargs)
# Open file object
@thread_pool_fs_read.wrap
def read(self, inner_path, mode="rb"):
return self.open(inner_path, mode).read()
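# Write a string or file-like object to disk on the FS write thread pool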
@thread_pool_fs_write.wrap
def writeThread(self, inner_path, content):
file_path = self.getPath(inner_path)
# Create dir if not exist
self.ensureDir(os.path.dirname(inner_path))
# Write file
if hasattr(content, 'read'): # File-like object
with open(file_path, "wb") as file:
shutil.copyfileobj(content, file) # Write buff to disk
else: # Simple string
if inner_path == "content.json" and os.path.isfile(file_path):
helper.atomicWrite(file_path, content)
else:
with open(file_path, "wb") as file:
file.write(content)
# Write content to file
def write(self, inner_path, content):
self.writeThread(inner_path, content)
self.onUpdated(inner_path)
# Remove file from filesystem
def delete(self, inner_path):
file_path = self.getPath(inner_path)
os.unlink(file_path)
self.onUpdated(inner_path, file=False)
def deleteDir(self, inner_path):
dir_path = self.getPath(inner_path)
os.rmdir(dir_path)
def rename(self, inner_path_before, inner_path_after):
for retry in range(3):
rename_err = None
# To work around the "The process cannot access the file because it is being used by another process." error
try:
os.rename(self.getPath(inner_path_before), self.getPath(inner_path_after))
break
except Exception as err:
rename_err = err
self.log.error("%s rename error: %s (retry #%s)" % (inner_path_before, err, retry))
time.sleep(0.1 + retry)
if rename_err:
raise rename_err
# List files from a directory
@thread_pool_fs_read.wrap
def walk(self, dir_inner_path, ignore=None):
directory = self.getPath(dir_inner_path)
for root, dirs, files in os.walk(directory):
root = root.replace("\\", "/")
root_relative_path = re.sub("^%s" % re.escape(directory), "", root).lstrip("/")
for file_name in files:
if root_relative_path: # Not root dir
file_relative_path = root_relative_path + "/" + file_name
else:
file_relative_path = file_name
if ignore and SafeRe.match(ignore, file_relative_path):
continue
yield file_relative_path
# Don't scan directory that is in the ignore pattern
if ignore:
dirs_filtered = []
for dir_name in dirs:
if root_relative_path:
dir_relative_path = root_relative_path + "/" + dir_name
else:
dir_relative_path = dir_name
if ignore == ".*" or re.match(".*([|(]|^)%s([|)]|$)" % re.escape(dir_relative_path + "/.*"), ignore):
continue
dirs_filtered.append(dir_name)
dirs[:] = dirs_filtered
# List entries in a directory
@thread_pool_fs_read.wrap
def list(self, dir_inner_path):
directory = self.getPath(dir_inner_path)
return os.listdir(directory)
# Site content updated
def onUpdated(self, inner_path, file=None):
# Update Sql cache
should_load_to_db = inner_path.endswith(".json") or inner_path.endswith(".json.gz")
if inner_path == "dbschema.json":
self.has_db = self.isFile("dbschema.json")
# Reopen DB to check changes
if self.has_db:
self.closeDb("New dbschema")
gevent.spawn(self.getDb)
elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db
if config.verbose:
self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
try:
self.updateDbFile(inner_path, file)
except Exception as err:
self.log.error("Json %s load error: %s" % (inner_path, Debug.formatException(err)))
self.closeDb("Json load error")
# Load and parse json file
@thread_pool_fs_read.wrap
def loadJson(self, inner_path):
try:
with self.open(inner_path, "r", encoding="utf8") as file:
return json.load(file)
except Exception as err:
self.log.warning("Json load error: %s" % Debug.formatException(err))
return None
# Write formatted json file
def writeJson(self, inner_path, data):
# Write to disk
self.write(inner_path, helper.jsonDumps(data).encode("utf8"))
# Get file size
def getSize(self, inner_path):
path = self.getPath(inner_path)
try:
return os.path.getsize(path)
except Exception:
return 0
# File exist
def isFile(self, inner_path):
return os.path.isfile(self.getPath(inner_path))
# File or directory exist
def isExists(self, inner_path):
return os.path.exists(self.getPath(inner_path))
# Dir exist
def isDir(self, inner_path):
return os.path.isdir(self.getPath(inner_path))
# Security check and return path of site's file
def getPath(self, inner_path):
inner_path = inner_path.replace("\\", "/") # Windows separator fix
if not inner_path:
return self.directory
if "../" in inner_path:
raise Exception("File not allowed: %s" % inner_path)
return "%s/%s" % (self.directory, inner_path)
# Get site dir relative path
def getInnerPath(self, path):
if path == self.directory:
inner_path = ""
else:
if path.startswith(self.directory):
inner_path = path[len(self.directory) + 1:]
else:
raise Exception("File not allowed: %s" % path)
return inner_path
# Verify all files sha512sum using content.json
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
bad_files = []
back = defaultdict(int)
back["bad_files"] = bad_files
i = 0
self.log.debug("Verifing files...")
if not self.site.content_manager.contents.get("content.json"): # No content.json, download it first
self.log.debug("VerifyFile content.json not exists")
self.site.needFile("content.json", update=True) # Force update to fix corrupt file
self.site.content_manager.loadContent() # Reload content.json
for content_inner_path, content in list(self.site.content_manager.contents.items()):
back["num_content"] += 1
i += 1
if i % 50 == 0:
time.sleep(0.001) # Context switch to avoid gevent hangs
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
back["num_content_missing"] += 1
self.log.debug("[MISSING] %s" % content_inner_path)
bad_files.append(content_inner_path)
for file_relative_path in list(content.get("files", {}).keys()):
back["num_file"] += 1
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
file_path = self.getPath(file_inner_path)
if not os.path.isfile(file_path):
back["num_file_missing"] += 1
self.log.debug("[MISSING] %s" % file_inner_path)
bad_files.append(file_inner_path)
continue
if quick_check:
ok = os.path.getsize(file_path) == content["files"][file_relative_path]["size"]
if not ok:
err = "Invalid size"
else:
try:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
except Exception as _err:
err = _err
ok = False
if not ok:
back["num_file_invalid"] += 1
self.log.debug("[INVALID] %s: %s" % (file_inner_path, err))
if add_changed or content.get("cert_user_id"): # If updating own site only add changed user files
bad_files.append(file_inner_path)
# Optional files
optional_added = 0
optional_removed = 0
for file_relative_path in list(content.get("files_optional", {}).keys()):
back["num_optional"] += 1
file_node = content["files_optional"][file_relative_path]
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
file_path = self.getPath(file_inner_path)
hash_id = self.site.content_manager.hashfield.getHashId(file_node["sha512"])
if not os.path.isfile(file_path):
if self.site.content_manager.isDownloaded(file_inner_path, hash_id):
back["num_optional_removed"] += 1
self.log.debug("[OPTIONAL MISSING] %s" % file_inner_path)
self.site.content_manager.optionalRemoved(file_inner_path, hash_id, file_node["size"])
if add_optional and self.site.isDownloadable(file_inner_path):
self.log.debug("[OPTIONAL ADDING] %s" % file_inner_path)
bad_files.append(file_inner_path)
continue
if quick_check:
ok = os.path.getsize(file_path) == content["files_optional"][file_relative_path]["size"]
else:
try:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
except Exception as err:
ok = False
if ok:
if not self.site.content_manager.isDownloaded(file_inner_path, hash_id):
back["num_optional_added"] += 1
self.site.content_manager.optionalDownloaded(file_inner_path, hash_id, file_node["size"])
optional_added += 1
self.log.debug("[OPTIONAL FOUND] %s" % file_inner_path)
else:
if self.site.content_manager.isDownloaded(file_inner_path, hash_id):
back["num_optional_removed"] += 1
self.site.content_manager.optionalRemoved(file_inner_path, hash_id, file_node["size"])
optional_removed += 1
bad_files.append(file_inner_path)
self.log.debug("[OPTIONAL CHANGED] %s" % file_inner_path)
if config.verbose:
self.log.debug(
"%s verified: %s, quick: %s, optionals: +%s -%s" %
(content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
)
self.site.content_manager.contents.db.processDelayed()
time.sleep(0.001) # Context switch to avoid gevent hangs
return back
# Check and try to fix site files integrity
def updateBadFiles(self, quick_check=True):
s = time.time()
res = self.verifyFiles(
quick_check,
add_optional=True,
add_changed=not self.site.settings.get("own") # Don't overwrite changed files if site owned
)
bad_files = res["bad_files"]
self.site.bad_files = {}
if bad_files:
for bad_file in bad_files:
self.site.bad_files[bad_file] = 1
self.log.debug("Checked files in %.2fs... Found bad files: %s, Quick:%s" % (time.time() - s, len(bad_files), quick_check))
# Delete all of the site's files
@thread_pool_fs_batch.wrap
def deleteFiles(self):
site_title = self.site.content_manager.contents.get("content.json", {}).get("title", self.site.address)
message_id = "delete-%s" % self.site.address
self.log.debug("Deleting files from content.json (title: %s)..." % site_title)
files = [] # Get filenames
content_inner_paths = list(self.site.content_manager.contents.keys())
for i, content_inner_path in enumerate(content_inner_paths):
content = self.site.content_manager.contents.get(content_inner_path, {})
files.append(content_inner_path)
# Add normal files
for file_relative_path in list(content.get("files", {}).keys()):
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
files.append(file_inner_path)
# Add optional files
for file_relative_path in list(content.get("files_optional", {}).keys()):
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
files.append(file_inner_path)
if i % 100 == 0:
num_files = len(files)
self.site.messageWebsocket(
_("Deleting site <b>{site_title}</b>...<br>Collected {num_files} files"),
message_id, (i / len(content_inner_paths)) * 25
)
if self.isFile("dbschema.json"):
self.log.debug("Deleting db file...")
self.closeDb("Deleting site")
self.has_db = False
try:
schema = self.loadJson("dbschema.json")
db_path = self.getPath(schema["db_file"])
if os.path.isfile(db_path):
os.unlink(db_path)
except Exception as err:
self.log.error("Db file delete error: %s" % err)
num_files = len(files)
for i, inner_path in enumerate(files):
path = self.getPath(inner_path)
if os.path.isfile(path):
for retry in range(5):
try:
os.unlink(path)
break
except Exception as err:
self.log.error("Error removing %s: %s, try #%s" % (inner_path, err, retry))
time.sleep(float(retry) / 10)
if i % 100 == 0:
self.site.messageWebsocket(
_("Deleting site <b>{site_title}</b>...<br>Deleting file {i}/{num_files}"),
message_id, 25 + (i / num_files) * 50
)
self.onUpdated(inner_path, False)
self.log.debug("Deleting empty dirs...")
i = 0
for root, dirs, files in os.walk(self.directory, topdown=False):
for dir in dirs:
path = os.path.join(root, dir)
if os.path.isdir(path):
try:
i += 1
if i % 100 == 0:
self.site.messageWebsocket(
_("Deleting site <b>{site_title}</b>...<br>Deleting empty directories {i}"),
message_id, 85
)
os.rmdir(path)
except OSError: # Not empty
pass
if os.path.isdir(self.directory) and os.listdir(self.directory) == []:
os.rmdir(self.directory) # Remove sites directory if empty
if os.path.isdir(self.directory):
self.log.debug("Some unknown file remained in site data dir: %s..." % self.directory)
self.site.messageWebsocket(
_("Deleting site <b>{site_title}</b>...<br>Site deleted, but some unknown files left in the directory"),
message_id, 100
)
return False # Some files not deleted
else:
self.log.debug("Site %s data directory deleted: %s..." % (site_title, self.directory))
self.site.messageWebsocket(
_("Deleting site <b>{site_title}</b>...<br>All files deleted successfully"),
message_id, 100
)
return True # All clean

View file

View file

@ -1,162 +0,0 @@
#!/usr/bin/python2
from gevent import monkey
monkey.patch_all()
import os
import time
import sys
import socket
import ssl
sys.path.append(os.path.abspath("..")) # Imports relative to src dir
import io as StringIO
import gevent
from gevent.server import StreamServer
from gevent.pool import Pool
from Config import config
config.parse()
from util import SslPatch
# Server
socks = []
data = os.urandom(1024 * 100)
data += "\n"
def handle(sock_raw, addr):
socks.append(sock_raw)
sock = sock_raw
# sock = ctx.wrap_socket(sock, server_side=True)
# if sock_raw.recv( 1, gevent.socket.MSG_PEEK ) == "\x16":
# sock = gevent.ssl.wrap_socket(sock_raw, server_side=True, keyfile='key-cz.pem',
# certfile='cert-cz.pem', ciphers=ciphers, ssl_version=ssl.PROTOCOL_TLSv1)
# fp = os.fdopen(sock.fileno(), 'rb', 1024*512)
try:
while True:
line = sock.recv(16 * 1024)
if not line:
break
if line == "bye\n":
break
elif line == "gotssl\n":
sock.sendall("yes\n")
sock = gevent.ssl.wrap_socket(
sock_raw, server_side=True, keyfile='../../data/key-rsa.pem', certfile='../../data/cert-rsa.pem',
ciphers=ciphers, ssl_version=ssl.PROTOCOL_TLSv1
)
else:
sock.sendall(data)
except Exception as err:
print(err)
try:
sock.shutdown(gevent.socket.SHUT_WR)
sock.close()
except:
pass
socks.remove(sock_raw)
pool = Pool(1000) # do not accept more than 1000 connections
server = StreamServer(('127.0.0.1', 1234), handle)
server.start()
# Client
total_num = 0
total_bytes = 0
clipher = None
ciphers = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDH+AES128:ECDHE-RSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:HIGH:" + \
"!aNULL:!eNULL:!EXPORT:!DSS:!DES:!RC4:!3DES:!MD5:!PSK"
# ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
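# Connect to the local test server, upgrade to TLS if the server supports it, then download 20 responses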
def getData():
global total_num, total_bytes, clipher
data = None
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock = socket.ssl(s)
# sock = ssl.wrap_socket(sock)
sock.connect(("127.0.0.1", 1234))
# sock.do_handshake()
# clipher = sock.cipher()
sock.send("gotssl\n")
if sock.recv(128) == "yes\n":
sock = ssl.wrap_socket(sock, ciphers=ciphers, ssl_version=ssl.PROTOCOL_TLSv1)
sock.do_handshake()
clipher = sock.cipher()
for req in range(20):
sock.sendall("req\n")
buff = StringIO.StringIO()
data = sock.recv(16 * 1024)
buff.write(data)
if not data:
break
while not data.endswith("\n"):
data = sock.recv(16 * 1024)
if not data:
break
buff.write(data)
total_num += 1
total_bytes += buff.tell()
if not data:
print("No data")
sock.shutdown(gevent.socket.SHUT_WR)
sock.close()
s = time.time()
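# Print request/transfer statistics and process memory usage once per second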
def info():
import psutil
import os
process = psutil.Process(os.getpid())
if "memory_info" in dir(process):
memory_info = process.memory_info
else:
memory_info = process.get_memory_info
while 1:
print(total_num, "req", (total_bytes / 1024), "kbytes", "transferred in", time.time() - s, end=' ')
print("using", clipher, "Mem:", memory_info()[0] / float(2 ** 20))
time.sleep(1)
gevent.spawn(info)
for test in range(1):
clients = []
for i in range(500): # Thread
clients.append(gevent.spawn(getData))
gevent.joinall(clients)
print(total_num, "req", (total_bytes / 1024), "kbytes", "transferred in", time.time() - s)
# Separate client/server process:
# 10*10*100:
# Raw: 10000 req 1000009 kbytes transfered in 5.39999985695
# RSA 2048: 10000 req 1000009 kbytes transfered in 27.7890000343 using ('ECDHE-RSA-AES256-SHA', 'TLSv1/SSLv3', 256)
# ECC: 10000 req 1000009 kbytes transfered in 26.1959998608 using ('ECDHE-ECDSA-AES256-SHA', 'TLSv1/SSLv3', 256)
# ECC: 10000 req 1000009 kbytes transfered in 28.2410001755 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 13.3828125
#
# 10*100*10:
# Raw: 10000 req 1000009 kbytes transfered in 7.02700018883 Mem: 14.328125
# RSA 2048: 10000 req 1000009 kbytes transfered in 44.8860001564 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.078125
# ECC: 10000 req 1000009 kbytes transfered in 37.9430000782 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.0234375
#
# 1*100*100:
# Raw: 10000 req 1000009 kbytes transfered in 4.64400005341 Mem: 14.06640625
# RSA: 10000 req 1000009 kbytes transfered in 24.2300000191 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 19.7734375
# ECC: 10000 req 1000009 kbytes transfered in 22.8849999905 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 17.8125
# AES128: 10000 req 1000009 kbytes transfered in 21.2839999199 using ('AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.1328125
# ECC+128: 10000 req 1000009 kbytes transfered in 20.496999979 using ('ECDHE-ECDSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.40234375
#
#
# Single process:
# 1*100*100
# RSA: 10000 req 1000009 kbytes transfered in 41.7899999619 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 26.91015625
#
# 10*10*100
# RSA: 10000 req 1000009 kbytes transfered in 40.1640000343 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.94921875

View file

@ -1,23 +0,0 @@
import logging
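# Context manager that temporarily wraps obj.func_name and records the arguments of every call for test assertions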
class Spy:
def __init__(self, obj, func_name):
self.obj = obj
self.__name__ = func_name
self.func_original = getattr(self.obj, func_name)
self.calls = []
def __enter__(self, *args, **kwargs):
logging.debug("Spy started")
def loggedFunc(cls, *args, **kwargs):
call = dict(enumerate(args, 1))
call[0] = cls
call.update(kwargs)
logging.debug("Spy call: %s" % call)
self.calls.append(call)
return self.func_original(cls, *args, **kwargs)
setattr(self.obj, self.__name__, loggedFunc)
return self.calls
def __exit__(self, *args, **kwargs):
setattr(self.obj, self.__name__, self.func_original)

View file

@ -1,59 +0,0 @@
import time
from util import Cached
class CachedObject:
def __init__(self):
self.num_called_add = 0
self.num_called_multiply = 0
self.num_called_none = 0
@Cached(timeout=1)
def calcAdd(self, a, b):
self.num_called_add += 1
return a + b
@Cached(timeout=1)
def calcMultiply(self, a, b):
self.num_called_multiply += 1
return a * b
@Cached(timeout=1)
def none(self):
self.num_called_none += 1
return None
class TestCached:
def testNoneValue(self):
cached_object = CachedObject()
assert cached_object.none() is None
assert cached_object.none() is None
assert cached_object.num_called_none == 1
time.sleep(2)
assert cached_object.none() is None
assert cached_object.num_called_none == 2
def testCall(self):
cached_object = CachedObject()
assert cached_object.calcAdd(1, 2) == 3
assert cached_object.calcAdd(1, 2) == 3
assert cached_object.calcMultiply(1, 2) == 2
assert cached_object.calcMultiply(1, 2) == 2
assert cached_object.num_called_add == 1
assert cached_object.num_called_multiply == 1
assert cached_object.calcAdd(2, 3) == 5
assert cached_object.calcAdd(2, 3) == 5
assert cached_object.num_called_add == 2
assert cached_object.calcAdd(1, 2) == 3
assert cached_object.calcMultiply(2, 3) == 6
assert cached_object.num_called_add == 2
assert cached_object.num_called_multiply == 2
time.sleep(2)
assert cached_object.calcAdd(1, 2) == 3
assert cached_object.num_called_add == 3

View file

@ -1,31 +0,0 @@
import pytest
import Config
@pytest.mark.usefixtures("resetSettings")
class TestConfig:
def testParse(self):
# Defaults
config_test = Config.Config("zeronet.py".split(" "))
config_test.parse(silent=True, parse_config=False)
assert not config_test.debug
assert not config_test.debug_socket
# Test parse command line with unknown parameters (ui_password)
config_test = Config.Config("zeronet.py --debug --debug_socket --ui_password hello".split(" "))
config_test.parse(silent=True, parse_config=False)
assert config_test.debug
assert config_test.debug_socket
with pytest.raises(AttributeError):
config_test.ui_password
# More complex test
args = "zeronet.py --unknown_arg --debug --debug_socket --ui_restrict 127.0.0.1 1.2.3.4 "
args += "--another_unknown argument --use_openssl False siteSign address privatekey --inner_path users/content.json"
config_test = Config.Config(args.split(" "))
config_test.parse(silent=True, parse_config=False)
assert config_test.debug
assert "1.2.3.4" in config_test.ui_restrict
assert not config_test.use_openssl
assert config_test.inner_path == "users/content.json"

View file

@ -1,118 +0,0 @@
import time
import socket
import gevent
import pytest
import mock
from Crypt import CryptConnection
from Connection import ConnectionServer
from Config import config
@pytest.mark.usefixtures("resetSettings")
class TestConnection:
def testIpv6(self, file_server6):
assert ":" in file_server6.ip
client = ConnectionServer(file_server6.ip, 1545)
connection = client.getConnection(file_server6.ip, 1544)
assert connection.ping()
# Close connection
connection.close()
client.stop()
time.sleep(0.01)
assert len(file_server6.connections) == 0
# Should not be able to reach it on an ipv4 ip
with pytest.raises(socket.error) as err:
client = ConnectionServer("127.0.0.1", 1545)
connection = client.getConnection("127.0.0.1", 1544)
def testSslConnection(self, file_server):
client = ConnectionServer(file_server.ip, 1545)
assert file_server != client
# Connect to myself
with mock.patch('Config.config.ip_local', return_value=[]): # SSL not used for local ips
connection = client.getConnection(file_server.ip, 1544)
assert len(file_server.connections) == 1
assert connection.handshake
assert connection.crypt
# Close connection
connection.close("Test ended")
client.stop()
time.sleep(0.1)
assert len(file_server.connections) == 0
assert file_server.num_incoming == 2 # One for file_server fixture, one for the test
def testRawConnection(self, file_server):
client = ConnectionServer(file_server.ip, 1545)
assert file_server != client
# Remove all supported crypto
crypt_supported_bk = CryptConnection.manager.crypt_supported
CryptConnection.manager.crypt_supported = []
with mock.patch('Config.config.ip_local', return_value=[]): # SSL not used for local ips
connection = client.getConnection(file_server.ip, 1544)
assert len(file_server.connections) == 1
assert not connection.crypt
# Close connection
connection.close()
client.stop()
time.sleep(0.01)
assert len(file_server.connections) == 0
# Reset supported crypts
CryptConnection.manager.crypt_supported = crypt_supported_bk
def testPing(self, file_server, site):
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
assert connection.ping()
connection.close()
client.stop()
def testGetConnection(self, file_server):
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
# Get connection by ip/port
connection2 = client.getConnection(file_server.ip, 1544)
assert connection == connection2
# Get connection by peerid
assert not client.getConnection(file_server.ip, 1544, peer_id="notexists", create=False)
connection2 = client.getConnection(file_server.ip, 1544, peer_id=connection.handshake["peer_id"], create=False)
assert connection2 == connection
connection.close()
client.stop()
def testFloodProtection(self, file_server):
whitelist = file_server.whitelist # Save for reset
file_server.whitelist = [] # Disable 127.0.0.1 whitelist
client = ConnectionServer(file_server.ip, 1545)
# Only allow 6 connections in 1 minute
for reconnect in range(6):
connection = client.getConnection(file_server.ip, 1544)
assert connection.handshake
connection.close()
# The 7th one will time out
with pytest.raises(gevent.Timeout):
with gevent.Timeout(0.1):
connection = client.getConnection(file_server.ip, 1544)
# Reset whitelist
file_server.whitelist = whitelist

View file

@ -1,273 +0,0 @@
import json
import time
import io
import pytest
from Crypt import CryptBitcoin
from Content.ContentManager import VerifyError, SignError
from util.SafeRe import UnsafePatternError
@pytest.mark.usefixtures("resetSettings")
class TestContent:
privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"
def testInclude(self, site):
# Rules defined in parent content.json
rules = site.content_manager.getRules("data/test_include/content.json")
assert rules["signers"] == ["15ik6LeBWnACWfaika1xqGapRZ1zh3JpCo"] # Valid signer
assert rules["user_name"] == "test" # Extra data
assert rules["max_size"] == 20000 # Max size of files
assert not rules["includes_allowed"] # Don't allow more includes
assert rules["files_allowed"] == "data.json" # Allowed file pattern
# Valid signers for "data/test_include/content.json"
valid_signers = site.content_manager.getValidSigners("data/test_include/content.json")
assert "15ik6LeBWnACWfaika1xqGapRZ1zh3JpCo" in valid_signers # Extra valid signer defined in parent content.json
assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in valid_signers # The site itself
assert len(valid_signers) == 2 # No more
# Valid signers for "data/users/content.json"
valid_signers = site.content_manager.getValidSigners("data/users/content.json")
assert "1LSxsKfC9S9TVXGGNSM3vPHjyW82jgCX5f" in valid_signers # Extra valid signer defined in parent content.json
assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in valid_signers # The site itself
assert len(valid_signers) == 2
# Valid signers for root content.json
assert site.content_manager.getValidSigners("content.json") == ["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
def testIncludeLimits(self, site, crypt_bitcoin_lib):
# Data validation
res = []
data_dict = {
"files": {
"data.json": {
"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906",
"size": 505
}
},
"modified": time.time()
}
# Normal data
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)}
data_json = json.dumps(data_dict).encode()
data = io.BytesIO(data_json)
assert site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
# Reset
del data_dict["signs"]
# Too large
data_dict["files"]["data.json"]["size"] = 200000 # Emulate 2MB sized data.json
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "Include too large" in str(err.value)
# Reset
data_dict["files"]["data.json"]["size"] = 505
del data_dict["signs"]
# Not allowed file
data_dict["files"]["notallowed.exe"] = data_dict["files"]["data.json"]
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "File not allowed" in str(err.value)
# Reset
del data_dict["files"]["notallowed.exe"]
del data_dict["signs"]
# Should work again
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)}
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
@pytest.mark.parametrize("inner_path", ["content.json", "data/test_include/content.json", "data/users/content.json"])
def testSign(self, site, inner_path):
# Bad privatekey
with pytest.raises(SignError) as err:
site.content_manager.sign(inner_path, privatekey="5aaa3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMnaa", filewrite=False)
assert "Private key invalid" in str(err.value)
# Good privatekey
content = site.content_manager.sign(inner_path, privatekey=self.privatekey, filewrite=False)
content_old = site.content_manager.contents[inner_path] # Content before the sign
assert not content_old == content # Timestamp changed
assert site.address in content["signs"] # Used the site's private key to sign
if inner_path == "content.json":
assert len(content["files"]) == 17
elif inner_path == "data/test-include/content.json":
assert len(content["files"]) == 1
elif inner_path == "data/users/content.json":
assert len(content["files"]) == 0
# Everything should be same as before except the modified timestamp and the signs
assert (
{key: val for key, val in content_old.items() if key not in ["modified", "signs", "sign", "zeronet_version"]}
==
{key: val for key, val in content.items() if key not in ["modified", "signs", "sign", "zeronet_version"]}
)
def testSignOptionalFiles(self, site):
for hash in list(site.content_manager.hashfield):
site.content_manager.hashfield.remove(hash)
assert len(site.content_manager.hashfield) == 0
site.content_manager.contents["content.json"]["optional"] = "((data/img/zero.*))"
content_optional = site.content_manager.sign(privatekey=self.privatekey, filewrite=False, remove_missing_optional=True)
del site.content_manager.contents["content.json"]["optional"]
content_nooptional = site.content_manager.sign(privatekey=self.privatekey, filewrite=False, remove_missing_optional=True)
assert len(content_nooptional.get("files_optional", {})) == 0 # No optional files if no pattern
assert len(content_optional["files_optional"]) > 0
assert len(site.content_manager.hashfield) == len(content_optional["files_optional"]) # Hashed optional files should be added to hashfield
assert len(content_nooptional["files"]) > len(content_optional["files"])
def testFileInfo(self, site):
assert "sha512" in site.content_manager.getFileInfo("index.html")
assert site.content_manager.getFileInfo("data/img/domain.png")["content_inner_path"] == "content.json"
assert site.content_manager.getFileInfo("data/users/hello.png")["content_inner_path"] == "data/users/content.json"
assert site.content_manager.getFileInfo("data/users/content.json")["content_inner_path"] == "data/users/content.json"
assert not site.content_manager.getFileInfo("notexist")
# Optional file
file_info_optional = site.content_manager.getFileInfo("data/optional.txt")
assert "sha512" in file_info_optional
assert file_info_optional["optional"] is True
# User content.json that does not exist yet
assert "cert_signers" in site.content_manager.getFileInfo("data/users/unknown/content.json")
# Optional user file
file_info_optional = site.content_manager.getFileInfo("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert "sha512" in file_info_optional
assert file_info_optional["optional"] is True
def testVerify(self, site, crypt_bitcoin_lib):
inner_path = "data/test_include/content.json"
data_dict = site.storage.loadJson(inner_path)
data = io.BytesIO(json.dumps(data_dict).encode("utf8"))
# Re-sign
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
# Wrong address
data_dict["address"] = "Othersite"
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Wrong site address" in str(err.value)
# Wrong inner_path
data_dict["address"] = "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
data_dict["inner_path"] = "content.json"
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Wrong inner_path" in str(err.value)
# Everything right again
data_dict["address"] = "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
data_dict["inner_path"] = inner_path
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
def testVerifyInnerPath(self, site, crypt_bitcoin_lib):
inner_path = "content.json"
data_dict = site.storage.loadJson(inner_path)
for good_relative_path in ["data.json", "out/data.json", "Any File [by none] (1).jpg", "árvzítűrő/tükörfúrógép.txt"]:
data_dict["files"] = {good_relative_path: {"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906", "size": 505}}
if "sign" in data_dict:
del data_dict["sign"]
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
for bad_relative_path in ["../data.json", "data/" * 100, "invalid|file.jpg", "con.txt", "any/con.txt"]:
data_dict["files"] = {bad_relative_path: {"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906", "size": 505}}
if "sign" in data_dict:
del data_dict["sign"]
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Invalid relative path" in str(err.value)
@pytest.mark.parametrize("key", ["ignore", "optional"])
def testSignUnsafePattern(self, site, key):
site.content_manager.contents["content.json"][key] = "([a-zA-Z]+)*"
with pytest.raises(UnsafePatternError) as err:
site.content_manager.sign("content.json", privatekey=self.privatekey, filewrite=False)
assert "Potentially unsafe" in str(err.value)
def testVerifyUnsafePattern(self, site, crypt_bitcoin_lib):
site.content_manager.contents["content.json"]["includes"]["data/test_include/content.json"]["files_allowed"] = "([a-zA-Z]+)*"
with pytest.raises(UnsafePatternError) as err:
with site.storage.open("data/test_include/content.json") as data:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "Potentially unsafe" in str(err.value)
site.content_manager.contents["data/users/content.json"]["user_contents"]["permission_rules"]["([a-zA-Z]+)*"] = {"max_size": 0}
with pytest.raises(UnsafePatternError) as err:
with site.storage.open("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json") as data:
site.content_manager.verifyFile("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", data, ignore_same=False)
assert "Potentially unsafe" in str(err.value)
def testPathValidation(self, site):
assert site.content_manager.isValidRelativePath("test.txt")
assert site.content_manager.isValidRelativePath("test/!@#$%^&().txt")
assert site.content_manager.isValidRelativePath("ÜøßÂŒƂÆÇ.txt")
assert site.content_manager.isValidRelativePath("тест.текст")
assert site.content_manager.isValidRelativePath("𝐮𝐧𝐢𝐜𝐨𝐝𝐞𝑖𝑠𝒂𝒘𝒆𝒔𝒐𝒎𝒆")
# Test rules based on https://stackoverflow.com/questions/1976007/what-characters-are-forbidden-in-windows-and-linux-directory-names
assert not site.content_manager.isValidRelativePath("any\\hello.txt") # \ not allowed
assert not site.content_manager.isValidRelativePath("/hello.txt") # Cannot start with /
assert not site.content_manager.isValidRelativePath("\\hello.txt") # Cannot start with \
assert not site.content_manager.isValidRelativePath("../hello.txt") # Not allowed .. in path
assert not site.content_manager.isValidRelativePath("\0hello.txt") # NULL character
assert not site.content_manager.isValidRelativePath("\31hello.txt") # 0-31 (ASCII control characters)
assert not site.content_manager.isValidRelativePath("any/hello.txt ") # Cannot end with space
assert not site.content_manager.isValidRelativePath("any/hello.txt.") # Cannot end with dot
assert site.content_manager.isValidRelativePath(".hello.txt") # Allow start with dot
assert not site.content_manager.isValidRelativePath("any/CON") # Protected names on Windows
assert not site.content_manager.isValidRelativePath("CON/any.txt")
assert not site.content_manager.isValidRelativePath("any/lpt1.txt")
assert site.content_manager.isValidRelativePath("any/CONAN")
assert not site.content_manager.isValidRelativePath("any/CONOUT$")
assert not site.content_manager.isValidRelativePath("a" * 256) # Max 255 characters allowed

View file

@ -1,390 +0,0 @@
import json
import io
import pytest
from Crypt import CryptBitcoin
from Content.ContentManager import VerifyError, SignError
@pytest.mark.usefixtures("resetSettings")
class TestContentUser:
def testSigners(self, site):
# File info for a user file that does not exist
file_info = site.content_manager.getFileInfo("data/users/notexist/data.json")
assert file_info["content_inner_path"] == "data/users/notexist/content.json"
file_info = site.content_manager.getFileInfo("data/users/notexist/a/b/data.json")
assert file_info["content_inner_path"] == "data/users/notexist/content.json"
valid_signers = site.content_manager.getValidSigners("data/users/notexist/content.json")
assert valid_signers == ["14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet", "notexist", "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
# File info for an existing user file
valid_signers = site.content_manager.getValidSigners("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json")
assert '1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT' in valid_signers # The site address
assert '14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet' in valid_signers # Admin user defined in data/users/content.json
assert '1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C' in valid_signers # The user itself
assert len(valid_signers) == 3 # No more valid signers
# Valid signer for banned user
user_content = site.storage.loadJson("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json")
user_content["cert_user_id"] = "bad@zeroid.bit"
valid_signers = site.content_manager.getValidSigners("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert '1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT' in valid_signers # The site address
assert '14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet' in valid_signers # Admin user defined in data/users/content.json
assert '1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C' not in valid_signers # The user itself
def testRules(self, site):
# We are going to manipulate the rules in this test based on data/users/content.json
user_content = site.storage.loadJson("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json")
# Known user
user_content["cert_auth_type"] = "web"
user_content["cert_user_id"] = "nofish@zeroid.bit"
rules = site.content_manager.getRules("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert rules["max_size"] == 100000
assert "1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C" in rules["signers"]
# Unknown user
user_content["cert_auth_type"] = "web"
user_content["cert_user_id"] = "noone@zeroid.bit"
rules = site.content_manager.getRules("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert rules["max_size"] == 10000
assert "1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C" in rules["signers"]
# User with more size limit based on auth type
user_content["cert_auth_type"] = "bitmsg"
user_content["cert_user_id"] = "noone@zeroid.bit"
rules = site.content_manager.getRules("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert rules["max_size"] == 15000
assert "1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C" in rules["signers"]
# Banned user
user_content["cert_auth_type"] = "web"
user_content["cert_user_id"] = "bad@zeroid.bit"
rules = site.content_manager.getRules("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert "1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C" not in rules["signers"]
def testRulesAddress(self, site):
user_inner_path = "data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/content.json"
user_content = site.storage.loadJson(user_inner_path)
rules = site.content_manager.getRules(user_inner_path, user_content)
assert rules["max_size"] == 10000
assert "1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9" in rules["signers"]
users_content = site.content_manager.contents["data/users/content.json"]
# Ban user based on address
users_content["user_contents"]["permissions"]["1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9"] = False
rules = site.content_manager.getRules(user_inner_path, user_content)
assert "1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9" not in rules["signers"]
# Change max allowed size
users_content["user_contents"]["permissions"]["1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9"] = {"max_size": 20000}
rules = site.content_manager.getRules(user_inner_path, user_content)
assert rules["max_size"] == 20000
def testVerifyAddress(self, site):
privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv" # For 1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT
user_inner_path = "data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/content.json"
data_dict = site.storage.loadJson(user_inner_path)
users_content = site.content_manager.contents["data/users/content.json"]
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
# Test error on 15k data.json
data_dict["files"]["data.json"]["size"] = 1024 * 15
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
assert "Include too large" in str(err.value)
# Give more space based on address
users_content["user_contents"]["permissions"]["1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9"] = {"max_size": 20000}
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
def testVerify(self, site):
privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv" # For 1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT
user_inner_path = "data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/content.json"
data_dict = site.storage.loadJson(user_inner_path)
users_content = site.content_manager.contents["data/users/content.json"]
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
# Test max size exception by setting allowed to 0
rules = site.content_manager.getRules(user_inner_path, data_dict)
assert rules["max_size"] == 10000
assert users_content["user_contents"]["permission_rules"][".*"]["max_size"] == 10000
users_content["user_contents"]["permission_rules"][".*"]["max_size"] = 0
rules = site.content_manager.getRules(user_inner_path, data_dict)
assert rules["max_size"] == 0
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
assert "Include too large" in str(err.value)
users_content["user_contents"]["permission_rules"][".*"]["max_size"] = 10000 # Reset
# Test max optional size exception
# 1 MB gif = Allowed
data_dict["files_optional"]["peanut-butter-jelly-time.gif"]["size"] = 1024 * 1024
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
assert site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
# 100 MB gif = Not allowed
data_dict["files_optional"]["peanut-butter-jelly-time.gif"]["size"] = 100 * 1024 * 1024
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
assert "Include optional files too large" in str(err.value)
data_dict["files_optional"]["peanut-butter-jelly-time.gif"]["size"] = 1024 * 1024 # Reset
# hello.exe = Not allowed
data_dict["files_optional"]["hello.exe"] = data_dict["files_optional"]["peanut-butter-jelly-time.gif"]
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
assert "Optional file not allowed" in str(err.value)
del data_dict["files_optional"]["hello.exe"] # Reset
# Includes not allowed in user content
data_dict["includes"] = {"other.json": {}}
del data_dict["signs"] # Remove signs before signing
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), privatekey)
}
data = io.BytesIO(json.dumps(data_dict).encode())
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(user_inner_path, data, ignore_same=False)
assert "Includes not allowed" in str(err.value)
def testCert(self, site):
# user_addr = "1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C"
user_priv = "5Kk7FSA63FC2ViKmKLuBxk9gQkaQ5713hKq8LmFAf4cVeXh6K6A"
# cert_addr = "14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet"
cert_priv = "5JusJDSjHaMHwUjDT3o6eQ54pA6poo8La5fAgn1wNc3iK59jxjA"
# Check if the user file is loaded
assert "data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json" in site.content_manager.contents
user_content = site.content_manager.contents["data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"]
rules_content = site.content_manager.contents["data/users/content.json"]
# Override valid cert signers for the test
rules_content["user_contents"]["cert_signers"]["zeroid.bit"] = [
"14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet",
"1iD5ZQJMNXu43w1qLB8sfdHVKppVMduGz"
]
# Check valid cert signers
rules = site.content_manager.getRules("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
assert rules["cert_signers"] == {"zeroid.bit": [
"14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet",
"1iD5ZQJMNXu43w1qLB8sfdHVKppVMduGz"
]}
# Sign a valid cert
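# The payload signed by the cert issuer is '<user dir address>#<cert_auth_type>/<username>'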
user_content["cert_sign"] = CryptBitcoin.sign("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C#%s/%s" % (
user_content["cert_auth_type"],
user_content["cert_user_id"].split("@")[0]
), cert_priv)
# Verify cert
assert site.content_manager.verifyCert("data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_content)
# Verify that the cert is not valid for another address
assert not site.content_manager.verifyCert("data/users/badaddress/content.json", user_content)
# Sign user content
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
# Test user cert
assert site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
# Test banned user
cert_user_id = user_content["cert_user_id"] # My username
site.content_manager.contents["data/users/content.json"]["user_contents"]["permissions"][cert_user_id] = False
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Valid signs: 0/1" in str(err.value)
del site.content_manager.contents["data/users/content.json"]["user_contents"]["permissions"][cert_user_id] # Reset
# Test invalid cert
user_content["cert_sign"] = CryptBitcoin.sign(
"badaddress#%s/%s" % (user_content["cert_auth_type"], user_content["cert_user_id"]), cert_priv
)
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Invalid cert" in str(err.value)
# Test banned user, signed by the site owner
user_content["cert_sign"] = CryptBitcoin.sign("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C#%s/%s" % (
user_content["cert_auth_type"],
user_content["cert_user_id"].split("@")[0]
), cert_priv)
cert_user_id = user_content["cert_user_id"] # My username
site.content_manager.contents["data/users/content.json"]["user_contents"]["permissions"][cert_user_id] = False
site_privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv" # For 1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT
del user_content["signs"] # Remove signs before signing
user_content["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(user_content, sort_keys=True), site_privatekey)
}
assert site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(user_content).encode()), ignore_same=False
)
def testMissingCert(self, site):
user_priv = "5Kk7FSA63FC2ViKmKLuBxk9gQkaQ5713hKq8LmFAf4cVeXh6K6A"
cert_priv = "5JusJDSjHaMHwUjDT3o6eQ54pA6poo8La5fAgn1wNc3iK59jxjA"
user_content = site.content_manager.contents["data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"]
rules_content = site.content_manager.contents["data/users/content.json"]
# Override valid cert signers for the test
rules_content["user_contents"]["cert_signers"]["zeroid.bit"] = [
"14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet",
"1iD5ZQJMNXu43w1qLB8sfdHVKppVMduGz"
]
# Sign a valid cert
user_content["cert_sign"] = CryptBitcoin.sign("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C#%s/%s" % (
user_content["cert_auth_type"],
user_content["cert_user_id"].split("@")[0]
), cert_priv)
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
assert site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
# Test invalid cert_user_id
user_content["cert_user_id"] = "nodomain"
user_content["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(user_content, sort_keys=True), user_priv)
}
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Invalid domain in cert_user_id" in str(err.value)
# Test removed cert
del user_content["cert_user_id"]
del user_content["cert_auth_type"]
del user_content["signs"] # Remove signs before signing
user_content["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(user_content, sort_keys=True), user_priv)
}
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Missing cert_user_id" in str(err.value)
def testCertSignersPattern(self, site):
user_priv = "5Kk7FSA63FC2ViKmKLuBxk9gQkaQ5713hKq8LmFAf4cVeXh6K6A"
cert_priv = "5JusJDSjHaMHwUjDT3o6eQ54pA6poo8La5fAgn1wNc3iK59jxjA" # For 14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet
user_content = site.content_manager.contents["data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"]
rules_content = site.content_manager.contents["data/users/content.json"]
# Override valid cert signers for the test
rules_content["user_contents"]["cert_signers_pattern"] = "14wgQ[0-9][A-Z]"
# Sign a valid cert
user_content["cert_user_id"] = "certuser@14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet"
user_content["cert_sign"] = CryptBitcoin.sign("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C#%s/%s" % (
user_content["cert_auth_type"],
"certuser"
), cert_priv)
signed_content = site.content_manager.sign(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json", user_priv, filewrite=False
)
assert site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
# Cert does not match the pattern
rules_content["user_contents"]["cert_signers_pattern"] = "14wgX[0-9][A-Z]"
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Invalid cert signer: 14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet" in str(err.value)
# Removed cert_signers_pattern
del rules_content["user_contents"]["cert_signers_pattern"]
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(
"data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json",
io.BytesIO(json.dumps(signed_content).encode()), ignore_same=False
)
assert "Invalid cert signer: 14wgQ4VDDZNoRMFF4yCDuTrBSHmYhL3bet" in str(err.value)
def testNewFile(self, site):
privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv" # For 1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT
inner_path = "data/users/1NEWrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"
site.storage.writeJson(inner_path, {"test": "data"})
site.content_manager.sign(inner_path, privatekey)
assert "test" in site.storage.loadJson(inner_path)
site.storage.delete(inner_path)

View file

@ -1,48 +0,0 @@
from Crypt import CryptBitcoin
class TestCryptBitcoin:
def testSign(self, crypt_bitcoin_lib):
privatekey = "5K9S6dVpufGnroRgFrT6wsKiz2mJRYsC73eWDmajaHserAp3F1C"
privatekey_bad = "5Jbm9rrusXyApAoM8YoM4Rja337zMMoBUMRJ1uijiguU2aZRnwC"
# Get address by privatekey
address = crypt_bitcoin_lib.privatekeyToAddress(privatekey)
assert address == "1MpDMxFeDUkiHohxx9tbGLeEGEuR4ZNsJz"
address_bad = crypt_bitcoin_lib.privatekeyToAddress(privatekey_bad)
assert address_bad != "1MpDMxFeDUkiHohxx9tbGLeEGEuR4ZNsJz"
# Text signing
data_len_list = list(range(0, 300, 10))
data_len_list += [1024, 2048, 1024 * 128, 1024 * 1024, 1024 * 2048]
for data_len in data_len_list:
data = data_len * "!"
sign = crypt_bitcoin_lib.sign(data, privatekey)
assert crypt_bitcoin_lib.verify(data, address, sign)
assert not crypt_bitcoin_lib.verify("invalid" + data, address, sign)
# Signed by bad privatekey
sign_bad = crypt_bitcoin_lib.sign("hello", privatekey_bad)
assert not crypt_bitcoin_lib.verify("hello", address, sign_bad)
def testVerify(self, crypt_bitcoin_lib):
sign_uncompressed = b'G6YkcFTuwKMVMHI2yycGQIFGbCZVNsZEZvSlOhKpHUt/BlADY94egmDAWdlrbbFrP9wH4aKcEfbLO8sa6f63VU0='
assert crypt_bitcoin_lib.verify("1NQUem2M4cAqWua6BVFBADtcSP55P4QobM#web/gitcenter", "19Bir5zRm1yo4pw9uuxQL8xwf9b7jqMpR", sign_uncompressed)
sign_compressed = b'H6YkcFTuwKMVMHI2yycGQIFGbCZVNsZEZvSlOhKpHUt/BlADY94egmDAWdlrbbFrP9wH4aKcEfbLO8sa6f63VU0='
assert crypt_bitcoin_lib.verify("1NQUem2M4cAqWua6BVFBADtcSP55P4QobM#web/gitcenter", "1KH5BdNnqxh2KRWMMT8wUXzUgz4vVQ4S8p", sign_compressed)
def testNewPrivatekey(self):
assert CryptBitcoin.newPrivatekey() != CryptBitcoin.newPrivatekey()
assert CryptBitcoin.privatekeyToAddress(CryptBitcoin.newPrivatekey())
def testNewSeed(self):
assert CryptBitcoin.newSeed() != CryptBitcoin.newSeed()
assert CryptBitcoin.privatekeyToAddress(
CryptBitcoin.hdPrivatekey(CryptBitcoin.newSeed(), 0)
)
assert CryptBitcoin.privatekeyToAddress(
CryptBitcoin.hdPrivatekey(CryptBitcoin.newSeed(), 2**256)
)

View file

@ -1,23 +0,0 @@
import os
from Config import config
from Crypt import CryptConnection
class TestCryptConnection:
def testSslCert(self):
# Remove old certs
if os.path.isfile("%s/cert-rsa.pem" % config.data_dir):
os.unlink("%s/cert-rsa.pem" % config.data_dir)
if os.path.isfile("%s/key-rsa.pem" % config.data_dir):
os.unlink("%s/key-rsa.pem" % config.data_dir)
# Generate certs
CryptConnection.manager.loadCerts()
assert "tls-rsa" in CryptConnection.manager.crypt_supported
assert CryptConnection.manager.selectCrypt(["tls-rsa", "unknown"]) == "tls-rsa" # It should choose the known crypt
# Check openssl cert generation
assert os.path.isfile("%s/cert-rsa.pem" % config.data_dir)
assert os.path.isfile("%s/key-rsa.pem" % config.data_dir)

View file

@ -1,31 +0,0 @@
import base64
from Crypt import CryptHash
sha512t_sum_hex = "2e9466d8aa1f340c91203b4ddbe9b6669879616a1b8e9571058a74195937598d"
sha512t_sum_bin = b".\x94f\xd8\xaa\x1f4\x0c\x91 ;M\xdb\xe9\xb6f\x98yaj\x1b\x8e\x95q\x05\x8at\x19Y7Y\x8d"
sha256_sum_hex = "340cd04be7f530e3a7c1bc7b24f225ba5762ec7063a56e1ae01a30d56722e5c3"
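# Note: the sha512t sums above are 32 bytes, i.e. sha512 truncated to 256 bits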
class TestCryptHash:
def testSha(self, site):
file_path = site.storage.getPath("dbschema.json")
assert CryptHash.sha512sum(file_path) == sha512t_sum_hex
assert CryptHash.sha512sum(open(file_path, "rb")) == sha512t_sum_hex
assert CryptHash.sha512sum(open(file_path, "rb"), format="digest") == sha512t_sum_bin
assert CryptHash.sha256sum(file_path) == sha256_sum_hex
assert CryptHash.sha256sum(open(file_path, "rb")) == sha256_sum_hex
with open(file_path, "rb") as f:
hash = CryptHash.Sha512t(f.read(100))
assert hash.hexdigest() != sha512t_sum_hex # Partial read: not the full-file hash yet
hash.update(f.read(1024 * 1024))
assert hash.hexdigest() == sha512t_sum_hex
def testRandom(self):
assert len(CryptHash.random(64)) == 64
assert CryptHash.random() != CryptHash.random()
assert bytes.fromhex(CryptHash.random(encoding="hex"))
assert base64.b64decode(CryptHash.random(encoding="base64"))

View file

@ -1,137 +0,0 @@
import io
class TestDb:
def testCheckTables(self, db):
tables = [row["name"] for row in db.execute("SELECT name FROM sqlite_master WHERE type='table'")]
assert "keyvalue" in tables # To store simple key -> value
assert "json" in tables # Json file path registry
assert "test" in tables # The table defined in dbschema.json
# Verify test table
cols = [col["name"] for col in db.execute("PRAGMA table_info(test)")]
assert "test_id" in cols
assert "title" in cols
# Add new table
assert "newtest" not in tables
db.schema["tables"]["newtest"] = {
"cols": [
["newtest_id", "INTEGER"],
["newtitle", "TEXT"],
],
"indexes": ["CREATE UNIQUE INDEX newtest_id ON newtest(newtest_id)"],
"schema_changed": 1426195822
}
db.checkTables()
tables = [row["name"] for row in db.execute("SELECT name FROM sqlite_master WHERE type='table'")]
assert "test" in tables
assert "newtest" in tables
def testQueries(self, db):
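# The Db wrapper expands a dict bound to '?' into a WHERE clause: lists become IN (...), a 'not__<col>' key negates the condition and '<col>__like' uses LIKE, as exercised below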
# Test insert
for i in range(100):
db.execute("INSERT INTO test ?", {"test_id": i, "title": "Test #%s" % i})
assert db.execute("SELECT COUNT(*) AS num FROM test").fetchone()["num"] == 100
# Test single select
assert db.execute("SELECT COUNT(*) AS num FROM test WHERE ?", {"test_id": 1}).fetchone()["num"] == 1
# Test multiple select
assert db.execute("SELECT COUNT(*) AS num FROM test WHERE ?", {"test_id": [1, 2, 3]}).fetchone()["num"] == 3
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"test_id": [1, 2, 3], "title": "Test #2"}
).fetchone()["num"] == 1
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"test_id": [1, 2, 3], "title": ["Test #2", "Test #3", "Test #4"]}
).fetchone()["num"] == 2
# Test multiple select using named params
assert db.execute("SELECT COUNT(*) AS num FROM test WHERE test_id IN :test_id", {"test_id": [1, 2, 3]}).fetchone()["num"] == 3
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE test_id IN :test_id AND title = :title",
{"test_id": [1, 2, 3], "title": "Test #2"}
).fetchone()["num"] == 1
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE test_id IN :test_id AND title IN :title",
{"test_id": [1, 2, 3], "title": ["Test #2", "Test #3", "Test #4"]}
).fetchone()["num"] == 2
# Large amount of IN values
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"not__test_id": list(range(2, 3000))}
).fetchone()["num"] == 2
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"test_id": list(range(50, 3000))}
).fetchone()["num"] == 50
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"not__title": ["Test #%s" % i for i in range(50, 3000)]}
).fetchone()["num"] == 50
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"title__like": "%20%"}
).fetchone()["num"] == 1
# Test named parameter escaping
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE test_id = :test_id AND title LIKE :titlelike",
{"test_id": 1, "titlelike": "Test%"}
).fetchone()["num"] == 1
def testEscaping(self, db):
# Test insert
for i in range(100):
db.execute("INSERT INTO test ?", {"test_id": i, "title": "Test '\" #%s" % i})
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"title": "Test '\" #1"}
).fetchone()["num"] == 1
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"title": ["Test '\" #%s" % i for i in range(0, 50)]}
).fetchone()["num"] == 50
assert db.execute(
"SELECT COUNT(*) AS num FROM test WHERE ?",
{"not__title": ["Test '\" #%s" % i for i in range(50, 3000)]}
).fetchone()["num"] == 50
def testUpdateJson(self, db):
f = io.BytesIO()
f.write("""
{
"test": [
{"test_id": 1, "title": "Test 1 title", "extra col": "Ignore it"}
]
}
""".encode())
f.seek(0)
assert db.updateJson(db.db_dir + "data.json", f) is True
assert db.execute("SELECT COUNT(*) AS num FROM test_importfilter").fetchone()["num"] == 1
assert db.execute("SELECT COUNT(*) AS num FROM test").fetchone()["num"] == 1
def testUnsafePattern(self, db):
db.schema["maps"] = {"[A-Za-z.]*": db.schema["maps"]["data.json"]} # Only repetition of . supported
f = io.StringIO()
f.write("""
{
"test": [
{"test_id": 1, "title": "Test 1 title", "extra col": "Ignore it"}
]
}
""")
f.seek(0)
assert db.updateJson(db.db_dir + "data.json", f) is False
assert db.execute("SELECT COUNT(*) AS num FROM test_importfilter").fetchone()["num"] == 0
assert db.execute("SELECT COUNT(*) AS num FROM test").fetchone()["num"] == 0

View file

@ -1,31 +0,0 @@
import re
from Db.DbQuery import DbQuery
class TestDbQuery:
def testParse(self):
query_text = """
SELECT
'comment' AS type,
date_added, post.title AS title,
keyvalue.value || ': ' || comment.body AS body,
'?Post:' || comment.post_id || '#Comments' AS url
FROM
comment
LEFT JOIN json USING (json_id)
LEFT JOIN json AS json_content ON (json_content.directory = json.directory AND json_content.file_name='content.json')
LEFT JOIN keyvalue ON (keyvalue.json_id = json_content.json_id AND key = 'cert_user_id')
LEFT JOIN post ON (comment.post_id = post.post_id)
WHERE
post.date_added > 123
ORDER BY
date_added DESC
LIMIT 20
"""
query = DbQuery(query_text)
assert query.parts["LIMIT"] == "20"
assert query.fields["body"] == "keyvalue.value || ': ' || comment.body"
assert re.sub("[ \r\n]", "", str(query)) == re.sub("[ \r\n]", "", query_text)
query.wheres.append("body LIKE '%hello%'")
assert "body LIKE '%hello%'" in str(query)

View file

@ -1,52 +0,0 @@
from Debug import Debug
import gevent
import os
import re
import pytest
class TestDebug:
@pytest.mark.parametrize("items,expected", [
(["@/src/A/B/C.py:17"], ["A/B/C.py line 17"]), # basic test
(["@/src/Db/Db.py:17"], ["Db.py line 17"]), # path compression
(["%s:1" % __file__], ["TestDebug.py line 1"]),
(["@/plugins/Chart/ChartDb.py:100"], ["ChartDb.py line 100"]), # plugins
(["@/main.py:17"], ["main.py line 17"]), # root
(["@\\src\\Db\\__init__.py:17"], ["Db/__init__.py line 17"]), # Windows paths
(["<frozen importlib._bootstrap>:1"], []), # importlib builtins
(["<frozen importlib._bootstrap_external>:1"], []), # importlib builtins
(["/home/ivanq/ZeroNet/src/main.py:13"], ["?/src/main.py line 13"]), # best-effort anonymization
(["C:\\ZeroNet\\core\\src\\main.py:13"], ["?/src/main.py line 13"]),
(["/root/main.py:17"], ["/root/main.py line 17"]),
(["{gevent}:13"], ["<gevent>/__init__.py line 13"]), # modules
(["{os}:13"], ["<os> line 13"]), # python builtin modules
(["src/gevent/event.py:17"], ["<gevent>/event.py line 17"]), # gevent-overriden __file__
(["@/src/Db/Db.py:17", "@/src/Db/DbQuery.py:1"], ["Db.py line 17", "DbQuery.py line 1"]), # mutliple args
(["@/src/Db/Db.py:17", "@/src/Db/Db.py:1"], ["Db.py line 17", "1"]), # same file
(["{os}:1", "@/src/Db/Db.py:17"], ["<os> line 1", "Db.py line 17"]), # builtins
(["{gevent}:1"] + ["{os}:3"] * 4 + ["@/src/Db/Db.py:17"], ["<gevent>/__init__.py line 1", "...", "Db.py line 17"])
])
def testFormatTraceback(self, items, expected):
q_items = []
for item in items:
file, line = item.rsplit(":", 1)
if file.startswith("@"):
file = Debug.root_dir + file[1:]
file = file.replace("{os}", os.__file__)
file = file.replace("{gevent}", gevent.__file__)
q_items.append((file, int(line)))
assert Debug.formatTraceback(q_items) == expected
def testFormatException(self):
try:
raise ValueError("Test exception")
except Exception:
assert re.match(r"ValueError: Test exception in TestDebug.py line [0-9]+", Debug.formatException())
try:
os.path.abspath(1)
except Exception:
assert re.search(r"in TestDebug.py line [0-9]+ > <(posixpath|ntpath)> line ", Debug.formatException())
def testFormatStack(self):
assert re.match(r"TestDebug.py line [0-9]+ > <_pytest>/python.py line [0-9]+", Debug.formatStack())

View file

@ -1,58 +0,0 @@
import io
from util import Diff
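# Diff.diff returns an action list: ('=', n) keeps n characters of unchanged lines, ('-', n) drops n characters and ('+', [lines]) inserts new lines; Diff.patch replays these actions on a file object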
class TestDiff:
def testDiff(self):
assert Diff.diff(
[],
["one", "two", "three"]
) == [("+", ["one", "two","three"])]
assert Diff.diff(
["one", "two", "three"],
["one", "two", "three", "four", "five"]
) == [("=", 11), ("+", ["four", "five"])]
assert Diff.diff(
["one", "two", "three", "six"],
["one", "two", "three", "four", "five", "six"]
) == [("=", 11), ("+", ["four", "five"]), ("=", 3)]
assert Diff.diff(
["one", "two", "three", "hmm", "six"],
["one", "two", "three", "four", "five", "six"]
) == [("=", 11), ("-", 3), ("+", ["four", "five"]), ("=", 3)]
assert Diff.diff(
["one", "two", "three"],
[]
) == [("-", 11)]
def testUtf8(self):
assert Diff.diff(
["one", "\xe5\xad\xa6\xe4\xb9\xa0\xe4\xb8\x8b", "two", "three"],
["one", "\xe5\xad\xa6\xe4\xb9\xa0\xe4\xb8\x8b", "two", "three", "four", "five"]
) == [("=", 20), ("+", ["four", "five"])]
def testDiffLimit(self):
old_f = io.BytesIO(b"one\ntwo\nthree\nhmm\nsix")
new_f = io.BytesIO(b"one\ntwo\nthree\nfour\nfive\nsix")
actions = Diff.diff(list(old_f), list(new_f), limit=1024)
assert actions
old_f = io.BytesIO(b"one\ntwo\nthree\nhmm\nsix")
new_f = io.BytesIO(b"one\ntwo\nthree\nfour\nfive\nsix"*1024)
actions = Diff.diff(list(old_f), list(new_f), limit=1024)
assert actions is False
def testPatch(self):
old_f = io.BytesIO(b"one\ntwo\nthree\nhmm\nsix")
new_f = io.BytesIO(b"one\ntwo\nthree\nfour\nfive\nsix")
actions = Diff.diff(
list(old_f),
list(new_f)
)
old_f.seek(0)
assert Diff.patch(old_f, actions).getvalue() == new_f.getvalue()

View file

@ -1,65 +0,0 @@
import util
class ExampleClass(object):
def __init__(self):
self.called = []
self.onChanged = util.Event()
def increment(self, title):
self.called.append(title)
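# The tests below cover util.Event behaviour: append()-ed handlers run on every call, once() handlers run a single time, and a named once() keeps only one handler per name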
class TestEvent:
def testEvent(self):
test_obj = ExampleClass()
test_obj.onChanged.append(lambda: test_obj.increment("Called #1"))
test_obj.onChanged.append(lambda: test_obj.increment("Called #2"))
test_obj.onChanged.once(lambda: test_obj.increment("Once"))
assert test_obj.called == []
test_obj.onChanged()
assert test_obj.called == ["Called #1", "Called #2", "Once"]
test_obj.onChanged()
test_obj.onChanged()
assert test_obj.called == ["Called #1", "Called #2", "Once", "Called #1", "Called #2", "Called #1", "Called #2"]
def testOnce(self):
test_obj = ExampleClass()
test_obj.onChanged.once(lambda: test_obj.increment("Once test #1"))
# It should be called only once
assert test_obj.called == []
test_obj.onChanged()
assert test_obj.called == ["Once test #1"]
test_obj.onChanged()
test_obj.onChanged()
assert test_obj.called == ["Once test #1"]
def testOnceMultiple(self):
test_obj = ExampleClass()
# Multiple different once() callbacks can be queued
test_obj.onChanged.once(lambda: test_obj.increment("Once test #1"))
test_obj.onChanged.once(lambda: test_obj.increment("Once test #2"))
test_obj.onChanged.once(lambda: test_obj.increment("Once test #3"))
assert test_obj.called == []
test_obj.onChanged()
assert test_obj.called == ["Once test #1", "Once test #2", "Once test #3"]
test_obj.onChanged()
test_obj.onChanged()
assert test_obj.called == ["Once test #1", "Once test #2", "Once test #3"]
def testOnceNamed(self):
test_obj = ExampleClass()
# Don't store more than one callback of the same named type
test_obj.onChanged.once(lambda: test_obj.increment("Once test #1/1"), "type 1")
test_obj.onChanged.once(lambda: test_obj.increment("Once test #1/2"), "type 1")
test_obj.onChanged.once(lambda: test_obj.increment("Once test #2"), "type 2")
assert test_obj.called == []
test_obj.onChanged()
assert test_obj.called == ["Once test #1/1", "Once test #2"]
test_obj.onChanged()
test_obj.onChanged()
assert test_obj.called == ["Once test #1/1", "Once test #2"]

View file

@ -1,124 +0,0 @@
import io
import pytest
import time
from Connection import ConnectionServer
from Connection import Connection
from File import FileServer
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestFileRequest:
def testGetFile(self, file_server, site):
file_server.ip_incoming = {} # Reset flood protection
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
file_server.sites[site.address] = site
# Normal request
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
assert b"sign" in response["body"]
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0, "file_size": site.storage.getSize("content.json")})
assert b"sign" in response["body"]
# Invalid file
response = connection.request("getFile", {"site": site.address, "inner_path": "invalid.file", "location": 0})
assert "File read error" in response["error"]
# Location over size
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 1024 * 1024})
assert "File read error" in response["error"]
# Stream from parent dir
response = connection.request("getFile", {"site": site.address, "inner_path": "../users.json", "location": 0})
assert "File read exception" in response["error"]
# Invalid site
response = connection.request("getFile", {"site": "", "inner_path": "users.json", "location": 0})
assert "Unknown site" in response["error"]
response = connection.request("getFile", {"site": ".", "inner_path": "users.json", "location": 0})
assert "Unknown site" in response["error"]
# Invalid size
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0, "file_size": 1234})
assert "File size does not match" in response["error"]
# Invalid path
for path in ["../users.json", "./../users.json", "data/../content.json", ".../users.json"]:
for sep in ["/", "\\"]:
response = connection.request("getFile", {"site": site.address, "inner_path": path.replace("/", sep), "location": 0})
assert response["error"] == 'File read exception'
connection.close()
client.stop()
def testStreamFile(self, file_server, site):
file_server.ip_incoming = {} # Reset flood protection
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
file_server.sites[site.address] = site
buff = io.BytesIO()
response = connection.request("streamFile", {"site": site.address, "inner_path": "content.json", "location": 0}, buff)
assert "stream_bytes" in response
assert b"sign" in buff.getvalue()
# Invalid file
buff = io.BytesIO()
response = connection.request("streamFile", {"site": site.address, "inner_path": "invalid.file", "location": 0}, buff)
assert "File read error" in response["error"]
# Location over size
buff = io.BytesIO()
response = connection.request(
"streamFile", {"site": site.address, "inner_path": "content.json", "location": 1024 * 1024}, buff
)
assert "File read error" in response["error"]
# Stream from parent dir
buff = io.BytesIO()
response = connection.request("streamFile", {"site": site.address, "inner_path": "../users.json", "location": 0}, buff)
assert "File read exception" in response["error"]
connection.close()
client.stop()
def testPex(self, file_server, site, site_temp):
file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
connection = client.getConnection(file_server.ip, 1544)
# Add new fake peer to site
fake_peer = site.addPeer(file_server.ip_external, 11337, return_peer=True)
# Add fake connection to it
fake_peer.connection = Connection(file_server, file_server.ip_external, 11337)
fake_peer.connection.last_recv_time = time.time()
assert fake_peer in site.getConnectablePeers()
# Add file_server as peer to client
peer_file_server = site_temp.addPeer(file_server.ip, 1544)
assert "%s:11337" % file_server.ip_external not in site_temp.peers
assert peer_file_server.pex()
assert "%s:11337" % file_server.ip_external in site_temp.peers
# Should not exchange private peers from local network
fake_peer_private = site.addPeer("192.168.0.1", 11337, return_peer=True)
assert fake_peer_private not in site.getConnectablePeers(allow_private=False)
fake_peer_private.connection = Connection(file_server, "192.168.0.1", 11337)
fake_peer_private.connection.last_recv_time = time.time()
assert "192.168.0.1:11337" not in site_temp.peers
assert not peer_file_server.pex()
assert "192.168.0.1:11337" not in site_temp.peers
connection.close()
client.stop()

View file

@ -1,39 +0,0 @@
import os
import pytest
from util.Flag import Flag
class TestFlag:
def testFlagging(self):
flag = Flag()
@flag.admin
@flag.no_multiuser
def testFn(anything):
return anything
assert "admin" in flag.db["testFn"]
assert "no_multiuser" in flag.db["testFn"]
def testSubclassedFlagging(self):
flag = Flag()
class Test:
@flag.admin
@flag.no_multiuser
def testFn(anything):
return anything
class SubTest(Test):
pass
assert "admin" in flag.db["testFn"]
assert "no_multiuser" in flag.db["testFn"]
def testInvalidFlag(self):
flag = Flag()
with pytest.raises(Exception) as err:
@flag.no_multiuser
@flag.unknown_flag
def testFn(anything):
return anything
assert "Invalid flag" in str(err.value)

View file

@ -1,79 +0,0 @@
import socket
import struct
import os
import pytest
from util import helper
from Config import config
@pytest.mark.usefixtures("resetSettings")
class TestHelper:
def testShellquote(self):
assert helper.shellquote("hel'lo") == "\"hel'lo\"" # Allow '
assert helper.shellquote('hel"lo') == '"hello"' # Remove "
assert helper.shellquote("hel'lo", 'hel"lo') == ('"hel\'lo"', '"hello"')
def testPackAddress(self):
for port in [1, 1000, 65535]:
for ip in ["1.1.1.1", "127.0.0.1", "0.0.0.0", "255.255.255.255", "192.168.1.1"]:
assert len(helper.packAddress(ip, port)) == 6
assert helper.unpackAddress(helper.packAddress(ip, port)) == (ip, port)
for ip in ["1:2:3:4:5:6:7:8", "::1", "2001:19f0:6c01:e76:5400:1ff:fed6:3eca", "2001:4860:4860::8888"]:
assert len(helper.packAddress(ip, port)) == 18
assert helper.unpackAddress(helper.packAddress(ip, port)) == (ip, port)
assert len(helper.packOnionAddress("boot3rdez4rzn36x.onion", port)) == 12
assert helper.unpackOnionAddress(helper.packOnionAddress("boot3rdez4rzn36x.onion", port)) == ("boot3rdez4rzn36x.onion", port)
with pytest.raises(struct.error):
helper.packAddress("1.1.1.1", 100000)
with pytest.raises(socket.error):
helper.packAddress("999.1.1.1", 1)
with pytest.raises(Exception):
helper.unpackAddress("X")
def testGetDirname(self):
assert helper.getDirname("data/users/content.json") == "data/users/"
assert helper.getDirname("data/users") == "data/"
assert helper.getDirname("") == ""
assert helper.getDirname("content.json") == ""
assert helper.getDirname("data/users/") == "data/users/"
assert helper.getDirname("/data/users/content.json") == "data/users/"
def testGetFilename(self):
assert helper.getFilename("data/users/content.json") == "content.json"
assert helper.getFilename("data/users") == "users"
assert helper.getFilename("") == ""
assert helper.getFilename("content.json") == "content.json"
assert helper.getFilename("data/users/") == ""
assert helper.getFilename("/data/users/content.json") == "content.json"
def testIsIp(self):
assert helper.isIp("1.2.3.4")
assert helper.isIp("255.255.255.255")
assert not helper.isIp("any.host")
assert not helper.isIp("1.2.3.4.com")
assert not helper.isIp("1.2.3.4.any.host")
def testIsPrivateIp(self):
assert helper.isPrivateIp("192.168.1.1")
assert not helper.isPrivateIp("1.1.1.1")
assert helper.isPrivateIp("fe80::44f0:3d0:4e6:637c")
assert not helper.isPrivateIp("fca5:95d6:bfde:d902:8951:276e:1111:a22c") # cjdns
def testOpenLocked(self):
locked_f = helper.openLocked(config.data_dir + "/locked.file")
assert locked_f
with pytest.raises(BlockingIOError):
locked_f_again = helper.openLocked(config.data_dir + "/locked.file")
locked_f_different = helper.openLocked(config.data_dir + "/locked_different.file")
locked_f.close()
locked_f_different.close()
os.unlink(locked_f.name)
os.unlink(locked_f_different.name)

View file

@ -1,88 +0,0 @@
import io
import os
import msgpack
import pytest
from Config import config
from util import Msgpack
from collections import OrderedDict
class TestMsgpack:
test_data = OrderedDict(
sorted({"cmd": "fileGet", "bin": b'p\x81zDhL\xf0O\xd0\xaf', "params": {"site": "1Site"}, "utf8": b'\xc3\xa1rv\xc3\xadzt\xc5\xb1r\xc5\x91'.decode("utf8"), "list": [b'p\x81zDhL\xf0O\xd0\xaf', b'p\x81zDhL\xf0O\xd0\xaf']}.items())
)
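# use_bin_type=True keeps bytes and str distinct on the wire; use_bin_type=False packs both as msgpack raw (the pre-ZeroNet 0.7.0 format checked in testBackwardCompatibility)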
def testPacking(self):
assert Msgpack.pack(self.test_data) == b'\x85\xa3bin\xc4\np\x81zDhL\xf0O\xd0\xaf\xa3cmd\xa7fileGet\xa4list\x92\xc4\np\x81zDhL\xf0O\xd0\xaf\xc4\np\x81zDhL\xf0O\xd0\xaf\xa6params\x81\xa4site\xa51Site\xa4utf8\xad\xc3\xa1rv\xc3\xadzt\xc5\xb1r\xc5\x91'
assert Msgpack.pack(self.test_data, use_bin_type=False) == b'\x85\xa3bin\xaap\x81zDhL\xf0O\xd0\xaf\xa3cmd\xa7fileGet\xa4list\x92\xaap\x81zDhL\xf0O\xd0\xaf\xaap\x81zDhL\xf0O\xd0\xaf\xa6params\x81\xa4site\xa51Site\xa4utf8\xad\xc3\xa1rv\xc3\xadzt\xc5\xb1r\xc5\x91'
def testUnpacking(self):
assert Msgpack.unpack(Msgpack.pack(self.test_data)) == self.test_data
@pytest.mark.parametrize("unpacker_class", [msgpack.Unpacker, msgpack.fallback.Unpacker])
def testUnpacker(self, unpacker_class):
unpacker = unpacker_class(raw=False)
data = msgpack.packb(self.test_data, use_bin_type=True)
data += msgpack.packb(self.test_data, use_bin_type=True)
messages = []
for char in data:
unpacker.feed(bytes([char]))
for message in unpacker:
messages.append(message)
assert len(messages) == 2
assert messages[0] == self.test_data
assert messages[0] == messages[1]
def testStreaming(self):
bin_data = os.urandom(20)
f = Msgpack.FilePart("%s/users.json" % config.data_dir, "rb")
f.read_bytes = 30
data = {"cmd": "response", "body": f, "bin": bin_data}
out_buff = io.BytesIO()
Msgpack.stream(data, out_buff.write)
out_buff.seek(0)
data_packb = {
"cmd": "response",
"body": open("%s/users.json" % config.data_dir, "rb").read(30),
"bin": bin_data
}
out_buff.seek(0)
data_unpacked = Msgpack.unpack(out_buff.read())
assert data_unpacked == data_packb
assert data_unpacked["cmd"] == "response"
assert type(data_unpacked["body"]) == bytes
def testBackwardCompatibility(self):
packed = {}
packed["py3"] = Msgpack.pack(self.test_data, use_bin_type=False)
packed["py3_bin"] = Msgpack.pack(self.test_data, use_bin_type=True)
for key, val in packed.items():
unpacked = Msgpack.unpack(val)
type(unpacked["utf8"]) == str
type(unpacked["bin"]) == bytes
# Packed with use_bin_type=False (pre-ZeroNet 0.7.0)
unpacked = Msgpack.unpack(packed["py3"], decode=True)
type(unpacked["utf8"]) == str
type(unpacked["bin"]) == bytes
assert len(unpacked["utf8"]) == 9
assert len(unpacked["bin"]) == 10
with pytest.raises(UnicodeDecodeError) as err: # Try to decode binary as utf-8
unpacked = Msgpack.unpack(packed["py3"], decode=False)
# Packed with use_bin_type=True
unpacked = Msgpack.unpack(packed["py3_bin"], decode=False)
type(unpacked["utf8"]) == str
type(unpacked["bin"]) == bytes
assert len(unpacked["utf8"]) == 9
assert len(unpacked["bin"]) == 10

View file

@ -1,167 +0,0 @@
import time
import gevent
import pytest
import util
from util import ThreadPool
@pytest.fixture(params=['gevent.spawn', 'thread_pool.spawn'])
def queue_spawn(request):
thread_pool = ThreadPool.ThreadPool(10)
if request.param == "gevent.spawn":
return gevent.spawn
else:
return thread_pool.spawn
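# The tests below cover util.Noparallel semantics: blocking calls wait for the already running call and reuse its result, queue=True keeps at most one queued call, and blocking=False returns the running greenlet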
class ExampleClass(object):
def __init__(self):
self.counted = 0
@util.Noparallel()
def countBlocking(self, num=5):
for i in range(1, num + 1):
time.sleep(0.1)
self.counted += 1
return "counted:%s" % i
@util.Noparallel(queue=True, ignore_class=True)
def countQueue(self, num=5):
for i in range(1, num + 1):
time.sleep(0.1)
self.counted += 1
return "counted:%s" % i
@util.Noparallel(blocking=False)
def countNoblocking(self, num=5):
for i in range(1, num + 1):
time.sleep(0.01)
self.counted += 1
return "counted:%s" % i
class TestNoparallel:
def testBlocking(self, queue_spawn):
obj1 = ExampleClass()
obj2 = ExampleClass()
# Don't allow calling again while it's running; additional calls wait until the running one finishes
threads = [
queue_spawn(obj1.countBlocking),
queue_spawn(obj1.countBlocking),
queue_spawn(obj1.countBlocking),
queue_spawn(obj2.countBlocking)
]
assert obj2.countBlocking() == "counted:5" # The call is ignored as obj2.countBlocking is already counting, but it blocks until that call finishes
gevent.joinall(threads)
assert [thread.value for thread in threads] == ["counted:5", "counted:5", "counted:5", "counted:5"]
obj2.countBlocking() # Allowed to call again as obj2.countBlocking has finished
assert obj1.counted == 5
assert obj2.counted == 10
def testNoblocking(self):
obj1 = ExampleClass()
thread1 = obj1.countNoblocking()
thread2 = obj1.countNoblocking() # Ignored
assert obj1.counted == 0
time.sleep(0.1)
assert thread1.value == "counted:5"
assert thread2.value == "counted:5"
assert obj1.counted == 5
obj1.countNoblocking().join() # Allowed again; wait until it finishes
assert obj1.counted == 10
def testQueue(self, queue_spawn):
obj1 = ExampleClass()
queue_spawn(obj1.countQueue, num=1)
queue_spawn(obj1.countQueue, num=1)
queue_spawn(obj1.countQueue, num=1)
time.sleep(0.3)
assert obj1.counted == 2 # No multi-queue supported
obj2 = ExampleClass()
queue_spawn(obj2.countQueue, num=10)
queue_spawn(obj2.countQueue, num=10)
time.sleep(1.5) # Call 1 finished, call 2 still working
assert 10 < obj2.counted < 20
queue_spawn(obj2.countQueue, num=10)
time.sleep(2.0)
assert obj2.counted == 30
def testQueueOverload(self):
obj1 = ExampleClass()
threads = []
for i in range(1000):
thread = gevent.spawn(obj1.countQueue, num=5)
threads.append(thread)
gevent.joinall(threads)
assert obj1.counted == 5 * 2 # Only called twice (no multi-queue allowed)
def testIgnoreClass(self, queue_spawn):
obj1 = ExampleClass()
obj2 = ExampleClass()
threads = [
queue_spawn(obj1.countQueue),
queue_spawn(obj1.countQueue),
queue_spawn(obj1.countQueue),
queue_spawn(obj2.countQueue),
queue_spawn(obj2.countQueue)
]
s = time.time()
time.sleep(0.001)
gevent.joinall(threads)
# Queue limited to 2 calls (every call counts to 5 and takes ~0.5 sec)
assert obj1.counted + obj2.counted == 10
taken = time.time() - s
assert 1.2 > taken >= 1.0 # 2 * 0.5s count = ~1s
def testException(self, queue_spawn):
class MyException(Exception):
pass
@util.Noparallel()
def raiseException():
raise MyException("Test error!")
with pytest.raises(MyException) as err:
raiseException()
assert str(err.value) == "Test error!"
with pytest.raises(MyException) as err:
queue_spawn(raiseException).get()
assert str(err.value) == "Test error!"
def testMultithreadMix(self, queue_spawn):
obj1 = ExampleClass()
with ThreadPool.ThreadPool(10) as thread_pool:
s = time.time()
t1 = queue_spawn(obj1.countBlocking, 5)
time.sleep(0.01)
t2 = thread_pool.spawn(obj1.countBlocking, 5)
time.sleep(0.01)
t3 = thread_pool.spawn(obj1.countBlocking, 5)
time.sleep(0.3)
t4 = gevent.spawn(obj1.countBlocking, 5)
threads = [t1, t2, t3, t4]
for thread in threads:
assert thread.get() == "counted:5"
time_taken = time.time() - s
assert obj1.counted == 5
assert 0.5 < time_taken < 0.7

View file

@ -1,159 +0,0 @@
import time
import io
import pytest
from File import FileServer
from File import FileRequest
from Crypt import CryptHash
from . import Spy
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestPeer:
def testPing(self, file_server, site, site_temp):
file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
connection = client.getConnection(file_server.ip, 1544)
# Add file_server as peer to client
peer_file_server = site_temp.addPeer(file_server.ip, 1544)
assert peer_file_server.ping() is not None
assert peer_file_server in site_temp.peers.values()
peer_file_server.remove()
assert peer_file_server not in site_temp.peers.values()
connection.close()
client.stop()
def testDownloadFile(self, file_server, site, site_temp):
file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
connection = client.getConnection(file_server.ip, 1544)
# Add file_server as peer to client
peer_file_server = site_temp.addPeer(file_server.ip, 1544)
# Testing streamFile
buff = peer_file_server.getFile(site_temp.address, "content.json", streaming=True)
assert b"sign" in buff.getvalue()
# Testing getFile
buff = peer_file_server.getFile(site_temp.address, "content.json")
assert b"sign" in buff.getvalue()
connection.close()
client.stop()
def testHashfield(self, site):
sample_hash = list(site.content_manager.contents["content.json"]["files_optional"].values())[0]["sha512"]
site.storage.verifyFiles(quick_check=True) # Find what optional files we have
# Check if hashfield has any files
assert site.content_manager.hashfield
assert len(site.content_manager.hashfield) > 0
# Check existing hash
assert site.content_manager.hashfield.getHashId(sample_hash) in site.content_manager.hashfield
# Add new hash
new_hash = CryptHash.sha512sum(io.BytesIO(b"hello"))
assert site.content_manager.hashfield.getHashId(new_hash) not in site.content_manager.hashfield
assert site.content_manager.hashfield.appendHash(new_hash)
assert not site.content_manager.hashfield.appendHash(new_hash) # Don't add second time
assert site.content_manager.hashfield.getHashId(new_hash) in site.content_manager.hashfield
# Remove new hash
assert site.content_manager.hashfield.removeHash(new_hash)
assert site.content_manager.hashfield.getHashId(new_hash) not in site.content_manager.hashfield
def testHashfieldExchange(self, file_server, site, site_temp):
server1 = file_server
server1.sites[site.address] = site
site.connection_server = server1
server2 = FileServer(file_server.ip, 1545)
server2.sites[site_temp.address] = site_temp
site_temp.connection_server = server2
site.storage.verifyFiles(quick_check=True) # Find what optional files we have
# Add file_server as peer to client
server2_peer1 = site_temp.addPeer(file_server.ip, 1544)
# Check if hashfield has any files
assert len(site.content_manager.hashfield) > 0
# Testing hashfield sync
assert len(server2_peer1.hashfield) == 0
assert server2_peer1.updateHashfield() # Query hashfield from peer
assert len(server2_peer1.hashfield) > 0
# Test force push new hashfield
site_temp.content_manager.hashfield.appendHash("AABB")
server1_peer2 = site.addPeer(file_server.ip, 1545, return_peer=True)
with Spy.Spy(FileRequest, "route") as requests:
assert len(server1_peer2.hashfield) == 0
server2_peer1.sendMyHashfield()
assert len(server1_peer2.hashfield) == 1
server2_peer1.sendMyHashfield() # Hashfield not changed, should be ignored
assert len(requests) == 1
time.sleep(0.01) # To make hashfield change date different
site_temp.content_manager.hashfield.appendHash("AACC")
server2_peer1.sendMyHashfield() # Push hashfield
assert len(server1_peer2.hashfield) == 2
assert len(requests) == 2
site_temp.content_manager.hashfield.appendHash("AADD")
assert server1_peer2.updateHashfield(force=True) # Request hashfield
assert len(server1_peer2.hashfield) == 3
assert len(requests) == 3
assert not server2_peer1.sendMyHashfield() # Not changed, should be ignored
assert len(requests) == 3
server2.stop()
def testFindHash(self, file_server, site, site_temp):
file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Add file_server as peer to client
peer_file_server = site_temp.addPeer(file_server.ip, 1544)
assert peer_file_server.findHashIds([1234]) == {}
# Add fake peer with required hash
fake_peer_1 = site.addPeer(file_server.ip_external, 1544)
fake_peer_1.hashfield.append(1234)
fake_peer_2 = site.addPeer("1.2.3.5", 1545)
fake_peer_2.hashfield.append(1234)
fake_peer_2.hashfield.append(1235)
fake_peer_3 = site.addPeer("1.2.3.6", 1546)
fake_peer_3.hashfield.append(1235)
fake_peer_3.hashfield.append(1236)
res = peer_file_server.findHashIds([1234, 1235])
assert sorted(res[1234]) == sorted([(file_server.ip_external, 1544), ("1.2.3.5", 1545)])
assert sorted(res[1235]) == sorted([("1.2.3.5", 1545), ("1.2.3.6", 1546)])
# Test my address adding
site.content_manager.hashfield.append(1234)
res = peer_file_server.findHashIds([1234, 1235])
assert sorted(res[1234]) == sorted([(file_server.ip_external, 1544), ("1.2.3.5", 1545), (file_server.ip, 1544)])
assert sorted(res[1235]) == sorted([("1.2.3.5", 1545), ("1.2.3.6", 1546)])

View file

@ -1,100 +0,0 @@
import time
import gevent
from util import RateLimit
# Time is around limit +/- 0.05 sec
def around(t, limit):
return t >= limit - 0.05 and t <= limit + 0.05
class ExampleClass(object):
def __init__(self):
self.counted = 0
self.last_called = None
def count(self, back="counted"):
self.counted += 1
self.last_called = back
return back
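# The tests below cover RateLimit: the first call for a key runs immediately, repeated calls are delayed until 'allowed_again' seconds have passed, and callAsync coalesces pending calls so only the last queued one runs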
class TestRateLimit:
def testCall(self):
obj1 = ExampleClass()
obj2 = ExampleClass()
s = time.time()
assert RateLimit.call("counting", allowed_again=0.1, func=obj1.count) == "counted"
assert around(time.time() - s, 0.0) # The first call is allowed instantly
assert obj1.counted == 1
# Call again
assert not RateLimit.isAllowed("counting", 0.1)
assert RateLimit.isAllowed("something else", 0.1)
assert RateLimit.call("counting", allowed_again=0.1, func=obj1.count) == "counted"
assert around(time.time() - s, 0.1) # Delays second call within interval
assert obj1.counted == 2
time.sleep(0.1) # Wait the cooldown time
# Call 3 times async
s = time.time()
assert obj2.counted == 0
threads = [
gevent.spawn(lambda: RateLimit.call("counting", allowed_again=0.1, func=obj2.count)), # Instant
gevent.spawn(lambda: RateLimit.call("counting", allowed_again=0.1, func=obj2.count)), # 0.1s delay
gevent.spawn(lambda: RateLimit.call("counting", allowed_again=0.1, func=obj2.count)) # 0.2s delay
]
gevent.joinall(threads)
assert [thread.value for thread in threads] == ["counted", "counted", "counted"]
assert around(time.time() - s, 0.2)
# Wait 0.1s cooldown
assert not RateLimit.isAllowed("counting", 0.1)
time.sleep(0.11)
assert RateLimit.isAllowed("counting", 0.1)
# No queue = instant again
s = time.time()
assert RateLimit.isAllowed("counting", 0.1)
assert RateLimit.call("counting", allowed_again=0.1, func=obj2.count) == "counted"
assert around(time.time() - s, 0.0)
assert obj2.counted == 4
def testCallAsync(self):
obj1 = ExampleClass()
obj2 = ExampleClass()
s = time.time()
RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back="call #1").join()
assert obj1.counted == 1 # First instant
assert around(time.time() - s, 0.0)
# After that, the calls are delayed
s = time.time()
t1 = RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back="call #2") # Dumped by the next call
time.sleep(0.03)
t2 = RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back="call #3") # Dumped by the next call
time.sleep(0.03)
t3 = RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back="call #4") # Will be called
assert obj1.counted == 1 # Delay still in progress: Not called yet
t3.join()
assert t3.value == "call #4"
assert around(time.time() - s, 0.1)
# Only the last one called
assert obj1.counted == 2
assert obj1.last_called == "call #4"
# Just called, not allowed again
assert not RateLimit.isAllowed("counting async", 0.1)
s = time.time()
t4 = RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back="call #5").join()
assert obj1.counted == 3
assert around(time.time() - s, 0.1)
assert not RateLimit.isAllowed("counting async", 0.1)
time.sleep(0.11)
assert RateLimit.isAllowed("counting async", 0.1)

View file

@ -1,24 +0,0 @@
from util import SafeRe
import pytest
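# The tests below check that SafeRe.match rejects patterns prone to catastrophic backtracking ('Potentially unsafe') or with too many repetition groups ('More than')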
class TestSafeRe:
def testSafeMatch(self):
assert SafeRe.match(
"((js|css)/(?!all.(js|css))|data/users/.*db|data/users/.*/.*|data/archived|.*.py)",
"js/ZeroTalk.coffee"
)
assert SafeRe.match(".+/data.json", "data/users/1J3rJ8ecnwH2EPYa6MrgZttBNc61ACFiCj/data.json")
@pytest.mark.parametrize("pattern", ["([a-zA-Z]+)*", "(a|aa)+*", "(a|a?)+", "(.*a){10}", "((?!json).)*$", r"(\w+\d+)+C"])
def testUnsafeMatch(self, pattern):
with pytest.raises(SafeRe.UnsafePatternError) as err:
SafeRe.match(pattern, "aaaaaaaaaaaaaaaaaaaaaaaa!")
assert "Potentially unsafe" in str(err.value)
@pytest.mark.parametrize("pattern", ["^(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)(.*a)$"])
def testUnsafeRepetition(self, pattern):
with pytest.raises(SafeRe.UnsafePatternError) as err:
SafeRe.match(pattern, "aaaaaaaaaaaaaaaaaaaaaaaa!")
assert "More than" in str(err.value)

View file

@ -1,70 +0,0 @@
import shutil
import os
import pytest
from Site import SiteManager
TEST_DATA_PATH = "src/Test/testdata"
@pytest.mark.usefixtures("resetSettings")
class TestSite:
def testClone(self, site):
assert site.storage.directory == TEST_DATA_PATH + "/1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
# Remove old files
if os.path.isdir(TEST_DATA_PATH + "/159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL"):
shutil.rmtree(TEST_DATA_PATH + "/159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL")
assert not os.path.isfile(TEST_DATA_PATH + "/159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL/content.json")
# Clone 1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT to 159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL
new_site = site.clone(
"159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL", "5JU2p5h3R7B1WrbaEdEDNZR7YHqRLGcjNcqwqVQzX2H4SuNe2ee", address_index=1
)
# Check if clone was successful
assert new_site.address == "159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL"
assert new_site.storage.isFile("content.json")
assert new_site.storage.isFile("index.html")
assert new_site.storage.isFile("data/users/content.json")
assert new_site.storage.isFile("data/zeroblog.db")
assert new_site.storage.verifyFiles()["bad_files"] == [] # No bad files allowed
assert new_site.storage.query("SELECT * FROM keyvalue WHERE key = 'title'").fetchone()["value"] == "MyZeroBlog"
# Optional files should be removed
assert len(new_site.storage.loadJson("content.json").get("files_optional", {})) == 0
# Test re-cloning (updating)
# Changes in non-data files should be overwritten
new_site.storage.write("index.html", b"this will be overwritten")
assert new_site.storage.read("index.html") == b"this will be overwritten"
# Changes in data file should be kept after re-cloning
changed_contentjson = new_site.storage.loadJson("content.json")
changed_contentjson["description"] = "Update Description Test"
new_site.storage.writeJson("content.json", changed_contentjson)
changed_data = new_site.storage.loadJson("data/data.json")
changed_data["title"] = "UpdateTest"
new_site.storage.writeJson("data/data.json", changed_data)
# The update should be reflected in the database
assert new_site.storage.query("SELECT * FROM keyvalue WHERE key = 'title'").fetchone()["value"] == "UpdateTest"
# Re-clone the site
site.log.debug("Re-cloning")
site.clone("159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL")
assert new_site.storage.loadJson("data/data.json")["title"] == "UpdateTest"
assert new_site.storage.loadJson("content.json")["description"] == "Update Description Test"
assert new_site.storage.read("index.html") != "this will be overwritten"
# Delete created files
new_site.storage.deleteFiles()
assert not os.path.isdir(TEST_DATA_PATH + "/159EGD5srUsMP97UpcLy8AtKQbQLK2AbbL")
# Delete from site registry
assert new_site.address in SiteManager.site_manager.sites
SiteManager.site_manager.delete(new_site.address)
assert new_site.address not in SiteManager.site_manager.sites

View file

@ -1,562 +0,0 @@
import time
import pytest
import mock
import gevent
import gevent.event
import os
from Connection import ConnectionServer
from Config import config
from File import FileRequest
from File import FileServer
from Site.Site import Site
from . import Spy
@pytest.mark.usefixtures("resetTempSettings")
@pytest.mark.usefixtures("resetSettings")
class TestSiteDownload:
def testRename(self, file_server, site, site_temp):
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer(file_server.ip, 1544)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert site_temp.storage.isFile("content.json")
# Rename non-optional file
os.rename(site.storage.getPath("data/img/domain.png"), site.storage.getPath("data/img/domain-new.png"))
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
content = site.storage.loadJson("content.json")
assert "data/img/domain-new.png" in content["files"]
assert "data/img/domain.png" not in content["files"]
assert not site_temp.storage.isFile("data/img/domain-new.png")
assert site_temp.storage.isFile("data/img/domain.png")
settings_before = site_temp.settings
with Spy.Spy(FileRequest, "route") as requests:
site.publish()
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10) # Wait for download
assert "streamFile" not in [req[1] for req in requests]
content = site_temp.storage.loadJson("content.json")
assert "data/img/domain-new.png" in content["files"]
assert "data/img/domain.png" not in content["files"]
assert site_temp.storage.isFile("data/img/domain-new.png")
assert not site_temp.storage.isFile("data/img/domain.png")
assert site_temp.settings["size"] == settings_before["size"]
assert site_temp.settings["size_optional"] == settings_before["size_optional"]
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
def testRenameOptional(self, file_server, site, site_temp):
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer(file_server.ip, 1544)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert site_temp.settings["optional_downloaded"] == 0
site_temp.needFile("data/optional.txt")
assert site_temp.settings["optional_downloaded"] > 0
settings_before = site_temp.settings
hashfield_before = site_temp.content_manager.hashfield.tobytes()
# Rename optional file
os.rename(site.storage.getPath("data/optional.txt"), site.storage.getPath("data/optional-new.txt"))
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv", remove_missing_optional=True)
content = site.storage.loadJson("content.json")
assert "data/optional-new.txt" in content["files_optional"]
assert "data/optional.txt" not in content["files_optional"]
assert not site_temp.storage.isFile("data/optional-new.txt")
assert site_temp.storage.isFile("data/optional.txt")
with Spy.Spy(FileRequest, "route") as requests:
site.publish()
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10) # Wait for download
assert "streamFile" not in [req[1] for req in requests]
content = site_temp.storage.loadJson("content.json")
assert "data/optional-new.txt" in content["files_optional"]
assert "data/optional.txt" not in content["files_optional"]
assert site_temp.storage.isFile("data/optional-new.txt")
assert not site_temp.storage.isFile("data/optional.txt")
assert site_temp.settings["size"] == settings_before["size"]
assert site_temp.settings["size_optional"] == settings_before["size_optional"]
assert site_temp.settings["optional_downloaded"] == settings_before["optional_downloaded"]
assert site_temp.content_manager.hashfield.tobytes() == hashfield_before
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
def testArchivedDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Download normally
site_temp.addPeer(file_server.ip, 1544)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
bad_files = site_temp.storage.verifyFiles(quick_check=True)["bad_files"]
assert not bad_files
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" in site_temp.content_manager.contents
assert site_temp.storage.isFile("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 2
# Add archived data
assert "archived" not in site.content_manager.contents["data/users/content.json"]["user_contents"]
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", time.time()-1)
site.content_manager.contents["data/users/content.json"]["user_contents"]["archived"] = {"1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q": time.time()}
site.content_manager.sign("data/users/content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
date_archived = site.content_manager.contents["data/users/content.json"]["user_contents"]["archived"]["1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q"]
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived-1)
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived)
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived+1) # Allow user to update archived data later
# Push archived update
assert not "archived" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
site.publish()
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10) # Wait for download
# The archived content should disappear from remote client
assert "archived" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" not in site_temp.content_manager.contents
assert not site_temp.storage.isDir("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 1
assert len(list(site_temp.storage.query("SELECT * FROM json WHERE directory LIKE '%1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q%'"))) == 0
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
def testArchivedBeforeDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Download normally
site_temp.addPeer(file_server.ip, 1544)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
bad_files = site_temp.storage.verifyFiles(quick_check=True)["bad_files"]
assert not bad_files
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" in site_temp.content_manager.contents
assert site_temp.storage.isFile("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 2
# Add archived data
assert not "archived_before" in site.content_manager.contents["data/users/content.json"]["user_contents"]
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", time.time()-1)
content_modification_time = site.content_manager.contents["data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json"]["modified"]
site.content_manager.contents["data/users/content.json"]["user_contents"]["archived_before"] = content_modification_time
site.content_manager.sign("data/users/content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
date_archived = site.content_manager.contents["data/users/content.json"]["user_contents"]["archived_before"]
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived-1)
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived)
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived+1) # Allow user to update archived data later
# Push archived update
assert not "archived_before" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
site.publish()
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10) # Wait for download
# The archived content should disappear from remote client
assert "archived_before" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" not in site_temp.content_manager.contents
assert not site_temp.storage.isDir("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 1
assert len(list(site_temp.storage.query("SELECT * FROM json WHERE directory LIKE '%1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q%'"))) == 0
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
# Test when the connected peer has the optional file
def testOptionalDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = ConnectionServer(file_server.ip, 1545)
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer(file_server.ip, 1544)
# Download site
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
# Download optional data/optional.txt
site.storage.verifyFiles(quick_check=True) # Find what optional files we have
optional_file_info = site_temp.content_manager.getFileInfo("data/optional.txt")
assert site.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.storage.isFile("data/optional.txt")
assert site.storage.isFile("data/optional.txt")
site_temp.needFile("data/optional.txt")
assert site_temp.storage.isFile("data/optional.txt")
# Optional user file
assert not site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
optional_file_info = site_temp.content_manager.getFileInfo(
"data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif"
)
assert site.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
site_temp.needFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
# Test when the connected peer does not have the file, so ask it if it knows someone who has it
def testFindOptional(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init full source server (has optional files)
site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
file_server_full = FileServer(file_server.ip, 1546)
site_full.connection_server = file_server_full
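# Start a second, full file server that actually has the optional files; the client will
# learn about it from the source server (via findHashIds) and fetch the optional files from there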
def listen():
ConnectionServer.start(file_server_full)
ConnectionServer.listen(file_server_full)
gevent.spawn(listen)
time.sleep(0.001) # Port opening
file_server_full.sites[site_full.address] = site_full # Add site
site_full.storage.verifyFiles(quick_check=True) # Check optional files
site_full_peer = site.addPeer(file_server.ip, 1546) # Add it to source server
hashfield = site_full_peer.updateHashfield() # Update hashfield
assert len(site_full.content_manager.hashfield) == 8
assert hashfield
assert site_full.storage.isFile("data/optional.txt")
assert site_full.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert len(site_full_peer.hashfield) == 8
# Remove hashes from source server
for hash in list(site.content_manager.hashfield):
site.content_manager.hashfield.remove(hash)
# Init client server
site_temp.connection_server = ConnectionServer(file_server.ip, 1545)
site_temp.addPeer(file_server.ip, 1544) # Add source server
# Download normal files
site_temp.log.info("Start Downloading site")
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
# Download optional data/optional.txt
optional_file_info = site_temp.content_manager.getFileInfo("data/optional.txt")
optional_file_info2 = site_temp.content_manager.getFileInfo("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert not site_temp.storage.isFile("data/optional.txt")
assert not site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert not site.content_manager.hashfield.hasHash(optional_file_info["sha512"]) # Source server doesn't know it has the file
assert not site.content_manager.hashfield.hasHash(optional_file_info2["sha512"]) # Source server doesn't know it has the file
assert site_full_peer.hashfield.hasHash(optional_file_info["sha512"]) # Source full peer on source server has the file
assert site_full_peer.hashfield.hasHash(optional_file_info2["sha512"]) # Source full peer on source server has the file
assert site_full.content_manager.hashfield.hasHash(optional_file_info["sha512"]) # Source full server has the file
assert site_full.content_manager.hashfield.hasHash(optional_file_info2["sha512"]) # Source full server has the file
site_temp.log.info("Request optional files")
with Spy.Spy(FileRequest, "route") as requests:
# Request 2 files at the same time
threads = []
threads.append(site_temp.needFile("data/optional.txt", blocking=False))
threads.append(site_temp.needFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif", blocking=False))
gevent.joinall(threads)
assert len([request for request in requests if request[1] == "findHashIds"]) == 1 # findHashids should call only once
assert site_temp.storage.isFile("data/optional.txt")
assert site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.storage.deleteFiles()
file_server_full.stop()
[connection.close() for connection in file_server.connections]
site_full.content_manager.contents.db.close("FindOptional test end")
def testUpdate(self, file_server, site, site_temp):
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Don't try to find peers from the net
site.announce = mock.MagicMock(return_value=True)
site_temp.announce = mock.MagicMock(return_value=True)
# Connect peers
site_temp.addPeer(file_server.ip, 1544)
# Download site from site to site_temp
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert len(site_temp.bad_files) == 1
# Update file
data_original = site.storage.open("data/data.json").read()
data_new = data_original.replace(b'"ZeroBlog"', b'"UpdatedZeroBlog"')
assert data_original != data_new
site.storage.open("data/data.json", "wb").write(data_new)
assert site.storage.open("data/data.json").read() == data_new
assert site_temp.storage.open("data/data.json").read() == data_original
site.log.info("Publish new data.json without patch")
# Publish without patch
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
site.publish()
time.sleep(0.1)
site.log.info("Downloading site")
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert len([request for request in requests if request[1] in ("getFile", "streamFile")]) == 1
assert site_temp.storage.open("data/data.json").read() == data_new
# Close connection to avoid update spam limit
list(site.peers.values())[0].remove()
site.addPeer(file_server.ip, 1545)
list(site_temp.peers.values())[0].ping() # Connect back
time.sleep(0.1)
# Update with patch
data_new = data_original.replace(b'"ZeroBlog"', b'"PatchedZeroBlog"')
assert data_original != data_new
site.storage.open("data/data.json-new", "wb").write(data_new)
assert site.storage.open("data/data.json-new").read() == data_new
assert site_temp.storage.open("data/data.json").read() != data_new
# Generate diff
diffs = site.content_manager.getDiffs("content.json")
assert not site.storage.isFile("data/data.json-new") # New data file removed
assert site.storage.open("data/data.json").read() == data_new # -new postfix removed
assert "data/data.json" in diffs
assert diffs["data/data.json"] == [('=', 2), ('-', 29), ('+', [b'\t"title": "PatchedZeroBlog",\n']), ('=', 31102)]
# Publish with patch
site.log.info("Publish new data.json with patch")
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
event_done = gevent.event.AsyncResult()
site.publish(diffs=diffs)
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
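# The peer could patch data.json from the published diff, so no file download command was needed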
assert [request for request in requests if request[1] in ("getFile", "streamFile")] == []
assert site_temp.storage.open("data/data.json").read() == data_new
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
def testBigUpdate(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Connect peers
site_temp.addPeer(file_server.ip, 1544)
# Download site from site to site_temp
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert list(site_temp.bad_files.keys()) == ["data/users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C/content.json"]
# Update file
data_original = site.storage.open("data/data.json").read()
data_new = data_original.replace(b'"ZeroBlog"', b'"PatchedZeroBlog"')
assert data_original != data_new
site.storage.open("data/data.json-new", "wb").write(data_new)
assert site.storage.open("data/data.json-new").read() == data_new
assert site_temp.storage.open("data/data.json").read() != data_new
# Generate diff
diffs = site.content_manager.getDiffs("content.json")
assert not site.storage.isFile("data/data.json-new") # New data file removed
assert site.storage.open("data/data.json").read() == data_new # -new postfix removed
assert "data/data.json" in diffs
content_json = site.storage.loadJson("content.json")
content_json["description"] = "BigZeroBlog" * 1024 * 10
site.storage.writeJson("content.json", content_json)
site.content_manager.loadContent("content.json", force=True)
# Publish with patch
site.log.info("Publish new data.json with patch")
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
assert site.storage.getSize("content.json") > 10 * 1024 # Make sure content.json is now big (> 10KB)
site.publish(diffs=diffs)
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
file_requests = [request for request in requests if request[1] in ("getFile", "streamFile")]
assert len(file_requests) == 1
assert site_temp.storage.open("data/data.json").read() == data_new
assert site_temp.storage.open("content.json").read() == site.storage.open("content.json").read()
# Test what happens if the site's content.json is bigger than the site size limit
def testHugeContentSiteUpdate(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Connect peers
site_temp.addPeer(file_server.ip, 1544)
# Download site from site to site_temp
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
site_temp.settings["size_limit"] = int(20 * 1024 *1024)
site_temp.saveSettings()
# Raise limit size to 20MB on site so it can be signed
site.settings["size_limit"] = int(20 * 1024 *1024)
site.saveSettings()
content_json = site.storage.loadJson("content.json")
content_json["description"] = "PartirUnJour" * 1024 * 1024
site.storage.writeJson("content.json", content_json)
changed, deleted = site.content_manager.loadContent("content.json", force=True)
# Make sure we have 2 different content.json files
assert site_temp.storage.open("content.json").read() != site.storage.open("content.json").read()
# Generate diff
diffs = site.content_manager.getDiffs("content.json")
# Publish with patch
site.log.info("Publish new content.json bigger than 10MB")
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
assert site.storage.getSize("content.json") > 10 * 1024 * 1024 # Verify it's over 10MB
time.sleep(0.1)
site.publish(diffs=diffs)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
assert site_temp.storage.getSize("content.json") < site_temp.getSizeLimit() * 1024 * 1024
assert site_temp.storage.open("content.json").read() == site.storage.open("content.json").read()
def testUnicodeFilename(self, file_server, site, site_temp):
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer(file_server.ip, 1544)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10)
site.storage.write("data/img/árvíztűrő.png", b"test")
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
content = site.storage.loadJson("content.json")
assert "data/img/árvíztűrő.png" in content["files"]
assert not site_temp.storage.isFile("data/img/árvíztűrő.png")
settings_before = site_temp.settings
with Spy.Spy(FileRequest, "route") as requests:
site.publish()
time.sleep(0.1)
assert site_temp.download(blind_includes=True, retry_bad_files=False).get(timeout=10) # Wait for download
assert len([req[1] for req in requests if req[1] == "streamFile"]) == 1
content = site_temp.storage.loadJson("content.json")
assert "data/img/árvíztűrő.png" in content["files"]
assert site_temp.storage.isFile("data/img/árvíztűrő.png")
assert site_temp.settings["size"] == settings_before["size"]
assert site_temp.settings["size_optional"] == settings_before["size_optional"]
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]

View file

@ -1,25 +0,0 @@
import pytest
@pytest.mark.usefixtures("resetSettings")
class TestSiteStorage:
def testWalk(self, site):
# Rootdir
walk_root = list(site.storage.walk(""))
assert "content.json" in walk_root
assert "css/all.css" in walk_root
# Subdir
assert list(site.storage.walk("data-default")) == ["data.json", "users/content-default.json"]
def testList(self, site):
# Rootdir
list_root = list(site.storage.list(""))
assert "content.json" in list_root
assert "css/all.css" not in list_root
# Subdir
assert set(site.storage.list("data-default")) == set(["data.json", "users"])
def testDbRebuild(self, site):
assert site.storage.rebuildDb()

View file

@ -1,163 +0,0 @@
import time
import threading
import gevent
import pytest
from util import ThreadPool
class TestThreadPool:
def testExecutionOrder(self):
with ThreadPool.ThreadPool(4) as pool:
events = []
@pool.wrap
def blocker():
events.append("S")
out = 0
for i in range(10000000):
if i == 3000000:
events.append("M")
out += 1
events.append("D")
return out
threads = []
for i in range(3):
threads.append(gevent.spawn(blocker))
gevent.joinall(threads)
assert events == ["S"] * 3 + ["M"] * 3 + ["D"] * 3
res = blocker()
assert res == 10000000
def testLockBlockingSameThread(self):
lock = ThreadPool.Lock()
s = time.time()
def unlocker():
time.sleep(1)
lock.release()
gevent.spawn(unlocker)
lock.acquire(True)
lock.acquire(True, timeout=2)
unlock_taken = time.time() - s
assert 1.0 < unlock_taken < 1.5
def testLockBlockingDifferentThread(self):
lock = ThreadPool.Lock()
def locker():
lock.acquire(True)
time.sleep(0.5)
lock.release()
with ThreadPool.ThreadPool(10) as pool:
threads = [
pool.spawn(locker),
pool.spawn(locker),
gevent.spawn(locker),
pool.spawn(locker)
]
time.sleep(0.1)
s = time.time()
lock.acquire(True, 5.0)
unlock_taken = time.time() - s
assert 1.8 < unlock_taken < 2.2
gevent.joinall(threads)
def testMainLoopCallerThreadId(self):
main_thread_id = threading.current_thread().ident
with ThreadPool.ThreadPool(5) as pool:
def getThreadId(*args, **kwargs):
return threading.current_thread().ident
t = pool.spawn(getThreadId)
assert t.get() != main_thread_id
t = pool.spawn(lambda: ThreadPool.main_loop.call(getThreadId))
assert t.get() == main_thread_id
def testMainLoopCallerGeventSpawn(self):
main_thread_id = threading.current_thread().ident
with ThreadPool.ThreadPool(5) as pool:
def waiter():
time.sleep(1)
return threading.current_thread().ident
def geventSpawner():
event = ThreadPool.main_loop.call(gevent.spawn, waiter)
with pytest.raises(Exception) as greenlet_err:
event.get()
assert str(greenlet_err.value) == "cannot switch to a different thread"
waiter_thread_id = ThreadPool.main_loop.call(event.get)
return waiter_thread_id
s = time.time()
waiter_thread_id = pool.apply(geventSpawner)
assert main_thread_id == waiter_thread_id
time_taken = time.time() - s
assert 0.9 < time_taken < 1.2
def testEvent(self):
with ThreadPool.ThreadPool(5) as pool:
event = ThreadPool.Event()
def setter():
time.sleep(1)
event.set("done!")
def getter():
return event.get()
pool.spawn(setter)
t_gevent = gevent.spawn(getter)
t_pool = pool.spawn(getter)
s = time.time()
assert event.get() == "done!"
time_taken = time.time() - s
gevent.joinall([t_gevent, t_pool])
assert t_gevent.get() == "done!"
assert t_pool.get() == "done!"
assert 0.9 < time_taken < 1.2
with pytest.raises(Exception) as err:
event.set("another result")
assert "Event already has value" in str(err.value)
def testMemoryLeak(self):
import gc
thread_objs_before = [id(obj) for obj in gc.get_objects() if "threadpool" in str(type(obj))]
def worker():
time.sleep(0.1)
return "ok"
def poolTest():
with ThreadPool.ThreadPool(5) as pool:
for i in range(20):
pool.spawn(worker)
for i in range(5):
poolTest()
new_thread_objs = [obj for obj in gc.get_objects() if "threadpool" in str(type(obj)) and id(obj) not in thread_objs_before]
#print("New objs:", new_thread_objs, "run:", num_run)
# Make sure no threadpool object left behind
assert not new_thread_objs

View file

@ -1,153 +0,0 @@
import time
import pytest
import mock
from File import FileServer
from Crypt import CryptTor
from Config import config
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.usefixtures("resetTempSettings")
class TestTor:
def testDownload(self, tor_manager):
for retry in range(15):
time.sleep(1)
if tor_manager.enabled and tor_manager.conn:
break
assert tor_manager.enabled
def testManagerConnection(self, tor_manager):
assert "250-version" in tor_manager.request("GETINFO version")
def testAddOnion(self, tor_manager):
# Add
address = tor_manager.addOnion()
assert address
assert address in tor_manager.privatekeys
# Delete
assert tor_manager.delOnion(address)
assert address not in tor_manager.privatekeys
def testSignOnion(self, tor_manager):
address = tor_manager.addOnion()
# Sign
sign = CryptTor.sign(b"hello", tor_manager.getPrivatekey(address))
assert len(sign) == 128
# Verify
publickey = CryptTor.privatekeyToPublickey(tor_manager.getPrivatekey(address))
assert len(publickey) == 140
assert CryptTor.verify(b"hello", publickey, sign)
assert not CryptTor.verify(b"not hello", publickey, sign)
# Pub to address
assert CryptTor.publickeyToOnion(publickey) == address
# Delete
tor_manager.delOnion(address)
@pytest.mark.slow
def testConnection(self, tor_manager, file_server, site, site_temp):
file_server.tor_manager.start_onions = True
address = file_server.tor_manager.getOnion(site.address)
assert address
print("Connecting to", address)
for retry in range(5): # Wait for hidden service creation
time.sleep(10)
try:
connection = file_server.getConnection(address + ".onion", 1544)
if connection:
break
except Exception as err:
continue
assert connection.handshake
assert not connection.handshake["peer_id"] # No peer_id for Tor connections
# Return the same connection without site specified
assert file_server.getConnection(address + ".onion", 1544) == connection
# No reuse for different site
assert file_server.getConnection(address + ".onion", 1544, site=site) != connection
assert file_server.getConnection(address + ".onion", 1544, site=site) == file_server.getConnection(address + ".onion", 1544, site=site)
site_temp.address = "1OTHERSITE"
assert file_server.getConnection(address + ".onion", 1544, site=site) != file_server.getConnection(address + ".onion", 1544, site=site_temp)
# Only allow queries from the locked site
file_server.sites[site.address] = site
connection_locked = file_server.getConnection(address + ".onion", 1544, site=site)
assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site"
def testPex(self, file_server, site, site_temp):
# Register site to currently running fileserver
site.connection_server = file_server
file_server.sites[site.address] = site
# Create a new file server to emulate new peer connecting to our peer
file_server_temp = FileServer(file_server.ip, 1545)
site_temp.connection_server = file_server_temp
file_server_temp.sites[site_temp.address] = site_temp
# We will request peers from this
peer_source = site_temp.addPeer(file_server.ip, 1544)
# Get ip4 peers from source site
site.addPeer("1.2.3.4", 1555) # Add peer to source site
assert peer_source.pex(need_num=10) == 1
assert len(site_temp.peers) == 2
assert "1.2.3.4:1555" in site_temp.peers
# Get onion peers from source site
site.addPeer("bka4ht2bzxchy44r.onion", 1555)
assert "bka4ht2bzxchy44r.onion:1555" not in site_temp.peers
# Don't add onion peers if not supported
assert "onion" not in file_server_temp.supported_ip_types
assert peer_source.pex(need_num=10) == 0
file_server_temp.supported_ip_types.append("onion")
assert peer_source.pex(need_num=10) == 1
assert "bka4ht2bzxchy44r.onion:1555" in site_temp.peers
def testFindHash(self, tor_manager, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
file_server.sites[site.address] = site
file_server.tor_manager = tor_manager
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
# Add file_server as peer to client
peer_file_server = site_temp.addPeer(file_server.ip, 1544)
assert peer_file_server.findHashIds([1234]) == {}
# Add fake peer with the required hash
fake_peer_1 = site.addPeer("bka4ht2bzxchy44r.onion", 1544)
fake_peer_1.hashfield.append(1234)
fake_peer_2 = site.addPeer("1.2.3.5", 1545)
fake_peer_2.hashfield.append(1234)
fake_peer_2.hashfield.append(1235)
fake_peer_3 = site.addPeer("1.2.3.6", 1546)
fake_peer_3.hashfield.append(1235)
fake_peer_3.hashfield.append(1236)
res = peer_file_server.findHashIds([1234, 1235])
assert sorted(res[1234]) == [('1.2.3.5', 1545), ("bka4ht2bzxchy44r.onion", 1544)]
assert sorted(res[1235]) == [('1.2.3.5', 1545), ('1.2.3.6', 1546)]
# Test adding my own address
site.content_manager.hashfield.append(1234)
res = peer_file_server.findHashIds([1234, 1235])
assert sorted(res[1234]) == [('1.2.3.5', 1545), (file_server.ip, 1544), ("bka4ht2bzxchy44r.onion", 1544)]
assert sorted(res[1235]) == [('1.2.3.5', 1545), ('1.2.3.6', 1546)]
def testSiteOnion(self, tor_manager):
with mock.patch.object(config, "tor", "always"):
assert tor_manager.getOnion("address1") != tor_manager.getOnion("address2")
assert tor_manager.getOnion("address1") == tor_manager.getOnion("address1")

View file

@ -1,61 +0,0 @@
from Translate import Translate
class TestTranslate:
def testTranslateStrict(self):
translate = Translate()
data = """
translated = _("original")
not_translated = "original"
"""
data_translated = translate.translateData(data, {"_(original)": "translated"})
assert 'translated = _("translated")' in data_translated
assert 'not_translated = "original"' in data_translated
def testTranslateStrictNamed(self):
translate = Translate()
data = """
translated = _("original", "original named")
translated_other = _("original", "original other named")
not_translated = "original"
"""
data_translated = translate.translateData(data, {"_(original, original named)": "translated"})
assert 'translated = _("translated")' in data_translated
assert 'not_translated = "original"' in data_translated
def testTranslateUtf8(self):
translate = Translate()
data = """
greeting = "Hi again árvztűrőtökörfúrógép!"
"""
data_translated = translate.translateData(data, {"Hi again árvztűrőtökörfúrógép!": "Üdv újra árvztűrőtökörfúrógép!"})
assert data_translated == """
greeting = "Üdv újra árvztűrőtökörfúrógép!"
"""
def testTranslateEscape(self):
_ = Translate()
_["Hello"] = "Szia"
# Simple escaping
data = "{_[Hello]} {username}!"
username = "Hacker<script>alert('boom')</script>"
data_translated = _(data)
assert 'Szia' in data_translated
assert '<' not in data_translated
assert data_translated == "Szia Hacker&lt;script&gt;alert(&#x27;boom&#x27;)&lt;/script&gt;!"
# Escaping dicts
user = {"username": "Hacker<script>alert('boom')</script>"}
data = "{_[Hello]} {user[username]}!"
data_translated = _(data)
assert 'Szia' in data_translated
assert '<' not in data_translated
assert data_translated == "Szia Hacker&lt;script&gt;alert(&#x27;boom&#x27;)&lt;/script&gt;!"
# Escaping lists
users = [{"username": "Hacker<script>alert('boom')</script>"}]
data = "{_[Hello]} {users[0][username]}!"
data_translated = _(data)
assert 'Szia' in data_translated
assert '<' not in data_translated
assert data_translated == "Szia Hacker&lt;script&gt;alert(&#x27;boom&#x27;)&lt;/script&gt;!"

View file

@ -1,11 +0,0 @@
import sys
import pytest
@pytest.mark.usefixtures("resetSettings")
class TestUiWebsocket:
def testPermission(self, ui_websocket):
res = ui_websocket.testAction("ping")
assert res == "pong"
res = ui_websocket.testAction("certList")
assert "You don't have permission" in res["error"]

View file

@ -1,274 +0,0 @@
import socket
from urllib.parse import urlparse
import pytest
import mock
from util import UpnpPunch as upnp
@pytest.fixture
def mock_socket():
mock_socket = mock.MagicMock()
mock_socket.recv = mock.MagicMock(return_value=b'Hello')
mock_socket.bind = mock.MagicMock()
mock_socket.send_to = mock.MagicMock()
return mock_socket
@pytest.fixture
def url_obj():
return urlparse('http://192.168.1.1/ctrlPoint.xml')
@pytest.fixture(params=['WANPPPConnection', 'WANIPConnection'])
def igd_profile(request):
return """<root><serviceList><service>
<serviceType>urn:schemas-upnp-org:service:{}:1</serviceType>
<serviceId>urn:upnp-org:serviceId:wanpppc:pppoa</serviceId>
<controlURL>/upnp/control/wanpppcpppoa</controlURL>
<eventSubURL>/upnp/event/wanpppcpppoa</eventSubURL>
<SCPDURL>/WANPPPConnection.xml</SCPDURL>
</service></serviceList></root>""".format(request.param)
@pytest.fixture
def httplib_response():
class FakeResponse(object):
def __init__(self, status=200, body='OK'):
self.status = status
self.body = body
def read(self):
return self.body
return FakeResponse
class TestUpnpPunch(object):
def test_perform_m_search(self, mock_socket):
local_ip = '127.0.0.1'
with mock.patch('util.UpnpPunch.socket.socket',
return_value=mock_socket):
result = upnp.perform_m_search(local_ip)
assert result == 'Hello'
assert local_ip == mock_socket.bind.call_args_list[0][0][0][0]
assert ('239.255.255.250',
1900) == mock_socket.sendto.call_args_list[0][0][1]
def test_perform_m_search_socket_error(self, mock_socket):
mock_socket.recv.side_effect = socket.error('Timeout error')
with mock.patch('util.UpnpPunch.socket.socket',
return_value=mock_socket):
with pytest.raises(upnp.UpnpError):
upnp.perform_m_search('127.0.0.1')
def test_retrieve_location_from_ssdp(self, url_obj):
ctrl_location = url_obj.geturl()
parsed_location = urlparse(ctrl_location)
rsp = ('auth: gibberish\r\nlocation: {0}\r\n'
'Content-Type: text/html\r\n\r\n').format(ctrl_location)
result = upnp._retrieve_location_from_ssdp(rsp)
assert result == parsed_location
def test_retrieve_location_from_ssdp_no_header(self):
rsp = 'auth: gibberish\r\nContent-Type: application/json\r\n\r\n'
with pytest.raises(upnp.IGDError):
upnp._retrieve_location_from_ssdp(rsp)
def test_retrieve_igd_profile(self, url_obj):
with mock.patch('urllib.request.urlopen') as mock_urlopen:
upnp._retrieve_igd_profile(url_obj)
mock_urlopen.assert_called_with(url_obj.geturl(), timeout=5)
def test_retrieve_igd_profile_timeout(self, url_obj):
with mock.patch('urllib.request.urlopen') as mock_urlopen:
mock_urlopen.side_effect = socket.error('Timeout error')
with pytest.raises(upnp.IGDError):
upnp._retrieve_igd_profile(url_obj)
def test_parse_igd_profile_service_type(self, igd_profile):
control_path, upnp_schema = upnp._parse_igd_profile(igd_profile)
assert control_path == '/upnp/control/wanpppcpppoa'
assert upnp_schema in ('WANPPPConnection', 'WANIPConnection',)
def test_parse_igd_profile_no_ctrlurl(self, igd_profile):
igd_profile = igd_profile.replace('controlURL', 'nope')
with pytest.raises(upnp.IGDError):
control_path, upnp_schema = upnp._parse_igd_profile(igd_profile)
def test_parse_igd_profile_no_schema(self, igd_profile):
igd_profile = igd_profile.replace('Connection', 'nope')
with pytest.raises(upnp.IGDError):
control_path, upnp_schema = upnp._parse_igd_profile(igd_profile)
def test_create_open_message_parsable(self):
from xml.parsers.expat import ExpatError
msg, _ = upnp._create_open_message('127.0.0.1', 8888)
try:
upnp.parseString(msg)
except ExpatError as e:
pytest.fail('Incorrect XML message: {}'.format(e))
def test_create_open_message_contains_right_stuff(self):
settings = {'description': 'test desc',
'protocol': 'test proto',
'upnp_schema': 'test schema'}
msg, fn_name = upnp._create_open_message('127.0.0.1', 8888, **settings)
assert fn_name == 'AddPortMapping'
assert '127.0.0.1' in msg
assert '8888' in msg
assert settings['description'] in msg
assert settings['protocol'] in msg
assert settings['upnp_schema'] in msg
def test_parse_for_errors_bad_rsp(self, httplib_response):
rsp = httplib_response(status=500)
with pytest.raises(upnp.IGDError) as err:
upnp._parse_for_errors(rsp)
assert 'Unable to parse' in str(err.value)
def test_parse_for_errors_error(self, httplib_response):
soap_error = ('<document>'
'<errorCode>500</errorCode>'
'<errorDescription>Bad request</errorDescription>'
'</document>')
rsp = httplib_response(status=500, body=soap_error)
with pytest.raises(upnp.IGDError) as err:
upnp._parse_for_errors(rsp)
assert 'SOAP request error' in str(err.value)
def test_parse_for_errors_good_rsp(self, httplib_response):
rsp = httplib_response(status=200)
assert rsp == upnp._parse_for_errors(rsp)
def test_send_requests_success(self):
with mock.patch(
'util.UpnpPunch._send_soap_request') as mock_send_request:
mock_send_request.return_value = mock.MagicMock(status=200)
upnp._send_requests(['msg'], None, None, None)
assert mock_send_request.called
def test_send_requests_failed(self):
with mock.patch(
'util.UpnpPunch._send_soap_request') as mock_send_request:
mock_send_request.return_value = mock.MagicMock(status=500)
with pytest.raises(upnp.UpnpError):
upnp._send_requests(['msg'], None, None, None)
assert mock_send_request.called
def test_collect_idg_data(self):
pass
@mock.patch('util.UpnpPunch._get_local_ips')
@mock.patch('util.UpnpPunch._collect_idg_data')
@mock.patch('util.UpnpPunch._send_requests')
def test_ask_to_open_port_success(self, mock_send_requests,
mock_collect_idg, mock_local_ips):
mock_collect_idg.return_value = {'upnp_schema': 'schema-yo'}
mock_local_ips.return_value = ['192.168.0.12']
result = upnp.ask_to_open_port(retries=5)
soap_msg = mock_send_requests.call_args[0][0][0][0]
assert result is True
assert mock_collect_idg.called
assert '192.168.0.12' in soap_msg
assert '15441' in soap_msg
assert 'schema-yo' in soap_msg
@mock.patch('util.UpnpPunch._get_local_ips')
@mock.patch('util.UpnpPunch._collect_idg_data')
@mock.patch('util.UpnpPunch._send_requests')
def test_ask_to_open_port_failure(self, mock_send_requests,
mock_collect_idg, mock_local_ips):
mock_local_ips.return_value = ['192.168.0.12']
mock_collect_idg.return_value = {'upnp_schema': 'schema-yo'}
mock_send_requests.side_effect = upnp.UpnpError()
with pytest.raises(upnp.UpnpError):
upnp.ask_to_open_port()
@mock.patch('util.UpnpPunch._collect_idg_data')
@mock.patch('util.UpnpPunch._send_requests')
def test_orchestrate_soap_request(self, mock_send_requests,
mock_collect_idg):
soap_mock = mock.MagicMock()
args = ['127.0.0.1', 31337, soap_mock, 'upnp-test', {'upnp_schema':
'schema-yo'}]
mock_collect_idg.return_value = args[-1]
upnp._orchestrate_soap_request(*args[:-1])
assert mock_collect_idg.called
soap_mock.assert_called_with(
*args[:2] + ['upnp-test', 'UDP', 'schema-yo'])
assert mock_send_requests.called
@mock.patch('util.UpnpPunch._collect_idg_data')
@mock.patch('util.UpnpPunch._send_requests')
def test_orchestrate_soap_request_without_desc(self, mock_send_requests,
mock_collect_idg):
soap_mock = mock.MagicMock()
args = ['127.0.0.1', 31337, soap_mock, {'upnp_schema': 'schema-yo'}]
mock_collect_idg.return_value = args[-1]
upnp._orchestrate_soap_request(*args[:-1])
assert mock_collect_idg.called
soap_mock.assert_called_with(*args[:2] + [None, 'UDP', 'schema-yo'])
assert mock_send_requests.called
def test_create_close_message_parsable(self):
from xml.parsers.expat import ExpatError
msg, _ = upnp._create_close_message('127.0.0.1', 8888)
try:
upnp.parseString(msg)
except ExpatError as e:
pytest.fail('Incorrect XML message: {}'.format(e))
def test_create_close_message_contains_right_stuff(self):
settings = {'protocol': 'test proto',
'upnp_schema': 'test schema'}
msg, fn_name = upnp._create_close_message('127.0.0.1', 8888, **
settings)
assert fn_name == 'DeletePortMapping'
assert '8888' in msg
assert settings['protocol'] in msg
assert settings['upnp_schema'] in msg
@mock.patch('util.UpnpPunch._get_local_ips')
@mock.patch('util.UpnpPunch._orchestrate_soap_request')
def test_communicate_with_igd_success(self, mock_orchestrate,
mock_get_local_ips):
mock_get_local_ips.return_value = ['192.168.0.12']
upnp._communicate_with_igd()
assert mock_get_local_ips.called
assert mock_orchestrate.called
@mock.patch('util.UpnpPunch._get_local_ips')
@mock.patch('util.UpnpPunch._orchestrate_soap_request')
def test_communicate_with_igd_succeed_despite_single_failure(
self, mock_orchestrate, mock_get_local_ips):
mock_get_local_ips.return_value = ['192.168.0.12']
mock_orchestrate.side_effect = [upnp.UpnpError, None]
upnp._communicate_with_igd(retries=2)
assert mock_get_local_ips.called
assert mock_orchestrate.called
@mock.patch('util.UpnpPunch._get_local_ips')
@mock.patch('util.UpnpPunch._orchestrate_soap_request')
def test_communicate_with_igd_total_failure(self, mock_orchestrate,
mock_get_local_ips):
mock_get_local_ips.return_value = ['192.168.0.12']
mock_orchestrate.side_effect = [upnp.UpnpError, upnp.IGDError]
with pytest.raises(upnp.UpnpError):
upnp._communicate_with_igd(retries=2)
assert mock_get_local_ips.called
assert mock_orchestrate.called

View file

@ -1,50 +0,0 @@
import pytest
from Crypt import CryptBitcoin
@pytest.mark.usefixtures("resetSettings")
class TestUser:
def testAddress(self, user):
assert user.master_address == "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc"
address_index = 1458664252141532163166741013621928587528255888800826689784628722366466547364755811
assert user.getAddressAuthIndex("15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc") == address_index
# Re-generate privatekey based on address_index
def testNewSite(self, user):
address, address_index, site_data = user.getNewSiteData() # Create a new random site
assert CryptBitcoin.hdPrivatekey(user.master_seed, address_index) == site_data["privatekey"]
user.sites = {} # Reset user data
# Site address and auth address are different
assert user.getSiteData(address)["auth_address"] != address
# Re-generate auth_privatekey for site
assert user.getSiteData(address)["auth_privatekey"] == site_data["auth_privatekey"]
def testAuthAddress(self, user):
# Auth address without Cert
auth_address = user.getAuthAddress("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr")
assert auth_address == "1MyJgYQjeEkR9QD66nkfJc9zqi9uUy5Lr2"
auth_privatekey = user.getAuthPrivatekey("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr")
assert CryptBitcoin.privatekeyToAddress(auth_privatekey) == auth_address
def testCert(self, user):
cert_auth_address = user.getAuthAddress("1iD5ZQJMNXu43w1qLB8sfdHVKppVMduGz") # Add site to user's registry
# Add cert
user.addCert(cert_auth_address, "zeroid.bit", "faketype", "fakeuser", "fakesign")
user.setCert("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr", "zeroid.bit")
# By using a certificate, the auth address should be the same as the certificate provider's
assert user.getAuthAddress("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr") == cert_auth_address
auth_privatekey = user.getAuthPrivatekey("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr")
assert CryptBitcoin.privatekeyToAddress(auth_privatekey) == cert_auth_address
# Test delete site data
assert "1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr" in user.sites
user.deleteSiteData("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr")
assert "1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr" not in user.sites
# Re-adding the site should generate a normal, unique auth_address
assert user.getAuthAddress("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr") != cert_auth_address
assert user.getAuthAddress("1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr") == "1MyJgYQjeEkR9QD66nkfJc9zqi9uUy5Lr2"

View file

@ -1,105 +0,0 @@
import urllib.request
import pytest
try:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import staleness_of, title_is
from selenium.common.exceptions import NoSuchElementException
except:
pass
class WaitForPageLoad(object):
def __init__(self, browser):
self.browser = browser
def __enter__(self):
self.old_page = self.browser.find_element_by_tag_name('html')
def __exit__(self, *args):
WebDriverWait(self.browser, 10).until(staleness_of(self.old_page))
def getContextUrl(browser):
return browser.execute_script("return window.location.toString()")
def getUrl(url):
content = urllib.request.urlopen(url).read().decode("utf8")
assert "server error" not in content.lower(), "Got a server error! " + repr(url)
return content
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.webtest
class TestWeb:
def testFileSecurity(self, site_url):
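# Direct requests for ZeroNet's own data files and path traversal attempts must all be rejected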
assert "Not Found" in getUrl("%s/media/sites.json" % site_url)
assert "Forbidden" in getUrl("%s/media/./sites.json" % site_url)
assert "Forbidden" in getUrl("%s/media/../config.py" % site_url)
assert "Forbidden" in getUrl("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url)
assert "Forbidden" in getUrl("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url)
assert "Forbidden" in getUrl("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url)
assert "Not Found" in getUrl("%s/raw/sites.json" % site_url)
assert "Forbidden" in getUrl("%s/raw/./sites.json" % site_url)
assert "Forbidden" in getUrl("%s/raw/../config.py" % site_url)
assert "Forbidden" in getUrl("%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url)
assert "Forbidden" in getUrl("%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url)
assert "Forbidden" in getUrl("%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url)
assert "Forbidden" in getUrl("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url)
assert "Forbidden" in getUrl("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url)
assert "Forbidden" in getUrl("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url)
assert "Forbidden" in getUrl("%s/content.db" % site_url)
assert "Forbidden" in getUrl("%s/./users.json" % site_url)
assert "Forbidden" in getUrl("%s/./key-rsa.pem" % site_url)
assert "Forbidden" in getUrl("%s/././././././././././//////sites.json" % site_url)
def testLinkSecurity(self, browser, site_url):
browser.get("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url)
WebDriverWait(browser, 10).until(title_is("ZeroHello - ZeroNet"))
assert getContextUrl(browser) == "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
# Switch to inner frame
browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
assert "wrapper_nonce" in getContextUrl(browser)
assert browser.find_element_by_id("script_output").text == "Result: Works"
browser.switch_to.default_content()
# Clicking on links without target
browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
with WaitForPageLoad(browser):
browser.find_element_by_id("link_to_current").click()
assert "wrapper_nonce" not in getContextUrl(browser) # The browser object back to default content
assert "Forbidden" not in browser.page_source
# Check if we have frame inside frame
browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
with pytest.raises(NoSuchElementException):
assert not browser.find_element_by_id("inner-iframe")
browser.switch_to.default_content()
# Clicking on link with target=_top
browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
with WaitForPageLoad(browser):
browser.find_element_by_id("link_to_top").click()
assert "wrapper_nonce" not in getContextUrl(browser) # The browser object back to default content
assert "Forbidden" not in browser.page_source
browser.switch_to.default_content()
# Try to escape from inner_frame
browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
assert "wrapper_nonce" in getContextUrl(browser) # Make sure we are inside of the inner-iframe
with WaitForPageLoad(browser):
browser.execute_script("window.top.location = window.location")
assert "wrapper_nonce" in getContextUrl(browser) # We try to use nonce-ed html without iframe
assert "<iframe" in browser.page_source # Only allow to use nonce once-time
browser.switch_to.default_content()
def testRaw(self, browser, site_url):
browser.get("%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url)
WebDriverWait(browser, 10).until(title_is("Security tests"))
assert getContextUrl(browser) == "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
assert browser.find_element_by_id("script_output").text == "Result: Fail"

View file

@ -1,128 +0,0 @@
import pytest
from Worker import WorkerTaskManager
from . import Spy
class TestUiWebsocket:
def checkSort(self, tasks): # Check if it has the same order as a list sorted separately
tasks_list = list(tasks)
tasks_list.sort(key=lambda task: task["id"])
assert tasks_list != list(tasks)
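# Expected order: highest effective priority first (each worker already assigned to a task
# lowers its priority by 10), ties broken by insertion id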
tasks_list.sort(key=lambda task: (0 - (task["priority"] - task["workers_num"] * 10), task["id"]))
assert tasks_list == list(tasks)
def testAppendSimple(self):
tasks = WorkerTaskManager.WorkerTaskManager()
tasks.append({"id": 1, "priority": 15, "workers_num": 1, "inner_path": "file1.json"})
tasks.append({"id": 2, "priority": 1, "workers_num": 0, "inner_path": "file2.json"})
tasks.append({"id": 3, "priority": 8, "workers_num": 0, "inner_path": "file3.json"})
assert [task["inner_path"] for task in tasks] == ["file3.json", "file1.json", "file2.json"]
self.checkSort(tasks)
def testAppendMany(self):
tasks = WorkerTaskManager.WorkerTaskManager()
for i in range(1000):
tasks.append({"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i})
assert tasks[0]["inner_path"] == "file39.json"
assert tasks[-1]["inner_path"] == "file980.json"
self.checkSort(tasks)
def testRemove(self):
tasks = WorkerTaskManager.WorkerTaskManager()
for i in range(1000):
tasks.append({"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i})
i = 333
task = {"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i}
assert task in tasks
with Spy.Spy(tasks, "indexSlow") as calls:
tasks.remove(task)
assert len(calls) == 0
assert task not in tasks
# Remove non-existent item
with Spy.Spy(tasks, "indexSlow") as calls:
with pytest.raises(ValueError):
tasks.remove(task)
assert len(calls) == 0
self.checkSort(tasks)
def testRemoveAll(self):
tasks = WorkerTaskManager.WorkerTaskManager()
tasks_list = []
for i in range(1000):
task = {"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i}
tasks.append(task)
tasks_list.append(task)
for task in tasks_list:
tasks.remove(task)
assert len(tasks.inner_paths) == 0
assert len(tasks) == 0
def testModify(self):
tasks = WorkerTaskManager.WorkerTaskManager()
for i in range(1000):
tasks.append({"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i})
task = tasks[333]
task["priority"] += 10
with pytest.raises(AssertionError):
self.checkSort(tasks)
with Spy.Spy(tasks, "indexSlow") as calls:
tasks.updateItem(task)
assert len(calls) == 1
assert task in tasks
self.checkSort(tasks)
# Check reorder optimization
with Spy.Spy(tasks, "indexSlow") as calls:
tasks.updateItem(task, "priority", task["priority"] + 10)
assert len(calls) == 0
with Spy.Spy(tasks, "indexSlow") as calls:
tasks.updateItem(task, "priority", task["workers_num"] - 1)
assert len(calls) == 0
self.checkSort(tasks)
def testModifySamePriority(self):
tasks = WorkerTaskManager.WorkerTaskManager()
for i in range(1000):
tasks.append({"id": i, "priority": 10, "workers_num": 5, "inner_path": "file%s.json" % i})
task = tasks[333]
# Check reorder optimization
with Spy.Spy(tasks, "indexSlow") as calls:
tasks.updateItem(task, "priority", task["workers_num"] - 1)
assert len(calls) == 0
def testIn(self):
tasks = WorkerTaskManager.WorkerTaskManager()
i = 1
task = {"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i}
assert task not in tasks
def testFindTask(self):
tasks = WorkerTaskManager.WorkerTaskManager()
for i in range(1000):
tasks.append({"id": i, "priority": i % 20, "workers_num": i % 3, "inner_path": "file%s.json" % i})
assert tasks.findTask("file999.json")
assert not tasks.findTask("file-unknown.json")
tasks.remove(tasks.findTask("file999.json"))
assert not tasks.findTask("file999.json")

View file

View file

@ -1,497 +0,0 @@
import os
import sys
import urllib.request
import time
import logging
import json
import shutil
import gc
import datetime
import atexit
import threading
import socket
import pytest
import mock
import gevent
if "libev" not in str(gevent.config.loop):
# Workaround for random crash when libuv used with threads
gevent.config.loop = "libev-cext"
import gevent.event
from gevent import monkey
monkey.patch_all(thread=False, subprocess=False)
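# Patch the standard library for gevent, but leave threading and subprocess unpatched so
# thread-based code (e.g. the ThreadPool tests) keeps using real OS threads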
atexit_register = atexit.register
atexit.register = lambda func: "" # Don't register shutdown functions to avoid IO error on exit
def pytest_addoption(parser):
parser.addoption("--slow", action='store_true', default=False, help="Also run slow tests")
def pytest_collection_modifyitems(config, items):
if config.getoption("--slow"):
# --slow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
# Config
if sys.platform == "win32":
CHROMEDRIVER_PATH = "tools/chrome/chromedriver.exe"
else:
CHROMEDRIVER_PATH = "chromedriver"
SITE_URL = "http://127.0.0.1:43110"
TEST_DATA_PATH = 'src/Test/testdata'
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/../lib")) # External modules directory
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/..")) # Imports relative to src dir
from Config import config
config.argv = ["none"] # Dont pass any argv to config parser
config.parse(silent=True, parse_config=False) # Plugins need to access the configuration
config.action = "test"
# Load plugins
from Plugin import PluginManager
config.data_dir = TEST_DATA_PATH # Use test data for unittests
config.debug = True
os.chdir(os.path.abspath(os.path.dirname(__file__) + "/../..")) # Set working dir
all_loaded = PluginManager.plugin_manager.loadPlugins()
assert all_loaded, "Not all plugins loaded successfully"
config.loadPlugins()
config.parse(parse_config=False) # Parse again to add plugin configuration options
config.action = "test"
config.debug = True
config.debug_socket = True  # Log socket traffic during tests
config.verbose = True  # Verbose logging during tests
config.tor = "disable" # Don't start Tor client
config.trackers = []
config.data_dir = TEST_DATA_PATH # Use test data for unittests
if "ZERONET_LOG_DIR" in os.environ:
    config.log_dir = os.environ["ZERONET_LOG_DIR"]
config.initLogging(console_logging=False)
# Set custom formatter with relative time format (via: https://stackoverflow.com/questions/31521859/python-logging-module-time-since-last-log)
time_start = time.time()
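# TimeFilter prefixes every log record with the time elapsed since the test started, marks
# slow gaps between records (!, *, -) and tags records emitted from non-main threads.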
class TimeFilter(logging.Filter):
    def __init__(self, *args, **kwargs):
        self.time_last = time.time()
        self.main_thread_id = threading.current_thread().ident
        super().__init__(*args, **kwargs)

    def filter(self, record):
        if threading.current_thread().ident != self.main_thread_id:
            record.thread_marker = "T"
            record.thread_title = "(Thread#%s)" % self.main_thread_id
        else:
            record.thread_marker = " "
            record.thread_title = ""

        since_last = time.time() - self.time_last
        if since_last > 0.1:
            line_marker = "!"
        elif since_last > 0.02:
            line_marker = "*"
        elif since_last > 0.01:
            line_marker = "-"
        else:
            line_marker = " "

        since_start = time.time() - time_start
        record.since_start = "%s%.3fs" % (line_marker, since_start)

        self.time_last = time.time()
        return True
log = logging.getLogger()
fmt = logging.Formatter(fmt='%(since_start)s %(thread_marker)s %(levelname)-8s %(name)s %(message)s %(thread_title)s')
[hndl.addFilter(TimeFilter()) for hndl in log.handlers]
[hndl.setFormatter(fmt) for hndl in log.handlers]
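# The ZeroNet modules are imported only after the config and plugin setup above, so they pick
# up the test configuration (and any plugin patches) at import time.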
from Site.Site import Site
from Site import SiteManager
from User import UserManager
from File import FileServer
from Connection import ConnectionServer
from Crypt import CryptConnection
from Crypt import CryptBitcoin
from Ui import UiWebsocket
from Tor import TorManager
from Content import ContentDb
from util import RateLimit
from Db import Db
from Debug import Debug
gevent.get_hub().NOT_ERROR += (Debug.Notify,)
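# Remove files generated during the test run (certs, databases, json and temp files) from the
# data dirs, leaving everything else in place.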
def cleanup():
    Db.dbCloseAll()
    for dir_path in [config.data_dir, config.data_dir + "-temp"]:
        if os.path.isdir(dir_path):
            for file_name in os.listdir(dir_path):
                ext = file_name.rsplit(".", 1)[-1]
                if ext not in ["csr", "pem", "srl", "db", "json", "tmp"]:
                    continue
                file_path = dir_path + "/" + file_name
                if os.path.isfile(file_path):
                    os.unlink(file_path)
atexit_register(cleanup)
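# Reset the shared sites/filters/users json files to a known state with a fixed master_seed,
# once per test session.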
@pytest.fixture(scope="session")
def resetSettings(request):
open("%s/sites.json" % config.data_dir, "w").write("{}")
open("%s/filters.json" % config.data_dir, "w").write("{}")
open("%s/users.json" % config.data_dir, "w").write("""
{
"15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
"certs": {},
"master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
"sites": {}
}
}
""")
@pytest.fixture(scope="session")
def resetTempSettings(request):
    data_dir_temp = config.data_dir + "-temp"
    if not os.path.isdir(data_dir_temp):
        os.mkdir(data_dir_temp)
    open("%s/sites.json" % data_dir_temp, "w").write("{}")
    open("%s/filters.json" % data_dir_temp, "w").write("{}")
    open("%s/users.json" % data_dir_temp, "w").write("""
        {
            "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
                "certs": {},
                "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
                "sites": {}
            }
        }
    """)

    def cleanup():
        os.unlink("%s/sites.json" % data_dir_temp)
        os.unlink("%s/users.json" % data_dir_temp)
        os.unlink("%s/filters.json" % data_dir_temp)
    request.addfinalizer(cleanup)
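# The "site" fixture restores the test site from its pristine "-original" copy, disables real
# peer announcing and registers full cleanup (content db, greenlets) after each test.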
@pytest.fixture()
def site(request):
    threads_before = [obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)]

    # Reset ratelimit
    RateLimit.queue_db = {}
    RateLimit.called_db = {}

    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")

    # Always use original data
    assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in site.storage.getPath("")  # Make sure we don't delete everything
    shutil.rmtree(site.storage.getPath(""), True)
    shutil.copytree(site.storage.getPath("") + "-original", site.storage.getPath(""))

    # Add to site manager
    SiteManager.site_manager.get("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
    site.announce = mock.MagicMock(return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site.delete()
        site.content_manager.contents.db.close("Test cleanup")
        site.content_manager.contents.db.timer_check_optional.kill()
        SiteManager.site_manager.sites.clear()
        db_path = "%s/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
    request.addfinalizer(cleanup)

    site.greenlet_manager.stopGreenlets()
    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")  # Create new Site object to load content.json files
    if not SiteManager.site_manager.sites:
        SiteManager.site_manager.sites = {}
    SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"] = site
    site.settings["serving"] = True
    return site
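# "site_temp" is the same test site loaded from the separate "-temp" data dir, so tests can run
# an independent second copy (e.g. as the downloading side) against "site".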
@pytest.fixture()
def site_temp(request):
    threads_before = [obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)]

    with mock.patch("Config.config.data_dir", config.data_dir + "-temp"):
        site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
        site_temp.settings["serving"] = True
    site_temp.announce = mock.MagicMock(return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site_temp.delete()
        site_temp.content_manager.contents.db.close("Test cleanup")
        site_temp.content_manager.contents.db.timer_check_optional.kill()
        db_path = "%s-temp/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
    request.addfinalizer(cleanup)

    site_temp.log = logging.getLogger("Temp:%s" % site_temp.address_short)
    return site_temp
@pytest.fixture(scope="session")
def user():
    user = UserManager.user_manager.get()
    if not user:
        user = UserManager.user_manager.create()
    user.sites = {}  # Reset user data
    return user
@pytest.fixture(scope="session")
def browser(request):
    try:
        from selenium import webdriver
        print("Starting chromedriver...")
        options = webdriver.chrome.options.Options()
        options.add_argument("--headless")
        options.add_argument("--window-size=1920x1080")
        options.add_argument("--log-level=1")
        browser = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, service_log_path=os.path.devnull, options=options)

        def quit():
            browser.quit()
        request.addfinalizer(quit)
    except Exception as err:
        raise pytest.skip("Test requires selenium + chromedriver: %s" % err)
    return browser
@pytest.fixture(scope="session")
def site_url():
    try:
        urllib.request.urlopen(SITE_URL).read()
    except Exception as err:
        raise pytest.skip("Test requires zeronet client running: %s" % err)
    return SITE_URL
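# "file_server" parametrizes over the IPv4 and IPv6 variants below, so every test that uses it
# runs once per address family.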
@pytest.fixture(params=['ipv4', 'ipv6'])
def file_server(request):
    if request.param == "ipv4":
        return request.getfixturevalue("file_server4")
    else:
        return request.getfixturevalue("file_server6")
@pytest.fixture
def file_server4(request):
    time.sleep(0.1)
    file_server = FileServer("127.0.0.1", 1544)
    file_server.ip_external = "1.2.3.4"  # Fake external ip

    def listen():
        ConnectionServer.start(file_server)
        ConnectionServer.listen(file_server)

    gevent.spawn(listen)
    # Wait for port opening
    for retry in range(10):
        time.sleep(0.1)  # Port opening
        try:
            conn = file_server.getConnection("127.0.0.1", 1544)
            conn.close()
            break
        except Exception as err:
            print("FileServer4 startup error", Debug.formatException(err))
    assert file_server.running
    file_server.ip_incoming = {}  # Reset flood protection

    def stop():
        file_server.stop()
    request.addfinalizer(stop)
    return file_server
@pytest.fixture
def file_server6(request):
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.connect(("::1", 80, 1, 1))
        has_ipv6 = True
    except OSError:
        has_ipv6 = False
    if not has_ipv6:
        pytest.skip("Ipv6 not supported")

    time.sleep(0.1)
    file_server6 = FileServer("::1", 1544)
    file_server6.ip_external = 'fca5:95d6:bfde:d902:8951:276e:1111:a22c'  # Fake external ip

    def listen():
        ConnectionServer.start(file_server6)
        ConnectionServer.listen(file_server6)

    gevent.spawn(listen)
    # Wait for port opening
    for retry in range(10):
        time.sleep(0.1)  # Port opening
        try:
            conn = file_server6.getConnection("::1", 1544)
            conn.close()
            break
        except Exception as err:
            print("FileServer6 startup error", Debug.formatException(err))
    assert file_server6.running
    file_server6.ip_incoming = {}  # Reset flood protection

    def stop():
        file_server6.stop()
    request.addfinalizer(stop)
    return file_server6
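# "ui_websocket" wires a UiWebsocket to an in-memory WsMock that captures the json "result"
# field, and attaches a testAction() helper so tests can call websocket commands synchronously.
# Hypothetical usage sketch (the "siteInfo" command name and result key are assumptions, not
# taken from this file):
#
#     def testSiteInfo(self, ui_websocket):
#         result = ui_websocket.testAction("siteInfo")
#         assert "address" in result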
@pytest.fixture()
def ui_websocket(site, user):
    class WsMock:
        def __init__(self):
            self.result = gevent.event.AsyncResult()

        def send(self, data):
            logging.debug("WsMock: Set result (data: %s) called by %s" % (data, Debug.formatStack()))
            self.result.set(json.loads(data)["result"])

        def getResult(self):
            logging.debug("WsMock: Get result")
            back = self.result.get()
            logging.debug("WsMock: Got result (data: %s)" % back)
            self.result = gevent.event.AsyncResult()
            return back

    ws_mock = WsMock()
    ui_websocket = UiWebsocket(ws_mock, site, None, user, None)

    def testAction(action, *args, **kwargs):
        ui_websocket.handleRequest({"id": 0, "cmd": action, "params": list(args) if args else kwargs})
        return ui_websocket.ws.getResult()

    ui_websocket.testAction = testAction
    return ui_websocket
@pytest.fixture(scope="session")
def tor_manager():
    try:
        tor_manager = TorManager(fileserver_port=1544)
        tor_manager.start()
        assert tor_manager.conn is not None
        tor_manager.startOnions()
    except Exception as err:
        raise pytest.skip("Test requires Tor with ControlPort: %s, %s" % (config.tor_controller, err))
    return tor_manager
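# "db" builds a throwaway zeronet.db from the inline schema below (a "test" table plus an
# import-filtered copy) and deletes the file again when the test finishes.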
@pytest.fixture()
def db(request):
    db_path = "%s/zeronet.db" % config.data_dir
    schema = {
        "db_name": "TestDb",
        "db_file": "%s/zeronet.db" % config.data_dir,
        "maps": {
            "data.json": {
                "to_table": [
                    "test",
                    {"node": "test", "table": "test_importfilter", "import_cols": ["test_id", "title"]}
                ]
            }
        },
        "tables": {
            "test": {
                "cols": [
                    ["test_id", "INTEGER"],
                    ["title", "TEXT"],
                    ["json_id", "INTEGER REFERENCES json (json_id)"]
                ],
                "indexes": ["CREATE UNIQUE INDEX test_id ON test(test_id)"],
                "schema_changed": 1426195822
            },
            "test_importfilter": {
                "cols": [
                    ["test_id", "INTEGER"],
                    ["title", "TEXT"],
                    ["json_id", "INTEGER REFERENCES json (json_id)"]
                ],
                "indexes": ["CREATE UNIQUE INDEX test_importfilter_id ON test_importfilter(test_id)"],
                "schema_changed": 1426195822
            }
        }
    }

    if os.path.isfile(db_path):
        os.unlink(db_path)
    db = Db.Db(schema, db_path)
    db.checkTables()

    def stop():
        db.close("Test db cleanup")
        os.unlink(db_path)
    request.addfinalizer(stop)
    return db
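# "crypt_bitcoin_lib" re-runs a test once per signature-verification backend by forcing
# CryptBitcoin to load the requested library.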
@pytest.fixture(params=["sslcrypto", "sslcrypto_fallback", "libsecp256k1"])
def crypt_bitcoin_lib(request, monkeypatch):
    monkeypatch.setattr(CryptBitcoin, "lib_verify_best", request.param)
    CryptBitcoin.loadLib(request.param)
    return CryptBitcoin
@pytest.fixture(scope='function', autouse=True)
def logCaseStart(request):
    global time_start
    time_start = time.time()
    logging.debug("---- Start test case: %s ----" % request._pyfuncitem)
    yield None  # Wait until the test case is done
# Workaround for pytest bug when logging in atexit/post-fixture handlers (I/O operation on closed file)
def workaroundPytestLogError():
    import _pytest.capture
    write_original = _pytest.capture.EncodedFile.write

    def write_patched(obj, *args, **kwargs):
        try:
            write_original(obj, *args, **kwargs)
        except ValueError as err:
            if str(err) == "I/O operation on closed file":
                pass
            else:
                raise err

    def flush_patched(obj, *args, **kwargs):
        try:
            obj.buffer.flush(*args, **kwargs)
        except ValueError as err:
            if str(err).startswith("I/O operation on closed file"):
                pass
            else:
                raise err

    _pytest.capture.EncodedFile.write = write_patched
    _pytest.capture.EncodedFile.flush = flush_patched
workaroundPytestLogError()
@pytest.fixture(scope='session', autouse=True)
def disableLog():
    yield None  # Wait until all tests are done
    logging.getLogger('').setLevel(logging.getLevelName(logging.CRITICAL))

View file

@ -1,15 +0,0 @@
[run]
branch = True
concurrency = gevent
omit =
    src/lib/*
    src/Test/*
[report]
exclude_lines =
    pragma: no cover
    if __name__ == .__main__.:
    if config.debug:
    if config.debug_socket:
    if self.logging:
    def __repr__

View file

@ -1,6 +0,0 @@
[pytest]
python_files = Test*.py
addopts = -rsxX -v --durations=6 --capture=fd
markers =
    slow: mark a test as slow.
    webtest: mark a test as a webtest.

Some files were not shown because too many files have changed in this diff Show more