diff --git a/.forgejo/workflows/build-on-commit.yml b/.forgejo/workflows/build-on-commit.yml
new file mode 100644
index 00000000..e8f0d2e3
--- /dev/null
+++ b/.forgejo/workflows/build-on-commit.yml
@@ -0,0 +1,42 @@
+name: Build Docker Image on Commit
+
+on:
+ push:
+ branches:
+ - main
+    tags-ignore:
+      - '**' # Exclude all tag pushes
+
+jobs:
+ build-and-publish:
+ runs-on: docker-builder
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set REPO_VARS
+ id: repo-url
+ run: |
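+        # Strip the http(s):// scheme so REPO_HOST is a bare registry hostname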
+ echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
+ echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
+
+ - name: Login to OCI registry
+ run: |
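+        # Pipe the token via --password-stdin so it never appears in the process list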
+ echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
+
+ - name: Build and push Docker images
+ run: |
+ # Build Docker image with commit SHA
+ docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
+ docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}
+
+ # Build Docker image with nightly tag
+ docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
+ docker push $REPO_HOST/$REPO_PATH:nightly
+
+ # Remove local images to save storage
+ docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
+ docker rmi $REPO_HOST/$REPO_PATH:nightly
diff --git a/.forgejo/workflows/build-on-tag.yml b/.forgejo/workflows/build-on-tag.yml
new file mode 100644
index 00000000..888102b6
--- /dev/null
+++ b/.forgejo/workflows/build-on-tag.yml
@@ -0,0 +1,38 @@
+name: Build and Publish Docker Image on Tag
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ build-and-publish:
+ runs-on: docker-builder
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set REPO_VARS
+ id: repo-url
+ run: |
+ echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
+ echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
+
+ - name: Login to OCI registry
+ run: |
+ echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
+
+ - name: Build and push Docker image
+ run: |
+ TAG=${{ github.ref_name }} # Get the tag name from the context
+        # Build and push the Docker image for this tag
+        docker build -t $REPO_HOST/$REPO_PATH:$TAG .
+        docker push $REPO_HOST/$REPO_PATH:$TAG
+ # Tag and push latest
+ docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
+ docker push $REPO_HOST/$REPO_PATH:latest
+
+ # Remove the local image to save storage
+ docker rmi $REPO_HOST/$REPO_PATH:$TAG
+ docker rmi $REPO_HOST/$REPO_PATH:latest
\ No newline at end of file
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..aab991d5
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,10 @@
+github: canewsin
+patreon: # Replace with a single Patreon username e.g., user1
+open_collective: # Replace with a single Open Collective username e.g., user1
+ko_fi: canewsin
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: canewsin
+issuehunt: # Replace with a single IssueHunt username e.g., user1
+otechie: # Replace with a single Otechie username e.g., user1
+custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 00000000..b97ad556
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,33 @@
+---
+name: Bug report
+about: Create a report to help us improve ZeroNet
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+### Step 1: Please describe your environment
+
+ * ZeroNet version: _____
+ * Operating system: _____
+ * Web browser: _____
+ * Tor status: not available/always/disabled
+ * Opened port: yes/no
+ * Special configuration: ____
+
+### Step 2: Describe the problem:
+
+#### Steps to reproduce:
+
+ 1. _____
+ 2. _____
+ 3. _____
+
+#### Observed Results:
+
+ * What happened? This could be a screenshot, a description, log output (you can send log/debug.log file to hello@zeronet.io if necessary), etc.
+
+#### Expected Results:
+
+ * What did you expect to happen?
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..fe7c8178
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for ZeroNet
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..27b5c924
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,72 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ py3-latest ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ py3-latest ]
+ schedule:
+ - cron: '32 19 * * 2'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'javascript', 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+      # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+    # If the Autobuild fails above, remove it and uncomment the following three lines,
+    # then modify them (or add more) to build your code; refer to the EXAMPLE below for guidance.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 00000000..2bdcaf95
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,54 @@
+name: tests
+
+on: [push, pull_request]
+
+jobs:
+ test:
+ runs-on: ubuntu-20.04
+ strategy:
+ max-parallel: 16
+ matrix:
+ python-version: ["3.7", "3.8", "3.9"]
+
+ steps:
+ - name: Checkout ZeroNet
+ uses: actions/checkout@v2
+ with:
+ submodules: "true"
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Prepare for installation
+ run: |
+ python3 -m pip install setuptools
+ python3 -m pip install --upgrade pip wheel
+ python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
+
+ - name: Install
+ run: |
+ python3 -m pip install --upgrade -r requirements.txt
+ python3 -m pip list
+
+ - name: Prepare for tests
+ run: |
+ openssl version -a
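+        # Enable IPv6 on the runner (disable_ipv6=0); some tests exercise IPv6 networking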
+ echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6
+
+ - name: Test
+ run: |
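+        # catchsegv prints a backtrace if a test crashes the interpreter with a segfault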
+ catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
+ export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
+ export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
+ export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
+ export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
+        export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python3 -m pytest -x plugins/Multiuser/Test
+        export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python3 -m pytest -x plugins/Bootstrapper/Test
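+        # Sanity-check that every bundled .json file parses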
+ find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..636cd115
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,36 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# Log files
+**/*.log
+
+# Hidden files
+.*
+!/.forgejo
+!/.github
+!/.gitignore
+!/.travis.yml
+!/.gitlab-ci.yml
+
+# Temporary files
+*.bak
+
+# Data dir
+data/*
+*.db
+
+# Virtualenv
+env/*
+
+# Tor data
+tools/tor/data
+
+# PhantomJS, downloaded manually for unit tests
+tools/phantomjs
+
+# ZeroNet config file
+zeronet.conf
+
+# ZeroNet log files
+log/*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000..f3e1ed29
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,49 @@
+stages:
+ - test
+
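+# Shared test template; each test:py3.x job below merges it via the <<: *test_template anchor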
+.test_template: &test_template
+ stage: test
+ before_script:
+ - pip install --upgrade pip wheel
+    # On Python 3.4, Selenium can't be installed unless pip is first given a requests version hint
+    - pip install --upgrade 'requests>=2.22.0'
+ - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
+ - pip install --upgrade -r requirements.txt
+ script:
+ - pip list
+ - openssl version -a
+ - python -m pytest -x plugins/CryptMessage/Test --color=yes
+ - python -m pytest -x plugins/Bigfile/Test --color=yes
+ - python -m pytest -x plugins/AnnounceLocal/Test --color=yes
+ - python -m pytest -x plugins/OptionalManager/Test --color=yes
+ - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini --color=yes
+ - mv plugins/disabled-Multiuser plugins/Multiuser
+ - python -m pytest -x plugins/Multiuser/Test --color=yes
+ - mv plugins/disabled-Bootstrapper plugins/Bootstrapper
+ - python -m pytest -x plugins/Bootstrapper/Test --color=yes
+ - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
+
+test:py3.4:
+ image: python:3.4.3
+ <<: *test_template
+
+test:py3.5:
+ image: python:3.5.7
+ <<: *test_template
+
+test:py3.6:
+ image: python:3.6.9
+ <<: *test_template
+
+test:py3.7-openssl1.1.0:
+ image: python:3.7.0b5
+ <<: *test_template
+
+test:py3.7-openssl1.1.1:
+ image: python:3.7.4
+ <<: *test_template
+
+test:py3.8:
+ image: python:3.8.0b3
+ <<: *test_template
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..2c602a5a
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "plugins"]
+ path = plugins
+ url = https://github.com/ZeroNetX/ZeroNet-Plugins.git
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..bdaafa22
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,47 @@
+language: python
+python:
+ - 3.4
+ - 3.5
+ - 3.6
+ - 3.7
+ - 3.8
+services:
+ - docker
+cache: pip
+before_install:
+ - pip install --upgrade pip wheel
+ - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
+ # - docker build -t zeronet .
+ # - docker run -d -v $PWD:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 zeronet
+install:
+ - pip install --upgrade -r requirements.txt
+ - pip list
+before_script:
+ - openssl version -a
+ # Add an IPv6 config - see the corresponding Travis issue
+ # https://github.com/travis-ci/travis-ci/issues/8361
+ - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
+ sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6';
+ fi
+script:
+ - catchsegv python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
+ - export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python -m pytest -x plugins/CryptMessage/Test
+ - export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python -m pytest -x plugins/Bigfile/Test
+ - export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python -m pytest -x plugins/AnnounceLocal/Test
+ - export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python -m pytest -x plugins/OptionalManager/Test
+ - export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
+ - export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
+ - find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ - find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
+after_failure:
+ - zip -r log.zip log/
+ - curl --upload-file ./log.zip https://transfer.sh/log.zip
+after_success:
+ - codecov
+ - coveralls --rcfile=src/Test/coverage.ini
+notifications:
+ email:
+ recipients:
+ hello@zeronet.io
+ on_success: change
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..6974d18a
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,649 @@
+### ZeroNet 0.9.0 (2023-07-12) Rev4630
+ - Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
+ - Add trackers to Config.py as a failsafe in case trackers.txt is missing
+ - Added Proxy links
+ - Fix pysha3 dep installation issue
+ - FileRequest -> Remove Unnecessary check, Fix error wording
+ - Fix Response when site is missing for `actionAs`
+
+
+### ZeroNet 0.8.5 (2023-02-12) Rev4625
+ - Fix for SSL cert generation failure on Windows (https://github.com/ZeroNetX/ZeroNet/pull/202).
+ - Default theme-class for missing value in `users.json`.
+ - Fetch Stats Plugin changes.
+
+### ZeroNet 0.8.4 (2022-12-12) Rev4620
+ - Increase Minimum Site size to 25MB.
+
+### ZeroNet 0.8.3 (2022-12-11) Rev4611
+ - main.py -> Fix accessing unassigned variable
+ - ContentManager -> Support for multiSig
+ - SiteStorage.py -> Fix accessing unassigned variable
+ - ContentManager.py Improve Logging of Valid Signers
+
+### ZeroNet 0.8.2 (2022-11-01) Rev4610
+ - Fix Startup Error when plugins dir missing
+ - Move trackers to separate file & Add more trackers
+ - Config:: Skip loading missing tracker files
+ - Added documentation for getRandomPort fn
+
+### ZeroNet 0.8.1 (2022-10-01) Rev4600
+ - Fix readdress loop (cherry-picked previously added commit from conservancy)
+ - Remove Patreon badge
+ - Update README-ru.md (#177)
+ - Include inner_path of failed request for signing in error msg and response
+ - Don't Fail Silently When Cert is Not Selected
+ - Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
+ - Update FUNDING.yml
+
+### ZeroNet 0.8.0 (2022-05-27) Rev4591
+ - Revert File Open to catch File Access Errors.
+
+### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
+ - Use xescape(s) from zeronet-conservancy
+ - actionUpdate response Optimisation
+ - Fetch Plugins Repo Updates
+ - Fix Unhandled File Access Errors
+ - Create codeql-analysis.yml
+
+### ZeroNet 0.7.9 (2022-05-26) Rev4585
+ - Rust Version Compatibility for update Protocol msg
+ - Removed Non-Working Trackers.
+ - Dynamically Load Trackers from Dashboard Site.
+ - Tracker Supply Improvements.
+ - Fix Repo Url for Bug Report
+ - First Party Tracker Update Service using Dashboard Site.
+ - remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
+
+### ZeroNet 0.7.8 (2022-03-02) Rev4580
+ - Update Plugins with some bug fixes and Improvements
+
+### ZeroNet 0.7.6 (2022-01-12) Rev4565
+ - Sync Plugin Updates
+ - Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
+ - Add More Default Plugins to Repo
+ - Doubled Site Publish Limits
+ - Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
+ - UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
+ - Moved Plugins to Separate Repo
+ - Added `access_key` variable to Config; it is used to access restricted plugins when the Multiuser plugin is enabled. With MultiUserPlugin enabled some pages such as /Stats are inaccessible; this key removes that restriction.
+ - Added `last_connection_id_current_version` to ConnectionServer, helpful for estimating the number of connections from the current client version.
+ - Added current-version connection count to the /Stats page (see the previous point).
+
+### ZeroNet 0.7.5 (2021-11-28) Rev4560
+ - Add more default trackers
+ - Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
+ - Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
+
+### ZeroNet 0.7.3 (2021-11-28) Rev4555
+ - Fix xrange is undefined error
+ - Fix Incorrect viewport on mobile while loading
+ - Tor-V3 Patch by anonymoose
+
+
+### ZeroNet 0.7.1 (2019-07-01) Rev4206
+### Added
+ - Built-in logging console in the web UI to see what's happening in the background. (pull down top-right 0 button to see it)
+ - Display database rebuild errors [Thanks to Lola]
+ - New plugin system that allows installing and managing builtin/third-party extensions to the ZeroNet client using the web interface.
+ - Support multiple trackers_file
+ - Add OpenSSL 1.1 support to CryptMessage plugin based on Bitmessage modifications [Thanks to radfish]
+ - Display visual error message on startup errors
+ - Fix max opened files changing on Windows platform
+ - Display TLS1.3 compatibility on /Stats page
+ - Add fake SNI and ALPN to peer connections to make it more like standard https connections
+ - Hide and ignore tracker_proxy setting in Tor: Always mode as it's going to use Tor anyway.
+ - Deny websocket connections from unknown origins
+ - Restrict open_browser values to avoid RCE on sandbox escape
+ - Offer access to the web interface by IP address in case of unknown host
+ - Link to site's sidebar with "#ZeroNet:OpenSidebar" hash
+
+### Changed
+ - Allow .. in file names [Thanks to imachug]
+ - Change unstable trackers
+ - Cleaner errors on sites.json/users.json load error
+ - Various tweaks for tracker rating on unstable connections
+ - Use OpenSSL 1.1 dlls from default Python Windows distribution if possible
+ - Re-factor domain resolving for easier domain plugins
+ - Disable UDP connections if --proxy is used
+ - New, decorator-based Websocket API permission system to avoid future typo mistakes
+
+### Fixed
+ - Fix parsing config lines that have no value
+ - Fix start.py [Thanks to imachug]
+ - Allow multiple values of the same key in the config file [Thanks ssdifnskdjfnsdjk for reporting]
+ - Fix parsing config file lines that have % in the value [Thanks slrslr for reporting]
+ - Fix bootstrapper plugin hash reloads [Thanks geekless for reporting]
+ - Fix CryptMessage plugin OpenSSL dll loading on Windows (ZeroMail errors) [Thanks cxgreat2014 for reporting]
+ - Fix startup error when using OpenSSL 1.1 [Thanks to imachug]
+ - Fix a bug that did not load merged site data for 5 sec after the merged site got added
+ - Fix typo that allowed adding new plugins in public proxy mode. [Thanks styromaniac for reporting]
+ - Fix loading non-big files with "|all" postfix [Thanks to krzotr]
+ - Fix OpenSSL cert generation error crash by changing the Windows console encoding to utf8
+
+#### Wrapper html injection vulnerability [Reported by ivanq]
+
+In ZeroNet before rev4188 the wrapper template variables were rendered incorrectly.
+
+Result: The opened site was able to gain a WebSocket connection with unrestricted ADMIN/NOSANDBOX access, change configuration values, and potentially achieve RCE on the client's machine.
+
+Fix: Fixed the template rendering code, disallowed WebSocket connections from unknown locations, restricted open_browser configuration values to avoid possible RCE in case of sandbox escape.
+
+Note: The fix is also backported to the ZeroNet Py 2.x version (Rev3870)
+
+
+### ZeroNet 0.7.0 (2019-06-12) Rev4106 (First release targeting Python 3.4+)
+### Added
+ - 5-10x faster signature verification by using libsecp256k1 (Thanks to ZeroMux)
+ - Generated SSL certificate randomization to avoid protocol filters (Thanks to ValdikSS)
+ - Offline mode
+ - P2P source code update using ZeroNet protocol
+ - ecdsaSign/Verify commands to CryptMessage plugin (Thanks to imachug)
+ - Efficient file rename: change file names instead of re-downloading the file.
+ - Make redirect optional on site cloning (Thanks to Lola)
+ - EccPrivToPub / EccPubToPriv functions (Thanks to imachug)
+ - Detect and change dark/light theme based on OS setting (Thanks to filips123)
+
+### Changed
+ - Re-factored code to Python3 runtime (compatible with Python 3.4-3.8)
+ - Safer database sync mode
+ - Removed bundled third-party libraries where possible
+ - Use lang=en instead of lang={lang} in urls to avoid url encode problems
+ - Remove environment details from error page
+ - Don't push content.json updates larger than 10kb to significantly reduce bw usage for sites with many files
+
+### Fixed
+ - Fix sending files with \0 characters
+ - Security fix: Escape error detail to avoid XSS (reported by krzotr)
+ - Fix signature verification using libsecp256k1 for compressed addresses (mostly certificates generated in the browser)
+ - Fix newsfeed if you have more than 1000 followed topics/posts on one site.
+ - Fix site download as zip file
+ - Fix displaying sites with utf8 title
+ - Error message if dbRebuild fails (Thanks to Lola)
+ - Fix browser reopen if executing start.py again. (Thanks to imachug)
+
+
+### ZeroNet 0.6.5 (2019-02-16) Rev3851 (Last release targeting Python 2.7.x)
+### Added
+ - IPv6 support in peer exchange, bigfiles, optional file finding, tracker sharing, socket listening and connecting (based on tangdou1 modifications)
+ - New tracker database format with IPv6 support
+ - Display notification if there is an unpublished modification for your site
+ - Listen and shut down normally for SIGTERM (Thanks to blurHY)
+ - Support tilde `~` in filenames (by d14na)
+ - Support map for Namecoin subdomain names (Thanks to lola)
+ - Add log level to config page
+ - Support `{data}` for data dir variable in trackers_file value
+ - Quick check content.db on startup and rebuild if necessary
+ - Don't show meek proxy option if the tor client does not support it
+
+### Changed
+ - Refactored port open checking with IPv6 support
+ - Consider non-local IPs as external even if the open port check fails (for CJDNS and Yggdrasil support)
+ - Add IPv6 tracker and change unstable tracker
+ - Don't correct sent local time with the calculated time correction
+ - Disable CSP for Edge
+ - Only support CREATE commands in dbschema indexes node and SELECT from storage.query
+
+### Fixed
+ - Check the length of master seed when executing cryptGetPrivatekey CLI command
+ - Only reload source code on file modification / creation
+ - Detect and issue a warning for the latest no-script plugin
+ - Fix atomic write of a non-existent file
+ - Fix sql queries with lots of variables and sites with lots of content.json
+ - Fix multi-line parsing of zeronet.conf
+ - Fix site deletion from users.json
+ - Fix site cloning before site downloaded (Reported by unsystemizer)
+ - Fix queryJson for non-list nodes (Reported by MingchenZhang)
+
+
+## ZeroNet 0.6.4 (2018-10-20) Rev3660
+### Added
+ - New plugin: UiConfig. A web interface that allows changing ZeroNet settings.
+ - New plugin: AnnounceShare. Share trackers between users, automatically announce client's ip as tracker if Bootstrapper plugin is enabled.
+ - Global tracker stats on ZeroHello: Include statistics from all served sites instead of displaying request statistics only for one site.
+ - Support custom proxy for trackers. (Configurable with /Config)
+ - Adding peers to sites manually using zeronet_peers get parameter
+ - Copy site address with peers link on the sidebar.
+ - Zip file listing and streaming support for Bigfiles.
+ - Tracker statistics on /Stats page
+ - Peer reputation save/restore to speed up sync time after startup.
+ - Full support for fileGet, fileList, dirList calls on tar.gz/zip files.
+ - Archived_before support in user content rules to allow deletion of all user files before the specified date
+ - Show and manage "Connecting" sites on ZeroHello
+ - Add theme support to ZeroNet sites
+ - Dark theme for ZeroHello, ZeroBlog, ZeroTalk
+
+### Changed
+ - Dynamic big file allocation: More efficient storage usage by not pre-allocating the whole file at the beginning, but expanding the size as the content downloads.
+ - Reduce the request frequency to unreliable trackers.
+ - Only allow 5 checkSites to run in parallel to reduce load under Tor/slow connection.
+ - Stop site downloading if it reached 95% of site limit to avoid download loop for sites out of limit
+ - Pinned optional files won't be removed from the download queue after 30 retries and won't be deleted even if the site owner removes them.
+ - Don't remove incomplete (downloading) sites on startup
+ - Remove --pin_bigfile argument as big files are automatically excluded from optional files limit.
+
+### Fixed
+ - Trayicon compatibility with latest gevent
+ - Request number counting for zero:// trackers
+ - Peer reputation boost for zero:// trackers.
+ - Blocklist of peers loaded from peerdb (Thanks tangdou1 for report)
+ - Sidebar map loading on foreign languages (Thx tangdou1 for report)
+ - FileGet on non-existent files (Thanks mcdev for reporting)
+ - Peer connecting bug for sites with low amount of peers
+
+#### "The Vacation" Sandbox escape bug [Reported by GitCenter / Krixano / ZeroLSTN]
+
+In ZeroNet 0.6.3 Rev3615 and earlier as a result of invalid file type detection, a malicious site could escape the iframe sandbox.
+
+Result: Browser iframe sandbox escape
+
+Applied fix: Replaced the previous, file extension based file type identification with a proper one.
+
+Affected versions: All versions before ZeroNet Rev3616
+
+
+## ZeroNet 0.6.3 (2018-06-26)
+### Added
+ - New plugin: ContentFilter that allows having shared site and user block lists.
+ - Support Tor meek proxies to avoid tracker blocking of GFW
+ - Detect network-level tracker blocking and easily set a meek proxy for tracker connections.
+ - Support downloading 2GB+ sites as .zip (Thx to Radtoo)
+ - Support ZeroNet as a transparent proxy (Thx to JeremyRand)
+ - Allow fileQuery as CORS command (Thx to imachug)
+ - Windows distribution includes Tor and meek client by default
+ - Download sites as zip link to sidebar
+ - File server port randomization
+ - Implicit SSL for all connections
+ - fileList API command for zip files
+ - Auto download bigfiles size limit on sidebar
+ - Local peer number to the sidebar
+ - Open site directory button in sidebar
+
+### Changed
+ - Switched to Azure Tor meek proxy as Amazon one became unavailable
+ - Refactored/rewritten tracker connection manager
+ - Improved peer discovery for optional files without opened port
+ - Also delete Bigfile's piecemap on deletion
+
+### Fixed
+ - Important security issue: Iframe sandbox escape [Reported by Ivanq / gitcenter]
+ - Local peer discovery when running multiple clients on the same machine
+ - Uploading small files with Bigfile plugin
+ - Ctrl-c shutdown when running CLI commands
+ - High CPU/IO usage when Multiuser plugin enabled
+ - Firefox back button
+ - Peer discovery on older Linux kernels
+ - Optional file handling when multiple files have the same hash_id (first 4 chars of the hash)
+ - Msgpack 0.5.5 and 0.5.6 compatibility
+
+## ZeroNet 0.6.2 (2018-02-18)
+
+### Added
+ - New plugin: AnnounceLocal to make ZeroNet work without an internet connection on the local network.
+ - Allow dbQuery and userGetSettings using the `as` API command on different sites with Cors permission
+ - New config option: `--log_level` to reduce log verbosity and IO load
+ - Prefer to connect to recent peers from trackers first
+ - Mark peers with port 1 as unconnectable too, in preparation for a fix for trackers that do not support port 0 announce
+
+### Changed
+ - Don't keep connection for sites that have not been modified in the last week
+ - Change unreliable trackers to new ones
+ - Send a maximum of 10 findhash requests in one optional-file-finding round (15sec)
+ - Change "Unique to site" to "No certificate" for default option in cert selection dialog.
+ - Don't print warnings if not in debug mode
+ - Generalized tracker logging format
+ - Only recover sites from sites.json if they had peers
+ - A message from local peers does not mean there is an internet connection
+ - Removed `--debug_gevent` and turned on Gevent block logging by default
+
+### Fixed
+ - Limit connections to 512 to avoid reaching the 1024 limit on Windows
+ - Exception when logging foreign operating system socket errors
+ - Don't send private (local) IPs on pex
+ - Don't connect to private IPs in tor always mode
+ - Properly recover data from msgpack unpacker on file stream start
+ - Symlinked data directory deletion when deleting site using Windows
+ - De-duplicate peers before publishing
+ - Bigfile info for non-existing files
+
+
+## ZeroNet 0.6.1 (2018-01-25)
+
+### Added
+ - New plugin: Chart
+ - Collect and display charts about your contribution to ZeroNet network
+ - Allow list as argument replacement in sql queries. (Thanks to imachug)
+ - Newsfeed query time statistics (Click on "From XX sites in X.Xs" on ZeroHello)
+ - New UiWebsocket API command: As to run commands as another site
+ - Ranged ajax queries for big files
+ - Filter feed by type and site address
+ - FileNeed, Bigfile upload command compatibility with merger sites
+ - Send event on port open / tor status change
+ - More description on permission request
+
+### Changed
+ - Reduce memory usage of sidebar geoip database cache
+ - Change unreliable tracker to new one
+ - Don't display Cors permission ask if it already granted
+ - Avoid UI blocking when rebuilding a merger site
+ - Skip listing ignored directories on signing
+ - In Multiuser mode show the seed welcome message when adding new certificate instead of first visit
+ - Faster async port opening on multiple network interfaces
+ - Allow javascript modals
+ - Only zoom sidebar globe if mouse button is pressed down
+
+### Fixed
+ - Open port checking error reporting (Thanks to imachug)
+ - Out-of-range big file requests
+ - Don't output errors happened on gevent greenlets twice
+ - Newsfeed skip sites with no database
+ - Newsfeed queries with multiple params
+ - Newsfeed queries with UNION and UNION ALL
+ - Fix site clone with sites larger than 10MB
+ - Unreliable Websocket connection when requesting files from different sites at the same time
+
+
+## ZeroNet 0.6.0 (2017-10-17)
+
+### Added
+ - New plugin: Big file support
+ - Automatic pinning on Big file download
+ - Enable TCP_NODELAY for supporting sockets
+ - actionOptionalFileList API command arguments to list non-downloaded files or only big files
+ - serverShowdirectory API command arguments to allow displaying the site's directory in the OS file browser
+ - fileNeed API command to initialize optional file downloading
+ - wrapperGetAjaxKey API command to request nonce for AJAX request
+ - Json.gz support for database files
+ - P2P port checking (Thanks to grez911)
+ - `--download_optional auto` argument to enable automatic optional file downloading for newly added site
+ - Statistics for big files and protocol command requests on /Stats
+ - Allow setting user limitations based on auth_address
+
+### Changed
+ - More aggressive and frequent connection timeout checking
+ - Use out-of-msgpack-context file streaming for files larger than 512KB
+ - Allow optional files workers over the worker limit
+ - Automatic redirection to wrapper on nonce_error
+ - Send websocket event on optional file deletion
+ - Optimize sites.json saving
+ - Enable faster C-based msgpack packer by default
+ - Major optimization on Bootstrapper plugin SQL queries
+ - Don't reset bad file counter on restart, to make it easier to give up on unreachable files
+ - Incoming connection limit changed from 1000 to 500 to avoid reaching socket limit on Windows
+ - Changed tracker boot.zeronet.io domain, because zeronet.io got banned in some countries
+
+#### Fixed
+ - Sub-directories in user directories
+
+## ZeroNet 0.5.7 (2017-07-19)
+### Added
+ - New plugin: CORS to request read permission to other site's content
+ - New API command: userSetSettings/userGetSettings to store site's settings in users.json
+ - Avoid file download if the file size does not match the requested one
+ - JavaScript- and wrapper-less file access using the /raw/ prefix ([Example](http://127.0.0.1:43110/raw/1AsRLpuRxr3pb9p3TKoMXPSWHzh6i7fMGi/en.tar.gz/index.html))
+ - --silent command line option to disable logging to stdout
+
+
+### Changed
+ - Better error reporting on sign/verification errors
+ - More tests for the sign and verification process
+ - Update to OpenSSL v1.0.2l
+ - Limit compressed files to 6MB to avoid zip/tar.gz bomb
+ - Allow space, [], () characters in filenames
+ - Disable cross-site resource loading to improve privacy. [Reported by Beardog108]
+ - Download directly accessed Pdf/Svg/Swf files instead of displaying them to avoid wrapper escape using in JS in SVG file. [Reported by Beardog108]
+ - Disallow potentially unsafe regular expressions to avoid ReDoS [Reported by MuxZeroNet]
+
+### Fixed
+ - Detecting data directory when running Windows distribution exe [Reported by Plasmmer]
+ - OpenSSL loading under Android 6+
+ - Error on exiting when no connection server started
+
+
+## ZeroNet 0.5.6 (2017-06-15)
+### Added
+ - Callback for certSelect API command
+ - More compact list formatting in json
+
+### Changed
+ - Remove obsolete auth_key_sha512 and signature format
+ - Improved Spanish translation (Thanks to Pupiloho)
+
+### Fixed
+ - Opened port checking (Thanks l5h5t7 & saber28 for reporting)
+ - Standalone update.py argument parsing (Thanks Zalex for reporting)
+ - uPnP crash on startup (Thanks Vertux for reporting)
+ - CoffeeScript 1.12.6 compatibility (Thanks kavamaken & imachug)
+ - Multi value argument parsing
+ - Database error when running from directory that contains special characters (Thanks Pupiloho for reporting)
+ - Site lock violation logging
+
+
+#### Proxy bypass during source upgrade [Reported by ZeroMux]
+
+In ZeroNet before 0.5.6 during the client's built-in source code upgrade mechanism,
+ZeroNet did not respect Tor and/or proxy settings.
+
+Result: ZeroNet downloaded the update without using the Tor network and potentially leaked the connections.
+
+Fix: Removed the problematic code line from the updater that removed the proxy settings from the socket library.
+
+Affected versions: ZeroNet 0.5.5 and earlier, Fixed in: ZeroNet 0.5.6
+
+
+#### XSS vulnerability using DNS rebinding. [Reported by Beardog108]
+
+In ZeroNet before 0.5.6 the web interface did not validate the request's Host parameter.
+
+Result: An attacker using a specially crafted DNS entry could have bypassed the browser's cross-site-scripting protection
+and potentially gained access to the user's private data stored on the site.
+
+Fix: By default ZeroNet only accepts connections from the 127.0.0.1 and localhost hosts.
+If you bind the ui server to an external interface, then it also adds the first http request's host to the allowed host list,
+or you can define it manually using --ui_host.
+
+Affected versions: ZeroNet 0.5.5 and earlier, Fixed in: ZeroNet 0.5.6
+
+
+## ZeroNet 0.5.5 (2017-05-18)
+### Added
+- Outgoing socket binding by --bind parameter
+- Database rebuilding progress bar
+- Protect low-traffic sites' peers from cleanup closing
+- Local site blacklisting
+- Cloned site source code upgrade from parent
+- Input placeholder support for displayPrompt
+- Alternative interaction for wrapperConfirm
+
+### Changed
+- New file priorities for faster site display on first visit
+- Don't add ? to url if push/replaceState url starts with #
+
+### Fixed
+- PermissionAdd/Remove admin command requirement
+- Multi-line confirmation dialog
+
+
+## ZeroNet 0.5.4 (2017-04-14)
+### Added
+- Major speed and CPU usage enhancements in Tor always mode
+- Send skipped modifications to outdated clients
+
+### Changed
+- Upgrade libs to latest version
+- Faster port opening and closing
+- Deny site limit modification in MultiUser mode
+
+### Fixed
+- Filling database from optional files
+- OpenSSL detection on systems with OpenSSL 1.1
+- Users.json corruption on systems with slow hdd
+- Fix leaking files in data directory by webui
+
+
+## ZeroNet 0.5.3 (2017-02-27)
+### Added
+- Tar.gz/zip packed site support
+- Utf8 filenames in archive files
+- Experimental --db_mode secure database mode to prevent data loss on systems with unreliable power source.
+- Admin user support in MultiUser mode
+- Optional deny adding new sites in MultiUser mode
+
+### Changed
+- Faster update and publish times by new socket sharing algorithm
+
+### Fixed
+- Fix missing json_row errors when using Mute plugin
+
+
+## ZeroNet 0.5.2 (2017-02-09)
+### Added
+- User muting
+- Win/Mac signed exe/.app
+- Signed commits
+
+### Changed
+- Faster site updates after startup
+- New macOS package for 10.10 compatibility
+
+### Fixed
+- Fix "New version just released" popup on page first visit
+- Fix disappearing optional files bug (Thanks l5h5t7 for reporting)
+- Fix skipped updates on unreliable connections (Thanks P2P for reporting)
+- Sandbox escape security fix (Thanks Firebox for reporting)
+- Fix error reporting on async websocket functions
+
+
+## ZeroNet 0.5.1 (2016-11-18)
+### Added
+- Multi language interface
+- New plugin: Translation helper for site html and js files
+- Per-site favicon
+
+### Fixed
+- Parallel optional file downloading
+
+
+## ZeroNet 0.5.0 (2016-11-08)
+### Added
+- New Plugin: Allow list/delete/pin/manage files on ZeroHello
+- New API commands to follow user's optional files, and query stats for optional files
+- Set total size limit on optional files.
+- New Plugin: Save peers to database and keep them between restarts to allow faster optional file search and make it work without trackers
+- Rewritten uPnP port opener + close port on exit (Thanks to sirMackk!)
+- Lower memory usage by lazy PeerHashfield creation
+- Loaded json files statistics and database info at /Stats page
+
+### Changed
+- Separate lock file for better Windows compatibility
+- When executing start.py open browser even if ZeroNet is already running
+- Keep plugin order after reload to allow plugins to extend another plugin
+- Only save sites.json if fully loaded to avoid data loss
+- Change aletorrenty tracker to a more reliable one
+- Much lower findhashid CPU usage
+- Pooled downloading of large amount of optional files
+- Lots of other optional file changes to make it better
+- If we have 1000 peers for a site, make cleanup more aggressive
+- Use warning instead of error on verification errors
+- Push updates to newer clients first
+- Bad file reset improvements
+
+### Fixed
+- Fix site deletion errors on startup
+- Delay websocket messages until it's connected
+- Fix database import if data file contains extra data
+- Fix big site download
+- Fix diff sending bug (been chasing it for a long time)
+- Fix random publish errors when json file contained [] characters
+- Fix site delete and siteCreate bug
+- Fix file write confirmation dialog
+
+
+## ZeroNet 0.4.1 (2016-09-05)
+### Added
+- Major core changes to allow fast startup and lower memory usage
+- Try to reconnect to Tor on lost connection
+- Sidebar fade-in
+- Try to avoid incomplete data files overwrite
+- Faster database open
+- Display user file sizes in sidebar
+- Concurrent worker number depends on --connection_limit
+
+### Changed
+- Close databases after 5 min idle time
+- Better site size calculation
+- Allow "-" character in domains
+- Always try to keep connections for sites
+- Remove merger permission from merged sites
+- Newsfeed scans only last 3 days to speed up database queries
+- Updated ZeroBundle-win to Python 2.7.12
+
+### Fixed
+- Fix for important security problem, which allowed anyone to publish new content without a valid certificate from the ID provider. Thanks Kaffie for pointing it out!
+- Fix sidebar error when no certificate provider selected
+- Skip invalid files on database rebuilding
+- Fix random websocket connection error popups
+- Fix new siteCreate command
+- Fix site size calculation
+- Fix port open checking after computer wake up
+- Fix --size_limit parsing from command line
+
+
+## ZeroNet 0.4.0 (2016-08-11)
+### Added
+- Merger site plugin
+- Live source code reloading: Faster core development by allowing me to make changes in ZeroNet source code without restarting it.
+- New json table format for merger sites
+- Database rebuild from sidebar.
+- Allow storing custom data directly in json table: Much simpler and faster SQL queries.
+- User file archiving: Allows the site owner to archive inactive users' content into a single file. (Reducing initial sync time/cpu/memory usage)
+- Also trigger onUpdated/update database on file delete.
+- Permission request from ZeroFrame API.
+- Allow storing extra data in content.json using the fileWrite API command.
+- Faster optional files downloading
+- Use alternative sources (Gogs, Gitlab) to download updates
+- Track provided sites per connection and prefer to keep the connections that serve more sites, to reduce the connection count
+
+### Changed
+- Keep at least 5 connections per site
+- Changed target connections per site to 10 from 15
+- ZeroHello search function stability/speed improvements
+- Improvements for clients with slower HDD
+
+### Fixed
+- Fix IE11 wrapper nonce errors
+- Fix sidebar on mobile devices
+- Fix site size calculation
+- Fix IE10 compatibility
+- Windows XP ZeroBundle compatibility (THX to people of China)
+
+
+## ZeroNet 0.3.7 (2016-05-27)
+### Changed
+- Patch command to reduce bandwidth usage by transferring only the changed lines
+- Other cpu/memory optimizations
+
+
+## ZeroNet 0.3.6 (2016-05-27)
+### Added
+- New ZeroHello
+- Newsfeed function
+
+### Fixed
+- Security fixes
+
+
+## ZeroNet 0.3.5 (2016-02-02)
+### Added
+- Full Tor support with .onion hidden services
+- Bootstrap using ZeroNet protocol
+
+### Fixed
+- Fix Gevent 1.0.2 compatibility
+
+
+## ZeroNet 0.3.4 (2015-12-28)
+### Added
+- AES, ECIES API function support
+- PushState and ReplaceState url manipulation support in API
+- Multiuser localstorage
diff --git a/COPYING b/COPYING
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..3f1d3c18
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,33 @@
+FROM alpine:3.15
+
+#Base settings
+ENV HOME /root
+
+COPY requirements.txt /root/requirements.txt
+
+#Install ZeroNet
+RUN apk --update --no-cache --no-progress add python3 python3-dev py3-pip gcc g++ autoconf automake libtool libffi-dev musl-dev make tor openssl \
+ && pip3 install -r /root/requirements.txt \
+ && apk del python3-dev gcc g++ autoconf automake libtool libffi-dev musl-dev make \
+ && echo "ControlPort 9051" >> /etc/tor/torrc \
+ && echo "CookieAuthentication 1" >> /etc/tor/torrc
+
+RUN python3 -V \
+ && python3 -m pip list \
+ && tor --version \
+ && openssl version
+
+#Add Zeronet source
+COPY . /root
+VOLUME /root/data
+
+#Control whether the Tor proxy is started
+ENV ENABLE_TOR true
+
+WORKDIR /root
+
+#Set startup command
+CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26117
+
+#Expose ports
+EXPOSE 43110 26117
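+
+# Example usage (hypothetical image name; adjust ports and volume as needed):
+#   docker build -t zeronet .
+#   docker run -d --name zeronet -v zeronet-data:/root/data -p 43110:43110 -p 26117:26117 zeronet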
diff --git a/Dockerfile.arm64v8 b/Dockerfile.arm64v8
new file mode 100644
index 00000000..d27b7620
--- /dev/null
+++ b/Dockerfile.arm64v8
@@ -0,0 +1,34 @@
+FROM alpine:3.12
+
+#Base settings
+ENV HOME /root
+
+COPY requirements.txt /root/requirements.txt
+
+#Install ZeroNet
+RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
+ && pip3 install -r /root/requirements.txt \
+ && apk del python3-dev gcc libffi-dev musl-dev make \
+ && echo "ControlPort 9051" >> /etc/tor/torrc \
+ && echo "CookieAuthentication 1" >> /etc/tor/torrc
+
+RUN python3 -V \
+ && python3 -m pip list \
+ && tor --version \
+ && openssl version
+
+#Add Zeronet source
+COPY . /root
+VOLUME /root/data
+
+#Control whether the Tor proxy is started
+ENV ENABLE_TOR false
+
+WORKDIR /root
+
+#Set startup command
+CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
+
+#Expose ports
+EXPOSE 43110 26552
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..0d17b72d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, version 3.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+
+Additional Conditions:
+
+Contributing to this repo
+  This repo is governed by GPLv3, located at the root of the ZeroNet git repo;
+  unless specified separately, all code is governed by that license.
+  Contributions to this repo fall into two types: key contributions and
+  non-key contributions. Key contributions directly affect the performance,
+  quality, or features of the software. Non-key contributions include things
+  like translation datasets and image, graphic, or video contributions that do
+  not affect the core usability of the software but improve the usability of
+  an existing feature; they also include tests shipped with code, since their
+  purpose is to check whether something works as intended. All non-key
+  contributions are governed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/),
+  unless specified above. If there is a conflict between two contributing
+  parties of the repo, a contribution is governed according to its type.
diff --git a/README-ru.md b/README-ru.md
new file mode 100644
index 00000000..7d557727
--- /dev/null
+++ b/README-ru.md
@@ -0,0 +1,133 @@
+# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+
+[简体中文](./README-zh-cn.md)
+[English](./README.md)
+
+Decentralized websites using Bitcoin cryptography and the BitTorrent protocol - https://zeronet.dev ([mirror in ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). Unlike Bitcoin, ZeroNet does not need a blockchain to run; it uses the same cryptography to ensure data integrity and verification.
+
+## Why?
+
+- We believe in an open, free, and censorship-resistant network and communication.
+- No single point of failure: a site stays online as long as at least one peer serves it.
+- No hosting costs: sites are served by their visitors.
+- Impossible to shut down: it's nowhere because it's everywhere.
+- Speed, and the ability to work without the Internet: you can access a site because a copy of it is stored on your computer and with your peers.
+
+## Features
+
+- Real-time updated sites
+- Support for `.bit` domains ([Namecoin](https://www.namecoin.org))
+- Easy to install: just unpack and run
+- One-click site cloning
+- Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+  authorization: your account is protected by the same cryptography as your Bitcoin wallet
+- Built-in SQL server with P2P data synchronization: simplifies site development and speeds up page loads
+- Anonymity: full Tor network support, using `.onion` hidden services instead of IPv4 addresses
+- TLS-encrypted connections
+- Automatic UPnP port opening
+- Plugin for multiuser (openproxy) support
+- Works with any browser and operating system
+
+## Current limitations
+
+- File transfers are not compressed
+- No private sites
+
+## How does it work?
+
+- After starting `zeronet.py`, you can visit sites on ZeroNet using the address
+  `http://127.0.0.1:43110/{zeronet_address}`
+  (for example: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
+- When you visit a new ZeroNet site, it tries to find peers via the BitTorrent protocol
+  in order to download the site files (HTML, CSS, JS, etc.) from them.
+- After visiting a site, you become one of its peers too.
+- Every site contains a `content.json` file that holds the SHA512 hashes of all other files
+  and a signature created with the site's private key.
+- If the site owner (whoever holds the private key for the site address) modifies the site, they
+  sign a new `content.json` and publish it to the peers. The peers then verify the integrity of `content.json`
+  (using the signature), download the modified files, and distribute the new content to other peers.
+
+[Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+[Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+[ZeroNet developer documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+## Screenshots
+
+
+
+[More screenshots in the ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+
+## How to join
+
+### Windows
+
+- Download and unpack [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26 MB)
+- Run `ZeroNet.exe`
+
+### macOS
+
+- Download and unpack [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14 MB)
+- Run `ZeroNet.app`
+
+### Linux (64-bit)
+
+- Download and unpack [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) (14 MB)
+- Run `./ZeroNet.sh`
+
+> **Note**
+> Start it as `./ZeroNet.sh --ui_ip '*' --ui_restrict your_ip_address` to allow remote connections to the web interface.
+
+### Docker
+
+The official image is available here: https://hub.docker.com/r/canewsin/zeronet/
+
+### Android (arm, arm64, x86)
+
+- Requires Android 5.0 Lollipop or newer
+- [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
+- APK download: https://github.com/canewsin/zeronet_mobile/releases
+
+### Android (arm, arm64, x86) Thin Client for Preview Only (1 MB)
+
+- Requires Android 4.1 Jelly Bean or newer
+- [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
+
+### Install from source
+
+```sh
+wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
+unzip ZeroNet-src.zip
+cd ZeroNet
+sudo apt-get update
+sudo apt-get install python3-pip
+sudo python3 -m pip install -r requirements.txt
+```
+- Run `python3 zeronet.py`
+
+Open the ZeroHello landing page in your browser at http://127.0.0.1:43110/
+
+## How do I create a site on ZeroNet?
+
+- Click **⋮** > **"Create new, empty site"** in the menu on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site.
+- You will be **redirected** to a brand-new site that can be modified only by you!
+- You can find and modify your site's content in the **data/[your_site_address]** directory.
+- After making changes, open your site, drag the "0" button in the top-right corner to the left, then press the **sign** and **publish** buttons at the bottom.
+
+Next steps: [ZeroNet developer documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+## Support the project
+
+- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
+- LiberaPay: https://liberapay.com/PramUkesh
+- Paypal: https://paypal.me/PramUkesh
+- Other ways: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+
+#### Thank you!
+
+- More info, help, changelog, and ZeroNet sites: https://www.reddit.com/r/zeronetx/
+- Chat with us on [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or in [Gitter](https://gitter.im/canewsin/ZeroNet)
+- Email: canews.in@gmail.com
diff --git a/README-zh-cn.md b/README-zh-cn.md
new file mode 100644
index 00000000..37095ff6
--- /dev/null
+++ b/README-zh-cn.md
@@ -0,0 +1,132 @@
+# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+
+[English](./README.md)
+
+Decentralized networks using Bitcoin crypto and the BitTorrent network - https://zeronet.dev
+
+
+## Why?
+
+* We believe in an open, free, and censorship-free network and communication.
+* No single point of failure: a site stays online as long as at least one peer serves it.
+* No hosting costs: sites are served by their visitors.
+* Impossible to shut down: it's nowhere because it's everywhere.
+* Fast, and works offline: you can use it even without an Internet connection.
+
+
+## Features
+ * Real-time site updates
+ * Support for Namecoin .bit domains
+ * Easy to install: just unpack and run
+ * One-click cloning of existing sites
+ * Password-less authorization based on [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki):
+   your account is protected by the same cryptography as your Bitcoin wallet
+ * Built-in SQL server with P2P data synchronization: simplifies development and speeds up page loads
+ * Anonymity: full Tor network support, connecting through .onion hidden services instead of IPv4 addresses
+ * TLS-encrypted connections
+ * Automatic uPnP port opening
+ * Plugin for multiuser (openproxy) support
+ * Works with any browser / operating system
+
+
+## How it works
+
+* After running `zeronet.py`, you can visit sites on zeronet at
+  `http://127.0.0.1:43110/{zeronet_address}` (for example:
+  `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`)
+* When you browse a zeronet site, the client tries to find available peers through the BitTorrent network and downloads the needed files (html, css, js...) from them
+* You also serve every site you have visited
+* Every site contains a file named `content.json`, which stores the sha512 checksums of all other files and a signature generated with the site's private key
+* If the site owner (who holds the private key for the site address) modifies the site, they sign the new `content.json` and push it to other peers;
+  those peers then verify the authenticity of `content.json` using the signature, download the modified files, and push the new content to further peers
+
+#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+#### [Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+
+#### [ZeroNet Developer Documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+
+## Screenshots
+
+
+
+
+#### [More screenshots in the ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+
+
+## How to join
+
+### Windows
+
+ - Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
+ - Unpack anywhere
+ - Run `ZeroNet.exe`
+
+### macOS
+
+ - Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
+ - Unpack anywhere
+ - Run `ZeroNet.app`
+
+### Linux (x86-64bit)
+
+ - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
+ - `unzip ZeroNet-linux.zip`
+ - `cd ZeroNet-linux`
+ - Start with: `./ZeroNet.sh`
+ - Open http://127.0.0.1:43110/ in your browser to reach the ZeroHello page
+
+ __Tip:__ To allow remote connections on the web interface, start with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address`
+
+### Install from source
+
+ - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
+ - `unzip ZeroNet-src.zip`
+ - `cd ZeroNet`
+ - `sudo apt-get update`
+ - `sudo apt-get install python3-pip`
+ - `sudo python3 -m pip install -r requirements.txt`
+ - Start with: `python3 zeronet.py`
+ - Open http://127.0.0.1:43110/ in your browser to reach the ZeroHello page
+
+ ### Android (arm, arm64, x86)
+ - minimum Android version supported 21 (Android 5.0 Lollipop)
+ - [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
+ - APK download: https://github.com/canewsin/zeronet_mobile/releases
+
+### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
+ - minimum Android version supported 16 (JellyBean)
+ - [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
+
+## Current limitations
+
+* File transfers are not compressed
+* No private sites
+
+
+## How to create a ZeroNet site?
+
+ * Click the **⋮** > **"Create new, empty site"** menu item on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site
+ * You will be **redirected** to a brand-new site that can only be modified by you
+ * You can find and modify your site's content in the **data/[your_site_address]** directory
+ * After making changes, open your site, drag the "0" button in the top-right corner to the left, then press the **sign** and **publish** buttons at the bottom
+
+Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+## Help keep this project alive
+- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
+- LiberaPay: https://liberapay.com/PramUkesh
+- Paypal: https://paypal.me/PramUkesh
+- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+
+
+#### Thank you!
+
+* More info, help, changelog, and zeronet sites: https://www.reddit.com/r/zeronetx/
+* Come chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
+* [Here](https://gitter.im/canewsin/ZeroNet) is a Chinese-language chatroom on gitter
+* Email: canews.in@gmail.com
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..70b79adc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,156 @@
+# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+
+Decentralized websites using Bitcoin cryptography and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/). Unlike Bitcoin, ZeroNet doesn't need a blockchain to run; it uses the same cryptography as BTC to ensure data integrity and validation.
+
+
+## Why?
+
+* We believe in an open, free, and uncensored network and communication.
+* No single point of failure: Site remains online so long as at least 1 peer is
+ serving it.
+* No hosting costs: Sites are served by visitors.
+* Impossible to shut down: It's nowhere because it's everywhere.
+* Fast and works offline: You can access the site even if the Internet is
+ unavailable.
+
+
+## Features
+ * Real-time updated sites
+ * Namecoin .bit domains support
+ * Easy to setup: unpack & run
+ * Clone websites in one click
+ * Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+ based authorization: Your account is protected by the same cryptography as your Bitcoin wallet (see the sketch after this list)
+ * Built-in SQL server with P2P data synchronization: Allows easier site development and faster page load times
+ * Anonymity: Full Tor network support with .onion hidden services instead of IPv4 addresses
+ * TLS encrypted connections
+ * Automatic uPnP port opening
+ * Plugin for multiuser (openproxy) support
+ * Works with any browser/OS
+
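+To make the BIP32 bullet above concrete, here is a minimal sketch of the standard
+BIP32 master-key derivation (the underlying primitive only; ZeroNet's actual key
+handling lives in its Crypt modules):
+
+```python
+import hashlib
+import hmac
+
+# Sketch: standard BIP32 master key derivation from a seed.
+def bip32_master_key(seed: bytes):
+    digest = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
+    return digest[:32], digest[32:]  # (master private key, chain code)
+```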
+
+## How does it work?
+
+* After starting `zeronet.py` you will be able to visit zeronet sites using
+ `http://127.0.0.1:43110/{zeronet_address}` (eg.
+ `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
+* When you visit a new zeronet site, it tries to find peers using the BitTorrent
+ network so it can download the site files (html, css, js...) from them.
+* Each visited site is also served by you.
+* Every site contains a `content.json` file which holds the sha512 hashes of
+  all other files and a signature generated using the site's private key.
+* When the site owner (who holds the private key for the site address) modifies the
+  site, they sign the new `content.json` and publish it to the peers.
+  Afterwards, the peers verify the `content.json` integrity (using the
+  signature), they download the modified files and publish the new content to
+  other peers (a simplified sketch follows below).
+
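+To make the hash-checking step concrete, here is a minimal, hypothetical sketch
+(not ZeroNet's actual implementation; the real client also verifies the signature
+and has its own hashing conventions). It assumes a simplified `content.json`
+whose `files` map pairs each inner path with a full sha512 hex digest:
+
+```python
+import hashlib
+import json
+import os
+
+# Sketch: re-hash every file listed in a simplified content.json and compare
+# against the recorded digest. Signature verification is deliberately omitted.
+def verify_files(site_dir):
+    with open(os.path.join(site_dir, "content.json"), "rb") as f:
+        content = json.load(f)
+    ok = True
+    for inner_path, info in content.get("files", {}).items():
+        with open(os.path.join(site_dir, inner_path), "rb") as f:
+            digest = hashlib.sha512(f.read()).hexdigest()
+        if digest != info["sha512"]:
+            print("Hash mismatch:", inner_path)
+            ok = False
+    return ok
+```
+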
+#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites Âģ](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+#### [Frequently asked questions Âģ](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+
+#### [ZeroNet Developer Documentation Âģ](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+
+## Screenshots
+
+
+
+
+#### [More screenshots in ZeroNet docs Âģ](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+
+
+## How to join
+
+### Windows
+
+ - Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
+ - Unpack anywhere
+ - Run `ZeroNet.exe`
+
+### macOS
+
+ - Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
+ - Unpack anywhere
+ - Run `ZeroNet.app`
+
+### Linux (x86-64bit)
+ - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
+ - `unzip ZeroNet-linux.zip`
+ - `cd ZeroNet-linux`
+ - Start with: `./ZeroNet.sh`
+ - Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
+
+ __Tip:__ Start with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address` to allow remote connections on the web interface.
+
+ ### Android (arm, arm64, x86)
+ - minimum Android version supported 21 (Android 5.0 Lollipop)
+ - [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
+ - APK download: https://github.com/canewsin/zeronet_mobile/releases
+
+### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
+ - minimum Android version supported 16 (JellyBean)
+ - [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
+
+
+#### Docker
+There is an official image, built from source at: https://hub.docker.com/r/canewsin/zeronet/
+
+### Online Proxies
+Proxies are like seed boxes for sites (i.e., ZNX runs on a cloud VPS); you can try the ZeroNet experience through a proxy. Add your proxy below if you have one.
+
+#### Official ZNX Proxy:
+
+https://proxy.zeronet.dev/
+
+https://zeronet.dev/
+
+#### From Community
+
+https://0net-preview.com/
+
+https://portal.ngnoid.tv/
+
+https://zeronet.ipfsscan.io/
+
+
+### Install from source
+
+ - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
+ - `unzip ZeroNet-src.zip`
+ - `cd ZeroNet`
+ - `sudo apt-get update`
+ - `sudo apt-get install python3-pip`
+ - `sudo python3 -m pip install -r requirements.txt`
+ - Start with: `python3 zeronet.py`
+ - Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
+
+## Current limitations
+
+* File transactions are not compressed
+* No private sites
+
+
+## How can I create a ZeroNet site?
+
+ * Click on the **⋮** > **"Create new, empty site"** menu item on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site.
+ * You will be **redirected** to a completely new site that is only modifiable by you!
+ * You can find and modify your site's content in the **data/[yoursiteaddress]** directory
+ * After making your modifications, open your site, drag the top-right "0" button to the left, then press the **sign** and **publish** buttons at the bottom
+
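+You can also create a site from the command line: the `siteCreate` action defined in `src/Config.py` in this changeset (`python3 zeronet.py siteCreate`) creates a new, empty site from the terminal.
+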
+Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+
+## Help keep this project alive
+- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
+- LiberaPay: https://liberapay.com/PramUkesh
+- Paypal: https://paypal.me/PramUkesh
+- Others: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+
+#### Thank you!
+
+* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronetx/
+* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
+* Email: canews.in@gmail.com
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 00000000..24fe0c45
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,45 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
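+# Usage (assumes Vagrant and VirtualBox are installed): run "vagrant up" to
+# provision the box; guest ports 43110 and 15441 are forwarded to the host.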
+
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+ #Set box
+ config.vm.box = "ubuntu/trusty64"
+
+ #Do not check for updates
+ config.vm.box_check_update = false
+
+ #Add private network
+ config.vm.network "private_network", type: "dhcp"
+
+ #Redirect ports
+ config.vm.network "forwarded_port", guest: 43110, host: 43110
+ config.vm.network "forwarded_port", guest: 15441, host: 15441
+
+ #Sync folder using NFS if not windows
+ config.vm.synced_folder ".", "/vagrant",
+ :nfs => !Vagrant::Util::Platform.windows?
+
+ #VirtualBox settings
+ config.vm.provider "virtualbox" do |vb|
+ # Don't boot with headless mode
+ #vb.gui = true
+
+ # Set VM settings
+ vb.customize ["modifyvm", :id, "--memory", "512"]
+ vb.customize ["modifyvm", :id, "--cpus", 1]
+ end
+
+ #Update system
+ config.vm.provision "shell",
+ inline: "sudo apt-get update -y && sudo apt-get upgrade -y"
+
+ #Install deps
+ config.vm.provision "shell",
+ inline: "sudo apt-get install msgpack-python python-gevent python-pip python-dev -y"
+ config.vm.provision "shell",
+ inline: "sudo pip install msgpack --upgrade"
+
+end
diff --git a/md5.hashes b/md5.hashes
deleted file mode 100644
index c11c8274..00000000
--- a/md5.hashes
+++ /dev/null
@@ -1,34 +0,0 @@
-[
- "794f5ac0675f66310963163e62527196",
- "f4fdc4ef9fcf3db65ea91fb46b3982ca",
- "4bdd9cc3fd3629a7e177bf37df5326c6",
- "3df0aae9c0f30941a3893f02b0533d65",
- "25001a7ef26550ec1fbb2ae7fbfff6a1",
- "634647a7ea916b29f3a8fe5f140341a8",
- "e09fab4484cf10d5bc29901f5c17df78",
- "11af969820fdc72db9d9c41abd98e4c9",
- "371da38ccd0dcdc49b71edd0872be41e",
- "a23aeb4308119a2e34e33c109d4ee496",
- "0386c7231f8af2706f3b8ca71bb30a82",
- "0f408bbceb7572631b0e1dcd97b257e1",
- "d4cfb19351a761ae1252934357772f1e",
- "7656733d355d0a31ee57ba3901374de8",
- "b522f9ad4d17d8962bba7fc1c6880d1a",
- "3e8dab64ea8c23463f83de1c68bc2342",
- "b5ebbd8c4a7fa865095e95853d5bee35",
- "0e7b811892a6abc0cbcf66161ac82bc5",
- "d2ba546cd3eae258b10c7fdbaafe9434",
- "f558010cc964e206eb03eafd90731e0b",
- "4cfcd90b9206701d96c7757222072e5c",
- "063cd806f972b6d0f0226d8c04474270",
- "c7d737758baf1d516cf3a0ed45176f6e",
- "b6cfb932d1499cbc2fba10c06efe9567",
- "30865832830c3bb1d67aeb48b0572774",
- "4908d51ff8f2daa35a209db0c86dc535",
- "336b451616f620743e6aecb30900b822",
- "98c9109d618094a9775866c1838d4666",
- "11e86b9a2aae72f854bf1f181946d78b",
- "28d0faceb156ad1e5f1befa770dce3cd",
- "93191cea5d81f6c2b2f5a4a547e2bdfd",
- "6b1f09c95720e730ef27970b7f9f3e5c"
-]
diff --git a/patches.json b/patches.json
deleted file mode 100644
index 54e91f51..00000000
--- a/patches.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[
- {
- "filename": "CryptConnection.py",
- "patchDir": "src/Crypt",
- "patchUrl": "https://raw.githubusercontent.com/canewsin/ZeroNet/py3-patches/CryptConnection.py"
- }
-]
\ No newline at end of file
diff --git a/plugins b/plugins
new file mode 160000
index 00000000..689d9309
--- /dev/null
+++ b/plugins
@@ -0,0 +1 @@
+Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..538a6dfc
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+gevent==1.4.0; python_version <= "3.6"
+greenlet==0.4.16; python_version <= "3.6"
+gevent>=20.9.0; python_version >= "3.7"
+msgpack>=0.4.4
+base58
+merkletools @ git+https://github.com/ZeroNetX/pymerkletools.git@dev
+rsa
+PySocks>=1.6.8
+pyasn1
+websocket_client
+gevent-ws
+coincurve
+maxminddb
diff --git a/src/Config.py b/src/Config.py
new file mode 100644
index 00000000..a9208d55
--- /dev/null
+++ b/src/Config.py
@@ -0,0 +1,675 @@
+import argparse
+import sys
+import os
+import locale
+import re
+import configparser
+import logging
+import logging.handlers
+import stat
+import time
+
+
+class Config(object):
+
+ def __init__(self, argv):
+ self.version = "0.9.0"
+ self.rev = 4630
+ self.argv = argv
+ self.action = None
+ self.test_parser = None
+ self.pending_changes = {}
+ self.need_restart = False
+ self.keys_api_change_allowed = set([
+ "tor", "fileserver_port", "language", "tor_use_bridges", "trackers_proxy", "trackers",
+ "trackers_file", "open_browser", "log_level", "fileserver_ip_type", "ip_external", "offline",
+ "threads_fs_read", "threads_fs_write", "threads_crypt", "threads_db"
+ ])
+ self.keys_restart_need = set([
+ "tor", "fileserver_port", "fileserver_ip_type", "threads_fs_read", "threads_fs_write", "threads_crypt", "threads_db"
+ ])
+ self.start_dir = self.getStartDir()
+
+ self.config_file = self.start_dir + "/zeronet.conf"
+ self.data_dir = self.start_dir + "/data"
+ self.log_dir = self.start_dir + "/log"
+ self.openssl_lib_file = None
+ self.openssl_bin_file = None
+
+ self.trackers_file = False
+ self.createParser()
+ self.createArguments()
+
+ def createParser(self):
+ # Create parser
+ self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ self.parser.register('type', 'bool', self.strToBool)
+ self.subparsers = self.parser.add_subparsers(title="Action to perform", dest="action")
+
+ def __str__(self):
+ return str(self.arguments).replace("Namespace", "Config") # Using argparse str output
+
+ # Convert string to bool
+ def strToBool(self, v):
+ return v.lower() in ("yes", "true", "t", "1")
+
+ def getStartDir(self):
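+ # rstrip("cd") trims a trailing "c"/"d" so compiled "Config.pyc"/"Config.pyd" paths resolve like "Config.py"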
+ this_file = os.path.abspath(__file__).replace("\\", "/").rstrip("cd")
+
+ if "--start_dir" in self.argv:
+ start_dir = self.argv[self.argv.index("--start_dir") + 1]
+ elif this_file.endswith("/Contents/Resources/core/src/Config.py"):
+ # Running as ZeroNet.app
+ if this_file.startswith("/Application") or this_file.startswith("/private") or this_file.startswith(os.path.expanduser("~/Library")):
+ # Running from a non-writable directory, put data in Application Support
+ start_dir = os.path.expanduser("~/Library/Application Support/ZeroNet")
+ else:
+ # Running from a writable directory, put data next to the .app
+ start_dir = re.sub("/[^/]+/Contents/Resources/core/src/Config.py", "", this_file)
+ elif this_file.endswith("/core/src/Config.py"):
+ # Running as exe, or source is in the Application Support directory; put var files outside the core dir
+ start_dir = this_file.replace("/core/src/Config.py", "")
+ elif this_file.endswith("usr/share/zeronet/src/Config.py"):
+ # Running from non-writeable location, e.g., AppImage
+ start_dir = os.path.expanduser("~/ZeroNet")
+ else:
+ start_dir = "."
+
+ return start_dir
+
+ # Create command line arguments
+ def createArguments(self):
+ from Crypt import CryptHash
+ access_key_default = CryptHash.random(24, "base64") # Used to allow restricted plugins when the multiuser plugin is enabled
+ trackers = [
+ "http://open.acgnxtracker.com:80/announce", # DE
+ "http://tracker.bt4g.com:2095/announce", # Cloudflare
+ "http://tracker.files.fm:6969/announce",
+ "http://t.publictracker.xyz:6969/announce",
+ "https://tracker.lilithraws.cf:443/announce",
+ "https://tracker.babico.name.tr:443/announce",
+ ]
+ # Platform specific
+ if sys.platform.startswith("win"):
+ coffeescript = "type %s | tools\\coffee\\coffee.cmd"
+ else:
+ coffeescript = None
+
+ try:
+ language, enc = locale.getdefaultlocale()
+ language = language.lower().replace("_", "-")
+ if language not in ["pt-br", "zh-tw"]:
+ language = language.split("-")[0]
+ except Exception:
+ language = "en"
+
+ use_openssl = True
+
+ if repr(1483108852.565) != "1483108852.565": # Fix for weird Android issue
+ fix_float_decimals = True
+ else:
+ fix_float_decimals = False
+
+ config_file = self.start_dir + "/zeronet.conf"
+ data_dir = self.start_dir + "/data"
+ log_dir = self.start_dir + "/log"
+
+ ip_local = ["127.0.0.1", "::1"]
+
+ # Main
+ action = self.subparsers.add_parser("main", help='Start UiServer and FileServer (default)')
+
+ # SiteCreate
+ action = self.subparsers.add_parser("siteCreate", help='Create a new site')
+ action.register('type', 'bool', self.strToBool)
+ action.add_argument('--use_master_seed', help="Allow created site's private key to be recovered using the master seed in users.json (default: True)", type="bool", choices=[True, False], default=True)
+
+ # SiteNeedFile
+ action = self.subparsers.add_parser("siteNeedFile", help='Get a file from site')
+ action.add_argument('address', help='Site address')
+ action.add_argument('inner_path', help='File inner path')
+
+ # SiteDownload
+ action = self.subparsers.add_parser("siteDownload", help='Download a new site')
+ action.add_argument('address', help='Site address')
+
+ # SiteSign
+ action = self.subparsers.add_parser("siteSign", help='Update and sign content.json: address [privatekey]')
+ action.add_argument('address', help='Site to sign')
+ action.add_argument('privatekey', help='Private key (default: ask on execute)', nargs='?')
+ action.add_argument('--inner_path', help='File you want to sign (default: content.json)',
+ default="content.json", metavar="inner_path")
+ action.add_argument('--remove_missing_optional', help='Remove optional files that are not present in the directory', action='store_true')
+ action.add_argument('--publish', help='Publish the site after signing', action='store_true')
+
+ # SitePublish
+ action = self.subparsers.add_parser("sitePublish", help='Publish site to other peers: address')
+ action.add_argument('address', help='Site to publish')
+ action.add_argument('peer_ip', help='Peer ip to publish to (default: random peer ips from tracker)',
+ default=None, nargs='?')
+ action.add_argument('peer_port', help='Peer port to publish (default: random peer port from tracker)',
+ default=15441, nargs='?')
+ action.add_argument('--inner_path', help='Content.json you want to publish (default: content.json)',
+ default="content.json", metavar="inner_path")
+
+ # SiteVerify
+ action = self.subparsers.add_parser("siteVerify", help='Verify site files using sha512: address')
+ action.add_argument('address', help='Site to verify')
+
+ # SiteCmd
+ action = self.subparsers.add_parser("siteCmd", help='Execute a ZeroFrame API command on a site')
+ action.add_argument('address', help='Site address')
+ action.add_argument('cmd', help='API command name')
+ action.add_argument('parameters', help='Parameters of the command', nargs='?')
+
+ # dbRebuild
+ action = self.subparsers.add_parser("dbRebuild", help='Rebuild site database cache')
+ action.add_argument('address', help='Site to rebuild')
+
+ # dbQuery
+ action = self.subparsers.add_parser("dbQuery", help='Query site sql cache')
+ action.add_argument('address', help='Site to query')
+ action.add_argument('query', help='Sql query')
+
+ # PeerPing
+ action = self.subparsers.add_parser("peerPing", help='Send Ping command to peer')
+ action.add_argument('peer_ip', help='Peer ip')
+ action.add_argument('peer_port', help='Peer port', nargs='?')
+
+ # PeerGetFile
+ action = self.subparsers.add_parser("peerGetFile", help='Request and print a file content from peer')
+ action.add_argument('peer_ip', help='Peer ip')
+ action.add_argument('peer_port', help='Peer port')
+ action.add_argument('site', help='Site address')
+ action.add_argument('filename', help='File name to request')
+ action.add_argument('--benchmark', help='Request the file 10 times, then display the total time', action='store_true')
+
+ # PeerCmd
+ action = self.subparsers.add_parser("peerCmd", help='Request and print a file content from peer')
+ action.add_argument('peer_ip', help='Peer ip')
+ action.add_argument('peer_port', help='Peer port')
+ action.add_argument('cmd', help='Command to execute')
+ action.add_argument('parameters', help='Parameters to command', nargs='?')
+
+ # CryptSign
+ action = self.subparsers.add_parser("cryptSign", help='Sign message using Bitcoin private key')
+ action.add_argument('message', help='Message to sign')
+ action.add_argument('privatekey', help='Private key')
+
+ # Crypt Verify
+ action = self.subparsers.add_parser("cryptVerify", help='Verify message using Bitcoin public address')
+ action.add_argument('message', help='Message to verify')
+ action.add_argument('sign', help='Signature for the message')
+ action.add_argument('address', help='Signer\'s address')
+
+ # Crypt GetPrivatekey
+ action = self.subparsers.add_parser("cryptGetPrivatekey", help='Generate a privatekey from master seed')
+ action.add_argument('master_seed', help='Source master seed')
+ action.add_argument('site_address_index', help='Site address index', type=int)
+
+ action = self.subparsers.add_parser("getConfig", help='Return json-encoded info')
+ action = self.subparsers.add_parser("testConnection", help='Testing')
+ action = self.subparsers.add_parser("testAnnounce", help='Testing')
+
+ self.test_parser = self.subparsers.add_parser("test", help='Run a test')
+ self.test_parser.add_argument('test_name', help='Test name', nargs="?")
+ # self.test_parser.add_argument('--benchmark', help='Run the tests multiple times to measure the performance', action='store_true')
+
+ # Config parameters
+ self.parser.add_argument('--verbose', help='More detailed logging', action='store_true')
+ self.parser.add_argument('--debug', help='Debug mode', action='store_true')
+ self.parser.add_argument('--silent', help='Only log errors to terminal output', action='store_true')
+ self.parser.add_argument('--debug_socket', help='Debug socket connections', action='store_true')
+ self.parser.add_argument('--merge_media', help='Merge all.js and all.css', action='store_true')
+
+ self.parser.add_argument('--batch', help="Batch mode (No interactive input for commands)", action='store_true')
+
+ self.parser.add_argument('--start_dir', help='Path of working dir for variable content (data, log, .conf)', default=self.start_dir, metavar="path")
+ self.parser.add_argument('--config_file', help='Path of config file', default=config_file, metavar="path")
+ self.parser.add_argument('--data_dir', help='Path of data directory', default=data_dir, metavar="path")
+
+ self.parser.add_argument('--console_log_level', help='Level of logging to console', default="default", choices=["default", "DEBUG", "INFO", "ERROR", "off"])
+
+ self.parser.add_argument('--log_dir', help='Path of logging directory', default=log_dir, metavar="path")
+ self.parser.add_argument('--log_level', help='Level of logging to file', default="DEBUG", choices=["DEBUG", "INFO", "ERROR", "off"])
+ self.parser.add_argument('--log_rotate', help='Log rotate interval', default="daily", choices=["hourly", "daily", "weekly", "off"])
+ self.parser.add_argument('--log_rotate_backup_count', help='Log rotate backup count', default=5, type=int)
+
+ self.parser.add_argument('--language', help='Web interface language', default=language, metavar='language')
+ self.parser.add_argument('--ui_ip', help='Web interface bind address', default="127.0.0.1", metavar='ip')
+ self.parser.add_argument('--ui_port', help='Web interface bind port', default=43110, type=int, metavar='port')
+ self.parser.add_argument('--ui_restrict', help='Restrict web access', default=False, metavar='ip', nargs='*')
+ self.parser.add_argument('--ui_host', help='Allow access using these hosts', metavar='host', nargs='*')
+ self.parser.add_argument('--ui_trans_proxy', help='Allow access using a transparent proxy', action='store_true')
+
+ self.parser.add_argument('--open_browser', help='Open homepage in web browser automatically',
+ nargs='?', const="default_browser", metavar='browser_name')
+ self.parser.add_argument('--homepage', help='Web interface Homepage', default='1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d',
+ metavar='address')
+ self.parser.add_argument('--updatesite', help='Source code update site', default='1Update8crprmciJHwp2WXqkx2c4iYp18',
+ metavar='address')
+ self.parser.add_argument('--access_key', help='Plugin access key (default: random key generated at startup)', default=access_key_default, metavar='key')
+ self.parser.add_argument('--dist_type', help='Type of installed distribution', default='source')
+
+ self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=25, type=int, metavar='limit')
+ self.parser.add_argument('--file_size_limit', help='Maximum per file size limit in MB', default=10, type=int, metavar='limit')
+ self.parser.add_argument('--connected_limit', help='Max connected peer per site', default=8, type=int, metavar='connected_limit')
+ self.parser.add_argument('--global_connected_limit', help='Max connections', default=512, type=int, metavar='global_connected_limit')
+ self.parser.add_argument('--workers', help='Download workers per site', default=5, type=int, metavar='workers')
+
+ self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
+ self.parser.add_argument('--fileserver_port', help='FileServer bind port (0: randomize)', default=0, type=int, metavar='port')
+ self.parser.add_argument('--fileserver_port_range', help='FileServer randomization range', default="10000-40000", metavar='port')
+ self.parser.add_argument('--fileserver_ip_type', help='FileServer ip type', default="dual", choices=["ipv4", "ipv6", "dual"])
+ self.parser.add_argument('--ip_local', help='My local ips', default=ip_local, metavar='ip', nargs='*')
+ self.parser.add_argument('--ip_external', help='Set reported external ip (tested on start if None)', metavar='ip', nargs='*')
+ self.parser.add_argument('--offline', help='Disable network communication', action='store_true')
+
+ self.parser.add_argument('--disable_udp', help='Disable UDP connections', action='store_true')
+ self.parser.add_argument('--proxy', help='Socks proxy address', metavar='ip:port')
+ self.parser.add_argument('--bind', help='Bind outgoing sockets to this address', metavar='ip')
+ self.parser.add_argument('--trackers', help='Bootstrapping torrent trackers', default=trackers, metavar='protocol://address', nargs='*')
+ self.parser.add_argument('--trackers_file', help='Load torrent trackers dynamically from a file', metavar='path', nargs='*')
+ self.parser.add_argument('--trackers_proxy', help='Force use proxy to connect to trackers (disable, tor, ip:port)', default="disable")
+ self.parser.add_argument('--use_libsecp256k1', help='Use libsecp256k1 library for speedup', type='bool', choices=[True, False], default=True)
+ self.parser.add_argument('--use_openssl', help='Use OpenSSL library for speedup', type='bool', choices=[True, False], default=True)
+ self.parser.add_argument('--openssl_lib_file', help='Path for OpenSSL library file (default: detect)', default=argparse.SUPPRESS, metavar="path")
+ self.parser.add_argument('--openssl_bin_file', help='Path for OpenSSL binary file (default: detect)', default=argparse.SUPPRESS, metavar="path")
+ self.parser.add_argument('--disable_db', help='Disable database updating', action='store_true')
+ self.parser.add_argument('--disable_encryption', help='Disable connection encryption', action='store_true')
+ self.parser.add_argument('--force_encryption', help="Enforce encryption to all peer connections", action='store_true')
+ self.parser.add_argument('--disable_sslcompression', help='Disable SSL compression to save memory',
+ type='bool', choices=[True, False], default=True)
+ self.parser.add_argument('--keep_ssl_cert', help='Disable new SSL cert generation on startup', action='store_true')
+ self.parser.add_argument('--max_files_opened', help='Change maximum opened files allowed by OS to this value on startup',
+ default=2048, type=int, metavar='limit')
+ self.parser.add_argument('--stack_size', help='Change thread stack size', default=None, type=int, metavar='thread_stack_size')
+ self.parser.add_argument('--use_tempfiles', help='Use temporary files when downloading (experimental)',
+ type='bool', choices=[True, False], default=False)
+ self.parser.add_argument('--stream_downloads', help='Stream download directly to files (experimental)',
+ type='bool', choices=[True, False], default=False)
+ self.parser.add_argument("--msgpack_purepython", help='Use less memory, but a bit more CPU power',
+ type='bool', choices=[True, False], default=False)
+ self.parser.add_argument("--fix_float_decimals", help='Fix content.json modification date float precision on verification',
+ type='bool', choices=[True, False], default=fix_float_decimals)
+ self.parser.add_argument("--db_mode", choices=["speed", "security"], default="speed")
+
+ self.parser.add_argument('--threads_fs_read', help='Number of threads for file read operations', default=1, type=int)
+ self.parser.add_argument('--threads_fs_write', help='Number of threads for file write operations', default=1, type=int)
+ self.parser.add_argument('--threads_crypt', help='Number of threads for cryptographic operations', default=2, type=int)
+ self.parser.add_argument('--threads_db', help='Number of threads for database operations', default=1, type=int)
+
+ self.parser.add_argument("--download_optional", choices=["manual", "auto"], default="manual")
+
+ self.parser.add_argument('--coffeescript_compiler', help='Coffeescript compiler for developing', default=coffeescript,
+ metavar='executable_path')
+
+ self.parser.add_argument('--tor', help='enable: Use only for Tor peers, always: Use Tor for every connection', choices=["disable", "enable", "always"], default='enable')
+ self.parser.add_argument('--tor_controller', help='Tor controller address', metavar='ip:port', default='127.0.0.1:9051')
+ self.parser.add_argument('--tor_proxy', help='Tor proxy address', metavar='ip:port', default='127.0.0.1:9050')
+ self.parser.add_argument('--tor_password', help='Tor controller password', metavar='password')
+ self.parser.add_argument('--tor_use_bridges', help='Use obfuscated bridge relays to avoid Tor block', action='store_true')
+ self.parser.add_argument('--tor_hs_limit', help='Maximum number of hidden services in Tor always mode', metavar='limit', type=int, default=10)
+ self.parser.add_argument('--tor_hs_port', help='Hidden service port in Tor always mode', metavar='port', type=int, default=15441)
+
+ self.parser.add_argument('--version', action='version', version='ZeroNet %s r%s' % (self.version, self.rev))
+ self.parser.add_argument('--end', help='Stop multi value argument parsing', action='store_true')
+
+ return self.parser
+
+ def loadTrackersFile(self):
+ if not self.trackers_file:
+ self.trackers_file = ["trackers.txt", "{data_dir}/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d/trackers.txt"]
+ self.trackers = self.arguments.trackers[:]
+
+ for trackers_file in self.trackers_file:
+ try:
+ if trackers_file.startswith("/"): # Absolute
+ trackers_file_path = trackers_file
+ elif trackers_file.startswith("{data_dir}"): # Relative to data_dir
+ trackers_file_path = trackers_file.replace("{data_dir}", self.data_dir)
+ else: # Relative to zeronet.py
+ trackers_file_path = self.start_dir + "/" + trackers_file
+
+ if not os.path.exists(trackers_file_path):
+ continue
+
+ with open(trackers_file_path) as f:
+ for line in f:
+ tracker = line.strip()
+ if "://" in tracker and tracker not in self.trackers:
+ self.trackers.append(tracker)
+ except Exception as err:
+ print("Error loading trackers file: %s" % err)
+
+ # Find arguments specified for current action
+ def getActionArguments(self):
+ back = {}
+ arguments = self.parser._subparsers._group_actions[0].choices[self.action]._actions[1:] # First is --version
+ for argument in arguments:
+ back[argument.dest] = getattr(self, argument.dest)
+ return back
+
+ # Try to find action from argv
+ def getAction(self, argv):
+ actions = [list(action.choices.keys()) for action in self.parser._actions if action.dest == "action"][0] # Valid actions
+ found_action = False
+ for action in actions: # See if any in argv
+ if action in argv:
+ found_action = action
+ break
+ return found_action
+
+ # Move plugin parameters to end of argument list
+ def moveUnknownToEnd(self, argv, default_action):
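+ # argparse would reject options registered by plugins that are not loaded yet,
+ # so collect any unrecognized --option (and its values) and shift it behind
+ # the recognized parameters for a later parsing pass.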
+ valid_actions = sum([action.option_strings for action in self.parser._actions], [])
+ valid_parameters = []
+ plugin_parameters = []
+ plugin = False
+ for arg in argv:
+ if arg.startswith("--"):
+ if arg not in valid_actions:
+ plugin = True
+ else:
+ plugin = False
+ elif arg == default_action:
+ plugin = False
+
+ if plugin:
+ plugin_parameters.append(arg)
+ else:
+ valid_parameters.append(arg)
+ return valid_parameters + plugin_parameters
+
+ def getParser(self, argv):
+ action = self.getAction(argv)
+ if not action:
+ return self.parser
+ else:
+ return self.subparsers.choices[action]
+
+ # Parse arguments from config file and command line
+ def parse(self, silent=False, parse_config=True):
+ argv = self.argv[:] # Copy command line arguments
+ current_parser = self.getParser(argv)
+ if silent: # Don't display messages or quit on unknown parameter
+ original_print_message = self.parser._print_message
+ original_exit = self.parser.exit
+
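+ # Monkey-patch the selected parser so an unknown argument only sets
+ # parser.exited instead of printing usage and terminating the process.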
+ def silencer(parser, function_name):
+ parser.exited = True
+ return None
+ current_parser.exited = False
+ current_parser._print_message = lambda *args, **kwargs: silencer(current_parser, "_print_message")
+ current_parser.exit = lambda *args, **kwargs: silencer(current_parser, "exit")
+
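+ # Two-pass parse: the first pass learns --config_file and related paths from
+ # the command line, then config file values are merged into argv and parsed
+ # again; command line occurrences come later in argv, so they take precedence.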
+ self.parseCommandline(argv, silent) # Parse argv
+ self.setAttributes()
+ if parse_config:
+ argv = self.parseConfig(argv) # Add arguments from config file
+
+ self.parseCommandline(argv, silent) # Parse argv
+ self.setAttributes()
+
+ if not silent:
+ if self.fileserver_ip != "*" and self.fileserver_ip not in self.ip_local:
+ self.ip_local.append(self.fileserver_ip)
+
+ if silent: # Restore original functions
+ if current_parser.exited and self.action == "main": # Argument parsing halted, don't start ZeroNet with main action
+ self.action = None
+ current_parser._print_message = original_print_message
+ current_parser.exit = original_exit
+
+ self.loadTrackersFile()
+
+ # Parse command line arguments
+ def parseCommandline(self, argv, silent=False):
+ # Find out if an action is specified on start
+ action = self.getAction(argv)
+ if not action:
+ argv.append("--end")
+ argv.append("main")
+ action = "main"
+ argv = self.moveUnknownToEnd(argv, action)
+ if silent:
+ res = self.parser.parse_known_args(argv[1:])
+ if res:
+ self.arguments = res[0]
+ else:
+ self.arguments = {}
+ else:
+ self.arguments = self.parser.parse_args(argv[1:])
+
+ # Parse config file
+ def parseConfig(self, argv):
+ # Find config file path from parameters
+ if "--config_file" in argv:
+ self.config_file = argv[argv.index("--config_file") + 1]
+ # Load config file
+ if os.path.isfile(self.config_file):
+ config = configparser.RawConfigParser(allow_no_value=True, strict=False)
+ config.read(self.config_file)
+ for section in config.sections():
+ for key, val in config.items(section):
+ if val == "True":
+ val = None
+ if section != "global": # If not global prefix key with section
+ key = section + "_" + key
+
+ if key == "open_browser": # Prefer config file value over cli argument
+ while "--%s" % key in argv:
+ pos = argv.index("--open_browser")
+ del argv[pos:pos + 2]
+
+ argv_extend = ["--%s" % key]
+ if val:
+ for line in val.strip().split("\n"): # Allow multi-line values
+ argv_extend.append(line)
+ if "\n" in val:
+ argv_extend.append("--end")
+
+ argv = argv[:1] + argv_extend + argv[1:]
+ return argv
+
+ # Return command line value of given argument
+ def getCmdlineValue(self, key):
+ if key not in self.argv:
+ return None
+ argv_index = self.argv.index(key)
+ if argv_index == len(self.argv) - 1: # Last argument, no value specified
+ return None
+
+ return self.argv[argv_index + 1]
+
+ # Expose arguments as class attributes
+ def setAttributes(self):
+ # Set attributes from arguments
+ if self.arguments:
+ args = vars(self.arguments)
+ for key, val in args.items():
+ if type(val) is list:
+ val = val[:]
+ if key in ("data_dir", "log_dir", "start_dir", "openssl_bin_file", "openssl_lib_file"):
+ if val:
+ val = val.replace("\\", "/")
+ setattr(self, key, val)
+
+ def loadPlugins(self):
+ from Plugin import PluginManager
+
+ @PluginManager.acceptPlugins
+ class ConfigPlugin(object):
+ def __init__(self, config):
+ self.argv = config.argv
+ self.parser = config.parser
+ self.subparsers = config.subparsers
+ self.test_parser = config.test_parser
+ self.getCmdlineValue = config.getCmdlineValue
+ self.createArguments()
+
+ def createArguments(self):
+ pass
+
+ ConfigPlugin(self)
+
+ def saveValue(self, key, value):
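+ # Persist a single key (or delete it when value is None) in the [global]
+ # section of the config file, keeping all other lines intact.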
+ if not os.path.isfile(self.config_file):
+ content = ""
+ else:
+ with open(self.config_file) as f:
+ content = f.read()
+ lines = content.splitlines()
+
+ global_line_i = None
+ key_line_i = None
+ i = 0
+ for line in lines:
+ if line.strip() == "[global]":
+ global_line_i = i
+ if line.startswith(key + " =") or line == key:
+ key_line_i = i
+ i += 1
+
+ if key_line_i is not None:
+ while len(lines) > key_line_i + 1: # Delete previous multiline values
+ is_value_line = lines[key_line_i + 1].startswith(" ") or lines[key_line_i + 1].startswith("\t")
+ if not is_value_line:
+ break
+ del lines[key_line_i + 1]
+
+ if value is None: # Delete line
+ if key_line_i is not None:
+ del lines[key_line_i]
+
+ else: # Add / update
+ if type(value) is list:
+ value_lines = [""] + [str(line).replace("\n", "").replace("\r", "") for line in value]
+ else:
+ value_lines = [str(value).replace("\n", "").replace("\r", "")]
+ new_line = "%s = %s" % (key, "\n ".join(value_lines))
+ if key_line_i is not None: # Already in the config, change the line
+ lines[key_line_i] = new_line
+ elif global_line_i is None: # No global section yet, append to end of file
+ lines.append("[global]")
+ lines.append(new_line)
+ else: # Has global section, append the line after it
+ lines.insert(global_line_i + 1, new_line)
+
+ open(self.config_file, "w").write("\n".join(lines))
+
+ def getServerInfo(self):
+ from Plugin import PluginManager
+ import main
+
+ info = {
+ "platform": sys.platform,
+ "fileserver_ip": self.fileserver_ip,
+ "fileserver_port": self.fileserver_port,
+ "ui_ip": self.ui_ip,
+ "ui_port": self.ui_port,
+ "version": self.version,
+ "rev": self.rev,
+ "language": self.language,
+ "debug": self.debug,
+ "plugins": PluginManager.plugin_manager.plugin_names,
+
+ "log_dir": os.path.abspath(self.log_dir),
+ "data_dir": os.path.abspath(self.data_dir),
+ "src_dir": os.path.dirname(os.path.abspath(__file__))
+ }
+
+ try:
+ info["ip_external"] = main.file_server.port_opened
+ info["tor_enabled"] = main.file_server.tor_manager.enabled
+ info["tor_status"] = main.file_server.tor_manager.status
+ except Exception:
+ pass
+
+ return info
+
+ def initConsoleLogger(self):
+ if self.action == "main":
+ format = '[%(asctime)s] %(name)s %(message)s'
+ else:
+ format = '%(name)s %(message)s'
+
+ if self.console_log_level == "default":
+ if self.silent:
+ level = logging.ERROR
+ elif self.debug:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+ else:
+ level = logging.getLevelName(self.console_log_level)
+
+ console_logger = logging.StreamHandler()
+ console_logger.setFormatter(logging.Formatter(log_format, "%H:%M:%S"))
+ console_logger.setLevel(level)
+ logging.getLogger('').addHandler(console_logger)
+
+ def initFileLogger(self):
+ if self.action == "main":
+ log_file_path = "%s/debug.log" % self.log_dir
+ else:
+ log_file_path = "%s/cmd.log" % self.log_dir
+
+ if self.log_rotate == "off":
+ file_logger = logging.FileHandler(log_file_path, "w", "utf-8")
+ else:
+ when_names = {"weekly": "w", "daily": "d", "hourly": "h"}
+ file_logger = logging.handlers.TimedRotatingFileHandler(
+ log_file_path, when=when_names[self.log_rotate], interval=1, backupCount=self.log_rotate_backup_count,
+ encoding="utf8"
+ )
+
+ if os.path.isfile(log_file_path):
+ file_logger.doRollover() # Always start with empty log file
+ file_logger.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)-8s %(name)s %(message)s'))
+ file_logger.setLevel(logging.getLevelName(self.log_level))
+ logging.getLogger('').setLevel(logging.getLevelName(self.log_level))
+ logging.getLogger('').addHandler(file_logger)
+
+ def initLogging(self, console_logging=None, file_logging=None):
+ if console_logging is None:
+ console_logging = self.console_log_level != "off"
+
+ if file_logging is None:
+ file_logging = self.log_level != "off"
+
+ # Create necessary files and dirs
+ if not os.path.isdir(self.log_dir):
+ os.mkdir(self.log_dir)
+ try:
+ os.chmod(self.log_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+ except Exception as err:
+ print("Can't change permission of %s: %s" % (self.log_dir, err))
+
+ # Make warning hidden from console
+ logging.WARNING = 15 # Don't display warnings if not in debug mode
+ logging.addLevelName(15, "WARNING")
+
+ logging.getLogger('').name = "-" # Remove root prefix
+
+ self.error_logger = ErrorLogHandler()
+ self.error_logger.setLevel(logging.getLevelName("ERROR"))
+ logging.getLogger('').addHandler(self.error_logger)
+
+ if console_logging:
+ self.initConsoleLogger()
+ if file_logging:
+ self.initFileLogger()
+
+
+class ErrorLogHandler(logging.StreamHandler):
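+ # Collects ERROR-level records in memory as [time, levelname, text];
+ # presumably consumed elsewhere (e.g. by the web UI) via the lines attribute.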
+ def __init__(self):
+ self.lines = []
+ super().__init__()
+
+ def emit(self, record):
+ self.lines.append([time.time(), record.levelname, self.format(record)])
+
+ def onNewRecord(self, record):
+ pass
+
+
+config = Config(sys.argv)
diff --git a/src/Connection/Connection.py b/src/Connection/Connection.py
new file mode 100644
index 00000000..22bcf29c
--- /dev/null
+++ b/src/Connection/Connection.py
@@ -0,0 +1,635 @@
+import socket
+import time
+
+import gevent
+try:
+ from gevent.coros import RLock # Old gevent versions
+except ImportError:
+ from gevent.lock import RLock # Newer gevent moved RLock to gevent.lock
+
+from Config import config
+from Debug import Debug
+from util import Msgpack
+from Crypt import CryptConnection
+from util import helper
+
+
+class Connection(object):
+ __slots__ = (
+ "sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
+ "handshake", "crypt", "connected", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
+ "last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
+ "last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
+ )
+
+ def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
+ self.sock = sock
+ self.cert_pin = None
+ if "#" in ip:
+ ip, self.cert_pin = ip.split("#")
+ self.target_onion = target_onion # Requested onion address
+ self.id = server.last_connection_id
+ server.last_connection_id += 1
+ self.protocol = "?"
+ self.type = "?"
+ self.ip_type = "?"
+ self.port = int(port)
+ self.setIp(ip)
+
+ if helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
+ self.is_private_ip = True
+ else:
+ self.is_private_ip = False
+ self.is_tracker_connection = is_tracker_connection
+
+ self.server = server
+ self.unpacker = None # Stream incoming socket messages here
+ self.unpacker_bytes = 0 # How many bytes the unpacker received
+ self.req_id = 0 # Last request id
+ self.handshake = {} # Handshake info got from peer
+ self.crypt = None # Connection encryption method
+ self.sock_wrapped = False # Socket wrapped to encryption
+
+ self.connected = False
+ self.event_connected = gevent.event.AsyncResult() # Resolves when the handshake is received
+ self.closed = False
+
+ # Stats
+ self.start_time = time.time()
+ self.handshake_time = 0
+ self.last_recv_time = 0
+ self.last_message_time = 0
+ self.last_send_time = 0
+ self.last_sent_time = 0
+ self.incomplete_buff_recv = 0
+ self.bytes_recv = 0
+ self.bytes_sent = 0
+ self.last_ping_delay = None
+ self.last_req_time = 0
+ self.last_cmd_sent = None
+ self.last_cmd_recv = None
+ self.bad_actions = 0
+ self.sites = 0
+ self.cpu_time = 0.0
+ self.send_lock = RLock()
+
+ self.name = None
+ self.updateName()
+
+ self.waiting_requests = {} # Waiting sent requests
+ self.waiting_streams = {} # Waiting response file streams
+
+ def setIp(self, ip):
+ self.ip = ip
+ self.ip_type = helper.getIpType(ip)
+ self.updateName()
+
+ def createSocket(self):
+ if helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
+ # Create IPv6 connection as IPv4 when using proxy
+ return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ def updateName(self):
+ self.name = "Conn#%2s %-12s [%s]" % (self.id, self.ip, self.protocol)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<%s>" % self.__str__()
+
+ def log(self, text):
+ self.server.log.debug("%s > %s" % (self.name, text))
+
+ def getValidSites(self):
+ return [key for key, val in self.server.tor_manager.site_onions.items() if val == self.target_onion]
+
+ def badAction(self, weight=1):
+ self.bad_actions += weight
+ if self.bad_actions > 40:
+ self.close("Too many bad actions")
+ elif self.bad_actions > 20:
+ time.sleep(5)
+
+ def goodAction(self):
+ self.bad_actions = 0
+
+ # Open connection to peer and wait for handshake
+ def connect(self):
+ self.type = "out"
+ if self.ip_type == "onion":
+ if not self.server.tor_manager or not self.server.tor_manager.enabled:
+ raise Exception("Can't connect to onion addresses, no Tor controller present")
+ self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
+ elif config.tor == "always" and helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
+ raise Exception("Can't connect to local IPs in Tor: always mode")
+ elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
+ if config.trackers_proxy == "tor":
+ self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
+ else:
+ import socks
+ self.sock = socks.socksocket()
+ proxy_ip, proxy_port = config.trackers_proxy.split(":")
+ self.sock.set_proxy(socks.PROXY_TYPE_SOCKS5, proxy_ip, int(proxy_port))
+ else:
+ self.sock = self.createSocket()
+
+ if "TCP_NODELAY" in dir(socket):
+ self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ timeout_before = self.sock.gettimeout()
+ self.sock.settimeout(30)
+ if self.ip_type == "ipv6" and not hasattr(self.sock, "proxy"):
+ sock_address = (self.ip, self.port, 1, 1)
+ else:
+ sock_address = (self.ip, self.port)
+
+ self.sock.connect(sock_address)
+
+ # Implicit SSL
+ should_encrypt = not self.ip_type == "onion" and self.ip not in self.server.broken_ssl_ips and self.ip not in config.ip_local
+ if self.cert_pin:
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", cert_pin=self.cert_pin)
+ self.sock.do_handshake()
+ self.crypt = "tls-rsa"
+ self.sock_wrapped = True
+ elif should_encrypt and "tls-rsa" in CryptConnection.manager.crypt_supported:
+ try:
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa")
+ self.sock.do_handshake()
+ self.crypt = "tls-rsa"
+ self.sock_wrapped = True
+ except Exception as err:
+ if not config.force_encryption:
+ self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
+ self.server.broken_ssl_ips[self.ip] = True
+ self.sock.close()
+ self.crypt = None
+ self.sock = self.createSocket()
+ self.sock.settimeout(30)
+ self.sock.connect(sock_address)
+
+ # Detect protocol
+ self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
+ event_connected = self.event_connected
+ gevent.spawn(self.messageLoop)
+ connect_res = event_connected.get() # Wait for handshake
+ self.sock.settimeout(timeout_before)
+ return connect_res
+
+ # Handle incoming connection
+ def handleIncomingConnection(self, sock):
+ self.log("Incoming connection...")
+
+ if "TCP_NODELAY" in dir(socket):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ self.type = "in"
+ if self.ip not in config.ip_local: # Clearnet: Check implicit SSL
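+ # A TLS ClientHello begins with record type 0x16; peek at the first
+ # byte without consuming it to decide whether to wrap the socket.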
+ try:
+ first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
+ if first_byte == b"\x16":
+ self.log("Crypt in connection using implicit SSL")
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", True)
+ self.sock_wrapped = True
+ self.crypt = "tls-rsa"
+ except Exception as err:
+ self.log("Socket peek error: %s" % Debug.formatException(err))
+ self.messageLoop()
+
+ def getMsgpackUnpacker(self):
+ if self.handshake and self.handshake.get("use_bin_type"):
+ return Msgpack.getUnpacker(fallback=True, decode=False)
+ else: # Backward compatibility for <0.7.0
+ return Msgpack.getUnpacker(fallback=True, decode=True)
+
+ # Message loop for connection
+ def messageLoop(self):
+ if not self.sock:
+ self.log("Socket error: No socket found")
+ return False
+ self.protocol = "v2"
+ self.updateName()
+ self.connected = True
+ buff_len = 0
+ req_len = 0
+ self.unpacker_bytes = 0
+
+ try:
+ while not self.closed:
+ buff = self.sock.recv(64 * 1024)
+ if not buff:
+ break # Connection closed
+ buff_len = len(buff)
+
+ # Statistics
+ self.last_recv_time = time.time()
+ self.incomplete_buff_recv += 1
+ self.bytes_recv += buff_len
+ self.server.bytes_recv += buff_len
+ req_len += buff_len
+
+ if not self.unpacker:
+ self.unpacker = self.getMsgpackUnpacker()
+ self.unpacker_bytes = 0
+
+ self.unpacker.feed(buff)
+ self.unpacker_bytes += buff_len
+
+ while True:
+ try:
+ message = next(self.unpacker)
+ except StopIteration:
+ break
+ if not isinstance(message, dict):
+ if config.debug_socket:
+ self.log("Invalid message type: %s, content: %r, buffer: %r" % (type(message), message, buff[0:16]))
+ raise Exception("Invalid message type: %s" % type(message))
+
+ # Stats
+ self.incomplete_buff_recv = 0
+ stat_key = message.get("cmd", "unknown")
+ if stat_key == "response" and "to" in message:
+ cmd_sent = self.waiting_requests.get(message["to"], {"cmd": "unknown"})["cmd"]
+ stat_key = "response: %s" % cmd_sent
+ if stat_key == "update":
+ stat_key = "update: %s" % message["params"]["site"]
+ self.server.stat_recv[stat_key]["bytes"] += req_len
+ self.server.stat_recv[stat_key]["num"] += 1
+ if "stream_bytes" in message:
+ self.server.stat_recv[stat_key]["bytes"] += message["stream_bytes"]
+ req_len = 0
+
+ # Handle message
+ if "stream_bytes" in message:
+ buff_left = self.handleStream(message, buff)
+ self.unpacker = self.getMsgpackUnpacker()
+ self.unpacker.feed(buff_left)
+ self.unpacker_bytes = len(buff_left)
+ if config.debug_socket:
+ self.log("Start new unpacker with buff_left: %r" % buff_left)
+ else:
+ self.handleMessage(message)
+
+ message = None
+ except Exception as err:
+ if not self.closed:
+ self.log("Socket error: %s" % Debug.formatException(err))
+ self.server.stat_recv["error: %s" % err]["bytes"] += req_len
+ self.server.stat_recv["error: %s" % err]["num"] += 1
+ self.close("MessageLoop ended (closed: %s)" % self.closed) # MessageLoop ended, close connection
+
+ def getUnpackerUnprocessedBytesNum(self):
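+ # Newer msgpack unpackers expose tell(); older fallback unpackers only
+ # have private buffer counters, so compute the consumed offset either way.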
+ if "tell" in dir(self.unpacker):
+ bytes_num = self.unpacker_bytes - self.unpacker.tell()
+ else:
+ bytes_num = self.unpacker._fb_buf_n - self.unpacker._fb_buf_o
+ return bytes_num
+
+ # Stream socket directly to a file
+ def handleStream(self, message, buff):
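+ # The msgpack unpacker may already hold the first bytes of the raw stream
+ # that followed the header message: flush those from its buffer into the
+ # file first, then read the remainder directly from the socket.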
+ stream_bytes_left = message["stream_bytes"]
+ file = self.waiting_streams[message["to"]]
+
+ unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()
+
+ if unprocessed_bytes_num: # Found stream bytes in unpacker
+ unpacker_stream_bytes = min(unprocessed_bytes_num, stream_bytes_left)
+ buff_stream_start = len(buff) - unprocessed_bytes_num
+ file.write(buff[buff_stream_start:buff_stream_start + unpacker_stream_bytes])
+ stream_bytes_left -= unpacker_stream_bytes
+ else:
+ unpacker_stream_bytes = 0
+
+ if config.debug_socket:
+ self.log(
+ "Starting stream %s: %s bytes (%s from unpacker, buff size: %s, unprocessed: %s)" %
+ (message["to"], message["stream_bytes"], unpacker_stream_bytes, len(buff), unprocessed_bytes_num)
+ )
+
+ try:
+ while 1:
+ if stream_bytes_left <= 0:
+ break
+ stream_buff = self.sock.recv(min(64 * 1024, stream_bytes_left))
+ if not stream_buff:
+ break
+ buff_len = len(stream_buff)
+ stream_bytes_left -= buff_len
+ file.write(stream_buff)
+
+ # Statistics
+ self.last_recv_time = time.time()
+ self.incomplete_buff_recv += 1
+ self.bytes_recv += buff_len
+ self.server.bytes_recv += buff_len
+ except Exception as err:
+ self.log("Stream read error: %s" % Debug.formatException(err))
+
+ if config.debug_socket:
+ self.log("End stream %s, file pos: %s" % (message["to"], file.tell()))
+
+ self.incomplete_buff_recv = 0
+ self.waiting_requests[message["to"]]["evt"].set(message) # Set the response to event
+ del self.waiting_streams[message["to"]]
+ del self.waiting_requests[message["to"]]
+
+ if unpacker_stream_bytes:
+ return buff[buff_stream_start + unpacker_stream_bytes:]
+ else:
+ return b""
+
+ # My handshake info
+ def getHandshakeInfo(self):
+ # No TLS for onion connections
+ if self.ip_type == "onion":
+ crypt_supported = []
+ elif self.ip in self.server.broken_ssl_ips:
+ crypt_supported = []
+ else:
+ crypt_supported = CryptConnection.manager.crypt_supported
+ # No peer id for onion connections
+ if self.ip_type == "onion" or self.ip in config.ip_local:
+ peer_id = ""
+ else:
+ peer_id = self.server.peer_id
+ # Setup peer lock from requested onion address
+ if self.handshake and self.handshake.get("target_ip", "").endswith(".onion") and self.server.tor_manager.start_onions:
+ self.target_onion = self.handshake.get("target_ip").replace(".onion", "") # My onion address
+ if self.target_onion not in self.server.tor_manager.site_onions.values():
+ self.server.log.warning("Unknown target onion address: %s" % self.target_onion)
+
+ handshake = {
+ "version": config.version,
+ "protocol": "v2",
+ "use_bin_type": True,
+ "peer_id": peer_id,
+ "fileserver_port": self.server.port,
+ "port_opened": self.server.port_opened.get(self.ip_type, None),
+ "target_ip": self.ip,
+ "rev": config.rev,
+ "crypt_supported": crypt_supported,
+ "crypt": self.crypt,
+ "time": int(time.time())
+ }
+ if self.target_onion:
+ handshake["onion"] = self.target_onion
+ elif self.ip_type == "onion":
+ handshake["onion"] = self.server.tor_manager.getOnion("global")
+
+ if self.is_tracker_connection:
+ handshake["tracker_connection"] = True
+
+ if config.debug_socket:
+ self.log("My Handshake: %s" % handshake)
+
+ return handshake
+
+ def setHandshake(self, handshake):
+ if config.debug_socket:
+ self.log("Remote Handshake: %s" % handshake)
+
+ if handshake.get("peer_id") == self.server.peer_id and not handshake.get("tracker_connection") and not self.is_tracker_connection:
+ self.close("Same peer id, can't connect to myself")
+ self.server.peer_blacklist.append((handshake["target_ip"], handshake["fileserver_port"]))
+ return False
+
+ self.handshake = handshake
+ if handshake.get("port_opened", None) is False and "onion" not in handshake and not self.is_private_ip: # Not connectable
+ self.port = 0
+ else:
+ self.port = int(handshake["fileserver_port"]) # Set peer fileserver port
+
+ if handshake.get("use_bin_type") and self.unpacker:
+ unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()
+ self.log("Changing unpacker to bin type (unprocessed bytes: %s)" % unprocessed_bytes_num)
+ unprocessed_bytes = self.unpacker.read_bytes(unprocessed_bytes_num)
+ self.unpacker = self.getMsgpackUnpacker() # Create new unpacker for different msgpack type
+ self.unpacker_bytes = 0
+ if unprocessed_bytes:
+ self.unpacker.feed(unprocessed_bytes)
+
+ # Check if we can encrypt the connection
+ if handshake.get("crypt_supported") and self.ip not in self.server.broken_ssl_ips:
+ if type(handshake["crypt_supported"][0]) is bytes:
+ handshake["crypt_supported"] = [item.decode() for item in handshake["crypt_supported"]] # Backward compatibility
+
+ if self.ip_type == "onion" or self.ip in config.ip_local:
+ crypt = None
+ elif handshake.get("crypt"): # Recommended crypt by server
+ crypt = handshake["crypt"]
+ else: # Select the best supported on both sides
+ crypt = CryptConnection.manager.selectCrypt(handshake["crypt_supported"])
+
+ if crypt:
+ self.crypt = crypt
+
+ if self.type == "in" and handshake.get("onion") and not self.ip_type == "onion": # Set incoming connection's onion address
+ if self.server.ips.get(self.ip) == self:
+ del self.server.ips[self.ip]
+ self.setIp(handshake["onion"] + ".onion")
+ self.log("Changing ip to %s" % self.ip)
+ self.server.ips[self.ip] = self
+ self.updateName()
+
+ self.event_connected.set(True) # Mark handshake as done
+ self.event_connected = None
+ self.handshake_time = time.time()
+
+ # Handle incoming message
+ def handleMessage(self, message):
+ cmd = message["cmd"]
+
+ self.last_message_time = time.time()
+ self.last_cmd_recv = cmd
+ if cmd == "response": # New style response
+ if message["to"] in self.waiting_requests:
+ if self.last_send_time and len(self.waiting_requests) == 1:
+ ping = time.time() - self.last_send_time
+ self.last_ping_delay = ping
+ self.waiting_requests[message["to"]]["evt"].set(message) # Set the response to event
+ del self.waiting_requests[message["to"]]
+ elif message["to"] == 0: # Other peers handshake
+ ping = time.time() - self.start_time
+ if config.debug_socket:
+ self.log("Handshake response: %s, ping: %s" % (message, ping))
+ self.last_ping_delay = ping
+ # The server switched to an encrypted connection; do the same if not already encrypted
+ if message.get("crypt") and not self.sock_wrapped:
+ self.crypt = message["crypt"]
+ server = (self.type == "in")
+ self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (self.crypt, server, ping))
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
+ self.sock.do_handshake()
+ self.sock_wrapped = True
+
+ if not self.sock_wrapped and self.cert_pin:
+ self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
+ return
+
+ self.setHandshake(message)
+ else:
+ self.log("Unknown response: %s" % message)
+ elif cmd:
+ self.server.num_recv += 1
+ if cmd == "handshake":
+ self.handleHandshake(message)
+ else:
+ self.server.handleRequest(self, message)
+
+ # Incoming handshake set request
+ def handleHandshake(self, message):
+ self.setHandshake(message["params"])
+ data = self.getHandshakeInfo()
+ data["cmd"] = "response"
+ data["to"] = message["req_id"]
+ self.send(data) # Send response to handshake
+ # Upgrade the connection if a crypt method was selected during the handshake
+ if self.crypt and not self.sock_wrapped:
+ server = (self.type == "in")
+ self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
+ try:
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
+ self.sock_wrapped = True
+ except Exception as err:
+ if not config.force_encryption:
+ self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
+ self.server.broken_ssl_ips[self.ip] = True
+ self.close("Broken ssl")
+
+ if not self.sock_wrapped and self.cert_pin:
+ self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
+
+ # Send data to connection
+ def send(self, message, streaming=False):
+ self.last_send_time = time.time()
+ if config.debug_socket:
+ self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
+ message.get("cmd"), message.get("to"), streaming,
+ message.get("params", {}).get("site"), message.get("params", {}).get("inner_path"),
+ message.get("req_id"))
+ )
+
+ if not self.sock:
+ self.log("Send error: missing socket")
+ return False
+
+ if not self.connected and message.get("cmd") != "handshake":
+ self.log("Wait for handshake before send request")
+ self.event_connected.get()
+
+ try:
+ stat_key = message.get("cmd", "unknown")
+ if stat_key == "response":
+ stat_key = "response: %s" % self.last_cmd_recv
+ else:
+ self.server.num_sent += 1
+
+ self.server.stat_sent[stat_key]["num"] += 1
+ if streaming:
+ with self.send_lock:
+ bytes_sent = Msgpack.stream(message, self.sock.sendall)
+ self.bytes_sent += bytes_sent
+ self.server.bytes_sent += bytes_sent
+ self.server.stat_sent[stat_key]["bytes"] += bytes_sent
+ message = None
+ else:
+ data = Msgpack.pack(message)
+ self.bytes_sent += len(data)
+ self.server.bytes_sent += len(data)
+ self.server.stat_sent[stat_key]["bytes"] += len(data)
+ message = None
+ with self.send_lock:
+ self.sock.sendall(data)
+ except Exception as err:
+ self.close("Send error: %s (cmd: %s)" % (err, stat_key))
+ return False
+ self.last_sent_time = time.time()
+ return True
+
+ # Stream file to connection without msgpacking
+ def sendRawfile(self, file, read_bytes):
+ buff = 64 * 1024
+ bytes_left = read_bytes
+ bytes_sent = 0
+ while True:
+ self.last_send_time = time.time()
+ data = file.read(min(bytes_left, buff))
+ if not data: # File ended before read_bytes was reached
+ break
+ bytes_sent += len(data)
+ with self.send_lock:
+ self.sock.sendall(data)
+ bytes_left -= len(data)
+ if bytes_left <= 0:
+ break
+ self.bytes_sent += bytes_sent
+ self.server.bytes_sent += bytes_sent
+ self.server.stat_sent["raw_file"]["num"] += 1
+ self.server.stat_sent["raw_file"]["bytes"] += bytes_sent
+ return True
+
+ # Create and send a request to peer
+ def request(self, cmd, params=None, stream_to=None):
+ if params is None: # Avoid a shared mutable default argument
+ params = {}
+ # Last command sent more than 10 sec ago, timeout
+ if self.waiting_requests and self.protocol == "v2" and time.time() - max(self.last_req_time, self.last_recv_time) > 10:
+ self.close("Request %s timeout: %.3fs" % (self.last_cmd_sent, time.time() - self.last_send_time))
+ return False
+
+ self.last_req_time = time.time()
+ self.last_cmd_sent = cmd
+ self.req_id += 1
+ data = {"cmd": cmd, "req_id": self.req_id, "params": params}
+ event = gevent.event.AsyncResult() # Create new event for response
+ self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
+ if stream_to:
+ self.waiting_streams[self.req_id] = stream_to
+ self.send(data) # Send request
+ res = event.get() # Wait until event solves
+ return res
+
+ def ping(self):
+ s = time.time()
+ response = None
+ with gevent.Timeout(10.0, False):
+ try:
+ response = self.request("ping")
+ except Exception as err:
+ self.log("Ping error: %s" % Debug.formatException(err))
+ if response and "body" in response and response["body"] == b"Pong!":
+ self.last_ping_delay = time.time() - s
+ return True
+ else:
+ return False
+
+ # Close connection
+ def close(self, reason="Unknown"):
+ if self.closed:
+ return False # Already closed
+ self.closed = True
+ self.connected = False
+ if self.event_connected:
+ self.event_connected.set(False)
+
+ self.log(
+ "Closing connection: %s, waiting_requests: %s, sites: %s, buff: %s..." %
+ (reason, len(self.waiting_requests), self.sites, self.incomplete_buff_recv)
+ )
+ for request in self.waiting_requests.values(): # Mark pending requests failed
+ request["evt"].set(False)
+ self.waiting_requests = {}
+ self.waiting_streams = {}
+ self.sites = 0
+ self.server.removeConnection(self) # Remove connection from server registry
+ try:
+ if self.sock:
+ self.sock.shutdown(gevent.socket.SHUT_WR)
+ self.sock.close()
+ except Exception as err:
+ if config.debug_socket:
+ self.log("Close error: %s" % err)
+
+ # Little cleanup
+ self.sock = None
+ self.unpacker = None
+ self.event_connected = None
diff --git a/src/Connection/ConnectionServer.py b/src/Connection/ConnectionServer.py
new file mode 100644
index 00000000..c9048398
--- /dev/null
+++ b/src/Connection/ConnectionServer.py
@@ -0,0 +1,386 @@
+import logging
+import time
+import sys
+import socket
+from collections import defaultdict
+
+import gevent
+import msgpack
+from gevent.server import StreamServer
+from gevent.pool import Pool
+
+import util
+from util import helper
+from Debug import Debug
+from .Connection import Connection
+from Config import config
+from Crypt import CryptConnection
+from Crypt import CryptHash
+from Tor import TorManager
+from Site import SiteManager
+
+
+class ConnectionServer(object):
+ def __init__(self, ip=None, port=None, request_handler=None):
+ if not ip:
+ if config.fileserver_ip_type == "ipv6":
+ ip = "::1"
+ else:
+ ip = "127.0.0.1"
+ port = 15441
+ self.ip = ip
+ self.port = port
+ self.last_connection_id = 0 # Connection id incrementer
+ self.last_connection_id_current_version = 0 # Connection id incrementer for current client version
+ self.last_connection_id_supported_version = 0 # Connection id incrementer for last supported version
+ self.log = logging.getLogger("ConnServer")
+ self.port_opened = {}
+ self.peer_blacklist = SiteManager.peer_blacklist
+
+ self.tor_manager = TorManager(self.ip, self.port)
+ self.connections = [] # Connections
+ self.whitelist = config.ip_local # No flood protection on this ips
+ self.ip_incoming = {} # Incoming connections from ip in the last minute to avoid connection flood
+ self.broken_ssl_ips = {} # Peerids of broken ssl connections
+ self.ips = {} # Connection by ip
+ self.has_internet = True # Internet outage detection
+
+ self.stream_server = None
+ self.stream_server_proxy = None
+ self.running = False
+ self.stopping = False
+ self.thread_checker = None
+
+ self.stat_recv = defaultdict(lambda: defaultdict(int))
+ self.stat_sent = defaultdict(lambda: defaultdict(int))
+ self.bytes_recv = 0
+ self.bytes_sent = 0
+ self.num_recv = 0
+ self.num_sent = 0
+
+ self.num_incoming = 0
+ self.num_outgoing = 0
+ self.had_external_incoming = False
+
+ self.timecorrection = 0.0
+ self.pool = Pool(500) # do not accept more than 500 connections
+
+ # Bittorrent style peerid
+ self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")
+
+ # Check msgpack version
+ if msgpack.version[0] == 0 and msgpack.version[1] < 4:
+ self.log.error(
+ "Error: Unsupported msgpack version: %s (<0.4.0), please run `sudo apt-get install python-pip; sudo pip install msgpack --upgrade`" %
+ str(msgpack.version)
+ )
+ sys.exit(1)
+
+ if request_handler:
+ self.handleRequest = request_handler
+
+ def start(self, check_connections=True):
+ if self.stopping:
+ return False
+ self.running = True
+ if check_connections:
+ self.thread_checker = gevent.spawn(self.checkConnections)
+ CryptConnection.manager.loadCerts()
+ if config.tor != "disable":
+ self.tor_manager.start()
+ if not self.port:
+ self.log.info("No port found, not binding")
+ return False
+
+ self.log.debug("Binding to: %s:%s, (msgpack: %s), supported crypt: %s" % (
+ self.ip, self.port, ".".join(map(str, msgpack.version)),
+ CryptConnection.manager.crypt_supported
+ ))
+ try:
+ self.stream_server = StreamServer(
+ (self.ip, self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
+ )
+ except Exception as err:
+ self.log.info("StreamServer create error: %s" % Debug.formatException(err))
+
+ def listen(self):
+ if not self.running:
+ return None
+
+ if self.stream_server_proxy:
+ gevent.spawn(self.listenProxy)
+ try:
+ self.stream_server.serve_forever()
+ except Exception as err:
+ self.log.info("StreamServer listen error: %s" % err)
+ return False
+ self.log.debug("Stopped.")
+
+ def stop(self):
+ self.log.debug("Stopping %s" % self.stream_server)
+ self.stopping = True
+ self.running = False
+ if self.thread_checker:
+ gevent.kill(self.thread_checker)
+ if self.stream_server:
+ self.stream_server.stop()
+
+ def closeConnections(self):
+ self.log.debug("Closing all connection: %s" % len(self.connections))
+ for connection in self.connections[:]:
+ connection.close("Close all connections")
+
+ def handleIncomingConnection(self, sock, addr):
+ if config.offline:
+ sock.close()
+ return False
+
+ ip, port = addr[0:2]
+ ip = ip.lower()
+ if ip.startswith("::ffff:"): # IPv6 to IPv4 mapping
+ ip = ip.replace("::ffff:", "", 1)
+ self.num_incoming += 1
+
+ if not self.had_external_incoming and not helper.isPrivateIp(ip):
+ self.had_external_incoming = True
+
+ # Connection flood protection
+ if ip in self.ip_incoming and ip not in self.whitelist:
+ self.ip_incoming[ip] += 1
+ if self.ip_incoming[ip] > 6: # Allow 6 in 1 minute from same ip
+ self.log.debug("Connection flood detected from %s" % ip)
+ time.sleep(30)
+ sock.close()
+ return False
+ else:
+ self.ip_incoming[ip] = 1
+
+ connection = Connection(self, ip, port, sock)
+ self.connections.append(connection)
+ rev = connection.handshake.get("rev", 0)
+ if rev >= 4560:
+ self.last_connection_id_supported_version += 1
+ if rev == config.rev:
+ self.last_connection_id_current_version += 1
+ if ip not in config.ip_local:
+ self.ips[ip] = connection
+ connection.handleIncomingConnection(sock)
+
+ def handleMessage(self, *args, **kwargs):
+ pass
+
+ def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
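+ # With per-site onions every site announces a different onion address, so
+ # connections are keyed by ip + our site onion to avoid reusing the same
+ # connection across sites (which would link them to each other).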
+ ip_type = helper.getIpType(ip)
+ has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
+ if has_per_site_onion: # Site-unique connection for Tor
+ if ip.endswith(".onion"):
+ site_onion = self.tor_manager.getOnion(site.address)
+ else:
+ site_onion = self.tor_manager.getOnion("global")
+ key = ip + site_onion
+ else:
+ key = ip
+
+ # Find connection by ip
+ if key in self.ips:
+ connection = self.ips[key]
+ if not peer_id or connection.handshake.get("peer_id") == peer_id: # Filter by peer_id
+ if not connection.connected and create:
+ succ = connection.event_connected.get() # Wait for connection
+ if not succ:
+ raise Exception("Connection event return error")
+ return connection
+
+ # Recover from connection pool
+ for connection in self.connections:
+ if connection.ip == ip:
+ if peer_id and connection.handshake.get("peer_id") != peer_id: # Does not match
+ continue
+ if ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion:
+ # For different site
+ continue
+ if not connection.connected and create:
+ succ = connection.event_connected.get() # Wait for connection
+ if not succ:
+ raise Exception("Connection event return error")
+ return connection
+
+ # No connection found
+ if create and not config.offline: # Allow to create new connection if not found
+ if port == 0:
+ raise Exception("This peer is not connectable")
+
+ if (ip, port) in self.peer_blacklist and not is_tracker_connection:
+ raise Exception("This peer is blacklisted")
+
+ try:
+ if has_per_site_onion: # Lock connection to site
+ connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
+ else:
+ connection = Connection(self, ip, port, is_tracker_connection=is_tracker_connection)
+ self.num_outgoing += 1
+ self.ips[key] = connection
+ self.connections.append(connection)
+ connection.log("Connecting... (site: %s)" % site)
+ succ = connection.connect()
+ if not succ:
+ connection.close("Connection event return error")
+ raise Exception("Connection event return error")
+ else:
+ rev = connection.handshake.get("rev", 0)
+ if rev >= 4560:
+ self.last_connection_id_supported_version += 1
+ if rev == config.rev:
+ self.last_connection_id_current_version += 1
+
+ except Exception as err:
+ connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
+ raise err
+
+ if len(self.connections) > config.global_connected_limit:
+ gevent.spawn(self.checkMaxConnections)
+
+ return connection
+ else:
+ return None
+
+ def removeConnection(self, connection):
+ # Delete if same as in registry
+ if self.ips.get(connection.ip) == connection:
+ del self.ips[connection.ip]
+ # Site locked connection
+ if connection.target_onion:
+ if self.ips.get(connection.ip + connection.target_onion) == connection:
+ del self.ips[connection.ip + connection.target_onion]
+ # Cert pinned connection
+ if connection.cert_pin and self.ips.get(connection.ip + "#" + connection.cert_pin) == connection:
+ del self.ips[connection.ip + "#" + connection.cert_pin]
+
+ if connection in self.connections:
+ self.connections.remove(connection)
+
+ def checkConnections(self):
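+ # Housekeeping loop, runs every 15 seconds: resets the per-ip flood
+ # counters, closes idle/stalled/timed-out connections and flags an
+ # internet outage when no peer message has arrived recently.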
+ run_i = 0
+ time.sleep(15)
+ while self.running:
+ run_i += 1
+ self.ip_incoming = {} # Reset connected ips counter
+ last_message_time = 0
+ s = time.time()
+ for connection in self.connections[:]: # Make a copy
+ if connection.ip.endswith(".onion") or config.tor == "always":
+ timeout_multipler = 2
+ else:
+ timeout_multipler = 1
+
+ idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
+ if connection.last_message_time > last_message_time and not connection.is_private_ip:
+ # Messages from local IPs do not indicate internet connectivity
+ last_message_time = connection.last_message_time
+
+ if connection.unpacker and idle > 30:
+ # Delete the unpacker if not needed
+ del connection.unpacker
+ connection.unpacker = None
+
+ elif connection.last_cmd_sent == "announce" and idle > 20: # Bootstrapper connection close after 20 sec
+ connection.close("[Cleanup] Tracker connection, idle: %.3fs" % idle)
+
+ if idle > 60 * 60:
+ # Wake up after 1h
+ connection.close("[Cleanup] After wakeup, idle: %.3fs" % idle)
+
+ elif idle > 20 * 60 and connection.last_send_time < time.time() - 10:
+ # Idle more than 20 min and we have not sent request in last 10 sec
+ if not connection.ping():
+ connection.close("[Cleanup] Ping timeout")
+
+ elif idle > 10 * timeout_multiplier and connection.incomplete_buff_recv > 0:
+ # Incomplete data with more than 10 sec idle
+ connection.close("[Cleanup] Connection buff stalled")
+
+ elif idle > 10 * timeout_multiplier and connection.protocol == "?": # No connection after 10 sec
+ connection.close(
+ "[Cleanup] Connect timeout: %.3fs" % idle
+ )
+
+ elif idle > 10 * timeout_multiplier and connection.waiting_requests and time.time() - connection.last_send_time > 10 * timeout_multiplier:
+ # Sent command and no response in 10 sec
+ connection.close(
+ "[Cleanup] Command %s timeout: %.3fs" % (connection.last_cmd_sent, time.time() - connection.last_send_time)
+ )
+
+ elif idle < 60 and connection.bad_actions > 40:
+ connection.close(
+ "[Cleanup] Too many bad actions: %s" % connection.bad_actions
+ )
+
+ elif idle > 5 * 60 and connection.sites == 0:
+ connection.close(
+ "[Cleanup] No site for connection"
+ )
+
+ elif run_i % 90 == 0:
+ # Reset bad action counter every 30 min
+ connection.bad_actions = 0
+
+ # Internet outage detection
+ if time.time() - last_message_time > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
+ # Offline: Last message more than 60-600sec depending on connection number
+ if self.has_internet and last_message_time:
+ self.has_internet = False
+ self.onInternetOffline()
+ else:
+ # Online
+ if not self.has_internet:
+ self.has_internet = True
+ self.onInternetOnline()
+
+ self.timecorrection = self.getTimecorrection()
+
+ if time.time() - s > 0.01:
+ self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))
+
+ time.sleep(15)
+ self.log.debug("Checkconnections ended")
+
+ @util.Noparallel(blocking=False)
+ def checkMaxConnections(self):
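+ # Over the global limit: close the connections serving the fewest sites
+ # that have been idle for over a minute, at most ~10% of the limit per run.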
+ if len(self.connections) < config.global_connected_limit:
+ return 0
+
+ s = time.time()
+ num_connected_before = len(self.connections)
+ self.connections.sort(key=lambda connection: connection.sites)
+ num_closed = 0
+ for connection in self.connections:
+ idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
+ if idle > 60:
+ connection.close("Connection limit reached")
+ num_closed += 1
+ if num_closed > config.global_connected_limit * 0.1:
+ break
+
+ self.log.debug("Closed %s connections of %s after reached limit %s in %.3fs" % (
+ num_closed, num_connected_before, config.global_connected_limit, time.time() - s
+ ))
+ return num_closed
+
+ def onInternetOnline(self):
+ self.log.info("Internet online")
+
+ def onInternetOffline(self):
+ self.had_external_incoming = False
+ self.log.info("Internet offline")
+
+ def getTimecorrection(self):
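+ # Estimate local clock skew from handshakes: peer time minus our handshake
+ # time, compensated by the measured ping delay. Requires at least 9 samples
+ # and averages the three middle values, so outlier clocks have little effect.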
+ corrections = sorted([
+ connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
+ for connection in self.connections
+ if connection.handshake.get("time") and connection.last_ping_delay
+ ])
+ if len(corrections) < 9:
+ return 0.0
+ mid = int(len(corrections) / 2 - 1)
+ median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
+ return median
diff --git a/src/Connection/__init__.py b/src/Connection/__init__.py
new file mode 100644
index 00000000..d419a3f0
--- /dev/null
+++ b/src/Connection/__init__.py
@@ -0,0 +1,2 @@
+from .ConnectionServer import ConnectionServer
+from .Connection import Connection
diff --git a/src/Content/ContentDb.py b/src/Content/ContentDb.py
new file mode 100644
index 00000000..f284581e
--- /dev/null
+++ b/src/Content/ContentDb.py
@@ -0,0 +1,162 @@
+import os
+
+from Db.Db import Db, DbTableError
+from Config import config
+from Plugin import PluginManager
+from Debug import Debug
+
+
+@PluginManager.acceptPlugins
+class ContentDb(Db):
+ def __init__(self, path):
+ Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, path)
+ self.foreign_keys = True
+
+ def init(self):
+ try:
+ self.schema = self.getSchema()
+ try:
+ self.checkTables()
+ except DbTableError:
+ pass
+ self.log.debug("Checking foreign keys...")
+ foreign_key_error = self.execute("PRAGMA foreign_key_check").fetchone()
+ if foreign_key_error:
+ raise Exception("Database foreign key error: %s" % foreign_key_error)
+ except Exception as err:
+ self.log.error("Error loading content.db: %s, rebuilding..." % Debug.formatException(err))
+ self.close()
+ os.unlink(self.db_path) # Remove and try again
+ Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, self.db_path)
+ self.foreign_keys = True
+ self.schema = self.getSchema()
+ try:
+ self.checkTables()
+ except DbTableError:
+ pass
+ self.site_ids = {}
+ self.sites = {}
+
+ def getSchema(self):
+ schema = {}
+ schema["db_name"] = "ContentDb"
+ schema["version"] = 3
+ schema["tables"] = {}
+
+ if not self.getTableVersion("site"):
+ self.log.debug("Migrating from table version-less content.db")
+ version = int(self.execute("PRAGMA user_version").fetchone()[0])
+ if version > 0:
+ self.checkTables()
+ self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.site.version", "value": 1})
+ self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.content.version", "value": 1})
+
+ schema["tables"]["site"] = {
+ "cols": [
+ ["site_id", "INTEGER PRIMARY KEY ASC NOT NULL UNIQUE"],
+ ["address", "TEXT NOT NULL"]
+ ],
+ "indexes": [
+ "CREATE UNIQUE INDEX site_address ON site (address)"
+ ],
+ "schema_changed": 1
+ }
+
+ schema["tables"]["content"] = {
+ "cols": [
+ ["content_id", "INTEGER PRIMARY KEY UNIQUE NOT NULL"],
+ ["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
+ ["inner_path", "TEXT"],
+ ["size", "INTEGER"],
+ ["size_files", "INTEGER"],
+ ["size_files_optional", "INTEGER"],
+ ["modified", "INTEGER"]
+ ],
+ "indexes": [
+ "CREATE UNIQUE INDEX content_key ON content (site_id, inner_path)",
+ "CREATE INDEX content_modified ON content (site_id, modified)"
+ ],
+ "schema_changed": 1
+ }
+
+ return schema
+
+ def initSite(self, site):
+ self.sites[site.address] = site
+
+ def needSite(self, site):
+ if site.address not in self.site_ids:
+ self.execute("INSERT OR IGNORE INTO site ?", {"address": site.address})
+ self.site_ids = {}
+ for row in self.execute("SELECT * FROM site"):
+ self.site_ids[row["address"]] = row["site_id"]
+ return self.site_ids[site.address]
+
+ def deleteSite(self, site):
+ site_id = self.site_ids.get(site.address, 0)
+ if site_id:
+ self.execute("DELETE FROM site WHERE site_id = :site_id", {"site_id": site_id})
+ del self.site_ids[site.address]
+ del self.sites[site.address]
+
+ def setContent(self, site, inner_path, content, size=0):
+ self.insertOrUpdate("content", {
+ "size": size,
+ "size_files": sum([val["size"] for key, val in content.get("files", {}).items()]),
+ "size_files_optional": sum([val["size"] for key, val in content.get("files_optional", {}).items()]),
+ "modified": int(content.get("modified", 0))
+ }, {
+ "site_id": self.site_ids.get(site.address, 0),
+ "inner_path": inner_path
+ })
+
+ def deleteContent(self, site, inner_path):
+ self.execute("DELETE FROM content WHERE ?", {"site_id": self.site_ids.get(site.address, 0), "inner_path": inner_path})
+
+ def loadDbDict(self, site):
+ res = self.execute(
+ "SELECT GROUP_CONCAT(inner_path, '|') AS inner_paths FROM content WHERE ?",
+ {"site_id": self.site_ids.get(site.address, 0)}
+ )
+ row = res.fetchone()
+ if row and row["inner_paths"]:
+ inner_paths = row["inner_paths"].split("|")
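+ # Map every known path to False = "present in db, json not loaded into memory yet"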
+ return dict.fromkeys(inner_paths, False)
+ else:
+ return {}
+
+ def getTotalSize(self, site, ignore=None):
+ params = {"site_id": self.site_ids.get(site.address, 0)}
+ if ignore:
+ params["not__inner_path"] = ignore
+ res = self.execute("SELECT SUM(size) + SUM(size_files) AS size, SUM(size_files_optional) AS size_optional FROM content WHERE ?", params)
+ row = dict(res.fetchone())
+
+ if not row["size"]:
+ row["size"] = 0
+ if not row["size_optional"]:
+ row["size_optional"] = 0
+
+ return row["size"], row["size_optional"]
+
+ def listModified(self, site, after=None, before=None):
+ params = {"site_id": self.site_ids.get(site.address, 0)}
+ if after:
+ params["modified>"] = after
+ if before:
+ params["modified<"] = before
+ res = self.execute("SELECT inner_path, modified FROM content WHERE ?", params)
+ return {row["inner_path"]: row["modified"] for row in res}
+
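+# Module-level cache: one shared ContentDb instance per database path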
+content_dbs = {}
+
+
+def getContentDb(path=None):
+ if not path:
+ path = "%s/content.db" % config.data_dir
+ if path not in content_dbs:
+ content_dbs[path] = ContentDb(path)
+ content_dbs[path].init()
+ return content_dbs[path]
+
+getContentDb() # Pre-connect to default one
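+# Usage sketch (assuming a Site-like object `site`):
+#   content_db = getContentDb()
+#   site_id = content_db.needSite(site)  # returns the numeric site_id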
diff --git a/src/Content/ContentDbDict.py b/src/Content/ContentDbDict.py
new file mode 100644
index 00000000..01df0427
--- /dev/null
+++ b/src/Content/ContentDbDict.py
@@ -0,0 +1,155 @@
+import time
+import os
+
+from . import ContentDb
+from Debug import Debug
+from Config import config
+
+
+class ContentDbDict(dict):
+ def __init__(self, site, *args, **kwargs):
+ s = time.time()
+ self.site = site
+ self.cached_keys = []
+ self.log = self.site.log
+ self.db = ContentDb.getContentDb()
+ self.db_id = self.db.needSite(site)
+ self.num_loaded = 0
+ super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database
+ self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))
+
+ def loadItem(self, key):
+ try:
+ self.num_loaded += 1
+ if self.num_loaded % 100 == 0:
+ if config.verbose:
+ self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
+ else:
+ self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
+ content = self.site.storage.loadJson(key)
+ dict.__setitem__(self, key, content)
+ except IOError:
+ if dict.get(self, key):
+ self.__delitem__(key) # File no longer exists
+ raise KeyError(key)
+
+ self.addCachedKey(key)
+ self.checkLimit()
+
+ return content
+
+ def getItemSize(self, key):
+ return self.site.storage.getSize(key)
+
+ # Only keep the 10 most recently accessed json files in memory
+ def checkLimit(self):
+ if len(self.cached_keys) > 10:
+ key_deleted = self.cached_keys.pop(0)
+ dict.__setitem__(self, key_deleted, False)
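+ # False marks the key as known but purged; loadItem() re-reads it on next access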
+
+ def addCachedKey(self, key):
+ if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Keys of 40 chars or less are always kept in memory
+ self.cached_keys.append(key)
+
+ def __getitem__(self, key):
+ val = dict.get(self, key)
+ if val: # Already loaded
+ return val
+ elif val is None: # Unknown key
+ raise KeyError(key)
+ elif val is False: # Loaded before, but purged from cache
+ return self.loadItem(key)
+
+ def __setitem__(self, key, val):
+ self.addCachedKey(key)
+ self.checkLimit()
+ size = self.getItemSize(key)
+ self.db.setContent(self.site, key, val, size)
+ dict.__setitem__(self, key, val)
+
+ def __delitem__(self, key):
+ self.db.deleteContent(self.site, key)
+ dict.__delitem__(self, key)
+ try:
+ self.cached_keys.remove(key)
+ except ValueError:
+ pass
+
+ def iteritems(self):
+ for key in dict.keys(self):
+ try:
+ val = self[key]
+ except Exception as err:
+ self.log.warning("Error loading %s: %s" % (key, err))
+ continue
+ yield key, val
+
+ def items(self):
+ back = []
+ for key in dict.keys(self):
+ try:
+ val = self[key]
+ except Exception as err:
+ self.log.warning("Error loading %s: %s" % (key, err))
+ continue
+ back.append((key, val))
+ return back
+
+ def values(self):
+ back = []
+ for key, val in dict.items(self):
+ if not val:
+ try:
+ val = self.loadItem(key)
+ except Exception:
+ continue
+ back.append(val)
+ return back
+
+ def get(self, key, default=None):
+ try:
+ return self.__getitem__(key)
+ except KeyError:
+ return default
+ except Exception as err:
+ self.site.bad_files[key] = self.site.bad_files.get(key, 1)
+ dict.__delitem__(self, key)
+ self.log.warning("Error loading %s: %s" % (key, err))
+ return default
+
+ def execute(self, query, params=None):
+ if params is None: # Avoid a shared mutable default: site_id would leak between calls
+ params = {}
+ params["site_id"] = self.db_id
+ return self.db.execute(query, params)
+
+if __name__ == "__main__":
+ import psutil
+ process = psutil.Process(os.getpid())
+ s_mem = process.memory_info()[0] / float(2 ** 20)
+ root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27"
+ contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root)
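+ # Note: ContentDbDict.__init__ expects a Site object (with .log and .storage);
+ # this ad-hoc benchmark passes a bare address string, so treat it as a sketch
+ # that needs a Site-like stub to actually run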
+ print("Init len", len(contents))
+
+ s = time.time()
+ for dir_name in os.listdir(root + "/data/users/")[0:8000]:
+ contents["data/users/%s/content.json" % dir_name]
+ print("Load: %.3fs" % (time.time() - s))
+
+ s = time.time()
+ found = 0
+ for key, val in contents.items():
+ found += 1
+ assert key
+ assert val
+ print("Found:", found)
+ print("Iteritem: %.3fs" % (time.time() - s))
+
+ s = time.time()
+ found = 0
+ for key in list(contents.keys()):
+ found += 1
+ assert key in contents
+ print("In: %.3fs" % (time.time() - s))
+
+ print("Len:", len(list(contents.values())), len(list(contents.keys())))
+
+ print("Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem)
diff --git a/src/Content/ContentManager.py b/src/Content/ContentManager.py
new file mode 100644
index 00000000..623cc707
--- /dev/null
+++ b/src/Content/ContentManager.py
@@ -0,0 +1,1067 @@
+import json
+import time
+import re
+import os
+import copy
+import base64
+import sys
+
+import gevent
+
+from Debug import Debug
+from Crypt import CryptHash
+from Config import config
+from util import helper
+from util import Diff
+from util import SafeRe
+from Peer import PeerHashfield
+from .ContentDbDict import ContentDbDict
+from Plugin import PluginManager
+
+
+class VerifyError(Exception):
+ pass
+
+
+class SignError(Exception):
+ pass
+
+
+@PluginManager.acceptPlugins
+class ContentManager(object):
+
+ def __init__(self, site):
+ self.site = site
+ self.log = self.site.log
+ self.contents = ContentDbDict(site)
+ self.hashfield = PeerHashfield()
+ self.has_optional_files = False
+
+ # Load all content.json files
+ def loadContents(self):
+ if len(self.contents) == 0:
+ self.log.info("ContentDb not initialized, load files from filesystem...")
+ self.loadContent(add_bad_files=False, delete_removed_files=False)
+ self.site.settings["size"], self.site.settings["size_optional"] = self.getTotalSize()
+
+ # Load hashfield cache
+ if "hashfield" in self.site.settings.get("cache", {}):
+ self.hashfield.frombytes(base64.b64decode(self.site.settings["cache"]["hashfield"]))
+ del self.site.settings["cache"]["hashfield"]
+ elif self.contents.get("content.json") and self.site.settings["size_optional"] > 0:
+ self.site.storage.updateBadFiles() # No hashfield cache created yet
+ self.has_optional_files = bool(self.hashfield)
+
+ self.contents.db.initSite(self.site)
+
+ def getFileChanges(self, old_files, new_files):
+ deleted = {key: val for key, val in old_files.items() if key not in new_files}
+ deleted_hashes = {val.get("sha512"): key for key, val in old_files.items() if key not in new_files}
+ added = {key: val for key, val in new_files.items() if key not in old_files}
+ renamed = {}
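+ # A path that disappears and reappears elsewhere with the same sha512 is treated as a rename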
+ for relative_path, node in added.items():
+ hash = node.get("sha512")
+ if hash in deleted_hashes:
+ relative_path_old = deleted_hashes[hash]
+ renamed[relative_path_old] = relative_path
+ del(deleted[relative_path_old])
+ return list(deleted), renamed
+
+ # Load content.json to self.content
+ # Return: Changed files ["index.html", "data/messages.json"], Deleted files ["old.jpg"]
+ def loadContent(self, content_inner_path="content.json", add_bad_files=True, delete_removed_files=True, load_includes=True, force=False):
+ content_inner_path = content_inner_path.strip("/") # Remove / from beginning
+ old_content = self.contents.get(content_inner_path)
+ content_path = self.site.storage.getPath(content_inner_path)
+ content_dir = helper.getDirname(self.site.storage.getPath(content_inner_path))
+ content_inner_dir = helper.getDirname(content_inner_path)
+
+ if os.path.isfile(content_path):
+ try:
+ # Check if file is newer than what we have
+ if not force and old_content and not self.site.settings.get("own"):
+ for line in open(content_path):
+ if '"modified"' not in line:
+ continue
+ match = re.search(r"([0-9\.]+),$", line.strip(" \r\n"))
+ if match and float(match.group(1)) <= old_content.get("modified", 0):
+ self.log.debug("%s loadContent same json file, skipping" % content_inner_path)
+ return [], []
+
+ new_content = self.site.storage.loadJson(content_inner_path)
+ except Exception as err:
+ self.log.warning("%s load error: %s" % (content_path, Debug.formatException(err)))
+ return [], []
+ else:
+ self.log.debug("Content.json not exist: %s" % content_path)
+ return [], [] # Content.json not exist
+
+ try:
+ # Get the files where the sha512 changed
+ changed = []
+ deleted = []
+ # Check changed
+ for relative_path, info in new_content.get("files", {}).items():
+ if "sha512" in info:
+ hash_type = "sha512"
+ else: # Backward compatibility
+ hash_type = "sha1"
+
+ new_hash = info[hash_type]
+ if old_content and old_content["files"].get(relative_path): # We have the file in the old content
+ old_hash = old_content["files"][relative_path].get(hash_type)
+ else: # The file is not in the old content
+ old_hash = None
+ if old_hash != new_hash:
+ changed.append(content_inner_dir + relative_path)
+
+ # Check changed optional files
+ for relative_path, info in new_content.get("files_optional", {}).items():
+ file_inner_path = content_inner_dir + relative_path
+ new_hash = info["sha512"]
+ if old_content and old_content.get("files_optional", {}).get(relative_path):
+ # We have the file in the old content
+ old_hash = old_content["files_optional"][relative_path].get("sha512")
+ if old_hash != new_hash and self.site.isDownloadable(file_inner_path):
+ changed.append(file_inner_path) # Download new file
+ elif old_hash != new_hash and self.hashfield.hasHash(old_hash) and not self.site.settings.get("own"):
+ try:
+ old_hash_id = self.hashfield.getHashId(old_hash)
+ self.optionalRemoved(file_inner_path, old_hash_id, old_content["files_optional"][relative_path]["size"])
+ self.optionalDelete(file_inner_path)
+ self.log.debug("Deleted changed optional file: %s" % file_inner_path)
+ except Exception as err:
+ self.log.warning("Error deleting file %s: %s" % (file_inner_path, Debug.formatException(err)))
+ else: # The file is not in the old content
+ if self.site.isDownloadable(file_inner_path):
+ changed.append(file_inner_path) # Download new file
+
+ # Check deleted
+ if old_content:
+ old_files = dict(
+ old_content.get("files", {}),
+ **old_content.get("files_optional", {})
+ )
+
+ new_files = dict(
+ new_content.get("files", {}),
+ **new_content.get("files_optional", {})
+ )
+
+ deleted, renamed = self.getFileChanges(old_files, new_files)
+
+ for relative_path_old, relative_path_new in renamed.items():
+ self.log.debug("Renaming: %s -> %s" % (relative_path_old, relative_path_new))
+ if relative_path_new in new_content.get("files_optional", {}):
+ self.optionalRenamed(content_inner_dir + relative_path_old, content_inner_dir + relative_path_new)
+ if self.site.storage.isFile(relative_path_old):
+ try:
+ self.site.storage.rename(relative_path_old, relative_path_new)
+ if relative_path_new in changed:
+ changed.remove(relative_path_new)
+ self.log.debug("Renamed: %s -> %s" % (relative_path_old, relative_path_new))
+ except Exception as err:
+ self.log.warning("Error renaming file: %s -> %s %s" % (relative_path_old, relative_path_new, err))
+
+ if deleted and not self.site.settings.get("own"):
+ # Delete files that are no longer in content.json
+ for file_relative_path in deleted:
+ file_inner_path = content_inner_dir + file_relative_path
+ try:
+ # Check if the deleted file is optional
+ if old_content.get("files_optional") and old_content["files_optional"].get(file_relative_path):
+ self.optionalDelete(file_inner_path)
+ old_hash = old_content["files_optional"][file_relative_path].get("sha512")
+ if self.hashfield.hasHash(old_hash):
+ old_hash_id = self.hashfield.getHashId(old_hash)
+ self.optionalRemoved(file_inner_path, old_hash_id, old_content["files_optional"][file_relative_path]["size"])
+ else:
+ self.site.storage.delete(file_inner_path)
+
+ self.log.debug("Deleted file: %s" % file_inner_path)
+ except Exception as err:
+ self.log.debug("Error deleting file %s: %s" % (file_inner_path, Debug.formatException(err)))
+
+ # Cleanup empty dirs
+ tree = {root: [dirs, files] for root, dirs, files in os.walk(self.site.storage.getPath(content_inner_dir))}
+ for root in sorted(tree, key=len, reverse=True):
+ dirs, files = tree[root]
+ if dirs == [] and files == []:
+ root_inner_path = self.site.storage.getInnerPath(root.replace("\\", "/"))
+ self.log.debug("Empty directory: %s, cleaning up." % root_inner_path)
+ try:
+ self.site.storage.deleteDir(root_inner_path)
+ # Remove from tree dict to reflect changed state
+ tree[os.path.dirname(root)][0].remove(os.path.basename(root))
+ except Exception as err:
+ self.log.debug("Error deleting empty directory %s: %s" % (root_inner_path, err))
+
+ # Check archived
+ if old_content and "user_contents" in new_content and "archived" in new_content["user_contents"]:
+ old_archived = old_content.get("user_contents", {}).get("archived", {})
+ new_archived = new_content.get("user_contents", {}).get("archived", {})
+ self.log.debug("old archived: %s, new archived: %s" % (len(old_archived), len(new_archived)))
+ archived_changed = {
+ key: date_archived
+ for key, date_archived in new_archived.items()
+ if old_archived.get(key) != new_archived[key]
+ }
+ if archived_changed:
+ self.log.debug("Archived changed: %s" % archived_changed)
+ for archived_dirname, date_archived in archived_changed.items():
+ archived_inner_path = content_inner_dir + archived_dirname + "/content.json"
+ if self.contents.get(archived_inner_path, {}).get("modified", 0) < date_archived:
+ self.removeContent(archived_inner_path)
+ deleted.append(archived_inner_path)
+ self.site.settings["size"], self.site.settings["size_optional"] = self.getTotalSize()
+
+ # Check archived before
+ if old_content and "user_contents" in new_content and "archived_before" in new_content["user_contents"]:
+ old_archived_before = old_content.get("user_contents", {}).get("archived_before", 0)
+ new_archived_before = new_content.get("user_contents", {}).get("archived_before", 0)
+ if old_archived_before != new_archived_before:
+ self.log.debug("Archived before changed: %s -> %s" % (old_archived_before, new_archived_before))
+
+ # Remove downloaded archived files
+ num_removed_contents = 0
+ for archived_inner_path in self.listModified(before=new_archived_before):
+ if archived_inner_path.startswith(content_inner_dir) and archived_inner_path != content_inner_path:
+ self.removeContent(archived_inner_path)
+ num_removed_contents += 1
+ self.site.settings["size"], self.site.settings["size_optional"] = self.getTotalSize()
+
+ # Remove archived files from download queue
+ num_removed_bad_files = 0
+ for bad_file in list(self.site.bad_files.keys()):
+ if bad_file.endswith("content.json"):
+ del self.site.bad_files[bad_file]
+ num_removed_bad_files += 1
+
+ if num_removed_bad_files > 0:
+ self.site.worker_manager.removeSolvedFileTasks(mark_as_good=False)
+ gevent.spawn(self.site.update, since=0)
+
+ self.log.debug("Archived removed contents: %s, removed bad files: %s" % (num_removed_contents, num_removed_bad_files))
+
+ # Load includes
+ if load_includes and "includes" in new_content:
+ for relative_path, info in list(new_content["includes"].items()):
+ include_inner_path = content_inner_dir + relative_path
+ if self.site.storage.isFile(include_inner_path): # Content.json exists, load it
+ include_changed, include_deleted = self.loadContent(
+ include_inner_path, add_bad_files=add_bad_files, delete_removed_files=delete_removed_files
+ )
+ if include_changed:
+ changed += include_changed # Add changed files
+ if include_deleted:
+ deleted += include_deleted # Add deleted files
+ else: # content.json does not exist, add to changed files
+ self.log.debug("Missing include: %s" % include_inner_path)
+ changed += [include_inner_path]
+
+ # Load blind user includes (all subdir)
+ if load_includes and "user_contents" in new_content:
+ for relative_dir in os.listdir(content_dir):
+ include_inner_path = content_inner_dir + relative_dir + "/content.json"
+ if not self.site.storage.isFile(include_inner_path):
+ continue # content.json does not exist
+ include_changed, include_deleted = self.loadContent(
+ include_inner_path, add_bad_files=add_bad_files, delete_removed_files=delete_removed_files,
+ load_includes=False
+ )
+ if include_changed:
+ changed += include_changed # Add changed files
+ if include_deleted:
+ deleted += include_deleted # Add deleted files
+
+ # Save some memory
+ new_content["signs"] = None
+ if "cert_sign" in new_content:
+ new_content["cert_sign"] = None
+
+ if new_content.get("files_optional"):
+ self.has_optional_files = True
+ # Update the content
+ self.contents[content_inner_path] = new_content
+ except Exception as err:
+ self.log.warning("%s parse error: %s" % (content_inner_path, Debug.formatException(err)))
+ return [], [] # Content.json parse error
+
+ # Add changed files to bad files
+ if add_bad_files:
+ for inner_path in changed:
+ self.site.bad_files[inner_path] = self.site.bad_files.get(inner_path, 0) + 1
+ for inner_path in deleted:
+ if inner_path in self.site.bad_files:
+ del self.site.bad_files[inner_path]
+ self.site.worker_manager.removeSolvedFileTasks()
+
+ if new_content.get("modified", 0) > self.site.settings.get("modified", 0):
+ # Don't store modification times more than 10 minutes in the future
+ self.site.settings["modified"] = min(time.time() + 60 * 10, new_content["modified"])
+
+ return changed, deleted
+
+ def removeContent(self, inner_path):
+ inner_dir = helper.getDirname(inner_path)
+ try:
+ content = self.contents[inner_path]
+ files = dict(
+ content.get("files", {}),
+ **content.get("files_optional", {})
+ )
+ except Exception as err:
+ self.log.debug("Error loading %s for removeContent: %s" % (inner_path, Debug.formatException(err)))
+ files = {}
+ files["content.json"] = True
+ # Delete files that are no longer in content.json
+ for file_relative_path in files:
+ file_inner_path = inner_dir + file_relative_path
+ try:
+ self.site.storage.delete(file_inner_path)
+ self.log.debug("Deleted file: %s" % file_inner_path)
+ except Exception as err:
+ self.log.debug("Error deleting file %s: %s" % (file_inner_path, err))
+ try:
+ self.site.storage.deleteDir(inner_dir)
+ except Exception as err:
+ self.log.debug("Error deleting dir %s: %s" % (inner_dir, err))
+
+ try:
+ del self.contents[inner_path]
+ except Exception as err:
+ self.log.debug("Error key from contents: %s" % inner_path)
+
+ # Get total size of site
+ # Return: 32819 (size of files in bytes)
+ def getTotalSize(self, ignore=None):
+ return self.contents.db.getTotalSize(self.site, ignore)
+
+ def listModified(self, after=None, before=None):
+ return self.contents.db.listModified(self.site, after=after, before=before)
+
+ def listContents(self, inner_path="content.json", user_files=False):
+ if inner_path not in self.contents:
+ return []
+ back = [inner_path]
+ content_inner_dir = helper.getDirname(inner_path)
+ for relative_path in list(self.contents[inner_path].get("includes", {}).keys()):
+ include_inner_path = content_inner_dir + relative_path
+ back += self.listContents(include_inner_path)
+ return back
+
+ # Return whether a file with the given modification date is archived
+ def isArchived(self, inner_path, modified):
+ match = re.match(r"(.*)/(.*?)/", inner_path)
+ if not match:
+ return False
+ user_contents_inner_path = match.group(1) + "/content.json"
+ relative_directory = match.group(2)
+
+ file_info = self.getFileInfo(user_contents_inner_path)
+ if file_info:
+ time_archived_before = file_info.get("archived_before", 0)
+ time_directory_archived = file_info.get("archived", {}).get(relative_directory, 0)
+ if modified <= time_archived_before or modified <= time_directory_archived:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def isDownloaded(self, inner_path, hash_id=None):
+ if not hash_id:
+ file_info = self.getFileInfo(inner_path)
+ if not file_info or "sha512" not in file_info:
+ return False
+ hash_id = self.hashfield.getHashId(file_info["sha512"])
+ return hash_id in self.hashfield
+
+ # Is modified since signing
+ def isModified(self, inner_path):
+ if inner_path.endswith("content.json"):
+ try:
+ is_valid = self.verifyFile(inner_path, self.site.storage.open(inner_path), ignore_same=False)
+ is_modified = not is_valid
+ except VerifyError:
+ is_modified = True
+ else:
+ try:
+ self.verifyFile(inner_path, self.site.storage.open(inner_path), ignore_same=False)
+ is_modified = False
+ except VerifyError:
+ is_modified = True
+ return is_modified
+
+ # Find the file info line from self.contents
+ # Return: { "sha512": "c29d73d...21f518", "size": 41 , "content_inner_path": "content.json"}
+ def getFileInfo(self, inner_path, new_file=False):
+ dirs = inner_path.split("/") # Parent dirs of content.json
+ inner_path_parts = [dirs.pop()] # Filename relative to content.json
+ while True:
+ content_inner_path = "%s/content.json" % "/".join(dirs)
+ content_inner_path = content_inner_path.strip("/")
+ content = self.contents.get(content_inner_path)
+
+ # Check in files
+ if content and "files" in content:
+ back = content["files"].get("/".join(inner_path_parts))
+ if back:
+ back["content_inner_path"] = content_inner_path
+ back["optional"] = False
+ back["relative_path"] = "/".join(inner_path_parts)
+ return back
+
+ # Check in optional files
+ if content and "files_optional" in content: # Check if file in this content.json
+ back = content["files_optional"].get("/".join(inner_path_parts))
+ if back:
+ back["content_inner_path"] = content_inner_path
+ back["optional"] = True
+ back["relative_path"] = "/".join(inner_path_parts)
+ return back
+
+ # Return the rules if user dir
+ if content and "user_contents" in content:
+ back = content["user_contents"]
+ content_inner_path_dir = helper.getDirname(content_inner_path)
+ relative_content_path = inner_path[len(content_inner_path_dir):]
+ user_auth_address_match = re.match(r"([A-Za-z0-9]+)/.*", relative_content_path)
+ if user_auth_address_match:
+ user_auth_address = user_auth_address_match.group(1)
+ back["content_inner_path"] = "%s%s/content.json" % (content_inner_path_dir, user_auth_address)
+ else:
+ back["content_inner_path"] = content_inner_path_dir + "content.json"
+ back["optional"] = None
+ back["relative_path"] = "/".join(inner_path_parts)
+ return back
+
+ if new_file and content:
+ back = {}
+ back["content_inner_path"] = content_inner_path
+ back["relative_path"] = "/".join(inner_path_parts)
+ back["optional"] = None
+ return back
+
+ # No inner path in this dir, let's try the parent dir
+ if dirs:
+ inner_path_parts.insert(0, dirs.pop())
+ else: # No more parent dirs
+ break
+
+ # Not found
+ return False
+
+ # Get rules for the file
+ # Return: The rules for the file or False if not allowed
+ def getRules(self, inner_path, content=None):
+ if not inner_path.endswith("content.json"): # Find the files content.json first
+ file_info = self.getFileInfo(inner_path)
+ if not file_info:
+ return False # File not found
+ inner_path = file_info["content_inner_path"]
+
+ if inner_path == "content.json": # Root content.json
+ rules = {}
+ rules["signers"] = self.getValidSigners(inner_path, content)
+ return rules
+
+ dirs = inner_path.split("/") # Parent dirs of content.json
+ inner_path_parts = [dirs.pop()] # Filename relative to content.json
+ inner_path_parts.insert(0, dirs.pop()) # Don't check in own dir
+ while True:
+ content_inner_path = "%s/content.json" % "/".join(dirs)
+ parent_content = self.contents.get(content_inner_path.strip("/"))
+ if parent_content and "includes" in parent_content:
+ return parent_content["includes"].get("/".join(inner_path_parts))
+ elif parent_content and "user_contents" in parent_content:
+ return self.getUserContentRules(parent_content, inner_path, content)
+ else: # No inner path in this dir, let's try the parent dir
+ if dirs:
+ inner_path_parts.insert(0, dirs.pop())
+ else: # No more parent dirs
+ break
+
+ return False
+
+ # Get rules for a user file
+ # Return: The rules of the file or False if not allowed
+ def getUserContentRules(self, parent_content, inner_path, content):
+ user_contents = parent_content["user_contents"]
+
+ # Delivered for directory
+ if "inner_path" in parent_content:
+ parent_content_dir = helper.getDirname(parent_content["inner_path"])
+ user_address = re.match(r"([A-Za-z0-9]*?)/", inner_path[len(parent_content_dir):]).group(1)
+ else:
+ user_address = re.match(r".*/([A-Za-z0-9]*?)/.*?$", inner_path).group(1)
+
+ try:
+ if not content:
+ content = self.site.storage.loadJson(inner_path) # Read the file if no content specified
+ user_urn = "%s/%s" % (content["cert_auth_type"], content["cert_user_id"]) # web/nofish@zeroid.bit
+ cert_user_id = content["cert_user_id"]
+ except Exception: # content.json does not exist
+ user_urn = "n-a/n-a"
+ cert_user_id = "n-a"
+
+ if user_address in user_contents["permissions"]:
+ rules = copy.copy(user_contents["permissions"].get(user_address, {})) # Default rules based on address
+ else:
+ rules = copy.copy(user_contents["permissions"].get(cert_user_id, {})) # Default rules based on username
+
+ if rules is False:
+ banned = True
+ rules = {}
+ else:
+ banned = False
+ if "signers" in rules:
+ rules["signers"] = rules["signers"][:] # Make copy of the signers
+ for permission_pattern, permission_rules in list(user_contents["permission_rules"].items()): # Regexp rules
+ if not SafeRe.match(permission_pattern, user_urn):
+ continue # Rule is not valid for user
+ # Update rules if it's better than the current recorded ones
+ for key, val in permission_rules.items():
+ if key not in rules:
+ if type(val) is list:
+ rules[key] = val[:] # Make copy
+ else:
+ rules[key] = val
+ elif type(val) is int: # Int, update if larger
+ if val > rules[key]:
+ rules[key] = val
+ elif hasattr(val, "startswith"): # String, update if longer
+ if len(val) > len(rules[key]):
+ rules[key] = val
+ elif type(val) is list: # List, append
+ rules[key] += val
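+ # e.g. an int rule like max_size keeps the larger value, a string pattern
+ # keeps the longer one, and list rules are concatenated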
+
+ # Accepted cert signers
+ rules["cert_signers"] = user_contents.get("cert_signers", {})
+ rules["cert_signers_pattern"] = user_contents.get("cert_signers_pattern")
+
+ if "signers" not in rules:
+ rules["signers"] = []
+
+ if not banned:
+ rules["signers"].append(user_address) # Add user as valid signer
+ rules["user_address"] = user_address
+ rules["includes_allowed"] = False
+
+ return rules
+
+ # Get diffs for changed files
+ def getDiffs(self, inner_path, limit=30 * 1024, update_files=True):
+ if inner_path not in self.contents:
+ return {}
+ diffs = {}
+ content_inner_path_dir = helper.getDirname(inner_path)
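+ # Convention here: "<file>-new" holds the pending version, "<file>-old" the previously published one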
+ for file_relative_path in self.contents[inner_path].get("files", {}):
+ file_inner_path = content_inner_path_dir + file_relative_path
+ if self.site.storage.isFile(file_inner_path + "-new"): # New version present
+ diffs[file_relative_path] = Diff.diff(
+ list(self.site.storage.open(file_inner_path)),
+ list(self.site.storage.open(file_inner_path + "-new")),
+ limit=limit
+ )
+ if update_files:
+ self.site.storage.delete(file_inner_path)
+ self.site.storage.rename(file_inner_path + "-new", file_inner_path)
+ if self.site.storage.isFile(file_inner_path + "-old"): # Old version present
+ diffs[file_relative_path] = Diff.diff(
+ list(self.site.storage.open(file_inner_path + "-old")),
+ list(self.site.storage.open(file_inner_path)),
+ limit=limit
+ )
+ if update_files:
+ self.site.storage.delete(file_inner_path + "-old")
+ return diffs
+
+ def hashFile(self, dir_inner_path, file_relative_path, optional=False):
+ back = {}
+ file_inner_path = dir_inner_path + "/" + file_relative_path
+
+ file_path = self.site.storage.getPath(file_inner_path)
+ file_size = os.path.getsize(file_path)
+ sha512sum = CryptHash.sha512sum(file_path) # Calculate sha512 sum of file
+ if optional and not self.hashfield.hasHash(sha512sum):
+ self.optionalDownloaded(file_inner_path, self.hashfield.getHashId(sha512sum), file_size, own=True)
+
+ back[file_relative_path] = {"sha512": sha512sum, "size": os.path.getsize(file_path)}
+ return back
+
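+ # Rejects traversal ("..") segments, Windows-reserved device names and control
+ # characters; e.g. "css/all.css" passes while "../x", "CON/x.txt" and "a:b.txt" fail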
+ def isValidRelativePath(self, relative_path):
+ if ".." in relative_path.replace("\\", "/").split("/"):
+ return False
+ elif len(relative_path) > 255:
+ return False
+ elif relative_path[0] in ("/", "\\"): # Starts with
+ return False
+ elif relative_path[-1] in (".", " "): # Ends with
+ return False
+ elif re.match(r".*(^|/)(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9]|CONOUT\$|CONIN\$)(\.|/|$)", relative_path, re.IGNORECASE): # Protected on Windows
+ return False
+ else:
+ return re.match(r"^[^\x00-\x1F\"*:<>?\\|]+$", relative_path)
+
+ def sanitizePath(self, inner_path):
+ return re.sub("[\x00-\x1F\"*:<>?\\|]", "", inner_path)
+
+ # Hash files in directory
+ def hashFiles(self, dir_inner_path, ignore_pattern=None, optional_pattern=None):
+ files_node = {}
+ files_optional_node = {}
+ db_inner_path = self.site.storage.getDbFile()
+ if dir_inner_path and not self.isValidRelativePath(dir_inner_path):
+ ignored = True # Has no effect: the flag is reset at the top of the loop below
+ self.log.error("- [ERROR] Only ASCII-encoded directories allowed: %s" % dir_inner_path)
+
+ for file_relative_path in self.site.storage.walk(dir_inner_path, ignore_pattern):
+ file_name = helper.getFilename(file_relative_path)
+
+ ignored = optional = False
+ if file_name == "content.json":
+ ignored = True
+ elif file_name.startswith(".") or file_name.endswith("-old") or file_name.endswith("-new"):
+ ignored = True
+ elif not self.isValidRelativePath(file_relative_path):
+ ignored = True
+ self.log.error("- [ERROR] Invalid filename: %s" % file_relative_path)
+ elif dir_inner_path == "" and db_inner_path and file_relative_path.startswith(db_inner_path):
+ ignored = True
+ elif optional_pattern and SafeRe.match(optional_pattern, file_relative_path):
+ optional = True
+
+ if ignored: # Ignore content.json, defined regexp and files starting with .
+ self.log.info("- [SKIPPED] %s" % file_relative_path)
+ else:
+ if optional:
+ self.log.info("- [OPTIONAL] %s" % file_relative_path)
+ files_optional_node.update(
+ self.hashFile(dir_inner_path, file_relative_path, optional=True)
+ )
+ else:
+ self.log.info("- %s" % file_relative_path)
+ files_node.update(
+ self.hashFile(dir_inner_path, file_relative_path)
+ )
+ return files_node, files_optional_node
+
+ # Create and sign a content.json
+ # Return: The new content if filewrite = False
+ def sign(self, inner_path="content.json", privatekey=None, filewrite=True, update_changed_files=False, extend=None, remove_missing_optional=False):
+ if not inner_path.endswith("content.json"):
+ raise SignError("Invalid file name, you can only sign content.json files")
+
+ if inner_path in self.contents:
+ content = self.contents.get(inner_path)
+ if content and content.get("cert_sign", False) is None and self.site.storage.isFile(inner_path):
+ # Recover cert_sign from file
+ content["cert_sign"] = self.site.storage.loadJson(inner_path).get("cert_sign")
+ else:
+ content = None
+ if not content: # Content does not exist yet, load default one
+ self.log.info("File %s does not exist yet, loading default values..." % inner_path)
+
+ if self.site.storage.isFile(inner_path):
+ content = self.site.storage.loadJson(inner_path)
+ if "files" not in content:
+ content["files"] = {}
+ if "signs" not in content:
+ content["signs"] = {}
+ else:
+ content = {"files": {}, "signs": {}} # Default content.json
+
+ if inner_path == "content.json": # It's the root content.json, add some more fields
+ content["title"] = "%s - ZeroNet_" % self.site.address
+ content["description"] = ""
+ content["signs_required"] = 1
+ content["ignore"] = ""
+
+ if extend:
+ # Add extend keys if not exists
+ for key, val in list(extend.items()):
+ if not content.get(key):
+ content[key] = val
+ self.log.info("Extending content.json with: %s" % key)
+
+ directory = helper.getDirname(self.site.storage.getPath(inner_path))
+ inner_directory = helper.getDirname(inner_path)
+ self.log.info("Opening site data directory: %s..." % directory)
+
+ changed_files = [inner_path]
+ files_node, files_optional_node = self.hashFiles(
+ helper.getDirname(inner_path), content.get("ignore"), content.get("optional")
+ )
+
+ if not remove_missing_optional:
+ for file_inner_path, file_details in content.get("files_optional", {}).items():
+ if file_inner_path not in files_optional_node:
+ files_optional_node[file_inner_path] = file_details
+
+ # Find changed files
+ files_merged = files_node.copy()
+ files_merged.update(files_optional_node)
+ for file_relative_path, file_details in files_merged.items():
+ old_hash = content.get("files", {}).get(file_relative_path, {}).get("sha512")
+ new_hash = files_merged[file_relative_path]["sha512"]
+ if old_hash != new_hash:
+ changed_files.append(inner_directory + file_relative_path)
+
+ self.log.debug("Changed files: %s" % changed_files)
+ if update_changed_files:
+ for file_path in changed_files:
+ self.site.storage.onUpdated(file_path)
+
+ # Generate new content.json
+ self.log.info("Adding timestamp and sha512sums to new content.json...")
+
+ new_content = content.copy() # Create a copy of current content.json
+ new_content["files"] = files_node # Add files sha512 hash
+ if files_optional_node:
+ new_content["files_optional"] = files_optional_node
+ elif "files_optional" in new_content:
+ del new_content["files_optional"]
+
+ if inner_path == "content.json":
+ new_content["zeronet_version"] = config.version
+ new_content["signs_required"] = content.get("signs_required", 1)
+
+ new_content["address"] = self.site.address
+ new_content["inner_path"] = inner_path
+
+ # Verify private key
+ from Crypt import CryptBitcoin
+ self.log.info("Verifying private key...")
+ privatekey_address = CryptBitcoin.privatekeyToAddress(privatekey)
+ valid_signers = self.getValidSigners(inner_path, new_content)
+ if privatekey_address not in valid_signers:
+ raise SignError(
+ "Private key invalid! Valid signers: %s, Private key address: %s" %
+ (valid_signers, privatekey_address)
+ )
+ self.log.info("Correct %s in valid signers: %s" % (privatekey_address, valid_signers))
+
+ signs_required = 1
+ if inner_path == "content.json" and privatekey_address == self.site.address:
+ # If signing using the root key, then sign the valid signers
+ signs_required = new_content["signs_required"]
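+ # signers_data has the form "<signs_required>:<addr1>,<addr2>,..."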
+ signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
+ new_content["signers_sign"] = CryptBitcoin.sign(str(signers_data), privatekey)
+ if not new_content["signers_sign"]:
+ self.log.info("Old style address, signers_sign is none")
+
+ self.log.info("Signing %s..." % inner_path)
+
+ if "signs" in new_content:
+ # del(new_content["signs"]) # Delete old signs
+ old_signs_content = new_content["signs"]
+ del(new_content["signs"])
+ else:
+ old_signs_content = None
+ if "sign" in new_content:
+ del(new_content["sign"]) # Delete old sign (backward compatibility)
+
+ if signs_required > 1:
+ has_valid_sign = False
+ sign_content = json.dumps(new_content, sort_keys=True)
+ for signer in valid_signers:
+ res = CryptBitcoin.verify(sign_content, signer, (old_signs_content or {}).get(signer))
+ if res:
+ has_valid_sign = True
+ if has_valid_sign:
+ # An existing sign is still valid: keep the old modified timestamp
+ new_content["modified"] = content["modified"]
+ sign_content = json.dumps(new_content, sort_keys=True)
+ else:
+ new_content["modified"] = int(time.time()) # Add timestamp
+ sign_content = json.dumps(new_content, sort_keys=True)
+ sign = CryptBitcoin.sign(sign_content, privatekey)
+ # new_content["signs"] = content.get("signs", {}) # TODO: Multisig
+ if sign: # If signing is successful (not an old address)
+ new_content["signs"] = old_signs_content or {}
+ new_content["signs"][privatekey_address] = sign
+
+ self.verifyContent(inner_path, new_content)
+
+ if filewrite:
+ self.log.info("Saving to %s..." % inner_path)
+ self.site.storage.writeJson(inner_path, new_content)
+ self.contents[inner_path] = new_content
+
+ self.log.info("File %s signed!" % inner_path)
+
+ if filewrite: # Written to file
+ return True
+ else: # Return the new content
+ return new_content
+
+ # The valid signers of content.json file
+ # Return: ["1KRxE1s3oDyNDawuYWpzbLUwNm8oDbeEp6", "13ReyhCsjhpuCVahn1DHdf6eMqqEVev162"]
+ def getValidSigners(self, inner_path, content=None):
+ valid_signers = []
+ if inner_path == "content.json": # Root content.json
+ if "content.json" in self.contents and "signers" in self.contents["content.json"]:
+ valid_signers += self.contents["content.json"]["signers"][:]
+ else:
+ rules = self.getRules(inner_path, content)
+ if rules and "signers" in rules:
+ valid_signers += rules["signers"]
+
+ if self.site.address not in valid_signers:
+ valid_signers.append(self.site.address) # Site address always valid
+ return valid_signers
+
+ # Return: The required number of valid signs for the content.json
+ def getSignsRequired(self, inner_path, content=None):
+ if not content:
+ return 1
+ return content.get("signs_required", 1)
+
+ def verifyCertSign(self, user_address, user_auth_type, user_name, issuer_address, sign):
+ from Crypt import CryptBitcoin
+ cert_subject = "%s#%s/%s" % (user_address, user_auth_type, user_name)
+ return CryptBitcoin.verify(cert_subject, issuer_address, sign)
+
+ def verifyCert(self, inner_path, content):
+ rules = self.getRules(inner_path, content)
+
+ if not rules:
+ raise VerifyError("No rules for this file")
+
+ if not rules.get("cert_signers") and not rules.get("cert_signers_pattern"):
+ return True # Does not need cert
+
+ if "cert_user_id" not in content:
+ raise VerifyError("Missing cert_user_id")
+
+ if content["cert_user_id"].count("@") != 1:
+ raise VerifyError("Invalid domain in cert_user_id")
+
+ name, domain = content["cert_user_id"].rsplit("@", 1)
+ cert_address = rules["cert_signers"].get(domain)
+ if not cert_address: # Unknown Cert signer
+ if rules.get("cert_signers_pattern") and SafeRe.match(rules["cert_signers_pattern"], domain):
+ cert_address = domain
+ else:
+ raise VerifyError("Invalid cert signer: %s" % domain)
+
+ return self.verifyCertSign(rules["user_address"], content["cert_auth_type"], name, cert_address, content["cert_sign"])
+
+ # Checks if the content.json content is valid
+ # Return: True or False
+ def verifyContent(self, inner_path, content):
+ content_size = len(json.dumps(content, indent=1)) + sum([file["size"] for file in list(content["files"].values()) if file["size"] >= 0]) # Size of new content
+ # Calculate old content size
+ old_content = self.contents.get(inner_path)
+ if old_content:
+ old_content_size = len(json.dumps(old_content, indent=1)) + sum([file["size"] for file in list(old_content.get("files", {}).values())])
+ old_content_size_optional = sum([file["size"] for file in list(old_content.get("files_optional", {}).values())])
+ else:
+ old_content_size = 0
+ old_content_size_optional = 0
+
+ # Reset site size on first content.json
+ if not old_content and inner_path == "content.json":
+ self.site.settings["size"] = 0
+
+ content_size_optional = sum([file["size"] for file in list(content.get("files_optional", {}).values()) if file["size"] >= 0])
+ site_size = self.site.settings["size"] - old_content_size + content_size # Site size without old content plus the new
+ site_size_optional = self.site.settings["size_optional"] - old_content_size_optional + content_size_optional # Site size without old content plus the new
+
+ site_size_limit = self.site.getSizeLimit() * 1024 * 1024
+
+ # Check site address
+ if content.get("address") and content["address"] != self.site.address:
+ raise VerifyError("Wrong site address: %s != %s" % (content["address"], self.site.address))
+
+ # Check file inner path
+ if content.get("inner_path") and content["inner_path"] != inner_path:
+ raise VerifyError("Wrong inner_path: %s" % content["inner_path"])
+
+ # If the content.json file itself is bigger than the size limit, raise an error
+ if inner_path == "content.json":
+ content_size_file = len(json.dumps(content, indent=1))
+ if content_size_file > site_size_limit:
+ # Save site size to display warning
+ self.site.settings["size"] = site_size
+ task = self.site.worker_manager.tasks.findTask(inner_path)
+ if task: # Don't try to download from other peers
+ self.site.worker_manager.failTask(task)
+ raise VerifyError("Content too large %s B > %s B, aborting task..." % (site_size, site_size_limit))
+
+ # Verify valid filenames
+ for file_relative_path in list(content.get("files", {}).keys()) + list(content.get("files_optional", {}).keys()):
+ if not self.isValidRelativePath(file_relative_path):
+ raise VerifyError("Invalid relative path: %s" % file_relative_path)
+
+ if inner_path == "content.json":
+ self.site.settings["size"] = site_size
+ self.site.settings["size_optional"] = site_size_optional
+ return True # Root content.json is passed
+ else:
+ if self.verifyContentInclude(inner_path, content, content_size, content_size_optional):
+ self.site.settings["size"] = site_size
+ self.site.settings["size_optional"] = site_size_optional
+ return True
+ else:
+ raise VerifyError("Content verify error")
+
+ def verifyContentInclude(self, inner_path, content, content_size, content_size_optional):
+ # Load include details
+ rules = self.getRules(inner_path, content)
+ if not rules:
+ raise VerifyError("No rules")
+
+ # Check include size limit
+ if rules.get("max_size") is not None: # Include size limit
+ if content_size > rules["max_size"]:
+ raise VerifyError("Include too large %sB > %sB" % (content_size, rules["max_size"]))
+
+ if rules.get("max_size_optional") is not None: # Include optional files limit
+ if content_size_optional > rules["max_size_optional"]:
+ raise VerifyError("Include optional files too large %sB > %sB" % (
+ content_size_optional, rules["max_size_optional"])
+ )
+
+ # Filename limit
+ if rules.get("files_allowed"):
+ for file_inner_path in list(content["files"].keys()):
+ if not SafeRe.match(r"^%s$" % rules["files_allowed"], file_inner_path):
+ raise VerifyError("File not allowed: %s" % file_inner_path)
+
+ if rules.get("files_allowed_optional"):
+ for file_inner_path in list(content.get("files_optional", {}).keys()):
+ if not SafeRe.match(r"^%s$" % rules["files_allowed_optional"], file_inner_path):
+ raise VerifyError("Optional file not allowed: %s" % file_inner_path)
+
+ # Check if content includes allowed
+ if rules.get("includes_allowed") is False and content.get("includes"):
+ raise VerifyError("Includes not allowed")
+
+ return True # All good
+
+ # Verify file validity
+ # Return: None = Same as before, False = Invalid, True = Valid
+ def verifyFile(self, inner_path, file, ignore_same=True):
+ if inner_path.endswith("content.json"): # content.json: Check using sign
+ from Crypt import CryptBitcoin
+ try:
+ if type(file) is dict:
+ new_content = file
+ else:
+ try:
+ if sys.version_info.major == 3 and sys.version_info.minor < 6:
+ new_content = json.loads(file.read().decode("utf8"))
+ else:
+ new_content = json.load(file)
+ except Exception as err:
+ raise VerifyError("Invalid json file: %s" % err)
+ if inner_path in self.contents:
+ old_content = self.contents.get(inner_path, {"modified": 0})
+ # Check if it's newer than ours
+ if old_content["modified"] == new_content["modified"] and ignore_same: # Ignore, have the same content.json
+ return None
+ elif old_content["modified"] > new_content["modified"]: # We have newer
+ raise VerifyError(
+ "We have newer (Our: %s, Sent: %s)" %
+ (old_content["modified"], new_content["modified"])
+ )
+ if new_content["modified"] > time.time() + 60 * 60 * 24: # Content modified in the far future (allow 1 day+)
+ raise VerifyError("Modify timestamp is in the far future!")
+ if self.isArchived(inner_path, new_content["modified"]):
+ if inner_path in self.site.bad_files:
+ del self.site.bad_files[inner_path]
+ raise VerifyError("This file is archived!")
+ # Check sign
+ sign = new_content.get("sign")
+ signs = new_content.get("signs", {})
+ if "sign" in new_content:
+ del(new_content["sign"]) # The file signed without the sign
+ if "signs" in new_content:
+ del(new_content["signs"]) # The file signed without the signs
+
+ sign_content = json.dumps(new_content, sort_keys=True) # Dump the json to string to remove whitespace
+
+ # Fix float representation error on Android
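+ # e.g. a modified value of 1602071549.42 can serialize with extra decimals on
+ # some platforms; "{:.6f}" pins it to 6 places and the trailing zeros are stripped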
+ modified = new_content["modified"]
+ if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
+ modified_fixed = "{:.6f}".format(modified).strip("0.")
+ sign_content = sign_content.replace(
+ '"modified": %s' % repr(modified),
+ '"modified": %s' % modified_fixed
+ )
+
+ if signs: # New style signing
+ valid_signers = self.getValidSigners(inner_path, new_content)
+ signs_required = self.getSignsRequired(inner_path, new_content)
+
+ if inner_path == "content.json" and len(valid_signers) > 1: # Check signers_sign on root content.json
+ signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
+ if not CryptBitcoin.verify(signers_data, self.site.address, new_content["signers_sign"]):
+ raise VerifyError("Invalid signers_sign!")
+
+ if inner_path != "content.json" and not self.verifyCert(inner_path, new_content): # Check if cert valid
+ raise VerifyError("Invalid cert!")
+
+ valid_signs = []
+ for address in valid_signers:
+ if address in signs:
+ result = CryptBitcoin.verify(sign_content, address, signs[address])
+ if result:
+ valid_signs.append(address)
+ if len(valid_signs) >= signs_required:
+ break # Stop once we have enough signs
+ if len(valid_signs) < signs_required:
+ raise VerifyError("Valid signs: %s/%s, valid signers: %s" % (len(valid_signs), signs_required, valid_signs))
+ else:
+ return self.verifyContent(inner_path, new_content)
+ else: # Old style signing
+ raise VerifyError("Invalid old-style sign")
+
+ except Exception as err:
+ self.log.warning("%s: verify sign error: %s" % (inner_path, Debug.formatException(err)))
+ raise err
+
+ else: # Check using sha512 hash
+ file_info = self.getFileInfo(inner_path)
+ if file_info:
+ if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
+ raise VerifyError("Invalid hash")
+
+ if file_info.get("size", 0) != file.tell():
+ raise VerifyError(
+ "File size does not match for %s: %s <> %s" %
+ (inner_path, file.tell(), file_info.get("size", 0))
+ )
+
+ return True
+
+ else: # File not in content.json
+ raise VerifyError("File not in content.json")
+
+ def optionalDelete(self, inner_path):
+ self.site.storage.delete(inner_path)
+
+ def optionalDownloaded(self, inner_path, hash_id, size=None, own=False):
+ if size is None:
+ size = self.site.storage.getSize(inner_path)
+
+ done = self.hashfield.appendHashId(hash_id)
+ self.site.settings["optional_downloaded"] += size
+ return done
+
+ def optionalRemoved(self, inner_path, hash_id, size=None):
+ if size is None:
+ size = self.site.storage.getSize(inner_path)
+ done = self.hashfield.removeHashId(hash_id)
+
+ self.site.settings["optional_downloaded"] -= size
+ return done
+
+ def optionalRenamed(self, inner_path_old, inner_path_new):
+ return True
diff --git a/src/Content/__init__.py b/src/Content/__init__.py
new file mode 100644
index 00000000..fbbd39f4
--- /dev/null
+++ b/src/Content/__init__.py
@@ -0,0 +1 @@
+from .ContentManager import ContentManager
\ No newline at end of file
diff --git a/src/Crypt/Crypt.py b/src/Crypt/Crypt.py
new file mode 100644
index 00000000..7d7d3659
--- /dev/null
+++ b/src/Crypt/Crypt.py
@@ -0,0 +1,4 @@
+from Config import config
+from util import ThreadPool
+
+thread_pool_crypt = ThreadPool.ThreadPool(config.threads_crypt)
\ No newline at end of file
diff --git a/src/Crypt/CryptBitcoin.py b/src/Crypt/CryptBitcoin.py
new file mode 100644
index 00000000..68b2caa2
--- /dev/null
+++ b/src/Crypt/CryptBitcoin.py
@@ -0,0 +1,101 @@
+import logging
+import base64
+import binascii
+import time
+import hashlib
+
+from util.Electrum import dbl_format
+from Config import config
+
+import util.OpensslFindPatch
+
+lib_verify_best = "sslcrypto"
+
+from lib import sslcrypto
+sslcurve_native = sslcrypto.ecc.get_curve("secp256k1")
+sslcurve_fallback = sslcrypto.fallback.ecc.get_curve("secp256k1")
+sslcurve = sslcurve_native
+
+def loadLib(lib_name, silent=False):
+ global sslcurve, libsecp256k1message, lib_verify_best
+ if lib_name == "libsecp256k1":
+ s = time.time()
+ from lib import libsecp256k1message
+ import coincurve
+ lib_verify_best = "libsecp256k1"
+ if not silent:
+ logging.info(
+ "Libsecpk256k1 loaded: %s in %.3fs" %
+ (type(coincurve._libsecp256k1.lib).__name__, time.time() - s)
+ )
+ elif lib_name == "sslcrypto":
+ sslcurve = sslcurve_native
+ if sslcurve_native == sslcurve_fallback:
+ logging.warning("SSLCurve fallback loaded instead of native")
+ elif lib_name == "sslcrypto_fallback":
+ sslcurve = sslcurve_fallback
+
+try:
+ if not config.use_libsecp256k1:
+ raise Exception("Disabled by config")
+ loadLib("libsecp256k1")
+ lib_verify_best = "libsecp256k1"
+except Exception as err:
+ logging.info("Libsecp256k1 load failed: %s" % err)
+
+
+def newPrivatekey(): # Return new private key
+ return sslcurve.private_to_wif(sslcurve.new_private_key()).decode()
+
+
+def newSeed():
+ return binascii.hexlify(sslcurve.new_private_key()).decode()
+
+
+def hdPrivatekey(seed, child):
+ # Too large child id could cause problems
+ privatekey_bin = sslcurve.derive_child(seed.encode(), child % 100000000)
+ return sslcurve.private_to_wif(privatekey_bin).decode()
+
+
+def privatekeyToAddress(privatekey): # Return address from private key
+ try:
+ if len(privatekey) == 64:
+ privatekey_bin = bytes.fromhex(privatekey)
+ else:
+ privatekey_bin = sslcurve.wif_to_private(privatekey.encode())
+ return sslcurve.private_to_address(privatekey_bin).decode()
+ except Exception: # Invalid privatekey
+ return False
+
+
+def sign(data, privatekey): # Return sign to data using private key
+ if privatekey.startswith("23") and len(privatekey) > 52:
+ return None # Old style private key not supported
+ return base64.b64encode(sslcurve.sign(
+ data.encode(),
+ sslcurve.wif_to_private(privatekey.encode()),
+ recoverable=True,
+ hash=dbl_format
+ )).decode()
+
+
+def verify(data, valid_address, sign, lib_verify=None): # Verify data using address and sign
+ if not lib_verify:
+ lib_verify = lib_verify_best
+
+ if not sign:
+ return False
+
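+ # Both backends verify by recovering the signer's address from the recoverable
+ # signature and comparing it with the expected address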
+ if lib_verify == "libsecp256k1":
+ sign_address = libsecp256k1message.recover_address(data.encode("utf8"), sign).decode("utf8")
+ elif lib_verify in ("sslcrypto", "sslcrypto_fallback"):
+ publickey = sslcurve.recover(base64.b64decode(sign), data.encode(), hash=dbl_format)
+ sign_address = sslcurve.public_to_address(publickey).decode()
+ else:
+ raise Exception("No library enabled for signature verification")
+
+ if type(valid_address) is list: # Any address in the list
+ return sign_address in valid_address
+ else: # One possible address
+ return sign_address == valid_address
diff --git a/CryptConnection.py b/src/Crypt/CryptConnection.py
similarity index 86%
rename from CryptConnection.py
rename to src/Crypt/CryptConnection.py
index f3c6b7b6..c0903e84 100644
--- a/CryptConnection.py
+++ b/src/Crypt/CryptConnection.py
@@ -11,13 +11,12 @@ from util import helper
class CryptConnectionManager:
def __init__(self):
- this_file = os.path.abspath(__file__).replace("\\", "/").rstrip("cd")
- if sys.platform.startswith("win"):
+ if config.openssl_bin_file:
+ self.openssl_bin = config.openssl_bin_file
+ elif sys.platform.startswith("win"):
self.openssl_bin = "tools\\openssl\\openssl.exe"
elif config.dist_type.startswith("bundle_linux"):
self.openssl_bin = "../runtime/bin/openssl"
- elif "in.canews.zeronet" in this_file:
- self.openssl_bin = "../usr/bin/openssl"
else:
self.openssl_bin = "openssl"
@@ -91,17 +90,13 @@ class CryptConnectionManager:
def wrapSocket(self, sock, crypt, server=False, cert_pin=None):
if crypt == "tls-rsa":
if server:
- sock_wrapped = self.context_server.wrap_socket(
- sock, server_side=True)
+ sock_wrapped = self.context_server.wrap_socket(sock, server_side=True)
else:
- sock_wrapped = self.context_client.wrap_socket(
- sock, server_hostname=random.choice(self.fakedomains))
+ sock_wrapped = self.context_client.wrap_socket(sock, server_hostname=random.choice(self.fakedomains))
if cert_pin:
- cert_hash = hashlib.sha256(
- sock_wrapped.getpeercert(True)).hexdigest()
+ cert_hash = hashlib.sha256(sock_wrapped.getpeercert(True)).hexdigest()
if cert_hash != cert_pin:
- raise Exception(
- "Socket certificate does not match (%s != %s)" % (cert_hash, cert_pin))
+ raise Exception("Socket certificate does not match (%s != %s)" % (cert_hash, cert_pin))
return sock_wrapped
else:
return sock
@@ -132,6 +127,10 @@ class CryptConnectionManager:
"/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Domain Validation Secure Server CA"
]
self.openssl_env['CN'] = random.choice(self.fakedomains)
+ environ = os.environ
+ environ['OPENSSL_CONF'] = self.openssl_env['OPENSSL_CONF']
+ environ['RANDFILE'] = self.openssl_env['RANDFILE']
+ environ['CN'] = self.openssl_env['CN']
if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
self.createSslContexts()
@@ -141,8 +140,7 @@ class CryptConnectionManager:
# Replace variables in config template
conf_template = open(self.openssl_conf_template).read()
- conf_template = conf_template.replace(
- "$ENV::CN", self.openssl_env['CN'])
+ conf_template = conf_template.replace("$ENV::CN", self.openssl_env['CN'])
open(self.openssl_conf, "w").write(conf_template)
# Generate CAcert and CAkey
@@ -158,14 +156,13 @@ class CryptConnectionManager:
self.log.debug("Running: %s" % cmd)
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
if not (os.path.isfile(self.cacert_pem) and os.path.isfile(self.cakey_pem)):
- self.log.error(
- "RSA ECC SSL CAcert generation failed, CAcert or CAkey files not exist. (%s)" % back)
+ self.log.error("RSA ECC SSL CAcert generation failed, CAcert or CAkey files not exist. (%s)" % back)
return False
else:
self.log.debug("Result: %s" % back)
@@ -182,7 +179,7 @@ class CryptConnectionManager:
self.log.debug("Generating certificate key and signing request...")
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
@@ -201,7 +198,7 @@ class CryptConnectionManager:
self.log.debug("Generating RSA cert...")
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
@@ -218,8 +215,7 @@ class CryptConnectionManager:
return True
else:
- self.log.error(
- "RSA ECC SSL cert generation failed, cert or key files not exist.")
+ self.log.error("RSA ECC SSL cert generation failed, cert or key files not exist.")
manager = CryptConnectionManager()
diff --git a/src/Crypt/CryptHash.py b/src/Crypt/CryptHash.py
new file mode 100644
index 00000000..f5901fb8
--- /dev/null
+++ b/src/Crypt/CryptHash.py
@@ -0,0 +1,56 @@
+import hashlib
+import os
+import base64
+
+
+def sha512sum(file, blocksize=65536, format="hexdigest"):
+ if type(file) is str: # Filename specified
+ file = open(file, "rb")
+ hash = hashlib.sha512()
+ for block in iter(lambda: file.read(blocksize), b""):
+ hash.update(block)
+
+ # Truncating to 256 bits is good enough
+ if format == "hexdigest":
+ return hash.hexdigest()[0:64]
+ else:
+ return hash.digest()[0:32]
+
+
+def sha256sum(file, blocksize=65536):
+ if type(file) is str: # Filename specified
+ file = open(file, "rb")
+ hash = hashlib.sha256()
+ for block in iter(lambda: file.read(blocksize), b""):
+ hash.update(block)
+ return hash.hexdigest()
+
+
+def random(length=64, encoding="hex"):
+ if encoding == "base64": # Characters: A-Za-z0-9
+ hash = hashlib.sha512(os.urandom(256)).digest()
+ return base64.b64encode(hash).decode("ascii").replace("+", "").replace("/", "").replace("=", "")[0:length]
+ else: # Characters: a-f0-9 (faster)
+ return hashlib.sha512(os.urandom(256)).hexdigest()[0:length]
+
+
+# Sha512 truncated to 256bits
+class Sha512t:
+ def __init__(self, data):
+ if data:
+ self.sha512 = hashlib.sha512(data)
+ else:
+ self.sha512 = hashlib.sha512()
+
+ def hexdigest(self):
+ return self.sha512.hexdigest()[0:64]
+
+ def digest(self):
+ return self.sha512.digest()[0:32]
+
+ def update(self, data):
+ return self.sha512.update(data)
+
+
+def sha512t(data=None):
+ return Sha512t(data)
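+
+
+# Usage sketch: incremental hashing via sha512t() matches the one-shot
+# truncated digest (illustrative helper, not part of the original module).
+def _exampleSha512t():
+ h = sha512t(b"hello ")
+ h.update(b"world")
+ assert h.hexdigest() == sha512t(b"hello world").hexdigest()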
diff --git a/src/Crypt/CryptTor.py b/src/Crypt/CryptTor.py
new file mode 100644
index 00000000..78ba6fc2
--- /dev/null
+++ b/src/Crypt/CryptTor.py
@@ -0,0 +1,85 @@
+import base64
+import hashlib
+
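+# The length of the key material selects the onion service version below:
+# an 88-char base64 private key (or 32-byte public key) is handled as
+# Ed25519 (v3), anything else is treated as an RSA key (v2).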
+def sign(data, privatekey):
+ import rsa
+ from rsa import pkcs1
+ from lib import Ed25519
+
+ ## Onion Service V3
+ if len(privatekey) == 88:
+ prv_key = base64.b64decode(privatekey)
+ pub_key = Ed25519.publickey_unsafe(prv_key)
+ sign = Ed25519.signature_unsafe(data, prv_key, pub_key)
+
+ return sign
+
+ ## Onion Service V2
+ if "BEGIN RSA PRIVATE KEY" not in privatekey:
+ privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey
+
+ priv = rsa.PrivateKey.load_pkcs1(privatekey)
+ sign = rsa.pkcs1.sign(data, priv, 'SHA-256')
+ return sign
+
+def verify(data, publickey, sign):
+ import rsa
+ from rsa import pkcs1
+ from lib import Ed25519
+
+ ## Onion Service V3
+ if len(publickey) == 32:
+
+ try:
+ Ed25519.checkvalid(sign, data, publickey) # Raises on an invalid signature
+ valid = 'SHA-256' # Mimic rsa.pkcs1.verify(), which returns the hash name on success
+
+ except Exception as err:
+ print(err)
+ valid = False
+
+ return valid
+
+ ## Onion Service V2
+ pub = rsa.PublicKey.load_pkcs1(publickey, format="DER")
+
+ try:
+ valid = rsa.pkcs1.verify(data, sign, pub)
+
+ except pkcs1.VerificationError:
+ valid = False
+
+ return valid
+
+def privatekeyToPublickey(privatekey):
+ import rsa
+ from rsa import pkcs1
+ from lib import Ed25519
+
+ ## Onion Service V3
+ if len(privatekey) == 88:
+ prv_key = base64.b64decode(privatekey)
+ pub_key = Ed25519.publickey_unsafe(prv_key)
+
+ return pub_key
+
+ ## Onion Service V2
+ if "BEGIN RSA PRIVATE KEY" not in privatekey:
+ privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey
+
+ priv = rsa.PrivateKey.load_pkcs1(privatekey)
+ pub = rsa.PublicKey(priv.n, priv.e)
+
+ return pub.save_pkcs1("DER")
+
+def publickeyToOnion(publickey):
+ from lib import Ed25519
+
+ ## Onion Service V3
+ if len(publickey) == 32:
+ addr = Ed25519.publickey_to_onionaddress(publickey)[:-6]
+
+ return addr
+
+ ## Onion Service V2
+ return base64.b32encode(hashlib.sha1(publickey).digest()[:10]).lower().decode("ascii")
diff --git a/src/Crypt/__init__.py b/src/Crypt/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/Db/Db.py b/src/Db/Db.py
new file mode 100644
index 00000000..d1d9ce15
--- /dev/null
+++ b/src/Db/Db.py
@@ -0,0 +1,519 @@
+import sqlite3
+import json
+import time
+import logging
+import re
+import os
+import atexit
+import threading
+import sys
+import weakref
+import errno
+
+import gevent
+
+from Debug import Debug
+from .DbCursor import DbCursor
+from util import SafeRe
+from util import helper
+from util import ThreadPool
+from Config import config
+
+thread_pool_db = ThreadPool.ThreadPool(config.threads_db)
+
+next_db_id = 0
+opened_dbs = []
+
+
+# Close idle databases to save some memory
+def dbCleanup():
+ while 1:
+ time.sleep(60 * 5)
+ for db in opened_dbs[:]:
+ idle = time.time() - db.last_query_time
+ if idle > 60 * 5 and db.close_idle:
+ db.close("Cleanup")
+
+
+def dbCommitCheck():
+ while 1:
+ time.sleep(5)
+ for db in opened_dbs[:]:
+ if not db.need_commit:
+ continue
+
+ success = db.commit("Interval")
+ if success:
+ db.need_commit = False
+ time.sleep(0.1)
+
+
+def dbCloseAll():
+ for db in opened_dbs[:]:
+ db.close("Close all")
+
+
+gevent.spawn(dbCleanup)
+gevent.spawn(dbCommitCheck)
+atexit.register(dbCloseAll)
+
+
+class DbTableError(Exception):
+ def __init__(self, message, table):
+ super().__init__(message)
+ self.table = table
+
+
+class Db(object):
+
+ def __init__(self, schema, db_path, close_idle=False):
+ global next_db_id
+ self.db_path = db_path
+ self.db_dir = os.path.dirname(db_path) + "/"
+ self.schema = schema
+ self.schema["version"] = self.schema.get("version", 1)
+ self.conn = None
+ self.cur = None
+ self.cursors = weakref.WeakSet()
+ self.id = next_db_id
+ next_db_id += 1
+ self.progress_sleeping = False
+ self.commiting = False
+ self.log = logging.getLogger("Db#%s:%s" % (self.id, schema["db_name"]))
+ self.table_names = None
+ self.collect_stats = False
+ self.foreign_keys = False
+ self.need_commit = False
+ self.query_stats = {}
+ self.db_keyvalues = {}
+ self.delayed_queue = []
+ self.delayed_queue_thread = None
+ self.close_idle = close_idle
+ self.last_query_time = time.time()
+ self.last_sleep_time = time.time()
+ self.num_execute_since_sleep = 0
+ self.lock = ThreadPool.Lock()
+ self.connect_lock = ThreadPool.Lock()
+
+ def __repr__(self):
+ return "" % (id(self), self.db_path, self.close_idle)
+
+ def connect(self):
+ self.connect_lock.acquire(True)
+ try:
+ if self.conn:
+ self.log.debug("Already connected, connection ignored")
+ return
+
+ if self not in opened_dbs:
+ opened_dbs.append(self)
+ s = time.time()
+ try: # Directory may not exist yet
+ os.makedirs(self.db_dir)
+ self.log.debug("Created Db path: %s" % self.db_dir)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ raise err
+ if not os.path.isfile(self.db_path):
+ self.log.debug("Db file not exist yet: %s" % self.db_path)
+ self.conn = sqlite3.connect(self.db_path, isolation_level="DEFERRED", check_same_thread=False)
+ self.conn.row_factory = sqlite3.Row
+ self.conn.set_progress_handler(self.progress, 5000000)
+ self.conn.execute('PRAGMA journal_mode=WAL')
+ if self.foreign_keys:
+ self.conn.execute("PRAGMA foreign_keys = ON")
+ self.cur = self.getCursor()
+
+ self.log.debug(
+ "Connected to %s in %.3fs (opened: %s, sqlite version: %s)..." %
+ (self.db_path, time.time() - s, len(opened_dbs), sqlite3.version)
+ )
+ self.log.debug("Connect by thread: %s" % threading.current_thread().ident)
+ self.log.debug("Connect called by %s" % Debug.formatStack())
+ finally:
+ self.connect_lock.release()
+
+ def getConn(self):
+ if not self.conn:
+ self.connect()
+ return self.conn
+
+ def progress(self, *args, **kwargs):
+ self.progress_sleeping = True
+ time.sleep(0.001)
+ self.progress_sleeping = False
+
+ # Execute query using dbcursor
+ def execute(self, query, params=None):
+ if not self.conn:
+ self.connect()
+ return self.cur.execute(query, params)
+
+ @thread_pool_db.wrap
+ def commit(self, reason="Unknown"):
+ if self.progress_sleeping:
+ self.log.debug("Commit ignored: Progress sleeping")
+ return False
+
+ if not self.conn:
+ self.log.debug("Commit ignored: No connection")
+ return False
+
+ if self.commiting:
+ self.log.debug("Commit ignored: Already commiting")
+ return False
+
+ try:
+ s = time.time()
+ self.commiting = True
+ self.conn.commit()
+ self.log.debug("Commited in %.3fs (reason: %s)" % (time.time() - s, reason))
+ return True
+ except Exception as err:
+ if "SQL statements in progress" in str(err):
+ self.log.warning("Commit delayed: %s (reason: %s)" % (Debug.formatException(err), reason))
+ else:
+ self.log.error("Commit error: %s (reason: %s)" % (Debug.formatException(err), reason))
+ return False
+ finally:
+ self.commiting = False
+
+ def insertOrUpdate(self, *args, **kwargs):
+ if not self.conn:
+ self.connect()
+ return self.cur.insertOrUpdate(*args, **kwargs)
+
+ def executeDelayed(self, *args, **kwargs):
+ if not self.delayed_queue_thread:
+ self.delayed_queue_thread = gevent.spawn_later(1, self.processDelayed)
+ self.delayed_queue.append(("execute", (args, kwargs)))
+
+ def insertOrUpdateDelayed(self, *args, **kwargs):
+ if not self.delayed_queue:
+ gevent.spawn_later(1, self.processDelayed)
+ self.delayed_queue.append(("insertOrUpdate", (args, kwargs)))
+
+ def processDelayed(self):
+ if not self.delayed_queue:
+ self.log.debug("processDelayed aborted")
+ return
+ if not self.conn:
+ self.connect()
+
+ s = time.time()
+ cur = self.getCursor()
+ for command, params in self.delayed_queue:
+ if command == "insertOrUpdate":
+ cur.insertOrUpdate(*params[0], **params[1])
+ else:
+ cur.execute(*params[0], **params[1])
+
+ if len(self.delayed_queue) > 10:
+ self.log.debug("Processed %s delayed queue in %.3fs" % (len(self.delayed_queue), time.time() - s))
+ self.delayed_queue = []
+ self.delayed_queue_thread = None
+
+ def close(self, reason="Unknown"):
+ if not self.conn:
+ return False
+ self.connect_lock.acquire()
+ s = time.time()
+ if self.delayed_queue:
+ self.processDelayed()
+ if self in opened_dbs:
+ opened_dbs.remove(self)
+ self.need_commit = False
+ self.commit("Closing: %s" % reason)
+ self.log.debug("Close called by %s" % Debug.formatStack())
+ for i in range(5):
+ if len(self.cursors) == 0:
+ break
+ self.log.debug("Pending cursors: %s" % len(self.cursors))
+ time.sleep(0.1 * i)
+ if len(self.cursors):
+ self.log.debug("Killing cursors: %s" % len(self.cursors))
+ self.conn.interrupt()
+
+ if self.cur:
+ self.cur.close()
+ if self.conn:
+ ThreadPool.main_loop.call(self.conn.close)
+ self.conn = None
+ self.cur = None
+ self.log.debug("%s closed (reason: %s) in %.3fs, opened: %s" % (self.db_path, reason, time.time() - s, len(opened_dbs)))
+ self.connect_lock.release()
+ return True
+
+ # Gets a cursor object for the database
+ # Return: Cursor class
+ def getCursor(self):
+ if not self.conn:
+ self.connect()
+
+ cur = DbCursor(self)
+ return cur
+
+ def getSharedCursor(self):
+ if not self.conn:
+ self.connect()
+ return self.cur
+
+ # Get the table version
+ # Return: Table version, or 0 if the table doesn't exist
+ def getTableVersion(self, table_name):
+ if not self.db_keyvalues: # Get db keyvalues
+ try:
+ res = self.execute("SELECT * FROM keyvalue WHERE json_id=0") # json_id = 0 is internal keyvalues
+ except sqlite3.OperationalError as err: # Table doesn't exist
+ self.log.debug("Query table version error: %s" % err)
+ return False
+
+ for row in res:
+ self.db_keyvalues[row["key"]] = row["value"]
+
+ return self.db_keyvalues.get("table.%s.version" % table_name, 0)
+
+ # Check Db tables
+ # Return: Changed table names
+ def checkTables(self):
+ s = time.time()
+ changed_tables = []
+
+ cur = self.getSharedCursor()
+
+ # Check internal tables
+ # Check keyvalue table
+ changed = cur.needTable("keyvalue", [
+ ["keyvalue_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
+ ["key", "TEXT"],
+ ["value", "INTEGER"],
+ ["json_id", "INTEGER"],
+ ], [
+ "CREATE UNIQUE INDEX key_id ON keyvalue(json_id, key)"
+ ], version=self.schema["version"])
+ if changed:
+ changed_tables.append("keyvalue")
+
+ # Create json table if no custom one defined
+ if "json" not in self.schema.get("tables", {}):
+ if self.schema["version"] == 1:
+ changed = cur.needTable("json", [
+ ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
+ ["path", "VARCHAR(255)"]
+ ], [
+ "CREATE UNIQUE INDEX path ON json(path)"
+ ], version=self.schema["version"])
+ elif self.schema["version"] == 2:
+ changed = cur.needTable("json", [
+ ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
+ ["directory", "VARCHAR(255)"],
+ ["file_name", "VARCHAR(255)"]
+ ], [
+ "CREATE UNIQUE INDEX path ON json(directory, file_name)"
+ ], version=self.schema["version"])
+ elif self.schema["version"] == 3:
+ changed = cur.needTable("json", [
+ ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
+ ["site", "VARCHAR(255)"],
+ ["directory", "VARCHAR(255)"],
+ ["file_name", "VARCHAR(255)"]
+ ], [
+ "CREATE UNIQUE INDEX path ON json(directory, site, file_name)"
+ ], version=self.schema["version"])
+ if changed:
+ changed_tables.append("json")
+
+ # Check schema tables
+ for table_name, table_settings in self.schema.get("tables", {}).items():
+ try:
+ indexes = table_settings.get("indexes", [])
+ version = table_settings.get("schema_changed", 0)
+ changed = cur.needTable(
+ table_name, table_settings["cols"],
+ indexes, version=version
+ )
+ if changed:
+ changed_tables.append(table_name)
+ except Exception as err:
+ self.log.error("Error creating table %s: %s" % (table_name, Debug.formatException(err)))
+ raise DbTableError(err, table_name)
+
+ self.log.debug("Db check done in %.3fs, changed tables: %s" % (time.time() - s, changed_tables))
+ if changed_tables:
+ self.db_keyvalues = {} # Refresh table version cache
+
+ return changed_tables
+
+ # Update json file to db
+ # Return: True if matched
+ def updateJson(self, file_path, file=None, cur=None):
+ if not file_path.startswith(self.db_dir):
+ return False # Not from the db dir: Skipping
+ relative_path = file_path[len(self.db_dir):] # File path relative to the db dir
+
+ # Check if filename matches any of mappings in schema
+ matched_maps = []
+ for match, map_settings in self.schema["maps"].items():
+ try:
+ if SafeRe.match(match, relative_path):
+ matched_maps.append(map_settings)
+ except SafeRe.UnsafePatternError as err:
+ self.log.error(err)
+
+ # No match found for the file
+ if not matched_maps:
+ return False
+
+ # Load the json file
+ try:
+ if file is None: # Open the file if no file object was passed
+ file = open(file_path, "rb")
+
+ if file is False: # File deleted
+ data = {}
+ else:
+ if file_path.endswith("json.gz"):
+ file = helper.limitedGzipFile(fileobj=file)
+
+ if sys.version_info.major == 3 and sys.version_info.minor < 6:
+ data = json.loads(file.read().decode("utf8"))
+ else:
+ data = json.load(file)
+ except Exception as err:
+ self.log.debug("Json file %s load error: %s" % (file_path, err))
+ data = {}
+
+ # No cursor specified
+ if not cur:
+ cur = self.getSharedCursor()
+ cur.logging = False
+
+ # Row for current json file if required
+ if not data or [dbmap for dbmap in matched_maps if "to_keyvalue" in dbmap or "to_table" in dbmap]:
+ json_row = cur.getJsonRow(relative_path)
+
+ # Check matched mappings in schema
+ for dbmap in matched_maps:
+ # Insert non-relational key values
+ if dbmap.get("to_keyvalue"):
+ # Get current values
+ res = cur.execute("SELECT * FROM keyvalue WHERE json_id = ?", (json_row["json_id"],))
+ current_keyvalue = {}
+ current_keyvalue_id = {}
+ for row in res:
+ current_keyvalue[row["key"]] = row["value"]
+ current_keyvalue_id[row["key"]] = row["keyvalue_id"]
+
+ for key in dbmap["to_keyvalue"]:
+ if key not in current_keyvalue: # Keyvalue doesn't exist yet in the db
+ cur.execute(
+ "INSERT INTO keyvalue ?",
+ {"key": key, "value": data.get(key), "json_id": json_row["json_id"]}
+ )
+ elif data.get(key) != current_keyvalue[key]: # Keyvalue different value
+ cur.execute(
+ "UPDATE keyvalue SET value = ? WHERE keyvalue_id = ?",
+ (data.get(key), current_keyvalue_id[key])
+ )
+
+ # Insert data to json table for easier joins
+ if dbmap.get("to_json_table"):
+ directory, file_name = re.match("^(.*?)/*([^/]*)$", relative_path).groups()
+ data_json_row = dict(cur.getJsonRow(directory + "/" + dbmap.get("file_name", file_name)))
+ changed = False
+ for key in dbmap["to_json_table"]:
+ if data.get(key) != data_json_row.get(key):
+ changed = True
+ if changed:
+ # Add the custom col values
+ data_json_row.update({key: val for key, val in data.items() if key in dbmap["to_json_table"]})
+ cur.execute("INSERT OR REPLACE INTO json ?", data_json_row)
+
+ # Insert data to tables
+ for table_settings in dbmap.get("to_table", []):
+ if isinstance(table_settings, dict): # Custom settings
+ table_name = table_settings["table"] # Table name to insert datas
+ node = table_settings.get("node", table_name) # Node keyname in data json file
+ key_col = table_settings.get("key_col") # Map dict key as this col
+ val_col = table_settings.get("val_col") # Map dict value as this col
+ import_cols = table_settings.get("import_cols")
+ replaces = table_settings.get("replaces")
+ else: # Simple settings
+ table_name = table_settings
+ node = table_settings
+ key_col = None
+ val_col = None
+ import_cols = None
+ replaces = None
+
+ # Fill import cols from table cols
+ if not import_cols:
+ import_cols = set([item[0] for item in self.schema["tables"][table_name]["cols"]])
+
+ cur.execute("DELETE FROM %s WHERE json_id = ?" % table_name, (json_row["json_id"],))
+
+ if node not in data:
+ continue
+
+ if key_col: # Map as dict
+ for key, val in data[node].items():
+ if val_col: # Single value
+ cur.execute(
+ "INSERT OR REPLACE INTO %s ?" % table_name,
+ {key_col: key, val_col: val, "json_id": json_row["json_id"]}
+ )
+ else: # Multi value
+ if type(val) is dict: # Single row
+ row = val
+ if import_cols:
+ row = {key: row[key] for key in row if key in import_cols} # Filter row by import_cols
+ row[key_col] = key
+ # Replace in value if necessary
+ if replaces:
+ for replace_key, replace in replaces.items():
+ if replace_key in row:
+ for replace_from, replace_to in replace.items():
+ row[replace_key] = row[replace_key].replace(replace_from, replace_to)
+
+ row["json_id"] = json_row["json_id"]
+ cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
+ elif type(val) is list: # Multi row
+ for row in val:
+ row[key_col] = key
+ row["json_id"] = json_row["json_id"]
+ cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
+ else: # Map as list
+ for row in data[node]:
+ row["json_id"] = json_row["json_id"]
+ if import_cols:
+ row = {key: row[key] for key in row if key in import_cols} # Filter row by import_cols
+ cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
+
+ # Cleanup json row
+ if not data:
+ self.log.debug("Cleanup json row for %s" % file_path)
+ cur.execute("DELETE FROM json WHERE json_id = %s" % json_row["json_id"])
+
+ return True
+
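+# Sketch of the schema shape Db expects, with hypothetical values, derived
+# from the checkTables()/updateJson() logic above:
+# {
+#   "db_name": "ZeroTalk", "db_file": "data/users/zerotalk.db", "version": 2,
+#   "maps": {
+#     ".+/data.json": {
+#       "to_keyvalue": ["next_message_id"],
+#       "to_table": ["topic", {"node": "votes", "table": "vote", "key_col": "uri", "val_col": "value"}]
+#     }
+#   },
+#   "tables": {
+#     "topic": {
+#       "cols": [["topic_id", "INTEGER"], ["title", "TEXT"], ["json_id", "INTEGER REFERENCES json (json_id)"]],
+#       "indexes": ["CREATE UNIQUE INDEX topic_key ON topic(topic_id, json_id)"],
+#       "schema_changed": 1
+#     }
+#   }
+# }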
+
+if __name__ == "__main__":
+ s = time.time()
+ console_log = logging.StreamHandler()
+ logging.getLogger('').setLevel(logging.DEBUG)
+ logging.getLogger('').addHandler(console_log)
+ console_log.setLevel(logging.DEBUG)
+ dbjson = Db(json.load(open("zerotalk.schema.json")), "data/users/zerotalk.db")
+ dbjson.collect_stats = True
+ dbjson.checkTables()
+ cur = dbjson.getCursor()
+ cur.logging = False
+ dbjson.updateJson("data/users/content.json", cur=cur)
+ for user_dir in os.listdir("data/users"):
+ if os.path.isdir("data/users/%s" % user_dir):
+ dbjson.updateJson("data/users/%s/data.json" % user_dir, cur=cur)
+ # print ".",
+ cur.logging = True
+ print("Done in %.3fs" % (time.time() - s))
+ for query, stats in sorted(dbjson.query_stats.items()):
+ print("-", query, stats)
diff --git a/src/Db/DbCursor.py b/src/Db/DbCursor.py
new file mode 100644
index 00000000..acb8846d
--- /dev/null
+++ b/src/Db/DbCursor.py
@@ -0,0 +1,246 @@
+import time
+import re
+from util import helper
+
+# Special sqlite cursor
+
+
+class DbCursor:
+
+ def __init__(self, db):
+ self.db = db
+ self.logging = False
+
+ def quoteValue(self, value):
+ if type(value) is int:
+ return str(value)
+ else:
+ return "'%s'" % value.replace("'", "''")
+
+ def parseQuery(self, query, params):
+ query_type = query.split(" ", 1)[0].upper()
+ if isinstance(params, dict) and "?" in query: # Make SELECT and INSERT easier by allowing dict params
+ if query_type in ("SELECT", "DELETE", "UPDATE"):
+ # Convert param dict to SELECT * FROM table WHERE key = ? AND key2 = ? format
+ query_wheres = []
+ values = []
+ for key, value in params.items():
+ if type(value) is list:
+ if key.startswith("not__"):
+ field = key.replace("not__", "")
+ operator = "NOT IN"
+ else:
+ field = key
+ operator = "IN"
+ if len(value) > 100:
+ # Embed values in query to avoid "too many SQL variables" error
+ query_values = ",".join(map(helper.sqlquote, value))
+ else:
+ query_values = ",".join(["?"] * len(value))
+ values += value
+ query_wheres.append(
+ "%s %s (%s)" %
+ (field, operator, query_values)
+ )
+ else:
+ if key.startswith("not__"):
+ query_wheres.append(key.replace("not__", "") + " != ?")
+ elif key.endswith("__like"):
+ query_wheres.append(key.replace("__like", "") + " LIKE ?")
+ elif key.endswith(">"):
+ query_wheres.append(key.replace(">", "") + " > ?")
+ elif key.endswith("<"):
+ query_wheres.append(key.replace("<", "") + " < ?")
+ else:
+ query_wheres.append(key + " = ?")
+ values.append(value)
+ wheres = " AND ".join(query_wheres)
+ if wheres == "":
+ wheres = "1"
+ query = re.sub("(.*)[?]", "\\1 %s" % wheres, query) # Replace the last ?
+ params = values
+ else:
+ # Convert param dict to INSERT INTO table (key, key2) VALUES (?, ?) format
+ keys = ", ".join(params.keys())
+ values = ", ".join(['?' for key in params.keys()])
+ keysvalues = "(%s) VALUES (%s)" % (keys, values)
+ query = re.sub("(.*)[?]", "\\1%s" % keysvalues, query) # Replace the last ?
+ params = tuple(params.values())
+ elif isinstance(params, dict) and ":" in query:
+ new_params = dict()
+ values = []
+ for key, value in params.items():
+ if type(value) is list:
+ for idx, val in enumerate(value):
+ new_params[key + "__" + str(idx)] = val
+
+ new_names = [":" + key + "__" + str(idx) for idx in range(len(value))]
+ query = re.sub(r":" + re.escape(key) + r"([)\s]|$)", "(%s)%s" % (", ".join(new_names), r"\1"), query)
+ else:
+ new_params[key] = value
+
+ params = new_params
+ return query, params
+
+ def execute(self, query, params=None):
+ query = query.strip()
+ while self.db.progress_sleeping or self.db.commiting:
+ time.sleep(0.1)
+
+ self.db.last_query_time = time.time()
+
+ query, params = self.parseQuery(query, params)
+
+ cursor = self.db.getConn().cursor()
+ self.db.cursors.add(cursor)
+ if self.db.lock.locked():
+ self.db.log.debug("Locked for %.3fs" % (time.time() - self.db.lock.time_lock))
+
+ try:
+ s = time.time()
+ self.db.lock.acquire(True)
+ if query.upper().strip("; ") == "VACUUM":
+ self.db.commit("vacuum called")
+ if params:
+ res = cursor.execute(query, params)
+ else:
+ res = cursor.execute(query)
+ finally:
+ self.db.lock.release()
+
+ taken_query = time.time() - s
+ if self.logging or taken_query > 1:
+ if params: # Query has parameters
+ self.db.log.debug("Query: " + query + " " + str(params) + " (Done in %.4f)" % (time.time() - s))
+ else:
+ self.db.log.debug("Query: " + query + " (Done in %.4f)" % (time.time() - s))
+
+ # Log query stats
+ if self.db.collect_stats:
+ if query not in self.db.query_stats:
+ self.db.query_stats[query] = {"call": 0, "time": 0.0}
+ self.db.query_stats[query]["call"] += 1
+ self.db.query_stats[query]["time"] += time.time() - s
+
+ query_type = query.split(" ", 1)[0].upper()
+ is_update_query = query_type in ["UPDATE", "DELETE", "INSERT", "CREATE"]
+ if not self.db.need_commit and is_update_query:
+ self.db.need_commit = True
+
+ if is_update_query:
+ return cursor
+ else:
+ return res
+
+ def executemany(self, query, params):
+ while self.db.progress_sleeping or self.db.commiting:
+ time.sleep(0.1)
+
+ self.db.last_query_time = time.time()
+
+ s = time.time()
+ cursor = self.db.getConn().cursor()
+ self.db.cursors.add(cursor)
+
+ try:
+ self.db.lock.acquire(True)
+ cursor.executemany(query, params)
+ finally:
+ self.db.lock.release()
+
+ taken_query = time.time() - s
+ if self.logging or taken_query > 0.1:
+ self.db.log.debug("Execute many: %s (Done in %.4f)" % (query, taken_query))
+
+ self.db.need_commit = True
+
+ return cursor
+
+ # Creates or updates a database row without incrementing the rowid
+ def insertOrUpdate(self, table, query_sets, query_wheres, oninsert={}):
+ sql_sets = ["%s = :%s" % (key, key) for key in query_sets.keys()]
+ sql_wheres = ["%s = :%s" % (key, key) for key in query_wheres.keys()]
+
+ params = query_sets
+ params.update(query_wheres)
+ res = self.execute(
+ "UPDATE %s SET %s WHERE %s" % (table, ", ".join(sql_sets), " AND ".join(sql_wheres)),
+ params
+ )
+ if res.rowcount == 0:
+ params.update(oninsert) # Add insert-only fields
+ self.execute("INSERT INTO %s ?" % table, params)
+
+ # Create new table
+ # Return: True on success
+ def createTable(self, table, cols):
+ # TODO: Check current structure
+ self.execute("DROP TABLE IF EXISTS %s" % table)
+ col_definitions = []
+ for col_name, col_type in cols:
+ col_definitions.append("%s %s" % (col_name, col_type))
+
+ self.execute("CREATE TABLE %s (%s)" % (table, ",".join(col_definitions)))
+ return True
+
+ # Create indexes on table
+ # Return: True on success
+ def createIndexes(self, table, indexes):
+ for index in indexes:
+ if not index.strip().upper().startswith("CREATE"):
+ self.db.log.error("Index command should start with CREATE: %s" % index)
+ continue
+ self.execute(index)
+
+ # Create table if not exist
+ # Return: True if updated
+ def needTable(self, table, cols, indexes=None, version=1):
+ current_version = self.db.getTableVersion(table)
+ if int(current_version) < int(version): # Table needs update or doesn't exist
+ self.db.log.debug("Table %s outdated... version: %s, need: %s, rebuilding..." % (table, current_version, version))
+ self.createTable(table, cols)
+ if indexes:
+ self.createIndexes(table, indexes)
+ self.execute(
+ "INSERT OR REPLACE INTO keyvalue ?",
+ {"json_id": 0, "key": "table.%s.version" % table, "value": version}
+ )
+ return True
+ else: # Not changed
+ return False
+
+ # Get or create a row for json file
+ # Return: The database row
+ def getJsonRow(self, file_path):
+ directory, file_name = re.match("^(.*?)/*([^/]*)$", file_path).groups()
+ if self.db.schema["version"] == 1:
+ # One path field
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
+ row = res.fetchone()
+ if not row: # No row yet, create it
+ self.execute("INSERT INTO json ?", {"path": file_path})
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
+ row = res.fetchone()
+ elif self.db.schema["version"] == 2:
+ # Separate directory, file_name (easier join)
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
+ row = res.fetchone()
+ if not row: # No row yet, create it
+ self.execute("INSERT INTO json ?", {"directory": directory, "file_name": file_name})
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
+ row = res.fetchone()
+ elif self.db.schema["version"] == 3:
+ # Separate site, directory, file_name (for merger sites)
+ site_address, directory = re.match("^([^/]*)/(.*)$", directory).groups()
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
+ row = res.fetchone()
+ if not row: # No row yet, create it
+ self.execute("INSERT INTO json ?", {"site": site_address, "directory": directory, "file_name": file_name})
+ res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
+ row = res.fetchone()
+ else:
+ raise Exception("Dbschema version %s not supported" % self.db.schema.get("version"))
+ return row
+
+ def close(self):
+ pass
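+
+
+# Usage sketch of the dict-param query forms parseQuery() accepts
+# ("cur" is assumed to be a DbCursor on a schema that has a json table):
+def _exampleDictParams(cur):
+ # Keys become "col = ?" clauses joined by AND; a "not__" prefix negates,
+ # a "__like" suffix uses LIKE, and a trailing ">"/"<" compares.
+ cur.execute("SELECT * FROM json WHERE ?", {"directory": "data", "not__file_name": "x.json"})
+ # For INSERT, keys become the column list and values the bindings.
+ cur.execute("INSERT INTO json ?", {"directory": "data", "file_name": "content.json"})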
diff --git a/src/Db/DbQuery.py b/src/Db/DbQuery.py
new file mode 100644
index 00000000..3fb5ef73
--- /dev/null
+++ b/src/Db/DbQuery.py
@@ -0,0 +1,46 @@
+import re
+
+
+# Parse and modify sql queries
+class DbQuery:
+ def __init__(self, query):
+ self.setQuery(query.strip())
+
+ # Split main parts of query
+ def parseParts(self, query):
+ parts = re.split("(SELECT|FROM|WHERE|ORDER BY|LIMIT)", query)
+ parts = [_f for _f in parts if _f] # Remove empty parts
+ parts = [s.strip() for s in parts] # Remove whitespace
+ return dict(list(zip(parts[0::2], parts[1::2])))
+
+ # Parse selected fields SELECT ... FROM
+ def parseFields(self, query_select):
+ fields = re.findall("([^,]+) AS ([^,]+)", query_select)
+ return {key: val.strip() for val, key in fields}
+
+ # Parse query conditions WHERE ...
+ def parseWheres(self, query_where):
+ if " AND " in query_where:
+ return query_where.split(" AND ")
+ elif query_where:
+ return [query_where]
+ else:
+ return []
+
+ # Set the query
+ def setQuery(self, query):
+ self.parts = self.parseParts(query)
+ self.fields = self.parseFields(self.parts["SELECT"])
+ self.wheres = self.parseWheres(self.parts.get("WHERE", ""))
+
+ # Convert query back to string
+ def __str__(self):
+ query_parts = []
+ for part_name in ["SELECT", "FROM", "WHERE", "ORDER BY", "LIMIT"]:
+ if part_name == "WHERE" and self.wheres:
+ query_parts.append("WHERE")
+ query_parts.append(" AND ".join(self.wheres))
+ elif part_name in self.parts:
+ query_parts.append(part_name)
+ query_parts.append(self.parts[part_name])
+ return "\n".join(query_parts)
diff --git a/src/Db/__init__.py b/src/Db/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/Debug/Debug.py b/src/Debug/Debug.py
new file mode 100644
index 00000000..0ec42615
--- /dev/null
+++ b/src/Debug/Debug.py
@@ -0,0 +1,186 @@
+import sys
+import os
+import re
+from Config import config
+
+
+# Non fatal exception
+class Notify(Exception):
+ def __init__(self, message=None):
+ if message:
+ self.message = message
+
+ def __str__(self):
+ return self.message
+
+
+# Gevent greenlet.kill accepts an Exception type
+def createNotifyType(message):
+ return type("Notify", (Notify, ), {"message": message})
+
+
+def formatExceptionMessage(err):
+ err_type = err.__class__.__name__
+ if err.args:
+ err_message = err.args[-1]
+ else:
+ err_message = err.__str__()
+ return "%s: %s" % (err_type, err_message)
+
+
+python_lib_dirs = [path.replace("\\", "/") for path in sys.path if re.sub(r".*[\\/]", "", path) in ("site-packages", "dist-packages")]
+python_lib_dirs.append(os.path.dirname(os.__file__).replace("\\", "/")) # TODO: check if returns the correct path for PyPy
+
+root_dir = os.path.realpath(os.path.dirname(__file__) + "/../../")
+root_dir = root_dir.replace("\\", "/")
+
+
+def formatTraceback(items, limit=None, fold_builtin=True):
+ back = []
+ i = 0
+ prev_file_title = ""
+ is_prev_builtin = False
+
+ for path, line in items:
+ i += 1
+ is_last = i == len(items)
+ path = path.replace("\\", "/")
+
+ if path.startswith("src/gevent/"):
+ file_title = "/" + path[len("src/gevent/"):]
+ is_builtin = True
+ is_skippable_builtin = False
+ elif path in ("", ""):
+ file_title = "(importlib)"
+ is_builtin = True
+ is_skippable_builtin = True
+ else:
+ is_skippable_builtin = False
+ for base in python_lib_dirs:
+ if path.startswith(base + "/"):
+ file_title = path[len(base + "/"):]
+ module_name, *tail = file_title.split("/")
+ if module_name.endswith(".py"):
+ module_name = module_name[:-3]
+ file_title = "/".join(["<%s>" % module_name] + tail)
+ is_builtin = True
+ break
+ else:
+ is_builtin = False
+ for base in (root_dir + "/src", root_dir + "/plugins", root_dir):
+ if path.startswith(base + "/"):
+ file_title = path[len(base + "/"):]
+ break
+ else:
+ # For unknown paths, do our best to hide absolute path
+ file_title = path
+ for needle in ("/zeronet/", "/core/"):
+ if needle in file_title.lower():
+ file_title = "?/" + file_title[file_title.lower().rindex(needle) + len(needle):]
+
+ # Path compression: A/AB/ABC/X/Y.py -> ABC/X/Y.py
+ # E.g.: in 'Db/DbCursor.py' the directory part is unnecessary
+ if not file_title.startswith("/"):
+ prev_part = ""
+ for depth, part in enumerate(file_title.split("/") + [""]): # Don't shadow the outer loop counter
+ if not part.startswith(prev_part):
+ break
+ prev_part = part
+ file_title = "/".join(file_title.split("/")[depth - 1:])
+
+ if is_skippable_builtin and fold_builtin:
+ pass
+ elif is_builtin and is_prev_builtin and not is_last and fold_builtin:
+ if back[-1] != "...":
+ back.append("...")
+ else:
+ if file_title == prev_file_title:
+ back.append("%s" % line)
+ else:
+ back.append("%s line %s" % (file_title, line))
+
+ prev_file_title = file_title
+ is_prev_builtin = is_builtin
+
+ if limit and i >= limit:
+ back.append("...")
+ break
+ return back
+
+
+def formatException(err=None, format="text"):
+ import traceback
+ if type(err) == Notify:
+ return err
+ elif type(err) == tuple and err and err[0] is not None: # Passed traceback info
+ exc_type, exc_obj, exc_tb = err
+ err = None
+ else: # No traceback info passed, get latest
+ exc_type, exc_obj, exc_tb = sys.exc_info()
+
+ if not err:
+ if hasattr(err, "message"):
+ err = exc_obj.message
+ else:
+ err = exc_obj
+
+ tb = formatTraceback([[frame[0], frame[1]] for frame in traceback.extract_tb(exc_tb)])
+ if format == "html":
+ return "%s: %s %s" % (repr(err), err, " > ".join(tb))
+ else:
+ return "%s: %s in %s" % (exc_type.__name__, err, " > ".join(tb))
+
+
+def formatStack(limit=None):
+ import inspect
+ tb = formatTraceback([[frame[1], frame[2]] for frame in inspect.stack()[1:]], limit=limit)
+ return " > ".join(tb)
+
+
+# Test if gevent eventloop blocks
+import logging
+import gevent
+import time
+
+
+num_block = 0
+
+
+def testBlock():
+ global num_block
+ logging.debug("Gevent block checker started")
+ last_time = time.time()
+ while 1:
+ time.sleep(1)
+ if time.time() - last_time > 1.1:
+ logging.debug("Gevent block detected: %.3fs" % (time.time() - last_time - 1))
+ num_block += 1
+ last_time = time.time()
+
+
+gevent.spawn(testBlock)
+
+
+if __name__ == "__main__":
+ try:
+ print(1 / 0)
+ except Exception as err:
+ print(type(err).__name__)
+ print("1/0 error: %s" % formatException(err))
+
+ def loadJson():
+ json.loads("Errr")
+
+ import json
+ try:
+ loadJson()
+ except Exception as err:
+ print(err)
+ print("Json load error: %s" % formatException(err))
+
+ try:
+ raise Notify("nothing...")
+ except Exception as err:
+ print("Notify: %s" % formatException(err))
+
+ loadJson()
diff --git a/src/Debug/DebugHook.py b/src/Debug/DebugHook.py
new file mode 100644
index 00000000..d100a3b8
--- /dev/null
+++ b/src/Debug/DebugHook.py
@@ -0,0 +1,115 @@
+import sys
+import logging
+import signal
+import importlib
+
+import gevent
+import gevent.hub
+
+from Config import config
+from . import Debug
+
+last_error = None
+
+def shutdown(reason="Unknown"):
+ logging.info("Shutting down (reason: %s)..." % reason)
+ import main
+ if "file_server" in dir(main):
+ try:
+ gevent.spawn(main.file_server.stop)
+ if "ui_server" in dir(main):
+ gevent.spawn(main.ui_server.stop)
+ except Exception as err:
+ print("Proper shutdown error: %s" % err)
+ sys.exit(0)
+ else:
+ sys.exit(0)
+
+# Store last error, ignore notify, allow manual error logging
+def handleError(*args, **kwargs):
+ global last_error
+ if not args: # Called manually
+ args = sys.exc_info()
+ silent = True
+ else:
+ silent = False
+ if args[0].__name__ != "Notify":
+ last_error = args
+
+ if args[0].__name__ == "KeyboardInterrupt":
+ shutdown("Keyboard interrupt")
+ elif not silent and args[0].__name__ != "Notify":
+ logging.exception("Unhandled exception")
+ if "greenlet.py" not in args[2].tb_frame.f_code.co_filename: # Don't display error twice
+ sys.__excepthook__(*args, **kwargs)
+
+
+# Ignore notify errors
+def handleErrorNotify(*args, **kwargs):
+ err = args[0]
+ if err.__name__ == "KeyboardInterrupt":
+ shutdown("Keyboard interrupt")
+ elif err.__name__ != "Notify":
+ logging.error("Unhandled exception: %s" % Debug.formatException(args))
+ sys.__excepthook__(*args, **kwargs)
+
+
+if config.debug: # Keep last error for /Debug
+ sys.excepthook = handleError
+else:
+ sys.excepthook = handleErrorNotify
+
+
+# Override default error handler to allow silent killing / custom logging
+if "handle_error" in dir(gevent.hub.Hub):
+ gevent.hub.Hub._original_handle_error = gevent.hub.Hub.handle_error
+else:
+ logging.debug("gevent.hub.Hub.handle_error not found using old gevent hooks")
+ OriginalGreenlet = gevent.Greenlet
+ class ErrorhookedGreenlet(OriginalGreenlet):
+ def _report_error(self, exc_info):
+ sys.excepthook(exc_info[0], exc_info[1], exc_info[2])
+
+ gevent.Greenlet = gevent.greenlet.Greenlet = ErrorhookedGreenlet
+ importlib.reload(gevent)
+
+def handleGreenletError(context, type, value, tb):
+ if context.__class__ is tuple and context[0].__class__.__name__ == "ThreadPool":
+ # Exceptions in ThreadPool will be handled in the main Thread
+ return None
+
+ if isinstance(value, str):
+ # Cython can raise errors where the value is a plain string
+ # e.g., AttributeError, "_semaphore.Semaphore has no attr",
+ value = type(value)
+
+ if not issubclass(type, gevent.get_hub().NOT_ERROR):
+ sys.excepthook(type, value, tb)
+
+gevent.get_hub().handle_error = handleGreenletError
+
+try:
+ signal.signal(signal.SIGTERM, lambda signum, stack_frame: shutdown("SIGTERM"))
+except Exception as err:
+ logging.debug("Error setting up SIGTERM watcher: %s" % err)
+
+
+if __name__ == "__main__":
+ import time
+ from gevent import monkey
+ monkey.patch_all(thread=False, ssl=False)
+ from . import Debug
+
+ def sleeper(num):
+ print("started", num)
+ time.sleep(3)
+ raise Exception("Error")
+ print("stopped", num)
+ thread1 = gevent.spawn(sleeper, 1)
+ thread2 = gevent.spawn(sleeper, 2)
+ time.sleep(1)
+ print("killing...")
+ thread1.kill(exception=Debug.Notify("Worker stopped"))
+ #thread2.throw(Debug.Notify("Throw"))
+ print("killed")
+ gevent.joinall([thread1,thread2])
diff --git a/src/Debug/DebugLock.py b/src/Debug/DebugLock.py
new file mode 100644
index 00000000..9cf22520
--- /dev/null
+++ b/src/Debug/DebugLock.py
@@ -0,0 +1,24 @@
+import time
+import logging
+
+import gevent.lock
+
+from Debug import Debug
+
+
+class DebugLock:
+ def __init__(self, log_after=0.01, name="Lock"):
+ self.name = name
+ self.log_after = log_after
+ self.lock = gevent.lock.Semaphore(1)
+ self.release = self.lock.release
+
+ def acquire(self, *args, **kwargs):
+ s = time.time()
+ res = self.lock.acquire(*args, **kwargs)
+ time_taken = time.time() - s
+ if time_taken >= self.log_after:
+ logging.debug("%s: Waited %.3fs after called by %s" %
+ (self.name, time_taken, Debug.formatStack())
+ )
+ return res
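+
+
+# Usage sketch: a drop-in replacement for gevent.lock.Semaphore when waits
+# longer than log_after should show up in the debug log.
+def _exampleCriticalSection():
+ lock = DebugLock(name="ExampleLock")
+ lock.acquire(True)
+ try:
+ pass # Critical section goes here
+ finally:
+ lock.release()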
diff --git a/src/Debug/DebugMedia.py b/src/Debug/DebugMedia.py
new file mode 100644
index 00000000..a892dc56
--- /dev/null
+++ b/src/Debug/DebugMedia.py
@@ -0,0 +1,135 @@
+import os
+import subprocess
+import re
+import logging
+import time
+import functools
+
+from Config import config
+from util import helper
+
+
+# Find files with extension in path
+def findfiles(path, find_ext):
+ def sorter(f1, f2):
+ f1 = f1[0].replace(path, "")
+ f2 = f2[0].replace(path, "")
+ if f1 == "":
+ return 1
+ elif f2 == "":
+ return -1
+ else:
+ return helper.cmp(f1.lower(), f2.lower())
+
+ for root, dirs, files in sorted(os.walk(path, topdown=False), key=functools.cmp_to_key(sorter)):
+ for file in sorted(files):
+ file_path = root + "/" + file
+ file_ext = file.split(".")[-1]
+ if file_ext in find_ext and not file.startswith("all."):
+ yield file_path.replace("\\", "/")
+
+
+# Try to find coffeescript compiler in path
+def findCoffeescriptCompiler():
+ coffeescript_compiler = None
+ try:
+ import distutils.spawn
+ coffeescript_compiler = helper.shellquote(distutils.spawn.find_executable("coffee")) + " --no-header -p"
+ except Exception:
+ pass
+ if coffeescript_compiler:
+ return coffeescript_compiler
+ else:
+ return False
+
+
+# Generates: all.js: merge *.js, compile coffeescript, all.css: merge *.css, vendor prefix features
+def merge(merged_path):
+ merged_path = merged_path.replace("\\", "/")
+ merge_dir = os.path.dirname(merged_path)
+ s = time.time()
+ ext = merged_path.split(".")[-1]
+ if ext == "js": # If merging .js find .coffee too
+ find_ext = ["js", "coffee"]
+ else:
+ find_ext = [ext]
+
+ # If exist check the other files modification date
+ if os.path.isfile(merged_path):
+ merged_mtime = os.path.getmtime(merged_path)
+ else:
+ merged_mtime = 0
+
+ changed = {}
+ for file_path in findfiles(merge_dir, find_ext):
+ if os.path.getmtime(file_path) > merged_mtime + 1:
+ changed[file_path] = True
+ if not changed:
+ return # Assets not changed, nothing to do
+
+ old_parts = {}
+ if os.path.isfile(merged_path): # Find old parts to avoid unnecessary recompilation
+ merged_old = open(merged_path, "rb").read()
+ for match in re.findall(rb"(/\* ---- (.*?) ---- \*/(.*?)(?=/\* ----|$))", merged_old, re.DOTALL):
+ old_parts[match[1].decode()] = match[2].strip(b"\n\r")
+
+ logging.debug("Merging %s (changed: %s, old parts: %s)" % (merged_path, changed, len(old_parts)))
+ # Merge files
+ parts = []
+ s_total = time.time()
+ for file_path in findfiles(merge_dir, find_ext):
+ file_relative_path = file_path.replace(merge_dir + "/", "")
+ parts.append(b"\n/* ---- %s ---- */\n\n" % file_relative_path.encode("utf8"))
+ if file_path.endswith(".coffee"): # Compile coffee script
+ if file_path in changed or file_relative_path not in old_parts: # Only recompile if changed or it hasn't been compiled before
+ if config.coffeescript_compiler is None:
+ config.coffeescript_compiler = findCoffeescriptCompiler()
+ if not config.coffeescript_compiler:
+ logging.error("No coffeescript compiler defined, skipping compiling %s" % merged_path)
+ return False # No coffeescript compiler, skip this file
+
+ # Replace / with os separators and escape it
+ file_path_escaped = helper.shellquote(file_path.replace("/", os.path.sep))
+
+ if "%s" in config.coffeescript_compiler: # Replace %s with coffeescript file
+ command = config.coffeescript_compiler.replace("%s", file_path_escaped)
+ else: # Put coffeescript file to end
+ command = config.coffeescript_compiler + " " + file_path_escaped
+
+ # Start compiling
+ s = time.time()
+ compiler = subprocess.Popen(command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+ out = compiler.stdout.read()
+ compiler.wait()
+ logging.debug("Running: %s (Done in %.2fs)" % (command, time.time() - s))
+
+ # Check errors
+ if out and out.startswith(b"("): # No error found
+ parts.append(out)
+ else: # Put error message in place of source code
+ error = out
+ logging.error("%s Compile error: %s" % (file_relative_path, error))
+ error_escaped = re.escape(error).replace(b"\n", b"\\n").replace(br"\\n", br"\n")
+ parts.append(
+ b"alert('%s compile error: %s');" %
+ (file_relative_path.encode(), error_escaped)
+ )
+ else: # Not changed, use the old part
+ parts.append(old_parts[file_relative_path])
+ else: # Add to parts
+ parts.append(open(file_path, "rb").read())
+
+ merged = b"\n".join(parts)
+ if ext == "css": # Vendor prefix css
+ from lib.cssvendor import cssvendor
+ merged = cssvendor.prefix(merged)
+ merged = merged.replace(b"\r", b"")
+ open(merged_path, "wb").write(merged)
+ logging.debug("Merged %s (%.2fs)" % (merged_path, time.time() - s_total))
+
+
+if __name__ == "__main__":
+ logging.getLogger().setLevel(logging.DEBUG)
+ os.chdir("..")
+ config.coffeescript_compiler = r'type "%s" | tools\coffee-node\bin\node.exe tools\coffee-node\bin\coffee --no-header -s -p'
+ merge("data/12Hw8rTgzrNo4DSh2AkqwPRqDyTticwJyH/js/all.js")
diff --git a/src/Debug/DebugReloader.py b/src/Debug/DebugReloader.py
new file mode 100644
index 00000000..482c7921
--- /dev/null
+++ b/src/Debug/DebugReloader.py
@@ -0,0 +1,69 @@
+import logging
+import time
+import os
+
+from Config import config
+
+if config.debug and config.action == "main":
+ try:
+ import watchdog
+ import watchdog.observers
+ import watchdog.events
+ logging.debug("Watchdog fs listener detected, source code autoreload enabled")
+ enabled = True
+ except Exception as err:
+ logging.debug("Watchdog fs listener could not be loaded: %s" % err)
+ enabled = False
+else:
+ enabled = False
+
+
+class DebugReloader:
+ def __init__(self, paths=None):
+ if not paths:
+ paths = ["src", "plugins", config.data_dir + "/__plugins__"]
+ self.log = logging.getLogger("DebugReloader")
+ self.last_changed = 0
+ self.callbacks = []
+ if enabled:
+ self.observer = watchdog.observers.Observer()
+ event_handler = watchdog.events.FileSystemEventHandler()
+ event_handler.on_modified = event_handler.on_deleted = self.onChanged
+ event_handler.on_created = event_handler.on_moved = self.onChanged
+ for path in paths:
+ if not os.path.isdir(path):
+ continue
+ self.log.debug("Adding autoreload: %s" % path)
+ self.observer.schedule(event_handler, path, recursive=True)
+ self.observer.start()
+
+ def addCallback(self, f):
+ self.callbacks.append(f)
+
+ def onChanged(self, evt):
+ path = evt.src_path
+ ext = path.rsplit(".", 1)[-1]
+ if ext not in ["py", "json"] or "Test" in path or time.time() - self.last_chaged < 1.0:
+ return False
+ self.last_chaged = time.time()
+ if os.path.isfile(path):
+ time_modified = os.path.getmtime(path)
+ else:
+ time_modified = 0
+ self.log.debug("File changed: %s reloading source code (modified %.3fs ago)" % (evt, time.time() - time_modified))
+ if time.time() - time_modified > 5: # Probably it's just an attribute change, ignore it
+ return False
+
+ time.sleep(0.1) # Wait for lock release
+ for callback in self.callbacks:
+ try:
+ callback()
+ except Exception as err:
+ self.log.exception(err)
+
+ def stop(self):
+ if enabled:
+ self.observer.stop()
+ self.log.debug("Stopped autoreload observer")
+
+watcher = DebugReloader()
diff --git a/src/Debug/__init__.py b/src/Debug/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/File/FileRequest.py b/src/File/FileRequest.py
new file mode 100644
index 00000000..c082c378
--- /dev/null
+++ b/src/File/FileRequest.py
@@ -0,0 +1,450 @@
+# Included modules
+import os
+import time
+import json
+import collections
+import itertools
+
+# Third party modules
+import gevent
+
+from Debug import Debug
+from Config import config
+from util import RateLimit
+from util import Msgpack
+from util import helper
+from Plugin import PluginManager
+from contextlib import closing
+
+FILE_BUFF = 1024 * 512
+
+
+class RequestError(Exception):
+ pass
+
+
+# Incoming requests
+@PluginManager.acceptPlugins
+class FileRequest(object):
+ __slots__ = ("server", "connection", "req_id", "sites", "log", "responded")
+
+ def __init__(self, server, connection):
+ self.server = server
+ self.connection = connection
+
+ self.req_id = None
+ self.sites = self.server.sites
+ self.log = server.log
+ self.responded = False # Responded to the request
+
+ def send(self, msg, streaming=False):
+ if not self.connection.closed:
+ self.connection.send(msg, streaming)
+
+ def sendRawfile(self, file, read_bytes):
+ if not self.connection.closed:
+ self.connection.sendRawfile(file, read_bytes)
+
+ def response(self, msg, streaming=False):
+ if self.responded:
+ if config.verbose:
+ self.log.debug("Req id %s already responded" % self.req_id)
+ return
+ if not isinstance(msg, dict): # If msg not a dict create a {"body": msg}
+ msg = {"body": msg}
+ msg["cmd"] = "response"
+ msg["to"] = self.req_id
+ self.responded = True
+ self.send(msg, streaming=streaming)
+
+ # Route file requests
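+ # "cmd" is dispatched to a method named action<Cmd> (e.g. "pex" -> actionPex);
+ # plugins loaded via PluginManager.acceptPlugins can add handlers the same way.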
+ def route(self, cmd, req_id, params):
+ self.req_id = req_id
+ # Don't allow other sites than locked
+ if "site" in params and self.connection.target_onion:
+ valid_sites = self.connection.getValidSites()
+ if params["site"] not in valid_sites and valid_sites != ["global"]:
+ self.response({"error": "Invalid site"})
+ self.connection.log(
+ "Site lock violation: %s not in %s, target onion: %s" %
+ (params["site"], valid_sites, self.connection.target_onion)
+ )
+ self.connection.badAction(5)
+ return False
+
+ if cmd == "update":
+ event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
+ # If called more than once within 15 sec, only keep the last update
+ RateLimit.callAsync(event, max(self.connection.bad_actions, 15), self.actionUpdate, params)
+ else:
+ func_name = "action" + cmd[0].upper() + cmd[1:]
+ func = getattr(self, func_name, None)
+ if cmd not in ["getFile", "streamFile"]: # Skip IO bound functions
+ if self.connection.cpu_time > 0.5:
+ self.log.debug(
+ "Delay %s %s, cpu_time used by connection: %.3fs" %
+ (self.connection.ip, cmd, self.connection.cpu_time)
+ )
+ time.sleep(self.connection.cpu_time)
+ if self.connection.cpu_time > 5:
+ self.connection.close("Cpu time: %.3fs" % self.connection.cpu_time)
+ s = time.time()
+ if func:
+ func(params)
+ else:
+ self.actionUnknown(cmd, params)
+
+ if cmd not in ["getFile", "streamFile"]:
+ taken = time.time() - s
+ taken_sent = self.connection.last_sent_time - self.connection.last_send_time
+ self.connection.cpu_time += taken - taken_sent
+
+ # Update a site file request
+ def actionUpdate(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(1)
+ self.connection.badAction(5)
+ return False
+
+ inner_path = params.get("inner_path", "")
+ if not inner_path.endswith("content.json"):
+ self.response({"error": "Only content.json update allowed"})
+ self.connection.badAction(5)
+ return
+
+ current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
+ should_validate_content = True
+ if "modified" in params and params["modified"] <= current_content_modified:
+ should_validate_content = False
+ valid = None # Same or earlier content as we have
+
+ body = params["body"]
+ if not body: # No body sent, we have to download it first
+ site.log.debug("Missing body from update for file %s, downloading ..." % inner_path)
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
+ try:
+ body = peer.getFile(site.address, inner_path).read()
+ except Exception as err:
+ site.log.debug("Can't download updated file %s: %s" % (inner_path, err))
+ self.response({"error": "Invalid File update: Failed to download updated file content"})
+ self.connection.badAction(5)
+ return
+
+ if should_validate_content:
+ try:
+ if type(body) is str:
+ body = body.encode()
+ # elif type(body) is list:
+ # content = json.loads(bytes(list).decode())
+ content = json.loads(body.decode())
+ except Exception as err:
+ site.log.debug("Update for %s is invalid JSON: %s" % (inner_path, err))
+ self.response({"error": "File invalid JSON"})
+ self.connection.badAction(5)
+ return
+
+ file_uri = "%s/%s:%s" % (site.address, inner_path, content["modified"])
+
+ if self.server.files_parsing.get(file_uri): # Check if we're already working on it
+ valid = None # Same file
+ else:
+ error = None # Set if verification raises; reported back to the peer below
+ try:
+ valid = site.content_manager.verifyFile(inner_path, content)
+ except Exception as err:
+ site.log.debug("Update for %s is invalid: %s" % (inner_path, err))
+ error = err
+ valid = False
+
+ if valid is True: # Valid and changed
+ site.log.info("Update for %s looks valid, saving..." % inner_path)
+ self.server.files_parsing[file_uri] = True
+ site.storage.write(inner_path, body)
+ del params["body"]
+
+ site.onFileDone(inner_path) # Trigger filedone
+
+ # Download every changed file from peer
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
+ # On complete publish to other peers
+ diffs = params.get("diffs", {})
+ site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs, limit=6), "publish_%s" % inner_path)
+
+ # Load new content file and download changed files in new thread
+ def downloader():
+ site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
+ del self.server.files_parsing[file_uri]
+
+ gevent.spawn(downloader)
+
+ self.response({"ok": "Thanks, file %s updated!" % inner_path})
+ self.connection.goodAction()
+
+ elif valid is None: # Not changed
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update old") # Add or get peer
+ if peer:
+ if not peer.connection:
+ peer.connect(self.connection) # Assign current connection to peer
+ if inner_path in site.content_manager.contents:
+ peer.last_content_json_update = site.content_manager.contents[inner_path]["modified"]
+ if config.verbose:
+ site.log.debug(
+ "Same version, adding new peer for locked files: %s, tasks: %s" %
+ (peer.key, len(site.worker_manager.tasks))
+ )
+ for task in site.worker_manager.tasks: # Add the new peer to every ongoing task
+ if task["peers"] and not task["optional_hash_id"]:
+ # Download the file from this peer too if it's peer-locked
+ site.needFile(task["inner_path"], peer=peer, update=True, blocking=False)
+
+ self.response({"ok": "File not changed"})
+ self.connection.badAction()
+
+ else: # Invalid sign or sha hash
+ self.response({"error": "File %s invalid: %s" % (inner_path, error)})
+ self.connection.badAction(5)
+
+ def isReadable(self, site, inner_path, file, pos):
+ return True
+
+ # Send file content request
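+ # Expected params (per the reads below): "site", "inner_path", "location"
+ # (byte offset), optional "read_bytes" (chunk size, defaults to FILE_BUFF)
+ # and optional "file_size" for validation.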
+ def handleGetFile(self, params, streaming=False):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+ try:
+ file_path = site.storage.getPath(params["inner_path"])
+ if streaming:
+ file_obj = site.storage.open(params["inner_path"])
+ else:
+ file_obj = Msgpack.FilePart(file_path, "rb")
+
+ with file_obj as file:
+ file.seek(params["location"])
+ read_bytes = params.get("read_bytes", FILE_BUFF)
+ file_size = os.fstat(file.fileno()).st_size
+
+ if file_size > read_bytes: # Check if file is readable at current position (for big files)
+ if not self.isReadable(site, params["inner_path"], file, params["location"]):
+ raise RequestError("File not readable at position: %s" % params["location"])
+ else:
+ if params.get("file_size") and params["file_size"] != file_size:
+ self.connection.badAction(2)
+ raise RequestError("File size does not match: %sB != %sB" % (params["file_size"], file_size))
+
+ if not streaming:
+ file.read_bytes = read_bytes
+
+ if params["location"] > file_size:
+ self.connection.badAction(5)
+ raise RequestError("Bad file location")
+
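+ # In streaming mode the msgpack response is followed by stream_bytes of raw file data on the socket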
+ if streaming:
+ back = {
+ "size": file_size,
+ "location": min(file.tell() + read_bytes, file_size),
+ "stream_bytes": min(read_bytes, file_size - params["location"])
+ }
+ self.response(back)
+ self.sendRawfile(file, read_bytes=read_bytes)
+ else:
+ back = {
+ "body": file,
+ "size": file_size,
+ "location": min(file.tell() + file.read_bytes, file_size)
+ }
+ self.response(back, streaming=True)
+
+ bytes_sent = min(read_bytes, file_size - params["location"]) # Number of bytes we're going to send
+ site.settings["bytes_sent"] = site.settings.get("bytes_sent", 0) + bytes_sent
+ if config.debug_socket:
+ self.log.debug("File %s at position %s sent %s bytes" % (file_path, params["location"], bytes_sent))
+
+ # Add peer to site if not added before
+ connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
+ if connected_peer: # Just added
+ connected_peer.connect(self.connection) # Assign current connection to peer
+
+ return {"bytes_sent": bytes_sent, "file_size": file_size, "location": params["location"]}
+
+ except RequestError as err:
+ self.log.debug("GetFile %s %s %s request error: %s" % (self.connection, params["site"], params["inner_path"], Debug.formatException(err)))
+ self.response({"error": "File read error: %s" % err})
+ except OSError as err:
+ if config.verbose:
+ self.log.debug("GetFile read error: %s" % Debug.formatException(err))
+ self.response({"error": "File read error"})
+ return False
+ except Exception as err:
+ self.log.error("GetFile exception: %s" % Debug.formatException(err))
+ self.response({"error": "File read exception"})
+ return False
+
+ def actionGetFile(self, params):
+ return self.handleGetFile(params)
+
+ def actionStreamFile(self, params):
+ return self.handleGetFile(params, streaming=True)
+
+ # Peer exchange request
+ def actionPex(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+
+ got_peer_keys = []
+ added = 0
+
+ # Add requester peer to site
+ connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
+
+ if connected_peer: # It was not registered before
+ added += 1
+ connected_peer.connect(self.connection) # Assign current connection to peer
+
+ # Add sent peers to site
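+ # Addresses arrive packed as binary: 4-byte IP + 2-byte port for IPv4, 16 + 2 bytes for IPv6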
+ for packed_address in itertools.chain(params.get("peers", []), params.get("peers_ipv6", [])):
+ address = helper.unpackAddress(packed_address)
+ got_peer_keys.append("%s:%s" % address)
+ if site.addPeer(*address, source="pex"):
+ added += 1
+
+ # Add sent onion peers to site
+ for packed_address in params.get("peers_onion", []):
+ address = helper.unpackOnionAddress(packed_address)
+ got_peer_keys.append("%s:%s" % address)
+ if site.addPeer(*address, source="pex"):
+ added += 1
+
+ # Send back peers that are not in the sent list and are connectable (not port 0)
+ packed_peers = helper.packPeers(site.getConnectablePeers(params["need"], ignore=got_peer_keys, allow_private=False))
+
+ if added:
+ site.worker_manager.onPeers()
+ if config.verbose:
+ self.log.debug(
+ "Added %s peers to %s using pex, sending back %s" %
+ (added, site, {key: len(val) for key, val in packed_peers.items()})
+ )
+
+ back = {
+ "peers": packed_peers["ipv4"],
+ "peers_ipv6": packed_peers["ipv6"],
+ "peers_onion": packed_peers["onion"]
+ }
+
+ self.response(back)
+
+ # List content.json files modified since the given timestamp
+ def actionListModified(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+ modified_files = site.content_manager.listModified(params["since"])
+
+ # Add peer to site if not added before
+ connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
+ if connected_peer: # Just added
+ connected_peer.connect(self.connection) # Assign current connection to peer
+
+ self.response({"modified_files": modified_files})
+
+ def actionGetHashfield(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+
+ # Add peer to site if not added before
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="request")
+ if not peer.connection: # Just added
+ peer.connect(self.connection) # Assign current connection to peer
+
+ peer.time_my_hashfield_sent = time.time() # Don't send again if not changed
+
+ self.response({"hashfield_raw": site.content_manager.hashfield.tobytes()})
+
+ def findHashIds(self, site, hash_ids, limit=100):
+ back = collections.defaultdict(lambda: collections.defaultdict(list))
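+ # Result shape: {ip_type: {hash_id: [packed_address, ...]}}, capped at 20 peers per hash below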
+ found = site.worker_manager.findOptionalHashIds(hash_ids, limit=limit)
+
+ for hash_id, peers in found.items():
+ for peer in peers:
+ ip_type = helper.getIpType(peer.ip)
+ if len(back[ip_type][hash_id]) < 20:
+ back[ip_type][hash_id].append(peer.packMyAddress())
+ return back
+
+ def actionFindHashIds(self, params):
+ site = self.sites.get(params["site"])
+ s = time.time()
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+
+ event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
+ if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
+ time.sleep(0.1)
+ back = self.findHashIds(site, params["hash_ids"], limit=10)
+ else:
+ back = self.findHashIds(site, params["hash_ids"])
+ RateLimit.called(event_key)
+
+ my_hashes = []
+ my_hashfield_set = set(site.content_manager.hashfield)
+ for hash_id in params["hash_ids"]:
+ if hash_id in my_hashfield_set:
+ my_hashes.append(hash_id)
+
+ if config.verbose:
+ self.log.debug(
+ "Found: %s for %s hashids in %.3fs" %
+ ({key: len(val) for key, val in back.items()}, len(params["hash_ids"]), time.time() - s)
+ )
+ self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_ipv6": back["ipv6"], "my": my_hashes})
+
+ def actionSetHashfield(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+
+ # Add or get peer
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection, source="request")
+ if not peer.connection:
+ peer.connect(self.connection)
+ peer.hashfield.replaceFromBytes(params["hashfield_raw"])
+ self.response({"ok": "Updated"})
+
+ # Send a simple Pong! answer
+ def actionPing(self, params):
+ self.response(b"Pong!")
+
+ # Check requested port of the other peer
+ def actionCheckport(self, params):
+ if helper.getIpType(self.connection.ip) == "ipv6":
+ sock_address = (self.connection.ip, params["port"], 0, 0)
+ else:
+ sock_address = (self.connection.ip, params["port"])
+
+ with closing(helper.createSocket(self.connection.ip)) as sock:
+ sock.settimeout(5)
+ if sock.connect_ex(sock_address) == 0:
+ self.response({"status": "open", "ip_external": self.connection.ip})
+ else:
+ self.response({"status": "closed", "ip_external": self.connection.ip})
+
+ # Unknown command
+ def actionUnknown(self, cmd, params):
+ self.response({"error": "Unknown command: %s" % cmd})
+ self.connection.badAction(5)
diff --git a/src/File/FileServer.py b/src/File/FileServer.py
new file mode 100644
index 00000000..b7a942fc
--- /dev/null
+++ b/src/File/FileServer.py
@@ -0,0 +1,409 @@
+import logging
+import time
+import random
+import socket
+import sys
+
+import gevent
+import gevent.pool
+from gevent.server import StreamServer
+
+import util
+from util import helper
+from Config import config
+from .FileRequest import FileRequest
+from Peer import PeerPortchecker
+from Site import SiteManager
+from Connection import ConnectionServer
+from Plugin import PluginManager
+from Debug import Debug
+
+
+@PluginManager.acceptPlugins
+class FileServer(ConnectionServer):
+
+ def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
+ self.site_manager = SiteManager.site_manager
+ self.portchecker = PeerPortchecker.PeerPortchecker(self)
+ self.log = logging.getLogger("FileServer")
+ self.ip_type = ip_type
+ self.ip_external_list = []
+
+ self.supported_ip_types = ["ipv4"] # Outgoing ip_type support
+ if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
+ self.supported_ip_types.append("ipv6")
+
+ if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
+ ip = ip.replace("*", "::")
+ else:
+ ip = ip.replace("*", "0.0.0.0")
+
+ if config.tor == "always":
+ port = config.tor_hs_port
+ config.fileserver_port = port
+ elif port == 0: # Use random port
+ port_range_from, port_range_to = list(map(int, config.fileserver_port_range.split("-")))
+ port = self.getRandomPort(ip, port_range_from, port_range_to)
+ config.fileserver_port = port
+ if not port:
+ raise Exception("Can't find bindable port")
+ if not config.tor == "always":
+ config.saveValue("fileserver_port", port) # Save random port value for next restart
+ config.arguments.fileserver_port = port
+
+ ConnectionServer.__init__(self, ip, port, self.handleRequest)
+ self.log.debug("Supported IP types: %s" % self.supported_ip_types)
+
+ if ip_type == "dual" and ip == "::":
+ # Also bind to an ipv4 address in dual mode
+ try:
+ self.log.debug("Binding proxy to %s:%s" % ("0.0.0.0", self.port))
+ self.stream_server_proxy = StreamServer(
+ ("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
+ )
+ except Exception as err:
+ self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
+
+ self.port_opened = {}
+
+ self.sites = self.site_manager.sites
+ self.last_request = time.time()
+ self.files_parsing = {}
+ self.ui_server = None
+
+ def getRandomPort(self, ip, port_range_from, port_range_to):
+ """Generates Random Port from given range
+ Args:
+ ip: IP Address
+ port_range_from: From Range
+ port_range_to: to Range
+ """
+ self.log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
+ tried = []
+ for bind_retry in range(100):
+ port = random.randint(port_range_from, port_range_to)
+ if port in tried:
+ continue
+ tried.append(port)
+ sock = helper.createSocket(ip)
+ try:
+ sock.bind((ip, port))
+ success = True
+ except Exception as err:
+ self.log.warning("Error binding to port %s: %s" % (port, err))
+ success = False
+ sock.close()
+ if success:
+ self.log.info("Found unused random port: %s" % port)
+ return port
+ else:
+ time.sleep(0.1)
+ return False
+
+ def isIpv6Supported(self):
+ if config.tor == "always":
+ return True
+ # Test if we can connect to ipv6 address
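+ # Connecting a UDP socket sends no packets; it only asks the routing table for a local address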
+ ipv6_testip = "fcec:ae97:8902:d810:6c92:ec67:efb2:3ec5"
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ sock.connect((ipv6_testip, 80))
+ local_ipv6 = sock.getsockname()[0]
+ if local_ipv6 == "::1":
+ self.log.debug("IPv6 not supported, no local IPv6 address")
+ return False
+ else:
+ self.log.debug("IPv6 supported on IP %s" % local_ipv6)
+ return True
+ except socket.error as err:
+ self.log.warning("IPv6 not supported: %s" % err)
+ return False
+ except Exception as err:
+ self.log.error("IPv6 check error: %s" % err)
+ return False
+
+ def listenProxy(self):
+ try:
+ self.stream_server_proxy.serve_forever()
+ except Exception as err:
+ if err.errno == 98: # Address already in use error
+ self.log.debug("StreamServer proxy listen error: %s" % err)
+ else:
+ self.log.info("StreamServer proxy listen error: %s" % err)
+
+ # Handle request to fileserver
+ def handleRequest(self, connection, message):
+ if config.verbose:
+ if "params" in message:
+ self.log.debug(
+ "FileRequest: %s %s %s %s" %
+ (str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
+ )
+ else:
+ self.log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
+ req = FileRequest(self, connection)
+ req.route(message["cmd"], message.get("req_id"), message.get("params"))
+ if not self.has_internet and not connection.is_private_ip:
+ self.has_internet = True
+ self.onInternetOnline()
+
+ def onInternetOnline(self):
+ self.log.info("Internet online")
+ gevent.spawn(self.checkSites, check_files=False, force_port_check=True)
+
+ # Reload the FileRequest class to prevent restarts in debug mode
+ def reload(self):
+ global FileRequest
+ import imp
+ FileRequest = imp.load_source("FileRequest", "src/File/FileRequest.py").FileRequest
+
+ def portCheck(self):
+ if config.offline:
+ self.log.info("Offline mode: port check disabled")
+ res = {"ipv4": None, "ipv6": None}
+ self.port_opened = res
+ return res
+
+ if config.ip_external:
+ for ip_external in config.ip_external:
+ SiteManager.peer_blacklist.append((ip_external, self.port)) # Add myself to peer blacklist
+
+ ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
+ res = {
+ "ipv4": "ipv4" in ip_external_types,
+ "ipv6": "ipv6" in ip_external_types
+ }
+ self.ip_external_list = config.ip_external
+ self.port_opened.update(res)
+ self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
+ return res
+
+ self.port_opened = {}
+ if self.ui_server:
+ self.ui_server.updateWebsocket()
+
+ if "ipv6" in self.supported_ip_types:
+ res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
+ else:
+ res_ipv6_thread = None
+
+ res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
+ if not res_ipv4["opened"] and config.tor != "always":
+ if self.portchecker.portOpen(self.port):
+ res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
+
+ if res_ipv6_thread is None:
+ res_ipv6 = {"ip": None, "opened": None}
+ else:
+ res_ipv6 = res_ipv6_thread.get()
+ if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
+ self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
+ res_ipv6["opened"] = False
+
+ self.ip_external_list = []
+ for res_ip in [res_ipv4, res_ipv6]:
+ if res_ip["ip"] and res_ip["ip"] not in self.ip_external_list:
+ self.ip_external_list.append(res_ip["ip"])
+ SiteManager.peer_blacklist.append((res_ip["ip"], self.port))
+
+ self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
+
+ res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
+
+ # Add external IPs from local interfaces
+ interface_ips = helper.getInterfaceIps("ipv4")
+ if "ipv6" in self.supported_ip_types:
+ interface_ips += helper.getInterfaceIps("ipv6")
+ for ip in interface_ips:
+ if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
+ self.ip_external_list.append(ip)
+ res[helper.getIpType(ip)] = True # Assume the port is open if an interface has a public IP
+ SiteManager.peer_blacklist.append((ip, self.port))
+ self.log.debug("External ip found on interfaces: %s" % ip)
+
+ self.port_opened.update(res)
+
+ if self.ui_server:
+ self.ui_server.updateWebsocket()
+
+ return res
+
+ # Check site file integrity
+ def checkSite(self, site, check_files=False):
+ if site.isServing():
+ site.announce(mode="startup") # Announce site to tracker
+ site.update(check_files=check_files) # Update site's content.json and download changed files
+ site.sendMyHashfield()
+ site.updateHashfield()
+
+ # Check sites integrity
+ @util.Noparallel()
+ def checkSites(self, check_files=False, force_port_check=False):
+ self.log.debug("Checking sites...")
+ s = time.time()
+ sites_checking = False
+ if not self.port_opened or force_port_check: # Test and open port if not tested yet
+ if len(self.sites) <= 2: # Don't wait for port opening on first startup
+ sites_checking = True
+ for address, site in list(self.sites.items()):
+ gevent.spawn(self.checkSite, site, check_files)
+
+ self.portCheck()
+
+ if not self.port_opened["ipv4"]:
+ self.tor_manager.startOnions()
+
+ if not sites_checking:
+ check_pool = gevent.pool.Pool(5)
+ # Check sites integrity
+ for site in sorted(list(self.sites.values()), key=lambda site: site.settings.get("modified", 0), reverse=True):
+ if not site.isServing():
+ continue
+ check_thread = check_pool.spawn(self.checkSite, site, check_files) # Check in new thread
+ time.sleep(2)
+ if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
+ check_thread.join(timeout=5)
+ self.log.debug("Checksites done in %.3fs" % (time.time() - s))
+
+ def cleanupSites(self):
+ import gc
+ startup = True
+ time.sleep(5 * 60) # Sites already cleaned up on startup
+ peers_protected = set([])
+ while 1:
+ # Sites health care every 20 min
+ self.log.debug(
+ "Running site cleanup, connections: %s, internet: %s, protected peers: %s" %
+ (len(self.connections), self.has_internet, len(peers_protected))
+ )
+
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
+ continue
+
+ if not startup:
+ site.cleanupPeers(peers_protected)
+
+ time.sleep(1) # Prevent too frequent requests
+
+ peers_protected = set([])
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
+ continue
+
+ if site.peers:
+ with gevent.Timeout(10, exception=False):
+ site.announcer.announcePex()
+
+ # Last check modification failed
+ if site.content_updated is False:
+ site.update()
+ elif site.bad_files:
+ site.retryBadFiles()
+
+ if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
+ # Keep active connections if the site has been modified within 7 days
+ connected_num = site.needConnections(check_site_on_reconnect=True)
+
+ if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing
+ peers_protected.update([peer.key for peer in site.getConnectedPeers()])
+
+ time.sleep(1) # Prevent too frequent requests
+
+ site = None
+ gc.collect() # Explicit garbage collection
+ startup = False
+ time.sleep(60 * 20)
+
+ def announceSite(self, site):
+ site.announce(mode="update", pex=False)
+ active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
+ if site.settings["own"] or active_site:
+ # Check connections more frequently on own and active sites to speed up first connections
+ site.needConnections(check_site_on_reconnect=True)
+ site.sendMyHashfield(3)
+ site.updateHashfield(3)
+
+ # Announce sites every 20 min
+ def announceSites(self):
+ time.sleep(5 * 60) # Sites already announced on startup
+ while 1:
+ config.loadTrackersFile()
+ s = time.time()
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
+ continue
+ gevent.spawn(self.announceSite, site).join(timeout=10)
+ time.sleep(1)
+ taken = time.time() - s
+
+ # Query all trackers one-by-one in 20 minutes evenly distributed
+ sleep = max(0, 60 * 20 / len(config.trackers) - taken)
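+ # e.g. with 10 trackers and a 30s announce pass: sleep = max(0, 1200 / 10 - 30) = 90s between passes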
+
+ self.log.debug("Site announce tracker done in %.3fs, sleeping for %.3fs..." % (taken, sleep))
+ time.sleep(sleep)
+
+ # Detect if the computer has come back from sleep
+ def wakeupWatcher(self):
+ last_time = time.time()
+ last_my_ips = socket.gethostbyname_ex('')[2]
+ while 1:
+ time.sleep(30)
+ is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
+ if is_time_changed:
+ # If the gap is more than 3 minutes, the computer was likely in sleep mode
+ self.log.info(
+ "Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
+ (last_time, time.time(), time.time() - last_time)
+ )
+
+ my_ips = socket.gethostbyname_ex('')[2]
+ is_ip_changed = my_ips != last_my_ips
+ if is_ip_changed:
+ self.log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
+
+ if is_time_changed or is_ip_changed:
+ self.checkSites(check_files=False, force_port_check=True)
+
+ last_time = time.time()
+ last_my_ips = my_ips
+
+ # Bind and start serving sites
+ def start(self, check_sites=True):
+ if self.stopping:
+ return False
+
+ ConnectionServer.start(self)
+
+ try:
+ self.stream_server.start()
+ except Exception as err:
+ self.log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
+
+ self.sites = self.site_manager.list()
+ if config.debug:
+ # Auto reload FileRequest on change
+ from Debug import DebugReloader
+ DebugReloader.watcher.addCallback(self.reload)
+
+ if check_sites: # Open port, Update sites, Check files integrity
+ gevent.spawn(self.checkSites)
+
+ thread_announce_sites = gevent.spawn(self.announceSites)
+ thread_cleanup_sites = gevent.spawn(self.cleanupSites)
+ thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
+
+ ConnectionServer.listen(self)
+
+ self.log.debug("Stopped.")
+
+ def stop(self):
+ if self.running and self.portchecker.upnp_port_opened:
+ self.log.debug('Closing port %d' % self.port)
+ try:
+ self.portchecker.portClose(self.port)
+ self.log.info('Closed port via upnp.')
+ except Exception as err:
+ self.log.info("Failed at attempt to use upnp to close port: %s" % err)
+
+ return ConnectionServer.stop(self)
diff --git a/src/File/__init__.py b/src/File/__init__.py
new file mode 100644
index 00000000..1eb602d6
--- /dev/null
+++ b/src/File/__init__.py
@@ -0,0 +1,2 @@
+from .FileServer import FileServer
+from .FileRequest import FileRequest
\ No newline at end of file
diff --git a/src/Peer/Peer.py b/src/Peer/Peer.py
new file mode 100644
index 00000000..03cc1f47
--- /dev/null
+++ b/src/Peer/Peer.py
@@ -0,0 +1,410 @@
+import logging
+import time
+import sys
+import itertools
+import collections
+
+import gevent
+
+import io
+from Debug import Debug
+from Config import config
+from util import helper
+from .PeerHashfield import PeerHashfield
+from Plugin import PluginManager
+
+if config.use_tempfiles:
+ import tempfile
+
+
+ # Communicate with remote peers
+@PluginManager.acceptPlugins
+class Peer(object):
+ __slots__ = (
+ "ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield",
+ "time_added", "has_hashfield", "is_tracker_connection", "time_my_hashfield_sent", "last_ping", "reputation",
+ "last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time"
+ )
+
+ def __init__(self, ip, port, site=None, connection_server=None):
+ self.ip = ip
+ self.port = port
+ self.site = site
+ self.key = "%s:%s" % (ip, port)
+
+ self.connection = None
+ self.connection_server = connection_server
+ self.has_hashfield = False # Lazy hashfield object not created yet
+ self.time_hashfield = None # Last time the peer's hashfield was downloaded
+ self.time_my_hashfield_sent = None # Last time my hashfield was sent to the peer
+ self.time_found = time.time() # Time the peer was last returned by a tracker or other source
+ self.time_response = None # Time of last successful response from peer
+ self.time_added = time.time()
+ self.last_ping = None # Last response time for ping
+ self.is_tracker_connection = False # Tracker connection instead of normal peer
+ self.reputation = 0 # More likely to connect if larger
+ self.last_content_json_update = 0.0 # Modify date of last received content.json
+
+ self.connection_error = 0 # Number of consecutive connection errors
+ self.hash_failed = 0 # Number of bad files received from this peer
+ self.download_bytes = 0 # Bytes downloaded
+ self.download_time = 0 # Time spent downloading
+
+ def __getattr__(self, key):
+ if key == "hashfield":
+ self.has_hashfield = True
+ self.hashfield = PeerHashfield() # Create the hashfield lazily on first access
+ return self.hashfield
+ else:
+ raise AttributeError("%s instance has no attribute %r" % (type(self).__name__, key)) # getattr(self, key) here would recurse forever
+
+ def log(self, text):
+ if not config.verbose:
+ return # Only log if we are in debug mode
+ if self.site:
+ self.site.log.debug("%s:%s %s" % (self.ip, self.port, text))
+ else:
+ logging.debug("%s:%s %s" % (self.ip, self.port, text))
+
+ # Connect to host
+ def connect(self, connection=None):
+ if self.reputation < -10:
+ self.reputation = -10
+ if self.reputation > 10:
+ self.reputation = 10
+
+ if self.connection:
+ self.log("Getting connection (Closing %s)..." % self.connection)
+ self.connection.close("Connection change")
+ else:
+ self.log("Getting connection (reputation: %s)..." % self.reputation)
+
+ if connection: # Connection specified
+ self.log("Assigning connection %s" % connection)
+ self.connection = connection
+ self.connection.sites += 1
+ else: # Try to find from connection pool or create new connection
+ self.connection = None
+
+ try:
+ if self.connection_server:
+ connection_server = self.connection_server
+ elif self.site:
+ connection_server = self.site.connection_server
+ else:
+ import main
+ connection_server = main.file_server
+ self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
+ self.reputation += 1
+ self.connection.sites += 1
+ except Exception as err:
+ self.onConnectionError("Getting connection error")
+ self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
+ (Debug.formatException(err), self.connection_error, self.hash_failed))
+ self.connection = None
+ return self.connection
+
+ # Check if we have connection to peer
+ def findConnection(self):
+ if self.connection and self.connection.connected: # We have connection to peer
+ return self.connection
+ else: # Try to find from other sites connections
+ self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site)
+ if self.connection:
+ self.connection.sites += 1
+ return self.connection
+
+ def __str__(self):
+ if self.site:
+ return "Peer:%-12s of %s" % (self.ip, self.site.address_short)
+ else:
+ return "Peer:%-12s" % self.ip
+
+ def __repr__(self):
+ return "<%s>" % self.__str__()
+
+ def packMyAddress(self):
+ if self.ip.endswith(".onion"):
+ return helper.packOnionAddress(self.ip, self.port)
+ else:
+ return helper.packAddress(self.ip, self.port)
+
+ # Found a peer from a source
+ def found(self, source="other"):
+ if self.reputation < 5:
+ if source == "tracker":
+ if self.ip.endswith(".onion"):
+ self.reputation += 1
+ else:
+ self.reputation += 2
+ elif source == "local":
+ self.reputation += 20
+
+ if source in ("tracker", "local"):
+ self.site.peers_recent.appendleft(self)
+ self.time_found = time.time()
+
+ # Send a command to peer and return response value
+ def request(self, cmd, params={}, stream_to=None):
+ if not self.connection or self.connection.closed:
+ self.connect()
+ if not self.connection:
+ self.onConnectionError("Reconnect error")
+ return None # Connection failed
+
+ self.log("Send request: %s %s %s %s" % (params.get("site", ""), cmd, params.get("inner_path", ""), params.get("location", "")))
+
+ for retry in range(1, 4): # Retry 3 times
+ try:
+ if not self.connection:
+ raise Exception("No connection found")
+ res = self.connection.request(cmd, params, stream_to)
+ if not res:
+ raise Exception("Send error")
+ if "error" in res:
+ self.log("%s error: %s" % (cmd, res["error"]))
+ self.onConnectionError("Response error")
+ break
+ else: # Successful request, reset connection error num
+ self.connection_error = 0
+ self.time_response = time.time()
+ if res:
+ return res
+ else:
+ raise Exception("Invalid response: %s" % res)
+ except Exception as err:
+ if type(err).__name__ == "Notify": # Greenlet killed by worker
+ self.log("Peer worker got killed: %s, aborting cmd: %s" % (err.message, cmd))
+ break
+ else:
+ self.onConnectionError("Request error")
+ self.log(
+ "%s (connection_error: %s, hash_failed: %s, retry: %s)" %
+ (Debug.formatException(err), self.connection_error, self.hash_failed, retry)
+ )
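+ # Linear backoff between retries: 1s, 2s, 3s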
+ time.sleep(1 * retry)
+ self.connect()
+ return None # Failed after 3 retries
+
+ # Get a file content from peer
+ def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
+ if file_size and file_size > 5 * 1024 * 1024:
+ max_read_size = 1024 * 1024
+ else:
+ max_read_size = 512 * 1024
+
+ if pos_to:
+ read_bytes = min(max_read_size, pos_to - pos_from)
+ else:
+ read_bytes = max_read_size
+
+ location = pos_from
+
+ if config.use_tempfiles:
+ buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
+ else:
+ buff = io.BytesIO()
+
+ s = time.time()
+ while True: # Read in smaller parts
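+ # streamFile writes the body straight into buff; getFile returns the body inside the response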
+ if config.stream_downloads or read_bytes > 256 * 1024 or streaming:
+ res = self.request("streamFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size}, stream_to=buff)
+ if not res or "location" not in res: # Error
+ return False
+ else:
+ self.log("Send: %s" % inner_path)
+ res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size})
+ if not res or "location" not in res: # Error
+ return False
+ self.log("Recv: %s" % inner_path)
+ buff.write(res["body"])
+ res["body"] = None # Save memory
+
+ if res["location"] == res["size"] or res["location"] == pos_to: # End of file
+ break
+ else:
+ location = res["location"]
+ if pos_to:
+ read_bytes = min(max_read_size, pos_to - location)
+
+ if pos_to:
+ recv = pos_to - pos_from
+ else:
+ recv = res["location"]
+
+ self.download_bytes += recv
+ self.download_time += (time.time() - s)
+ if self.site:
+ self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + recv
+ self.log("Downloaded: %s, pos: %s, read_bytes: %s" % (inner_path, buff.tell(), read_bytes))
+ buff.seek(0)
+ return buff
+
+ # Send a ping request
+ def ping(self):
+ response_time = None
+ for retry in range(1, 3): # Retry 2 times
+ s = time.time()
+ with gevent.Timeout(10.0, False): # 10 sec timeout, don't raise exception
+ res = self.request("ping")
+
+ if res and "body" in res and res["body"] == b"Pong!":
+ response_time = time.time() - s
+ break # All fine, exit from for loop
+ # Timeout reached or bad response
+ self.onConnectionError("Ping timeout")
+ self.connect()
+ time.sleep(1)
+
+ if response_time:
+ self.log("Ping: %.3f" % response_time)
+ else:
+ self.log("Ping failed")
+ self.last_ping = response_time
+ return response_time
+
+ # Request peer exchange from peer
+ def pex(self, site=None, need_num=5):
+ if not site:
+ site = self.site # If no site defined request peers for this site
+
+ # Give back up to 5 connectable peers for the requested site
+ packed_peers = helper.packPeers(site.getConnectablePeers(5, allow_private=False))
+ request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
+ if packed_peers["onion"]:
+ request["peers_onion"] = packed_peers["onion"]
+ if packed_peers["ipv6"]:
+ request["peers_ipv6"] = packed_peers["ipv6"]
+ res = self.request("pex", request)
+ if not res or "error" in res:
+ return False
+ added = 0
+
+ # Remove unsupported peer types
+ if "peers_ipv6" in res and self.connection and "ipv6" not in self.connection.server.supported_ip_types:
+ del res["peers_ipv6"]
+
+ if "peers_onion" in res and self.connection and "onion" not in self.connection.server.supported_ip_types:
+ del res["peers_onion"]
+
+ # Add IPv4 + IPv6
+ for peer in itertools.chain(res.get("peers", []), res.get("peers_ipv6", [])):
+ address = helper.unpackAddress(peer)
+ if site.addPeer(*address, source="pex"):
+ added += 1
+
+ # Add Onion
+ for peer in res.get("peers_onion", []):
+ address = helper.unpackOnionAddress(peer)
+ if site.addPeer(*address, source="pex"):
+ added += 1
+
+ if added:
+ self.log("Added peers using pex: %s" % added)
+
+ return added
+
+ # List modified files since the date
+ # Return: {inner_path: modification date,...}
+ def listModified(self, since):
+ return self.request("listModified", {"since": since, "site": self.site.address})
+
+ def updateHashfield(self, force=False):
+ # Don't update hashfield again in 5 min
+ if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
+ return False
+
+ self.time_hashfield = time.time()
+ res = self.request("getHashfield", {"site": self.site.address})
+ if not res or "error" in res or "hashfield_raw" not in res:
+ return False
+ self.hashfield.replaceFromBytes(res["hashfield_raw"])
+
+ return self.hashfield
+
+ # Find peers for hashids
+ # Return: {hash1: ["ip:port", "ip:port",...],...}
+ def findHashIds(self, hash_ids):
+ res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
+ if not res or "error" in res or type(res) is not dict:
+ return False
+
+ back = collections.defaultdict(list)
+
+ for ip_type in ["ipv4", "ipv6", "onion"]:
+ if ip_type == "ipv4":
+ key = "peers"
+ else:
+ key = "peers_%s" % ip_type
+ for hash, peers in list(res.get(key, {}).items())[0:30]:
+ if ip_type == "onion":
+ unpacker_func = helper.unpackOnionAddress
+ else:
+ unpacker_func = helper.unpackAddress
+
+ back[hash] += list(map(unpacker_func, peers))
+
+ for hash in res.get("my", []):
+ if self.connection:
+ back[hash].append((self.connection.ip, self.connection.port))
+ else:
+ back[hash].append((self.ip, self.port))
+
+ return back
+
+ # Send my hashfield to peer
+ # Return: True if sent
+ def sendMyHashfield(self):
+ if self.connection and self.connection.handshake.get("rev", 0) < 510:
+ return False # Not supported
+ if self.time_my_hashfield_sent and self.site.content_manager.hashfield.time_changed <= self.time_my_hashfield_sent:
+ return False # Peer already has the latest hashfield
+
+ res = self.request("setHashfield", {"site": self.site.address, "hashfield_raw": self.site.content_manager.hashfield.tobytes()})
+ if not res or "error" in res:
+ return False
+ else:
+ self.time_my_hashfield_sent = time.time()
+ return True
+
+ def publish(self, address, inner_path, body, modified, diffs=[]):
+ if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
+ # To save bw we don't push big content.json to peers
+ body = b""
+
+ return self.request("update", {
+ "site": address,
+ "inner_path": inner_path,
+ "body": body,
+ "modified": modified,
+ "diffs": diffs
+ })
+
+ # Stop and remove from site
+ def remove(self, reason="Removing"):
+ self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed))
+ if self.site and self.key in self.site.peers:
+ del(self.site.peers[self.key])
+
+ if self.site and self in self.site.peers_recent:
+ self.site.peers_recent.remove(self)
+
+ if self.connection:
+ self.connection.close(reason)
+
+ # - EVENTS -
+
+ # On connection error
+ def onConnectionError(self, reason="Unknown"):
+ self.connection_error += 1
+ if self.site and len(self.site.peers) > 200:
+ limit = 3
+ else:
+ limit = 6
+ self.reputation -= 1
+ if self.connection_error >= limit: # Dead peer
+ self.remove("Peer connection: %s" % reason)
+
+ # Done working with peer
+ def onWorkerDone(self):
+ pass
diff --git a/src/Peer/PeerHashfield.py b/src/Peer/PeerHashfield.py
new file mode 100644
index 00000000..fdd414c8
--- /dev/null
+++ b/src/Peer/PeerHashfield.py
@@ -0,0 +1,75 @@
+import array
+import time
+
+
+class PeerHashfield(object):
+ __slots__ = ("storage", "time_changed", "append", "remove", "tobytes", "frombytes", "__len__", "__iter__")
+ def __init__(self):
+ self.storage = self.createStorage()
+ self.time_changed = time.time()
+
+ def createStorage(self):
+ storage = array.array("H")
+ self.append = storage.append
+ self.remove = storage.remove
+ self.tobytes = storage.tobytes
+ self.frombytes = storage.frombytes
+ self.__len__ = storage.__len__
+ self.__iter__ = storage.__iter__
+ return storage
+
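+ # A hash_id is the first two bytes of the hex hash, e.g. int("aabb", 16) == 43707,
+ # so the whole field packs into an array of unsigned shorts (2 bytes per entry)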
+ def appendHash(self, hash):
+ hash_id = int(hash[0:4], 16)
+ if hash_id not in self.storage:
+ self.storage.append(hash_id)
+ self.time_changed = time.time()
+ return True
+ else:
+ return False
+
+ def appendHashId(self, hash_id):
+ if hash_id not in self.storage:
+ self.storage.append(hash_id)
+ self.time_changed = time.time()
+ return True
+ else:
+ return False
+
+ def removeHash(self, hash):
+ hash_id = int(hash[0:4], 16)
+ if hash_id in self.storage:
+ self.storage.remove(hash_id)
+ self.time_changed = time.time()
+ return True
+ else:
+ return False
+
+ def removeHashId(self, hash_id):
+ if hash_id in self.storage:
+ self.storage.remove(hash_id)
+ self.time_changed = time.time()
+ return True
+ else:
+ return False
+
+ def getHashId(self, hash):
+ return int(hash[0:4], 16)
+
+ def hasHash(self, hash):
+ return int(hash[0:4], 16) in self.storage
+
+ def replaceFromBytes(self, hashfield_raw):
+ self.storage = self.createStorage()
+ self.storage.frombytes(hashfield_raw)
+ self.time_changed = time.time()
+
+if __name__ == "__main__":
+ field = PeerHashfield()
+ s = time.time()
+ for i in range(10000):
+ field.appendHashId(i)
+ print(time.time() - s)
+ s = time.time()
+ for i in range(10000):
+ field.hasHash("AABB")
+ print(time.time() - s)
\ No newline at end of file
diff --git a/src/Peer/PeerPortchecker.py b/src/Peer/PeerPortchecker.py
new file mode 100644
index 00000000..3c4daecf
--- /dev/null
+++ b/src/Peer/PeerPortchecker.py
@@ -0,0 +1,189 @@
+import logging
+import urllib.request
+import urllib.parse
+import re
+import time
+
+from Debug import Debug
+from util import UpnpPunch
+
+
+class PeerPortchecker(object):
+ checker_functions = {
+ "ipv4": ["checkIpfingerprints", "checkCanyouseeme"],
+ "ipv6": ["checkMyaddr", "checkIpv6scanner"]
+ }
+ def __init__(self, file_server):
+ self.log = logging.getLogger("PeerPortchecker")
+ self.upnp_port_opened = False
+ self.file_server = file_server
+
+ def requestUrl(self, url, post_data=None):
+ if type(post_data) is dict:
+ post_data = urllib.parse.urlencode(post_data).encode("utf8")
+ req = urllib.request.Request(url, post_data)
+ req.add_header("Referer", url)
+ req.add_header("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11")
+ req.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
+ return urllib.request.urlopen(req, timeout=20.0)
+
+ def portOpen(self, port):
+ self.log.info("Trying to open port using UpnpPunch...")
+
+ try:
+ UpnpPunch.ask_to_open_port(port, 'ZeroNet', retries=3, protos=["TCP"])
+ self.upnp_port_opened = True
+ except Exception as err:
+ self.log.warning("UpnpPunch run error: %s" % Debug.formatException(err))
+ return False
+
+ return True
+
+ def portClose(self, port):
+ return UpnpPunch.ask_to_close_port(port, protos=["TCP"])
+
+ def portCheck(self, port, ip_type="ipv4"):
+ checker_functions = self.checker_functions[ip_type]
+
+ for func_name in checker_functions:
+ func = getattr(self, func_name)
+ s = time.time()
+ try:
+ res = func(port)
+ if res:
+ self.log.info(
+ "Checked port %s (%s) using %s result: %s in %.3fs" %
+ (port, ip_type, func_name, res, time.time() - s)
+ )
+ time.sleep(0.1)
+ if res["opened"] and not self.file_server.had_external_incoming:
+ res["opened"] = False
+ self.log.warning("Port %s:%s looks opened, but no incoming connection" % (res["ip"], port))
+ break
+ except Exception as err:
+ self.log.warning(
+ "%s check error: %s in %.3fs" %
+ (func_name, Debug.formatException(err), time.time() - s)
+ )
+ res = {"ip": None, "opened": False}
+
+ return res
+
+ def checkCanyouseeme(self, port):
+ data = urllib.request.urlopen("https://www.canyouseeme.org/", b"ip=1.1.1.1&port=%s" % str(port).encode("ascii"), timeout=20.0).read().decode("utf8")
+
+ message = re.match(r'.*
");
+ }
+ return _this.pageLoaded();
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.pageMain = function() {
+ return this.cmd("dbQuery", ["SELECT post.*, COUNT(comment_id) AS comments FROM post LEFT JOIN comment USING (post_id) GROUP BY post_id ORDER BY date_published"], (function(_this) {
+ return function(res) {
+ var elem, post, s, _i, _len;
+ s = +(new Date);
+ for (_i = 0, _len = res.length; _i < _len; _i++) {
+ post = res[_i];
+ elem = $("#post_" + post.post_id);
+ if (elem.length === 0) {
+ elem = $(".post.template").clone().removeClass("template").attr("id", "post_" + post.post_id);
+ elem.prependTo(".posts");
+ }
+ _this.applyPostdata(elem, post);
+ }
+ _this.pageLoaded();
+ _this.log("Posts loaded in", (+(new Date)) - s, "ms");
+ return $(".posts .new").on("click", function() {
+ _this.cmd("fileGet", ["data/data.json"], function(res) {
+ var data;
+ data = JSON.parse(res);
+ data.post.unshift({
+ post_id: data.next_post_id,
+ title: "New blog post",
+ date_published: (+(new Date)) / 1000,
+ body: "Blog post body"
+ });
+ data.next_post_id += 1;
+ elem = $(".post.template").clone().removeClass("template");
+ _this.applyPostdata(elem, data.post[0]);
+ elem.hide();
+ elem.prependTo(".posts").slideDown();
+ _this.addInlineEditors(elem);
+ return _this.writeData(data);
+ });
+ return false;
+ });
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.pageLoaded = function() {
+ $("body").addClass("loaded");
+ $('pre code').each(function(i, block) {
+ return hljs.highlightBlock(block);
+ });
+ this.event_page_load.resolve();
+ return this.cmd("innerLoaded", true);
+ };
+
+ ZeroBlog.prototype.addInlineEditors = function(parent) {
+ var editor, elem, elems, _i, _len;
+ this.logStart("Adding inline editors");
+ elems = $("[data-editable]:visible", parent);
+ for (_i = 0, _len = elems.length; _i < _len; _i++) {
+ elem = elems[_i];
+ elem = $(elem);
+ if (!elem.data("editor") && !elem.hasClass("editor")) {
+ editor = new InlineEditor(elem, this.getContent, this.saveContent, this.getObject);
+ elem.data("editor", editor);
+ }
+ }
+ return this.logEnd("Adding inline editors");
+ };
+
+ ZeroBlog.prototype.checkPublishbar = function() {
+ if (!this.site_modified || this.site_modified > this.site_info.content.modified) {
+ return $(".publishbar").addClass("visible");
+ } else {
+ return $(".publishbar").removeClass("visible");
+ }
+ };
+
+ ZeroBlog.prototype.publish = function() {
+ this.cmd("wrapperPrompt", ["Enter your private key:", "password"], (function(_this) {
+ return function(privatekey) {
+ $(".publishbar .button").addClass("loading");
+ return _this.cmd("sitePublish", [privatekey], function(res) {
+ $(".publishbar .button").removeClass("loading");
+ return _this.log("Publish result:", res);
+ });
+ };
+ })(this));
+ return false;
+ };
+
+ ZeroBlog.prototype.applyPostdata = function(elem, post, full) {
+ var body, date_published, title_hash;
+ if (full == null) {
+ full = false;
+ }
+ title_hash = post.title.replace(/[#?& ]/g, "+").replace(/[+]+/g, "+");
+ elem.data("object", "Post:" + post.post_id);
+ $(".title .editable", elem).html(post.title).attr("href", "?Post:" + post.post_id + ":" + title_hash).data("content", post.title);
+ date_published = Time.since(post.date_published);
+ if (post.body.match(/^---/m)) {
+ date_published += " · " + (Time.readtime(post.body));
+ $(".more", elem).css("display", "inline-block").attr("href", "?Post:" + post.post_id + ":" + title_hash);
+ }
+ $(".details .published", elem).html(date_published).data("content", post.date_published);
+ if (post.comments > 0) {
+ $(".details .comments-num", elem).css("display", "inline").attr("href", "?Post:" + post.post_id + ":" + title_hash + "#Comments");
+ $(".details .comments-num .num", elem).text(post.comments + " comments");
+ } else {
+ $(".details .comments-num", elem).css("display", "none");
+ }
+ if (full) {
+ body = post.body;
+ } else {
+ body = post.body.replace(/^([\s\S]*?)\n---\n[\s\S]*$/, "$1");
+ }
+ return $(".body", elem).html(Text.toMarked(body)).data("content", post.body);
+ };
+
+ ZeroBlog.prototype.onOpenWebsocket = function(e) {
+ this.loadData();
+ this.routeUrl(window.location.search.substring(1));
+ this.cmd("siteInfo", {}, this.setSiteinfo);
+ return this.cmd("serverInfo", {}, (function(_this) {
+ return function(ret) {
+ _this.server_info = ret;
+ if (_this.server_info.rev < 160) {
+ return _this.loadData("old");
+ }
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.getObject = function(elem) {
+ return elem.parents("[data-object]:first");
+ };
+
+ ZeroBlog.prototype.getContent = function(elem, raw) {
+ var content, id, type, _ref;
+ if (raw == null) {
+ raw = false;
+ }
+ _ref = this.getObject(elem).data("object").split(":"), type = _ref[0], id = _ref[1];
+ id = parseInt(id);
+ content = elem.data("content");
+ if (elem.data("editable-mode") === "timestamp") {
+ content = Time.date(content, "full");
+ }
+ if (elem.data("editable-mode") === "simple" || raw) {
+ return content;
+ } else {
+ return Text.toMarked(content);
+ }
+ };
+
+ ZeroBlog.prototype.saveContent = function(elem, content, cb) {
+ var id, type, _ref;
+ if (cb == null) {
+ cb = false;
+ }
+ if (elem.data("deletable") && content === null) {
+ return this.deleteObject(elem, cb);
+ }
+ elem.data("content", content);
+ _ref = this.getObject(elem).data("object").split(":"), type = _ref[0], id = _ref[1];
+ id = parseInt(id);
+ if (type === "Post" || type === "Site") {
+ return this.saveSite(elem, type, id, content, cb);
+ } else if (type === "Comment") {
+ return this.saveComment(elem, type, id, content, cb);
+ }
+ };
+
+ ZeroBlog.prototype.saveSite = function(elem, type, id, content, cb) {
+ return this.cmd("fileGet", ["data/data.json"], (function(_this) {
+ return function(res) {
+ var data, post;
+ data = JSON.parse(res);
+ if (type === "Post") {
+ post = ((function() {
+ var _i, _len, _ref, _results;
+ _ref = data.post;
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ post = _ref[_i];
+ if (post.post_id === id) {
+ _results.push(post);
+ }
+ }
+ return _results;
+ })())[0];
+ if (elem.data("editable-mode") === "timestamp") {
+ content = Time.timestamp(content);
+ }
+ post[elem.data("editable")] = content;
+ } else if (type === "Site") {
+ data[elem.data("editable")] = content;
+ }
+ return _this.writeData(data, function(res) {
+ if (cb) {
+ if (res === true) {
+ if (elem.data("editable-mode") === "simple") {
+ return cb(content);
+ } else if (elem.data("editable-mode") === "timestamp") {
+ return cb(Time.since(content));
+ } else {
+ return cb(Text.toMarked(content));
+ }
+ } else {
+ return cb(false);
+ }
+ }
+ });
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.saveComment = function(elem, type, id, content, cb) {
+ var inner_path;
+ this.log("Saving comment...", id);
+ this.getObject(elem).css("height", "auto");
+ inner_path = "data/users/" + Page.site_info.auth_address + "/data.json";
+ return Page.cmd("fileGet", {
+ "inner_path": inner_path,
+ "required": false
+ }, (function(_this) {
+ return function(data) {
+ var comment, json_raw;
+ data = JSON.parse(data);
+ comment = ((function() {
+ var _i, _len, _ref, _results;
+ _ref = data.comment;
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ comment = _ref[_i];
+ if (comment.comment_id === id) {
+ _results.push(comment);
+ }
+ }
+ return _results;
+ })())[0];
+ comment[elem.data("editable")] = content;
+ _this.log(data);
+ json_raw = unescape(encodeURIComponent(JSON.stringify(data, void 0, '\t')));
+ return _this.writePublish(inner_path, btoa(json_raw), function(res) {
+ if (res === true) {
+ Comments.checkCert("updaterules");
+ if (cb) {
+ return cb(Text.toMarked(content, {
+ "sanitize": true
+ }));
+ }
+ } else {
+ _this.cmd("wrapperNotification", ["error", "File write error: " + res]);
+ if (cb) {
+ return cb(false);
+ }
+ }
+ });
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.deleteObject = function(elem, cb) {
+ var id, inner_path, type, _ref;
+ if (cb == null) {
+ cb = false;
+ }
+ _ref = elem.data("object").split(":"), type = _ref[0], id = _ref[1];
+ id = parseInt(id);
+ if (type === "Post") {
+ return this.cmd("fileGet", ["data/data.json"], (function(_this) {
+ return function(res) {
+ var data, post;
+ data = JSON.parse(res);
+ if (type === "Post") {
+ post = ((function() {
+ var _i, _len, _ref1, _results;
+ _ref1 = data.post;
+ _results = [];
+ for (_i = 0, _len = _ref1.length; _i < _len; _i++) {
+ post = _ref1[_i];
+ if (post.post_id === id) {
+ _results.push(post);
+ }
+ }
+ return _results;
+ })())[0];
+ if (!post) {
+ return false;
+ }
+ data.post.splice(data.post.indexOf(post), 1);
+ return _this.writeData(data, function(res) {
+ if (cb) {
+ cb();
+ }
+ if (res === true) {
+ return elem.slideUp();
+ }
+ });
+ }
+ };
+ })(this));
+ } else if (type === "Comment") {
+ inner_path = "data/users/" + Page.site_info.auth_address + "/data.json";
+ return this.cmd("fileGet", {
+ "inner_path": inner_path,
+ "required": false
+ }, (function(_this) {
+ return function(data) {
+ var comment, json_raw;
+ data = JSON.parse(data);
+ comment = ((function() {
+ var _i, _len, _ref1, _results;
+ _ref1 = data.comment;
+ _results = [];
+ for (_i = 0, _len = _ref1.length; _i < _len; _i++) {
+ comment = _ref1[_i];
+ if (comment.comment_id === id) {
+ _results.push(comment);
+ }
+ }
+ return _results;
+ })())[0];
+ data.comment.splice(data.comment.indexOf(comment), 1);
+ json_raw = unescape(encodeURIComponent(JSON.stringify(data, void 0, '\t')));
+ return _this.writePublish(inner_path, btoa(json_raw), function(res) {
+ if (res === true) {
+ elem.slideUp();
+ }
+ if (cb) {
+ return cb();
+ }
+ });
+ };
+ })(this));
+ }
+ };
+
+ ZeroBlog.prototype.writeData = function(data, cb) {
+ var json_raw;
+ if (cb == null) {
+ cb = null;
+ }
+ if (!data) {
+ return this.log("Data missing");
+ }
+ this.data["modified"] = data.modified = Time.timestamp();
+ json_raw = unescape(encodeURIComponent(JSON.stringify(data, void 0, '\t')));
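+ // fileWrite expects base64 content; encodeURIComponent/unescape yields UTF-8 bytes that btoa can encode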
+ this.cmd("fileWrite", ["data/data.json", btoa(json_raw)], (function(_this) {
+ return function(res) {
+ if (res === "ok") {
+ if (cb) {
+ cb(true);
+ }
+ } else {
+ _this.cmd("wrapperNotification", ["error", "File write error: " + res]);
+ if (cb) {
+ cb(false);
+ }
+ }
+ return _this.checkPublishbar();
+ };
+ })(this));
+ return this.cmd("fileGet", ["content.json"], (function(_this) {
+ return function(content) {
+ content = content.replace(/"title": ".*?"/, "\"title\": \"" + data.title + "\"");
+ return _this.cmd("fileWrite", ["content.json", btoa(content)], function(res) {
+ if (res !== "ok") {
+ return _this.cmd("wrapperNotification", ["error", "Content.json write error: " + res]);
+ }
+ });
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.writePublish = function(inner_path, data, cb) {
+ return this.cmd("fileWrite", [inner_path, data], (function(_this) {
+ return function(res) {
+ if (res !== "ok") {
+ _this.cmd("wrapperNotification", ["error", "File write error: " + res]);
+ cb(false);
+ return false;
+ }
+ return _this.cmd("sitePublish", {
+ "inner_path": inner_path
+ }, function(res) {
+ if (res === "ok") {
+ return cb(true);
+ } else {
+ return cb(res);
+ }
+ });
+ };
+ })(this));
+ };
+
+ ZeroBlog.prototype.onRequest = function(cmd, message) {
+ if (cmd === "setSiteInfo") {
+ return this.actionSetSiteInfo(message);
+ } else {
+ return this.log("Unknown command", message);
+ }
+ };
+
+ ZeroBlog.prototype.actionSetSiteInfo = function(message) {
+ this.setSiteinfo(message.params);
+ return this.checkPublishbar();
+ };
+
+ ZeroBlog.prototype.setSiteinfo = function(site_info) {
+ var _ref, _ref1;
+ this.site_info = site_info;
+ this.event_site_info.resolve(site_info);
+ if ($("body").hasClass("page-post")) {
+ Comments.checkCert();
+ }
+ if (((_ref = site_info.event) != null ? _ref[0] : void 0) === "file_done" && site_info.event[1].match(/.*users.*data.json$/)) {
+ if ($("body").hasClass("page-post")) {
+ Comments.loadComments();
+ }
+ if ($("body").hasClass("page-main")) {
+ return RateLimit(500, (function(_this) {
+ return function() {
+ return _this.pageMain();
+ };
+ })(this));
+ }
+ } else if (((_ref1 = site_info.event) != null ? _ref1[0] : void 0) === "file_done" && site_info.event[1] === "data/data.json") {
+ this.loadData();
+ if ($("body").hasClass("page-main")) {
+ this.pageMain();
+ }
+ if ($("body").hasClass("page-post")) {
+ return this.pagePost();
+ }
+ } else {
+
+ }
+ };
+
+ return ZeroBlog;
+
+ })(ZeroFrame);
+
+ window.Page = new ZeroBlog();
+
+}).call(this);
diff --git a/src/Tor/TorManager.py b/src/Tor/TorManager.py
new file mode 100644
index 00000000..865d8fbf
--- /dev/null
+++ b/src/Tor/TorManager.py
@@ -0,0 +1,311 @@
+import logging
+import re
+import socket
+import binascii
+import sys
+import os
+import time
+import random
+import subprocess
+import atexit
+
+import gevent
+
+from Config import config
+
+from lib import Ed25519
+from Crypt import CryptTor
+
+from Site import SiteManager
+import socks
+from gevent.lock import RLock
+from Debug import Debug
+from Plugin import PluginManager
+
+
+@PluginManager.acceptPlugins
+class TorManager(object):
+ def __init__(self, fileserver_ip=None, fileserver_port=None):
+ self.privatekeys = {} # Onion: Privatekey
+ self.site_onions = {} # Site address: Onion
+ self.tor_exe = "tools/tor/tor.exe"
+ self.has_meek_bridges = os.path.isfile("tools/tor/PluggableTransports/meek-client.exe")
+ self.tor_process = None
+ self.log = logging.getLogger("TorManager")
+ self.start_onions = None
+ self.conn = None
+ self.lock = RLock()
+ self.starting = True
+ self.connecting = True
+ self.status = None
+ self.event_started = gevent.event.AsyncResult()
+
+ if config.tor == "disable":
+ self.enabled = False
+ self.start_onions = False
+ self.setStatus("Disabled")
+ else:
+ self.enabled = True
+ self.setStatus("Waiting")
+
+ if fileserver_port:
+ self.fileserver_port = fileserver_port
+ else:
+ self.fileserver_port = config.fileserver_port
+
+ self.ip, self.port = config.tor_controller.rsplit(":", 1)
+ self.port = int(self.port)
+
+ self.proxy_ip, self.proxy_port = config.tor_proxy.rsplit(":", 1)
+ self.proxy_port = int(self.proxy_port)
+
+ def start(self):
+ self.log.debug("Starting (Tor: %s)" % config.tor)
+ self.starting = True
+ try:
+ if not self.connect():
+ raise Exception(self.status)
+ self.log.debug("Tor proxy port %s check ok" % config.tor_proxy)
+ except Exception as err:
+ if sys.platform.startswith("win") and os.path.isfile(self.tor_exe):
+ self.log.info("Starting self-bundled Tor, due to Tor proxy port %s check error: %s" % (config.tor_proxy, err))
+ # Change to self-bundled Tor ports
+ self.port = 49051
+ self.proxy_port = 49050
+ if config.tor == "always":
+ socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", self.proxy_port)
+ self.enabled = True
+ if not self.connect():
+ self.startTor()
+ else:
+ self.log.info("Disabling Tor, because error while accessing Tor proxy at port %s: %s" % (config.tor_proxy, err))
+ self.enabled = False
+
+ def setStatus(self, status):
+ self.status = status
+ if "main" in sys.modules: # import main has side-effects, breaks tests
+ import main
+ if "ui_server" in dir(main):
+ main.ui_server.updateWebsocket()
+
+ def startTor(self):
+ if sys.platform.startswith("win"):
+ try:
+ self.log.info("Starting Tor client %s..." % self.tor_exe)
+ tor_dir = os.path.dirname(self.tor_exe)
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+ cmd = r"%s -f torrc --defaults-torrc torrc-defaults --ignore-missing-torrc" % self.tor_exe
+ if config.tor_use_bridges:
+ cmd += " --UseBridges 1"
+
+ self.tor_process = subprocess.Popen(cmd, cwd=tor_dir, close_fds=True, startupinfo=startupinfo)
+ for wait in range(1, 3): # Wait for startup
+ time.sleep(wait * 0.5)
+ self.enabled = True
+ if self.connect():
+ if self.isSubprocessRunning():
+ self.request("TAKEOWNERSHIP") # Shut down Tor client when controll connection closed
+ break
+ # Terminate on exit
+ atexit.register(self.stopTor)
+ except Exception as err:
+ self.log.error("Error starting Tor client: %s" % Debug.formatException(str(err)))
+ self.enabled = False
+ self.starting = False
+ self.event_started.set(False)
+ return False
+
+ def isSubprocessRunning(self):
+ return self.tor_process and self.tor_process.pid and self.tor_process.poll() is None
+
+ def stopTor(self):
+ self.log.debug("Stopping...")
+ try:
+ if self.isSubprocessRunning():
+ self.request("SIGNAL SHUTDOWN")
+ except Exception as err:
+ self.log.error("Error stopping Tor: %s" % err)
+
+ def connect(self):
+ if not self.enabled:
+ return False
+ self.site_onions = {}
+ self.privatekeys = {}
+
+ return self.connectController()
+
+ def connectController(self):
+ if "socket_noproxy" in dir(socket): # Socket proxy-patched, use non-proxy one
+ conn = socket.socket_noproxy(socket.AF_INET, socket.SOCK_STREAM)
+ else:
+ conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ self.log.debug("Connecting to Tor Controller %s:%s" % (self.ip, self.port))
+ self.connecting = True
+ try:
+ with self.lock:
+ conn.connect((self.ip, self.port))
+
+ # Auth cookie file
+ res_protocol = self.send("PROTOCOLINFO", conn)
+ cookie_match = re.search('COOKIEFILE="(.*?)"', res_protocol)
+
+ if config.tor_password:
+ res_auth = self.send('AUTHENTICATE "%s"' % config.tor_password, conn)
+ elif cookie_match:
+ cookie_file = cookie_match.group(1).encode("ascii").decode("unicode_escape")
+ if not os.path.isfile(cookie_file) and self.tor_process:
+ # Workaround for tor client cookie auth file utf8 encoding bug (https://github.com/torproject/stem/issues/57)
+ cookie_file = os.path.dirname(self.tor_exe) + "\\data\\control_auth_cookie"
+ auth_hex = binascii.b2a_hex(open(cookie_file, "rb").read())
+ res_auth = self.send("AUTHENTICATE %s" % auth_hex.decode("utf8"), conn)
+ else:
+ res_auth = self.send("AUTHENTICATE", conn)
+
+ if "250 OK" not in res_auth:
+ raise Exception("Authenticate error %s" % res_auth)
+
+ # Version 0.2.7.5+ required for ADD_ONION support
+ res_version = self.send("GETINFO version", conn)
+ version = re.search(r'version=([0-9\.]+)', res_version).group(1)
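+ # Crude numeric compare: the first two dots become zeros, e.g. "0.2.7.5" -> "00207.5" -> 207.5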
+ if float(version.replace(".", "0", 2)) < 207.5:
+ raise Exception("Tor version >=0.2.7.5 required, found: %s" % version)
+
+ self.setStatus("Connected (%s)" % res_auth)
+ self.event_started.set(True)
+ self.starting = False
+ self.connecting = False
+ self.conn = conn
+ except Exception as err:
+ self.conn = None
+ self.setStatus("Error (%s)" % str(err))
+ self.log.warning("Tor controller connect error: %s" % Debug.formatException(str(err)))
+ self.enabled = False
+ return self.conn
+
+ def disconnect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ def startOnions(self):
+ if self.enabled:
+ self.log.debug("Start onions")
+ self.start_onions = True
+ self.getOnion("global")
+
+ # Get new exit node ip
+ def resetCircuits(self):
+ res = self.request("SIGNAL NEWNYM")
+ if "250 OK" not in res:
+ self.setStatus("Reset circuits error (%s)" % res)
+ self.log.error("Tor reset circuits error: %s" % res)
+
+ def addOnion(self):
+ if len(self.privatekeys) >= config.tor_hs_limit:
+ return random.choice([key for key in list(self.privatekeys.keys()) if key != self.site_onions.get("global")])
+
+ result = self.makeOnionAndKey()
+ if result:
+ onion_address, onion_privatekey = result
+ self.privatekeys[onion_address] = onion_privatekey
+ self.setStatus("OK (%s onions running)" % len(self.privatekeys))
+ SiteManager.peer_blacklist.append((onion_address + ".onion", self.fileserver_port))
+ return onion_address
+ else:
+ return False
+
+ def makeOnionAndKey(self):
+ res = self.request("ADD_ONION NEW:ED25519-V3 port=%s" % self.fileserver_port)
+ match = re.search("ServiceID=([A-Za-z0-9]+).*PrivateKey=ED25519-V3:(.*?)[\r\n]", res, re.DOTALL)
+ if match:
+ onion_address, onion_privatekey = match.groups()
+ return (onion_address, onion_privatekey)
+ else:
+ self.setStatus("AddOnion error (%s)" % res)
+ self.log.error("Tor addOnion error: %s" % res)
+ return False
+
+ def delOnion(self, address):
+ res = self.request("DEL_ONION %s" % address)
+ if "250 OK" in res:
+ del self.privatekeys[address]
+ self.setStatus("OK (%s onion running)" % len(self.privatekeys))
+ return True
+ else:
+ self.setStatus("DelOnion error (%s)" % res)
+ self.log.error("Tor delOnion error: %s" % res)
+ self.disconnect()
+ return False
+
+ def request(self, cmd):
+ with self.lock:
+ if not self.enabled:
+ return False
+ if not self.conn:
+ if not self.connect():
+ return ""
+ return self.send(cmd)
+
+ def send(self, cmd, conn=None):
+ if not conn:
+ conn = self.conn
+ self.log.debug("> %s" % cmd)
+ back = ""
+ for retry in range(2):
+ try:
+ conn.sendall(b"%s\r\n" % cmd.encode("utf8"))
+ while not back.endswith("250 OK\r\n"):
+ back += conn.recv(1024 * 64).decode("utf8")
+ break
+ except Exception as err:
+ self.log.error("Tor send error: %s, reconnecting..." % err)
+ if not self.connecting:
+ self.disconnect()
+ time.sleep(1)
+ self.connect()
+ back = None
+ if back:
+ self.log.debug("< %s" % back.strip())
+ return back
+
+ def getPrivatekey(self, address):
+ return self.privatekeys[address]
+
+ def getPublickey(self, address):
+ return CryptTor.privatekeyToPublickey(self.privatekeys[address])
+
+ def getOnion(self, site_address):
+ if not self.enabled:
+ return None
+
+ if config.tor == "always": # Different onion for every site
+ onion = self.site_onions.get(site_address)
+ else: # Same onion for every site
+ onion = self.site_onions.get("global")
+ site_address = "global"
+
+ if not onion:
+ with self.lock:
+ self.site_onions[site_address] = self.addOnion()
+ onion = self.site_onions[site_address]
+ self.log.debug("Created new hidden service for %s: %s" % (site_address, onion))
+
+ return onion
+
+ # Create a socket that will be routed through the Tor SOCKS proxy
+ def createSocket(self, onion, port):
+ if not self.enabled:
+ return False
+ self.log.debug("Creating new Tor socket to %s:%s" % (onion, port))
+ if self.starting:
+ self.log.debug("Waiting for startup...")
+ self.event_started.get()
+ if config.tor == "always": # Every socket is proxied by default, in this mode
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ else:
+ sock = socks.socksocket()
+ sock.set_proxy(socks.SOCKS5, self.proxy_ip, self.proxy_port)
+ return sock
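For reference, the controller handshake that TorManager wraps is a small line-based protocol. A minimal standalone sketch of the same conversation (assuming a local controller on 127.0.0.1:9051 that accepts null authentication; real setups need the password or cookie auth handled above, and error replies are not handled here):

    import re
    import socket

    def add_onion_demo(host="127.0.0.1", port=9051, hs_port=15441):
        conn = socket.create_connection((host, port))

        def send(cmd):
            # Commands and replies are CRLF-terminated; success replies end with "250 OK"
            conn.sendall(cmd.encode("utf8") + b"\r\n")
            back = b""
            while not back.endswith(b"250 OK\r\n"):
                back += conn.recv(1024 * 64)
            return back.decode("utf8")

        send("AUTHENTICATE")  # Null auth only; a 515 error reply would hang this sketch
        res = send("ADD_ONION NEW:ED25519-V3 port=%s" % hs_port)
        onion = re.search("ServiceID=([a-z2-7]+)", res).group(1)
        print("Hidden service %s.onion -> local port %s" % (onion, hs_port))
        conn.close()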
diff --git a/src/Tor/__init__.py b/src/Tor/__init__.py
new file mode 100644
index 00000000..d0fcffaf
--- /dev/null
+++ b/src/Tor/__init__.py
@@ -0,0 +1 @@
+from .TorManager import TorManager
\ No newline at end of file
diff --git a/src/Translate/Translate.py b/src/Translate/Translate.py
new file mode 100644
index 00000000..e73f9be1
--- /dev/null
+++ b/src/Translate/Translate.py
@@ -0,0 +1,135 @@
+import os
+import json
+import logging
+import inspect
+import re
+import html
+import string
+
+from Config import config
+
+translates = []
+
+
+class EscapeProxy(dict):
+ # Automatically escape the accessed string values
+ def __getitem__(self, key):
+ val = dict.__getitem__(self, key)
+ if type(val) is str:
+ return html.escape(val)
+ elif type(val) is dict:
+ return EscapeProxy(val)
+ elif type(val) is list:
+ return EscapeProxy(enumerate(val)) # Convert lists to dict
+ else:
+ return val
+
+
+class Translate(dict):
+ def __init__(self, lang_dir=None, lang=None):
+ if not lang_dir:
+ lang_dir = os.path.dirname(__file__) + "/languages/"
+ if not lang:
+ lang = config.language
+ self.lang = lang
+ self.lang_dir = lang_dir
+ self.setLanguage(lang)
+ self.formatter = string.Formatter()
+
+ if config.debug:
+ # Auto reload translation files on change
+ from Debug import DebugReloader
+ DebugReloader.watcher.addCallback(self.load)
+
+ translates.append(self)
+
+ def setLanguage(self, lang):
+ self.lang = re.sub("[^a-z-]", "", lang)
+ self.lang_file = self.lang_dir + "%s.json" % lang
+ self.load()
+
+ def __repr__(self):
+ return "" % self.lang
+
+ def load(self):
+ if self.lang == "en":
+ data = {}
+ dict.__init__(self, data)
+ self.clear()
+ elif os.path.isfile(self.lang_file):
+ try:
+ data = json.load(open(self.lang_file, encoding="utf8"))
+ logging.debug("Loaded translate file: %s (%s entries)" % (self.lang_file, len(data)))
+ except Exception as err:
+ logging.error("Error loading translate file %s: %s" % (self.lang_file, err))
+ data = {}
+ dict.__init__(self, data)
+ else:
+ data = {}
+ dict.__init__(self, data)
+ self.clear()
+ logging.debug("Translate file not exists: %s" % self.lang_file)
+
+ def format(self, s, kwargs, nested=False):
+ kwargs["_"] = self
+ if nested:
+ back = self.formatter.vformat(s, [], kwargs) # PY3 TODO: Change to format_map
+ return self.formatter.vformat(back, [], kwargs)
+ else:
+ return self.formatter.vformat(s, [], kwargs)
+
+ def formatLocals(self, s, nested=False):
+ kwargs = inspect.currentframe().f_back.f_locals
+ return self.format(s, kwargs, nested=nested)
+
+ def __call__(self, s, kwargs=None, nested=False, escape=True):
+ if not kwargs:
+ kwargs = inspect.currentframe().f_back.f_locals
+ if escape:
+ kwargs = EscapeProxy(kwargs)
+ return self.format(s, kwargs, nested=nested)
+
+ def __missing__(self, key):
+ return key
+
+ def pluralize(self, value, single, multi):
+ if value > 1:
+ return self[multi].format(value)
+ else:
+ return self[single].format(value)
+
+ def translateData(self, data, translate_table=None, mode="js"):
+ if not translate_table:
+ translate_table = self
+
+ patterns = []
+ for key, val in list(translate_table.items()):
+ if key.startswith("_("): # Problematic string: only match if called between _(" ") function
+ key = key.replace("_(", "").replace(")", "").replace(", ", '", "')
+ translate_table[key] = "|" + val
+ patterns.append(re.escape(key))
+
+ def replacer(match):
+ target = translate_table[match.group(1)]
+ if mode == "js":
+ if target and target[0] == "|": # Strict string match
+ if match.string[match.start() - 2] == "_": # Only if the match if called between _(" ") function
+ return '"' + target[1:] + '"'
+ else:
+ return '"' + match.group(1) + '"'
+ return '"' + target + '"'
+ else:
+ return match.group(0)[0] + target + match.group(0)[-1]
+
+ if mode == "html":
+ pattern = '[">](' + "|".join(patterns) + ')["<]'
+ else:
+ pattern = '"(' + "|".join(patterns) + ')"'
+ data = re.sub(pattern, replacer, data)
+
+ if mode == "html":
+ data = data.replace("lang={lang}", "lang=%s" % self.lang) # lang get parameter to .js file to avoid cache
+
+ return data
+
+translate = Translate()
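In use, a Translate instance behaves like a dictionary with formatting support: indexing returns the translated string (unknown keys fall back to themselves via __missing__), while calling it formats {placeholders} from the caller's locals, HTML-escaped through EscapeProxy, and the special "_" key exposes the table itself to format strings. A rough usage sketch (the hu language and the path are illustrative; with ZeroNet's src directory on the import path, a missing JSON file simply falls back to the English keys):

    from Translate import Translate

    _ = Translate(lang_dir="src/Translate/languages/", lang="hu")

    print(_["Site cloned"])  # Plain lookup; falls back to the key itself if untranslated

    site_name = "my-blog"
    # {site_name} comes from the caller's locals; {_[...]} looks up a translation inline
    print(_("{_[Site cloned]}: {site_name}"))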
diff --git a/src/Translate/__init__.py b/src/Translate/__init__.py
new file mode 100644
index 00000000..ba0ab6d4
--- /dev/null
+++ b/src/Translate/__init__.py
@@ -0,0 +1 @@
+from .Translate import *
\ No newline at end of file
diff --git a/src/Translate/languages/da.json b/src/Translate/languages/da.json
new file mode 100644
index 00000000..8e6f0845
--- /dev/null
+++ b/src/Translate/languages/da.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Tillykke, din port ({0}) er ÃĨben. Du er nu fuld klient pÃĨ ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "TOR er aktiv, alle forbindelser anvender Onions.",
+ "Successfully started Tor onion hidden services.": "OK. Startede TOR skjult onion service.",
+ "Unable to start hidden services, please check your config.": "Fejl. Kunne ikke starte TOR skjult onion service. Tjek din opsÃĻtning!",
+ "For faster connections open {0} port on your router.": "Ã ben port {0} pÃĨ din router for hurtigere forbindelse.",
+ "Your connection is restricted. Please, open {0} port on your router": "BegrÃĻnset forbindelse. Ã ben venligst port {0} pÃĨ din router",
+ "or configure Tor to become a full member of the ZeroNet network.": "eller opsÃĻt TOR for fuld adgang til ZeroNet!",
+
+ "Select account you want to use in this site:": "VÃĻlg bruger til brug pÃĨ denne side:",
+ "currently selected": "nuvÃĻrende bruger",
+ "Unique to site": "Unik pÃĨ siden",
+
+ "Content signing failed": "Signering af indhold fejlede",
+ "Content publish queued for {0:.0f} seconds.": "Indhold i kø for offentliggørelse i {0:.0f} sekunder.",
+ "Content published to {0} peers.": "Indhold offentliggjort til {0} klienter.",
+ "No peers found, but your content is ready to access.": "Ingen klienter fundet, men dit indhold er klar til hentning.",
+ "Your network connection is restricted. Please, open {0} port": "Din forbindelse er begrÃĻnset. Ã ben venligst port {0}",
+ "on your router to make your site accessible for everyone.": "pÃĨ din router for at dele din side med alle.",
+ "Content publish failed.": "Offentliggørelse af indhold fejlede.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Denne fil er endnu ikke delt fÃĻrdig. Tidligere indhold kan gÃĨ tabt hvis du skriver til filen nu.",
+ "Write content anyway": "Del indhold alligevel",
+ "New certificate added:": "Nyt certifikat oprettet:",
+ "You current certificate:": "Dit nuvÃĻrende certifikat: ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Skift certificat til {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certifikat ÃĻndret til {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Side klonet",
+
+ "You have successfully changed the web interface's language!": "OK. Du har nu skiftet sprog pÃĨ web brugergrÃĻnsefladen!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Pga. browser cache kan skift af sprog tage nogle minutter.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Forbindelse til UiServer Websocket blev tabt. Genopretter forbindelse...",
+ "Connection with UiServer Websocket recovered.": "Forbindelse til UiServer Websocket genoprettet.",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket fejl. GenindlÃĻs venligst siden (F5)!",
+ " Connecting...": " Opretter forbindelse...",
+ "Site size: ": "Side størrelse: ",
+ "MB is larger than default allowed ": "MB er større end den tilladte default ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "à ben side og sÃĻt max side størrelse til \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " filer skal downloades",
+ " downloaded": " downloadet",
+ " download failed": " download fejlede",
+ "Peers found: ": "Klienter fundet: ",
+ "No peers found": "Ingen klienter fundet",
+ "Running out of size limit (": "Siden fylder snart for meget (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Ret max side størrelse til \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Max side størrelse ÃĻndret til {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " Ny version af denne side er blevet offentliggjort. GenindlÃĻs venligst siden (F5) for at se nyt indhold!",
+ "This site requests permission:": "Denne side betyder om tilladdelse:",
+ "_(Accept)": "Tillad"
+
+}
diff --git a/src/Translate/languages/de.json b/src/Translate/languages/de.json
new file mode 100644
index 00000000..1cc63b74
--- /dev/null
+++ b/src/Translate/languages/de.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Gratulation, dein Port {0} ist offen. Du bist ein volles Mitglied des ZeroNet Netzwerks!",
+ "Tor mode active, every connection using Onion route.": "Tor Modus aktiv, jede Verbindung nutzt die Onion Route.",
+ "Successfully started Tor onion hidden services.": "Tor versteckte Dienste erfolgreich gestartet.",
+ "Unable to start hidden services, please check your config.": "Nicht mÃļglich versteckte Dienste zu starten.",
+ "For faster connections open {0} port on your router.": "FÃŧr schnellere Verbindungen, Ãļffne Port {0} auf deinem Router.",
+ "Your connection is restricted. Please, open {0} port on your router": "Deine Verbindung ist eingeschränkt. Bitte Ãļffne Port {0} auf deinem Router",
+ "or configure Tor to become a full member of the ZeroNet network.": "oder konfiguriere Tor um ein volles Mitglied des ZeroNet Netzwerks zu werden.",
+
+ "Select account you want to use in this site:": "Wähle das Konto, das du auf dieser Seite benutzen willst:",
+ "currently selected": "aktuell ausgewählt",
+ "Unique to site": "Eindeutig zur Seite",
+
+ "Content signing failed": "Signierung des Inhalts fehlgeschlagen",
+ "Content publish queued for {0:.0f} seconds.": "VerÃļffentlichung des Inhalts um {0:.0f} Sekunden verzÃļgert.",
+ "Content published to {0} peers.": "Inhalt zu {0} Peers verÃļffentlicht.",
+ "No peers found, but your content is ready to access.": "Keine Peers gefunden, aber dein Inhalt ist bereit zum Zugriff.",
+ "Your network connection is restricted. Please, open {0} port": "Deine Netzwerkverbindung ist beschränkt. Bitte Ãļffne Port {0}",
+ "on your router to make your site accessible for everyone.": "auf deinem Router um deine Seite fÃŧr Jeden zugänglich zu machen.",
+ "Content publish failed.": "Inhalt konnte nicht verÃļffentlicht werden.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Diese Datei wird noch synchronisiert. Wenn jetzt geschrieben wird geht der vorherige Inhalt verloren.",
+ "Write content anyway": "Inhalt trotzdem schreiben",
+ "New certificate added:": "Neues Zertifikat hinzugefÃŧgt:",
+ "You current certificate:": "Dein aktuelles Zertifikat:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Ãndere es zu {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Zertifikat geändert zu: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Seite geklont",
+
+ "You have successfully changed the web interface's language!": "Du hast die Sprache des Webinterface erfolgreich geändert!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Aufgrund des Browsercaches kann die volle Transformation Minuten dauern.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Die Verbindung mit UiServer Websocketist abgebrochen. Neu verbinden...",
+ "Connection with UiServer Websocket recovered.": "Die Verbindung mit UiServer Websocket wurde wiederhergestellt.",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket Fehler, bitte Seite neu laden.",
+ " Connecting...": " Verbinden...",
+ "Site size: ": "SeitengrÃļÃe: ",
+ "MB is larger than default allowed ": "MB ist grÃļÃer als der erlaubte Standart",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Ãffne Seite und setze das Limit auf \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " Dateien mÃŧssen noch heruntergeladen werden",
+ " downloaded": " heruntergeladen",
+ " download failed": " Herunterladen fehlgeschlagen",
+ "Peers found: ": "Peers gefunden: ",
+ "No peers found": "Keine Peers gefunden",
+ "Running out of size limit (": "Das Speicherlimit ist bald ausgeschÃļpft (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Limit auf \" + site_info.next_size_limit + \"MB ändern",
+ "Site size limit changed to {0}MB": "Speicherlimit fÃŧr diese Seite auf {0}MB geändert",
+ " New version of this page has just released. Reload to see the modified content.": " Neue version dieser Seite wurde gerade verÃļffentlicht. Lade die Seite neu um den geänderten Inhalt zu sehen.",
+ "This site requests permission:": "Diese Seite fordert rechte:",
+ "_(Accept)": "Genehmigen"
+
+}
diff --git a/src/Translate/languages/es.json b/src/Translate/languages/es.json
new file mode 100644
index 00000000..4cac077b
--- /dev/null
+++ b/src/Translate/languages/es.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "ÂĄFelicidades! tu puerto {0} estÃĄ abierto. ÂĄEres un miembro completo de la red Zeronet!",
+ "Tor mode active, every connection using Onion route.": "Modo Tor activado, cada conexiÃŗn usa una ruta Onion.",
+ "Successfully started Tor onion hidden services.": "Tor ha iniciado satisfactoriamente la ocultaciÃŗn de los servicios onion.",
+ "Unable to start hidden services, please check your config.": "No se puedo iniciar los servicios ocultos, por favor comprueba tu configuraciÃŗn.",
+ "For faster connections open {0} port on your router.": "Para conexiones mÃĄs rÃĄpidas abre el puerto {0} en tu router.",
+ "Your connection is restricted. Please, open {0} port on your router": "Tu conexiÃŗn estÃĄ limitada. Por favor, abre el puerto {0} en tu router",
+ "or configure Tor to become a full member of the ZeroNet network.": "o configura Tor para convertirte en un miembro completo de la red ZeroNet.",
+
+ "Select account you want to use in this site:": "Selecciona la cuenta que quieres utilizar en este sitio:",
+ "currently selected": "actualmente seleccionada",
+ "Unique to site": "Ãnica para el sitio",
+
+ "Content signing failed": "Firma del contenido fallida",
+ "Content publish queued for {0:.0f} seconds.": "PublicaciÃŗn de contenido en cola durante {0:.0f} segundos.",
+ "Content published to {0} peers.": "Contenido publicado para {0} pares.",
+ "No peers found, but your content is ready to access.": "No se ha encontrado pares, pero tu contenido estÃĄ listo para ser accedido.",
+ "Your network connection is restricted. Please, open {0} port": "Tu conexiÃŗn de red estÃĄ restringida. Por favor, abre el puerto{0}",
+ "on your router to make your site accessible for everyone.": "en tu router para hacer tu sitio accesible a todo el mundo.",
+ "Content publish failed.": "PublicaciÃŗn de contenido fallida.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Este archivo estÃĄ aÃēn sincronizado, si le escribes ahora el contenido previo podrÃa perderse.",
+ "Write content anyway": "Escribir el contenido de todas formas",
+ "New certificate added:": "Nuevo certificado aÃąadido:",
+ "You current certificate:": "Tu certificado actual:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Cambia esto a {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certificado cambiado a: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Sitio clonado",
+
+ "You have successfully changed the web interface's language!": "ÂĄHas cambiado con Êxito el idioma de la interfaz web!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Debido a la cachÊ del navegador, la transformaciÃŗn completa podrÃa llevar unos minutos.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Se perdiÃŗ la conexiÃŗn con UiServer Websocket. Reconectando...",
+ "Connection with UiServer Websocket recovered.": "ConexiÃŗn con UiServer Websocket recuperada.",
+ "UiServer Websocket error, please reload the page.": "Error de UiServer Websocket, por favor recarga la pÃĄgina.",
+ " Connecting...": " Conectando...",
+ "Site size: ": "TamaÃąo del sitio: ",
+ "MB is larger than default allowed ": "MB es mÃĄs grande de lo permitido por defecto",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Abre tu sitio and establece el lÃmite de tamaÃąo a \" + site_info.next_size_limit + \"MBs",
+ " files needs to be downloaded": " Los archivos necesitan ser descargados",
+ " downloaded": " descargados",
+ " download failed": " descarga fallida",
+ "Peers found: ": "Pares encontrados: ",
+ "No peers found": "No se han encontrado pares",
+ "Running out of size limit (": "Superando el tamaÃąo lÃmite (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Establece ellÃmite a \" + site_info.next_size_limit + \"MB ändern",
+ "Site size limit changed to {0}MB": "LÃmite de tamaÃąo del sitio cambiado a {0}MBs",
+ " New version of this page has just released. Reload to see the modified content.": " Se ha publicado una nueva versiÃŗn de esta pÃĄgina . Recarga para ver el contenido modificado.",
+ "This site requests permission:": "Este sitio solicita permiso:",
+ "_(Accept)": "Conceder"
+
+}
diff --git a/src/Translate/languages/fa.json b/src/Translate/languages/fa.json
new file mode 100644
index 00000000..e644247a
--- /dev/null
+++ b/src/Translate/languages/fa.json
@@ -0,0 +1,50 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "ØĒØ¨ØąÛÚŠØ Ø¯ØąÚ¯Ø§Ų {0} Ø´Ų Ø§ Ø¨Ø§Ø˛ Ø´Ø¯Ų Ø§ØŗØĒ. Ø´Ų Ø§ ÛÚŠ ØšØļŲ ØĒŲ Ø§Ų Ø´Ø¨ÚŠŲ ZeroNet ŲØŗØĒÛØ¯!",
+ "Tor mode active, every connection using Onion route.": "ØØ§ŲØĒ Tor ŲØšØ§Ų Ø§ØŗØĒØ ŲØą Ø§ØąØĒØ¨Ø§Øˇ Ø§Ø˛ Ų ØŗÛØąÛØ§Ø¨Û ŲžÛØ§Ø˛ (Onion) Ø§ØŗØĒŲØ§Ø¯Ų Ų ÛâÚŠŲØ¯.",
+ "Successfully started Tor onion hidden services.": "ØŽØ¯Ų Ø§ØĒ ŲžŲŲØ§Ų ŲžÛØ§Ø˛ (Onion) Tor با Ų ŲŲŲÛØĒ ØąØ§ŲâØ§ŲØ¯Ø§Ø˛Û شد.",
+ "Unable to start hidden services, please check your config.": "ŲØ§Ø¯Øą Ø¨Ų ØąØ§ŲâØ§ŲØ¯Ø§Ø˛Û ØŽØ¯Ų Ø§ØĒ ŲžŲŲØ§Ų ŲÛØŗØĒÛŲ Ø ŲØˇŲا ØĒŲØ¸ÛŲ Ø§ØĒ ØŽŲØ¯ ØąØ§ Ø¨ØąØąØŗÛ ŲŲ Ø§ÛÛØ¯.",
+ "For faster connections open {0} port on your router.": "Ø¨ØąØ§Û Ø§ØąØĒØ¨Ø§ØˇØ§ØĒ ØŗØąÛØšØĒØą Ø¯ØąÚ¯Ø§Ų {0} ØąØ§ Ø¨Øą ØąŲÛ Ų ØŗÛØąÛاب (ØąŲØĒØą) ØŽŲØ¯ Ø¨Ø§Ø˛ ŲŲ Ø§ÛÛØ¯.",
+ "Your connection is restricted. Please, open {0} port on your router": "Ø§ØąØĒØ¨Ø§Øˇ Ø´Ų Ø§ Ų ØØ¯ŲØ¯âØ´Ø¯Ų Ø§ØŗØĒ. ŲØˇŲا Ø¯ØąÚ¯Ø§Ų {0} ØąØ§ Ø¯Øą Ų ØŗÛØąÛاب (ØąŲØĒØą) ØŽŲØ¯ Ø¨Ø§Ø˛ ŲŲ Ø§ÛÛØ¯",
+ "or configure Tor to become a full member of the ZeroNet network.": "ÛØ§ ŲžÛÚŠØąØ¨ŲØ¯Û Tor ØąØ§ Ø§ŲØŦØ§Ų Ø¯ŲÛØ¯ ØĒا Ø¨Ų ÛÚŠ ØšØļŲ ØĒŲ Ø§Ų Ø´Ø¨ÚŠŲ ZeroNet ØĒبدÛŲ Ø´ŲÛØ¯.",
+
+ "Select account you want to use in this site:": "ØØŗØ§Ø¨Û ØąØ§ ÚŠŲ Ų ÛâØŽŲØ§ŲÛØ¯ Ø¯Øą اÛŲ ØŗØ§ÛØĒ Ø§ØŗØĒŲØ§Ø¯Ų ÚŠŲÛØ¯Ø Ø§ŲØĒØŽØ§Ø¨ ÚŠŲÛØ¯:",
+ "currently selected": "Ø¯Øą ØØ§Ų ØØ§ØļØą Ø§ŲØĒØŽØ§Ø¨âشدŲ",
+ "Unique to site": "Ų ØŽØĒØĩ Ø¨Ų ØŗØ§ÛØĒ",
+
+ "Content signing failed": "Ø§Ų ØļØ§Û Ų ØØĒŲØ§ با Ø´ÚŠØŗØĒ Ų ŲØ§ØŦŲ Ø´Ø¯",
+ "Content publish queued for {0:.0f} seconds.": "Ų ØØĒŲØ§ Ø¯Øą ØĩŲ Ø§ŲØĒØ´Ø§Øą با {0:.0f} ØĢاŲÛŲ ØĒØ§ØŽÛØą ŲØąØ§Øą Ú¯ØąŲØĒ.",
+ "Content published to {0} peers.": "Ų ØØĒŲØ§ Ø¨ØąØ§Û {0} ØĒؚداد ŲŲ ØĒا Ø§ŲØĒØ´Ø§Øą ÛØ§ŲØĒ.",
+ "No peers found, but your content is ready to access.": "ŲŲ ØĒاÛÛ ÛØ§ŲØĒ ŲØ´Ø¯Ø Ø§Ų Ø§ Ų ØØĒŲØ§Û Ø´Ų Ø§ ØĸŲ Ø§Ø¯Ų Ø¯ØŗØĒØąØŗÛ Ø§ØŗØĒ.",
+ "Your network connection is restricted. Please, open {0} port": "Ø§ØąØĒØ¨Ø§Øˇ Ø´Ø¨ÚŠŲ Ø´Ų Ø§ Ų ØØ¯ŲØ¯âØ´Ø¯Ų Ø§ØŗØĒ. ŲØˇŲا Ø¯ØąÚ¯Ø§Ų {0} ØąØ§",
+ "on your router to make your site accessible for everyone.": "Ø¯Øą Ų ØŗÛØąÛاب (ØąŲØĒØą) ØŽŲØ¯ Ø¨Ø§Ø˛ ÚŠŲÛØ¯ ØĒا ØŗØ§ÛØĒ ØŽŲØ¯ ØąØ§ Ø¨ØąØ§Û ŲŲ Ų Ø¯Øą Ø¯ØŗØĒØąØŗ ŲØąØ§Øą دŲÛØ¯.",
+ "Content publish failed.": "Ø§ŲØĒØ´Ø§Øą Ų ØØĒŲØ§ Ų ŲŲŲ ŲØ¨Ųد.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "اÛŲ ŲØ§ÛŲ ŲŲ ÚŲØ§Ų ŲŲ Ú¯Ø§Ų Ø§ØŗØĒØ Ø§Ú¯Ø˛ Ø´Ų Ø§ ØĸŲ ØąØ§ بŲŲÛØŗÛØ¯Ø Ų Ų ÚŠŲ Ø§ØŗØĒ Ų ØØĒŲØ§Û ŲØ¨ŲÛ Ø§Ø˛âØ¨ÛŲ ØąŲØ¯.",
+ "Write content anyway": "Ø¯Øą ŲØą ØĩŲØąØĒ Ų ØØĒŲØ§ ØąØ§ بŲŲÛØŗ",
+ "New certificate added:": "Ú¯ŲØ§ŲÛ ØŦØ¯ÛØ¯Û Ø§ŲØ˛ŲØ¯Ų Ø´Ø¯:",
+ "You current certificate:": "Ú¯ŲØ§ŲÛ ŲØšŲÛ Ø´Ų Ø§:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "ØĒØēÛÛØąØ´ Ø¨Ø¯Ų Ø¨Ų {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Ú¯ŲØ§ŲÛŲØ§Ų Ų Ø¨Ų: {auth_type}/{auth_user_name}@{domain} ØĒØēÛÛØą ŲžÛØ¯Ø§ ÚŠØąØ¯.",
+ "Site cloned": "ØŗØ§ÛØĒ ŲŲ ØŗØ§ŲâØŗØ§Ø˛Û Ø´Ø¯",
+
+ "You have successfully changed the web interface's language!": "Ø´Ų Ø§ با Ų ŲŲŲÛØĒ Ø˛Ø¨Ø§Ų ØąØ§Ø¨Øˇ ŲØ¨ ØąØ§ ØĒØēÛÛØą Ø¯Ø§Ø¯Ûد!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Ø¨Ų Ø¯ŲÛŲ Ø°ØŽÛØąŲâØŗØ§Ø˛Û Ø¯Øą Ų ØąŲØąâÚ¯ØąØ Ø§Ų ÚŠØ§Ų Ø¯Ø§ØąØ¯ ØĒØēÛÛØą Ø´ÚŠŲ ÚŠØ§Ų Ų ÚŲØ¯ دŲÛŲŲ ØˇŲŲ Ø¨ÚŠØ´Ø¯.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "اØĒØĩØ§Ų Ø¨Ø§ UiServer Websocket ŲØˇØš شد. اØĒØĩØ§Ų Ø¯ŲØ¨Ø§ØąŲ...",
+ "Connection with UiServer Websocket recovered.": "Ø§ØąØĒØ¨Ø§Øˇ با UiServer Websocket Ø¯ŲØ¨Ø§ØąŲ Ø¨ØąâŲØąØ§Øą شد.",
+ "UiServer Websocket error, please reload the page.": "ØŽØˇØ§Û UiServer Websocket, ŲØˇŲا ØĩŲØŲ ØąØ§ Ø¯ŲØ¨Ø§ØąŲ Ø¨Ø§ØąÚ¯ÛØąÛ ÚŠŲÛØ¯.",
+ " Connecting...": " Ø¨ØąŲØąØ§ØąÛ Ø§ØąØĒØ¨Ø§Øˇ...",
+ "Site size: ": "ØØŦŲ ØŗØ§ÛØĒ: ",
+ "MB is larger than default allowed ": "MB Ø¨ÛØ´ØĒØą Ø§Ø˛ ŲžÛØ´âŲØąØļ Ų ØŦØ§Ø˛ Ø§ØŗØĒ ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "ØŗØ§ÛØĒ ØąØ§ Ø¨Ø§Ø˛ ÚŠØąØ¯Ų Ų Ų ØØ¯ŲØ¯Ų ØØŦŲ ØąØ§ Ø¨Ų \" + site_info.next_size_limit + \"MB ØĒŲØ¸ÛŲ ÚŠŲ",
+ " files needs to be downloaded": " ŲØ§ÛŲâŲØ§ÛÛ ÚŠŲ ŲÛØ§Ø˛ Ø§ØŗØĒØ Ø¯Ø§ŲŲŲØ¯ Ø´ŲŲØ¯",
+ " downloaded": " داŲŲŲØ¯ شد",
+ " download failed": " داŲŲŲØ¯ Ų ŲŲŲ ŲØ¨Ųد",
+ "Peers found: ": "ÚŲØ¯ ŲŲ ØĒا ÛØ§ŲØĒ شد: ",
+ "No peers found": "ŲŲ ØĒاÛÛ ÛØ§ŲØĒ ŲØ´Ø¯",
+ "Running out of size limit (": "ØšØ¨ŲØą ÚŠØąØ¯Ų Ø§Ø˛ Ų ØØ¯ŲØ¯Ų ØØŦŲ (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Ų ØØ¯ŲØ¯Ų ØąØ§ Ø¨Ų \" + site_info.next_size_limit + \"MB ØĒŲØ¸ÛŲ ÚŠŲ",
+ "Site size limit changed to {0}MB": "Ų ØØ¯ŲØ¯Ų ØØŦŲ ØŗØ§ÛØĒ Ø¨Ų {0}MB ØĒØēÛÛØą ÚŠØąØ¯",
+ " New version of this page has just released. Reload to see the modified content.": " ŲØŗØŽŲ ØŦØ¯ÛØ¯Û Ø§Ø˛ اÛŲ ØĩŲØŲ Ų ŲØĒØ´Øą Ø´Ø¯Ų Ø§ØŗØĒ. Ø¨ØąØ§Û Ų Ø´Ø§ŲØ¯Ų Ų ØØĒŲØ§Û ØĒØēÛÛØąâÛØ§ŲØĒŲ Ø¯ŲØ¨Ø§ØąŲ Ø¨Ø§ØąÚ¯ÛØąÛ ŲŲ Ø§ÛÛØ¯.",
+ "This site requests permission:": "اÛŲ ØŗØ§ÛØĒ Ø¯ØąØŽŲØ§ØŗØĒ Ų ØŦŲØ˛ Ų ÛâÚŠŲØ¯:",
+ "_(Accept)": "_(ŲžØ°ÛØąŲØĒŲ)"
+}
diff --git a/src/Translate/languages/fr.json b/src/Translate/languages/fr.json
new file mode 100644
index 00000000..b46ef2c3
--- /dev/null
+++ b/src/Translate/languages/fr.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "FÊlicitations, le port ({0}) est ouvert. Vous ÃĒtes maintenant membre de ZeroNet!!",
+ "Tor mode active, every connection using Onion route.": "Tor actif, toutes les connexions utilisent un routage Onion.",
+ "Successfully started Tor onion hidden services.": "Tor activÊ avec succès.",
+ "Unable to start hidden services, please check your config.": "Impossible d'activer Tor, veuillez vÊrifier votre configuration.",
+ "For faster connections open {0} port on your router.": "Pour une meilleure connectivitÊ, ouvrez le port {0} sur votre routeur.",
+ "Your connection is restricted. Please, open {0} port on your router": "ConnectivitÊ limitÊe. Veuillez ouvrir le port {0} sur votre routeur",
+ "or configure Tor to become a full member of the ZeroNet network.": "ou configurez Tor afin d'avoir accès aux pairs ZeroNet Onion.",
+
+ "Select account you want to use in this site:": "SÊlectionnez le compte que vous voulez utiliser pour ce site:",
+ "currently selected": "prÊsentement sÊlectionnÊ",
+ "Unique to site": "Unique au site",
+
+ "Content signing failed": "Ãchec à la signature du contenu",
+ "Content publish queued for {0:.0f} seconds.": "Publication du contenu diffÊrÊe {0:.0f} secondes.",
+ "Content published to {0} peers.": "Contenu publiÊ à {0} pairs.",
+ "No peers found, but your content is ready to access.": "Aucun pair trouvÊ, mais votre contenu est accessible.",
+ "Your network connection is restricted. Please, open {0} port": "ConnectivitÊ limitÊe. Veuillez ouvrir le port {0}",
+ "on your router to make your site accessible for everyone.": "sur votre routeur pour que votre site soit accessible à tous.",
+ "Content publish failed.": "Ãchec de la publication du contenu.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Ce fichier n'est pas à jour, si vous le modifiez maintenant une version antÊrieure pourrait ÃĒtre perdue.",
+ "Write content anyway": "Enregistrer quand mÃĒme",
+ "New certificate added:": "Nouveau cetificat ajoutÊ :",
+ "You current certificate:": "Votre certificat actuel :",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Changer pour {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certificat changÊ pour : {auth_type}/{auth_user_name}@{domain}-ra.",
+ "Site cloned": "Site clonÊ",
+
+ "You have successfully changed the web interface's language!": "Vous avez modifiÊ la langue d'affichage avec succès!",
+ "Due to the browser's caching, the full transformation could take some minute.": "En fonction du cache du navigateur, la modification pourrait prendre quelques minutes.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Connexion avec UiServer Websocket rompue. Reconnexion...",
+ "Connection with UiServer Websocket recovered.": "Connexion avec UiServer Websocket rÊtablie.",
+ "UiServer Websocket error, please reload the page.": "Erreur du UiServer Websocket, veuillez recharger la page.",
+ " Connecting...": " Connexion...",
+ "Site size: ": "Taille du site : ",
+ "MB is larger than default allowed ": "MB est plus large que la taille permise par dÊfaut ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Ouvrez le site et augmentez la taille maximale à \" + site_info.next_size_limit + \"MB-ra",
+ " files needs to be downloaded": " fichiers doivent ÃĒtre tÊlÊchargÊs",
+ " downloaded": " tÊlÊchargÊs",
+ " download failed": " Êchec de tÊlÊchargement",
+ "Peers found: ": "Pairs trouvÊs: ",
+ "No peers found": "Aucun pair trouvÊ",
+ "Running out of size limit (": "Vous approchez la taille maximale (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Augmentez la taille maximale à \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Taille maximale du site changÊe à {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " Une nouvelle version de cette page vient d'ÃĒtre publiÊe. Rechargez pour voir les modifications.",
+ "This site requests permission:": "Ce site requiert une permission :",
+ "_(Accept)": "Autoriser"
+
+}
diff --git a/src/Translate/languages/hu.json b/src/Translate/languages/hu.json
new file mode 100644
index 00000000..f9487f1d
--- /dev/null
+++ b/src/Translate/languages/hu.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "GratulÃĄlunk, a portod ({0}) nyitva van. Teljes ÊrtÊkÅą tagja vagy a hÃĄlÃŗzatnak!",
+ "Tor mode active, every connection using Onion route.": "Tor mÃŗd aktÃv, minden kapcsolat az Onion hÃĄlÃŗzaton keresztÃŧl tÃļrtÊnik.",
+ "Successfully started Tor onion hidden services.": "Sikeresen elindultak a Tor onion titkos szolgÃĄltatÃĄsok.",
+ "Unable to start hidden services, please check your config.": "Nem sikerÃŧlt elindÃtani a Tor onion szolgÃĄltatÃĄsokat. KÊrjÃŧk, ellenÅrizd a beÃĄllÃtÃĄsokat!",
+ "For faster connections open {0} port on your router.": "A gyorsabb kapcsolatok ÊrdekÊben nyisd ki a {0} portot a routereden.",
+ "Your connection is restricted. Please, open {0} port on your router": "A kapcsolatod korlÃĄtozott. KÊrjÃŧk, nyisd ki a {0} portot a routereden",
+ "or configure Tor to become a full member of the ZeroNet network.": "vagy ÃĄllÃtsd be a Tor kliensed, hogy teljes ÊrtÊkÅą tagja legyÊl a hÃĄlÃŗzatnak!",
+
+ "Select account you want to use in this site:": "VÃĄlaszd ki az oldalhoz hasznÃĄlt felhasznÃĄlÃŗnevet:",
+ "currently selected": "jelenleg kijelÃļlt",
+ "Unique to site": "Egyedi az oldalon",
+
+ "Content signing failed": "Tartalom alÃĄÃrÃĄsa sikeretelen",
+ "Content publish queued for {0:.0f} seconds.": "Tartalom publikÃĄlÃĄsa elhalasztva {0:.0f} mÃĄsodperccel.",
+ "Content published to {0} peers.": "Tartalom publikÃĄlva {0} fÊl rÊszÊre.",
+ "No peers found, but your content is ready to access.": "AktÃv csatlakozÃĄsi pont nem talÃĄlhatÃŗ, de a tartalmad kÊszen ÃĄll a kiszolgÃĄlÃĄsra.",
+ "Your network connection is restricted. Please, open {0} port": "A kapcsolatod korlÃĄtozott. KÊrjÃŧk, nyisd ki a {0} portot",
+ "on your router to make your site accessible for everyone.": "a routereden, hogy az oldalad mindenki szÃĄmÃĄra elÊrhetÅ legyen.",
+ "Content publish failed.": "Sikertelen tartalom publikÃĄlÃĄs.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Ez a fÃĄjl mÊg letÃļltÊs alatt van, ha most felÃŧlÃrod a korÃĄbbi tartalma elveszhet.",
+ "Write content anyway": "FelÃŧlÃrÃĄs",
+ "New certificate added:": "Ãj tanÃēsÃtvÃĄny hozzÃĄadva:",
+ "You current certificate:": "A jelenlegi tanÃēsÃtvÃĄnyod: ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "VÃĄltoztatÃĄs {auth_type}/{auth_user_name}@{domain}-ra",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "A tanÃēsÃtvÃĄny megvÃĄltozott {auth_type}/{auth_user_name}@{domain}-ra.",
+ "Site cloned": "Az oldal klÃŗnozva",
+
+ "You have successfully changed the web interface's language!": "Sikeresen ÃĄtÃĄllÃtottad a web felÃŧlet nyelvÊt!",
+ "Due to the browser's caching, the full transformation could take some minute.": "A bÃļngÊszÅ cache-elÊse miatt egy pÃĄr percig eltarthat a teljes ÃĄtÃĄllÃĄs.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Az UiServer Websocket kapcsolat megszakadt. ÃjracsatlakozÃĄs...",
+ "Connection with UiServer Websocket recovered.": "Az UiServer Websocket kapcsolat visszaÃĄllt.",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket hiba, tÃļltsd Ãējra az oldalt!",
+ " Connecting...": " CsatlakozÃĄs...",
+ "Site size: ": "Oldal mÊrete: ",
+ "MB is larger than default allowed ": "MB nagyobb, mint az engedÊlyezett ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Az oldal megnyitÃĄsa Ês a korlÃĄt mÃŗdosÃtÃĄsa \" + site_info.next_size_limit + \"MB-ra",
+ " files needs to be downloaded": " fÃĄjlt kell letÃļlteni",
+ " downloaded": " letÃļltve",
+ " download failed": " letÃļltÊs sikertelen",
+ "Peers found: ": "TalÃĄlt csatlakozÃĄsi pontok: ",
+ "No peers found": "Nincs csatlakozÃĄsi pont",
+ "Running out of size limit (": "Az oldal hamarosan elÊri a mÊretkorlÃĄtot (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "A korlÃĄt mÃŗdosÃtÃĄsa \" + site_info.next_size_limit + \"MB-ra",
+ "Site size limit changed to {0}MB": "A mÊretkorlÃĄt mÃŗdosÃtva {0}MB-ra",
+ " New version of this page has just released. Reload to see the modified content.": "Az oldal Êpp most mÃŗdosult A megvÃĄltozott tartalomÊrt tÃļltsd Ãējra!",
+ "This site requests permission:": "Az oldal megtekintÊsÊhez szÃŧksÊges jog:",
+ "_(Accept)": "EngedÊlyezÊs"
+
+}
diff --git a/src/Translate/languages/it.json b/src/Translate/languages/it.json
new file mode 100644
index 00000000..47992328
--- /dev/null
+++ b/src/Translate/languages/it.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Congratulazioni, la tua porta {0} è aperta. Ora sei un membro effettivo della rete ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "Modalità Tor attiva, ogni connessione sta usando la rete Onion.",
+ "Successfully started Tor onion hidden services.": "Servizi Tor onion nascosti avviati con successo.",
+ "Unable to start hidden services, please check your config.": "Impossibile avviare i servizi nascosti. Si prega di controllare la propria configurazione!",
+ "For faster connections open {0} port on your router.": "Per avere connessioni piÚ veloci aprire la porta {0} sul router.",
+ "Your connection is restricted. Please, open {0} port on your router": "La tua connessione è limitata. Aprire la porta {0} sul router",
+ "or configure Tor to become a full member of the ZeroNet network.": "o configurare Tor per diventare membro effettivo della rete ZeroNet!",
+
+ "Select account you want to use in this site:": "Seleziona l'account che vuoi utilizzare per questo sito:",
+ "currently selected": "attualmente selezionato",
+ "Unique to site": "Unico sul sito",
+
+ "Content signing failed": "Firma contenuti fallita",
+ "Content publish queued for {0:.0f} seconds.": "Pubblicazione contenuti in coda per {0:.0f} secondi.",
+ "Content published to {0} peers.": "Contenuti pubblicati su {0} peer.",
+ "No peers found, but your content is ready to access.": "Nessun peer trovato, ma i tuoi contenuti sono pronti per l'accesso.",
+ "Your network connection is restricted. Please, open {0} port": "La tua connessione di rete è limitata. Aprire la porta {0} ",
+ "on your router to make your site accessible for everyone.": "sul router, per rendere il sito accessibile a chiunque.",
+ "Content publish failed.": "Pubblicazione contenuti fallita.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Questo file è ancora in sincronizzazione, se viene modificato i contenuti precedenti andranno persi.",
+ "Write content anyway": "Scrivere comunque i contenuti",
+ "New certificate added:": "Aggiunto nuovo certificato:",
+ "You current certificate:": "Il tuo attuale certificato:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Cambiarlo in {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certificato cambianto in: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Sito clonato",
+
+ "You have successfully changed the web interface's language!": "Hai cambiato con successo la lingua dell'interfaccia web!",
+ "Due to the browser's caching, the full transformation could take some minute.": "La trasformazione completa potrebbe richiedre alcuni minuti a causa della cache del browser.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "La connessione con UiServer Websocket è andata persa. Riconnessione...",
+ "Connection with UiServer Websocket recovered.": "Connessione con UiServer Websocket recuperata.",
+ "UiServer Websocket error, please reload the page.": "Errore UiServer Websocket, ricaricare la pagina!",
+ " Connecting...": " Connessione...",
+ "Site size: ": "Dimensione del sito: ",
+ "MB is larger than default allowed ": "MB è piÚ grande del valore predefinito consentito ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Aprire il sito e impostare la dimensione limite a \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " i file devono essere scaricati",
+ " downloaded": " scaricati",
+ " download failed": " scaricamento fallito",
+ "Peers found: ": "Peer trovati: ",
+ "No peers found": "Nessun peer trovato",
+ "Running out of size limit (": "Superato il limite di spazio (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Imposta il limite a \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Limite di spazio cambiato a {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": "E' stata rilasciata una nuova versione di questa pagina Ricaricare per vedere il contenuto modificato!",
+ "This site requests permission:": "Questo sito richiede permessi:",
+ "_(Accept)": "Concedere"
+
+}
diff --git a/src/Translate/languages/jp.json b/src/Translate/languages/jp.json
new file mode 100644
index 00000000..ff10aee4
--- /dev/null
+++ b/src/Translate/languages/jp.json
@@ -0,0 +1,66 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "ããã§ã¨ãããããžããããŧã {0} ãéããžãããããã§ZeroNetãããã¯ãŧã¯ãŽãĄãŗããŧã§ãã",
+ "Tor mode active, every connection using Onion route.": "Torãĸãŧãããĸã¯ããŖãã§ããå ¨ãĻãŽæĨįļã¯OnionãĢãŧããäŊŋį¨ããžãã",
+ "Successfully started Tor onion hidden services.": "Tor onionãĩãŧããšãæŖå¸¸ãĢéå§ããžããã",
+ "Unable to start hidden services, please check your config.": "é襨į¤ēãŽãĩãŧããšãéå§ã§ããžãããč¨åŽãįĸēčĒããĻãã ããã",
+ "For faster connections open {0} port on your router.": "æĨįļãéĢéåãããĢã¯ãĢãŧãŋãŧãŽããŧã {0} ãéããĻãã ããã",
+ "Your connection is restricted. Please, open {0} port on your router": "æĨįļãåļéãããĻããžãããĢãŧãŋãŧãŽããŧã {0} ãéããĻãã ããã",
+ "or configure Tor to become a full member of the ZeroNet network.": "ãžãã¯ãTorãZeroNetãããã¯ãŧã¯ãŽãĄãŗããŧãĢãĒããããĢč¨åŽããĻãã ããã",
+
+ "Select account you want to use in this site:": "ããŽãĩã¤ãã§äŊŋį¨ãããĸãĢãĻãŗãã鏿:",
+ "No certificate": "č¨ŧææ¸ããããžãã",
+ "currently selected": "įžå¨é¸æä¸",
+ "Unique to site": "ãĩã¤ãåēæ",
+
+ "Content signing failed": "ãŗãŗããŗããŽįŊ˛åãĢå¤ąæ",
+ "Content publish queued for {0:.0f} seconds.": "ãŗãŗããŗããŽå Ŧéã¯{0:.0f}į§ãŽããĨãŧãĢå Ĩããããžããã",
+ "Content published to {0}/{1} peers.": "ãĩã¤ããŽæ´æ°ãéįĨæ¸ {0}/{1} ããĸ",
+ "Content published to {0} peers.": "{0}ããĸãĢå Ŧéããããŗãŗããŗãã",
+ "No peers found, but your content is ready to access.": "ããĸã¯čĻã¤ãããžããã§ãããããŗãŗããŗããĢãĸã¯ãģãšããæēåãã§ããžããã",
+ "Your network connection is restricted. Please, open {0} port": "ãããã¯ãŧã¯æĨįļãåļéãããĻããžããããŧã {0} ãéããĻã",
+ "on your router to make your site accessible for everyone.": "čǰã§ããĩã¤ããĢãĸã¯ãģãšã§ãããããĢããĻãã ããã",
+ "Content publish failed.": "ãŗãŗããŗããŽå ŦéãĢå¤ąæããžããã",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "ããŽããĄã¤ãĢã¯ãžã åæããĻããžããäģããæ¸ãčžŧãã¨ãåãŽãŗãŗããŗããå¤ąãããå¯čŊæ§ããããžãã",
+ "Write content anyway": "ã¨ãĢãããŗãŗããŗããæ¸ã",
+ "New certificate added:": "æ°ããč¨ŧææ¸ãčŋŊå ãããžãã:",
+ "You current certificate:": "įžå¨ãŽč¨ŧææ¸:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "{auth_type}/{auth_user_name}@{domain} ãĢ夿´",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "夿´åžãŽč¨ŧææ¸: {auth_type}/{auth_user_name}@{domain}",
+ "Site cloned": "č¤čŖŊããããĩã¤ã",
+
+ "You have successfully changed the web interface's language!": "Webã¤ãŗãŋãŧãã§ãŧãšãŽč¨čĒãæŖå¸¸ãĢ夿´ãããžããīŧ",
+ "Due to the browser's caching, the full transformation could take some minute.": "ããŠãĻãļãŽããŖããˇãĨãĢãããåŽå ¨ãĒ夿ãĢã¯æ°åãããå ´åããããžãã",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "UiServer Websocketã¨ãŽæĨįļãå¤ąãããžãããåæĨįļããĻããžã...",
+ "Connection with UiServer Websocket recovered.": "UiServer Websocketã¨ãŽæĨįļãå垊ããžããã",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocketã¨ãŠãŧãããŧã¸ããĒããŧãããĻãã ããã",
+ " Connecting...": " æĨįļããĻããžã...",
+ "Site size: ": "ãĩã¤ããĩã¤ãē: ",
+ "MB is larger than default allowed ": "MBã¯ãããŠãĢããŽč¨ąåŽšå¤ããã大ããã§ãã ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "ãĩã¤ããéãããĩã¤ãēåļéã \" + site_info.next_size_limit + \"MB ãĢč¨åŽ",
+ " files needs to be downloaded": " ããĄã¤ãĢãããĻãŗããŧãããåŋ čĻããããžã",
+ " downloaded": " ããĻãŗããŧã",
+ " download failed": " ããĻãŗããŧãå¤ąæ",
+ "Peers found: ": "ããĸãčĻã¤ãããžãã: ",
+ "No peers found": "ããĸãčĻã¤ãããžãã",
+ "Running out of size limit (": "ãĩã¤ãēåļéãäŊŋãæãããžãã (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "åļéã \" + site_info.next_size_limit + \"MB ãĢč¨åŽ",
+ "Cloning site...": "ãĩã¤ããč¤čŖŊä¸âĻ",
+ "Site size limit changed to {0}MB": "ãĩã¤ããŽãĩã¤ãēåļéã {0}MB ãĢ夿´ãããžãã",
+ " New version of this page has just released. Reload to see the modified content.": " ããŽããŧã¸ãŽæ°ããããŧã¸ã§ãŗãå Ŧéãããžããã 夿´ããããŗãŗããŗããčĻããĢã¯åčĒãŋčžŧãŋããĻãã ããã",
+ "This site requests permission:": "ããŽãĩã¤ãã¯æ¨ŠéãčĻæąããĻããžã:",
+ "_(Accept)": "_(訹å¯)",
+
+ "Save": "äŋå",
+ "Trackers announcing": "ããŠããĢãŧããįĨãã",
+ "Error": "ã¨ãŠãŧ",
+ "Done": "åŽäē",
+ "Tracker connection error detected.": "ããŠããĢãŧæĨįļã¨ãŠãŧãæ¤åēãããžããã",
+
+ "Update ZeroNet client to latest version?": "ZeroNetã¯ãŠã¤ãĸãŗããææ°įãĢæ´æ°ããžããīŧ",
+ "Update": "æ´æ°",
+ "Restart ZeroNet client?": "ZeroNetã¯ãŠã¤ãĸãŗããåčĩˇåããžããīŧ",
+ "Restart": "åčĩˇå",
+ "Shut down ZeroNet client?": "ZeroNetã¯ãŠã¤ãĸãŗããįĩäēããžããīŧ",
+ "Shut down": "įĩäē"
+}
diff --git a/src/Translate/languages/nl.json b/src/Translate/languages/nl.json
new file mode 100644
index 00000000..985cce7a
--- /dev/null
+++ b/src/Translate/languages/nl.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Gefeliciteerd, je poort {0} is geopend. Je bent een volledig lid van het ZeroNet netwerk!",
+ "Tor mode active, every connection using Onion route.": "Tor modus actief, elke verbinding gebruikt een Onion route.",
+ "Successfully started Tor onion hidden services.": "Tor onion verborgen diensten zijn met succes gestart.",
+ "Unable to start hidden services, please check your config.": "Het was niet mogelijk om verborgen diensten te starten, controleer je configuratie.",
+ "For faster connections open {0} port on your router.": "Voor snellere verbindingen open je de poort {0} op je router.",
+ "Your connection is restricted. Please, open {0} port on your router": "Je verbinding is beperkt. Open altjeblieft poort {0} op je router",
+ "or configure Tor to become a full member of the ZeroNet network.": "of configureer Tor om een volledig lid van het ZeroNet netwerk te worden.",
+
+ "Select account you want to use in this site:": "Selecteer het account die je wilt gebruiken binnen deze site:",
+ "currently selected": "huidige selectie",
+ "Unique to site": "Uniek voor deze site",
+
+ "Content signing failed": "Inhoud ondertekenen mislukt",
+ "Content publish queued for {0:.0f} seconds.": "Publiceren van inhoud staat in de wachtrij voor {0:.0f} seconden.",
+ "Content published to {0} peers.": "Inhoud is gepubliceerd naar {0} peers",
+ "No peers found, but your content is ready to access.": "Geen peers gevonden, maar je inhoud is klaar voor toegang.",
+ "Your network connection is restricted. Please, open {0} port": "Je netwerkverbinding is beperkt. Open alsjeblieft poort {0}",
+ "on your router to make your site accessible for everyone.": "op je router om je site toegankelijk te maken voor iedereen.",
+ "Content publish failed.": "Inhoud publicatie mislukt.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Dit bestand is nog in sync, als je het nu overschrijft, dan is mogelijk de vorige inhoud verloren.",
+ "Write content anyway": "Inhoud toch schrijven",
+ "New certificate added:": "Nieuw certificaat toegevoegd:",
+ "You current certificate:": "Je huidige certificaat:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Verander het naar {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certificaat veranderd naar: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Site gecloned",
+
+ "You have successfully changed the web interface's language!": "Je hebt met succes de taal van de web interface aangepast!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Door caching van je browser kan de volledige transformatie enkele minuten duren.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Verbinding met UiServer Websocket verbroken. Opnieuw verbinden...",
+ "Connection with UiServer Websocket recovered.": "Verbinding met UiServer Websocket hersteld.",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket fout, herlaad alsjeblieft de pagina.",
+ " Connecting...": " Verbinden...",
+ "Site size: ": "Site grootte ",
+ "MB is larger than default allowed ": "MB is groter dan de standaard toegestaan ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Open de site en stel de limeit op de grootte in op \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " bestanden moeten worden gedownload",
+ " downloaded": " gedownload",
+ " download failed": " download mislukt",
+ "Peers found: ": "Peers gevonden: ",
+ "No peers found": "Geen peers gevonden",
+ "Running out of size limit (": "Limeit op grootte bereikt (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Stel limiet in op \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Site limiet op grootte is veranderd naar {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " Een nieuwe versie van deze pagina is zojuist uitgekomen. Herlaad de pagina om de bijgewerkte inhoud te zien.",
+ "This site requests permission:": "Deze site vraagt om permissie:",
+ "_(Accept)": "Toekennen"
+
+}
diff --git a/src/Translate/languages/pl.json b/src/Translate/languages/pl.json
new file mode 100644
index 00000000..679e909d
--- /dev/null
+++ b/src/Translate/languages/pl.json
@@ -0,0 +1,54 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Gratulacje, twÃŗj port {0} jest otwarty. JesteÅ peÅnoprawnym uÅŧytkownikiem sieci ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "Tryb Tor aktywny, kaÅŧde poÅÄ czenie przy uÅŧyciu trasy Cebulowej.",
+ "Successfully started Tor onion hidden services.": "PomyÅlnie zainicjowano ukryte usÅugi cebulowe Tor.",
+ "Unable to start hidden services, please check your config.": "Niezdolny do uruchomienia ukrytych usÅug, proszÄ sprawdÅē swojÄ konfiguracjÄ.",
+ "For faster connections open {0} port on your router.": "Dla szybszego poÅÄ czenia otwÃŗrz {0} port w swoim routerze.",
+ "Your connection is restricted. Please, open {0} port on your router": "PoÅÄ czenie jest ograniczone. ProszÄ, otwÃŗrz port {0} w swoim routerze",
+ "or configure Tor to become a full member of the ZeroNet network.": "bÄ dÅē skonfiguruj Tora by staÄ siÄ peÅnoprawnym uÅŧytkownikiem sieci ZeroNet.",
+
+ "Select account you want to use in this site:": "Wybierz konto ktÃŗrego chcesz uÅŧyÄ na tej stronie:",
+ "currently selected": "aktualnie wybrany",
+ "Unique to site": "Unikatowy dla strony",
+
+ "Content signing failed": "Podpisanie treÅci zawiodÅo",
+ "Content publish queued for {0:.0f} seconds.": "Publikacja treÅci wstrzymana na {0:.0f} sekund(y).",
+ "Content published to {0} peers.": "TreÅÄ opublikowana do {0} uzytkownikÃŗw.",
+ "No peers found, but your content is ready to access.": "Nie odnaleziono uÅŧytkownikÃŗw, ale twoja treÅÄ jest dostÄpna.",
+ "Your network connection is restricted. Please, open {0} port": "Twoje poÅÄ czenie sieciowe jest ograniczone. ProszÄ, otwÃŗrz port {0}",
+ "on your router to make your site accessible for everyone.": "w swoim routerze, by twoja strona mogÅabyÄ dostÄpna dla wszystkich.",
+ "Content publish failed.": "Publikacja treÅci zawiodÅa.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Ten plik wciÄ Åŧ siÄ synchronizuje, jeÅli zapiszesz go teraz, poprzednia treÅÄ moÅŧe zostaÄ utracona.",
+ "Write content anyway": "Zapisz treÅÄ mimo wszystko",
+ "New certificate added:": "Nowy certyfikat dodany:",
+ "You current certificate:": "TwÃŗj aktualny certyfikat: ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "ZmieÅ na {auth_type}/{auth_user_name}@{domain}-ra",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certyfikat zmieniony na {auth_type}/{auth_user_name}@{domain}-ra.",
+ "Site cloned": "Strona sklonowana",
+
+ "You have successfully changed the web interface's language!": "PomyÅlnie zmieniono jÄzyk interfejsu stron!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Ze wzglÄdu na buforowanie przeglÄ darki, peÅna zmiana moÅŧe zajÄ Ä parÄ minutÄ.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "PoÅÄ czenie z UiServer Websocket zostaÅo przerwane. Ponowne ÅÄ czenie...",
+ "Connection with UiServer Websocket recovered.": "PoÅÄ czenie z UiServer Websocket przywrÃŗcone.",
+ "UiServer Websocket error, please reload the page.": "BÅÄ d UiServer Websocket, prosze odÅwieÅŧyÄ stronÄ.",
+ " Connecting...": " ÅÄ czenie...",
+ "Site size: ": "Rozmiar strony: ",
+ "MB is larger than default allowed ": "MB jest wiÄkszy niÅŧ domyÅlnie dozwolony ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "OtwÃŗrz stronÄ i ustaw limit na \" + site_info.next_size_limit + \"MBÃŗw",
+ " files needs to be downloaded": " pliki muszÄ zostaÄ ÅciÄ gniÄte",
+ " downloaded": " ÅciÄ gniÄte",
+ " download failed": " ÅciÄ ganie nie powiodÅo siÄ",
+ "Peers found: ": "Odnaleziono uÅŧytkownikÃŗw: ",
+ "No peers found": "Nie odnaleziono uÅŧytkownikÃŗw",
+ "Running out of size limit (": "Limit rozmiaru na wyczerpaniu (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Ustaw limit na \" + site_info.next_size_limit + \"MBÃŗw",
+ "Site size limit changed to {0}MB": "Rozmiar limitu strony zmieniony na {0}MBÃŗw",
+ " New version of this page has just released. Reload to see the modified content.": "Nowa wersja tej strony wÅaÅnie zostaÅa wydana. OdÅwieÅŧ by zobaczyÄ nowÄ , zmodyfikowanÄ treÅÄ strony.",
+ "This site requests permission:": "Ta strona wymaga uprawnieÅ:",
+ "_(Accept)": "Przyznaj uprawnienia",
+
+ "Sign and publish": "Podpisz i opublikuj",
+ "Restart ZeroNet client?": "UruchomiÄ ponownie klienta ZeroNet?",
+ "Restart": "Uruchom ponownie"
+}
diff --git a/src/Translate/languages/pt-br.json b/src/Translate/languages/pt-br.json
new file mode 100644
index 00000000..a842684f
--- /dev/null
+++ b/src/Translate/languages/pt-br.json
@@ -0,0 +1,57 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "ParabÊns, a porta{0} estÃĄ aberta. VocÃĒ ÃŠ um membro completo da rede ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "Modo Tor ativado, todas as conexÃĩes usam a rota Onion.",
+ "Successfully started Tor onion hidden services.": "Os serviços ocultos Tor onion foram inciados com sucesso.",
+ "Unable to start hidden services, please check your config.": "NÃŖo foi possÃvel iniciar os serviços ocultos, por favor verifique suas configuraçÃĩes.",
+ "For faster connections open {0} port on your router.": "Para conexÃĩes mais rÃĄpidas, abra a porta {0} em seu roteador.",
+ "Your connection is restricted. Please, open {0} port on your router": "Sua conexÃŖo estÃĄ restrita. Por favor, abra a porta {0} em seu roteador",
+ "or configure Tor to become a full member of the ZeroNet network.": "ou configure o Tor para se tornar um membro completo da rede ZeroNet.",
+
+ "Select account you want to use in this site:": "Selecione a conta que deseja usar nesse site:",
+ "currently selected": "atualmente selecionada",
+ "Unique to site": "Ãnica para o site",
+
+ "Content signing failed": "Assinatura de conteÃēdo falhou",
+ "Content publish queued for {0:.0f} seconds.": "PublicaÃ§ÃŖo de conteÃēdo na fila por {0:.0f} segundos.",
+ "Content published to {0} peers.": "ConteÃēdo publicado para {0} peers.",
+ "No peers found, but your content is ready to access.": "Nenhum peer encontrado, mas seu conteÃēdo estÃĄ pronto para ser acessado.",
+ "Your network connection is restricted. Please, open {0} port": "Sua conexÃŖo de rede estÃĄ restrita. Por favor, abra a porta {0}",
+ "on your router to make your site accessible for everyone.": "em seu roteador para tornar seu site acessÃvel para todos.",
+ "Content publish failed.": "PublicaÃ§ÃŖo de conteÃēdo falhou.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Esse arquivo ainda estÃĄ sincronizado, se escreve-lo agora o conteÃēdo anterior poderÃĄ ser perdido.",
+ "Write content anyway": "Escrever o conteÃēdo mesmo assim",
+ "New certificate added:": "Novo certificado adicionado:",
+ "You current certificate:": "Seu certificado atual:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Alterar para {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Certificado alterado para: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Site clonado",
+
+ "You have successfully changed the web interface's language!": "VocÃĒ alterou o idioma da interface web com sucesso!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Devido ao cache do navegador, a transformaÃ§ÃŖo completa pode levar alguns minutos.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "A conexÃŖo com UiServer Websocket foi perdida. Reconectando...",
+ "Connection with UiServer Websocket recovered.": "ConexÃŖo com UiServer Websocket recuperada.",
+ "UiServer Websocket error, please reload the page.": "Erro de UiServer Websocket, por favor atualize a pÃĄgina.",
+ " Connecting...": " Conectando...",
+ "Site size: ": "Tamanho do site: ",
+ "MB is larger than default allowed ": "MB Ê maior do que o tamanho permitido por padrÃŖo",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Abrir site e definir limite de tamanho para \" + site_info.next_size_limit + \"MBs",
+ " files needs to be downloaded": " os arquivos precisam ser baixados",
+ " downloaded": " baixados",
+ " download failed": " falha no download",
+ "Peers found: ": "Peers encontrados: ",
+ "No peers found": "Nenhum peer encontrado",
+ "Running out of size limit (": "Passando do tamanho limite (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Definir limite para \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Limite de tamanho do site alterado para {0}MBs",
+ " New version of this page has just released. Reload to see the modified content.": " Uma nova versÃŖo desse site acaba de ser publicada. Atualize para ver o conteÃēdo modificado.",
+ "This site requests permission:": "Esse site solicita permissÃŖo:",
+ "_(Accept)": "Conceder",
+
+ "Save": "Salvar",
+ "Trackers announcing": "Trackers anunciando",
+ "Error": "Erro",
+ "Done": "ConcluÃdo",
+ "Tracker connection error detected.": "Erro de conexÃŖo com tracker foi detectado."
+
+}
diff --git a/src/Translate/languages/ru.json b/src/Translate/languages/ru.json
new file mode 100644
index 00000000..96c84b91
--- /dev/null
+++ b/src/Translate/languages/ru.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "ĐОСдŅавĐģŅĐĩĐŧ, Đ˛Đ°Ņ ĐŋĐžŅŅ {0} ĐžŅĐēŅŅŅ. ĐŅ ĐŋĐžĐģĐŊĐžŅĐĩĐŊĐŊŅĐš ŅŅаŅŅĐŊиĐē ŅĐĩŅи ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "Đ ĐĩĐļиĐŧ Tor вĐēĐģŅŅĐĩĐŊ, вŅĐĩ ŅĐžĐĩдиĐŊĐĩĐŊĐ¸Ņ ĐžŅŅŅĐĩŅŅвĐģŅŅŅŅŅ ŅĐĩŅĐĩС Tor.",
+ "Successfully started Tor onion hidden services.": "ĐĄĐēŅŅŅŅĐš ŅĐĩŅĐ˛Đ¸Ņ Tor СаĐŋŅŅĐĩĐŊĐž ŅŅĐŋĐĩŅĐŊĐž.",
+ "Unable to start hidden services, please check your config.": "ĐŅийĐēа ĐŋŅи СаĐŋŅŅĐēĐĩ ŅĐēŅŅŅĐžĐŗĐž ŅĐĩŅвиŅа, ĐŋĐžĐļаĐģŅĐšŅŅа ĐŋŅОвĐĩŅŅŅĐĩ ĐŊаŅŅŅОКĐēи",
+ "For faster connections open {0} port on your router.": "ĐĐģŅ ĐąĐžĐģĐĩĐĩ ĐąŅŅŅŅОК ŅайОŅŅ ŅĐĩŅи ĐžŅĐēŅОКŅĐĩ {0} ĐŋĐžŅŅ ĐŊа ваŅĐĩĐŧ ŅĐžŅŅĐĩŅĐĩ.",
+ "Your connection is restricted. Please, open {0} port on your router": "ĐОдĐēĐģŅŅĐĩĐŊиĐĩ ĐžĐŗŅаĐŊиŅĐĩĐŊĐž. ĐĐžĐļаĐģŅĐšŅŅа ĐžŅĐēŅОКŅĐĩ {0} ĐŋĐžŅŅ ĐŊа ваŅĐĩĐŧ ŅĐžŅŅĐĩŅĐĩ",
+ "or configure Tor to become a full member of the ZeroNet network.": "иĐģи ĐŊаŅŅŅОКŅĐĩ Tor ŅŅĐž ĐąŅ ŅŅаŅŅ ĐŋĐžĐģĐŊĐžŅĐĩĐŊĐŊŅĐŧ ŅŅаŅŅĐŊиĐēĐžĐŧ ŅĐĩŅи ZeroNet.",
+
+ "Select account you want to use in this site:": "ĐŅĐąĐĩŅиŅĐĩ аĐēĐēаŅĐŊŅ Đ´ĐģŅ Đ¸ŅĐŋĐžĐģŅСОваĐŊĐ¸Ņ ĐŊа ŅŅĐžĐŧ ŅаКŅĐĩ:",
+ "currently selected": "ŅĐĩĐšŅĐ°Ņ Đ˛ŅĐąŅаĐŊ",
+ "Unique to site": "ĐŖĐŊиĐēаĐģŅĐŊŅĐš Đ´ĐģŅ ŅŅĐžĐŗĐž ŅаКŅа",
+
+ "Content signing failed": "ĐОдĐŋиŅŅ ĐēĐžĐŊŅĐĩĐŊŅа ĐŊĐĩ ŅдаĐģаŅŅ",
+ "Content publish queued for {0:.0f} seconds.": "ĐŅĐąĐģиĐēаŅĐ¸Ņ ĐēĐžĐŊŅĐĩĐŊŅа ĐŋĐžŅŅавĐģĐĩĐŊа в ĐžŅĐĩŅĐĩĐ´Ņ {0:.0f} ŅĐĩĐēŅĐŊĐ´.",
+ "Content published to {0} peers.": "ĐĐžĐŊŅĐĩĐŊŅ ĐžĐŋŅĐąĐģиĐēОваĐŊ ĐŊа {0} ĐŋиŅĐ°Ņ .",
+ "No peers found, but your content is ready to access.": "ĐиŅŅ ĐŊĐĩ ĐŊаКдĐĩĐŊŅ, ĐŊĐž Đ˛Đ°Ņ ĐēĐžĐŊŅĐĩĐŊŅ Đ´ĐžŅŅŅĐŋĐĩĐŊ.",
+ "Your network connection is restricted. Please, open {0} port": "ĐаŅĐĩ ĐŋОдĐēĐģŅŅĐĩĐŊиĐĩ ĐžĐŗŅаĐŊиŅĐĩĐŊĐž. ĐĐžĐļаĐģŅĐšŅŅа ĐžŅĐēŅОКŅĐĩ {0} ĐŋĐžŅŅ. ",
+ "on your router to make your site accessible for everyone.": "ĐŊа ваŅĐĩĐŧ ŅĐžŅŅĐĩŅĐĩ, ŅŅĐž ĐąŅ Đ˛Đ°Ņ ŅĐ°ĐšŅ ŅŅаĐģ Đ´ĐžŅŅŅĐŋĐŊĐŗ ĐŋĐžŅĐĩŅиŅĐĩĐģŅĐŧ.",
+ "Content publish failed.": "ĐŅийĐēа ĐŋŅи ĐŋŅĐąĐģиĐēаŅии ĐēĐžĐŊŅĐĩĐŊŅа.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "ĐŅĐžŅ ŅаКĐģ вŅŅ ĐĩŅĐĩ ŅиĐŊŅ ŅĐžĐŊиСиŅŅĐĩŅŅŅ, ĐĩŅĐģи ĐŋŅОдОĐģĐļиŅŅ ĐĩĐŗĐž иСĐŧĐĩĐŊĐĩĐŊиĐĩ, ĐŋŅĐĩĐ´ŅĐ´ŅŅиК ĐēĐžĐŊŅĐĩĐŊŅ ĐŧĐžĐļĐĩŅ ĐąŅŅŅ ĐŋĐžŅĐĩŅŅĐŊ.",
+ "Write content anyway": "ĐаĐŋиŅаŅŅ ĐēĐžĐŊŅĐĩĐŊŅ Đ˛ ĐģŅйОĐŧ ŅĐģŅŅаĐĩ",
+ "New certificate added:": "ĐОйавĐģĐĩĐŊ ĐŊОвŅĐš ŅĐĩŅŅиŅиĐēаŅ:",
+ "You current certificate:": "ĐĐ°Ņ ŅĐĩĐēŅŅиК ŅĐĩŅŅиŅиĐēаŅ: ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "ĐСĐŧĐĩĐŊиŅŅ ĐĩĐŗĐž ĐŊа {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "ĐĄĐĩŅŅиŅиĐēĐ°Ņ Đ¸ĐˇĐŧĐĩĐŊĐĩĐŊ ĐŊа: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "ĐĄĐ°ĐšŅ ŅĐēĐģĐžĐŊиŅОваĐŊ",
+
+ "You have successfully changed the web interface's language!": "Đ¯ĐˇŅĐē иĐŊŅĐĩŅŅĐĩĐšŅа ŅŅĐŋĐĩŅĐŊĐž иСĐŧĐĩĐŊĐĩĐŊ!",
+ "Due to the browser's caching, the full transformation could take some minute.": "РСавиŅиĐŧĐžŅŅи ĐžŅ ŅайОŅŅ Đ˛Đ°ŅĐĩĐŗĐž ĐąŅаŅСĐĩŅа ĐŋĐžĐģĐŊĐžĐĩ ĐŋŅĐĩОйŅаСОваĐŊиĐĩ ĐŧĐžĐļĐĩŅ ĐˇĐ°ĐŊŅŅŅ ĐŋаŅŅ ĐŧиĐŊŅŅ.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "ĐОдĐēĐģŅŅĐĩĐŊиĐĩ Đē UiServer Websocket ĐŋŅĐĩŅваĐŊĐž. ĐĐĩŅĐĩĐŋОдĐēĐģŅŅаŅŅŅ...",
+ "Connection with UiServer Websocket recovered.": "ĐОдĐēĐģŅŅĐĩĐŊиĐĩ Đē UiServer Websocket вОŅŅŅаĐŊОвĐģĐĩĐŊĐž.",
+ "UiServer Websocket error, please reload the page.": "ĐŅийĐēа UiServer Websocket, ĐŋĐĩŅĐĩĐˇĐ°ĐŗŅŅСиŅĐĩ ŅŅŅаĐŊиŅŅ!",
+ " Connecting...": " ĐОдĐēĐģŅŅĐĩĐŊиĐĩ...",
+ "Site size: ": "РаСĐŧĐĩŅ ŅаКŅа: ",
+ "MB is larger than default allowed ": "MB йОĐģŅŅĐĩ ŅĐĩĐŧ ŅаСŅĐĩŅĐĩĐŊĐž ĐŋĐž ŅĐŧĐžĐģŅаĐŊĐ¸Ņ ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "ĐŅĐēŅŅŅŅ ŅĐ°ĐšŅ Đ¸ ŅŅŅаĐŊОвиŅŅ ĐģиĐŧĐ¸Ņ ĐˇĐ°ĐŊиĐŧаĐĩĐŧĐžĐŗĐž ĐŧĐĩŅŅа ĐŊа \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " ŅаКĐģŅ Đ´ĐžĐģĐļĐŊŅ ĐąŅŅŅ ĐˇĐ°ĐŗŅŅĐļĐĩĐŊŅ",
+ " downloaded": " ĐˇĐ°ĐŗŅŅĐļĐĩĐŊĐž",
+ " download failed": " ĐžŅийĐēа ĐˇĐ°ĐŗŅŅСĐēи",
+ "Peers found: ": "ĐиŅОв ĐŊаКдĐĩĐŊĐž: ",
+ "No peers found": "ĐиŅŅ ĐŊĐĩ ĐŊаКдĐĩĐŊŅ",
+ "Running out of size limit (": "ĐĐžŅŅŅĐŋĐŊĐžĐĩ ĐŧĐĩŅŅĐž СаĐēĐžĐŊŅиĐģĐžŅŅ (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "ĐŖŅŅаĐŊОвиŅŅ ĐģиĐŧĐ¸Ņ ĐŊа \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "ĐиĐŧĐ¸Ņ ĐŋаĐŧŅŅи ĐŊа диŅĐēĐĩ иСĐŧĐĩĐŊĐĩĐŊ ĐŊа {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": "ĐĐžŅŅŅĐŋĐŊа ĐŊĐžĐ˛Đ°Ņ Đ˛ĐĩŅŅĐ¸Ņ Đ´Đ°ĐŊĐŊОК ŅŅŅаĐŊиŅŅ ĐĐąĐŊОвиŅĐĩ ŅŅŅаĐŊиŅŅ, ŅŅĐž ĐąŅ ŅвидĐĩŅŅ Đ¸ĐˇĐŧĐĩĐŊĐĩĐŊиŅ!",
+ "This site requests permission:": "ĐаĐŊĐŊŅĐš ŅĐ°ĐšŅ ĐˇĐ°ĐŋŅаŅиваĐĩŅ ŅаСŅĐĩŅĐĩĐŊиŅ:",
+ "_(Accept)": "ĐŅĐĩĐ´ĐžŅŅавиŅŅ"
+
+}
diff --git a/src/Translate/languages/sk.json b/src/Translate/languages/sk.json
new file mode 100644
index 00000000..8fb4554b
--- /dev/null
+++ b/src/Translate/languages/sk.json
@@ -0,0 +1,57 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "BlahoÅželÃĄme, vÃĄÅĄ port {0} je otvorenÃŊ. Ste ÃēplnÃŊm Älenom siete ZeroNet!",
+ "Tor mode active, every connection using Onion route.": "Tor mÃŗd aktÃvny, vÅĄetky spojenia teraz pouÅžÃvajÃē Onion sieÅĨ.",
+ "Successfully started Tor onion hidden services.": "Tor ÃēspeÅĄne spustenÃŊ.",
+ "Unable to start hidden services, please check your config.": "Nebolo moÅžnÊ spustiÅĨ Tor, prosÃm skontrolujte nastavenia.",
+ "For faster connections open {0} port on your router.": "Pre rÃŊchlejÅĄie spojenie otvorte na vaÅĄom routery port {0}",
+ "Your connection is restricted. Please, open {0} port on your router": "VaÅĄe pripojenie je obmedzenÊ. ProsÃm otvorte port {0} na vaÅĄom routery.",
+ "or configure Tor to become a full member of the ZeroNet network.": "alebo nastavte Tor aby ste sa tali plnÃŊm Älenom siete ZeroNet.",
+
+ "Select account you want to use in this site:": "ZvoÄžte ÃēÄet ktorÃŊ chcete pouÅžÃvaÅĨ na tejto strÃĄnke:",
+ "currently selected": "aktuÃĄlne zvolenÊ",
+ "Unique to site": "UnikÃĄtny pre strÃĄnku",
+
+ "Content signing failed": "PodpÃsanie obsahu zlyhalo",
+ "Content publish queued for {0:.0f} seconds.": "PodpÃsanie obsahu bude na rade za {0:.0f} sekÃēnd",
+ "Content published to {0} peers.": "Obsah publikovanÃŊ {0} peer-erom",
+ "No peers found, but your content is ready to access.": "Neboli nÃĄjdenÃŊ Åžiadny peer-ery, ale vÃĄÅĄ obsah je pripravenÃŊ pre prÃstup.",
+ "Your network connection is restricted. Please, open {0} port": "VaÅĄe pripojenie k sieti je obmedzenÊ. ProsÃm otvorte port {0} na vaÅĄom routery.",
+ "on your router to make your site accessible for everyone.": "na vaÅĄom routery aby bola vaÅĄa strÃĄnka prÃstupnÃĄ pre vÅĄetkÃŊch.",
+ "Content publish failed.": "Publikovanie obsahu zlyhalo.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Tento sÃēbor sa stÃĄle synchronizuje, ak v Åom spravÃte zmeny, predchÃĄdzajÃēci obsah sa môŞe stratiÅĨ.",
+ "Write content anyway": "Aj tak spraviÅĨ zmeny",
+ "New certificate added:": "PridanÃŊ novÃŊ certifikÃĄt:",
+ "You current certificate:": "VÃĄÅĄ aktuÃĄlny certifikÃĄt:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "ZvoÄžte to na {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "CertifikÃĄt zmenenÃŊ na: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "StrÃĄnka naklonovanÃĄ",
+
+ "You have successfully changed the web interface's language!": "ÃspeÅĄne ste zmenili jazyk webovÊho rozhrania!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Kôli cachu webovÊho prehliadavaÄa, ceÄžkovÃĄ transformÃĄcia môŞe chvÃÄēu trvaÅĨ.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Spojenie s UiServer Websocket bolo stratenÊ. Znovu pripÃĄjame...",
+ "Connection with UiServer Websocket recovered.": "Spojenie s UiServer Websocket obnovenÊ.",
+ "UiServer Websocket error, please reload the page.": "Chyba UiServer Websocket-u, prosÃm znovu naÄÃtajte strÃĄnku.",
+ " Connecting...": " PripÃĄjanie...",
+ "Site size: ": "VeÄžkosÅĨ strÃĄnky: ",
+ "MB is larger than default allowed ": "MB je viac ako povolenÃĄ hodnota",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "OtvoriÅĨ strÃĄnku a nastaviÅĨ limit veÄžkosti na \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " sÃēbory je potrebnÊ stiahnuÅĨ",
+ " downloaded": " stiahnutÊ",
+ " download failed": " sÅĨahovanie zlyhalo",
+ "Peers found: ": "Peer-erov nÃĄjdenÃŊch: ",
+ "No peers found": "Neboli nÃĄjdenÃŊ Åžiadny peer-ery",
+ "Running out of size limit (": "Presahuje povolenÃŊ limit veÄžkosti pamäte (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "NastaviÅĨ limit na \" + site_info.next_size_limit + \"MB ändern",
+ "Site size limit changed to {0}MB": "Limit veÄžkosti pamäte nastavenÃŊ na {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " Bola vydanÃĄ novÃĄ verzia tejto strÃĄnky. Znovu naÄÃtajte tÃēto strÃĄnku aby bolo vidieÅĨ zmeny.",
+ "This site requests permission:": "TÃĄto strÃĄnka vyÅžaduje povolenie:",
+ "_(Accept)": "UdeliÅĨ",
+
+ "on": "",
+ "Oct": "Okt",
+ "May": "MÃĄj",
+ "Jun": "JÃēn",
+ "Jul": "JÃēl"
+
+}
diff --git a/src/Translate/languages/sl.json b/src/Translate/languages/sl.json
new file mode 100644
index 00000000..2aeb628e
--- /dev/null
+++ b/src/Translate/languages/sl.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Äestitke, vaÅĄa vrata {0} so odprta. Postali ste polnopravni Älan ZeroNet omreÅžja!",
+ "Tor mode active, every connection using Onion route.": "NaÄin Tor aktiven.",
+ "Successfully started Tor onion hidden services.": "Storitve Tor uspeÅĄno zagnane.",
+ "Unable to start hidden services, please check your config.": "Ni bilo mogoÄe zagnati Tor storitev. Preverite nastavitve.",
+ "For faster connections open {0} port on your router.": "Za hitrejÅĄe povezave na svojem usmerjevalniku odprite vrata {0}.",
+ "Your connection is restricted. Please, open {0} port on your router": "VaÅĄa povezava je omejena. Na svojem usmerjevalniku odprite vrata {0}",
+ "or configure Tor to become a full member of the ZeroNet network.": "ali nastavite Tor, da postanete polnopravni Älan ZeroNet omreÅžja.",
+
+ "Select account you want to use in this site:": "Izberite raÄun, ki ga Åželite uporabiti na tem spletnem mestu:",
+ "currently selected": "trenutno izbrano",
+ "Unique to site": "Edinstven za spletno mesto",
+
+ "Content signing failed": "Podpisovanje vsebine ni uspelo",
+ "Content publish queued for {0:.0f} seconds.": "Objava vsebine na Äakanju za {0:.0f} sekund.",
+ "Content published to {0} peers.": "Vsebina objavljena na {0} povezavah.",
+ "No peers found, but your content is ready to access.": "Ni nobenih povezav, vendar je vaÅĄa vsebina pripravljena za dostop.",
+ "Your network connection is restricted. Please, open {0} port": "VaÅĄa povezava je omejena. Prosimo, odprite vrata {0}",
+ "on your router to make your site accessible for everyone.": "na vaÅĄem usmerjevalniku, da bo vaÅĄe spletno mesto dostopno za vse.",
+ "Content publish failed.": "Objavljanje vsebine ni uspelo.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Ta datoteka se ÅĄe vedno sinhronizira. Äe jo uredite zdaj, se lahko zgodi, da bo prejÅĄnja vsebina izgubljena.",
+ "Write content anyway": "Vseeno uredi vsebino",
+ "New certificate added:": "Dodano novo potrdilo:",
+ "You current certificate:": "Trenutno potrdilo:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "Spremenite ga na {auth_type}/{auth_user_name}@{domain}",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "Potrdilo spremenjeno na: {auth_type}/{auth_user_name}@{domain}.",
+ "Site cloned": "Stran klonirana",
+
+ "You have successfully changed the web interface's language!": "UspeÅĄno ste spremenili jezik spletnega vmesnika!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Zaradi predpomnjenja brskalnika lahko popolna preobrazba traja nekaj minut.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "Povezava z UiServer Websocket je bila izgubljena. Ponovno povezovanje ...",
+ "Connection with UiServer Websocket recovered.": "Povezava z UiServer Websocket je vzpostavljena.",
+ "UiServer Websocket error, please reload the page.": "Napaka UiServer Websocket. Prosimo osveÅžite stran.",
+ " Connecting...": " Povezovanje ...",
+ "Site size: ": "Velikost strani: ",
+ "MB is larger than default allowed ": "MB je veÄja od dovoljenih",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Odpri to stran in nastavi omejitev na \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " datotek mora biti preneÅĄenih",
+ " downloaded": " preneseno",
+ " download failed": " prenos ni uspel",
+ "Peers found: ": "Najdene povezave: ",
+ "No peers found": "Ni najdenih povezav",
+ "Running out of size limit (": "Zmanjkuje dovoljenega prostora (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "Nastavi omejitev na \" + site_info.next_size_limit + \"MB",
+ "Site size limit changed to {0}MB": "Omejitev strani nastavljena na{0} MB",
+ " New version of this page has just released. Reload to see the modified content.": " Ravnokar je bila objavljena nova razliÄica te strani. OsveÅžite jo, da boste videli novo vsebino.",
+ "This site requests permission:": "Ta stran zahteva dovoljenja:",
+ "_(Accept)": "Dovoli"
+
+}
diff --git a/src/Translate/languages/tr.json b/src/Translate/languages/tr.json
new file mode 100644
index 00000000..09a1bdb5
--- /dev/null
+++ b/src/Translate/languages/tr.json
@@ -0,0 +1,51 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "Tebrikler, portunuz ({0}) aÃ§Äąk. ArtÄąk ZeroNet aÄÄąna katÄąldÄąnÄąz!",
+ "Tor mode active, every connection using Onion route.": "Tor aktif, tÃŧm baÄlantÄąlar Onion yÃļnlendircisini kullanÄąyor.",
+ "Successfully started Tor onion hidden services.": "Gizli Tor hizmetleri baÅlatÄąldÄą.",
+ "Unable to start hidden services, please check your config.": "Gizli hizmetler baÅlatÄąlamadÄą, lÃŧtfen ayarlarÄąnÄązÄą kontrol ediniz.",
+ "For faster connections open {0} port on your router.": "Daha hÄązlÄą baÄlantÄą için {0} nolu portu bilgisayarÄąnÄąza yÃļnlendirin.",
+ "Your connection is restricted. Please, open {0} port on your router": "SÄąnÄąrlÄą baÄlantÄą. LÃŧtfen, {0} nolu portu bilgisayarÄąnÄąza yÃļnlendirin",
+ "or configure Tor to become a full member of the ZeroNet network.": "ya da ZeroNet aÄÄąna tam olarak katÄąlabilmek için Tor'u kullanÄąn.",
+
+ "Select account you want to use in this site:": "Bu sitede kullanmak için bir hesap seçiniz:",
+ "currently selected": "kullanÄąlan",
+ "Unique to site": "Bu site için benzersiz",
+
+ "Content signing failed": "İçerik imzalama baÅarÄąsÄąz oldu",
+ "Content publish queued for {0:.0f} seconds.": "İçerik yayÄąmlanmak Ãŧzere {0:.0f} saniyedir kuyrukta.",
+ "Content published to {0} peers.": "İçerik {0} eÅe daÄÄątÄąldÄą.",
+ "No peers found, but your content is ready to access.": "EÅ bulunamadÄą, ama içeriÄiniz eriÅime hazÄąr.",
+ "Your network connection is restricted. Please, open {0} port": "SÄąnÄąrlÄą baÄlantÄą. LÃŧtfen, {0} nolu portu bilgisayarÄąnÄąza yÃļnlendirin",
+ "on your router to make your site accessible for everyone.": "bÃļylece sitenizi herkes için eriÅilebilir yapabilirsiniz",
+ "Content publish failed.": "İçerik yayÄąmlama baÅarÄąsÄąz oldu.",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "Bu dosya hala gÃŧncelleniyor, eÄer Åimdi kaydederseniz, Ãļnceki içerik kaybolabilir.",
+ "Write content anyway": "Yine de kaydet",
+ "New certificate added:": "Yeni sertifika eklendi:",
+ "You current certificate:": "KullanÄąlan sertifikanÄąz:",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "{auth_type}/{auth_user_name}@{domain} olarak deÄiÅtir.",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "{auth_type}/{auth_user_name}@{domain} olarak deÄiÅtirildi",
+ "Site cloned": "Site klonlandÄą",
+
+ "You have successfully changed the web interface's language!": "WEB ara yÃŧzÃŧ için dil baÅarÄąyla deÄiÅtirildi!",
+ "Due to the browser's caching, the full transformation could take some minute.": "Tam dÃļnÃŧÅÃŧmÃŧn saÄlanmasÄą, tarayÄącÄą Ãļnbelleklemesi yÃŧzÃŧnden zaman alabilir.",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "UiServer Websocket ile baÄlantÄą kesildi. Yeniden baÄlanÄąlÄąyor...",
+ "Connection with UiServer Websocket recovered.": "UiServer Websocket ile baÄlantÄą yeniden kuruldu.",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket hatasÄą, lÃŧtfen sayfayÄą yenileyin.",
+ " Connecting...": " BaÄlanÄąyor...",
+ "Site size: ": "Site boyutu: ",
+ "MB is larger than default allowed ": "MB izin verilenden fazla ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "Siteyi aÃ§Äąn ve boyut sÄąnÄąrÄąnÄą \" + site_info.next_size_limit + \"MB'ye yÃŧkseltin",
+ " files needs to be downloaded": " indirilmesi gereken dosyalar",
+ " downloaded": " indirildi",
+ " download failed": " indirme baÅarÄąsÄąz",
+ "Peers found: ": "Bulunan eÅler: ",
+ "No peers found": "EÅ bulunamadÄą",
+ "Running out of size limit (": "Boyut sÄąnÄąrlamasÄąnÄą aÅtÄą (",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "SÄąnÄąrlamayÄą \" + site_info.next_size_limit + \"MB'ye yÃŧkselt",
+ "Site size limit changed to {0}MB": "Site boyut sÄąnÄąrlamasÄą {0}MB olarak ayarlandÄą",
+ " New version of this page has just released. Reload to see the modified content.": " Bu sayfanÄąn yeni versiyonu yayÄąmlandÄą. DeÄiÅen içeriÄi gÃļrmek için yeniden yÃŧkleyiniz.",
+ "This site requests permission:": "Bu site bir izin istiyor:",
+ "_(Accept)": "İzin ver"
+
+}
diff --git a/src/Translate/languages/zh-tw.json b/src/Translate/languages/zh-tw.json
new file mode 100644
index 00000000..0ec071b4
--- /dev/null
+++ b/src/Translate/languages/zh-tw.json
@@ -0,0 +1,54 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "įĨčŗīŧäŊ įå ({0}) 厞įļæéã äŊ 厞į￝ ZeroNet įļ˛čˇ¯įæŖåŧæåĄäēīŧ",
+ "Tor mode active, every connection using Onion route.": "Tor æ¨Ąåŧåį¨īŧæ¯åéŖæĨæŖå¨äŊŋ፿´čĨ莝įąã",
+ "Successfully started Tor onion hidden services.": "æååå Tor æ´čĨéąčæåã",
+ "Unable to start hidden services, please check your config.": "įĄæŗæééąčæåīŧčĢæĒĸæĨäŊ įé įŊŽã",
+ "For faster connections open {0} port on your router.": "įēäēæ´åŋĢįéŖæĨčĢå¨čˇ¯įąå¨ä¸æé {0} å ã",
+ "Your connection is restricted. Please, open {0} port on your router": "äŊ įéŖæĨåéåļãčĢå¨äŊ į莝įąå¨ä¸æé {0} å ",
+ "or configure Tor to become a full member of the ZeroNet network.": "æč é įŊŽäŊ į Tor äžæįē ZeroNet įæŖåŧæåĄã",
+
+ "Select account you want to use in this site:": "鏿äŊ čĻå¨éåįļ˛įĢäŊŋį¨į叺æļīŧ",
+ "currently selected": "įļå鏿",
+ "Unique to site": "įļ˛įĢ፿čēĢäģŊ",
+
+ "Content signing failed": "å §åŽšį°ŊįŊ˛å¤ąæ",
+ "Content publish queued for {0:.0f} seconds.": "å §åŽšåˇ˛å å Ĩ {0:.0f} į§åžįįŧäŊéåã",
+ "Content published to {0}/{1} peers.": "å §åŽšåˇ˛įŧäŊå° {0}/{1} åį¯éģã",
+ "Content published to {0} peers.": "å §åŽšåˇ˛įŧäŊå° {0} åį¯éģã",
+ "No peers found, but your content is ready to access.": "æžä¸å°į¯éģīŧäŊæ¯äŊ įå §åŽšåˇ˛įļæēååĨŊčĸĢč¨Ēåã",
+ "Your network connection is restricted. Please, open {0} port": "äŊ įįļ˛čˇ¯éŖæĨåéåļãčĢå¨äŊ į莝įąå¨ä¸æé {0} å ",
+ "on your router to make your site accessible for everyone.": "įĸēäŋäŊ įįļ˛įĢčŊčĸĢæ¯ä¸åäēēč¨Ēåã",
+ "Content publish failed.": "å §åŽšįŧäŊå¤ąæã",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "éåæĒäģįļå¨åæĨä¸īŧåĻæäŊ įžå¨å¯Ģå ĨåŽīŧäšåįå §åŽšå¯čŊæčĸĢä¸å¤ąã",
+ "Write content anyway": "åŧˇåļå¯Ģå Ĩå §åŽš",
+ "New certificate added:": "æ°čæ¸īŧ",
+ "You current certificate:": "äŊ įļåį迏īŧ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "æščŽčŗ {auth_type}/{auth_user_name}@{domain}-ra",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "čæ¸æ´æščŗīŧ{auth_type}/{auth_user_name}@{domain}ã",
+ "Site cloned": "įļ˛įĢ厞å é",
+
+ "You have successfully changed the web interface's language!": "äŊ 厞įļæåæščŽäē Web įéĸįčĒč¨īŧ",
+ "Due to the browser's caching, the full transformation could take some minute.": "įąæŧäŊ įįčĻŊå¨įˇŠåīŧåŽæ´įįŋģč¯å¯čŊéčĻčąåšžåéã",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "UiServer Websocket įéŖįˇåˇ˛ä¸å¤ąãéæ°éŖįˇä¸...",
+ "Connection with UiServer Websocket recovered.": "UiServer Websocket įéŖįˇåˇ˛æĸ垊ã",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket é¯čǤīŧčĢéæ°čŧå Ĩé éĸã",
+ " Connecting...": " éŖįˇä¸...",
+ "Site size: ": "įļ˛įĢ大å°īŧ",
+ "MB is larger than default allowed ": "MB æ¯é č¨å 訹įåŧæ´å¤§ ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "æéįļ˛įĢä¸Ļč¨åŽå¤§å°éåļå° \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " åæĒéčĻä¸čŧ",
+ " downloaded": " 厞ä¸čŧ",
+ " download failed": " ä¸čŧå¤ąæ",
+ "Peers found: ": "厞æžå°į¯éģīŧ",
+ "No peers found": "æžä¸å°į¯éģ",
+ "Running out of size limit (": "čļ åē大å°éåļ",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "č¨åŽéåļå° \" + site_info.next_size_limit + \"MB",
+ "Cloning site...": "č¤čŖŊįļ˛įĢä¸...",
+ "Site cloned": "įļ˛įĢ厞č¤čŖŊ",
+ "Site size limit changed to {0}MB": "įļ˛įĢ大å°éåļ厞æščŽå° {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " æŦé éĸįæ°įæŦ厞įļįŧäŊã éæ°čŧå ĨäžæĨįæ´æšåžįå §åŽšã",
+ "This site requests permission:": "éåįļ˛įĢįčĢæąč¨ąå¯æŦīŧ",
+ "_(Accept)": "ææŦ"
+
+}
diff --git a/src/Translate/languages/zh.json b/src/Translate/languages/zh.json
new file mode 100644
index 00000000..16a40b1a
--- /dev/null
+++ b/src/Translate/languages/zh.json
@@ -0,0 +1,55 @@
+{
+ "Congratulations, your port {0} is opened. You are a full member of the ZeroNet network!": "įĨč´ēīŧæ¨įįĢ¯åŖ ({0}) 厞įģæåŧã æ¨åˇ˛į쿝 ZeroNet įŊįģįæŖåŧæåäēīŧ",
+ "Tor mode active, every connection using Onion route.": "Tor æ¨Ąåŧå¯į¨īŧæ¯ä¸ĒčŋæĨæŖå¨äŊŋ፿´čąčˇ¯įąã",
+ "Successfully started Tor onion hidden services.": "æåå¯å¨ Tor æ´čąéčæåĄã",
+ "Unable to start hidden services, please check your config.": "æ æŗæåŧéčæåĄīŧč¯ˇæŖæĨæ¨įé įŊŽã",
+ "For faster connections open {0} port on your router.": "ä¸ēäēæ´åŋĢįčŋæĨ蝎å¨čˇ¯įąå¨ä¸æåŧ {0} į̝åŖã",
+ "Your connection is restricted. Please, open {0} port on your router": "æ¨įčŋæĨåéåļã蝎卿¨į莝įąå¨ä¸æåŧ {0} į̝åŖ",
+ "or configure Tor to become a full member of the ZeroNet network.": "æč é įŊŽæ¨į Tor æĨæä¸ē ZeroNet įæŖåŧæåã",
+
+ "Select account you want to use in this site:": "éæŠæ¨čĻå¨čŋä¸ĒįŊįĢäŊŋį¨į叿ˇ:",
+ "No certificate": "æ˛Ąæč¯äšĻ",
+ "currently selected": "åŊåéæŠ",
+ "Unique to site": "įŊįĢįŦæčēĢäģŊ",
+
+ "Content signing failed": "å 厚įžåå¤ąč´Ĩ",
+ "Content publish queued for {0:.0f} seconds.": "å åŽšåˇ˛å å Ĩ {0:.0f} į§åįåå¸éåã",
+ "Content published to {0}/{1} peers.": "å åŽšåˇ˛åå¸å° {0}/{1} ä¸Ēčįšã",
+ "Content published to {0} peers.": "å åŽšåˇ˛åå¸å° {0} ä¸Ēčįšã",
+ "No peers found, but your content is ready to access.": "æžä¸å°čįšīŧäŊæ¯æ¨įå åŽšåˇ˛įģåå¤åĨŊčĸĢčŽŋéŽã",
+ "Your network connection is restricted. Please, open {0} port": "æ¨įįŊįģčŋæĨåéåļã蝎卿¨į莝įąå¨ä¸æåŧ {0} į̝åŖ",
+ "on your router to make your site accessible for everyone.": "įĄŽäŋæ¨įįĢįščŊčĸĢæ¯ä¸ä¸ĒäēēčŽŋéŽã",
+ "Content publish failed.": "å 厚åå¸å¤ąč´Ĩã",
+ "This file still in sync, if you write it now, then the previous content may be lost.": "čŋä¸Ēæäģļäģįļå¨åæĨä¸īŧåĻææ¨į°å¨åå ĨåŽīŧäšåįå 厚å¯čŊäŧčĸĢä¸ĸå¤ąã",
+ "Write content anyway": "åŧēåļåå Ĩå 厚",
+ "New certificate added:": "æ°č¯äšĻīŧ",
+ "You current certificate:": "æ¨åŊåįč¯äšĻīŧ",
+ "Change it to {auth_type}/{auth_user_name}@{domain}": "æ´æščŗ {auth_type}/{auth_user_name}@{domain}-ra",
+ "Certificate changed to: {auth_type}/{auth_user_name}@{domain}.": "č¯äšĻæ´æščŗīŧ{auth_type}/{auth_user_name}@{domain}ã",
+ "Site cloned": "įĢįšåˇ˛å é",
+
+ "You have successfully changed the web interface's language!": "æ¨åˇ˛įģæåæ´æšäē web įéĸįč¯č¨īŧ",
+ "Due to the browser's caching, the full transformation could take some minute.": "įąä翍įæĩč§å¨įŧåīŧåŽæ´įįŋģč¯å¯čŊéčĻčąå åéã",
+
+ "Connection with UiServer Websocket was lost. Reconnecting...": "UiServer Websocket įčŋæĨ厞ä¸ĸå¤ąãéæ°čŋæĨä¸...",
+ "Connection with UiServer Websocket recovered.": "UiServer Websocket įčŋæĨ厞æĸå¤ã",
+ "UiServer Websocket error, please reload the page.": "UiServer Websocket é蝝īŧ蝎鿰å čŊŊéĄĩéĸã",
+ " Connecting...": " čŋæĨä¸...",
+ "Site size: ": "įĢįšå¤§å°īŧ",
+ "MB is larger than default allowed ": "MB æ¯éģ莤å 莸įåŧæ´å¤§ ",
+ "Open site and set size limit to \" + site_info.next_size_limit + \"MB": "æåŧįĢįšåšļ莞įŊŽå¤§å°éåļå° \" + site_info.next_size_limit + \"MB",
+ " files needs to be downloaded": " ä¸ĒæäģļéčĻä¸čŊŊ",
+ " downloaded": " 厞ä¸čŊŊ",
+ " download failed": " ä¸čŊŊå¤ąč´Ĩ",
+ "Peers found: ": "厞æžå°čįšīŧ",
+ "No peers found": "æžä¸å°čįš",
+ "Running out of size limit (": "čļ åē大å°éåļ",
+ "Set limit to \" + site_info.next_size_limit + \"MB": "莞įŊŽéåļå° \" + site_info.next_size_limit + \"MB",
+ "Cloning site...": "å éįĢįšä¸...",
+ "Site cloned": "įĢįšåˇ˛å é",
+ "Site size limit changed to {0}MB": "įĢįšå¤§å°éåļåˇ˛æ´æšå° {0}MB",
+ " New version of this page has just released. Reload to see the modified content.": " æŦéĄĩéĸįæ°įæŦ厞įģåå¸ã éæ°å čŊŊæĨæĨįæ´æšåįå 厚ã",
+ "This site requests permission:": "čŋä¸ĒįĢįšįč¯ˇæąæéīŧ",
+ "_(Accept)": "ææ"
+
+}
diff --git a/src/Ui/UiRequest.py b/src/Ui/UiRequest.py
new file mode 100644
index 00000000..4a4e0545
--- /dev/null
+++ b/src/Ui/UiRequest.py
@@ -0,0 +1,974 @@
+import time
+import re
+import os
+import mimetypes
+import json
+import html
+import urllib
+import socket
+
+import gevent
+
+from Config import config
+from Site import SiteManager
+from User import UserManager
+from Plugin import PluginManager
+from Ui.UiWebsocket import UiWebsocket
+from Crypt import CryptHash
+from util import helper
+
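+# Status code -> status line mapping used when calling the WSGI start_response callback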
+status_texts = {
+ 200: "200 OK",
+ 206: "206 Partial Content",
+ 400: "400 Bad Request",
+ 403: "403 Forbidden",
+ 404: "404 Not Found",
+ 500: "500 Internal Server Error",
+}
+
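+# Extension -> MIME type entries that the stdlib mimetypes module may not know about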
+content_types = {
+ "asc": "application/pgp-keys",
+ "css": "text/css",
+ "gpg": "application/pgp-encrypted",
+ "html": "text/html",
+ "js": "application/javascript",
+ "json": "application/json",
+ "oga": "audio/ogg",
+ "ogg": "application/ogg",
+ "ogv": "video/ogg",
+ "sig": "application/pgp-signature",
+ "txt": "text/plain",
+ "webmanifest": "application/manifest+json",
+ "wasm": "application/wasm",
+ "webp": "image/webp"
+}
+
+
+class SecurityError(Exception):
+ pass
+
+
+@PluginManager.acceptPlugins
+class UiRequest(object):
+
+ def __init__(self, server, get, env, start_response):
+ if server:
+ self.server = server
+ self.log = server.log
+ self.get = get # Get parameters
+ self.env = env # Environment settings (WSGI environ dict)
+ # ['CONTENT_LENGTH', 'CONTENT_TYPE', 'GATEWAY_INTERFACE', 'HTTP_ACCEPT', 'HTTP_ACCEPT_ENCODING', 'HTTP_ACCEPT_LANGUAGE',
+ # 'HTTP_COOKIE', 'HTTP_CACHE_CONTROL', 'HTTP_HOST', 'HTTP_HTTPS', 'HTTP_ORIGIN', 'HTTP_PROXY_CONNECTION', 'HTTP_REFERER',
+ # 'HTTP_USER_AGENT', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_PORT', 'REQUEST_METHOD', 'SCRIPT_NAME',
+ # 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', 'SERVER_SOFTWARE', 'werkzeug.request', 'wsgi.errors',
+ # 'wsgi.input', 'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once', 'wsgi.url_scheme', 'wsgi.version']
+
+ self.start_response = start_response # Start response function
+ self.user = None
+ self.script_nonce = None # Nonce for script tags in wrapper html
+
+ def learnHost(self, host):
+ self.server.allowed_hosts.add(host)
+ self.server.log.info("Added %s as allowed host" % host)
+
+ def isHostAllowed(self, host):
+ if host in self.server.allowed_hosts:
+ return True
+
+ # Allow any IP address as they are not affected by DNS rebinding
+ # attacks
+ if helper.isIp(host):
+ self.learnHost(host)
+ return True
+
+ if ":" in host and helper.isIp(host.rsplit(":", 1)[0]): # Test without port
+ self.learnHost(host)
+ return True
+
+ if self.isProxyRequest(): # Support for chrome extension proxy
+ if self.isDomain(host):
+ return True
+ else:
+ return False
+
+ return False
+
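+ # Domain lookups below are served from SiteManager's cache (resolvers for special domains are typically provided by plugins)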
+ def isDomain(self, address):
+ return self.server.site_manager.isDomainCached(address)
+
+ def resolveDomain(self, domain):
+ return self.server.site_manager.resolveDomainCached(domain)
+
+ # Call the request handler function based on the request path
+ def route(self, path):
+ # Restrict UI access by IP address
+ if config.ui_restrict and self.env['REMOTE_ADDR'] not in config.ui_restrict:
+ return self.error403(details=False)
+
+ # Check if the host is allowed to make this request
+ if not self.isHostAllowed(self.env.get("HTTP_HOST")):
+ ret_error = next(self.error403("Invalid host: %s" % self.env.get("HTTP_HOST"), details=False))
+
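+ # The host check failed: build an equivalent URL that uses the raw IP as host, since IP hosts pass the allowed-host check above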
+ http_get = self.env["PATH_INFO"]
+ if self.env["QUERY_STRING"]:
+ http_get += "?{0}".format(self.env["QUERY_STRING"])
+ self_host = self.env["HTTP_HOST"].split(":")[0]
+ self_ip = self.env["HTTP_HOST"].replace(self_host, socket.gethostbyname(self_host))
+ link = "http://{0}{1}".format(self_ip, http_get)
+ ret_body = """
+