diff --git a/.forgejo/workflows/build-on-commit.yml b/.forgejo/workflows/build-on-commit.yml
deleted file mode 100644
index e8f0d2e3..00000000
--- a/.forgejo/workflows/build-on-commit.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Build Docker Image on Commit
-
-on:
- push:
- branches:
- - main
- tags:
- - '!' # Exclude tags
-
-jobs:
- build-and-publish:
- runs-on: docker-builder
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Set REPO_VARS
- id: repo-url
- run: |
- echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
- echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
-
- - name: Login to OCI registry
- run: |
- echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
-
- - name: Build and push Docker images
- run: |
- # Build Docker image with commit SHA
- docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
- docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}
-
- # Build Docker image with nightly tag
- docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
- docker push $REPO_HOST/$REPO_PATH:nightly
-
- # Remove local images to save storage
- docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
- docker rmi $REPO_HOST/$REPO_PATH:nightly
diff --git a/.forgejo/workflows/build-on-tag.yml b/.forgejo/workflows/build-on-tag.yml
deleted file mode 100644
index 888102b6..00000000
--- a/.forgejo/workflows/build-on-tag.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Build and Publish Docker Image on Tag
-
-on:
- push:
- tags:
- - '*'
-
-jobs:
- build-and-publish:
- runs-on: docker-builder
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Set REPO_VARS
- id: repo-url
- run: |
- echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
- echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
-
- - name: Login to OCI registry
- run: |
- echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
-
- - name: Build and push Docker image
- run: |
- TAG=${{ github.ref_name }} # Get the tag name from the context
- # Build and push multi-platform Docker images
- docker build -t $REPO_HOST/$REPO_PATH:$TAG --push .
- # Tag and push latest
- docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
- docker push $REPO_HOST/$REPO_PATH:latest
-
- # Remove the local image to save storage
- docker rmi $REPO_HOST/$REPO_PATH:$TAG
- docker rmi $REPO_HOST/$REPO_PATH:latest
\ No newline at end of file
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index aab991d5..00000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-github: canewsin
-patreon: # Replace with a single Patreon username e.g., user1
-open_collective: # Replace with a single Open Collective username e.g., user1
-ko_fi: canewsin
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: canewsin
-issuehunt: # Replace with a single IssueHunt username e.g., user1
-otechie: # Replace with a single Otechie username e.g., user1
-custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
deleted file mode 100644
index 27b5c924..00000000
--- a/.github/workflows/codeql-analysis.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
- push:
- branches: [ py3-latest ]
- pull_request:
- # The branches below must be a subset of the branches above
- branches: [ py3-latest ]
- schedule:
- - cron: '32 19 * * 2'
-
-jobs:
- analyze:
- name: Analyze
- runs-on: ubuntu-latest
- permissions:
- actions: read
- contents: read
- security-events: write
-
- strategy:
- fail-fast: false
- matrix:
- language: [ 'javascript', 'python' ]
- # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
- # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v2
- with:
- languages: ${{ matrix.language }}
- # If you wish to specify custom queries, you can do so here or in a config file.
- # By default, queries listed here will override any specified in a config file.
- # Prefix the list here with "+" to use these queries and those in the config file.
-
- # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
- # queries: security-extended,security-and-quality
-
-
- # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
- # If this step fails, then you should remove it and run the build manually (see below)
- - name: Autobuild
- uses: github/codeql-action/autobuild@v2
-
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
-
- # If the Autobuild fails above, remove it and uncomment the following three lines.
- # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
-
- # - run: |
- # echo "Run, Build Application using script"
- # ./location_of_script_within_repo/buildscript.sh
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
deleted file mode 100644
index 2bdcaf95..00000000
--- a/.github/workflows/tests.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-name: tests
-
-on: [push, pull_request]
-
-jobs:
- test:
- runs-on: ubuntu-20.04
- strategy:
- max-parallel: 16
- matrix:
- python-version: ["3.7", "3.8", "3.9"]
-
- steps:
- - name: Checkout ZeroNet
- uses: actions/checkout@v2
- with:
- submodules: "true"
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
- with:
- python-version: ${{ matrix.python-version }}
-
- - name: Prepare for installation
- run: |
- python3 -m pip install setuptools
- python3 -m pip install --upgrade pip wheel
- python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
-
- - name: Install
- run: |
- python3 -m pip install --upgrade -r requirements.txt
- python3 -m pip list
-
- - name: Prepare for tests
- run: |
- openssl version -a
- echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6
-
- - name: Test
- run: |
- catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
- export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
- export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
- export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
- export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
- export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
- export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
- find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
diff --git a/.gitignore b/.gitignore
index 636cd115..b3795821 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,11 +7,8 @@ __pycache__/
# Hidden files
.*
-!/.forgejo
-!/.github
!/.gitignore
!/.travis.yml
-!/.gitlab-ci.yml
# Temporary files
*.bak
@@ -31,6 +28,3 @@ tools/phantomjs
# ZeroNet config file
zeronet.conf
-
-# ZeroNet log files
-log/*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index f3e1ed29..00000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-stages:
- - test
-
-.test_template: &test_template
- stage: test
- before_script:
- - pip install --upgrade pip wheel
- # Selenium and requests can't be installed without a requests hint on Python 3.4
- - pip install --upgrade requests>=2.22.0
- - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
- - pip install --upgrade -r requirements.txt
- script:
- - pip list
- - openssl version -a
- - python -m pytest -x plugins/CryptMessage/Test --color=yes
- - python -m pytest -x plugins/Bigfile/Test --color=yes
- - python -m pytest -x plugins/AnnounceLocal/Test --color=yes
- - python -m pytest -x plugins/OptionalManager/Test --color=yes
- - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini --color=yes
- - mv plugins/disabled-Multiuser plugins/Multiuser
- - python -m pytest -x plugins/Multiuser/Test --color=yes
- - mv plugins/disabled-Bootstrapper plugins/Bootstrapper
- - python -m pytest -x plugins/Bootstrapper/Test --color=yes
- - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
-
-test:py3.4:
- image: python:3.4.3
- <<: *test_template
-
-test:py3.5:
- image: python:3.5.7
- <<: *test_template
-
-test:py3.6:
- image: python:3.6.9
- <<: *test_template
-
-test:py3.7-openssl1.1.0:
- image: python:3.7.0b5
- <<: *test_template
-
-test:py3.7-openssl1.1.1:
- image: python:3.7.4
- <<: *test_template
-
-test:py3.8:
- image: python:3.8.0b3
- <<: *test_template
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 2c602a5a..00000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "plugins"]
- path = plugins
- url = https://github.com/ZeroNetX/ZeroNet-Plugins.git
diff --git a/.travis.yml b/.travis.yml
index bdaafa22..9af1d69b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,17 +4,19 @@ python:
- 3.5
- 3.6
- 3.7
- - 3.8
+ - 3.8-dev
+dist: xenial
services:
- docker
cache: pip
before_install:
- pip install --upgrade pip wheel
- - pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
+ - pip install codecov coveralls flake8 mock pytest pytest-cov selenium
# - docker build -t zeronet .
# - docker run -d -v $PWD:/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 zeronet
install:
- - pip install --upgrade -r requirements.txt
+ - if [[ $TRAVIS_PYTHON_VERSION == 3.8-dev ]]; then pip install setuptools cffi 'cython>=0.28' git+git://github.com/gevent/gevent.git#egg=gevent; fi
+ - pip install -r requirements.txt
- pip list
before_script:
- openssl version -a
@@ -24,19 +26,14 @@ before_script:
sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6';
fi
script:
- - catchsegv python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
- - export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python -m pytest -x plugins/CryptMessage/Test
- - export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python -m pytest -x plugins/Bigfile/Test
- - export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python -m pytest -x plugins/AnnounceLocal/Test
- - export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python -m pytest -x plugins/OptionalManager/Test
- - export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
- - export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
- - find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- - find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
-after_failure:
- - zip -r log.zip log/
- - curl --upload-file ./log.zip https://transfer.sh/log.zip
+ - python -m pytest -x plugins/CryptMessage/Test
+ - python -m pytest -x plugins/Bigfile/Test
+ - python -m pytest -x plugins/AnnounceLocal/Test
+ - python -m pytest -x plugins/OptionalManager/Test
+ - python -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
+ - mv plugins/disabled-Multiuser plugins/Multiuser && python -m pytest -x plugins/Multiuser/Test
+ - mv plugins/disabled-Bootstrapper plugins/Bootstrapper && python -m pytest -x plugins/Bootstrapper/Test
+ - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pybitcointools/
after_success:
- codecov
- coveralls --rcfile=src/Test/coverage.ini
diff --git a/CHANGELOG-zh-cn.md b/CHANGELOG-zh-cn.md
new file mode 100644
index 00000000..c09ca401
--- /dev/null
+++ b/CHANGELOG-zh-cn.md
@@ -0,0 +1,134 @@
+## ZeroNet 0.5.1 (2016-11-18)
+### 新增
+- 多语言界面
+- 新插件:为站点 HTML 与 JS 文件提供的翻译助手
+- 每个站点独立的 favicon
+
+### 修复
+- 并行可选文件下载
+
+## ZeroNet 0.5.0 (2016-11-08)
+### 新增
+- 新插件:允许在 ZeroHello 列出/删除/固定/管理文件
+- 新的 API 命令来关注用户的可选文件,与可选文件的请求统计
+- 新的可选文件总大小限制
+- 新插件:保存节点到数据库并在重启时保持它们,使得更快的可选文件搜索以及在没有 Tracker 的情况下工作
+- 重写 UPnP 端口打开器 + 退出时关闭端口(感谢 sirMackk!)
+- 通过懒惰 PeerHashfield 创建来减少内存占用
+- 在 /Stats 页面加载 JSON 文件统计与数据库信息
+
+### 更改
+- 独立的锁定文件来获得更好的 Windows 兼容性
+- 当执行 start.py 时,即使 ZeroNet 已经运行也打开浏览器
+- 在重载时保持插件顺序来允许插件扩展另一个插件
+- 只在完整加载 sites.json 时保存来避免数据丢失
+- 将更多的 Tracker 更改为更可靠的 Tracker
+- 更少的 findhashid CPU 使用率
+- 合并下载大量可选文件
+- 更多对于可选文件的其他优化
+- 如果一个站点有 1000 个节点,更积极地清理
+- 为验证错误使用警告而不是错误
+- 首先推送更新到更新的客户端
+- 损坏文件重置改进
+
+### 修复
+- 修复启动时出现的站点删除错误
+- 延迟 WebSocket 消息直到连接上
+- 修复如果文件包含额外数据时的数据库导入
+- 修复大站点下载
+- 修复 diff 发送 bug (跟踪它好长时间了)
+- 修复当 JSON 文件包含 [] 字符时随机出现的发布错误
+- 修复 siteDelete 与 siteCreate bug
+- 修复文件写入确认对话框
+
+
+## ZeroNet 0.4.1 (2016-09-05)
+### 新增
+- 更快启动与更少内存使用的内核改变
+- 尝试连接丢失时重新连接 Tor
+- 侧边栏滑入
+- 尝试避免不完整的数据文件被覆盖
+- 更快地打开数据库
+- 在侧边栏显示用户文件大小
+- 依赖 --connection_limit 的并发 worker 数量
+
+
+### 更改
+- 在空闲 5 分钟后关闭数据库
+- 更好的站点大小计算
+- 允许在域名中使用“-”符号
+- 总是尝试为站点保持连接
+- 移除已合并站点的合并权限
+- 只扫描最后 3 天的新闻源来加快数据库请求
+- 更新 ZeroBundle-win 到 Python 2.7.12
+
+
+### 修复
+- 修复重要的安全问题:允许任意用户无需有效的来自 ID 提供者的证书发布新内容,感谢 Kaffie 指出
+- 修复在没有选择提供证书提供者时的侧边栏错误
+- 在数据库重建时跳过无效文件
+- 修复随机弹出的 WebSocket 连接错误
+- 修复新的 siteCreate 命令
+- 修复站点大小计算
+- 修复计算机唤醒后的端口打开检查
+- 修复 --size_limit 的命令行解析
+
+
+## ZeroNet 0.4.0 (2016-08-11)
+### 新增
+- 合并站点插件
+- Live source code reloading: Faster core development by allowing me to make changes in ZeroNet source code without restarting it.
+- 为合并站点设计的新 JSON 表
+- 从侧边栏重建数据库
+- 允许直接在 JSON 表中存储自定义数据:更简单与快速的 SQL 查询
+- 用户文件存档:允许站点拥有者存档不活跃的用户内容到单个文件(减少初始同步的时间/CPU/内存使用率)
+- 在文件删除时同时触发数据库 onUpdated/update
+- 从 ZeroFrame API 请求权限
+- 允许使用 fileWrite API 命令在 content.json 存储额外数据
+- 更快的可选文件下载
+- 使用替代源 (Gogs, Gitlab) 来下载更新
+- Track provided sites/connection and prefer to keep the ones with more sites to reduce connection number
+
+### 更改
+- 保持每个站点至少 5 个连接
+- 将目标站点连接从 10 更改到 15
+- ZeroHello 搜索功能稳定性/速度改进
+- 提升机械硬盘下的客户端性能
+
+### 修复
+- 修复 IE11 wrapper nonce 错误
+- 修复在移动设备上的侧边栏
+- 修复站点大小计算
+- 修复 IE10 兼容性
+- Windows XP ZeroBundle 兼容性(感谢中国人民)
+
+
+## ZeroNet 0.3.7 (2016-05-27)
+### 更改
+- 通过只传输补丁来减少带宽使用
+- 其他 CPU /内存优化
+
+
+## ZeroNet 0.3.6 (2016-05-27)
+### 新增
+- 新的 ZeroHello
+- Newsfeed 函数
+
+### 修复
+- 安全性修复
+
+
+## ZeroNet 0.3.5 (2016-02-02)
+### 新增
+- 带有 .onion 隐藏服务的完整 Tor 支持
+- 使用 ZeroNet 协议的 Bootstrap
+
+### 修复
+- 修复 Gevent 1.0.2 兼容性
+
+
+## ZeroNet 0.3.4 (2015-12-28)
+### 新增
+- AES, ECIES API 函数支持
+- PushState 与 ReplaceState URL 通过 API 的操作支持
+- 多用户 localstorage
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6974d18a..225e424a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,201 +1,3 @@
-### ZeroNet 0.9.0 (2023-07-12) Rev4630
- - Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
- - Add trackers to Config.py for failsafety incase missing trackers.txt
- - Added Proxy links
- - Fix pysha3 dep installation issue
- - FileRequest -> Remove Unnecessary check, Fix error wording
- - Fix Response when site is missing for `actionAs`
-
-
-### ZeroNet 0.8.5 (2023-02-12) Rev4625
- - Fix(https://github.com/ZeroNetX/ZeroNet/pull/202) for SSL cert gen failed on Windows.
- - default theme-class for missing value in `users.json`.
- - Fetch Stats Plugin changes.
-
-### ZeroNet 0.8.4 (2022-12-12) Rev4620
- - Increase Minimum Site size to 25MB.
-
-### ZeroNet 0.8.3 (2022-12-11) Rev4611
- - main.py -> Fix accessing unassigned varible
- - ContentManager -> Support for multiSig
- - SiteStrorage.py -> Fix accessing unassigned varible
- - ContentManager.py Improve Logging of Valid Signers
-
-### ZeroNet 0.8.2 (2022-11-01) Rev4610
- - Fix Startup Error when plugins dir missing
- - Move trackers to seperate file & Add more trackers
- - Config:: Skip loading missing tracker files
- - Added documentation for getRandomPort fn
-
-### ZeroNet 0.8.1 (2022-10-01) Rev4600
- - fix readdress loop (cherry-pick previously added commit from conservancy)
- - Remove Patreon badge
- - Update README-ru.md (#177)
- - Include inner_path of failed request for signing in error msg and response
- - Don't Fail Silently When Cert is Not Selected
- - Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
- - Update FUNDING.yml
-
-### ZeroNet 0.8.0 (2022-05-27) Rev4591
- - Revert File Open to catch File Access Errors.
-
-### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
- - Use xescape(s) from zeronet-conservancy
- - actionUpdate response Optimisation
- - Fetch Plugins Repo Updates
- - Fix Unhandled File Access Errors
- - Create codeql-analysis.yml
-
-### ZeroNet 0.7.9 (2022-05-26) Rev4585
- - Rust Version Compatibility for update Protocol msg
- - Removed Non Working Trakers.
- - Dynamically Load Trackers from Dashboard Site.
- - Tracker Supply Improvements.
- - Fix Repo Url for Bug Report
- - First Party Tracker Update Service using Dashboard Site.
- - remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
-
-### ZeroNet 0.7.8 (2022-03-02) Rev4580
- - Update Plugins with some bug fixes and Improvements
-
-### ZeroNet 0.7.6 (2022-01-12) Rev4565
- - Sync Plugin Updates
- - Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
- - Add More Default Plugins to Repo
- - Doubled Site Publish Limits
- - Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
- - UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
- - Moved Plugins to Seperate Repo
- - Added `access_key` variable in Config, this used to access restrited plugins when multiuser plugin is enabled. When MultiUserPlugin is enabled we cannot access some pages like /Stats, this key will remove such restriction with access key.
- - Added `last_connection_id_current_version` to ConnectionServer, helpful to estimate no of connection from current client version.
- - Added current version: connections to /Stats page. see the previous point.
-
-### ZeroNet 0.7.5 (2021-11-28) Rev4560
- - Add more default trackers
- - Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
- - Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
-
-### ZeroNet 0.7.3 (2021-11-28) Rev4555
- - Fix xrange is undefined error
- - Fix Incorrect viewport on mobile while loading
- - Tor-V3 Patch by anonymoose
-
-
-### ZeroNet 0.7.1 (2019-07-01) Rev4206
-### Added
- - Built-in logging console in the web UI to see what's happening in the background. (pull down top-right 0 button to see it)
- - Display database rebuild errors [Thanks to Lola]
- - New plugin system that allows to install and manage builtin/third party extensions to the ZeroNet client using the web interface.
- - Support multiple trackers_file
- - Add OpenSSL 1.1 support to CryptMessage plugin based on Bitmessage modifications [Thanks to radfish]
- - Display visual error message on startup errors
- - Fix max opened files changing on Windows platform
- - Display TLS1.3 compatibility on /Stats page
- - Add fake SNI and ALPN to peer connections to make it more like standard https connections
- - Hide and ignore tracker_proxy setting in Tor: Always mode as it's going to use Tor anyway.
- - Deny websocket connections from unknown origins
- - Restrict open_browser values to avoid RCE on sandbox escape
- - Offer access web interface by IP address in case of unknown host
- - Link to site's sidebar with "#ZeroNet:OpenSidebar" hash
-
-### Changed
- - Allow .. in file names [Thanks to imachug]
- - Change unstable trackers
- - More clean errors on sites.json/users.json load error
- - Various tweaks for tracker rating on unstable connections
- - Use OpenSSL 1.1 dlls from default Python Windows distribution if possible
- - Re-factor domain resolving for easier domain plugins
- - Disable UDP connections if --proxy is used
- - New, decorator-based Websocket API permission system to avoid future typo mistakes
-
-### Fixed
- - Fix parsing config lines that have no value
- - Fix start.py [Thanks to imachug]
- - Allow multiple values of the same key in the config file [Thanks ssdifnskdjfnsdjk for reporting]
- - Fix parsing config file lines that has % in the value [Thanks slrslr for reporting]
- - Fix bootstrapper plugin hash reloads [Thanks geekless for reporting]
- - Fix CryptMessage plugin OpenSSL dll loading on Windows (ZeroMail errors) [Thanks cxgreat2014 for reporting]
- - Fix startup error when using OpenSSL 1.1 [Thanks to imachug]
- - Fix a bug that did not loaded merged site data for 5 sec after the merged site got added
- - Fix typo that allowed to add new plugins in public proxy mode. [Thanks styromaniac for reporting]
- - Fix loading non-big files with "|all" postfix [Thanks to krzotr]
- - Fix OpenSSL cert generation error crash by change Windows console encoding to utf8
-
-#### Wrapper html injection vulnerability [Reported by ivanq]
-
-In ZeroNet before rev4188 the wrapper template variables was rendered incorrectly.
-
-Result: The opened site was able to gain WebSocket connection with unrestricted ADMIN/NOSANDBOX access, change configuration values and possible RCE on client's machine.
-
-Fix: Fixed the template rendering code, disallowed WebSocket connections from unknown locations, restricted open_browser configuration values to avoid possible RCE in case of sandbox escape.
-
-Note: The fix is also back ported to ZeroNet Py 2.x version (Rev3870)
-
-
-### ZeroNet 0.7.0 (2019-06-12) Rev4106 (First release targeting Python 3.4+)
-### Added
- - 5-10x faster signature verification by using libsecp256k1 (Thanks to ZeroMux)
- - Generated SSL certificate randomization to avoid protocol filters (Thanks to ValdikSS)
- - Offline mode
- - P2P source code update using ZeroNet protocol
- - ecdsaSign/Verify commands to CryptMessage plugin (Thanks to imachug)
- - Efficient file rename: change file names instead of re-downloading the file.
- - Make redirect optional on site cloning (Thanks to Lola)
- - EccPrivToPub / EccPubToPriv functions (Thanks to imachug)
- - Detect and change dark/light theme based on OS setting (Thanks to filips123)
-
-### Changed
- - Re-factored code to Python3 runtime (compatible with Python 3.4-3.8)
- - More safe database sync mode
- - Removed bundled third-party libraries where it's possible
- - Use lang=en instead of lang={lang} in urls to avoid url encode problems
- - Remove environment details from error page
- - Don't push content.json updates larger than 10kb to significantly reduce bw usage for site with many files
-
-### Fixed
- - Fix sending files with \0 characters
- - Security fix: Escape error detail to avoid XSS (reported by krzotr)
- - Fix signature verification using libsecp256k1 for compressed addresses (mostly certificates generated in the browser)
- - Fix newsfeed if you have more than 1000 followed topic/post on one site.
- - Fix site download as zip file
- - Fix displaying sites with utf8 title
- - Error message if dbRebuild fails (Thanks to Lola)
- - Fix browser reopen if executing start.py again. (Thanks to imachug)
-
-
-### ZeroNet 0.6.5 (2019-02-16) Rev3851 (Last release targeting Python 2.7.x)
-### Added
- - IPv6 support in peer exchange, bigfiles, optional file finding, tracker sharing, socket listening and connecting (based on tangdou1 modifications)
- - New tracker database format with IPv6 support
- - Display notification if there is an unpublished modification for your site
- - Listen and shut down normally for SIGTERM (Thanks to blurHY)
- - Support tilde `~` in filenames (by d14na)
- - Support map for Namecoin subdomain names (Thanks to lola)
- - Add log level to config page
- - Support `{data}` for data dir variable in trackers_file value
- - Quick check content.db on startup and rebuild if necessary
- - Don't show meek proxy option if the tor client does not supports it
-
-### Changed
- - Refactored port open checking with IPv6 support
- - Consider non-local IPs as external even is the open port check fails (for CJDNS and Yggdrasil support)
- - Add IPv6 tracker and change unstable tracker
- - Don't correct sent local time with the calculated time correction
- - Disable CSP for Edge
- - Only support CREATE commands in dbschema indexes node and SELECT from storage.query
-
-### Fixed
- - Check the length of master seed when executing cryptGetPrivatekey CLI command
- - Only reload source code on file modification / creation
- - Detection and issue warning for latest no-script plugin
- - Fix atomic write of a non-existent file
- - Fix sql queries with lots of variables and sites with lots of content.json
- - Fix multi-line parsing of zeronet.conf
- - Fix site deletion from users.json
- - Fix site cloning before site downloaded (Reported by unsystemizer)
- - Fix queryJson for non-list nodes (Reported by MingchenZhang)
-
-
## ZeroNet 0.6.4 (2018-10-20) Rev3660
### Added
- New plugin: UiConfig. A web interface that allows changing ZeroNet settings.
diff --git a/COPYING b/COPYING
deleted file mode 100644
index f288702d..00000000
--- a/COPYING
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- Copyright (C)
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-.
diff --git a/Dockerfile b/Dockerfile
index 3f1d3c18..75d18a37 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,33 +1,27 @@
-FROM alpine:3.15
+FROM alpine:3.8
#Base settings
ENV HOME /root
-COPY requirements.txt /root/requirements.txt
-
-#Install ZeroNet
-RUN apk --update --no-cache --no-progress add python3 python3-dev py3-pip gcc g++ autoconf automake libtool libffi-dev musl-dev make tor openssl \
- && pip3 install -r /root/requirements.txt \
- && apk del python3-dev gcc g++ autoconf automake libtool libffi-dev musl-dev make \
- && echo "ControlPort 9051" >> /etc/tor/torrc \
- && echo "CookieAuthentication 1" >> /etc/tor/torrc
-
-RUN python3 -V \
- && python3 -m pip list \
- && tor --version \
- && openssl version
-
#Add Zeronet source
COPY . /root
+
+#Install ZeroNet
+RUN apk --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
+ && pip3 install -r /root/requirements.txt \
+ && apk del python3-dev gcc libffi-dev musl-dev make \
+ && echo "ControlPort 9051" >> /etc/tor/torrc \
+ && echo "CookieAuthentication 1" >> /etc/tor/torrc
+
VOLUME /root/data
#Control if Tor proxy is started
-ENV ENABLE_TOR true
+ENV ENABLE_TOR false
WORKDIR /root
#Set upstart command
-CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26117
+CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
#Expose ports
-EXPOSE 43110 26117
+EXPOSE 43110 26552
diff --git a/Dockerfile.arm64v8 b/Dockerfile.arm64v8
deleted file mode 100644
index d27b7620..00000000
--- a/Dockerfile.arm64v8
+++ /dev/null
@@ -1,34 +0,0 @@
-FROM alpine:3.12
-
-#Base settings
-ENV HOME /root
-
-COPY requirements.txt /root/requirements.txt
-
-#Install ZeroNet
-RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
- && pip3 install -r /root/requirements.txt \
- && apk del python3-dev gcc libffi-dev musl-dev make \
- && echo "ControlPort 9051" >> /etc/tor/torrc \
- && echo "CookieAuthentication 1" >> /etc/tor/torrc
-
-RUN python3 -V \
- && python3 -m pip list \
- && tor --version \
- && openssl version
-
-#Add Zeronet source
-COPY . /root
-VOLUME /root/data
-
-#Control if Tor proxy is started
-ENV ENABLE_TOR false
-
-WORKDIR /root
-
-#Set upstart command
-CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
-
-#Expose ports
-EXPOSE 43110 26552
-
diff --git a/LICENSE b/LICENSE
index 0d17b72d..d6a93266 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,27 +1,340 @@
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, version 3.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see .
-
-
-Additional Conditions :
-
-Contributing to this repo
- This repo is governed by GPLv3, same is located at the root of the ZeroNet git repo,
- unless specified separately all code is governed by that license, contributions to this repo
- are divided into two key types, key contributions and non-key contributions, key contributions
- are which, directly affects the code performance, quality and features of software,
- non key contributions include things like translation datasets, image, graphic or video
- contributions that does not affect the main usability of software but improves the existing
- usability of certain thing or feature, these also include tests written with code, since their
- purpose is to check, whether something is working or not as intended. All the non-key contributions
- are governed by [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/), unless specified
- above, a contribution is ruled by the type of contribution if there is a conflict between two
- contributing parties of repo in any case.
+GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {description}
+ Copyright (C) {year} {fullname}
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ {signature of Ty Coon}, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+
diff --git a/README-ru.md b/README-ru.md
index 7d557727..75abbfab 100644
--- a/README-ru.md
+++ b/README-ru.md
@@ -1,133 +1,211 @@
-# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)
[简体中文](./README-zh-cn.md)
[English](./README.md)
-Децентрализованные вебсайты, использующие криптографию Bitcoin и протокол BitTorrent — https://zeronet.dev ([Зеркало в ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). В отличии от Bitcoin, ZeroNet'у не требуется блокчейн для работы, однако он использует ту же криптографию, чтобы обеспечить сохранность и проверку данных.
+Децентрализованные вебсайты использующие Bitcoin криптографию и BitTorrent сеть - https://zeronet.io
+
## Зачем?
-- Мы верим в открытую, свободную, и неподдающуюся цензуре сеть и связь.
-- Нет единой точки отказа: Сайт остаётся онлайн, пока его обслуживает хотя бы 1 пир.
-- Нет затрат на хостинг: Сайты обслуживаются посетителями.
-- Невозможно отключить: Он нигде, потому что он везде.
-- Скорость и возможность работать без Интернета: Вы сможете получить доступ к сайту, потому что его копия хранится на вашем компьютере и у ваших пиров.
+* Мы верим в открытую, свободную, и не отцензуренную сеть и коммуникацию.
+* Нет единой точки отказа: Сайт онлайн пока по крайней мере 1 пир обслуживает его.
+* Никаких затрат на хостинг: Сайты обслуживаются посетителями.
+* Невозможно отключить: Он нигде, потому что он везде.
+* Быстр и работает оффлайн: Вы можете получить доступ к сайту, даже если Интернет недоступен.
+
## Особенности
+ * Обновляемые в реальном времени сайты
+ * Поддержка Namecoin .bit доменов
+ * Лёгок в установке: распаковал & запустил
+ * Клонирование вебсайтов в один клик
+ * Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+ based authorization: Ваша учетная запись защищена той же криптографией, что и ваш Bitcoin-кошелек
+ * Встроенный SQL-сервер с синхронизацией данных P2P: Позволяет упростить разработку сайта и ускорить загрузку страницы
+ * Анонимность: Полная поддержка сети Tor с помощью скрытых служб .onion вместо адресов IPv4
+ * TLS зашифрованные связи
+ * Автоматическое открытие uPnP порта
+ * Плагин для поддержки многопользовательской (openproxy)
+ * Работает с любыми браузерами и операционными системами
-- Обновление сайтов в реальном времени
-- Поддержка доменов `.bit` ([Namecoin](https://www.namecoin.org))
-- Легкая установка: просто распакуйте и запустите
-- Клонирование сайтов "в один клик"
-- Беспарольная [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
- авторизация: Ваша учетная запись защищена той же криптографией, что и ваш Bitcoin-кошелек
-- Встроенный SQL-сервер с синхронизацией данных P2P: Позволяет упростить разработку сайта и ускорить загрузку страницы
-- Анонимность: Полная поддержка сети Tor, используя скрытые службы `.onion` вместо адресов IPv4
-- Зашифрованное TLS подключение
-- Автоматическое открытие UPnP–порта
-- Плагин для поддержки нескольких пользователей (openproxy)
-- Работа с любыми браузерами и операционными системами
-
-## Текущие ограничения
-
-- Файловые транзакции не сжаты
-- Нет приватных сайтов
## Как это работает?
-- После запуска `zeronet.py` вы сможете посещать сайты в ZeroNet, используя адрес
- `http://127.0.0.1:43110/{zeronet_адрес}`
- (Например: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
-- Когда вы посещаете новый сайт в ZeroNet, он пытается найти пиров с помощью протокола BitTorrent,
- чтобы скачать у них файлы сайта (HTML, CSS, JS и т.д.).
-- После посещения сайта вы тоже становитесь его пиром.
-- Каждый сайт содержит файл `content.json`, который содержит SHA512 хеши всех остальные файлы
- и подпись, созданную с помощью закрытого ключа сайта.
-- Если владелец сайта (тот, кто владеет закрытым ключом для адреса сайта) изменяет сайт, он
+* После запуска `zeronet.py` вы сможете посетить зайты (zeronet сайты) используя адрес
+ `http://127.0.0.1:43110/{zeronet_address}`
+(например. `http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`).
+* Когда вы посещаете новый сайт zeronet, он пытается найти пиров с помощью BitTorrent
+ чтобы загрузить файлы сайтов (html, css, js ...) из них.
+* Каждый посещенный зайт также обслуживается вами. (Т.е хранится у вас на компьютере)
+* Каждый сайт содержит файл `content.json`, который содержит SHA512-хеши всех остальных файлов
+  и подпись, созданную с использованием закрытого ключа сайта.
+* Если владелец сайта (у которого есть закрытый ключ для адреса сайта) изменяет сайт, то он/она
подписывает новый `content.json` и публикует его для пиров. После этого пиры проверяют целостность `content.json`
- (используя подпись), скачвают изменённые файлы и распространяют новый контент для других пиров.
+ (используя подпись), они загружают измененные файлы и публикуют новый контент для других пиров.
+
+#### [Слайд-шоу о криптографии ZeroNet, обновлениях сайтов, многопользовательских сайтах »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+#### [Часто задаваемые вопросы »](https://zeronet.io/docs/faq/)
+
+#### [Документация разработчика ZeroNet »](https://zeronet.io/docs/site_development/getting_started/)
-[Презентация о криптографии ZeroNet, обновлениях сайтов, многопользовательских сайтах »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
-[Часто задаваемые вопросы »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
-[Документация разработчика ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Скриншоты


-[Больше скриншотов в документации ZeroNet »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
-## Как присоединиться?
+#### [Больше скриншотов в ZeroNet документации »](https://zeronet.io/docs/using_zeronet/sample_sites/)
-### Windows
-- Скачайте и распакуйте архив [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26МБ)
-- Запустите `ZeroNet.exe`
+## Как вступить
-### macOS
+* Скачайте ZeroBundle пакет:
+ * [Microsoft Windows](https://github.com/HelloZeroNet/ZeroNet-win/archive/dist/ZeroNet-win.zip)
+ * [Apple macOS](https://github.com/HelloZeroNet/ZeroNet-mac/archive/dist/ZeroNet-mac.zip)
+ * [Linux 64-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz)
+ * [Linux 32-bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux32.tar.gz)
+* Распакуйте где угодно
+* Запустите `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)
-- Скачайте и распакуйте архив [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14МБ)
-- Запустите `ZeroNet.app`
+### Linux терминал
-### Linux (64 бит)
+* `wget https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz`
+* `tar xvpfz ZeroBundle-linux64.tar.gz`
+* `cd ZeroBundle`
+* Запустите с помощью `./ZeroNet.sh`
-- Скачайте и распакуйте архив [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) (14МБ)
-- Запустите `./ZeroNet.sh`
+Он загружает последнюю версию ZeroNet, затем запускает её автоматически.
-> **Note**
-> Запустите таким образом: `./ZeroNet.sh --ui_ip '*' --ui_restrict ваш_ip_адрес`, чтобы разрешить удалённое подключение к веб–интерфейсу.
+#### Ручная установка для Debian Linux
-### Docker
+* `sudo apt-get update`
+* `sudo apt-get install msgpack-python python-gevent`
+* `wget https://github.com/HelloZeroNet/ZeroNet/archive/master.tar.gz`
+* `tar xvpfz master.tar.gz`
+* `cd ZeroNet-master`
+* Запустите с помощью `python2 zeronet.py`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
-Официальный образ находится здесь: https://hub.docker.com/r/canewsin/zeronet/
+### [Arch Linux](https://www.archlinux.org)
-### Android (arm, arm64, x86)
+* `git clone https://aur.archlinux.org/zeronet.git`
+* `cd zeronet`
+* `makepkg -srci`
+* `systemctl start zeronet`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
-- Для работы требуется Android как минимум версии 5.0 Lollipop
-- [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
-- Скачать APK: https://github.com/canewsin/zeronet_mobile/releases
+Смотрите [ArchWiki](https://wiki.archlinux.org)'s [ZeroNet
+article](https://wiki.archlinux.org/index.php/ZeroNet) для дальнейшей помощи.
-### Android (arm, arm64, x86) Облегчённый клиент только для просмотра (1МБ)
+### [Gentoo Linux](https://www.gentoo.org)
-- Для работы требуется Android как минимум версии 4.1 Jelly Bean
-- [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
+* [`layman -a raiagent`](https://github.com/leycec/raiagent)
+* `echo '>=net-vpn/zeronet-0.5.4' >> /etc/portage/package.accept_keywords`
+* *(Опционально)* Включить поддержку Tor: `echo 'net-vpn/zeronet tor' >>
+ /etc/portage/package.use`
+* `emerge zeronet`
+* `rc-service zeronet start`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
-### Установка из исходного кода
+Смотрите `/usr/share/doc/zeronet-*/README.gentoo.bz2` для дальнейшей помощи.
-```sh
-wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
-unzip ZeroNet-src.zip
-cd ZeroNet
-sudo apt-get update
-sudo apt-get install python3-pip
-sudo python3 -m pip install -r requirements.txt
+### [FreeBSD](https://www.freebsd.org/)
+
+* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
+* `sysrc zeronet_enable="YES"`
+* `service zeronet start`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Vagrant](https://www.vagrantup.com/)
+
+* `vagrant up`
+* Подключитесь к VM с помощью `vagrant ssh`
+* `cd /vagrant`
+* Запустите `python2 zeronet.py --ui_ip 0.0.0.0`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Docker](https://www.docker.com/)
+* `docker run -d -v :/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 nofish/zeronet`
+* Этот образ Docker включает в себя прокси-сервер Tor, который по умолчанию отключён.
+  Имейте в виду, что некоторые хостинг-провайдеры могут не позволить вам запускать Tor на своих серверах.
+  Если вы хотите включить его, установите переменную среды `ENABLE_TOR` в `true` (по умолчанию: `false`). Например:
+
+ `docker run -d -e "ENABLE_TOR=true" -v :/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 nofish/zeronet`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)
+
+* `virtualenv env`
+* `source env/bin/activate`
+* `pip install msgpack gevent`
+* `python2 zeronet.py`
+* Откройте http://127.0.0.1:43110/ в вашем браузере.
+
+## Текущие ограничения
+
+* ~~Нет torrent-похожего файла разделения для поддержки больших файлов~~ (поддержка больших файлов добавлена)
+* ~~Не анонимнее чем Bittorrent~~ (добавлена встроенная поддержка Tor)
+* Файловые транзакции не сжаты ~~и не зашифрованы~~ (добавлено шифрование TLS)
+* Нет приватных сайтов
+
+
+## Как я могу создать сайт в Zeronet?
+
+Завершите работу zeronet, если он запущен
+
+```bash
+$ zeronet.py siteCreate
+...
+- Site private key (Приватный ключ сайта): 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
+- Site address (Адрес сайта): 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+- Site created! (Сайт создан)
+$ zeronet.py
+...
```
-- Запустите `python3 zeronet.py`
-Откройте приветственную страницу ZeroHello в вашем браузере по ссылке http://127.0.0.1:43110/
+Поздравляем, вы закончили! Теперь каждый может получить доступ к вашему сайту, используя
+`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
-## Как мне создать сайт в ZeroNet?
+Следующие шаги: [ZeroNet Developer Documentation](https://zeronet.io/docs/site_development/getting_started/)
-- Кликните на **⋮** > **"Create new, empty site"** в меню на сайте [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d).
-- Вы будете **перенаправлены** на совершенно новый сайт, который может быть изменён только вами!
-- Вы можете найти и изменить контент вашего сайта в каталоге **data/[адрес_вашего_сайта]**
-- После изменений откройте ваш сайт, переключите влево кнопку "0" в правом верхнем углу, затем нажмите кнопки **sign** и **publish** внизу
-Следующие шаги: [Документация разработчика ZeroNet](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+## Как я могу модифицировать Zeronet сайт?
+
+* Измените файлы расположенные в data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 директории.
+ Когда закончите с изменением:
+
+```bash
+$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+- Signing site (Подпись сайта): 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
+Private key (Приватный ключ) (input hidden):
+```
+
+* Введите секретный ключ, который вы получили при создании сайта, потом:
+
+```bash
+$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+Site:13DNDk..bhC2 Publishing to 3/10 peers...
+Site:13DNDk..bhC2 Successfuly published to 3 peers
+- Serving files....
+```
+
+* Вот и всё! Вы успешно подписали и опубликовали свои изменения.
+
## Поддержите проект
-- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Рекомендуем)
-- LiberaPay: https://liberapay.com/PramUkesh
-- Paypal: https://paypal.me/PramUkesh
-- Другие способы: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
+- Paypal: https://zeronet.io/docs/help_zeronet/donate/
+
+### Спонсоры
+
+* Улучшенная совместимость с MacOS / Safari стала возможной благодаря [BrowserStack.com](https://www.browserstack.com)
#### Спасибо!
-- Здесь вы можете получить больше информации, помощь, прочитать список изменений и исследовать ZeroNet сайты: https://www.reddit.com/r/zeronetx/
-- Общение происходит на канале [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) или в [Gitter](https://gitter.im/canewsin/ZeroNet)
-- Электронная почта: canews.in@gmail.com
+* Больше информации, помощь, журнал изменений, zeronet сайты: https://www.reddit.com/r/zeronet/
+* Приходите, пообщайтесь с нами: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) или на [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
+* Email: hello@zeronet.io (PGP: CB9613AE)
diff --git a/README-zh-cn.md b/README-zh-cn.md
index 37095ff6..103194ea 100644
--- a/README-zh-cn.md
+++ b/README-zh-cn.md
@@ -1,49 +1,51 @@
-# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
+# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)
[English](./README.md)
-使用 Bitcoin 加密和 BitTorrent 网络的去中心化网络 - https://zeronet.dev
+使用 Bitcoin 加密和 BitTorrent 网络的去中心化网络 - https://zeronet.io
-## 为什么?
+## 为什么?
-* 我们相信开放,自由,无审查的网络和通讯
+* 我们相信开放,自由,无审查的网络
* 不会受单点故障影响:只要有在线的节点,站点就会保持在线
-* 无托管费用:站点由访问者托管
-* 无法关闭:因为节点无处不在
-* 快速并可离线运行:即使没有互联网连接也可以使用
+* 无托管费用: 站点由访问者托管
+* 无法关闭: 因为节点无处不在
+* 快速并可离线运行: 即使没有互联网连接也可以使用
## 功能
* 实时站点更新
* 支持 Namecoin 的 .bit 域名
- * 安装方便:只需解压并运行
+ * 安装方便: 只需解压并运行
* 一键克隆存在的站点
- * 无需密码、基于 [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
- 的认证:您的账户被与比特币钱包相同的加密方法保护
- * 内建 SQL 服务器和 P2P 数据同步:让开发更简单并提升加载速度
- * 匿名性:完整的 Tor 网络支持,支持通过 .onion 隐藏服务相互连接而不是通过 IPv4 地址连接
+ * 无需密码、基于 [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki) 的认证:
+   你的账户使用与比特币钱包相同的加密方法来保护
+ * 内建 SQL 服务器和 P2P 数据同步: 让开发更简单并提升加载速度
+ * 匿名性: 完整的 Tor 网络支持,支持通过 .onion 隐藏服务相互连接而不是通过IPv4地址连接
* TLS 加密连接
* 自动打开 uPnP 端口
- * 多用户(openproxy)支持的插件
- * 适用于任何浏览器 / 操作系统
+ * 插件和多用户 (开放式代理) 支持
+ * 全平台兼容
## 原理
-* 在运行 `zeronet.py` 后,您将可以通过
- `http://127.0.0.1:43110/{zeronet_address}`(例如:
- `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`)访问 zeronet 中的站点
-* 在您浏览 zeronet 站点时,客户端会尝试通过 BitTorrent 网络来寻找可用的节点,从而下载需要的文件(html,css,js...)
-* 您将会储存每一个浏览过的站点
-* 每个站点都包含一个名为 `content.json` 的文件,它储存了其他所有文件的 sha512 散列值以及一个通过站点私钥生成的签名
-* 如果站点的所有者(拥有站点地址的私钥)修改了站点,并且他 / 她签名了新的 `content.json` 然后推送至其他节点,
- 那么这些节点将会在使用签名验证 `content.json` 的真实性后,下载修改后的文件并将新内容推送至另外的节点
+* 在你运行`zeronet.py`后你将可以通过`http://127.0.0.1:43110/{zeronet_address}` (比如.
+`http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`)。访问 zeronet 中的站点。
-#### [关于 ZeroNet 加密,站点更新,多用户站点的幻灯片 »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
-#### [常见问题 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+* 在你浏览 zeronet 站点时,客户端会尝试通过 BitTorrent 网络来寻找可用的节点,从而下载需要的文件 (html, css, js...)
-#### [ZeroNet 开发者文档 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+* 你将会储存每一个浏览过的站点
+* 每个站点都包含一个名为 `content.json` ,它储存了其他所有文件的 sha512 hash 值
+ 和一个通过站点私钥建立的签名
+* 如果站点的所有者 (拥有私钥的那个人) 修改了站点, 并且他/她签名了新的 `content.json` 然后推送至其他节点,
+那么所有节点将会在验证 `content.json` 的真实性 (使用签名)后, 下载修改后的文件并推送至其他节点。
+
+#### [有关于 ZeroNet 加密, 站点更新, 多用户站点的幻灯片 »](https://docs.google.com/presentation/d/1qBxkroB_iiX2zHEn0dt-N-qRZgyEzui46XS2hEa3AA4/pub?start=false&loop=false&delayms=3000)
+#### [常见问题 »](https://zeronet.io/docs/faq/)
+
+#### [ZeroNet开发者文档 »](https://zeronet.io/docs/site_development/getting_started/)
## 屏幕截图
@@ -51,82 +53,136 @@


-#### [ZeroNet 文档中的更多屏幕截图 »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+#### [在 ZeroNet 文档里查看更多的屏幕截图 »](https://zeronet.io/docs/using_zeronet/sample_sites/)
-## 如何加入
+## 如何加入 ?
-### Windows
+* 下载 ZeroBundle 文件包:
+ * [Microsoft Windows](https://github.com/HelloZeroNet/ZeroNet-win/archive/dist/ZeroNet-win.zip)
+ * [Apple macOS](https://github.com/HelloZeroNet/ZeroNet-mac/archive/dist/ZeroNet-mac.zip)
+ * [Linux 64bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz)
+ * [Linux 32bit](https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux32.tar.gz)
+* 解压缩
+* 运行 `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)
- - 下载 [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- - 在任意位置解压缩
- - 运行 `ZeroNet.exe`
-
-### macOS
+### Linux 命令行
- - 下载 [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- - 在任意位置解压缩
- - 运行 `ZeroNet.app`
-
-### Linux (x86-64bit)
+* `wget https://github.com/HelloZeroNet/ZeroBundle/raw/master/dist/ZeroBundle-linux64.tar.gz`
+* `tar xvpfz ZeroBundle-linux64.tar.gz`
+* `cd ZeroBundle`
+* 执行 `./ZeroNet.sh` 来启动
- - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- - `unzip ZeroNet-linux.zip`
- - `cd ZeroNet-linux`
- - 使用以下命令启动 `./ZeroNet.sh`
- - 在浏览器打开 http://127.0.0.1:43110/ 即可访问 ZeroHello 页面
-
- __提示:__ 若要允许在 Web 界面上的远程连接,使用以下命令启动 `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address`
+在你打开时他将会自动下载最新版本的 ZeroNet 。
-### 从源代码安装
+#### 在 Debian Linux 中手动安装
- - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- - `unzip ZeroNet-src.zip`
- - `cd ZeroNet`
- - `sudo apt-get update`
- - `sudo apt-get install python3-pip`
- - `sudo python3 -m pip install -r requirements.txt`
- - 使用以下命令启动 `python3 zeronet.py`
- - 在浏览器打开 http://127.0.0.1:43110/ 即可访问 ZeroHello 页面
+* `sudo apt-get update`
+* `sudo apt-get install msgpack-python python-gevent`
+* `wget https://github.com/HelloZeroNet/ZeroNet/archive/master.tar.gz`
+* `tar xvpfz master.tar.gz`
+* `cd ZeroNet-master`
+* 执行 `python2 zeronet.py` 来启动
+* 在你的浏览器中打开 http://127.0.0.1:43110/
- ### Android (arm, arm64, x86)
- - minimum Android version supported 21 (Android 5.0 Lollipop)
- - [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- - APK download: https://github.com/canewsin/zeronet_mobile/releases
+### [FreeBSD](https://www.freebsd.org/)
-### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
- - minimum Android version supported 16 (JellyBean)
- - [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
+* `pkg install zeronet` 或者 `cd /usr/ports/security/zeronet/ && make install clean`
+* `sysrc zeronet_enable="YES"`
+* `service zeronet start`
+* 在你的浏览器中打开 http://127.0.0.1:43110/
+
+### [Vagrant](https://www.vagrantup.com/)
+
+* `vagrant up`
+* 通过 `vagrant ssh` 连接到 VM
+* `cd /vagrant`
+* 运行 `python2 zeronet.py --ui_ip 0.0.0.0`
+* 在你的浏览器中打开 http://127.0.0.1:43110/
+
+### [Docker](https://www.docker.com/)
+* `docker run -d -v :/root/data -p 26552:26552 -p 43110:43110 nofish/zeronet`
+* 这个 Docker 镜像包含了 Tor ,但默认是禁用的,因为一些托管商不允许你在他们的服务器上运行 Tor。如果你希望启用它,
+设置 `ENABLE_TOR` 环境变量为 `true` (默认: `false`)。例如:
+
+ `docker run -d -e "ENABLE_TOR=true" -v :/root/data -p 26552:26552 -p 43110:43110 nofish/zeronet`
+* 在你的浏览器中打开 http://127.0.0.1:43110/
+
+### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)
+
+* `virtualenv env`
+* `source env/bin/activate`
+* `pip install msgpack gevent`
+* `python2 zeronet.py`
+* 在你的浏览器中打开 http://127.0.0.1:43110/
## 现有限制
-* 传输文件时没有压缩
+* ~~没有类似于 BitTorrent 的文件拆分来支持大文件~~ (已添加大文件支持)
+* ~~没有比 BitTorrent 更好的匿名性~~ (已添加内置的完整 Tor 支持)
+* 传输文件时没有压缩~~和加密~~ (已添加 TLS 支持)
* 不支持私有站点
-## 如何创建一个 ZeroNet 站点?
+## 如何创建一个 ZeroNet 站点?
- * 点击 [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) 站点的 **⋮** > **「新建空站点」** 菜单项
- * 您将被**重定向**到一个全新的站点,该站点只能由您修改
- * 您可以在 **data/[您的站点地址]** 目录中找到并修改网站的内容
- * 修改后打开您的网站,将右上角的「0」按钮拖到左侧,然后点击底部的**签名**并**发布**按钮
-接下来的步骤:[ZeroNet 开发者文档](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+如果 zeronet 在运行,把它关掉
+执行:
+```bash
+$ zeronet.py siteCreate
+...
+- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
+- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+- Site created!
+$ zeronet.py
+...
+```
+
+你已经完成了! 现在任何人都可以通过
+`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
+来访问你的站点
+
+下一步: [ZeroNet 开发者文档](https://zeronet.io/docs/site_development/getting_started/)
+
+
+## 我要如何修改 ZeroNet 站点?
+
+* 修改位于 data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 的目录.
+ 在你改好之后:
+
+```bash
+$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
+Private key (input hidden):
+```
+
+* 输入你在创建站点时获得的私钥
+
+```bash
+$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+Site:13DNDk..bhC2 Publishing to 3/10 peers...
+Site:13DNDk..bhC2 Successfuly published to 3 peers
+- Serving files....
+```
+
+* 就是这样! 你现在已经成功的签名并推送了你的更改。
+
## 帮助这个项目
-- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
-- LiberaPay: https://liberapay.com/PramUkesh
-- Paypal: https://paypal.me/PramUkesh
-- Others: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
+- Paypal: https://zeronet.io/docs/help_zeronet/donate/
-#### 感谢您!
+### 赞助商
-* 更多信息,帮助,变更记录和 zeronet 站点:https://www.reddit.com/r/zeronetx/
-* 前往 [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) 或 [gitter](https://gitter.im/canewsin/ZeroNet) 和我们聊天
-* [这里](https://gitter.im/canewsin/ZeroNet)是一个 gitter 上的中文聊天室
-* Email: canews.in@gmail.com
+* 在 OSX/Safari 下 [BrowserStack.com](https://www.browserstack.com) 带来更好的兼容性
+
+#### 感谢!
+
+* 更多信息, 帮助, 变更记录和 zeronet 站点: https://www.reddit.com/r/zeronet/
+* 在: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) 和我们聊天,或者使用 [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
+* [这里](https://gitter.im/ZeroNet-zh/Lobby)是一个 gitter 上的中文聊天室
+* Email: hello@noloop.me
diff --git a/README.md b/README.md
index 70b79adc..708116e3 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,8 @@
-# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
-
-Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/), Unlike Bitcoin, ZeroNet Doesn't need a blockchain to run, But uses cryptography used by BTC, to ensure data integrity and validation.
+__Warning: Development test version, do not use on live data__
+
+# ZeroNet [](https://travis-ci.org/HelloZeroNet/ZeroNet) [](https://zeronet.io/docs/faq/) [](https://zeronet.io/docs/help_zeronet/donate/)
+
+Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.io
## Why?
@@ -33,22 +35,22 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/
* After starting `zeronet.py` you will be able to visit zeronet sites using
`http://127.0.0.1:43110/{zeronet_address}` (eg.
- `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
+ `http://127.0.0.1:43110/1HeLLo4uzjaLetFx6NH3PMwFP3qbRbTf3D`).
* When you visit a new zeronet site, it tries to find peers using the BitTorrent
network so it can download the site files (html, css, js...) from them.
* Each visited site is also served by you.
* Every site contains a `content.json` file which holds all other files in a sha512 hash
and a signature generated using the site's private key.
* If the site owner (who has the private key for the site address) modifies the
- site and signs the new `content.json` and publishes it to the peers.
+ site, then he/she signs the new `content.json` and publishes it to the peers.
Afterwards, the peers verify the `content.json` integrity (using the
signature), they download the modified files and publish the new content to
other peers.
#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
-#### [Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+#### [Frequently asked questions »](https://zeronet.io/docs/faq/)
-#### [ZeroNet Developer Documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+#### [ZeroNet Developer Documentation »](https://zeronet.io/docs/site_development/getting_started/)
## Screenshots
@@ -56,101 +58,116 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/


-#### [More screenshots in ZeroNet docs »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+#### [More screenshots in ZeroNet docs »](https://zeronet.io/docs/using_zeronet/sample_sites/)
## How to join
-### Windows
-
- - Download [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) (26MB)
- - Unpack anywhere
- - Run `ZeroNet.exe`
-
-### macOS
-
- - Download [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) (14MB)
- - Unpack anywhere
- - Run `ZeroNet.app`
-
-### Linux (x86-64bit)
- - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
- - `unzip ZeroNet-linux.zip`
- - `cd ZeroNet-linux`
- - Start with: `./ZeroNet.sh`
- - Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
-
- __Tip:__ Start with `./ZeroNet.sh --ui_ip '*' --ui_restrict your.ip.address` to allow remote connections on the web interface.
-
- ### Android (arm, arm64, x86)
- - minimum Android version supported 21 (Android 5.0 Lollipop)
- - [](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
- - APK download: https://github.com/canewsin/zeronet_mobile/releases
-
-### Android (arm, arm64, x86) Thin Client for Preview Only (Size 1MB)
- - minimum Android version supported 16 (JellyBean)
- - [](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
-
-
-#### Docker
-There is an official image, built from source at: https://hub.docker.com/r/canewsin/zeronet/
-
-### Online Proxies
-Proxies are like seed boxes for sites(i.e ZNX runs on a cloud vps), you can try zeronet experience from proxies. Add your proxy below if you have one.
-
-#### Official ZNX Proxy :
-
-https://proxy.zeronet.dev/
-
-https://zeronet.dev/
-
-#### From Community
-
-https://0net-preview.com/
-
-https://portal.ngnoid.tv/
-
-https://zeronet.ipfsscan.io/
+### Install from package for your distribution
+* Arch Linux: [zeronet](https://aur.archlinux.org/zeronet.git), [zeronet-git](https://aur.archlinux.org/zeronet-git.git)
+* Gentoo: [emerge repository](https://github.com/leycec/raiagent)
+* FreeBSD: zeronet
+* Whonix: [instructions](https://www.whonix.org/wiki/ZeroNet)
### Install from source
- - `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
- - `unzip ZeroNet-src.zip`
- - `cd ZeroNet`
- - `sudo apt-get update`
- - `sudo apt-get install python3-pip`
- - `sudo python3 -m pip install -r requirements.txt`
- - Start with: `python3 zeronet.py`
- - Open the ZeroHello landing page in your browser by navigating to: http://127.0.0.1:43110/
+Fetch and extract the source:
+
+ wget https://github.com/HelloZeroNet/ZeroNet/archive/py3.tar.gz
+ tar xvpfz py3.tar.gz
+ cd ZeroNet-py3
+
+Install Python module dependencies either:
+
+* (Option A) into a [virtual env](https://virtualenv.readthedocs.org/en/latest/)
+
+ ```
+ virtualenv zeronet
+ source zeronet/bin/activate
+ python -m pip install -r requirements.txt
+ ```
+
+* (Option B) into the system (requires root), for example, on Debian/Ubuntu:
+
+ ```
+ sudo apt-get update
+ sudo apt-get install python3-pip
+ sudo python3 -m pip install -r requirements.txt
+ ```
+
+Start Zeronet:
+
+ python3 zeronet.py
+
+Open the ZeroHello landing page in your browser by navigating to:
+
+ http://127.0.0.1:43110/
## Current limitations
-* File transactions are not compressed
+* ~~No torrent-like file splitting for big file support~~ (big file support added)
+* ~~No more anonymous than Bittorrent~~ (built-in full Tor support added)
+* File transactions are not compressed ~~or encrypted yet~~ (TLS encryption added)
* No private sites
## How can I create a ZeroNet site?
- * Click on **⋮** > **"Create new, empty site"** menu item on the site [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d).
- * You will be **redirected** to a completely new site that is only modifiable by you!
- * You can find and modify your site's content in **data/[yoursiteaddress]** directory
- * After the modifications open your site, drag the topright "0" button to left, then press **sign** and **publish** buttons on the bottom
+Shut down zeronet if you are running it already
+
+```bash
+$ zeronet.py siteCreate
+...
+- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
+- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+- Site created!
+$ zeronet.py
+...
+```
+
+Congratulations, you're finished! Now anyone can access your site using
+`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
+
+Next steps: [ZeroNet Developer Documentation](https://zeronet.io/docs/site_development/getting_started/)
+
+
+## How can I modify a ZeroNet site?
+
+* Modify files located in data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 directory.
+ After you're finished:
+
+```bash
+$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
+Private key (input hidden):
+```
+
+* Enter the private key you got when you created the site, then:
+
+```bash
+$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
+...
+Site:13DNDk..bhC2 Publishing to 3/10 peers...
+Site:13DNDk..bhC2 Successfuly published to 3 peers
+- Serving files....
+```
+
+* That's it! You've successfully signed and published your modifications.
-Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
## Help keep this project alive
-- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
-- LiberaPay: https://liberapay.com/PramUkesh
-- Paypal: https://paypal.me/PramUkesh
-- Others: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
+
+- Bitcoin: 1QDhxQ6PraUZa21ET5fYUCPgdrwBomnFgX
+- Paypal: https://zeronet.io/docs/help_zeronet/donate/
+
+### Sponsors
+
+* Better macOS/Safari compatibility made possible by [BrowserStack.com](https://www.browserstack.com)
#### Thank you!
-* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronetx/
-* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
-* Email: canews.in@gmail.com
+* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronet/
+* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/HelloZeroNet/ZeroNet)
+* Email: hello@zeronet.io (PGP: CB9613AE)
diff --git a/plugins b/plugins
deleted file mode 160000
index 689d9309..00000000
--- a/plugins
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
diff --git a/plugins/AnnounceLocal/AnnounceLocalPlugin.py b/plugins/AnnounceLocal/AnnounceLocalPlugin.py
new file mode 100644
index 00000000..0919762a
--- /dev/null
+++ b/plugins/AnnounceLocal/AnnounceLocalPlugin.py
@@ -0,0 +1,148 @@
+import time
+
+import gevent
+
+from Plugin import PluginManager
+from Config import config
+from . import BroadcastServer
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):  # Adds LAN peer discovery alongside the normal tracker announce
+    def announce(self, force=False, *args, **kwargs):
+        local_announcer = self.site.connection_server.local_announcer  # None when local discovery is disabled
+
+        thread = None
+        if local_announcer and (force or time.time() - local_announcer.last_discover > 5 * 60):  # re-discover at most every 5 minutes unless forced
+            thread = gevent.spawn(local_announcer.discover, force=force)
+        back = super(SiteAnnouncerPlugin, self).announce(force=force, *args, **kwargs)  # run the regular announce in parallel
+
+        if thread:
+            thread.join()  # wait for local discovery to finish before returning
+
+        return back
+
+
+class LocalAnnouncer(BroadcastServer.BroadcastServer):
+    def __init__(self, server, listen_port):
+        super(LocalAnnouncer, self).__init__("zeronet", listen_port=listen_port)
+        self.server = server
+
+        self.sender_info["peer_id"] = self.server.peer_id  # identify ourselves in every outgoing broadcast
+        self.sender_info["port"] = self.server.port
+        self.sender_info["broadcast_port"] = listen_port
+        self.sender_info["rev"] = config.rev
+
+        self.known_peers = {}  # peer_id -> {"added"/"updated"/"found" timestamps, "sites_changed"}
+        self.last_discover = 0  # unix time of the last discover broadcast
+
+    def discover(self, force=False):
+        self.log.debug("Sending discover request (force: %s)" % force)
+        self.last_discover = time.time()
+        if force:  # Probably new site added, clean cache
+            self.known_peers = {}
+
+        for peer_id, known_peer in list(self.known_peers.items()):
+            if time.time() - known_peer["found"] > 20 * 60:  # forget peers not seen for 20 minutes
+                del(self.known_peers[peer_id])
+                self.log.debug("Timeout, removing from known_peers: %s" % peer_id)
+        self.broadcast({"cmd": "discoverRequest", "params": {}}, port=self.listen_port)
+
+    def actionDiscoverRequest(self, sender, params):
+        back = {
+            "cmd": "discoverResponse",
+            "params": {
+                "sites_changed": self.server.site_manager.sites_changed
+            }
+        }
+
+        if sender["peer_id"] not in self.known_peers:
+            self.known_peers[sender["peer_id"]] = {"added": time.time(), "sites_changed": 0, "updated": 0, "found": time.time()}
+            self.log.debug("Got discover request from unknown peer %s (%s), time to refresh known peers" % (sender["ip"], sender["peer_id"]))
+            gevent.spawn_later(1.0, self.discover)  # Let the response arrive first to the requester
+
+        return back
+
+    def actionDiscoverResponse(self, sender, params):
+        if sender["peer_id"] in self.known_peers:
+            self.known_peers[sender["peer_id"]]["found"] = time.time()  # mark peer as alive
+        if params["sites_changed"] != self.known_peers.get(sender["peer_id"], {}).get("sites_changed"):
+            # Peer's site list changed, request the list of new sites
+            return {"cmd": "siteListRequest"}
+        else:
+            # Peer's site list is the same
+            for site in self.server.sites.values():
+                peer = site.peers.get("%s:%s" % (sender["ip"], sender["port"]))
+                if peer:
+                    peer.found("local")
+
+    def actionSiteListRequest(self, sender, params):
+        back = []
+        sites = list(self.server.sites.values())
+
+        # Split adresses to group of 100 to avoid UDP size limit
+        site_groups = [sites[i:i + 100] for i in range(0, len(sites), 100)]
+        for site_group in site_groups:
+            res = {}
+            res["sites_changed"] = self.server.site_manager.sites_changed
+            res["sites"] = [site.address_hash for site in site_group]
+            back.append({"cmd": "siteListResponse", "params": res})
+        return back
+
+    def actionSiteListResponse(self, sender, params):
+        s = time.time()  # for timing the parse below
+        peer_sites = set(params["sites"])  # address hashes the remote peer is serving
+        num_found = 0
+        added_sites = []
+        for site in self.server.sites.values():
+            if site.address_hash in peer_sites:
+                added = site.addPeer(sender["ip"], sender["port"], source="local")
+                num_found += 1
+                if added:
+                    site.worker_manager.onPeers()
+                    site.updateWebsocket(peers_added=1)
+                    added_sites.append(site)
+
+        # Save sites changed value to avoid unnecessary site list download
+        if sender["peer_id"] not in self.known_peers:
+            self.known_peers[sender["peer_id"]] = {"added": time.time()}
+
+        self.known_peers[sender["peer_id"]]["sites_changed"] = params["sites_changed"]
+        self.known_peers[sender["peer_id"]]["updated"] = time.time()
+        self.known_peers[sender["peer_id"]]["found"] = time.time()
+
+        self.log.debug(
+            "Tracker result: Discover from %s response parsed in %.3fs, found: %s added: %s of %s" %
+            (sender["ip"], time.time() - s, num_found, added_sites, len(peer_sites))
+        )
+
+
+@PluginManager.registerTo("FileServer")
+class FileServerPlugin(object):  # Attaches a LocalAnnouncer to the file server lifecycle
+    def __init__(self, *args, **kwargs):
+        res = super(FileServerPlugin, self).__init__(*args, **kwargs)
+        if config.broadcast_port and config.tor != "always" and not config.disable_udp:  # skip when Tor-only (presumably to avoid leaking the local IP) or UDP disabled
+            self.local_announcer = LocalAnnouncer(self, config.broadcast_port)
+        else:
+            self.local_announcer = None
+        return res
+
+    def start(self, *args, **kwargs):
+        if self.local_announcer:
+            gevent.spawn(self.local_announcer.start)  # run the UDP listener in the background
+        return super(FileServerPlugin, self).start(*args, **kwargs)
+
+    def stop(self):
+        if self.local_announcer:
+            self.local_announcer.stop()  # close the broadcast socket before stopping the server
+        res = super(FileServerPlugin, self).stop()
+        return res
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):  # Registers the plugin's command-line options
+    def createArguments(self):
+        group = self.parser.add_argument_group("AnnounceLocal plugin")
+        group.add_argument('--broadcast_port', help='UDP broadcasting port for local peer discovery', default=1544, type=int, metavar='port')
+
+        return super(ConfigPlugin, self).createArguments()
diff --git a/plugins/AnnounceLocal/BroadcastServer.py b/plugins/AnnounceLocal/BroadcastServer.py
new file mode 100644
index 00000000..74678896
--- /dev/null
+++ b/plugins/AnnounceLocal/BroadcastServer.py
@@ -0,0 +1,139 @@
+import socket
+import logging
+import time
+from contextlib import closing
+
+from Debug import Debug
+from util import UpnpPunch
+from util import Msgpack
+
+
+class BroadcastServer(object):
+    def __init__(self, service_name, listen_port=1544, listen_ip=''):
+        self.log = logging.getLogger("BroadcastServer")
+        self.listen_port = listen_port
+        self.listen_ip = listen_ip
+
+        self.running = False
+        self.sock = None
+        self.sender_info = {"service": service_name}  # attached to every message we send
+
+    def createBroadcastSocket(self):
+        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        if hasattr(socket, 'SO_REUSEPORT'):  # not available on every platform
+            try:
+                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+            except Exception as err:
+                self.log.warning("Error setting SO_REUSEPORT: %s" % err)
+
+        binded = False
+        for retry in range(3):  # retry bind with an increasing delay (0s, 1s, 2s)
+            try:
+                sock.bind((self.listen_ip, self.listen_port))
+                binded = True
+                break
+            except Exception as err:
+                self.log.error(
+                    "Socket bind to %s:%s error: %s, retry #%s" %
+                    (self.listen_ip, self.listen_port, Debug.formatException(err), retry)
+                )
+                time.sleep(retry)
+
+        if binded:
+            return sock
+        else:
+            return False
+
+    def start(self):  # Listens for discover requests
+        self.sock = self.createBroadcastSocket()
+        if not self.sock:
+            self.log.error("Unable to listen on port %s" % self.listen_port)
+            return
+
+        self.log.debug("Started on port %s" % self.listen_port)
+
+        self.running = True
+
+        while self.running:
+            try:
+                data, addr = self.sock.recvfrom(8192)
+            except Exception as err:
+                if self.running:  # socket errors during shutdown are expected; only log while running
+                    self.log.error("Listener receive error: %s" % err)
+                continue
+
+            if not self.running:
+                break
+
+            try:
+                message = Msgpack.unpack(data)
+                response_addr, message = self.handleMessage(addr, message)
+                if message:
+                    self.send(response_addr, message)
+            except Exception as err:
+                self.log.error("Handlemessage error: %s" % Debug.formatException(err))
+        self.log.debug("Stopped listening on port %s" % self.listen_port)
+
+    def stop(self):
+        self.log.debug("Stopping, socket: %s" % self.sock)
+        self.running = False
+        if self.sock:
+            self.sock.close()
+
+    def send(self, addr, message):
+        if type(message) is not list:  # normalize to a list of message parts
+            message = [message]
+
+        for message_part in message:
+            message_part["sender"] = self.sender_info
+
+            self.log.debug("Send to %s: %s" % (addr, message_part["cmd"]))
+            with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+                # Use the socket managed by closing(); reassigning a fresh socket here would leak it
+                sock.sendto(Msgpack.pack(message_part), addr)
+
+    def getMyIps(self):
+        return UpnpPunch._get_local_ips()
+
+    def broadcast(self, message, port=None):
+        if not port:
+            port = self.listen_port
+
+        my_ips = self.getMyIps()
+        addr = ("255.255.255.255", port)
+
+        message["sender"] = self.sender_info
+        self.log.debug("Broadcast using ips %s on port %s: %s" % (my_ips, port, message["cmd"]))
+
+        for my_ip in my_ips:
+            try:
+                with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+                    # Use the socket managed by closing(); reassigning a fresh socket here would leak it
+                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+                    sock.bind((my_ip, 0))  # bind to this interface so the broadcast leaves on it
+                    sock.sendto(Msgpack.pack(message), addr)
+            except Exception as err:
+                self.log.warning("Error sending broadcast using ip %s: %s" % (my_ip, err))
+
+    def handleMessage(self, addr, message):
+        self.log.debug("Got from %s: %s" % (addr, message["cmd"]))
+        cmd = message["cmd"]
+        params = message.get("params", {})
+        sender = message["sender"]
+        sender["ip"] = addr[0]
+
+        func_name = "action" + cmd[0].upper() + cmd[1:]  # e.g. "discoverRequest" -> actionDiscoverRequest
+        func = getattr(self, func_name, None)
+
+        if sender["service"] != "zeronet" or sender["peer_id"] == self.sender_info["peer_id"]:
+            # Skip messages not for us or sent by us
+            message = None
+        elif func:
+            message = func(sender, params)
+        else:
+            self.log.debug("Unknown cmd: %s" % cmd)
+            message = None
+
+        return (sender["ip"], sender["broadcast_port"]), message
diff --git a/plugins/AnnounceLocal/Test/TestAnnounce.py b/plugins/AnnounceLocal/Test/TestAnnounce.py
new file mode 100644
index 00000000..4def02ed
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/TestAnnounce.py
@@ -0,0 +1,113 @@
+import time
+import copy
+
+import gevent
+import pytest
+import mock
+
+from AnnounceLocal import AnnounceLocalPlugin
+from File import FileServer
+from Test import Spy
+
+@pytest.fixture
+def announcer(file_server, site):
+ file_server.sites[site.address] = site
+ announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server, listen_port=1100)
+ file_server.local_announcer = announcer
+ announcer.listen_port = 1100
+ announcer.sender_info["broadcast_port"] = 1100
+ announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
+ announcer.discover = mock.MagicMock(return_value=False) # Don't send discover requests automatically
+ gevent.spawn(announcer.start)
+ time.sleep(0.5)
+
+ assert file_server.local_announcer.running
+ return file_server.local_announcer
+
+@pytest.fixture
+def announcer_remote(request, site_temp):
+ file_server_remote = FileServer("127.0.0.1", 1545)
+ file_server_remote.sites[site_temp.address] = site_temp
+ announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server_remote, listen_port=1101)
+ file_server_remote.local_announcer = announcer
+ announcer.listen_port = 1101
+ announcer.sender_info["broadcast_port"] = 1101
+ announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
+ announcer.discover = mock.MagicMock(return_value=False) # Don't send discover requests automatically
+ gevent.spawn(announcer.start)
+ time.sleep(0.5)
+
+ assert file_server_remote.local_announcer.running
+
+ def cleanup():
+ file_server_remote.stop()
+ request.addfinalizer(cleanup)
+
+
+ return file_server_remote.local_announcer
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestAnnounce:
+ def testSenderInfo(self, announcer):
+ sender_info = announcer.sender_info
+ assert sender_info["port"] > 0
+ assert len(sender_info["peer_id"]) == 20
+ assert sender_info["rev"] > 0
+
+ def testIgnoreSelfMessages(self, announcer):
+        # No response to messages that have the same peer_id as the server
+ assert not announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": announcer.sender_info, "params": {}})[1]
+
+ # Response to messages with different peer id
+ sender_info = copy.copy(announcer.sender_info)
+ sender_info["peer_id"] += "-"
+ addr, res = announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": sender_info, "params": {}})
+ assert res["params"]["sites_changed"] > 0
+
+ def testDiscoverRequest(self, announcer, announcer_remote):
+ assert len(announcer_remote.known_peers) == 0
+ with Spy.Spy(announcer_remote, "handleMessage") as responses:
+ announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
+ time.sleep(0.1)
+
+ response_cmds = [response[1]["cmd"] for response in responses]
+ assert response_cmds == ["discoverResponse", "siteListResponse"]
+ assert len(responses[-1][1]["params"]["sites"]) == 1
+
+ # It should only request siteList if sites_changed value is different from last response
+ with Spy.Spy(announcer_remote, "handleMessage") as responses:
+ announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
+ time.sleep(0.1)
+
+ response_cmds = [response[1]["cmd"] for response in responses]
+ assert response_cmds == ["discoverResponse"]
+
+ def testPeerDiscover(self, announcer, announcer_remote, site):
+ assert announcer.server.peer_id != announcer_remote.server.peer_id
+ assert len(list(announcer.server.sites.values())[0].peers) == 0
+ announcer.broadcast({"cmd": "discoverRequest"}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert len(list(announcer.server.sites.values())[0].peers) == 1
+
+ def testRecentPeerList(self, announcer, announcer_remote, site):
+ assert len(site.peers_recent) == 0
+ assert len(site.peers) == 0
+ with Spy.Spy(announcer, "handleMessage") as responses:
+ announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert [response[1]["cmd"] for response in responses] == ["discoverResponse", "siteListResponse"]
+ assert len(site.peers_recent) == 1
+ assert len(site.peers) == 1
+
+ # It should update peer without siteListResponse
+ last_time_found = list(site.peers.values())[0].time_found
+ site.peers_recent.clear()
+ with Spy.Spy(announcer, "handleMessage") as responses:
+ announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert [response[1]["cmd"] for response in responses] == ["discoverResponse"]
+ assert len(site.peers_recent) == 1
+ assert list(site.peers.values())[0].time_found > last_time_found
+
+
diff --git a/plugins/AnnounceLocal/Test/conftest.py b/plugins/AnnounceLocal/Test/conftest.py
new file mode 100644
index 00000000..a88c642c
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/conftest.py
@@ -0,0 +1,4 @@
+from src.Test.conftest import *
+
+from Config import config
+config.broadcast_port = 0
diff --git a/plugins/AnnounceLocal/Test/pytest.ini b/plugins/AnnounceLocal/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/AnnounceLocal/__init__.py b/plugins/AnnounceLocal/__init__.py
new file mode 100644
index 00000000..5b80abd2
--- /dev/null
+++ b/plugins/AnnounceLocal/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceLocalPlugin
\ No newline at end of file
diff --git a/plugins/AnnounceShare/AnnounceSharePlugin.py b/plugins/AnnounceShare/AnnounceSharePlugin.py
new file mode 100644
index 00000000..057ce55a
--- /dev/null
+++ b/plugins/AnnounceShare/AnnounceSharePlugin.py
@@ -0,0 +1,190 @@
+import time
+import os
+import logging
+import json
+import atexit
+
+import gevent
+
+from Config import config
+from Plugin import PluginManager
+from util import helper
+
+
+class TrackerStorage(object):
+ def __init__(self):
+ self.log = logging.getLogger("TrackerStorage")
+ self.file_path = "%s/trackers.json" % config.data_dir
+ self.load()
+ self.time_discover = 0.0
+ atexit.register(self.save)
+
+ def getDefaultFile(self):
+ return {"shared": {}}
+
+ def onTrackerFound(self, tracker_address, type="shared", my=False):
+ if not tracker_address.startswith("zero://"):
+ return False
+
+ trackers = self.getTrackers()
+ added = False
+ if tracker_address not in trackers:
+ trackers[tracker_address] = {
+ "time_added": time.time(),
+ "time_success": 0,
+ "latency": 99.0,
+ "num_error": 0,
+ "my": False
+ }
+ self.log.debug("New tracker found: %s" % tracker_address)
+ added = True
+
+ trackers[tracker_address]["time_found"] = time.time()
+ trackers[tracker_address]["my"] = my
+ return added
+
+ def onTrackerSuccess(self, tracker_address, latency):
+ trackers = self.getTrackers()
+ if tracker_address not in trackers:
+ return False
+
+ trackers[tracker_address]["latency"] = latency
+ trackers[tracker_address]["time_success"] = time.time()
+ trackers[tracker_address]["num_error"] = 0
+
+ def onTrackerError(self, tracker_address):
+ trackers = self.getTrackers()
+ if tracker_address not in trackers:
+ return False
+
+ trackers[tracker_address]["time_error"] = time.time()
+ trackers[tracker_address]["num_error"] += 1
+
+ if len(self.getWorkingTrackers()) >= config.working_shared_trackers_limit:
+ error_limit = 5
+ else:
+ error_limit = 30
+ error_limit
+
+ if trackers[tracker_address]["num_error"] > error_limit and trackers[tracker_address]["time_success"] < time.time() - 60 * 60:
+ self.log.debug("Tracker %s looks down, removing." % tracker_address)
+ del trackers[tracker_address]
+
+ def getTrackers(self, type="shared"):
+ return self.file_content.setdefault(type, {})
+
+ def getWorkingTrackers(self, type="shared"):
+ trackers = {
+ key: tracker for key, tracker in self.getTrackers(type).items()
+ if tracker["time_success"] > time.time() - 60 * 60
+ }
+ return trackers
+
+ def getFileContent(self):
+ if not os.path.isfile(self.file_path):
+ open(self.file_path, "w").write("{}")
+ return self.getDefaultFile()
+ try:
+ return json.load(open(self.file_path))
+ except Exception as err:
+ self.log.error("Error loading trackers list: %s" % err)
+ return self.getDefaultFile()
+
+ def load(self):
+ self.file_content = self.getFileContent()
+
+ trackers = self.getTrackers()
+ self.log.debug("Loaded %s shared trackers" % len(trackers))
+ for address, tracker in list(trackers.items()):
+ tracker["num_error"] = 0
+ if not address.startswith("zero://"):
+ del trackers[address]
+
+ def save(self):
+ s = time.time()
+ helper.atomicWrite(self.file_path, json.dumps(self.file_content, indent=2, sort_keys=True).encode("utf8"))
+ self.log.debug("Saved in %.3fs" % (time.time() - s))
+
+ def discoverTrackers(self, peers):
+ if len(self.getWorkingTrackers()) > config.working_shared_trackers_limit:
+ return False
+ s = time.time()
+ num_success = 0
+ for peer in peers:
+ if peer.connection and peer.connection.handshake.get("rev", 0) < 3560:
+ continue # Not supported
+
+ res = peer.request("getTrackers")
+ if not res or "error" in res:
+ continue
+
+ num_success += 1
+ for tracker_address in res["trackers"]:
+                if type(tracker_address) is bytes:  # Backward compatibility
+ tracker_address = tracker_address.decode("utf8")
+ added = self.onTrackerFound(tracker_address)
+ if added: # Only add one tracker from one source
+ break
+
+ if not num_success and len(peers) < 20:
+ self.time_discover = 0.0
+
+ if num_success:
+ self.save()
+
+ self.log.debug("Trackers discovered from %s/%s peers in %.3fs" % (num_success, len(peers), time.time() - s))
+
+
+if "tracker_storage" not in locals():
+ tracker_storage = TrackerStorage()
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):
+ def getTrackers(self):
+ if tracker_storage.time_discover < time.time() - 5 * 60:
+ tracker_storage.time_discover = time.time()
+ gevent.spawn(tracker_storage.discoverTrackers, self.site.getConnectedPeers())
+ trackers = super(SiteAnnouncerPlugin, self).getTrackers()
+ shared_trackers = list(tracker_storage.getTrackers("shared").keys())
+ if shared_trackers:
+ return trackers + shared_trackers
+ else:
+ return trackers
+
+ def announceTracker(self, tracker, *args, **kwargs):
+ res = super(SiteAnnouncerPlugin, self).announceTracker(tracker, *args, **kwargs)
+ if res:
+ latency = res
+ tracker_storage.onTrackerSuccess(tracker, latency)
+ elif res is False:
+ tracker_storage.onTrackerError(tracker)
+
+ return res
+
+
+@PluginManager.registerTo("FileRequest")
+class FileRequestPlugin(object):
+ def actionGetTrackers(self, params):
+ shared_trackers = list(tracker_storage.getWorkingTrackers("shared").keys())
+ self.response({"trackers": shared_trackers})
+
+
+@PluginManager.registerTo("FileServer")
+class FileServerPlugin(object):
+ def portCheck(self, *args, **kwargs):
+ res = super(FileServerPlugin, self).portCheck(*args, **kwargs)
+ if res and not config.tor == "always" and "Bootstrapper" in PluginManager.plugin_manager.plugin_names:
+ for ip in self.ip_external_list:
+ my_tracker_address = "zero://%s:%s" % (ip, config.fileserver_port)
+ tracker_storage.onTrackerFound(my_tracker_address, my=True)
+ return res
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):
+ def createArguments(self):
+ group = self.parser.add_argument_group("AnnounceShare plugin")
+ group.add_argument('--working_shared_trackers_limit', help='Stop discovering new shared trackers after this number of shared trackers reached', default=5, type=int, metavar='limit')
+
+ return super(ConfigPlugin, self).createArguments()
diff --git a/plugins/AnnounceShare/Test/TestAnnounceShare.py b/plugins/AnnounceShare/Test/TestAnnounceShare.py
new file mode 100644
index 00000000..7178eac8
--- /dev/null
+++ b/plugins/AnnounceShare/Test/TestAnnounceShare.py
@@ -0,0 +1,24 @@
+import pytest
+
+from AnnounceShare import AnnounceSharePlugin
+from Peer import Peer
+from Config import config
+
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestAnnounceShare:
+ def testAnnounceList(self, file_server):
+ open("%s/trackers.json" % config.data_dir, "w").write("{}")
+ tracker_storage = AnnounceSharePlugin.tracker_storage
+ tracker_storage.load()
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ assert peer.request("getTrackers")["trackers"] == []
+
+ tracker_storage.onTrackerFound("zero://%s:15441" % file_server.ip)
+ assert peer.request("getTrackers")["trackers"] == []
+
+    # It needs to have at least one successful announce to be shared to other peers
+ tracker_storage.onTrackerSuccess("zero://%s:15441" % file_server.ip, 1.0)
+ assert peer.request("getTrackers")["trackers"] == ["zero://%s:15441" % file_server.ip]
+
diff --git a/plugins/AnnounceShare/Test/conftest.py b/plugins/AnnounceShare/Test/conftest.py
new file mode 100644
index 00000000..5abd4dd6
--- /dev/null
+++ b/plugins/AnnounceShare/Test/conftest.py
@@ -0,0 +1,3 @@
+from src.Test.conftest import *
+
+from Config import config
diff --git a/plugins/AnnounceShare/Test/pytest.ini b/plugins/AnnounceShare/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/AnnounceShare/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/AnnounceShare/__init__.py b/plugins/AnnounceShare/__init__.py
new file mode 100644
index 00000000..dc1e40bd
--- /dev/null
+++ b/plugins/AnnounceShare/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceSharePlugin
diff --git a/plugins/AnnounceZero/AnnounceZeroPlugin.py b/plugins/AnnounceZero/AnnounceZeroPlugin.py
new file mode 100644
index 00000000..dcaa04f0
--- /dev/null
+++ b/plugins/AnnounceZero/AnnounceZeroPlugin.py
@@ -0,0 +1,138 @@
+import time
+import itertools
+
+from Plugin import PluginManager
+from util import helper
+from Crypt import CryptRsa
+
+allow_reload = False # No source reload supported in this plugin
+time_full_announced = {} # Tracker address: Last announced all site to tracker
+connection_pool = {} # Tracker address: Peer object
+
+
+# We can only import plugin host classes after the plugins are loaded
+@PluginManager.afterLoad
+def importHostClasses():
+ global Peer, AnnounceError
+ from Peer import Peer
+ from Site.SiteAnnouncer import AnnounceError
+
+
+# Process result got back from tracker
+def processPeerRes(tracker_address, site, peers):
+ added = 0
+ # Ip4
+ found_ipv4 = 0
+ peers_normal = itertools.chain(peers.get("ip4", []), peers.get("ipv4", []), peers.get("ipv6", []))
+ for packed_address in peers_normal:
+ found_ipv4 += 1
+ peer_ip, peer_port = helper.unpackAddress(packed_address)
+ if site.addPeer(peer_ip, peer_port, source="tracker"):
+ added += 1
+ # Onion
+ found_onion = 0
+ for packed_address in peers["onion"]:
+ found_onion += 1
+ peer_onion, peer_port = helper.unpackOnionAddress(packed_address)
+ if site.addPeer(peer_onion, peer_port, source="tracker"):
+ added += 1
+
+ if added:
+ site.worker_manager.onPeers()
+ site.updateWebsocket(peers_added=added)
+ return added
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):
+ def getTrackerHandler(self, protocol):
+ if protocol == "zero":
+ return self.announceTrackerZero
+ else:
+ return super(SiteAnnouncerPlugin, self).getTrackerHandler(protocol)
+
+ def announceTrackerZero(self, tracker_address, mode="start", num_want=10):
+ global time_full_announced
+ s = time.time()
+
+ need_types = ["ip4"] # ip4 for backward compatibility reasons
+ need_types += self.site.connection_server.supported_ip_types
+ if self.site.connection_server.tor_manager.enabled:
+ need_types.append("onion")
+
+ if mode == "start" or mode == "more": # Single: Announce only this site
+ sites = [self.site]
+ full_announce = False
+        else:  # Multi: Announce all currently served sites
+ full_announce = True
+ if time.time() - time_full_announced.get(tracker_address, 0) < 60 * 15: # No reannounce all sites within short time
+ return None
+ time_full_announced[tracker_address] = time.time()
+ from Site import SiteManager
+ sites = [site for site in SiteManager.site_manager.sites.values() if site.isServing()]
+
+ # Create request
+ add_types = self.getOpenedServiceTypes()
+ request = {
+ "hashes": [], "onions": [], "port": self.fileserver_port, "need_types": need_types, "need_num": 20, "add": add_types
+ }
+ for site in sites:
+ if "onion" in add_types:
+ onion = self.site.connection_server.tor_manager.getOnion(site.address)
+ request["onions"].append(onion)
+ request["hashes"].append(site.address_hash)
+
+ # Tracker can remove sites that we don't announce
+ if full_announce:
+ request["delete"] = True
+
+        # Send request to tracker
+ tracker_peer = connection_pool.get(tracker_address) # Re-use tracker connection if possible
+ if not tracker_peer:
+ tracker_ip, tracker_port = tracker_address.rsplit(":", 1)
+ tracker_peer = Peer(str(tracker_ip), int(tracker_port), connection_server=self.site.connection_server)
+ tracker_peer.is_tracker_connection = True
+ connection_pool[tracker_address] = tracker_peer
+
+ res = tracker_peer.request("announce", request)
+
+ if not res or "peers" not in res:
+ if full_announce:
+ time_full_announced[tracker_address] = 0
+ raise AnnounceError("Invalid response: %s" % res)
+
+ # Add peers from response to site
+ site_index = 0
+ peers_added = 0
+ for site_res in res["peers"]:
+ site = sites[site_index]
+ peers_added += processPeerRes(tracker_address, site, site_res)
+ site_index += 1
+
+ # Check if we need to sign prove the onion addresses
+ if "onion_sign_this" in res:
+ self.site.log.debug("Signing %s for %s to add %s onions" % (res["onion_sign_this"], tracker_address, len(sites)))
+ request["onion_signs"] = {}
+ request["onion_sign_this"] = res["onion_sign_this"]
+ request["need_num"] = 0
+ for site in sites:
+ onion = self.site.connection_server.tor_manager.getOnion(site.address)
+ publickey = self.site.connection_server.tor_manager.getPublickey(onion)
+ if publickey not in request["onion_signs"]:
+ sign = CryptRsa.sign(res["onion_sign_this"].encode("utf8"), self.site.connection_server.tor_manager.getPrivatekey(onion))
+ request["onion_signs"][publickey] = sign
+ res = tracker_peer.request("announce", request)
+ if not res or "onion_sign_this" in res:
+ if full_announce:
+ time_full_announced[tracker_address] = 0
+ raise AnnounceError("Announce onion address to failed: %s" % res)
+
+ if full_announce:
+ tracker_peer.remove() # Close connection, we don't need it in next 5 minute
+
+ self.site.log.debug(
+ "Tracker announce result: zero://%s (sites: %s, new peers: %s, add: %s) in %.3fs" %
+ (tracker_address, site_index, peers_added, add_types, time.time() - s)
+ )
+
+ return True
diff --git a/plugins/AnnounceZero/__init__.py b/plugins/AnnounceZero/__init__.py
new file mode 100644
index 00000000..8aec5ddb
--- /dev/null
+++ b/plugins/AnnounceZero/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceZeroPlugin
\ No newline at end of file
diff --git a/plugins/Bigfile/BigfilePiecefield.py b/plugins/Bigfile/BigfilePiecefield.py
new file mode 100644
index 00000000..ee770573
--- /dev/null
+++ b/plugins/Bigfile/BigfilePiecefield.py
@@ -0,0 +1,164 @@
+import array
+
+
+def packPiecefield(data):
+ assert isinstance(data, bytes) or isinstance(data, bytearray)
+ res = []
+ if not data:
+ return array.array("H", b"")
+
+ if data[0] == b"\x00":
+ res.append(0)
+ find = b"\x01"
+ else:
+ find = b"\x00"
+ last_pos = 0
+ pos = 0
+ while 1:
+ pos = data.find(find, pos)
+ if find == b"\x00":
+ find = b"\x01"
+ else:
+ find = b"\x00"
+ if pos == -1:
+ res.append(len(data) - last_pos)
+ break
+ res.append(pos - last_pos)
+ last_pos = pos
+ return array.array("H", res)
+
+
+def unpackPiecefield(data):
+ if not data:
+ return b""
+
+ res = []
+ char = b"\x01"
+ for times in data:
+ if times > 10000:
+ return b""
+ res.append(char * times)
+ if char == b"\x01":
+ char = b"\x00"
+ else:
+ char = b"\x01"
+ return b"".join(res)
+
+
+def spliceBit(data, idx, bit):
+ assert bit == b"\x00" or bit == b"\x01"
+ if len(data) < idx:
+ data = data.ljust(idx + 1, b"\x00")
+ return data[:idx] + bit + data[idx+ 1:]
+
+class Piecefield(object):
+ def tostring(self):
+ return "".join(["1" if b else "0" for b in self.tobytes()])
+
+
+class BigfilePiecefield(Piecefield):
+ __slots__ = ["data"]
+
+ def __init__(self):
+ self.data = b""
+
+ def frombytes(self, s):
+ assert isinstance(s, bytes) or isinstance(s, bytearray)
+ self.data = s
+
+ def tobytes(self):
+ return self.data
+
+ def pack(self):
+ return packPiecefield(self.data).tobytes()
+
+ def unpack(self, s):
+ self.data = unpackPiecefield(array.array("H", s))
+
+ def __getitem__(self, key):
+ try:
+ return self.data[key]
+ except IndexError:
+ return False
+
+ def __setitem__(self, key, value):
+ self.data = spliceBit(self.data, key, value)
+
+class BigfilePiecefieldPacked(Piecefield):
+ __slots__ = ["data"]
+
+ def __init__(self):
+ self.data = b""
+
+ def frombytes(self, data):
+ assert isinstance(data, bytes) or isinstance(data, bytearray)
+ self.data = packPiecefield(data).tobytes()
+
+ def tobytes(self):
+ return unpackPiecefield(array.array("H", self.data))
+
+ def pack(self):
+ return array.array("H", self.data).tobytes()
+
+ def unpack(self, data):
+ self.data = data
+
+ def __getitem__(self, key):
+ try:
+ return self.tobytes()[key]
+ except IndexError:
+ return False
+
+ def __setitem__(self, key, value):
+ data = spliceBit(self.tobytes(), key, value)
+ self.frombytes(data)
+
+
+if __name__ == "__main__":
+ import os
+ import psutil
+ import time
+ testdata = b"\x01" * 100 + b"\x00" * 900 + b"\x01" * 4000 + b"\x00" * 4999 + b"\x01"
+ meminfo = psutil.Process(os.getpid()).memory_info
+
+ for storage in [BigfilePiecefieldPacked, BigfilePiecefield]:
+ print("-- Testing storage: %s --" % storage)
+ m = meminfo()[0]
+ s = time.time()
+ piecefields = {}
+ for i in range(10000):
+ piecefield = storage()
+ piecefield.frombytes(testdata[:i] + b"\x00" + testdata[i + 1:])
+ piecefields[i] = piecefield
+
+ print("Create x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data)))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ val = piecefield[1000]
+
+ print("Query one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ piecefield[1000] = b"\x01"
+
+ print("Change one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ packed = piecefield.pack()
+
+ print("Pack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(packed)))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ piecefield.unpack(packed)
+
+ print("Unpack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data)))
+
+ piecefields = {}
diff --git a/plugins/Bigfile/BigfilePlugin.py b/plugins/Bigfile/BigfilePlugin.py
new file mode 100644
index 00000000..03a0f44f
--- /dev/null
+++ b/plugins/Bigfile/BigfilePlugin.py
@@ -0,0 +1,784 @@
+import time
+import os
+import subprocess
+import shutil
+import collections
+import math
+import warnings
+import base64
+import binascii
+import json
+
+import gevent
+import gevent.lock
+
+from Plugin import PluginManager
+from Debug import Debug
+from Crypt import CryptHash
+with warnings.catch_warnings():
+ warnings.filterwarnings("ignore") # Ignore missing sha3 warning
+ import merkletools
+
+from util import helper
+from util import Msgpack
+import util
+from .BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked
+
+
+# We can only import plugin host classes after the plugins are loaded
+@PluginManager.afterLoad
+def importPluginnedClasses():
+ global VerifyError, config
+ from Content.ContentManager import VerifyError
+ from Config import config
+
+if "upload_nonces" not in locals():
+ upload_nonces = {}
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ def isCorsAllowed(self, path):
+ if path == "/ZeroNet-Internal/BigfileUpload":
+ return True
+ else:
+ return super(UiRequestPlugin, self).isCorsAllowed(path)
+
+ @helper.encodeResponse
+ def actionBigfileUpload(self):
+ nonce = self.get.get("upload_nonce")
+ if nonce not in upload_nonces:
+ return self.error403("Upload nonce error.")
+
+ upload_info = upload_nonces[nonce]
+ del upload_nonces[nonce]
+
+ self.sendHeader(200, "text/html", noscript=True, extra_headers={
+ "Access-Control-Allow-Origin": "null",
+ "Access-Control-Allow-Credentials": "true"
+ })
+
+ self.readMultipartHeaders(self.env['wsgi.input']) # Skip http headers
+
+ site = upload_info["site"]
+ inner_path = upload_info["inner_path"]
+
+ with site.storage.open(inner_path, "wb", create_dirs=True) as out_file:
+ merkle_root, piece_size, piecemap_info = site.content_manager.hashBigfile(
+ self.env['wsgi.input'], upload_info["size"], upload_info["piece_size"], out_file
+ )
+
+ if len(piecemap_info["sha512_pieces"]) == 1: # Small file, don't split
+ hash = binascii.hexlify(piecemap_info["sha512_pieces"][0])
+ hash_id = site.content_manager.hashfield.getHashId(hash)
+ site.content_manager.optionalDownloaded(inner_path, hash_id, upload_info["size"], own=True)
+
+ else: # Big file
+ file_name = helper.getFilename(inner_path)
+ site.storage.open(upload_info["piecemap"], "wb").write(Msgpack.pack({file_name: piecemap_info}))
+
+ # Find piecemap and file relative path to content.json
+ file_info = site.content_manager.getFileInfo(inner_path, new_file=True)
+ content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
+ piecemap_relative_path = upload_info["piecemap"][len(content_inner_path_dir):]
+ file_relative_path = inner_path[len(content_inner_path_dir):]
+
+ # Add file to content.json
+ if site.storage.isFile(file_info["content_inner_path"]):
+ content = site.storage.loadJson(file_info["content_inner_path"])
+ else:
+ content = {}
+ if "files_optional" not in content:
+ content["files_optional"] = {}
+
+ content["files_optional"][file_relative_path] = {
+ "sha512": merkle_root,
+ "size": upload_info["size"],
+ "piecemap": piecemap_relative_path,
+ "piece_size": piece_size
+ }
+
+ merkle_root_hash_id = site.content_manager.hashfield.getHashId(merkle_root)
+ site.content_manager.optionalDownloaded(inner_path, merkle_root_hash_id, upload_info["size"], own=True)
+ site.storage.writeJson(file_info["content_inner_path"], content)
+
+ site.content_manager.contents.loadItem(file_info["content_inner_path"]) # reload cache
+
+ return json.dumps({
+ "merkle_root": merkle_root,
+ "piece_num": len(piecemap_info["sha512_pieces"]),
+ "piece_size": piece_size,
+ "inner_path": inner_path
+ })
+
+ def readMultipartHeaders(self, wsgi_input):
+ found = False
+ for i in range(100):
+ line = wsgi_input.readline()
+ if line == b"\r\n":
+ found = True
+ break
+ if not found:
+ raise Exception("No multipart header found")
+ return i
+
+ def actionFile(self, file_path, *args, **kwargs):
+ if kwargs.get("file_size", 0) > 1024 * 1024 and kwargs.get("path_parts"): # Only check files larger than 1MB
+ path_parts = kwargs["path_parts"]
+ site = self.server.site_manager.get(path_parts["address"])
+ big_file = site.storage.openBigfile(path_parts["inner_path"], prebuffer=2 * 1024 * 1024)
+ if big_file:
+ kwargs["file_obj"] = big_file
+ kwargs["file_size"] = big_file.size
+
+ return super(UiRequestPlugin, self).actionFile(file_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ def actionBigfileUploadInit(self, to, inner_path, size):
+ valid_signers = self.site.content_manager.getValidSigners(inner_path)
+ auth_address = self.user.getAuthAddress(self.site.address)
+ if not self.site.settings["own"] and auth_address not in valid_signers:
+ self.log.error("FileWrite forbidden %s not in valid_signers %s" % (auth_address, valid_signers))
+ return self.response(to, {"error": "Forbidden, you can only modify your own files"})
+
+ nonce = CryptHash.random()
+ piece_size = 1024 * 1024
+ inner_path = self.site.content_manager.sanitizePath(inner_path)
+ file_info = self.site.content_manager.getFileInfo(inner_path, new_file=True)
+
+ content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
+ file_relative_path = inner_path[len(content_inner_path_dir):]
+
+ upload_nonces[nonce] = {
+ "added": time.time(),
+ "site": self.site,
+ "inner_path": inner_path,
+ "websocket_client": self,
+ "size": size,
+ "piece_size": piece_size,
+ "piecemap": inner_path + ".piecemap.msgpack"
+ }
+ return {
+ "url": "/ZeroNet-Internal/BigfileUpload?upload_nonce=" + nonce,
+ "piece_size": piece_size,
+ "inner_path": inner_path,
+ "file_relative_path": file_relative_path
+ }
+
+ def actionSiteSetAutodownloadBigfileLimit(self, to, limit):
+ permissions = self.getPermissions(to)
+ if "ADMIN" not in permissions:
+ return self.response(to, "You don't have permission to run this command")
+
+ self.site.settings["autodownload_bigfile_size_limit"] = int(limit)
+ self.response(to, "ok")
+
+ def actionFileDelete(self, to, inner_path):
+ piecemap_inner_path = inner_path + ".piecemap.msgpack"
+ if self.hasFilePermission(inner_path) and self.site.storage.isFile(piecemap_inner_path):
+ # Also delete .piecemap.msgpack file if exists
+ self.log.debug("Deleting piecemap: %s" % piecemap_inner_path)
+ file_info = self.site.content_manager.getFileInfo(piecemap_inner_path)
+ if file_info:
+ content_json = self.site.storage.loadJson(file_info["content_inner_path"])
+ relative_path = file_info["relative_path"]
+ if relative_path in content_json.get("files_optional", {}):
+ del content_json["files_optional"][relative_path]
+ self.site.storage.writeJson(file_info["content_inner_path"], content_json)
+ self.site.content_manager.loadContent(file_info["content_inner_path"], add_bad_files=False, force=True)
+ try:
+ self.site.storage.delete(piecemap_inner_path)
+ except Exception as err:
+ self.log.error("File %s delete error: %s" % (piecemap_inner_path, err))
+
+ return super(UiWebsocketPlugin, self).actionFileDelete(to, inner_path)
+
+
+@PluginManager.registerTo("ContentManager")
+class ContentManagerPlugin(object):
+    def getFileInfo(self, inner_path, *args, **kwargs):
+        # Accept an optional "|from-to" byte-range suffix on inner_path;
+        # the range is stripped and the base file's info is returned unchanged.
+        if "|" not in inner_path:
+            return super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
+
+        inner_path, file_range = inner_path.split("|")
+        pos_from, pos_to = map(int, file_range.split("-"))
+        file_info = super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
+        return file_info
+
+    def readFile(self, file_in, size, buff_size=1024 * 64):
+        # Generator: yield up to `size` bytes from file_in in buff_size chunks.
+        # Stops early if the source is exhausted before `size` bytes are read.
+        part_num = 0
+        recv_left = size
+
+        while 1:
+            part_num += 1
+            read_size = min(buff_size, recv_left)
+            part = file_in.read(read_size)
+
+            if not part:
+                break
+            yield part
+
+            if part_num % 100 == 0:  # Avoid blocking ZeroNet execution during upload
+                time.sleep(0.001)
+
+            recv_left -= read_size
+            if recv_left <= 0:
+                break
+
+    def hashBigfile(self, file_in, size, piece_size=1024 * 1024, file_out=None):
+        # Hash a big file piece-by-piece, optionally copying it to file_out.
+        # Returns (merkle_root, piece_size, {"sha512_pieces": [digest, ...]}).
+        self.site.settings["has_bigfile"] = True
+
+        recv = 0
+        try:
+            piece_hash = CryptHash.sha512t()
+            piece_hashes = []
+            piece_recv = 0
+
+            mt = merkletools.MerkleTools()
+            mt.hash_function = CryptHash.sha512t
+
+            part = ""
+            for part in self.readFile(file_in, size):
+                if file_out:
+                    file_out.write(part)
+
+                recv += len(part)
+                piece_recv += len(part)
+                piece_hash.update(part)
+                if piece_recv >= piece_size:
+                    # Completed a full piece: record its digest and start a new hash
+                    piece_digest = piece_hash.digest()
+                    piece_hashes.append(piece_digest)
+                    mt.leaves.append(piece_digest)
+                    piece_hash = CryptHash.sha512t()
+                    piece_recv = 0
+
+                if len(piece_hashes) % 100 == 0 or recv == size:
+                    self.log.info("- [HASHING:%.0f%%] Pieces: %s, %.1fMB/%.1fMB" % (
+                        float(recv) / size * 100, len(piece_hashes), recv / 1024 / 1024, size / 1024 / 1024
+                    ))
+                    # Clear part so a fully-consumed file does not hash a
+                    # trailing partial piece below (recv == size case)
+                    part = ""
+            if len(part) > 0:  # Last, partial piece
+                piece_digest = piece_hash.digest()
+                piece_hashes.append(piece_digest)
+                mt.leaves.append(piece_digest)
+        except Exception as err:
+            raise err
+        finally:
+            if file_out:
+                file_out.close()
+
+        mt.make_tree()
+        merkle_root = mt.get_merkle_root()
+        if type(merkle_root) is bytes:  # Python <3.5
+            merkle_root = merkle_root.decode()
+        return merkle_root, piece_size, {
+            "sha512_pieces": piece_hashes
+        }
+
+    def hashFile(self, dir_inner_path, file_relative_path, optional=False):
+        # Hash a file for content.json; big optional files (>=5MB) get a
+        # merkle-root hash plus a generated .piecemap.msgpack sidecar file.
+        inner_path = dir_inner_path + file_relative_path
+
+        file_size = self.site.storage.getSize(inner_path)
+        # Only care about optional files >1MB
+        if not optional or file_size < 1 * 1024 * 1024:
+            return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)
+
+        back = {}
+        content = self.contents.get(dir_inner_path + "content.json")
+
+        hash = None
+        piecemap_relative_path = None
+        piece_size = None
+
+        # Don't re-hash if it's already in content.json
+        if content and file_relative_path in content.get("files_optional", {}):
+            file_node = content["files_optional"][file_relative_path]
+            if file_node["size"] == file_size:
+                self.log.info("- [SAME SIZE] %s" % file_relative_path)
+                hash = file_node.get("sha512")
+                piecemap_relative_path = file_node.get("piecemap")
+                piece_size = file_node.get("piece_size")
+
+        if not hash or not piecemap_relative_path:  # Not in content.json yet
+            if file_size < 5 * 1024 * 1024:  # Don't create piecemap automatically for files smaller than 5MB
+                return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)
+
+            self.log.info("- [HASHING] %s" % file_relative_path)
+            merkle_root, piece_size, piecemap_info = self.hashBigfile(self.site.storage.open(inner_path, "rb"), file_size)
+            if not hash:
+                hash = merkle_root
+
+        if not piecemap_relative_path:
+            file_name = helper.getFilename(file_relative_path)
+            piecemap_relative_path = file_relative_path + ".piecemap.msgpack"
+            piecemap_inner_path = inner_path + ".piecemap.msgpack"
+
+            # NOTE(review): file handle is not explicitly closed here
+            self.site.storage.open(piecemap_inner_path, "wb").write(Msgpack.pack({file_name: piecemap_info}))
+
+            back.update(super(ContentManagerPlugin, self).hashFile(dir_inner_path, piecemap_relative_path, optional=True))
+
+        piece_num = int(math.ceil(float(file_size) / piece_size))
+
+        # Add the merkle root to hashfield
+        hash_id = self.site.content_manager.hashfield.getHashId(hash)
+        self.optionalDownloaded(inner_path, hash_id, file_size, own=True)
+        # We own the file, so mark every piece as present
+        self.site.storage.piecefields[hash].frombytes(b"\x01" * piece_num)
+
+        back[file_relative_path] = {"sha512": hash, "size": file_size, "piecemap": piecemap_relative_path, "piece_size": piece_size}
+        return back
+
+    def getPiecemap(self, inner_path):
+        # Load (downloading if necessary) the piecemap for inner_path and
+        # return its piece-hash dict with "piece_size" added.
+        file_info = self.site.content_manager.getFileInfo(inner_path)
+        piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
+        self.site.needFile(piecemap_inner_path, priority=20)
+        piecemap = Msgpack.unpack(self.site.storage.open(piecemap_inner_path, "rb").read())[helper.getFilename(inner_path)]
+        piecemap["piece_size"] = file_info["piece_size"]
+        return piecemap
+
+    def verifyPiece(self, inner_path, pos, piece):
+        # Verify one piece (at byte offset `pos`) against the piecemap.
+        # Raises VerifyError on hash mismatch.
+        piecemap = self.getPiecemap(inner_path)
+        piece_i = int(pos / piecemap["piece_size"])
+        if CryptHash.sha512sum(piece, format="digest") != piecemap["sha512_pieces"][piece_i]:
+            raise VerifyError("Invalid hash")
+        return True
+
+    def verifyFile(self, inner_path, file, ignore_same=True):
+        # Ranged paths ("path|from-to") are verified as a single piece
+        if "|" not in inner_path:
+            return super(ContentManagerPlugin, self).verifyFile(inner_path, file, ignore_same)
+
+        inner_path, file_range = inner_path.split("|")
+        pos_from, pos_to = map(int, file_range.split("-"))
+
+        return self.verifyPiece(inner_path, pos_from, file)
+
+    def optionalDownloaded(self, inner_path, hash_id, size=None, own=False):
+        # Track download of an optional file or a single piece of a bigfile.
+        if "|" in inner_path:
+            inner_path, file_range = inner_path.split("|")
+            pos_from, pos_to = map(int, file_range.split("-"))
+            file_info = self.getFileInfo(inner_path)
+
+            # Mark piece downloaded
+            piece_i = int(pos_from / file_info["piece_size"])
+            self.site.storage.piecefields[file_info["sha512"]][piece_i] = b"\x01"
+
+            # Only add to site size on first request
+            if hash_id in self.hashfield:
+                size = 0
+        elif size > 1024 * 1024:
+            file_info = self.getFileInfo(inner_path)
+            if file_info and "sha512" in file_info:  # We already have the file, but not in piecefield
+                sha512 = file_info["sha512"]
+                if sha512 not in self.site.storage.piecefields:
+                    self.site.storage.checkBigfile(inner_path)
+
+        return super(ContentManagerPlugin, self).optionalDownloaded(inner_path, hash_id, size, own)
+
+    def optionalRemoved(self, inner_path, hash_id, size=None):
+        # On removal of a bigfile, drop its piecefield and any queued piece downloads
+        if size and size > 1024 * 1024:
+            file_info = self.getFileInfo(inner_path)
+            sha512 = file_info["sha512"]
+            if sha512 in self.site.storage.piecefields:
+                del self.site.storage.piecefields[sha512]
+
+            # Also remove other pieces of the file from download queue
+            for key in list(self.site.bad_files.keys()):
+                if key.startswith(inner_path + "|"):
+                    del self.site.bad_files[key]
+            self.site.worker_manager.removeSolvedFileTasks()
+        return super(ContentManagerPlugin, self).optionalRemoved(inner_path, hash_id, size)
+
+
+@PluginManager.registerTo("SiteStorage")
+class SiteStoragePlugin(object):
+    def __init__(self, *args, **kwargs):
+        # Restore per-file piecefields (sha512 -> BigfilePiecefield) from the
+        # site settings cache, then clear the cache entry.
+        super(SiteStoragePlugin, self).__init__(*args, **kwargs)
+        self.piecefields = collections.defaultdict(BigfilePiecefield)
+        if "piecefields" in self.site.settings.get("cache", {}):
+            for sha512, piecefield_packed in self.site.settings["cache"].get("piecefields").items():
+                if piecefield_packed:
+                    self.piecefields[sha512].unpack(base64.b64decode(piecefield_packed))
+        self.site.settings["cache"]["piecefields"] = {}
+
+    def createSparseFile(self, inner_path, size, sha512=None):
+        # Create an empty sparse placeholder file for a bigfile download.
+        # If sha512 is given, any stale piecefield for it is discarded.
+        file_path = self.getPath(inner_path)
+
+        file_dir = os.path.dirname(file_path)
+        if not os.path.isdir(file_dir):
+            os.makedirs(file_dir)
+
+        f = open(file_path, 'wb')
+        f.truncate(min(1024 * 1024 * 5, size))  # Only pre-allocate up to 5MB
+        f.close()
+        if os.name == "nt":
+            # NTFS needs the sparse flag set explicitly via fsutil
+            startupinfo = subprocess.STARTUPINFO()
+            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+            subprocess.call(["fsutil", "sparse", "setflag", file_path], close_fds=True, startupinfo=startupinfo)
+
+        if sha512 and sha512 in self.piecefields:
+            self.log.debug("%s: File not exists, but has piecefield. Deleting piecefield." % inner_path)
+            del self.piecefields[sha512]
+
+    def write(self, inner_path, content):
+        if "|" not in inner_path:
+            return super(SiteStoragePlugin, self).write(inner_path, content)
+
+        # Write to specific position by passing |{pos} after the filename
+        inner_path, file_range = inner_path.split("|")
+        pos_from, pos_to = map(int, file_range.split("-"))
+        file_path = self.getPath(inner_path)
+
+        # Create dir if not exist
+        file_dir = os.path.dirname(file_path)
+        if not os.path.isdir(file_dir):
+            os.makedirs(file_dir)
+
+        if not os.path.isfile(file_path):
+            file_info = self.site.content_manager.getFileInfo(inner_path)
+            self.createSparseFile(inner_path, file_info["size"])
+
+        # Write file
+        with open(file_path, "rb+") as file:
+            file.seek(pos_from)
+            if hasattr(content, 'read'):  # File-like object
+                shutil.copyfileobj(content, file)  # Write buff to disk
+            else:  # Simple string
+                file.write(content)
+        del content
+        self.onUpdated(inner_path)
+
+    def checkBigfile(self, inner_path):
+        # Ensure a piecefield exists for a bigfile; create the sparse file if
+        # missing. Returns False if inner_path is not a bigfile.
+        file_info = self.site.content_manager.getFileInfo(inner_path)
+        if not file_info or (file_info and "piecemap" not in file_info):  # It's not a big file
+            return False
+
+        self.site.settings["has_bigfile"] = True
+        file_path = self.getPath(inner_path)
+        sha512 = file_info["sha512"]
+        piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
+        if os.path.isfile(file_path):
+            if sha512 not in self.piecefields:
+                # Heuristic: a zeroed first 128 bytes means nothing downloaded yet
+                if open(file_path, "rb").read(128) == b"\0" * 128:
+                    piece_data = b"\x00"
+                else:
+                    piece_data = b"\x01"
+                # NOTE(review): "piecefiled" typo in the log message below
+                self.log.debug("%s: File exists, but not in piecefield. Filling piecefiled with %s * %s." % (inner_path, piece_num, piece_data))
+                self.piecefields[sha512].frombytes(piece_data * piece_num)
+        else:
+            self.log.debug("Creating bigfile: %s" % inner_path)
+            self.createSparseFile(inner_path, file_info["size"], sha512)
+            self.piecefields[sha512].frombytes(b"\x00" * piece_num)
+            self.log.debug("Created bigfile: %s" % inner_path)
+        return True
+
+    def openBigfile(self, inner_path, prebuffer=0):
+        # Open a bigfile as a virtual, on-demand-downloading file object.
+        # Returns False if inner_path is not a bigfile.
+        if not self.checkBigfile(inner_path):
+            return False
+        self.site.needFile(inner_path, blocking=False)  # Download piecemap
+        return BigFile(self.site, inner_path, prebuffer=prebuffer)
+
+
+class BigFile(object):
+    # File-like wrapper over a partially-downloaded bigfile: read() fetches
+    # the missing pieces from peers before returning data from disk.
+    def __init__(self, site, inner_path, prebuffer=0):
+        self.site = site
+        self.inner_path = inner_path
+        file_path = site.storage.getPath(inner_path)
+        file_info = self.site.content_manager.getFileInfo(inner_path)
+        self.piece_size = file_info["piece_size"]
+        self.sha512 = file_info["sha512"]
+        self.size = file_info["size"]
+        self.prebuffer = prebuffer  # Extra bytes to request ahead of reads
+        self.read_bytes = 0
+
+        self.piecefield = self.site.storage.piecefields[self.sha512]
+        self.f = open(file_path, "rb+")
+        self.read_lock = gevent.lock.Semaphore()
+
+    def read(self, buff=64 * 1024):
+        # Block until every piece covering [pos, pos+buff) is downloaded,
+        # then read from the underlying file. Returns None if a piece
+        # request could not be created.
+        with self.read_lock:
+            pos = self.f.tell()
+            read_until = min(self.size, pos + buff)
+            requests = []
+            # Request all required blocks
+            while 1:
+                piece_i = int(pos / self.piece_size)
+                if piece_i * self.piece_size >= read_until:
+                    break
+                pos_from = piece_i * self.piece_size
+                pos_to = pos_from + self.piece_size
+                if not self.piecefield[piece_i]:
+                    requests.append(self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=10))
+                pos += self.piece_size
+
+            if not all(requests):
+                return None
+
+            # Request prebuffer
+            if self.prebuffer:
+                prebuffer_until = min(self.size, read_until + self.prebuffer)
+                priority = 3
+                while 1:
+                    piece_i = int(pos / self.piece_size)
+                    if piece_i * self.piece_size >= prebuffer_until:
+                        break
+                    pos_from = piece_i * self.piece_size
+                    pos_to = pos_from + self.piece_size
+                    if not self.piecefield[piece_i]:
+                        # Decreasing priority: nearer pieces download first
+                        self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=max(0, priority))
+                        priority -= 1
+                    pos += self.piece_size
+
+            gevent.joinall(requests)
+            self.read_bytes += buff
+
+            # Increase buffer for long reads
+            if self.read_bytes > 7 * 1024 * 1024 and self.prebuffer < 5 * 1024 * 1024:
+                self.site.log.debug("%s: Increasing bigfile buffer size to 5MB..." % self.inner_path)
+                self.prebuffer = 5 * 1024 * 1024
+
+            return self.f.read(buff)
+
+    def seek(self, pos, whence=0):
+        with self.read_lock:
+            if whence == 2:  # Relative from file end
+                pos = self.size + pos  # Use the real size instead of size on the disk
+                whence = 0
+            return self.f.seek(pos, whence)
+
+    def tell(self):
+        return self.f.tell()
+
+    def close(self):
+        self.f.close()
+
+    def __enter__(self):
+        # Context-manager support: closes the file handle on exit
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+
+@PluginManager.registerTo("WorkerManager")
+class WorkerManagerPlugin(object):
+    def addTask(self, inner_path, *args, **kwargs):
+        # Intercept bigfile tasks: ensure the piecemap is downloaded, create
+        # the sparse file, and tag piece tasks with their piece index/sha512.
+        file_info = kwargs.get("file_info")
+        if file_info and "piecemap" in file_info:  # Bigfile
+            self.site.settings["has_bigfile"] = True
+
+            piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
+            piecemap_task = None
+            if not self.site.storage.isFile(piecemap_inner_path):
+                # Start download piecemap
+                piecemap_task = super(WorkerManagerPlugin, self).addTask(piecemap_inner_path, priority=30)
+                autodownload_bigfile_size_limit = self.site.settings.get("autodownload_bigfile_size_limit", config.autodownload_bigfile_size_limit)
+                if "|" not in inner_path and self.site.isDownloadable(inner_path) and file_info["size"] / 1024 / 1024 <= autodownload_bigfile_size_limit:
+                    gevent.spawn_later(0.1, self.site.needFile, inner_path + "|all")  # Download all pieces
+
+            if "|" in inner_path:
+                # Start download piece
+                task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)
+
+                inner_path, file_range = inner_path.split("|")
+                pos_from, pos_to = map(int, file_range.split("-"))
+                task["piece_i"] = int(pos_from / file_info["piece_size"])
+                task["sha512"] = file_info["sha512"]
+            else:
+                if inner_path in self.site.bad_files:
+                    del self.site.bad_files[inner_path]
+                if piecemap_task:
+                    task = piecemap_task
+                else:
+                    fake_evt = gevent.event.AsyncResult()  # Don't download anything if no range specified
+                    fake_evt.set(True)
+                    task = {"evt": fake_evt}
+
+            if not self.site.storage.isFile(inner_path):
+                self.site.storage.createSparseFile(inner_path, file_info["size"], file_info["sha512"])
+                piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
+                self.site.storage.piecefields[file_info["sha512"]].frombytes(b"\x00" * piece_num)
+        else:
+            task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)
+        return task
+
+    def taskAddPeer(self, task, peer):
+        # For piece tasks, only accept peers whose piecefield shows they have
+        # the piece; otherwise trigger a piecefield refresh in the background.
+        if "piece_i" in task:
+            if not peer.piecefields[task["sha512"]][task["piece_i"]]:
+                if task["sha512"] not in peer.piecefields:
+                    gevent.spawn(peer.updatePiecefields, force=True)
+                elif not task["peers"]:
+                    gevent.spawn(peer.updatePiecefields)
+
+                return False  # Deny to add peers to task if file not in piecefield
+        return super(WorkerManagerPlugin, self).taskAddPeer(task, peer)
+
+
+@PluginManager.registerTo("FileRequest")
+class FileRequestPlugin(object):
+    def isReadable(self, site, inner_path, file, pos):
+        # Peek into file
+        if file.read(10) == b"\0" * 10:
+            # Looks empty, but make sure we don't have that piece
+            file_info = site.content_manager.getFileInfo(inner_path)
+            if "piece_size" in file_info:
+                piece_i = int(pos / file_info["piece_size"])
+                if not site.storage.piecefields[file_info["sha512"]][piece_i]:
+                    return False
+        # Seek back to position we want to read
+        file.seek(pos)
+        return super(FileRequestPlugin, self).isReadable(site, inner_path, file, pos)
+
+    def actionGetPiecefields(self, params):
+        # Protocol handler: respond with all packed piecefields of the site.
+        site = self.sites.get(params["site"])
+        if not site or not site.isServing():  # Site unknown or not serving
+            self.response({"error": "Unknown site"})
+            return False
+
+        # Add peer to site if not added before
+        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True)
+        if not peer.connection:  # Just added
+            peer.connect(self.connection)  # Assign current connection to peer
+
+        piecefields_packed = {sha512: piecefield.pack() for sha512, piecefield in site.storage.piecefields.items()}
+        self.response({"piecefields_packed": piecefields_packed})
+
+    def actionSetPiecefields(self, params):
+        # Protocol handler: replace the requesting peer's piecefields with
+        # the packed data it sent.
+        site = self.sites.get(params["site"])
+        if not site or not site.isServing():  # Site unknown or not serving
+            self.response({"error": "Unknown site"})
+            self.connection.badAction(5)
+            return False
+
+        # Add or get peer
+        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection)
+        if not peer.connection:
+            peer.connect(self.connection)
+
+        peer.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+        for sha512, piecefield_packed in params["piecefields_packed"].items():
+            peer.piecefields[sha512].unpack(piecefield_packed)
+        site.settings["has_bigfile"] = True
+
+        self.response({"ok": "Updated"})
+
+
+@PluginManager.registerTo("Peer")
+class PeerPlugin(object):
+    def __getattr__(self, key):
+        # Lazily create per-peer attributes to keep idle Peer objects small
+        if key == "piecefields":
+            self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+            return self.piecefields
+        elif key == "time_piecefields_updated":
+            self.time_piecefields_updated = None
+            return self.time_piecefields_updated
+        else:
+            return super(PeerPlugin, self).__getattr__(key)
+
+    @util.Noparallel(ignore_args=True)
+    def updatePiecefields(self, force=False):
+        # Query and store the peer's piecefields. Returns False if the peer
+        # doesn't support it, was queried recently (unless force), or errored.
+        if self.connection and self.connection.handshake.get("rev", 0) < 2190:
+            return False  # Not supported
+
+        # Don't update piecefield again in 1 min
+        if self.time_piecefields_updated and time.time() - self.time_piecefields_updated < 60 and not force:
+            return False
+
+        self.time_piecefields_updated = time.time()
+        res = self.request("getPiecefields", {"site": self.site.address})
+        if not res or "error" in res:
+            return False
+
+        self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+        try:
+            for sha512, piecefield_packed in res["piecefields_packed"].items():
+                self.piecefields[sha512].unpack(piecefield_packed)
+        except Exception as err:
+            self.log("Invalid updatePiecefields response: %s" % Debug.formatException(err))
+
+        return self.piecefields
+
+    def sendMyHashfield(self, *args, **kwargs):
+        return super(PeerPlugin, self).sendMyHashfield(*args, **kwargs)
+
+    def updateHashfield(self, *args, **kwargs):
+        # Piggy-back a piecefield update on hashfield updates for bigfile sites
+        if self.site.settings.get("has_bigfile"):
+            thread = gevent.spawn(self.updatePiecefields, *args, **kwargs)
+            back = super(PeerPlugin, self).updateHashfield(*args, **kwargs)
+            thread.join()
+            return back
+        else:
+            return super(PeerPlugin, self).updateHashfield(*args, **kwargs)
+
+    def getFile(self, site, inner_path, *args, **kwargs)
+        # NOTE: signature preserved; translate "path|from-to" into
+        # pos_from/pos_to keyword arguments for the base implementation
+        if "|" in inner_path:
+            inner_path, file_range = inner_path.split("|")
+            pos_from, pos_to = map(int, file_range.split("-"))
+            kwargs["pos_from"] = pos_from
+            kwargs["pos_to"] = pos_to
+        return super(PeerPlugin, self).getFile(site, inner_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("Site")
+class SitePlugin(object):
+    def isFileDownloadAllowed(self, inner_path, file_info):
+        # For bigfiles, enforce config.bigfile_size_limit and let the base
+        # class judge by piece size instead of the full file size.
+        if "piecemap" in file_info:
+            file_size_mb = file_info["size"] / 1024 / 1024
+            if config.bigfile_size_limit and file_size_mb > config.bigfile_size_limit:
+                self.log.debug(
+                    "Bigfile size %s too large: %sMB > %sMB, skipping..." %
+                    (inner_path, file_size_mb, config.bigfile_size_limit)
+                )
+                return False
+
+            file_info = file_info.copy()
+            file_info["size"] = file_info["piece_size"]
+        return super(SitePlugin, self).isFileDownloadAllowed(inner_path, file_info)
+
+    def getSettingsCache(self):
+        # Persist packed piecefields (base64) into the site settings cache
+        back = super(SitePlugin, self).getSettingsCache()
+        if self.storage.piecefields:
+            back["piecefields"] = {sha512: base64.b64encode(piecefield.pack()).decode("utf8") for sha512, piecefield in self.storage.piecefields.items()}
+        return back
+
+    def needFile(self, inner_path, *args, **kwargs):
+        # "path|all" expands to downloading every missing piece of a bigfile
+        # through a pool of 20 concurrent workers.
+        if inner_path.endswith("|all"):
+            @util.Pooled(20)
+            def pooledNeedBigfile(inner_path, *args, **kwargs):
+                if inner_path not in self.bad_files:
+                    self.log.debug("Cancelled piece, skipping %s" % inner_path)
+                    return False
+                return self.needFile(inner_path, *args, **kwargs)
+
+            inner_path = inner_path.replace("|all", "")
+            file_info = self.needFileInfo(inner_path)
+            file_size = file_info["size"]
+            piece_size = file_info["piece_size"]
+
+            piece_num = int(math.ceil(float(file_size) / piece_size))
+
+            file_threads = []
+
+            piecefield = self.storage.piecefields.get(file_info["sha512"])
+
+            for piece_i in range(piece_num):
+                piece_from = piece_i * piece_size
+                piece_to = min(file_size, piece_from + piece_size)
+                if not piecefield or not piecefield[piece_i]:
+                    inner_path_piece = "%s|%s-%s" % (inner_path, piece_from, piece_to)
+                    # Register the piece as wanted so pooledNeedBigfile picks it up
+                    self.bad_files[inner_path_piece] = self.bad_files.get(inner_path_piece, 1)
+                    res = pooledNeedBigfile(inner_path_piece, blocking=False)
+                    if res is not True and res is not False:
+                        file_threads.append(res)
+            gevent.joinall(file_threads)
+        else:
+            return super(SitePlugin, self).needFile(inner_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):
+    def createArguments(self):
+        # Register the plugin's command-line options (sizes in MB)
+        group = self.parser.add_argument_group("Bigfile plugin")
+        group.add_argument('--autodownload_bigfile_size_limit', help='Also download bigfiles smaller than this limit if help distribute option is checked', default=1, metavar="MB", type=int)
+        group.add_argument('--bigfile_size_limit', help='Maximum size of downloaded big files', default=False, metavar="MB", type=int)
+
+        return super(ConfigPlugin, self).createArguments()
diff --git a/plugins/Bigfile/Test/TestBigfile.py b/plugins/Bigfile/Test/TestBigfile.py
new file mode 100644
index 00000000..7d112860
--- /dev/null
+++ b/plugins/Bigfile/Test/TestBigfile.py
@@ -0,0 +1,574 @@
+import time
+import io
+import binascii
+
+import pytest
+import mock
+
+from Connection import ConnectionServer
+from Content.ContentManager import VerifyError
+from File import FileServer
+from File import FileRequest
+from Worker import WorkerManager
+from Peer import Peer
+from Bigfile import BigfilePiecefield, BigfilePiecefieldPacked
+from Test import Spy
+from util import Msgpack
+
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestBigfile:
+ privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"
+ piece_size = 1024 * 1024
+
+    def createBigfile(self, site, inner_path="data/optional.any.iso", pieces=10):
+        # Helper: write a predictable 10 * 1000 * 1000 byte file (pieces x 1,000,000
+        # bytes of "TestN----" filler) and sign content.json to generate its piecemap.
+        f = site.storage.open(inner_path, "w")
+        for i in range(pieces * 100):
+            f.write(("Test%s" % i).ljust(10, "-") * 1000)
+        f.close()
+        assert site.content_manager.sign("content.json", self.privatekey)
+        return inner_path
+
+    def testPiecemapCreate(self, site):
+        # Signing a bigfile should add size/sha512/piecemap to content.json
+        # and produce a piecemap with one distinct hash per 1MB piece.
+        inner_path = self.createBigfile(site)
+        content = site.storage.loadJson("content.json")
+        assert "data/optional.any.iso" in content["files_optional"]
+        file_node = content["files_optional"][inner_path]
+        assert file_node["size"] == 10 * 1000 * 1000
+        assert file_node["sha512"] == "47a72cde3be80b4a829e7674f72b7c6878cf6a70b0c58c6aa6c17d7e9948daf6"
+        assert file_node["piecemap"] == inner_path + ".piecemap.msgpack"
+
+        piecemap = Msgpack.unpack(site.storage.open(file_node["piecemap"], "rb").read())["optional.any.iso"]
+        assert len(piecemap["sha512_pieces"]) == 10
+        assert piecemap["sha512_pieces"][0] != piecemap["sha512_pieces"][1]
+        assert binascii.hexlify(piecemap["sha512_pieces"][0]) == b"a73abad9992b3d0b672d0c2a292046695d31bebdcb1e150c8410bbe7c972eff3"
+
+    def testVerifyPiece(self, site):
+        # Each piece verifies against its own hash; a mismatched piece raises VerifyError.
+        inner_path = self.createBigfile(site)
+
+        # Verify all 10 piece
+        f = site.storage.open(inner_path, "rb")
+        for i in range(10):
+            piece = io.BytesIO(f.read(1024 * 1024))
+            piece.seek(0)
+            site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
+        f.close()
+
+        # Try to verify piece 0 with piece 1 hash
+        with pytest.raises(VerifyError) as err:
+            i = 1
+            f = site.storage.open(inner_path, "rb")
+            piece = io.BytesIO(f.read(1024 * 1024))
+            f.close()
+            site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
+        assert "Invalid hash" in str(err)
+
+    def testSparseFile(self, site):
+        # Ranged writes into a sparse file land at the right offsets and
+        # writing near the end is not slower than writing at the start.
+        inner_path = "sparsefile"
+
+        # Create a 100MB sparse file
+        site.storage.createSparseFile(inner_path, 100 * 1024 * 1024)
+
+        # Write to file beginning
+        s = time.time()
+        f = site.storage.write("%s|%s-%s" % (inner_path, 0, 1024 * 1024), b"hellostart" * 1024)
+        time_write_start = time.time() - s
+
+        # Write to file end
+        s = time.time()
+        f = site.storage.write("%s|%s-%s" % (inner_path, 99 * 1024 * 1024, 99 * 1024 * 1024 + 1024 * 1024), b"helloend" * 1024)
+        time_write_end = time.time() - s
+
+        # Verify writes
+        f = site.storage.open(inner_path)
+        assert f.read(10) == b"hellostart"
+        f.seek(99 * 1024 * 1024)
+        assert f.read(8) == b"helloend"
+        f.close()
+
+        site.storage.delete(inner_path)
+
+        # Writing to end should not take much longer, than writing to start
+        assert time_write_end <= max(0.1, time_write_start * 1.1)
+
+    def testRangedFileRequest(self, file_server, site, site_temp):
+        # A peer can request a single 1MB byte range and the data verifies.
+        inner_path = self.createBigfile(site)
+
+        file_server.sites[site.address] = site
+        client = FileServer(file_server.ip, 1545)
+        client.sites[site_temp.address] = site_temp
+        site_temp.connection_server = client
+        connection = client.getConnection(file_server.ip, 1544)
+
+        # Add file_server as peer to client
+        peer_file_server = site_temp.addPeer(file_server.ip, 1544)
+
+        buff = peer_file_server.getFile(site_temp.address, "%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+
+        assert len(buff.getvalue()) == 1 * 1024 * 1024  # Correct block size
+        assert buff.getvalue().startswith(b"Test524")  # Correct data
+        buff.seek(0)
+        assert site.content_manager.verifyPiece(inner_path, 5 * 1024 * 1024, buff)  # Correct hash
+
+        connection.close()
+        client.stop()
+
+    def testRangedFileDownload(self, file_server, site, site_temp):
+        # Downloading selected pieces only fills those ranges; the rest of the
+        # sparse file stays zeroed, and the hashfield is updated.
+        inner_path = self.createBigfile(site)
+
+        # Init source server
+        site.connection_server = file_server
+        file_server.sites[site.address] = site
+
+        # Make sure the file and the piecemap in the optional hashfield
+        file_info = site.content_manager.getFileInfo(inner_path)
+        assert site.content_manager.hashfield.hasHash(file_info["sha512"])
+
+        piecemap_hash = site.content_manager.getFileInfo(file_info["piecemap"])["sha512"]
+        assert site.content_manager.hashfield.hasHash(piecemap_hash)
+
+        # Init client server
+        client = ConnectionServer(file_server.ip, 1545)
+        site_temp.connection_server = client
+        peer_client = site_temp.addPeer(file_server.ip, 1544)
+
+        # Download site
+        site_temp.download(blind_includes=True).join(timeout=5)
+
+        bad_files = site_temp.storage.verifyFiles(quick_check=True)["bad_files"]
+        assert not bad_files
+
+        # client_piecefield = peer_client.piecefields[file_info["sha512"]].tostring()
+        # assert client_piecefield == "1" * 10
+
+        # Download 5. and 10. block
+
+        site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+        site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))
+
+        # Verify 0. block not downloaded
+        f = site_temp.storage.open(inner_path)
+        assert f.read(10) == b"\0" * 10
+        # Verify 5. and 10. block downloaded
+        f.seek(5 * 1024 * 1024)
+        assert f.read(7) == b"Test524"
+        f.seek(9 * 1024 * 1024)
+        assert f.read(7) == b"943---T"
+
+        # Verify hashfield
+        assert set(site_temp.content_manager.hashfield) == set([18343, 43727])  # 18343: data/optional.any.iso, 43727: data/optional.any.iso.hashmap.msgpack
+
+    def testOpenBigfile(self, file_server, site, site_temp):
+        # openBigfile gives a virtual file that transparently downloads the
+        # pieces being read, caches them, and handles overflow/out-of-range reads.
+        inner_path = self.createBigfile(site)
+
+        # Init source server
+        site.connection_server = file_server
+        file_server.sites[site.address] = site
+
+        # Init client server
+        client = ConnectionServer(file_server.ip, 1545)
+        site_temp.connection_server = client
+        site_temp.addPeer(file_server.ip, 1544)
+
+        # Download site
+        site_temp.download(blind_includes=True).join(timeout=5)
+
+        # Open virtual file
+        assert not site_temp.storage.isFile(inner_path)
+
+        with site_temp.storage.openBigfile(inner_path) as f:
+            with Spy.Spy(FileRequest, "route") as requests:
+                f.seek(5 * 1024 * 1024)
+                assert f.read(7) == b"Test524"
+
+                f.seek(9 * 1024 * 1024)
+                assert f.read(7) == b"943---T"
+
+                assert len(requests) == 4  # 1x piecemap + 1x getpiecefield + 2x for pieces
+
+            assert set(site_temp.content_manager.hashfield) == set([18343, 43727])
+
+            assert site_temp.storage.piecefields[f.sha512].tostring() == "0000010001"
+            assert f.sha512 in site_temp.getSettingsCache()["piecefields"]
+
+            # Test requesting already downloaded
+            with Spy.Spy(FileRequest, "route") as requests:
+                f.seek(5 * 1024 * 1024)
+                assert f.read(7) == b"Test524"
+
+                assert len(requests) == 0
+
+            # Test requesting multi-block overflow reads
+            with Spy.Spy(FileRequest, "route") as requests:
+                f.seek(5 * 1024 * 1024)  # We already have this block
+                data = f.read(1024 * 1024 * 3)  # Our read overflow to 6. and 7. block
+                assert data.startswith(b"Test524")
+                assert data.endswith(b"Test838-")
+                assert b"\0" not in data  # No null bytes allowed
+
+                assert len(requests) == 2  # Two block download
+
+            # Test out of range request
+            f.seek(5 * 1024 * 1024)
+            data = f.read(1024 * 1024 * 30)
+            assert len(data) == 10 * 1000 * 1000 - (5 * 1024 * 1024)
+
+            f.seek(30 * 1024 * 1024)
+            data = f.read(1024 * 1024 * 30)
+            assert len(data) == 0
+
+    @pytest.mark.parametrize("piecefield_obj", [BigfilePiecefield, BigfilePiecefieldPacked])
+    def testPiecefield(self, piecefield_obj, site):
+        # Both piecefield implementations round-trip frombytes/tobytes,
+        # support indexing, and survive pack/unpack.
+        testdatas = [
+            b"\x01" * 100 + b"\x00" * 900 + b"\x01" * 4000 + b"\x00" * 4999 + b"\x01",
+            b"\x00\x01\x00\x01\x00\x01" * 10 + b"\x00\x01" * 90 + b"\x01\x00" * 400 + b"\x00" * 4999,
+            b"\x01" * 10000,
+            b"\x00" * 10000
+        ]
+        for testdata in testdatas:
+            piecefield = piecefield_obj()
+
+            piecefield.frombytes(testdata)
+            assert piecefield.tobytes() == testdata
+            assert piecefield[0] == testdata[0]
+            assert piecefield[100] == testdata[100]
+            assert piecefield[1000] == testdata[1000]
+            assert piecefield[len(testdata) - 1] == testdata[len(testdata) - 1]
+
+            packed = piecefield.pack()
+            piecefield_new = piecefield_obj()
+            piecefield_new.unpack(packed)
+            assert piecefield.tobytes() == piecefield_new.tobytes()
+            assert piecefield_new.tobytes() == testdata
+
+    def testFileGet(self, file_server, site, site_temp):
+        # A peer serving a partially-downloaded bigfile only answers range
+        # requests for pieces it actually has.
+        inner_path = self.createBigfile(site)
+
+        # Init source server
+        site.connection_server = file_server
+        file_server.sites[site.address] = site
+
+        # Init client server
+        site_temp.connection_server = FileServer(file_server.ip, 1545)
+        site_temp.connection_server.sites[site_temp.address] = site_temp
+        site_temp.addPeer(file_server.ip, 1544)
+
+        # Download site
+        site_temp.download(blind_includes=True).join(timeout=5)
+
+        # Download second block
+        with site_temp.storage.openBigfile(inner_path) as f:
+            f.seek(1024 * 1024)
+            assert f.read(1024)[0:1] != b"\0"
+
+        # Make sure first block not downloaded
+        with site_temp.storage.open(inner_path) as f:
+            assert f.read(1024)[0:1] == b"\0"
+
+        peer2 = site.addPeer(file_server.ip, 1545, return_peer=True)
+
+        # Should drop error on first block request
+        assert not peer2.getFile(site.address, "%s|0-%s" % (inner_path, 1024 * 1024 * 1))
+
+        # Should not drop error for second block request
+        assert peer2.getFile(site.address, "%s|%s-%s" % (inner_path, 1024 * 1024 * 1, 1024 * 1024 * 2))
+
+    def benchmarkPeerMemory(self, site, file_server):
+        # Manual benchmark (not collected by pytest: no "test" prefix) for
+        # the memory overhead of lazy per-peer piecefield attributes.
+        # Init source server
+        site.connection_server = file_server
+        file_server.sites[site.address] = site
+
+        import psutil, os
+        meminfo = psutil.Process(os.getpid()).memory_info
+
+        mem_s = meminfo()[0]
+        s = time.time()
+        for i in range(25000):
+            site.addPeer(file_server.ip, i)
+        print("%.3fs MEM: + %sKB" % (time.time() - s, (meminfo()[0] - mem_s) / 1024))  # 0.082s MEM: + 6800KB
+        print(list(site.peers.values())[0].piecefields)
+
+    def testUpdatePiecefield(self, file_server, site, site_temp):
+        # updatePiecefields() populates a peer's piecefields from the remote.
+        inner_path = self.createBigfile(site)
+
+        server1 = file_server
+        server1.sites[site.address] = site
+        server2 = FileServer(file_server.ip, 1545)
+        server2.sites[site_temp.address] = site_temp
+        site_temp.connection_server = server2
+
+        # Add file_server as peer to client
+        server2_peer1 = site_temp.addPeer(file_server.ip, 1544)
+
+        # Testing piecefield sync
+        assert len(server2_peer1.piecefields) == 0
+        assert server2_peer1.updatePiecefields()  # Query piecefields from peer
+        assert len(server2_peer1.piecefields) > 0
+
+    def testWorkerManagerPiecefieldDeny(self, file_server, site, site_temp):
+        # Piece tasks must not be assigned to peers whose piecefield lacks the piece.
+        inner_path = self.createBigfile(site)
+
+        server1 = file_server
+        server1.sites[site.address] = site
+        server2 = FileServer(file_server.ip, 1545)
+        server2.sites[site_temp.address] = site_temp
+        site_temp.connection_server = server2
+
+        # Add file_server as peer to client
+        server2_peer1 = site_temp.addPeer(file_server.ip, 1544)  # Working
+
+        site_temp.downloadContent("content.json", download_files=False)
+        site_temp.needFile("data/optional.any.iso.piecemap.msgpack")
+
+        # Add fake peers with optional files downloaded
+        for i in range(5):
+            fake_peer = site_temp.addPeer("127.0.1.%s" % i, 1544)
+            fake_peer.hashfield = site.content_manager.hashfield
+            fake_peer.has_hashfield = True
+
+        with Spy.Spy(WorkerManager, "addWorker") as requests:
+            site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+            site_temp.needFile("%s|%s-%s" % (inner_path, 6 * 1024 * 1024, 7 * 1024 * 1024))
+
+        # It should only request parts from peer1 as the other peers do not have the requested parts in their piecefields
+        assert len([request[1] for request in requests if request[1] != server2_peer1]) == 0
+
+    def testWorkerManagerPiecefieldDownload(self, file_server, site, site_temp):
+        # Each piece should be requested from the (mock) peer whose piecefield owns it.
+        inner_path = self.createBigfile(site)
+
+        server1 = file_server
+        server1.sites[site.address] = site
+        server2 = FileServer(file_server.ip, 1545)
+        server2.sites[site_temp.address] = site_temp
+        site_temp.connection_server = server2
+        sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]
+
+        # Create 10 fake peer for each piece
+        for i in range(10):
+            peer = Peer(file_server.ip, 1544, site_temp, server2)
+            peer.piecefields[sha512][i] = b"\x01"
+            peer.updateHashfield = mock.MagicMock(return_value=False)
+            peer.updatePiecefields = mock.MagicMock(return_value=False)
+            peer.findHashIds = mock.MagicMock(return_value={"nope": []})
+            peer.hashfield = site.content_manager.hashfield
+            peer.has_hashfield = True
+            peer.key = "Peer:%s" % i
+            site_temp.peers["Peer:%s" % i] = peer
+
+        site_temp.downloadContent("content.json", download_files=False)
+        site_temp.needFile("data/optional.any.iso.piecemap.msgpack")
+
+        with Spy.Spy(Peer, "getFile") as requests:
+            for i in range(10):
+                site_temp.needFile("%s|%s-%s" % (inner_path, i * 1024 * 1024, (i + 1) * 1024 * 1024))
+
+        assert len(requests) == 10
+        for i in range(10):
+            assert requests[i][0] == site_temp.peers["Peer:%s" % i]  # Every part should be requested from piece owner peer
+
+ def testDownloadStats(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ # Check size before downloads
+ assert site_temp.settings["size"] < 10 * 1024 * 1024
+ assert site_temp.settings["optional_downloaded"] == 0
+ size_piecemap = site_temp.content_manager.getFileInfo(inner_path + ".piecemap.msgpack")["size"]
+ size_bigfile = site_temp.content_manager.getFileInfo(inner_path)["size"]
+
+ with site_temp.storage.openBigfile(inner_path) as f:
+ assert b"\0" not in f.read(1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ with site_temp.storage.openBigfile(inner_path) as f:
+ # Don't count twice
+ assert b"\0" not in f.read(1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ # Add second block
+ assert b"\0" not in f.read(1024 * 1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ def testPrebuffer(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ with site_temp.storage.openBigfile(inner_path, prebuffer=1024 * 1024 * 2) as f:
+ with Spy.Spy(FileRequest, "route") as requests:
+ f.seek(5 * 1024 * 1024)
+ assert f.read(7) == b"Test524"
+ # assert len(requests) == 3 # 1x piecemap + 1x getpiecefield + 1x for pieces
+ assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 2
+
+ time.sleep(0.5) # Wait prebuffer download
+
+ sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]
+ assert site_temp.storage.piecefields[sha512].tostring() == "0000011100"
+
+ # No prebuffer beyond end of the file
+ f.seek(9 * 1024 * 1024)
+ assert b"\0" not in f.read(7)
+
+ assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 0
+
+ def testDownloadAllPieces(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|all" % inner_path)
+
+ assert len(requests) == 12 # piecemap.msgpack, getPiecefields, 10 x piece
+
+ # Don't re-download already got pieces
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|all" % inner_path)
+
+ assert len(requests) == 0
+
+ def testFileSize(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ # Download first block
+ site_temp.needFile("%s|%s-%s" % (inner_path, 0 * 1024 * 1024, 1 * 1024 * 1024))
+ assert site_temp.storage.getSize(inner_path) < 1000 * 1000 * 10 # Size on the disk should be smaller than the real size
+
+ site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))
+ assert site_temp.storage.getSize(inner_path) == site.storage.getSize(inner_path)
+
+ def testFileRename(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ site_temp.connection_server = FileServer(file_server.ip, 1545)
+ site_temp.connection_server.sites[site_temp.address] = site_temp
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|%s-%s" % (inner_path, 0, 1 * self.piece_size))
+
+ assert len([req for req in requests if req[1] == "streamFile"]) == 2 # 1 piece + piecemap
+
+ # Rename the file
+ inner_path_new = inner_path.replace(".iso", "-new.iso")
+ site.storage.rename(inner_path, inner_path_new)
+ site.storage.delete("data/optional.any.iso.piecemap.msgpack")
+ assert site.content_manager.sign("content.json", self.privatekey, remove_missing_optional=True)
+
+ files_optional = site.content_manager.contents["content.json"]["files_optional"].keys()
+
+ assert "data/optional.any-new.iso.piecemap.msgpack" in files_optional
+ assert "data/optional.any.iso.piecemap.msgpack" not in files_optional
+ assert "data/optional.any.iso" not in files_optional
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site.publish()
+ time.sleep(0.1)
+ site_temp.download(blind_includes=True).join(timeout=5) # Wait for download
+
+ assert len([req[1] for req in requests if req[1] == "streamFile"]) == 0
+
+ with site_temp.storage.openBigfile(inner_path_new, prebuffer=0) as f:
+ f.read(1024)
+
+ # First piece already downloaded
+ assert [req for req in requests if req[1] == "streamFile"] == []
+
+ # Second piece needs to be downloaded + changed piecemap
+ f.seek(self.piece_size)
+ f.read(1024)
+ assert [req[3]["inner_path"] for req in requests if req[1] == "streamFile"] == [inner_path_new + ".piecemap.msgpack", inner_path_new]
+
+ @pytest.mark.parametrize("size", [1024 * 3, 1024 * 1024 * 3, 1024 * 1024 * 30])
+ def testNullFileRead(self, file_server, site, site_temp, size):
+ inner_path = "data/optional.iso"
+
+ f = site.storage.open(inner_path, "w")
+ f.write("\0" * size)
+ f.close()
+ assert site.content_manager.sign("content.json", self.privatekey)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ site_temp.connection_server = FileServer(file_server.ip, 1545)
+ site_temp.connection_server.sites[site_temp.address] = site_temp
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True).join(timeout=5)
+
+ if "piecemap" in site.content_manager.getFileInfo(inner_path): # Bigfile
+ site_temp.needFile(inner_path + "|all")
+ else:
+ site_temp.needFile(inner_path)
+
+
+ assert site_temp.storage.getSize(inner_path) == size
diff --git a/plugins/Bigfile/Test/conftest.py b/plugins/Bigfile/Test/conftest.py
new file mode 100644
index 00000000..634e66e2
--- /dev/null
+++ b/plugins/Bigfile/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
diff --git a/plugins/Bigfile/Test/pytest.ini b/plugins/Bigfile/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/Bigfile/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/Bigfile/__init__.py b/plugins/Bigfile/__init__.py
new file mode 100644
index 00000000..cf2dcb49
--- /dev/null
+++ b/plugins/Bigfile/__init__.py
@@ -0,0 +1,2 @@
+from . import BigfilePlugin
+from .BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked
\ No newline at end of file
diff --git a/plugins/Chart/ChartCollector.py b/plugins/Chart/ChartCollector.py
new file mode 100644
index 00000000..776343af
--- /dev/null
+++ b/plugins/Chart/ChartCollector.py
@@ -0,0 +1,181 @@
+import time
+import sys
+import collections
+import itertools
+import logging
+
+import gevent
+from util import helper
+from Config import config
+
+
+class ChartCollector(object):
+ def __init__(self, db):
+ self.db = db
+ if config.action == "main":
+ gevent.spawn_later(60 * 3, self.collector)
+ self.log = logging.getLogger("ChartCollector")
+ self.last_values = collections.defaultdict(dict)
+
+ def setInitialLastValues(self, sites):
+ # Recover last value of site bytes/sent
+ for site in sites:
+ self.last_values["site:" + site.address]["site_bytes_recv"] = site.settings.get("bytes_recv", 0)
+ self.last_values["site:" + site.address]["site_bytes_sent"] = site.settings.get("bytes_sent", 0)
+
+ def getCollectors(self):
+ collectors = {}
+ import main
+ file_server = main.file_server
+ sites = file_server.sites
+ if not sites:
+ return collectors
+ content_db = list(sites.values())[0].content_manager.contents.db
+
+ # Connection stats
+ collectors["connection"] = lambda: len(file_server.connections)
+ collectors["connection_in"] = (
+ lambda: len([1 for connection in file_server.connections if connection.type == "in"])
+ )
+ collectors["connection_onion"] = (
+ lambda: len([1 for connection in file_server.connections if connection.ip.endswith(".onion")])
+ )
+ collectors["connection_ping_avg"] = (
+ lambda: round(1000 * helper.avg(
+ [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
+ ))
+ )
+ collectors["connection_ping_min"] = (
+ lambda: round(1000 * min(
+ [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
+ ))
+ )
+ collectors["connection_rev_avg"] = (
+ lambda: helper.avg(
+ [connection.handshake["rev"] for connection in file_server.connections if connection.handshake]
+ )
+ )
+
+ # Request stats
+ collectors["file_bytes_recv|change"] = lambda: file_server.bytes_recv
+ collectors["file_bytes_sent|change"] = lambda: file_server.bytes_sent
+ collectors["request_num_recv|change"] = lambda: file_server.num_recv
+ collectors["request_num_sent|change"] = lambda: file_server.num_sent
+
+ # Limit
+ collectors["optional_limit"] = lambda: content_db.getOptionalLimitBytes()
+ collectors["optional_used"] = lambda: content_db.getOptionalUsedBytes()
+ collectors["optional_downloaded"] = lambda: sum([site.settings.get("optional_downloaded", 0) for site in sites.values()])
+
+ # Peers
+ collectors["peer"] = lambda peers: len(peers)
+ collectors["peer_onion"] = lambda peers: len([True for peer in peers if ".onion" in peer])
+
+ # Size
+ collectors["size"] = lambda: sum([site.settings.get("size", 0) for site in sites.values()])
+ collectors["size_optional"] = lambda: sum([site.settings.get("size_optional", 0) for site in sites.values()])
+ collectors["content"] = lambda: sum([len(site.content_manager.contents) for site in sites.values()])
+
+ return collectors
+
+ def getSiteCollectors(self):
+ site_collectors = {}
+
+ # Size
+ site_collectors["site_size"] = lambda site: site.settings.get("size", 0)
+ site_collectors["site_size_optional"] = lambda site: site.settings.get("size_optional", 0)
+ site_collectors["site_optional_downloaded"] = lambda site: site.settings.get("optional_downloaded", 0)
+ site_collectors["site_content"] = lambda site: len(site.content_manager.contents)
+
+ # Data transfer
+ site_collectors["site_bytes_recv|change"] = lambda site: site.settings.get("bytes_recv", 0)
+ site_collectors["site_bytes_sent|change"] = lambda site: site.settings.get("bytes_sent", 0)
+
+ # Peers
+ site_collectors["site_peer"] = lambda site: len(site.peers)
+ site_collectors["site_peer_onion"] = lambda site: len(
+ [True for peer in site.peers.values() if peer.ip.endswith(".onion")]
+ )
+ site_collectors["site_peer_connected"] = lambda site: len([True for peer in site.peers.values() if peer.connection])
+
+ return site_collectors
+
+ def getUniquePeers(self):
+ import main
+ sites = main.file_server.sites
+ return set(itertools.chain.from_iterable(
+ [site.peers.keys() for site in sites.values()]
+ ))
+
+ def collectDatas(self, collectors, last_values, site=None):
+ if site is None:
+ peers = self.getUniquePeers()
+ datas = {}
+ for key, collector in collectors.items():
+ try:
+ if site:
+ value = collector(site)
+ elif key.startswith("peer"):
+ value = collector(peers)
+ else:
+ value = collector()
+ except Exception as err:
+ self.log.info("Collector %s error: %s" % (key, err))
+ value = None
+
+ if "|change" in key: # Store changes relative to last value
+ key = key.replace("|change", "")
+ last_value = last_values.get(key, 0)
+ last_values[key] = value
+ value = value - last_value
+
+ if value is None:
+ datas[key] = None
+ else:
+ datas[key] = round(value, 3)
+ return datas
+
+ def collectGlobal(self, collectors, last_values):
+ now = int(time.time())
+ s = time.time()
+ datas = self.collectDatas(collectors, last_values["global"])
+ values = []
+ for key, value in datas.items():
+ values.append((self.db.getTypeId(key), value, now))
+ self.log.debug("Global collectors done in %.3fs" % (time.time() - s))
+
+ s = time.time()
+ cur = self.db.getCursor()
+ cur.cursor.executemany("INSERT INTO data (type_id, value, date_added) VALUES (?, ?, ?)", values)
+ cur.close()
+ self.log.debug("Global collectors inserted in %.3fs" % (time.time() - s))
+
+ def collectSites(self, sites, collectors, last_values):
+ now = int(time.time())
+ s = time.time()
+ values = []
+ for address, site in sites.items():
+ site_datas = self.collectDatas(collectors, last_values["site:%s" % address], site)
+ for key, value in site_datas.items():
+ values.append((self.db.getTypeId(key), self.db.getSiteId(address), value, now))
+ time.sleep(0.001)
+ self.log.debug("Site collections done in %.3fs" % (time.time() - s))
+
+ s = time.time()
+ cur = self.db.getCursor()
+ cur.cursor.executemany("INSERT INTO data (type_id, site_id, value, date_added) VALUES (?, ?, ?, ?)", values)
+ cur.close()
+ self.log.debug("Site collectors inserted in %.3fs" % (time.time() - s))
+
+ def collector(self):
+ collectors = self.getCollectors()
+ site_collectors = self.getSiteCollectors()
+ import main
+ sites = main.file_server.sites
+ i = 0
+ while 1:
+ self.collectGlobal(collectors, self.last_values)
+ if i % 12 == 0: # Only collect sites data every hour
+ self.collectSites(sites, site_collectors, self.last_values)
+ time.sleep(60 * 5)
+ i += 1
diff --git a/plugins/Chart/ChartDb.py b/plugins/Chart/ChartDb.py
new file mode 100644
index 00000000..9dd4d3db
--- /dev/null
+++ b/plugins/Chart/ChartDb.py
@@ -0,0 +1,133 @@
+from Config import config
+from Db.Db import Db
+import time
+
+
+class ChartDb(Db):
+ def __init__(self):
+ self.version = 2
+ super(ChartDb, self).__init__(self.getSchema(), "%s/chart.db" % config.data_dir)
+ self.foreign_keys = True
+ self.checkTables()
+ self.sites = self.loadSites()
+ self.types = self.loadTypes()
+
+ def getSchema(self):
+ schema = {}
+ schema["db_name"] = "Chart"
+ schema["tables"] = {}
+ schema["tables"]["data"] = {
+ "cols": [
+ ["data_id", "INTEGER PRIMARY KEY ASC AUTOINCREMENT NOT NULL UNIQUE"],
+ ["type_id", "INTEGER NOT NULL"],
+ ["site_id", "INTEGER"],
+ ["value", "INTEGER"],
+ ["date_added", "DATETIME DEFAULT (CURRENT_TIMESTAMP)"]
+ ],
+ "indexes": [
+ "CREATE INDEX site_id ON data (site_id)",
+ "CREATE INDEX date_added ON data (date_added)"
+ ],
+ "schema_changed": 2
+ }
+ schema["tables"]["type"] = {
+ "cols": [
+ ["type_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
+ ["name", "TEXT"]
+ ],
+ "schema_changed": 1
+ }
+ schema["tables"]["site"] = {
+ "cols": [
+ ["site_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
+ ["address", "TEXT"]
+ ],
+ "schema_changed": 1
+ }
+ return schema
+
+ def getTypeId(self, name):
+ if name not in self.types:
+ self.execute("INSERT INTO type ?", {"name": name})
+ self.types[name] = self.cur.cursor.lastrowid
+
+ return self.types[name]
+
+ def getSiteId(self, address):
+ if address not in self.sites:
+ self.execute("INSERT INTO site ?", {"address": address})
+ self.sites[address] = self.cur.cursor.lastrowid
+
+ return self.sites[address]
+
+ def loadSites(self):
+ sites = {}
+ for row in self.execute("SELECT * FROM site"):
+ sites[row["address"]] = row["site_id"]
+ return sites
+
+ def loadTypes(self):
+ types = {}
+ for row in self.execute("SELECT * FROM type"):
+ types[row["name"]] = row["type_id"]
+ return types
+
+ def deleteSite(self, address):
+ if address in self.sites:
+ site_id = self.sites[address]
+ del self.sites[address]
+ self.execute("DELETE FROM site WHERE ?", {"site_id": site_id})
+ self.execute("DELETE FROM data WHERE ?", {"site_id": site_id})
+
+ def archive(self):
+ week_back = 1
+ while 1:
+ s = time.time()
+ date_added_from = time.time() - 60 * 60 * 24 * 7 * (week_back + 1)
+ date_added_to = date_added_from + 60 * 60 * 24 * 7
+ res = self.execute("""
+ SELECT
+ MAX(date_added) AS date_added,
+ SUM(value) AS value,
+ GROUP_CONCAT(data_id) AS data_ids,
+ type_id,
+ site_id,
+ COUNT(*) AS num
+ FROM data
+ WHERE
+ site_id IS NULL AND
+ date_added > :date_added_from AND
+ date_added < :date_added_to
+ GROUP BY strftime('%Y-%m-%d %H', date_added, 'unixepoch', 'localtime'), type_id
+ """, {"date_added_from": date_added_from, "date_added_to": date_added_to})
+
+ num_archived = 0
+ cur = self.getCursor()
+ for row in res:
+ if row["num"] == 1:
+ continue
+ cur.execute("INSERT INTO data ?", {
+ "type_id": row["type_id"],
+ "site_id": row["site_id"],
+ "value": row["value"],
+ "date_added": row["date_added"]
+ })
+ cur.execute("DELETE FROM data WHERE data_id IN (%s)" % row["data_ids"])
+ num_archived += row["num"]
+ self.log.debug("Archived %s data from %s weeks ago in %.3fs" % (num_archived, week_back, time.time() - s))
+ week_back += 1
+ time.sleep(0.1)
+ if num_archived == 0:
+ break
+ # Only keep 6 month of global stats
+ self.execute(
+ "DELETE FROM data WHERE site_id IS NULL AND date_added < :date_added_limit",
+ {"date_added_limit": time.time() - 60 * 60 * 24 * 30 * 6 }
+ )
+ # Only keep 1 month of site stats
+ self.execute(
+ "DELETE FROM data WHERE site_id IS NOT NULL AND date_added < :date_added_limit",
+ {"date_added_limit": time.time() - 60 * 60 * 24 * 30 }
+ )
+ if week_back > 1:
+ self.execute("VACUUM")
diff --git a/plugins/Chart/ChartPlugin.py b/plugins/Chart/ChartPlugin.py
new file mode 100644
index 00000000..ddc1e609
--- /dev/null
+++ b/plugins/Chart/ChartPlugin.py
@@ -0,0 +1,60 @@
+import time
+import itertools
+
+import gevent
+
+from Config import config
+from util import helper
+from Plugin import PluginManager
+from .ChartDb import ChartDb
+from .ChartCollector import ChartCollector
+
+if "db" not in locals().keys(): # Share on reloads
+ db = ChartDb()
+ gevent.spawn_later(10 * 60, db.archive)
+ helper.timer(60 * 60 * 6, db.archive)
+ collector = ChartCollector(db)
+
+@PluginManager.registerTo("SiteManager")
+class SiteManagerPlugin(object):
+ def load(self, *args, **kwargs):
+ back = super(SiteManagerPlugin, self).load(*args, **kwargs)
+ collector.setInitialLastValues(self.sites.values())
+ return back
+
+ def delete(self, address, *args, **kwargs):
+ db.deleteSite(address)
+ return super(SiteManagerPlugin, self).delete(address, *args, **kwargs)
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ def actionChartDbQuery(self, to, query, params=None):
+ if not "ADMIN" in self.permissions:
+ return {"error": "No permission"}
+
+ if config.debug or config.verbose:
+ s = time.time()
+ rows = []
+ try:
+ if not query.strip().upper().startswith("SELECT"):
+ raise Exception("Only SELECT query supported")
+ res = db.execute(query, params)
+ except Exception as err: # Response the error to client
+ self.log.error("ChartDbQuery error: %s" % err)
+ return {"error": str(err)}
+ # Convert result to dict
+ for row in res:
+ rows.append(dict(row))
+ if config.verbose and time.time() - s > 0.1: # Log slow query
+ self.log.debug("Slow query: %s (%.3fs)" % (query, time.time() - s))
+ return rows
+
+ def actionChartGetPeerLocations(self, to):
+ if not "ADMIN" in self.permissions:
+ return {"error": "No permission"}
+
+ peers = {}
+ for site in self.server.sites.values():
+ peers.update(site.peers)
+ peer_locations = self.getPeerLocations(peers)
+ return peer_locations
diff --git a/plugins/Chart/__init__.py b/plugins/Chart/__init__.py
new file mode 100644
index 00000000..2c284609
--- /dev/null
+++ b/plugins/Chart/__init__.py
@@ -0,0 +1 @@
+from . import ChartPlugin
\ No newline at end of file
diff --git a/plugins/ContentFilter/ContentFilterPlugin.py b/plugins/ContentFilter/ContentFilterPlugin.py
new file mode 100644
index 00000000..f6d74e7a
--- /dev/null
+++ b/plugins/ContentFilter/ContentFilterPlugin.py
@@ -0,0 +1,216 @@
+import time
+import re
+import html
+import hashlib
+
+from Plugin import PluginManager
+from Translate import Translate
+from Config import config
+
+from .ContentFilterStorage import ContentFilterStorage
+
+
+if "_" not in locals():
+ _ = Translate("plugins/ContentFilter/languages/")
+
+
+@PluginManager.registerTo("SiteManager")
+class SiteManagerPlugin(object):
+ def load(self, *args, **kwargs):
+ global filter_storage
+ super(SiteManagerPlugin, self).load(*args, **kwargs)
+ filter_storage = ContentFilterStorage(site_manager=self)
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ # Mute
+ def cbMuteAdd(self, to, auth_address, cert_user_id, reason):
+ filter_storage.file_content["mutes"][auth_address] = {
+ "cert_user_id": cert_user_id, "reason": reason, "source": self.site.address, "date_added": time.time()
+ }
+ filter_storage.save()
+ filter_storage.changeDbs(auth_address, "remove")
+ self.response(to, "ok")
+
+ def actionMuteAdd(self, to, auth_address, cert_user_id, reason):
+ if "ADMIN" in self.getPermissions(to):
+ self.cbMuteAdd(to, auth_address, cert_user_id, reason)
+ else:
+ self.cmd(
+ "confirm",
+ [_["Hide all content from %s?"] % html.escape(cert_user_id), _["Mute"]],
+ lambda res: self.cbMuteAdd(to, auth_address, cert_user_id, reason)
+ )
+
+ def cbMuteRemove(self, to, auth_address):
+ del filter_storage.file_content["mutes"][auth_address]
+ filter_storage.save()
+ filter_storage.changeDbs(auth_address, "load")
+ self.response(to, "ok")
+
+ def actionMuteRemove(self, to, auth_address):
+ if "ADMIN" in self.getPermissions(to):
+ self.cbMuteRemove(to, auth_address)
+ else:
+ self.cmd(
+ "confirm",
+ [_["Unmute %s?"] % html.escape(filter_storage.file_content["mutes"][auth_address]["cert_user_id"]), _["Unmute"]],
+ lambda res: self.cbMuteRemove(to, auth_address)
+ )
+
+ def actionMuteList(self, to):
+ if "ADMIN" in self.getPermissions(to):
+ self.response(to, filter_storage.file_content["mutes"])
+ else:
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can list mutes"})
+
+ # Siteblock
+ def actionSiteblockAdd(self, to, site_address, reason=None):
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can add to blocklist"})
+ filter_storage.file_content["siteblocks"][site_address] = {"date_added": time.time(), "reason": reason}
+ filter_storage.save()
+ self.response(to, "ok")
+
+ def actionSiteblockRemove(self, to, site_address):
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can remove from blocklist"})
+ del filter_storage.file_content["siteblocks"][site_address]
+ filter_storage.save()
+ self.response(to, "ok")
+
+ def actionSiteblockList(self, to):
+ if "ADMIN" in self.getPermissions(to):
+ self.response(to, filter_storage.file_content["siteblocks"])
+ else:
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can list blocklists"})
+
+ # Include
+ def actionFilterIncludeAdd(self, to, inner_path, description=None, address=None):
+ if address:
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage different site include"})
+ site = self.server.sites[address]
+ else:
+ address = self.site.address
+ site = self.site
+
+ if "ADMIN" in self.getPermissions(to):
+ self.cbFilterIncludeAdd(to, True, address, inner_path, description)
+ else:
+ content = site.storage.loadJson(inner_path)
+ title = _["New shared global content filter: %s (%s sites, %s users)"] % (
+ html.escape(inner_path), len(content.get("siteblocks", {})), len(content.get("mutes", {}))
+ )
+
+ self.cmd(
+ "confirm",
+ [title, "Add"],
+ lambda res: self.cbFilterIncludeAdd(to, res, address, inner_path, description)
+ )
+
+ def cbFilterIncludeAdd(self, to, res, address, inner_path, description):
+ if not res:
+ self.response(to, res)
+ return False
+
+ filter_storage.includeAdd(address, inner_path, description)
+ self.response(to, "ok")
+
+ def actionFilterIncludeRemove(self, to, inner_path, address=None):
+ if address:
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage different site include"})
+ else:
+ address = self.site.address
+
+ key = "%s/%s" % (address, inner_path)
+ if key not in filter_storage.file_content["includes"]:
+ self.response(to, {"error": "Include not found"})
+ filter_storage.includeRemove(address, inner_path)
+ self.response(to, "ok")
+
+ def actionFilterIncludeList(self, to, all_sites=False, filters=False):
+ if all_sites and "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can list all sites includes"})
+
+ back = []
+ includes = filter_storage.file_content.get("includes", {}).values()
+ for include in includes:
+ if not all_sites and include["address"] != self.site.address:
+ continue
+ if filters:
+ include = dict(include) # Don't modify original file_content
+ include_site = filter_storage.site_manager.get(include["address"])
+ if not include_site:
+ continue
+ content = include_site.storage.loadJson(include["inner_path"])
+ include["mutes"] = content.get("mutes", {})
+ include["siteblocks"] = content.get("siteblocks", {})
+ back.append(include)
+ self.response(to, back)
+
+
+@PluginManager.registerTo("SiteStorage")
+class SiteStoragePlugin(object):
+ def updateDbFile(self, inner_path, file=None, cur=None):
+ if file is not False: # File deletion always allowed
+ # Find for bitcoin addresses in file path
+ matches = re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path)
+ # Check if any of the adresses are in the mute list
+ for auth_address in matches:
+ if filter_storage.isMuted(auth_address):
+ self.log.debug("Mute match: %s, ignoring %s" % (auth_address, inner_path))
+ return False
+
+ return super(SiteStoragePlugin, self).updateDbFile(inner_path, file=file, cur=cur)
+
+ def onUpdated(self, inner_path, file=None):
+ file_path = "%s/%s" % (self.site.address, inner_path)
+ if file_path in filter_storage.file_content["includes"]:
+ self.log.debug("Filter file updated: %s" % inner_path)
+ filter_storage.includeUpdateAll()
+ return super(SiteStoragePlugin, self).onUpdated(inner_path, file=file)
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ def actionWrapper(self, path, extra_headers=None):
+ match = re.match("/(?P<address>[A-Za-z0-9\._-]+)(?P<inner_path>/.*|$)", path)
+ if not match:
+ return False
+ address = match.group("address")
+
+ if self.server.site_manager.get(address): # Site already exists
+ return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)
+
+ if self.server.site_manager.isDomain(address):
+ address = self.server.site_manager.resolveDomain(address)
+
+ if address:
+ address_sha256 = "0x" + hashlib.sha256(address.encode("utf8")).hexdigest()
+ else:
+ address_sha256 = None
+
+ if filter_storage.isSiteblocked(address) or filter_storage.isSiteblocked(address_sha256):
+ site = self.server.site_manager.get(config.homepage)
+ if not extra_headers:
+ extra_headers = {}
+
+ script_nonce = self.getScriptNonce()
+
+ self.sendHeader(extra_headers=extra_headers, script_nonce=script_nonce)
+ return iter([super(UiRequestPlugin, self).renderWrapper(
+ site, path, "uimedia/plugins/contentfilter/blocklisted.html?address=" + address,
+ "Blacklisted site", extra_headers, show_loadingscreen=False, script_nonce=script_nonce
+ )])
+ else:
+ return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)
+
+ def actionUiMedia(self, path, *args, **kwargs):
+ if path.startswith("/uimedia/plugins/contentfilter/"):
+ file_path = path.replace("/uimedia/plugins/contentfilter/", "plugins/ContentFilter/media/")
+ return self.actionFile(file_path)
+ else:
+ return super(UiRequestPlugin, self).actionUiMedia(path)
diff --git a/plugins/ContentFilter/ContentFilterStorage.py b/plugins/ContentFilter/ContentFilterStorage.py
new file mode 100644
index 00000000..3df0b435
--- /dev/null
+++ b/plugins/ContentFilter/ContentFilterStorage.py
@@ -0,0 +1,140 @@
+import os
+import json
+import logging
+import collections
+import time
+
+from Debug import Debug
+from Plugin import PluginManager
+from Config import config
+from util import helper
+
+class ContentFilterStorage(object):
+ def __init__(self, site_manager):
+ self.log = logging.getLogger("ContentFilterStorage")
+ self.file_path = "%s/filters.json" % config.data_dir
+ self.site_manager = site_manager
+ self.file_content = self.load()
+
+ # Set default values for filters.json
+ if not self.file_content:
+ self.file_content = {}
+
+ # Site blacklist renamed to site blocks
+ if "site_blacklist" in self.file_content:
+ self.file_content["siteblocks"] = self.file_content["site_blacklist"]
+ del self.file_content["site_blacklist"]
+
+ for key in ["mutes", "siteblocks", "includes"]:
+ if key not in self.file_content:
+ self.file_content[key] = {}
+
+ self.include_filters = collections.defaultdict(set) # Merged list of mutes and blacklists from all includes
+ self.includeUpdateAll(update_site_dbs=False)
+
+ def load(self):
+ # Rename previously used mutes.json -> filters.json
+ if os.path.isfile("%s/mutes.json" % config.data_dir):
+ self.log.info("Renaming mutes.json to filters.json...")
+ os.rename("%s/mutes.json" % config.data_dir, self.file_path)
+ if os.path.isfile(self.file_path):
+ try:
+ return json.load(open(self.file_path))
+ except Exception as err:
+ self.log.error("Error loading filters.json: %s" % err)
+ return None
+ else:
+ return None
+
+ def includeUpdateAll(self, update_site_dbs=True):
+ s = time.time()
+ new_include_filters = collections.defaultdict(set)
+
+ # Load data from all include files into a merged set
+ for include_path in self.file_content["includes"]:
+ address, inner_path = include_path.split("/", 1)
+ try:
+ content = self.site_manager.get(address).storage.loadJson(inner_path)
+ except Exception as err:
+ self.log.warning(
+ "Error loading include %s: %s" %
+ (include_path, Debug.formatException(err))
+ )
+ continue
+
+ for key, val in content.items():
+ if type(val) is not dict:
+ continue
+
+ new_include_filters[key].update(val.keys())
+
+ mutes_added = new_include_filters["mutes"].difference(self.include_filters["mutes"])
+ mutes_removed = self.include_filters["mutes"].difference(new_include_filters["mutes"])
+
+ self.include_filters = new_include_filters
+
+ if update_site_dbs:
+ for auth_address in mutes_added:
+ self.changeDbs(auth_address, "remove")
+
+ for auth_address in mutes_removed:
+ if not self.isMuted(auth_address):
+ self.changeDbs(auth_address, "load")
+
+ num_mutes = len(self.include_filters["mutes"])
+ num_siteblocks = len(self.include_filters["siteblocks"])
+ self.log.debug(
+ "Loaded %s mutes, %s blocked sites from %s includes in %.3fs" %
+ (num_mutes, num_siteblocks, len(self.file_content["includes"]), time.time() - s)
+ )
+
+ def includeAdd(self, address, inner_path, description=None):
+ self.file_content["includes"]["%s/%s" % (address, inner_path)] = {
+ "date_added": time.time(),
+ "address": address,
+ "description": description,
+ "inner_path": inner_path
+ }
+ self.includeUpdateAll()
+ self.save()
+
+ def includeRemove(self, address, inner_path):
+ del self.file_content["includes"]["%s/%s" % (address, inner_path)]
+ self.includeUpdateAll()
+ self.save()
+
+ def save(self):
+ s = time.time()
+ helper.atomicWrite(self.file_path, json.dumps(self.file_content, indent=2, sort_keys=True).encode("utf8"))
+ self.log.debug("Saved in %.3fs" % (time.time() - s))
+
+ def isMuted(self, auth_address):
+ if auth_address in self.file_content["mutes"] or auth_address in self.include_filters["mutes"]:
+ return True
+ else:
+ return False
+
+ def isSiteblocked(self, address):
+ if address in self.file_content["siteblocks"] or address in self.include_filters["siteblocks"]:
+ return True
+ else:
+ return False
+
+ # Search for and remove or re-add the files of a user
+ def changeDbs(self, auth_address, action):
+ self.log.debug("Mute action %s on user %s" % (action, auth_address))
+ res = list(self.site_manager.list().values())[0].content_manager.contents.db.execute(
+ "SELECT * FROM content LEFT JOIN site USING (site_id) WHERE inner_path LIKE :inner_path",
+ {"inner_path": "%%/%s/%%" % auth_address}
+ )
+ for row in res:
+ site = self.site_manager.sites.get(row["address"])
+ if not site:
+ continue
+ dir_inner_path = helper.getDirname(row["inner_path"])
+ for file_name in site.storage.walk(dir_inner_path):
+ if action == "remove":
+ site.storage.onUpdated(dir_inner_path + file_name, False)
+ else:
+ site.storage.onUpdated(dir_inner_path + file_name)
+ site.onFileDone(dir_inner_path + file_name)
diff --git a/plugins/ContentFilter/Test/TestContentFilter.py b/plugins/ContentFilter/Test/TestContentFilter.py
new file mode 100644
index 00000000..e1b37b16
--- /dev/null
+++ b/plugins/ContentFilter/Test/TestContentFilter.py
@@ -0,0 +1,82 @@
+import pytest
+from ContentFilter import ContentFilterPlugin
+from Site import SiteManager
+
+
+@pytest.fixture
+def filter_storage():
+ ContentFilterPlugin.filter_storage = ContentFilterPlugin.ContentFilterStorage(SiteManager.site_manager)
+ return ContentFilterPlugin.filter_storage
+
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestContentFilter:
+ def createInclude(self, site):
+ site.storage.writeJson("filters.json", {
+ "mutes": {"1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C": {}},
+ "siteblocks": {site.address: {}}
+ })
+
+ def testIncludeLoad(self, site, filter_storage):
+ self.createInclude(site)
+ filter_storage.file_content["includes"]["%s/%s" % (site.address, "filters.json")] = {
+ "date_added": 1528295893,
+ }
+
+ assert not filter_storage.include_filters["mutes"]
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert not filter_storage.isSiteblocked(site.address)
+ filter_storage.includeUpdateAll(update_site_dbs=False)
+ assert len(filter_storage.include_filters["mutes"]) == 1
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert filter_storage.isSiteblocked(site.address)
+
+ def testIncludeAdd(self, site, filter_storage):
+ self.createInclude(site)
+ query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C'"
+ assert not filter_storage.isSiteblocked(site.address)
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ # Add include
+ filter_storage.includeAdd(site.address, "filters.json")
+
+ assert filter_storage.isSiteblocked(site.address)
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 0
+
+ # Remove include
+ filter_storage.includeRemove(site.address, "filters.json")
+
+ assert not filter_storage.isSiteblocked(site.address)
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ def testIncludeChange(self, site, filter_storage):
+ self.createInclude(site)
+ filter_storage.includeAdd(site.address, "filters.json")
+ assert filter_storage.isSiteblocked(site.address)
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+
+ # Add new blocked site
+ assert not filter_storage.isSiteblocked("1Hello")
+
+ filter_content = site.storage.loadJson("filters.json")
+ filter_content["siteblocks"]["1Hello"] = {}
+ site.storage.writeJson("filters.json", filter_content)
+
+ assert filter_storage.isSiteblocked("1Hello")
+
+ # Add new muted user
+ query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q'"
+ assert not filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ filter_content["mutes"]["1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q"] = {}
+ site.storage.writeJson("filters.json", filter_content)
+
+ assert filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 0
+
+
diff --git a/plugins/ContentFilter/Test/conftest.py b/plugins/ContentFilter/Test/conftest.py
new file mode 100644
index 00000000..634e66e2
--- /dev/null
+++ b/plugins/ContentFilter/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
diff --git a/plugins/ContentFilter/Test/pytest.ini b/plugins/ContentFilter/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/ContentFilter/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/ContentFilter/__init__.py b/plugins/ContentFilter/__init__.py
new file mode 100644
index 00000000..2cbca8ee
--- /dev/null
+++ b/plugins/ContentFilter/__init__.py
@@ -0,0 +1 @@
+from . import ContentFilterPlugin
diff --git a/plugins/ContentFilter/languages/hu.json b/plugins/ContentFilter/languages/hu.json
new file mode 100644
index 00000000..9b57e697
--- /dev/null
+++ b/plugins/ContentFilter/languages/hu.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s tartalmaniak elrejtése?",
+ "Mute": "Elnémítás",
+ "Unmute %s?": "%s tartalmaniak megjelenítése?",
+ "Unmute": "Némítás visszavonása"
+}
diff --git a/plugins/ContentFilter/languages/it.json b/plugins/ContentFilter/languages/it.json
new file mode 100644
index 00000000..9a2c6761
--- /dev/null
+++ b/plugins/ContentFilter/languages/it.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s Vuoi nascondere i contenuti di questo utente ?",
+ "Mute": "Attiva Silenzia",
+ "Unmute %s?": "%s Vuoi mostrare i contenuti di questo utente ?",
+ "Unmute": "Disattiva Silenzia"
+}
diff --git a/plugins/ContentFilter/languages/pt-br.json b/plugins/ContentFilter/languages/pt-br.json
new file mode 100644
index 00000000..3c6bfbdc
--- /dev/null
+++ b/plugins/ContentFilter/languages/pt-br.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s Ocultar todo o conteúdo de ?",
+ "Mute": "Ativar o Silêncio",
+ "Unmute %s?": "%s Você quer mostrar o conteúdo deste usuário ?",
+ "Unmute": "Desligar o silêncio"
+}
diff --git a/plugins/ContentFilter/languages/zh-tw.json b/plugins/ContentFilter/languages/zh-tw.json
new file mode 100644
index 00000000..0995f3a0
--- /dev/null
+++ b/plugins/ContentFilter/languages/zh-tw.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "屏蔽 %s 的所有內容?",
+ "Mute": "屏蔽",
+ "Unmute %s?": "對 %s 解除屏蔽?",
+ "Unmute": "解除屏蔽"
+}
diff --git a/plugins/ContentFilter/languages/zh.json b/plugins/ContentFilter/languages/zh.json
new file mode 100644
index 00000000..bf63f107
--- /dev/null
+++ b/plugins/ContentFilter/languages/zh.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "屏蔽 %s 的所有内容?",
+ "Mute": "屏蔽",
+ "Unmute %s?": "对 %s 解除屏蔽?",
+ "Unmute": "解除屏蔽"
+}
diff --git a/plugins/ContentFilter/media/blocklisted.html b/plugins/ContentFilter/media/blocklisted.html
new file mode 100644
index 00000000..9a287b72
--- /dev/null
+++ b/plugins/ContentFilter/media/blocklisted.html
@@ -0,0 +1,107 @@
+
+
+
+
+
+
{_[Choose]}: "))
+ for content in contents:
+ body.append(_("{content} "))
+ body.append("
")
+ body.append("
")
+
+ def actionSidebarGetHtmlTag(self, to):
+ permissions = self.getPermissions(to)
+ if "ADMIN" not in permissions:
+ return self.response(to, "You don't have permission to run this command")
+
+ site = self.site
+
+ body = []
+
+ body.append("
')
+ # @scrollable()
+ @when_loaded.resolve()
+
+ else # Not first update, patch the html to keep unchanged dom elements
+ morphdom @tag.find(".content")[0], '
'+res+'
', {
+ onBeforeMorphEl: (from_el, to_el) -> # Ignore globe loaded state
+ if from_el.className == "globe" or from_el.className.indexOf("noupdate") >= 0
+ return false
+ else
+ return true
+ }
+
+ # Save and forget the private key used for site signing
+ @tag.find("#privatekey-add").off("click, touchend").on "click touchend", (e) =>
+ @wrapper.displayPrompt "Enter your private key:", "password", "Save", "", (privatekey) =>
+ @wrapper.ws.cmd "userSetSitePrivatekey", [privatekey], (res) =>
+ @wrapper.notifications.add "privatekey", "done", "Private key saved for site signing", 5000
+ return false
+
+ @tag.find("#privatekey-forgot").off("click, touchend").on "click touchend", (e) =>
+ @wrapper.displayConfirm "Remove saved private key for this site?", "Forgot", (res) =>
+ if not res
+ return false
+ @wrapper.ws.cmd "userSetSitePrivatekey", [""], (res) =>
+ @wrapper.notifications.add "privatekey", "done", "Saved private key removed", 5000
+ return false
+
+
+
+ animDrag: (e) =>
+ mousex = e.pageX
+ mousey = e.pageY
+ if not mousex and e.originalEvent.touches
+ mousex = e.originalEvent.touches[0].pageX
+ mousey = e.originalEvent.touches[0].pageY
+
+ overdrag = @fixbutton_initx - @width - mousex
+ if overdrag > 0 # Overdragged
+ overdrag_percent = 1 + overdrag/300
+ mousex = (mousex + (@fixbutton_initx-@width)*overdrag_percent)/(1+overdrag_percent)
+ targetx = @fixbutton_initx - mousex - @fixbutton_addx
+ targety = @fixbutton_inity - mousey - @fixbutton_addy
+
+ if @move_lock == "x"
+ targety = @fixbutton_inity
+ else if @move_lock == "y"
+ targetx = @fixbutton_initx
+
+ if not @move_lock or @move_lock == "x"
+ @fixbutton[0].style.left = (mousex + @fixbutton_addx) + "px"
+ if @tag
+ @tag[0].style.transform = "translateX(#{0 - targetx}px)"
+
+ if not @move_lock or @move_lock == "y"
+ @fixbutton[0].style.top = (mousey + @fixbutton_addy) + "px"
+ if @internals.tag
+ @internals.tag[0].style.transform = "translateY(#{0 - targety}px)"
+
+ #if @move_lock == "x"
+ # @fixbutton[0].style.left = "#{@fixbutton_targetx} px"
+ #@fixbutton[0].style.top = "#{@fixbutton_inity}px"
+ #if @move_lock == "y"
+ # @fixbutton[0].style.top = "#{@fixbutton_targety} px"
+
+ # Check if opened
+ if (not @opened and targetx > @width/3) or (@opened and targetx > @width*0.9)
+ @fixbutton_targetx = @fixbutton_initx - @width # Make it opened
+ else
+ @fixbutton_targetx = @fixbutton_initx
+
+ if (not @internals.opened and 0 - targety > @page_height/10) or (@internals.opened and 0 - targety > @page_height*0.95)
+ @fixbutton_targety = @page_height - @fixbutton_inity - 50
+ else
+ @fixbutton_targety = @fixbutton_inity
+
+
+ # Stop dragging the fixbutton
+ stopDrag: ->
+ @fixbutton.parents().off "mousemove touchmove"
+ @fixbutton.off "mousemove touchmove"
+ @fixbutton.css("pointer-events", "")
+ $(".drag-bg").remove()
+ if not @fixbutton.hasClass("dragging")
+ return
+ @fixbutton.removeClass("dragging")
+
+ # Move back to initial position
+ if @fixbutton_targetx != @fixbutton.offset().left
+ # Animate fixbutton
+ if @move_lock == "y"
+ top = @fixbutton_targety
+ left = @fixbutton_initx
+ if @move_lock == "x"
+ top = @fixbutton_inity
+ left = @fixbutton_targetx
+ @fixbutton.stop().animate {"left": left, "top": top}, 500, "easeOutBack", =>
+ # Switch back to auto align
+ if @fixbutton_targetx == @fixbutton_initx # Closed
+ @fixbutton.css("left", "auto")
+ else # Opened
+ @fixbutton.css("left", left)
+
+ $(".fixbutton-bg").trigger "mouseout" # Switch fixbutton back to normal status
+
+ @stopDragX()
+ @internals.stopDragY()
+ @move_lock = null
+
+ stopDragX: ->
+ # Animate sidebar and iframe
+ if @fixbutton_targetx == @fixbutton_initx or @move_lock == "y"
+ # Closed
+ targetx = 0
+ @opened = false
+ else
+ # Opened
+ targetx = @width
+ if @opened
+ @onOpened()
+ else
+ @when_loaded.done =>
+ @onOpened()
+ @opened = true
+
+ # Animate the sidebar with a CSS transition
+ if @tag
+ @tag.css("transition", "0.4s ease-out")
+ @tag.css("transform", "translateX(-#{targetx}px)").one transitionEnd, =>
+ @tag.css("transition", "")
+ if not @opened
+ @container.remove()
+ @container = null
+ if @tag
+ @tag.remove()
+ @tag = null
+
+ # Revert body transformations
+ @log "stopdrag", "opened:", @opened
+ if not @opened
+ @onClosed()
+
+ sign: (inner_path, privatekey) ->
+ @wrapper.displayProgress("sign", "Signing: #{inner_path}...", 0)
+ @wrapper.ws.cmd "siteSign", {privatekey: privatekey, inner_path: inner_path, update_changed_files: true}, (res) =>
+ if res == "ok"
+ @wrapper.displayProgress("sign", "#{inner_path} signed!", 100)
+ else
+ @wrapper.displayProgress("sign", "Error signing #{inner_path}", -1)
+
+ publish: (inner_path, privatekey) ->
+ @wrapper.ws.cmd "sitePublish", {privatekey: privatekey, inner_path: inner_path, sign: true, update_changed_files: true}, (res) =>
+ if res == "ok"
+ @wrapper.notifications.add "sign", "done", "#{inner_path} Signed and published!", 5000
+
+ onOpened: ->
+ @log "Opened"
+ @scrollable()
+
+ # Re-calculate height when site admin opened or closed
+ @tag.find("#checkbox-owned, #checkbox-autodownloadoptional").off("click touchend").on "click touchend", =>
+ setTimeout (=>
+ @scrollable()
+ ), 300
+
+ # Site limit button
+ @tag.find("#button-sitelimit").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "siteSetLimit", $("#input-sitelimit").val(), (res) =>
+ if res == "ok"
+ @wrapper.notifications.add "done-sitelimit", "done", "Site storage limit modified!", 5000
+ @updateHtmlTag()
+ return false
+
+ # Site autodownload limit button
+ @tag.find("#button-autodownload_bigfile_size_limit").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "siteSetAutodownloadBigfileLimit", $("#input-autodownload_bigfile_size_limit").val(), (res) =>
+ if res == "ok"
+ @wrapper.notifications.add "done-bigfilelimit", "done", "Site bigfile auto download limit modified!", 5000
+ @updateHtmlTag()
+ return false
+
+ # Database reload
+ @tag.find("#button-dbreload").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "dbReload", [], =>
+ @wrapper.notifications.add "done-dbreload", "done", "Database schema reloaded!", 5000
+ @updateHtmlTag()
+ return false
+
+ # Database rebuild
+ @tag.find("#button-dbrebuild").off("click touchend").on "click touchend", =>
+ @wrapper.notifications.add "done-dbrebuild", "info", "Database rebuilding...."
+ @wrapper.ws.cmd "dbRebuild", [], =>
+ @wrapper.notifications.add "done-dbrebuild", "done", "Database rebuilt!", 5000
+ @updateHtmlTag()
+ return false
+
+ # Update site
+ @tag.find("#button-update").off("click touchend").on "click touchend", =>
+ @tag.find("#button-update").addClass("loading")
+ @wrapper.ws.cmd "siteUpdate", @wrapper.site_info.address, =>
+ @wrapper.notifications.add "done-updated", "done", "Site updated!", 5000
+ @tag.find("#button-update").removeClass("loading")
+ return false
+
+ # Pause site
+ @tag.find("#button-pause").off("click touchend").on "click touchend", =>
+ @tag.find("#button-pause").addClass("hidden")
+ @wrapper.ws.cmd "sitePause", @wrapper.site_info.address
+ return false
+
+ # Resume site
+ @tag.find("#button-resume").off("click touchend").on "click touchend", =>
+ @tag.find("#button-resume").addClass("hidden")
+ @wrapper.ws.cmd "siteResume", @wrapper.site_info.address
+ return false
+
+ # Delete site
+ @tag.find("#button-delete").off("click touchend").on "click touchend", =>
+ @wrapper.displayConfirm "Are you sure?", ["Delete this site", "Blacklist"], (confirmed) =>
+ if confirmed == 1
+ @tag.find("#button-delete").addClass("loading")
+ @wrapper.ws.cmd "siteDelete", @wrapper.site_info.address, ->
+ document.location = $(".fixbutton-bg").attr("href")
+ else if confirmed == 2
+ @wrapper.displayPrompt "Blacklist this site", "text", "Delete and Blacklist", "Reason", (reason) =>
+ @tag.find("#button-delete").addClass("loading")
+ @wrapper.ws.cmd "siteblockAdd", [@wrapper.site_info.address, reason]
+ @wrapper.ws.cmd "siteDelete", @wrapper.site_info.address, ->
+ document.location = $(".fixbutton-bg").attr("href")
+
+
+ return false
+
+ # Owned checkbox
+ @tag.find("#checkbox-owned").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "siteSetOwned", [@tag.find("#checkbox-owned").is(":checked")]
+
+ # Autodownload optional files checkbox
+ @tag.find("#checkbox-autodownloadoptional").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "siteSetAutodownloadoptional", [@tag.find("#checkbox-autodownloadoptional").is(":checked")]
+
+ # Change identity button
+ @tag.find("#button-identity").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "certSelect"
+ return false
+
+ # Save settings
+ @tag.find("#button-settings").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "fileGet", "content.json", (res) =>
+ data = JSON.parse(res)
+ data["title"] = $("#settings-title").val()
+ data["description"] = $("#settings-description").val()
+ json_raw = unescape(encodeURIComponent(JSON.stringify(data, undefined, '\t')))
+ @wrapper.ws.cmd "fileWrite", ["content.json", btoa(json_raw), true], (res) =>
+ if res != "ok" # fileWrite failed
+ @wrapper.notifications.add "file-write", "error", "File write error: #{res}"
+ else
+ @wrapper.notifications.add "file-write", "done", "Site settings saved!", 5000
+ if @wrapper.site_info.privatekey
+ @wrapper.ws.cmd "siteSign", {privatekey: "stored", inner_path: "content.json", update_changed_files: true}
+ @updateHtmlTag()
+ return false
+
+
+ # Open site directory
+ @tag.find("#link-directory").off("click touchend").on "click touchend", =>
+ @wrapper.ws.cmd "serverShowdirectory", ["site", @wrapper.site_info.address]
+ return false
+
+ # Copy site with peers
+ @tag.find("#link-copypeers").off("click touchend").on "click touchend", (e) =>
+ copy_text = e.currentTarget.href
+ handler = (e) =>
+ e.clipboardData.setData('text/plain', copy_text)
+ e.preventDefault()
+ @wrapper.notifications.add "copy", "done", "Site address with peers copied to your clipboard", 5000
+ document.removeEventListener('copy', handler, true)
+
+ document.addEventListener('copy', handler, true)
+ document.execCommand('copy')
+ return false
+
+ # Sign and publish content.json
+ $(document).on "click touchend", =>
+ @tag?.find("#button-sign-publish-menu").removeClass("visible")
+ @tag?.find(".contents + .flex").removeClass("sign-publish-flex")
+
+ @tag.find(".contents-content").off("click touchend").on "click touchend", (e) =>
+ $("#input-contents").val(e.currentTarget.innerText);
+ return false;
+
+ menu = new Menu(@tag.find("#menu-sign-publish"))
+ menu.elem.css("margin-top", "-130px") # Open upwards
+ menu.addItem "Sign", =>
+ inner_path = @tag.find("#input-contents").val()
+
+ @wrapper.ws.cmd "fileRules", {inner_path: inner_path}, (rules) =>
+ if @wrapper.site_info.auth_address in rules.signers
+ # ZeroID or other ID provider
+ @sign(inner_path)
+ else if @wrapper.site_info.privatekey
+ # Privatekey stored in users.json
+ @sign(inner_path, "stored")
+ else
+ # Ask the user for privatekey
+ @wrapper.displayPrompt "Enter your private key:", "password", "Sign", "", (privatekey) => # Prompt the private key
+ @sign(inner_path, privatekey)
+
+ @tag.find(".contents + .flex").removeClass "active"
+ menu.hide()
+
+ menu.addItem "Publish", =>
+ inner_path = @tag.find("#input-contents").val()
+ @wrapper.ws.cmd "sitePublish", {"inner_path": inner_path, "sign": false}
+
+ @tag.find(".contents + .flex").removeClass "active"
+ menu.hide()
+
+ @tag.find("#menu-sign-publish").off("click touchend").on "click touchend", =>
+ if window.visible_menu == menu
+ @tag.find(".contents + .flex").removeClass "active"
+ menu.hide()
+ else
+ @tag.find(".contents + .flex").addClass "active"
+ @tag.find(".content-wrapper").prop "scrollTop", 10000
+ menu.show()
+ return false
+
+ $("body").on "click", =>
+ if @tag
+ @tag.find(".contents + .flex").removeClass "active"
+
+ @tag.find("#button-sign-publish").off("click touchend").on "click touchend", =>
+ inner_path = @tag.find("#input-contents").val()
+
+ @wrapper.ws.cmd "fileRules", {inner_path: inner_path}, (rules) =>
+ if @wrapper.site_info.auth_address in rules.signers
+ # ZeroID or other ID provider
+ @publish(inner_path, null)
+ else if @wrapper.site_info.privatekey
+ # Privatekey stored in users.json
+ @publish(inner_path, "stored")
+ else
+ # Ask the user for privatekey
+ @wrapper.displayPrompt "Enter your private key:", "password", "Sign", "", (privatekey) => # Prompt the private key
+ @publish(inner_path, privatekey)
+ return false
+
+ # Close
+ @tag.find(".close").off("click touchend").on "click touchend", (e) =>
+ @close()
+ return false
+
+ @loadGlobe()
+
+ close: ->
+ @move_lock = "x"
+ @startDrag()
+ @stopDrag()
+
+
+ onClosed: ->
+ $(window).off "resize"
+ $(window).on "resize", @resized
+ $(document.body).css("transition", "0.6s ease-in-out").removeClass("body-sidebar").on transitionEnd, (e) =>
+ if e.target == document.body and not $(document.body).hasClass("body-sidebar") and not $(document.body).hasClass("body-internals")
+ $(document.body).css("height", "auto").css("perspective", "").css("will-change", "").css("transition", "").off transitionEnd
+ @unloadGlobe()
+
+ # We don't need the site info anymore
+ @wrapper.setSiteInfo = @original_set_site_info
+
+
+ loadGlobe: =>
+ if @tag.find(".globe").hasClass("loading")
+ setTimeout (=>
+ if typeof(DAT) == "undefined" # Globe script not loaded, do it first
+ script_tag = $("
+