diff --git a/.forgejo/workflows/build-on-commit.yml b/.forgejo/workflows/build-on-commit.yml
new file mode 100644
index 00000000..e8f0d2e3
--- /dev/null
+++ b/.forgejo/workflows/build-on-commit.yml
@@ -0,0 +1,40 @@
+name: Build Docker Image on Commit
+
+on:
+ push:
+ branches:
+ - main
+ tags:
+ - '!' # Exclude tags
+
+jobs:
+ build-and-publish:
+ runs-on: docker-builder
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set REPO_VARS
+ id: repo-url
+ run: |
+ echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
+ echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
+
+ - name: Login to OCI registry
+ run: |
+ echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
+
+ - name: Build and push Docker images
+ run: |
+ # Build Docker image with commit SHA
+ docker build -t $REPO_HOST/$REPO_PATH:${{ github.sha }} .
+ docker push $REPO_HOST/$REPO_PATH:${{ github.sha }}
+
+ # Build Docker image with nightly tag
+ docker tag $REPO_HOST/$REPO_PATH:${{ github.sha }} $REPO_HOST/$REPO_PATH:nightly
+ docker push $REPO_HOST/$REPO_PATH:nightly
+
+ # Remove local images to save storage
+ docker rmi $REPO_HOST/$REPO_PATH:${{ github.sha }}
+ docker rmi $REPO_HOST/$REPO_PATH:nightly
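
For orientation, a minimal Python sketch (illustration only; the server URL, repository, and SHA below are hypothetical) of the image naming this workflow produces; the scheme-stripping mirrors the `sed` call in the `Set REPO_VARS` step:

```python
# Illustration of the workflow's naming scheme, not part of the repo.
import re

def image_names(server_url, repository, sha):
    repo_host = re.sub(r"https?://", "", server_url)  # same effect as the sed call
    base = "%s/%s" % (repo_host, repository)
    return [base + ":" + sha, base + ":nightly"]

print(image_names("https://git.example.org", "owner/zeronet", "abc123"))
# ['git.example.org/owner/zeronet:abc123', 'git.example.org/owner/zeronet:nightly']
```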
diff --git a/.forgejo/workflows/build-on-tag.yml b/.forgejo/workflows/build-on-tag.yml
new file mode 100644
index 00000000..888102b6
--- /dev/null
+++ b/.forgejo/workflows/build-on-tag.yml
@@ -0,0 +1,37 @@
+name: Build and Publish Docker Image on Tag
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ build-and-publish:
+ runs-on: docker-builder
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set REPO_VARS
+ id: repo-url
+ run: |
+ echo "REPO_HOST=$(echo "${{ github.server_url }}" | sed 's~http[s]*://~~g')" >> $GITHUB_ENV
+ echo "REPO_PATH=${{ github.repository }}" >> $GITHUB_ENV
+
+ - name: Login to OCI registry
+ run: |
+ echo "${{ secrets.OCI_TOKEN }}" | docker login $REPO_HOST -u "${{ secrets.OCI_USER }}" --password-stdin
+
+ - name: Build and push Docker image
+ run: |
+ TAG=${{ github.ref_name }} # Get the tag name from the context
+ # Build and push the Docker image (--push requires BuildKit/buildx)
+ docker build -t $REPO_HOST/$REPO_PATH:$TAG --push .
+ # Tag and push latest
+ docker tag $REPO_HOST/$REPO_PATH:$TAG $REPO_HOST/$REPO_PATH:latest
+ docker push $REPO_HOST/$REPO_PATH:latest
+
+ # Remove the local image to save storage
+ docker rmi $REPO_HOST/$REPO_PATH:$TAG
+ docker rmi $REPO_HOST/$REPO_PATH:latest
\ No newline at end of file
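
A note on `github.ref_name`: for a tag push it is the bare tag name. A tiny sketch (an assumption about the context semantics, not Forgejo code) of the mapping this workflow relies on:

```python
# "refs/tags/v0.9.0" -> "v0.9.0"; "refs/heads/main" -> "main" (hypothetical helper)
def ref_name(ref):
    return ref.split("/", 2)[2]  # drop the "refs/<kind>/" prefix

assert ref_name("refs/tags/v0.9.0") == "v0.9.0"
assert ref_name("refs/heads/main") == "main"
```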
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index 8c9f6621..aab991d5 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1 +1,10 @@
-custom: https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/
+github: canewsin
+patreon: # Replace with a single Patreon username e.g., user1
+open_collective: # Replace with a single Open Collective username e.g., user1
+ko_fi: canewsin
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: canewsin
+issuehunt: # Replace with a single IssueHunt username e.g., user1
+otechie: # Replace with a single Otechie username e.g., user1
+custom: ['https://paypal.me/PramUkesh', 'https://zerolink.ml/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/']
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..27b5c924
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,72 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ py3-latest ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ py3-latest ]
+ schedule:
+ - cron: '32 19 * * 2'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'javascript', 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project uses a compiled language.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 6eaf3c6b..2bdcaf95 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,49 +4,48 @@ on: [push, pull_request]
jobs:
test:
-
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
strategy:
max-parallel: 16
matrix:
- python-version: [3.6, 3.7, 3.8, 3.9]
+ python-version: ["3.7", "3.8", "3.9"]
steps:
- - name: Checkout ZeroNet
- uses: actions/checkout@v2
- with:
- submodules: 'true'
+ - name: Checkout ZeroNet
+ uses: actions/checkout@v2
+ with:
+ submodules: "true"
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
- with:
- python-version: ${{ matrix.python-version }}
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
- - name: Prepare for installation
- run: |
- python3 -m pip install setuptools
- python3 -m pip install --upgrade pip wheel
- python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
+ - name: Prepare for installation
+ run: |
+ python3 -m pip install setuptools
+ python3 -m pip install --upgrade pip wheel
+ python3 -m pip install --upgrade codecov coveralls flake8 mock pytest==4.6.3 pytest-cov selenium
- - name: Install
- run: |
- python3 -m pip install --upgrade -r requirements.txt
- python3 -m pip list
+ - name: Install
+ run: |
+ python3 -m pip install --upgrade -r requirements.txt
+ python3 -m pip list
- - name: Prepare for tests
- run: |
- openssl version -a
- echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6
+ - name: Prepare for tests
+ run: |
+ openssl version -a
+ echo 0 | sudo tee /proc/sys/net/ipv6/conf/all/disable_ipv6
- - name: Test
- run: |
- catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
- export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
- export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
- export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
- export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
- export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
- export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
- find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
- flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
+ - name: Test
+ run: |
+ catchsegv python3 -m pytest src/Test --cov=src --cov-config src/Test/coverage.ini
+ export ZERONET_LOG_DIR="log/CryptMessage"; catchsegv python3 -m pytest -x plugins/CryptMessage/Test
+ export ZERONET_LOG_DIR="log/Bigfile"; catchsegv python3 -m pytest -x plugins/Bigfile/Test
+ export ZERONET_LOG_DIR="log/AnnounceLocal"; catchsegv python3 -m pytest -x plugins/AnnounceLocal/Test
+ export ZERONET_LOG_DIR="log/OptionalManager"; catchsegv python3 -m pytest -x plugins/OptionalManager/Test
+ export ZERONET_LOG_DIR="log/Multiuser"; mv plugins/disabled-Multiuser plugins/Multiuser && catchsegv python -m pytest -x plugins/Multiuser/Test
+ export ZERONET_LOG_DIR="log/Bootstrapper"; mv plugins/disabled-Bootstrapper plugins/Bootstrapper && catchsegv python -m pytest -x plugins/Bootstrapper/Test
+ find src -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ find plugins -name "*.json" | xargs -n 1 python3 -c "import json, sys; print(sys.argv[1], end=' '); json.load(open(sys.argv[1])); print('[OK]')"
+ flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics --exclude=src/lib/pyaes/
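
The `find ... | xargs ... python3 -c` one-liners above validate every `.json` file in the tree. An expanded, equivalent sketch for running the same check locally (standalone Python, same behavior: print each path and fail on the first malformed file):

```python
import json
import pathlib
import sys

def check_json_files(root):
    # Mirrors the workflow's inline check: parse each .json file, print a marker.
    for path in sorted(pathlib.Path(root).rglob("*.json")):
        print(path, end=" ")
        with open(path) as f:
            json.load(f)  # raises json.JSONDecodeError on invalid JSON
        print("[OK]")

if __name__ == "__main__":
    check_json_files(sys.argv[1] if len(sys.argv) > 1 else "src")
```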
diff --git a/.gitignore b/.gitignore
index 38dd3a34..636cd115 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,7 @@ __pycache__/
# Hidden files
.*
+!/.forgejo
!/.github
!/.gitignore
!/.travis.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b49b9ef6..6974d18a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,85 @@
-### ZeroNet 0.7.2 (2020-09-?) Rev4206?
+### ZeroNet 0.9.0 (2023-07-12) Rev4630
+ - Fix RDos Issue in Plugins https://github.com/ZeroNetX/ZeroNet-Plugins/pull/9
+ - Add trackers to Config.py as a failsafe in case trackers.txt is missing
+ - Added Proxy links
+ - Fix pysha3 dep installation issue
+ - FileRequest -> Remove unnecessary check, fix error wording
+ - Fix response when site is missing for `actionAs`
+### ZeroNet 0.8.5 (2023-02-12) Rev4625
+ - Fix (https://github.com/ZeroNetX/ZeroNet/pull/202) for SSL cert generation failure on Windows.
+ - Default theme-class for missing value in `users.json`.
+ - Fetch Stats Plugin changes.
+
+### ZeroNet 0.8.4 (2022-12-12) Rev4620
+ - Increase default site size limit to 25MB.
+
+### ZeroNet 0.8.3 (2022-12-11) Rev4611
+ - main.py -> Fix accessing unassigned variable
+ - ContentManager -> Support for multiSig
+ - SiteStorage.py -> Fix accessing unassigned variable
+ - ContentManager.py -> Improve logging of valid signers
+
+### ZeroNet 0.8.2 (2022-11-01) Rev4610
+ - Fix Startup Error when plugins dir missing
+ - Move trackers to separate file & add more trackers
+ - Config:: Skip loading missing tracker files
+ - Added documentation for getRandomPort fn
+
+### ZeroNet 0.8.1 (2022-10-01) Rev4600
+ - Fix readdress loop (cherry-pick previously added commit from conservancy)
+ - Remove Patreon badge
+ - Update README-ru.md (#177)
+ - Include inner_path of failed request for signing in error msg and response
+ - Don't Fail Silently When Cert is Not Selected
+ - Console Log Updates, Specify min supported ZeroNet version for Rust version Protocol Compatibility
+ - Update FUNDING.yml
+
+### ZeroNet 0.8.0 (2022-05-27) Rev4591
+ - Revert File Open to catch File Access Errors.
+
+### ZeroNet 0.7.9-patch (2022-05-26) Rev4586
+ - Use xescape(s) from zeronet-conservancy
+ - actionUpdate response Optimisation
+ - Fetch Plugins Repo Updates
+ - Fix Unhandled File Access Errors
+ - Create codeql-analysis.yml
+
+### ZeroNet 0.7.9 (2022-05-26) Rev4585
+ - Rust Version Compatibility for update Protocol msg
+ - Removed non-working trackers.
+ - Dynamically Load Trackers from Dashboard Site.
+ - Tracker Supply Improvements.
+ - Fix Repo Url for Bug Report
+ - First Party Tracker Update Service using Dashboard Site.
+ - remove old v2 onion service [#158](https://github.com/ZeroNetX/ZeroNet/pull/158)
+
+### ZeroNet 0.7.8 (2022-03-02) Rev4580
+ - Update Plugins with some bug fixes and Improvements
+
+### ZeroNet 0.7.6 (2022-01-12) Rev4565
+ - Sync Plugin Updates
+ - Clean up tor v3 patch [#115](https://github.com/ZeroNetX/ZeroNet/pull/115)
+ - Add More Default Plugins to Repo
+ - Doubled Site Publish Limits
+ - Update ZeroNet Repo Urls [#103](https://github.com/ZeroNetX/ZeroNet/pull/103)
+ - UI/UX: Increases Size of Notifications Close Button [#106](https://github.com/ZeroNetX/ZeroNet/pull/106)
+ - Moved Plugins to Separate Repo
+ - Added `access_key` variable in Config; it is used to access restricted plugins when the Multiuser plugin is enabled. When the Multiuser plugin is enabled we cannot access some pages like /Stats; this key removes that restriction.
+ - Added `last_connection_id_current_version` to ConnectionServer, helpful to estimate the number of connections from the current client version.
+ - Added current version connections to the /Stats page (see the previous point).
+
+### ZeroNet 0.7.5 (2021-11-28) Rev4560
+ - Add more default trackers
+ - Change default homepage address to `1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`
+ - Change default update site address to `1Update8crprmciJHwp2WXqkx2c4iYp18`
+
+### ZeroNet 0.7.3 (2021-11-28) Rev4555
+ - Fix xrange is undefined error
+ - Fix Incorrect viewport on mobile while loading
+ - Tor-V3 Patch by anonymoose
+
### ZeroNet 0.7.1 (2019-07-01) Rev4206
### Added
diff --git a/dockerfiles/zeronet-Dockerfile b/Dockerfile.arm64v8
similarity index 55%
rename from dockerfiles/zeronet-Dockerfile
rename to Dockerfile.arm64v8
index 92c67c84..d27b7620 100644
--- a/dockerfiles/zeronet-Dockerfile
+++ b/Dockerfile.arm64v8
@@ -1,36 +1,34 @@
-# Base settings
+FROM alpine:3.12
+
+# Base settings
ENV HOME /root
-# Install packages
-
-COPY install-dep-packages.sh /root/install-dep-packages.sh
-
-RUN /root/install-dep-packages.sh install
-
COPY requirements.txt /root/requirements.txt
-RUN pip3 install -r /root/requirements.txt \
- && /root/install-dep-packages.sh remove-makedeps \
+# Install ZeroNet
+RUN apk --update --no-cache --no-progress add python3 python3-dev gcc libffi-dev musl-dev make tor openssl \
+ && pip3 install -r /root/requirements.txt \
+ && apk del python3-dev gcc libffi-dev musl-dev make \
&& echo "ControlPort 9051" >> /etc/tor/torrc \
&& echo "CookieAuthentication 1" >> /etc/tor/torrc
-
+
RUN python3 -V \
&& python3 -m pip list \
&& tor --version \
&& openssl version
-# Add Zeronet source
-
+# Add ZeroNet source
COPY . /root
VOLUME /root/data
-# Control if Tor proxy is started
+# Control if Tor proxy is started
ENV ENABLE_TOR false
WORKDIR /root
-# Set upstart command
+# Set startup command
CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
-# Expose ports
+# Expose ports
EXPOSE 43110 26552
+
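
The `CMD` line compresses the Tor toggle into one shell expression. A rough Python equivalent of its behavior at container start (for explanation only, not part of the image):

```python
# Equivalent of: (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 ...
import os
import subprocess

if os.environ.get("ENABLE_TOR", "false") == "true":
    subprocess.Popen(["tor"])  # start Tor in the background, like `tor &`
subprocess.run([
    "python3", "zeronet.py",
    "--ui_ip", "0.0.0.0",
    "--fileserver_port", "26552",
])
```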
diff --git a/README-ru.md b/README-ru.md
index 1d0bafc1..7d557727 100644
--- a/README-ru.md
+++ b/README-ru.md
@@ -3,206 +3,131 @@
[简体中文](./README-zh-cn.md)
[English](./README.md)
-Decentralized websites using Bitcoin cryptography and the BitTorrent network - https://zeronet.dev
-
+Decentralized websites using Bitcoin cryptography and the BitTorrent protocol - https://zeronet.dev ([mirror in ZeroNet](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/)). Unlike Bitcoin, ZeroNet doesn't need a blockchain to run; it uses the same cryptography to ensure data integrity and validation.
 ## Why?
-* We believe in an open, free, and uncensored network and communication.
-* No single point of failure: A site stays online as long as at least 1 peer is serving it.
-* No hosting costs: Sites are served by their visitors.
-* Impossible to shut down: It's nowhere because it's everywhere.
-* Fast and works offline: You can access a site even when the Internet is unavailable.
-
+- We believe in open, free, and censorship-resistant networks and communication.
+- No single point of failure: A site stays online as long as at least 1 peer is serving it.
+- No hosting costs: Sites are served by their visitors.
+- Impossible to shut down: It's nowhere because it's everywhere.
+- Fast and available offline: You can access a site because a copy of it is stored on your computer and with your peers.
 ## Features
- * Real-time updated sites
- * Namecoin .bit domain support
- * Easy to set up: unpack & run
- * One-click website cloning
- * Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
- based authorization: Your account is protected by the same cryptography as your Bitcoin wallet
- * Built-in SQL server with P2P data synchronization: Allows easier site development and faster page loads
- * Anonymity: Full Tor network support with .onion hidden services instead of IPv4 addresses
- * TLS encrypted connections
- * Automatic uPnP port opening
- * Plugin for multiuser (openproxy) support
- * Works with any browser and operating system
+- Real-time updated sites
+- `.bit` domain support ([Namecoin](https://www.namecoin.org))
+- Easy to install: just unpack and run
+- One-click site cloning
+- Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
+ based authorization: Your account is protected by the same cryptography as your Bitcoin wallet
+- Built-in SQL server with P2P data synchronization: Allows easier site development and faster page loads
+- Anonymity: Full Tor network support, using `.onion` hidden services instead of IPv4 addresses
+- TLS encrypted connections
+- Automatic UPnP port opening
+- Plugin for multiuser (openproxy) support
+- Works with any browser and operating system
+
+## Current limitations
+
+- File transactions are not compressed
+- No private sites
 ## How does it work?
-* After starting `zeronet.py` you can visit sites (zeronet sites) using the address
- `http://127.0.0.1:43110/{zeronet_address}`
-(e.g. `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
-* When you visit a new zeronet site, it tries to find peers using BitTorrent
- so it can download the site files (html, css, js...) from them.
-* Each visited site is also served by you. (I.e. stored on your computer)
-* Every site contains a `content.json` file, which holds all other files in a sha512 hash
- and a signature generated using the site's private key.
-* If the site owner (who has the private key for the site address) modifies the site, then he/she
+- After starting `zeronet.py` you can visit sites in ZeroNet using the address
+ `http://127.0.0.1:43110/{zeronet_address}`
+ (For example: `http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d`).
+- When you visit a new site in ZeroNet, it tries to find peers using the BitTorrent protocol
+ so it can download the site files (HTML, CSS, JS, etc.) from them.
+- After visiting a site, you become one of its peers too.
+- Every site contains a `content.json` file, which holds the SHA512 hashes of all other files
+ and a signature created with the site's private key.
+- If the site owner (the one who holds the private key for the site address) modifies the site, he
 signs the new `content.json` and publishes it to the peers. After that, the peers verify the integrity of `content.json`
- (using the signature), then they download the modified files and publish the new content to other peers.
-
-#### [Slideshow about ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
-#### [Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
-
-#### [ZeroNet developer documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+ (using the signature), download the modified files, and distribute the new content to other peers (a minimal sketch of this check follows below).
+[Presentation on ZeroNet cryptography, site updates, multi-user sites »](https://docs.google.com/presentation/d/1_2qK1IuOKJ51pgBvllZ9Yu7Au2l551t3XBgyTSvilew/pub?start=false&loop=false&delayms=3000)
+[Frequently asked questions »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/)
+[ZeroNet developer documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
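
A minimal sketch of the integrity check described above (not ZeroNet's actual code; the `files`/`sha512` field layout and the full-length hex digest are simplifying assumptions):

```python
import hashlib
import json

def verify_file(content_json_path, inner_path, file_path):
    # content.json maps each file path to a SHA512 digest; a peer recomputes
    # the digest and compares it before accepting the downloaded file.
    content = json.load(open(content_json_path))
    expected = content["files"][inner_path]["sha512"]
    with open(file_path, "rb") as f:
        actual = hashlib.sha512(f.read()).hexdigest()
    return actual == expected
```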
 ## Screenshots


+[More screenshots in the ZeroNet documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
-#### [More screenshots in the ZeroNet documentation »](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/using_zeronet/sample_sites/)
+## How to join?
+### Windows
-## How to join
+- Download and extract the [ZeroNet-win.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip) archive (26MB)
+- Run `ZeroNet.exe`
-* Download the ZeroBundle package:
- * [Microsoft Windows](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-win.zip)
- * [Apple macOS](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip)
- * [Linux 64-bit](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip)
- * [Linux 32-bit](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip)
-* Unpack anywhere
-* Run `ZeroNet.exe` (win), `ZeroNet(.app)` (osx), `ZeroNet.sh` (linux)
+### macOS
-### Linux terminal
+- Download and extract the [ZeroNet-mac.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-mac.zip) archive (14MB)
+- Run `ZeroNet.app`
-* `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip`
-* `unzip ZeroNet-linux.zip`
-* `cd ZeroNet-linux`
-* Run it with `./ZeroNet.sh`
+### Linux (64-bit)
-It downloads the latest version of ZeroNet, then starts it automatically.
+- Download and extract the [ZeroNet-linux.zip](https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-linux.zip) archive (14MB)
+- Run `./ZeroNet.sh`
-#### Manual installation for Debian Linux
+> **Note**
+> Run it as `./ZeroNet.sh --ui_ip '*' --ui_restrict your_ip_address` to allow remote connections to the web interface.
-* `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
-* `unzip ZeroNet-src.zip`
-* `cd ZeroNet`
-* `sudo apt-get update`
-* `sudo apt-get install python3-pip`
-* `sudo python3 -m pip install -r requirements.txt`
-* Run it with `python3 zeronet.py`
-* Open http://127.0.0.1:43110/ in your browser.
+### Docker
-### [Arch Linux](https://www.archlinux.org)
+The official image is available at: https://hub.docker.com/r/canewsin/zeronet/
-* `git clone https://aur.archlinux.org/zeronet.git`
-* `cd zeronet`
-* `makepkg -srci`
-* `systemctl start zeronet`
-* Open http://127.0.0.1:43110/ in your browser.
+### Android (arm, arm64, x86)
-See the [ArchWiki](https://wiki.archlinux.org)'s [ZeroNet
-article](https://wiki.archlinux.org/index.php/ZeroNet) for further assistance.
+- Requires Android 5.0 Lollipop at minimum
+- [
](https://play.google.com/store/apps/details?id=in.canews.zeronetmobile)
+- Download the APK: https://github.com/canewsin/zeronet_mobile/releases
-### [Gentoo Linux](https://www.gentoo.org)
+### Android (arm, arm64, x86) Lightweight view-only client (1MB)
-* [`layman -a raiagent`](https://github.com/leycec/raiagent)
-* `echo '>=net-vpn/zeronet-0.5.4' >> /etc/portage/package.accept_keywords`
-* *(Optional)* Enable Tor support: `echo 'net-vpn/zeronet tor' >>
- /etc/portage/package.use`
-* `emerge zeronet`
-* `rc-service zeronet start`
-* Open http://127.0.0.1:43110/ in your browser.
+- Requires Android 4.1 Jelly Bean at minimum
+- [
](https://play.google.com/store/apps/details?id=dev.zeronetx.app.lite)
-See `/usr/share/doc/zeronet-*/README.gentoo.bz2` for further assistance.
+### Install from source
-### [FreeBSD](https://www.freebsd.org/)
-
-* `pkg install zeronet` or `cd /usr/ports/security/zeronet/ && make install clean`
-* `sysrc zeronet_enable="YES"`
-* `service zeronet start`
-* Open http://127.0.0.1:43110/ in your browser.
-
-### [Vagrant](https://www.vagrantup.com/)
-
-* `vagrant up`
-* Connect to the VM with `vagrant ssh`
-* `cd /vagrant`
-* Run `python3 zeronet.py --ui_ip 0.0.0.0`
-* Open http://127.0.0.1:43110/ in your browser.
-
-### [Docker](https://www.docker.com/)
-* `docker run -d -v :/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 canewsin/zeronet`
-* This Docker image includes a Tor proxy, which is disabled by default.
- Beware that some hosting providers may not allow you to run Tor on their servers.
- If you want to enable it, set the `ENABLE_TOR` environment variable to `true` (default: `false`). For example:
-
- `docker run -d -e "ENABLE_TOR=true" -v :/root/data -p 15441:15441 -p 127.0.0.1:43110:43110 canewsin/zeronet`
-* Open http://127.0.0.1:43110/ in your browser.
-
-### [Virtualenv](https://virtualenv.readthedocs.org/en/latest/)
-
-* `virtualenv env`
-* `source env/bin/activate`
-* `pip install msgpack gevent`
-* `python3 zeronet.py`
-* Open http://127.0.0.1:43110/ in your browser.
-
-## Current limitations
-
-* File transactions are not compressed
-* No private sites
-
-
-## How can I create a site in ZeroNet?
-
-Shut down zeronet if it is running
-
-```bash
-$ zeronet.py siteCreate
-...
-- Site private key: 23DKQpzxhbVBrAtvLEc2uvk7DZweh4qL3fn3jpM3LgHDczMK2TtYUq
-- Site address: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
-...
-- Site created!
-$ zeronet.py
-...
+```sh
+wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip
+unzip ZeroNet-src.zip
+cd ZeroNet
+sudo apt-get update
+sudo apt-get install python3-pip
+sudo python3 -m pip install -r requirements.txt
```
+- Run `python3 zeronet.py`
-Congratulations, you're done! Now anyone can access your site at
-`http://localhost:43110/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2`
+Open the ZeroHello landing page in your browser at http://127.0.0.1:43110/
-Next steps: [ZeroNet Developer Documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
+## How do I create a ZeroNet site?
+- Click **⋮** > **"Create new, empty site"** in the menu on the [ZeroHello](http://127.0.0.1:43110/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d) site.
+- You will be **redirected** to a brand new site that can only be modified by you!
+- You can find and modify your site's content in the **data/[your_site_address]** directory
+- After making changes, open your site, switch the "0" button in the top right corner to the left, then press the **sign** and **publish** buttons at the bottom
-## How can I modify a ZeroNet site?
-
-* Modify the files located in the data/13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2 directory.
- When you are done with your changes:
-
-```bash
-$ zeronet.py siteSign 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
-- Signing site: 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2...
-Private key (input hidden):
-```
-
-* Enter the private key you got when you created the site, then:
-
-```bash
-$ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
-...
-Site:13DNDk..bhC2 Publishing to 3/10 peers...
-Site:13DNDk..bhC2 Successfuly published to 3 peers
-- Serving files....
-```
-
-* That's it! You have successfully signed and published your changes.
-
+Next steps: [ZeroNet developer documentation](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/site_development/getting_started/)
 ## Support the project
-- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
+
+- Bitcoin: 1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX (Preferred)
- LiberaPay: https://liberapay.com/PramUkesh
- Paypal: https://paypal.me/PramUkesh
-- Others: [Donate](!https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
-
+- Other ways: [Donate](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/#help-to-keep-zeronet-development-alive)
 #### Thank you!
-* More info, help, changelog, zeronet sites: https://www.reddit.com/r/zeronetx/
-* Come, chat with us: [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or on [gitter](https://gitter.im/canewsin/ZeroNet)
-* Email: canews.in@gmail.com
+- Here you can get more information, help, read the changelog, and explore ZeroNet sites: https://www.reddit.com/r/zeronetx/
+- Come chat with us on [#zeronet @ FreeNode](https://kiwiirc.com/client/irc.freenode.net/zeronet) or [Gitter](https://gitter.im/canewsin/ZeroNet)
+- Email: canews.in@gmail.com
diff --git a/README.md b/README.md
index e45d5cad..70b79adc 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,4 @@
# ZeroNet [](https://github.com/ZeroNetX/ZeroNet/actions/workflows/tests.yml) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/faq/) [](https://docs.zeronet.dev/1DeveLopDZL1cHfKi8UXHh2UBEhzH6HhMp/help_zeronet/donate/) [](https://hub.docker.com/r/canewsin/zeronet)
-
 Decentralized websites using Bitcoin crypto and the BitTorrent network - https://zeronet.dev / [ZeroNet Site](http://127.0.0.1:43110/1ZeroNetyV5mKY9JF1gsm82TuBXHpfdLX/). Unlike Bitcoin, ZeroNet doesn't need a blockchain to run; it uses the same cryptography as BTC to ensure data integrity and validation.
@@ -100,6 +99,24 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/
#### Docker
There is an official image, built from source at: https://hub.docker.com/r/canewsin/zeronet/
+### Online Proxies
+Proxies are like seed boxes for sites (i.e. ZNX runs on a cloud VPS); you can try the ZeroNet experience through a proxy. Add your proxy below if you have one.
+
+#### Official ZNX Proxy:
+
+https://proxy.zeronet.dev/
+
+https://zeronet.dev/
+
+#### From Community
+
+https://0net-preview.com/
+
+https://portal.ngnoid.tv/
+
+https://zeronet.ipfsscan.io/
+
+
### Install from source
- `wget https://github.com/ZeroNetX/ZeroNet/releases/latest/download/ZeroNet-src.zip`
diff --git a/build-docker-images.sh b/build-docker-images.sh
deleted file mode 100755
index 8eff34f4..00000000
--- a/build-docker-images.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-set -e
-
-arg_push=
-
-case "$1" in
- --push) arg_push=y ; shift ;;
-esac
-
-default_suffix=alpine
-prefix="${1:-local/}"
-
-for dokerfile in dockerfiles/Dockerfile.* ; do
- suffix="`echo "$dokerfile" | sed 's/.*\/Dockerfile\.//'`"
- image_name="${prefix}zeronet:$suffix"
-
- latest=""
- t_latest=""
- if [ "$suffix" = "$default_suffix" ] ; then
- latest="${prefix}zeronet:latest"
- t_latest="-t ${latest}"
- fi
-
- echo "DOCKER BUILD $image_name"
- docker build -f "$dokerfile" -t "$image_name" $t_latest .
- if [ -n "$arg_push" ] ; then
- docker push "$image_name"
- if [ -n "$latest" ] ; then
- docker push "$latest"
- fi
- fi
-done
diff --git a/dockerfiles/Dockerfile.alpine b/dockerfiles/Dockerfile.alpine
deleted file mode 120000
index 0f848cc8..00000000
--- a/dockerfiles/Dockerfile.alpine
+++ /dev/null
@@ -1 +0,0 @@
-Dockerfile.alpine3.13
\ No newline at end of file
diff --git a/dockerfiles/Dockerfile.alpine3.13 b/dockerfiles/Dockerfile.alpine3.13
deleted file mode 100644
index 79f15b9b..00000000
--- a/dockerfiles/Dockerfile.alpine3.13
+++ /dev/null
@@ -1,44 +0,0 @@
-# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
-# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
-
-FROM alpine:3.13
-
-# Base settings
-ENV HOME /root
-
-# Install packages
-
-# Install packages
-
-COPY install-dep-packages.sh /root/install-dep-packages.sh
-
-RUN /root/install-dep-packages.sh install
-
-COPY requirements.txt /root/requirements.txt
-
-RUN pip3 install -r /root/requirements.txt \
- && /root/install-dep-packages.sh remove-makedeps \
- && echo "ControlPort 9051" >> /etc/tor/torrc \
- && echo "CookieAuthentication 1" >> /etc/tor/torrc
-
-RUN python3 -V \
- && python3 -m pip list \
- && tor --version \
- && openssl version
-
-# Add Zeronet source
-
-COPY . /root
-VOLUME /root/data
-
-# Control if Tor proxy is started
-ENV ENABLE_TOR false
-
-WORKDIR /root
-
-# Set upstart command
-CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
-
-# Expose ports
-EXPOSE 43110 26552
-
diff --git a/dockerfiles/Dockerfile.ubuntu b/dockerfiles/Dockerfile.ubuntu
deleted file mode 120000
index 29adf7ef..00000000
--- a/dockerfiles/Dockerfile.ubuntu
+++ /dev/null
@@ -1 +0,0 @@
-Dockerfile.ubuntu20.04
\ No newline at end of file
diff --git a/dockerfiles/Dockerfile.ubuntu20.04 b/dockerfiles/Dockerfile.ubuntu20.04
deleted file mode 100644
index bc32cf86..00000000
--- a/dockerfiles/Dockerfile.ubuntu20.04
+++ /dev/null
@@ -1,44 +0,0 @@
-# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
-# SEE zeronet-Dockerfile FOR THE SOURCE FILE.
-
-FROM ubuntu:20.04
-
-# Base settings
-ENV HOME /root
-
-# Install packages
-
-# Install packages
-
-COPY install-dep-packages.sh /root/install-dep-packages.sh
-
-RUN /root/install-dep-packages.sh install
-
-COPY requirements.txt /root/requirements.txt
-
-RUN pip3 install -r /root/requirements.txt \
- && /root/install-dep-packages.sh remove-makedeps \
- && echo "ControlPort 9051" >> /etc/tor/torrc \
- && echo "CookieAuthentication 1" >> /etc/tor/torrc
-
-RUN python3 -V \
- && python3 -m pip list \
- && tor --version \
- && openssl version
-
-# Add Zeronet source
-
-COPY . /root
-VOLUME /root/data
-
-# Control if Tor proxy is started
-ENV ENABLE_TOR false
-
-WORKDIR /root
-
-# Set upstart command
-CMD (! ${ENABLE_TOR} || tor&) && python3 zeronet.py --ui_ip 0.0.0.0 --fileserver_port 26552
-
-# Expose ports
-EXPOSE 43110 26552
-
diff --git a/dockerfiles/gen-dockerfiles.sh b/dockerfiles/gen-dockerfiles.sh
deleted file mode 100755
index 75a6edf6..00000000
--- a/dockerfiles/gen-dockerfiles.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-set -e
-
-die() {
- echo "$@" > /dev/stderr
- exit 1
-}
-
-for os in alpine:3.13 ubuntu:20.04 ; do
- prefix="`echo "$os" | sed -e 's/://'`"
- short_prefix="`echo "$os" | sed -e 's/:.*//'`"
-
- zeronet="zeronet-Dockerfile"
-
- dockerfile="Dockerfile.$prefix"
- dockerfile_short="Dockerfile.$short_prefix"
-
- echo "GEN $dockerfile"
-
- if ! test -f "$zeronet" ; then
- die "No such file: $zeronet"
- fi
-
- echo "\
-# THIS FILE IS AUTOGENERATED BY gen-dockerfiles.sh.
-# SEE $zeronet FOR THE SOURCE FILE.
-
-FROM $os
-
-`cat "$zeronet"`
-" > "$dockerfile.tmp" && mv "$dockerfile.tmp" "$dockerfile" && ln -s -f "$dockerfile" "$dockerfile_short"
-done
-
diff --git a/install-dep-packages.sh b/install-dep-packages.sh
deleted file mode 100755
index 655a33aa..00000000
--- a/install-dep-packages.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh
-set -e
-
-do_alpine() {
- local deps="python3 py3-pip openssl tor"
- local makedeps="python3-dev gcc g++ libffi-dev musl-dev make automake autoconf libtool"
-
- case "$1" in
- install)
- apk --update --no-cache --no-progress add $deps $makedeps
- ;;
- remove-makedeps)
- apk del $makedeps
- ;;
- esac
-}
-
-do_ubuntu() {
- local deps="python3 python3-pip openssl tor"
- local makedeps="python3-dev gcc g++ libffi-dev make automake autoconf libtool"
-
- case "$1" in
- install)
- apt-get update && \
- apt-get install --no-install-recommends -y $deps $makedeps && \
- rm -rf /var/lib/apt/lists/*
- ;;
- remove-makedeps)
- apt-get remove -y $makedeps
- ;;
- esac
-}
-
-if test -f /etc/os-release ; then
- . /etc/os-release
-elif test -f /usr/lib/os-release ; then
- . /usr/lib/os-release
-else
- echo "No such file: /etc/os-release" > /dev/stderr
- exit 1
-fi
-
-case "$ID" in
- ubuntu) do_ubuntu "$@" ;;
- alpine) do_alpine "$@" ;;
- *)
- echo "Unsupported OS ID: $ID" > /dev/stderr
- exit 1
-esac
diff --git a/plugins b/plugins
new file mode 160000
index 00000000..689d9309
--- /dev/null
+++ b/plugins
@@ -0,0 +1 @@
+Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
diff --git a/requirements.txt b/requirements.txt
index b3df57ea..538a6dfc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ greenlet==0.4.16; python_version <= "3.6"
gevent>=20.9.0; python_version >= "3.7"
msgpack>=0.4.4
base58
-merkletools
+merkletools @ git+https://github.com/ZeroNetX/pymerkletools.git@dev
rsa
PySocks>=1.6.8
pyasn1
diff --git a/src/Config.py b/src/Config.py
index edc63d26..a9208d55 100644
--- a/src/Config.py
+++ b/src/Config.py
@@ -13,8 +13,8 @@ import time
class Config(object):
def __init__(self, argv):
- self.version = "0.7.6"
- self.rev = 4565
+ self.version = "0.9.0"
+ self.rev = 4630
self.argv = argv
self.action = None
self.test_parser = None
@@ -82,45 +82,12 @@ class Config(object):
from Crypt import CryptHash
 access_key_default = CryptHash.random(24, "base64") # Used to allow restricted plugins when multiuser plugin is enabled
trackers = [
- # by zeroseed at http://127.0.0.1:43110/19HKdTAeBh5nRiKn791czY7TwRB1QNrf1Q/?:users/1HvNGwHKqhj3ZMEM53tz6jbdqe4LRpanEu:zn:dc17f896-bf3f-4962-bdd4-0a470040c9c5
- "zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441",
- "zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441",
- "zero://my562dxpjropcd5hy3nd5pemsc4aavbiptci5amwxzbelmzgkkuxpvid.onion:15441",
- "zero://pn4q2zzt2pw4nk7yidxvsxmydko7dfibuzxdswi6gu6ninjpofvqs2id.onion:15441",
- "zero://6i54dd5th73oelv636ivix6sjnwfgk2qsltnyvswagwphub375t3xcad.onion:15441",
- "zero://tl74auz4tyqv4bieeclmyoe4uwtoc2dj7fdqv4nc4gl5j2bwg2r26bqd.onion:15441",
- "zero://wlxav3szbrdhest4j7dib2vgbrd7uj7u7rnuzg22cxbih7yxyg2hsmid.onion:15441",
- "zero://zy7wttvjtsijt5uwmlar4yguvjc2gppzbdj4v6bujng6xwjmkdg7uvqd.onion:15441",
-
- # ZeroNet 0.7.2 defaults:
- "zero://boot3rdez4rzn36x.onion:15441",
"http://open.acgnxtracker.com:80/announce", # DE
"http://tracker.bt4g.com:2095/announce", # Cloudflare
- "zero://2602:ffc5::c5b2:5360:26312", # US/ATL
- "zero://145.239.95.38:15441",
- "zero://188.116.183.41:26552",
- "zero://145.239.95.38:15441",
- "zero://211.125.90.79:22234",
- "zero://216.189.144.82:26312",
- "zero://45.77.23.92:15555",
- "zero://51.15.54.182:21041",
+ "http://tracker.files.fm:6969/announce",
+ "http://t.publictracker.xyz:6969/announce",
"https://tracker.lilithraws.cf:443/announce",
- "udp://code2chicken.nl:6969/announce",
- "udp://abufinzio.monocul.us:6969/announce",
- "udp://tracker.0x.tf:6969/announce",
- "udp://tracker.zerobytes.xyz:1337/announce",
- "udp://vibe.sleepyinternetfun.xyz:1738/announce",
- "udp://www.torrent.eu.org:451/announce",
- "zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441",
- "zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441",
- "zero://gugt43coc5tkyrhrc3esf6t6aeycvcqzw7qafxrjpqbwt4ssz5czgzyd.onion:15441",
- "zero://hb6ozikfiaafeuqvgseiik4r46szbpjfu66l67wjinnyv6dtopuwhtqd.onion:15445",
- "zero://75pmmcbp4vvo2zndmjnrkandvbg6jyptygvvpwsf2zguj7urq7t4jzyd.onion:7777",
- "zero://dw4f4sckg2ultdj5qu7vtkf3jsfxsah3mz6pivwfd6nv3quji3vfvhyd.onion:6969",
- "zero://5vczpwawviukvd7grfhsfxp7a6huz77hlis4fstjkym5kmf4pu7i7myd.onion:15441",
- "zero://ow7in4ftwsix5klcbdfqvfqjvimqshbm2o75rhtpdnsderrcbx74wbad.onion:15441",
- "zero://agufghdtniyfwty3wk55drxxwj2zxgzzo7dbrtje73gmvcpxy4ngs4ad.onion:15441",
- "zero://qn65si4gtcwdiliq7vzrwu62qrweoxb6tx2cchwslaervj6szuje66qd.onion:26117",
+ "https://tracker.babico.name.tr:443/announce",
]
# Platform specific
if sys.platform.startswith("win"):
@@ -284,31 +251,12 @@ class Config(object):
self.parser.add_argument('--access_key', help='Plugin access key default: Random key generated at startup', default=access_key_default, metavar='key')
self.parser.add_argument('--dist_type', help='Type of installed distribution', default='source')
- self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=10, type=int, metavar='limit')
+ self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=25, type=int, metavar='limit')
self.parser.add_argument('--file_size_limit', help='Maximum per file size limit in MB', default=10, type=int, metavar='limit')
- self.parser.add_argument('--connected_limit', help='Max number of connected peers per site. Soft limit.', default=10, type=int, metavar='connected_limit')
- self.parser.add_argument('--global_connected_limit', help='Max number of connections. Soft limit.', default=512, type=int, metavar='global_connected_limit')
+ self.parser.add_argument('--connected_limit', help='Max connected peers per site', default=8, type=int, metavar='connected_limit')
+ self.parser.add_argument('--global_connected_limit', help='Max connections', default=512, type=int, metavar='global_connected_limit')
self.parser.add_argument('--workers', help='Download workers per site', default=5, type=int, metavar='workers')
- self.parser.add_argument('--site_announce_interval_min', help='Site announce interval for the most active sites, in minutes.', default=4, type=int, metavar='site_announce_interval_min')
- self.parser.add_argument('--site_announce_interval_max', help='Site announce interval for inactive sites, in minutes.', default=30, type=int, metavar='site_announce_interval_max')
-
- self.parser.add_argument('--site_peer_check_interval_min', help='Connectable peers check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_peer_check_interval_min')
- self.parser.add_argument('--site_peer_check_interval_max', help='Connectable peers check interval for inactive sites, in minutes.', default=20, type=int, metavar='site_peer_check_interval_max')
-
- self.parser.add_argument('--site_update_check_interval_min', help='Site update check interval for the most active sites, in minutes.', default=5, type=int, metavar='site_update_check_interval_min')
- self.parser.add_argument('--site_update_check_interval_max', help='Site update check interval for inactive sites, in minutes.', default=45, type=int, metavar='site_update_check_interval_max')
-
- self.parser.add_argument('--site_connectable_peer_count_max', help='Search for as many connectable peers for the most active sites', default=10, type=int, metavar='site_connectable_peer_count_max')
- self.parser.add_argument('--site_connectable_peer_count_min', help='Search for as many connectable peers for inactive sites', default=2, type=int, metavar='site_connectable_peer_count_min')
-
- self.parser.add_argument('--send_back_lru_size', help='Size of the send back LRU cache', default=5000, type=int, metavar='send_back_lru_size')
- self.parser.add_argument('--send_back_limit', help='Send no more than so many files at once back to peer, when we discovered that the peer held older file versions', default=3, type=int, metavar='send_back_limit')
-
- self.parser.add_argument('--expose_no_ownership', help='By default, ZeroNet tries checking updates for own sites more frequently. This can be used by a third party for revealing the network addresses of a site owner. If this option is enabled, ZeroNet performs the checks in the same way for any sites.', type='bool', choices=[True, False], default=False)
-
- self.parser.add_argument('--simultaneous_connection_throttle_threshold', help='Throttle opening new connections when the number of outgoing connections in not fully established state exceeds the threshold.', default=15, type=int, metavar='simultaneous_connection_throttle_threshold')
-
self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
self.parser.add_argument('--fileserver_port', help='FileServer bind port (0: randomize)', default=0, type=int, metavar='port')
self.parser.add_argument('--fileserver_port_range', help='FileServer randomization range', default="10000-40000", metavar='port')
@@ -371,8 +319,7 @@ class Config(object):
def loadTrackersFile(self):
if not self.trackers_file:
- return None
-
+ self.trackers_file = ["trackers.txt", "{data_dir}/1HELLoE3sFD9569CLCbHEAVqvqV7U2Ri9d/trackers.txt"]
self.trackers = self.arguments.trackers[:]
for trackers_file in self.trackers_file:
@@ -384,6 +331,9 @@ class Config(object):
else: # Relative to zeronet.py
trackers_file_path = self.start_dir + "/" + trackers_file
+ if not os.path.exists(trackers_file_path):
+ continue
+
for line in open(trackers_file_path):
tracker = line.strip()
if "://" in tracker and tracker not in self.trackers:
diff --git a/src/Connection/Connection.py b/src/Connection/Connection.py
index 8916afbe..22bcf29c 100644
--- a/src/Connection/Connection.py
+++ b/src/Connection/Connection.py
@@ -17,13 +17,12 @@ from util import helper
class Connection(object):
__slots__ = (
"sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
- "handshake", "crypt", "connected", "connecting", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
+ "handshake", "crypt", "connected", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
"last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
"last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
)
def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
- self.server = server
self.sock = sock
self.cert_pin = None
if "#" in ip:
@@ -43,6 +42,7 @@ class Connection(object):
self.is_private_ip = False
self.is_tracker_connection = is_tracker_connection
+ self.server = server
self.unpacker = None # Stream incoming socket messages here
self.unpacker_bytes = 0 # How many bytes the unpacker received
self.req_id = 0 # Last request id
@@ -50,7 +50,6 @@ class Connection(object):
self.crypt = None # Connection encryption method
self.sock_wrapped = False # Socket wrapped to encryption
- self.connecting = False
self.connected = False
self.event_connected = gevent.event.AsyncResult() # Solves on handshake received
self.closed = False
@@ -82,11 +81,11 @@ class Connection(object):
def setIp(self, ip):
self.ip = ip
- self.ip_type = self.server.getIpType(ip)
+ self.ip_type = helper.getIpType(ip)
self.updateName()
def createSocket(self):
- if self.server.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
+ if helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy"):
# Create IPv6 connection as IPv4 when using proxy
return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
@@ -119,28 +118,13 @@ class Connection(object):
# Open connection to peer and wait for handshake
def connect(self):
- self.connecting = True
- try:
- return self._connect()
- except Exception as err:
- self.connecting = False
- self.connected = False
- raise
-
- def _connect(self):
- self.updateOnlineStatus(outgoing_activity=True)
-
- if not self.event_connected or self.event_connected.ready():
- self.event_connected = gevent.event.AsyncResult()
-
self.type = "out"
-
- unreachability = self.server.getIpUnreachability(self.ip)
- if unreachability:
- raise Exception(unreachability)
-
if self.ip_type == "onion":
+ if not self.server.tor_manager or not self.server.tor_manager.enabled:
+ raise Exception("Can't connect to onion addresses, no Tor controller present")
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
+ elif config.tor == "always" and helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
+ raise Exception("Can't connect to local IPs in Tor: always mode")
elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
if config.trackers_proxy == "tor":
self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
@@ -164,56 +148,37 @@ class Connection(object):
self.sock.connect(sock_address)
- if self.shouldEncrypt():
+ # Implicit SSL
+ should_encrypt = not self.ip_type == "onion" and self.ip not in self.server.broken_ssl_ips and self.ip not in config.ip_local
+ if self.cert_pin:
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", cert_pin=self.cert_pin)
+ self.sock.do_handshake()
+ self.crypt = "tls-rsa"
+ self.sock_wrapped = True
+ elif should_encrypt and "tls-rsa" in CryptConnection.manager.crypt_supported:
try:
- self.wrapSocket()
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa")
+ self.sock.do_handshake()
+ self.crypt = "tls-rsa"
+ self.sock_wrapped = True
except Exception as err:
- if self.sock:
- self.sock.close()
- self.sock = None
- if self.mustEncrypt():
- raise
- self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
- self.server.broken_ssl_ips[self.ip] = True
- return self.connect()
+ if not config.force_encryption:
+ self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
+ self.server.broken_ssl_ips[self.ip] = True
+ self.sock.close()
+ self.crypt = None
+ self.sock = self.createSocket()
+ self.sock.settimeout(30)
+ self.sock.connect(sock_address)
# Detect protocol
- event_connected = self.event_connected
self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
- self.server.outgoing_pool.spawn(self.messageLoop)
+ event_connected = self.event_connected
+ gevent.spawn(self.messageLoop)
connect_res = event_connected.get() # Wait for handshake
- if self.sock:
- self.sock.settimeout(timeout_before)
+ self.sock.settimeout(timeout_before)
return connect_res
- def mustEncrypt(self):
- if self.cert_pin:
- return True
- if (not self.ip_type == "onion") and config.force_encryption:
- return True
- return False
-
- def shouldEncrypt(self):
- if self.mustEncrypt():
- return True
- return (
- (not self.ip_type == "onion")
- and
- (self.ip not in self.server.broken_ssl_ips)
- and
- (self.ip not in config.ip_local)
- and
- ("tls-rsa" in CryptConnection.manager.crypt_supported)
- )
-
- def wrapSocket(self, crypt="tls-rsa", do_handshake=True):
- server = (self.type == "in")
- sock = CryptConnection.manager.wrapSocket(self.sock, crypt, server=server, cert_pin=self.cert_pin)
- sock.do_handshake()
- self.crypt = crypt
- self.sock_wrapped = True
- self.sock = sock
-
# Handle incoming connection
def handleIncomingConnection(self, sock):
self.log("Incoming connection...")
@@ -227,7 +192,9 @@ class Connection(object):
first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
if first_byte == b"\x16":
self.log("Crypt in connection using implicit SSL")
- self.wrapSocket(do_handshake=False)
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, "tls-rsa", True)
+ self.sock_wrapped = True
+ self.crypt = "tls-rsa"
except Exception as err:
self.log("Socket peek error: %s" % Debug.formatException(err))
self.messageLoop()
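
The one-byte peek works because the first byte of a TLS ClientHello is 0x16 (the handshake record type). A self-contained sketch of the same detection, assuming an already-accepted socket:

```python
import socket

def is_implicit_tls(sock):
    # MSG_PEEK reads without consuming, so the TLS handshake (or the first
    # msgpack byte of a plaintext peer) is still available to the real reader.
    first_byte = sock.recv(1, socket.MSG_PEEK)
    return first_byte == b"\x16"
```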
@@ -246,7 +213,6 @@ class Connection(object):
self.protocol = "v2"
self.updateName()
self.connected = True
- self.connecting = False
buff_len = 0
req_len = 0
self.unpacker_bytes = 0
@@ -469,13 +435,13 @@ class Connection(object):
self.updateName()
self.event_connected.set(True) # Mark handshake as done
+ self.event_connected = None
self.handshake_time = time.time()
# Handle incoming message
def handleMessage(self, message):
cmd = message["cmd"]
- self.updateOnlineStatus(successful_activity=True)
self.last_message_time = time.time()
self.last_cmd_recv = cmd
if cmd == "response": # New style response
@@ -492,10 +458,12 @@ class Connection(object):
self.last_ping_delay = ping
# Server switched to crypt, lets do it also if not crypted already
if message.get("crypt") and not self.sock_wrapped:
- crypt = message["crypt"]
+ self.crypt = message["crypt"]
server = (self.type == "in")
- self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (crypt, server, ping))
- self.wrapSocket(crypt)
+ self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (self.crypt, server, ping))
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
+ self.sock.do_handshake()
+ self.sock_wrapped = True
if not self.sock_wrapped and self.cert_pin:
self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
@@ -523,7 +491,8 @@ class Connection(object):
server = (self.type == "in")
self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
try:
- self.wrapSocket(self.crypt)
+ self.sock = CryptConnection.manager.wrapSocket(self.sock, self.crypt, server, cert_pin=self.cert_pin)
+ self.sock_wrapped = True
except Exception as err:
if not config.force_encryption:
self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
@@ -535,7 +504,6 @@ class Connection(object):
# Send data to connection
def send(self, message, streaming=False):
- self.updateOnlineStatus(outgoing_activity=True)
self.last_send_time = time.time()
if config.debug_socket:
self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
@@ -575,11 +543,6 @@ class Connection(object):
message = None
with self.send_lock:
self.sock.sendall(data)
- # XXX: Should not be used here:
- # self.updateOnlineStatus(successful_activity=True)
- # Looks like self.sock.sendall() returns normally, instead of
- # raising an Exception (at least, some times).
- # So the only way of detecting the network activity is self.handleMessage()
except Exception as err:
self.close("Send error: %s (cmd: %s)" % (err, stat_key))
return False
@@ -591,7 +554,7 @@ class Connection(object):
buff = 64 * 1024
bytes_left = read_bytes
bytes_sent = 0
- while True and self.sock != None:
+ while True:
self.last_send_time = time.time()
data = file.read(min(bytes_left, buff))
bytes_sent += len(data)
@@ -621,8 +584,7 @@ class Connection(object):
self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
if stream_to:
self.waiting_streams[self.req_id] = stream_to
- if not self.send(data): # Send request
- return False
+ self.send(data) # Send request
res = event.get() # Wait until event solves
return res
@@ -646,7 +608,6 @@ class Connection(object):
return False # Already closed
self.closed = True
self.connected = False
- self.connecting = False
if self.event_connected:
self.event_connected.set(False)
@@ -672,12 +633,3 @@ class Connection(object):
self.sock = None
self.unpacker = None
self.event_connected = None
- self.crypt = None
- self.sock_wrapped = False
-
- return True
-
- def updateOnlineStatus(self, outgoing_activity=False, successful_activity=False):
- self.server.updateOnlineStatus(self,
- outgoing_activity=outgoing_activity,
- successful_activity=successful_activity)
diff --git a/src/Connection/ConnectionServer.py b/src/Connection/ConnectionServer.py
index 5f5e7a29..c9048398 100644
--- a/src/Connection/ConnectionServer.py
+++ b/src/Connection/ConnectionServer.py
@@ -1,5 +1,4 @@
import logging
-import re
import time
import sys
import socket
@@ -9,7 +8,6 @@ import gevent
import msgpack
from gevent.server import StreamServer
from gevent.pool import Pool
-import gevent.event
import util
from util import helper
@@ -34,36 +32,25 @@ class ConnectionServer(object):
self.port = port
self.last_connection_id = 0 # Connection id incrementer
self.last_connection_id_current_version = 0 # Connection id incrementer for current client version
+ self.last_connection_id_supported_version = 0 # Connection id incrementer for last supported version
self.log = logging.getLogger("ConnServer")
self.port_opened = {}
self.peer_blacklist = SiteManager.peer_blacklist
- self.managed_pools = {}
-
self.tor_manager = TorManager(self.ip, self.port)
self.connections = [] # Connections
self.whitelist = config.ip_local # No flood protection on this ips
self.ip_incoming = {} # Incoming connections from ip in the last minute to avoid connection flood
self.broken_ssl_ips = {} # Peerids of broken ssl connections
self.ips = {} # Connection by ip
-
self.has_internet = True # Internet outage detection
- self.internet_online_since = 0
- self.internet_offline_since = 0
- self.last_outgoing_internet_activity_time = 0 # Last time the application tried to send any data
- self.last_successful_internet_activity_time = 0 # Last time the application successfully sent or received any data
- self.internet_outage_threshold = 60 * 2
self.stream_server = None
self.stream_server_proxy = None
self.running = False
self.stopping = False
- self.stopping_event = gevent.event.Event()
self.thread_checker = None
- self.thread_pool = Pool(None)
- self.managed_pools["thread"] = self.thread_pool
-
self.stat_recv = defaultdict(lambda: defaultdict(int))
self.stat_sent = defaultdict(lambda: defaultdict(int))
self.bytes_recv = 0
@@ -75,14 +62,8 @@ class ConnectionServer(object):
self.num_outgoing = 0
self.had_external_incoming = False
-
-
self.timecorrection = 0.0
self.pool = Pool(500) # do not accept more than 500 connections
- self.managed_pools["incoming"] = self.pool
-
- self.outgoing_pool = Pool(None)
- self.managed_pools["outgoing"] = self.outgoing_pool
# Bittorrent style peerid
self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")
@@ -103,11 +84,10 @@ class ConnectionServer(object):
return False
self.running = True
if check_connections:
- self.thread_checker = self.spawn(self.checkConnections)
+ self.thread_checker = gevent.spawn(self.checkConnections)
CryptConnection.manager.loadCerts()
if config.tor != "disable":
self.tor_manager.start()
- self.tor_manager.startOnions()
if not self.port:
self.log.info("No port found, not binding")
return False
@@ -128,7 +108,7 @@ class ConnectionServer(object):
return None
if self.stream_server_proxy:
- self.spawn(self.listenProxy)
+ gevent.spawn(self.listenProxy)
try:
self.stream_server.serve_forever()
except Exception as err:
@@ -136,92 +116,22 @@ class ConnectionServer(object):
return False
self.log.debug("Stopped.")
- def stop(self, ui_websocket=None):
+ def stop(self):
self.log.debug("Stopping %s" % self.stream_server)
self.stopping = True
self.running = False
- self.stopping_event.set()
- self.onStop(ui_websocket=ui_websocket)
-
- def onStop(self, ui_websocket=None):
- timeout = 30
- start_time = time.time()
- join_quantum = 0.1
- prev_msg = None
- while True:
- if time.time() >= start_time + timeout:
- break
-
- total_size = 0
- sizes = {}
- timestep = 0
- for name, pool in list(self.managed_pools.items()):
- timestep += join_quantum
- pool.join(timeout=join_quantum)
- size = len(pool)
- if size:
- sizes[name] = size
- total_size += size
-
- if len(sizes) == 0:
- break
-
- if timestep < 1:
- time.sleep(1 - timestep)
-
- # format message
- s = ""
- for name, size in sizes.items():
- s += "%s pool: %s, " % (name, size)
- msg = "Waiting for tasks in managed pools to stop: %s" % s
- # Prevent flooding to log
- if msg != prev_msg:
- prev_msg = msg
- self.log.info("%s", msg)
-
- percent = 100 * (time.time() - start_time) / timeout
- msg = "File Server: waiting for %s tasks to stop" % total_size
- self.sendShutdownProgress(ui_websocket, msg, percent)
-
- for name, pool in list(self.managed_pools.items()):
- size = len(pool)
- if size:
- self.log.info("Killing %s tasks in %s pool", size, name)
- pool.kill()
-
- self.sendShutdownProgress(ui_websocket, "File Server stopped. Now to exit.", 100)
-
if self.thread_checker:
gevent.kill(self.thread_checker)
- self.thread_checker = None
if self.stream_server:
self.stream_server.stop()
- def sendShutdownProgress(self, ui_websocket, message, progress):
- if not ui_websocket:
- return
- ui_websocket.cmd("progress", ["shutdown", message, progress])
- time.sleep(0.01)
-
- # Sleeps the specified amount of time or until ConnectionServer is stopped
- def sleep(self, t):
- if t:
- self.stopping_event.wait(timeout=t)
- else:
- time.sleep(t)
-
- # Spawns a thread that will be waited for on server being stopped (and killed after a timeout)
- def spawn(self, *args, **kwargs):
- thread = self.thread_pool.spawn(*args, **kwargs)
- return thread
-
def closeConnections(self):
self.log.debug("Closing all connection: %s" % len(self.connections))
for connection in self.connections[:]:
connection.close("Close all connections")
def handleIncomingConnection(self, sock, addr):
- if not self.allowsAcceptingConnections():
+ if config.offline:
sock.close()
return False
@@ -239,7 +149,7 @@ class ConnectionServer(object):
self.ip_incoming[ip] += 1
if self.ip_incoming[ip] > 6: # Allow 6 in 1 minute from same ip
self.log.debug("Connection flood detected from %s" % ip)
- self.sleep(30)
+ time.sleep(30)
sock.close()
return False
else:
@@ -248,8 +158,10 @@ class ConnectionServer(object):
connection = Connection(self, ip, port, sock)
self.connections.append(connection)
rev = connection.handshake.get("rev", 0)
- if rev > 0 and rev == config.rev:
- self.last_connection_id_current_version += 1
+ if rev >= 4560:
+ self.last_connection_id_supported_version += 1
+ if rev == config.rev:
+ self.last_connection_id_current_version += 1
if ip not in config.ip_local:
self.ips[ip] = connection
connection.handleIncomingConnection(sock)
@@ -258,7 +170,7 @@ class ConnectionServer(object):
pass
def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
- ip_type = self.getIpType(ip)
+ ip_type = helper.getIpType(ip)
has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
if has_per_site_onion: # Site-unique connection for Tor
if ip.endswith(".onion"):
@@ -294,7 +206,7 @@ class ConnectionServer(object):
return connection
# No connection found
- if create and self.allowsCreatingConnections():
+ if create and not config.offline: # Allow creating a new connection if none was found
if port == 0:
raise Exception("This peer is not connectable")
@@ -302,7 +214,6 @@ class ConnectionServer(object):
raise Exception("This peer is blacklisted")
try:
- #self.log.info("Connection to: %s:%s", ip, port)
if has_per_site_onion: # Lock connection to site
connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
else:
@@ -317,16 +228,17 @@ class ConnectionServer(object):
raise Exception("Connection event return error")
else:
rev = connection.handshake.get("rev", 0)
- if rev > 0 and rev == config.rev:
- self.last_connection_id_current_version += 1
+ if rev >= 4560:
+ self.last_connection_id_supported_version += 1
+ if rev == config.rev:
+ self.last_connection_id_current_version += 1
except Exception as err:
- #self.log.info("Connection error (%s, %s): %s", ip, port, Debug.formatException(err))
connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
raise err
if len(self.connections) > config.global_connected_limit:
- self.spawn(self.checkMaxConnections)
+ gevent.spawn(self.checkMaxConnections)
return connection
else:
@@ -349,12 +261,12 @@ class ConnectionServer(object):
def checkConnections(self):
run_i = 0
- self.sleep(15)
+ time.sleep(15)
while self.running:
run_i += 1
self.ip_incoming = {} # Reset connected ips counter
+ last_message_time = 0
s = time.time()
- self.updateOnlineStatus(None)
for connection in self.connections[:]: # Make a copy
if connection.ip.endswith(".onion") or config.tor == "always":
timeout_multipler = 2
@@ -362,6 +274,9 @@ class ConnectionServer(object):
timeout_multipler = 1
idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
+ if connection.last_message_time > last_message_time and not connection.is_private_ip:
+ # A message from a local IP does not mean we have an internet connection
+ last_message_time = connection.last_message_time
if connection.unpacker and idle > 30:
# Delete the unpacker if not needed
@@ -409,12 +324,24 @@ class ConnectionServer(object):
# Reset bad action counter every 30 min
connection.bad_actions = 0
+ # Internet outage detection
+ if time.time() - last_message_time > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
+ # Offline: last message older than 60-600s, depending on the number of connections
+ if self.has_internet and last_message_time:
+ self.has_internet = False
+ self.onInternetOffline()
+ else:
+ # Online
+ if not self.has_internet:
+ self.has_internet = True
+ self.onInternetOnline()
+
self.timecorrection = self.getTimecorrection()
if time.time() - s > 0.01:
self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))
- self.sleep(15)
+ time.sleep(15)
self.log.debug("Checkconnections ended")
@util.Noparallel(blocking=False)
@@ -439,68 +366,6 @@ class ConnectionServer(object):
))
return num_closed
- # Returns True if we should slow down opening new connections as at the moment
- # there are too many connections being established and not connected completely
- # (not entered the message loop yet).
- def shouldThrottleNewConnections(self):
- threshold = config.simultaneous_connection_throttle_threshold
- if len(self.connections) <= threshold:
- return False
- nr_connections_being_established = 0
- for connection in self.connections[:]: # Make a copy
- if connection.connecting and not connection.connected and connection.type == "out":
- nr_connections_being_established += 1
- if nr_connections_being_established > threshold:
- return True
- return False
-
- # Internet outage detection
- def updateOnlineStatus(self, connection, outgoing_activity=False, successful_activity=False):
-
- now = time.time()
-
- if connection and not connection.is_private_ip:
- if outgoing_activity:
- self.last_outgoing_internet_activity_time = now
- if successful_activity:
- self.last_successful_internet_activity_time = now
- self.setInternetStatus(True)
- return
-
- if not self.last_outgoing_internet_activity_time:
- return
-
- if (
- (self.last_successful_internet_activity_time < now - self.internet_outage_threshold)
- and
- (self.last_successful_internet_activity_time < self.last_outgoing_internet_activity_time)
- ):
- self.setInternetStatus(False)
- return
-
- # This is the old algorithm just in case we missed something
- idle = now - self.last_successful_internet_activity_time
- if idle > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
- # Offline: Last successful activity more than 60-600sec depending on connection number
- self.setInternetStatus(False)
- return
-
- def setInternetStatus(self, status):
- if self.has_internet == status:
- return
-
- self.has_internet = status
-
- if self.has_internet:
- self.internet_online_since = time.time()
- self.spawn(self.onInternetOnline)
- else:
- self.internet_offline_since = time.time()
- self.spawn(self.onInternetOffline)
-
- def isInternetOnline(self):
- return self.has_internet
-
def onInternetOnline(self):
self.log.info("Internet online")
@@ -508,32 +373,6 @@ class ConnectionServer(object):
self.had_external_incoming = False
self.log.info("Internet offline")
- def setOfflineMode(self, offline_mode):
- if config.offline == offline_mode:
- return
- config.offline = offline_mode # Yep, awkward
- if offline_mode:
- self.log.info("offline mode is ON")
- else:
- self.log.info("offline mode is OFF")
-
- def isOfflineMode(self):
- return config.offline
-
- def allowsCreatingConnections(self):
- if self.isOfflineMode():
- return False
- if self.stopping:
- return False
- return True
-
- def allowsAcceptingConnections(self):
- if self.isOfflineMode():
- return False
- if self.stopping:
- return False
- return True
-
def getTimecorrection(self):
corrections = sorted([
connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
@@ -545,48 +384,3 @@ class ConnectionServer(object):
mid = int(len(corrections) / 2 - 1)
median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
return median
-
-
- ############################################################################
-
- # Methods for handling network address types
- # (ipv4, ipv6, onion etc... more to be implemented by plugins)
- #
- # All the functions handling network address types have "Ip" in the name.
- # So it was in the initial codebase, and I keep the naming, since I couldn't
- # think of a better option.
- # "IP" is short and quite clear and lets you understand that a variable
- # contains a peer address or other transport-level address and not
- # an address of ZeroNet site.
- #
-
- # Returns type of the given network address.
- # Since: 0.8.0
- # Replaces helper.getIpType() in order to be extensible by plugins.
- def getIpType(self, ip):
- if ip.endswith(".onion"):
- return "onion"
- elif ":" in ip:
- return "ipv6"
- elif re.match(r"[0-9\.]+$", ip):
- return "ipv4"
- else:
- return "unknown"
-
- # Checks if a network address can be reachable in the current configuration
- # and returs a string describing why it cannot.
- # If the network address can be reachable, returns False.
- # Since: 0.8.0
- def getIpUnreachability(self, ip):
- ip_type = self.getIpType(ip)
- if ip_type == 'onion' and not self.tor_manager.enabled:
- return "Can't connect to onion addresses, no Tor controller present"
- if config.tor == "always" and helper.isPrivateIp(ip) and ip not in config.ip_local:
- return "Can't connect to local IPs in Tor: always mode"
- return False
-
- # Returns True if ConnctionServer has means for establishing outgoing
- # connections to the given address.
- # Since: 0.8.0
- def isIpReachable(self, ip):
- return self.getIpUnreachability(ip) == False
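
The outage detection restored in checkConnections() scales its silence threshold with the number of open connections: the more peers we talk to, the sooner silence counts as an outage. A worked sketch of the formula used above:

    def outage_threshold(num_connections):
        # 60-600 seconds, shrinking as the connection count grows
        return max(60, 60 * 10 / max(1, float(num_connections) / 50))

    assert outage_threshold(10) == 600    # few peers: tolerate 10 minutes of silence
    assert outage_threshold(250) == 120
    assert outage_threshold(1000) == 60   # many peers: 60-second floor
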
diff --git a/src/Content/ContentManager.py b/src/Content/ContentManager.py
index c6a64750..623cc707 100644
--- a/src/Content/ContentManager.py
+++ b/src/Content/ContentManager.py
@@ -239,7 +239,7 @@ class ContentManager(object):
if num_removed_bad_files > 0:
self.site.worker_manager.removeSolvedFileTasks(mark_as_good=False)
- self.site.spawn(self.site.update, since=0)
+ gevent.spawn(self.site.update, since=0)
self.log.debug("Archived removed contents: %s, removed bad files: %s" % (num_removed_contents, num_removed_bad_files))
@@ -651,25 +651,6 @@ class ContentManager(object):
)
return files_node, files_optional_node
- def serializeForSigning(self, content):
- if "sign" in content:
- del(content["sign"]) # The file signed without the sign
- if "signs" in content:
- del(content["signs"]) # The file signed without the signs
-
- sign_content = json.dumps(content, sort_keys=True) # Dump the json to string to remove whitespaces
-
- # Fix float representation error on Android
- modified = content["modified"]
- if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
- modified_fixed = "{:.6f}".format(modified).strip("0.")
- sign_content = sign_content.replace(
- '"modified": %s' % repr(modified),
- '"modified": %s' % modified_fixed
- )
-
- return sign_content
-
# Create and sign a content.json
# Return: The new content if filewrite = False
def sign(self, inner_path="content.json", privatekey=None, filewrite=True, update_changed_files=False, extend=None, remove_missing_optional=False):
@@ -746,7 +727,6 @@ class ContentManager(object):
elif "files_optional" in new_content:
del new_content["files_optional"]
- new_content["modified"] = int(time.time()) # Add timestamp
if inner_path == "content.json":
new_content["zeronet_version"] = config.version
new_content["signs_required"] = content.get("signs_required", 1)
@@ -766,20 +746,44 @@ class ContentManager(object):
)
self.log.info("Correct %s in valid signers: %s" % (privatekey_address, valid_signers))
+ signs_required = 1
if inner_path == "content.json" and privatekey_address == self.site.address:
# If signing using the root key, then sign the valid signers
- signers_data = "%s:%s" % (new_content["signs_required"], ",".join(valid_signers))
+ signs_required = new_content["signs_required"]
+ signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
new_content["signers_sign"] = CryptBitcoin.sign(str(signers_data), privatekey)
if not new_content["signers_sign"]:
self.log.info("Old style address, signers_sign is none")
self.log.info("Signing %s..." % inner_path)
- sign_content = self.serializeForSigning(new_content)
+ if "signs" in new_content:
+ # del(new_content["signs"]) # Delete old signs
+ old_signs_content = new_content["signs"]
+ del(new_content["signs"])
+ else:
+ old_signs_content = None
+ if "sign" in new_content:
+ del(new_content["sign"]) # Delete old sign (backward compatibility)
+
+ if signs_required > 1:
+ has_valid_sign = False
+ sign_content = json.dumps(new_content, sort_keys=True)
+ for signer in valid_signers:
+ res = CryptBitcoin.verify(sign_content, signer, old_signs_content[signer])
+ if res:
+ has_valid_sign = True
+ if has_valid_sign:
+ new_content["modified"] = content["modified"]
+ sign_content = json.dumps(new_content, sort_keys=True)
+ else:
+ new_content["modified"] = int(time.time()) # Add timestamp
+ sign_content = json.dumps(new_content, sort_keys=True)
sign = CryptBitcoin.sign(sign_content, privatekey)
# new_content["signs"] = content.get("signs", {}) # TODO: Multisig
if sign: # If signing is successful (not an old address)
- new_content["signs"] = {}
+ new_content["signs"] = old_signs_content or {}
new_content["signs"][privatekey_address] = sign
self.verifyContent(inner_path, new_content)
@@ -814,7 +818,9 @@ class ContentManager(object):
# Return: The required number of valid signs for the content.json
def getSignsRequired(self, inner_path, content=None):
- return 1 # Todo: Multisig
+ if not content:
+ return 1
+ return content.get("signs_required", 1)
def verifyCertSign(self, user_address, user_auth_type, user_name, issuer_address, sign):
from Crypt import CryptBitcoin
@@ -939,95 +945,104 @@ class ContentManager(object):
return True # All good
- def verifyContentFile(self, inner_path, file, ignore_same=True):
- from Crypt import CryptBitcoin
-
- if type(file) is dict:
- new_content = file
- else:
- try:
- if sys.version_info.major == 3 and sys.version_info.minor < 6:
- new_content = json.loads(file.read().decode("utf8"))
- else:
- new_content = json.load(file)
- except Exception as err:
- raise VerifyError("Invalid json file: %s" % err)
- if inner_path in self.contents:
- old_content = self.contents.get(inner_path, {"modified": 0})
- # Checks if its newer the ours
- if old_content["modified"] == new_content["modified"] and ignore_same: # Ignore, have the same content.json
- return None
- elif old_content["modified"] > new_content["modified"]: # We have newer
- raise VerifyError(
- "We have newer (Our: %s, Sent: %s)" %
- (old_content["modified"], new_content["modified"])
- )
- if new_content["modified"] > time.time() + 60 * 60 * 24: # Content modified in the far future (allow 1 day+)
- raise VerifyError("Modify timestamp is in the far future!")
- if self.isArchived(inner_path, new_content["modified"]):
- if inner_path in self.site.bad_files:
- del self.site.bad_files[inner_path]
- raise VerifyError("This file is archived!")
- # Check sign
- sign = new_content.get("sign")
- signs = new_content.get("signs", {})
- sign_content = self.serializeForSigning(new_content)
-
- if signs: # New style signing
- valid_signers = self.getValidSigners(inner_path, new_content)
- signs_required = self.getSignsRequired(inner_path, new_content)
-
- if inner_path == "content.json" and len(valid_signers) > 1: # Check signers_sign on root content.json
- signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
- if not CryptBitcoin.verify(signers_data, self.site.address, new_content["signers_sign"]):
- raise VerifyError("Invalid signers_sign!")
-
- if inner_path != "content.json" and not self.verifyCert(inner_path, new_content): # Check if cert valid
- raise VerifyError("Invalid cert!")
-
- valid_signs = 0
- for address in valid_signers:
- if address in signs:
- valid_signs += CryptBitcoin.verify(sign_content, address, signs[address])
- if valid_signs >= signs_required:
- break # Break if we has enough signs
- if valid_signs < signs_required:
- raise VerifyError("Valid signs: %s/%s" % (valid_signs, signs_required))
- else:
- return self.verifyContent(inner_path, new_content)
- elif sign: # Old style signing
- raise VerifyError("Invalid old-style sign")
- else:
- raise VerifyError("Not signed")
-
- def verifyOrdinaryFile(self, inner_path, file, ignore_same=True):
- file_info = self.getFileInfo(inner_path)
- if file_info:
- if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
- raise VerifyError("Invalid hash")
-
- if file_info.get("size", 0) != file.tell():
- raise VerifyError(
- "File size does not match %s <> %s" %
- (inner_path, file.tell(), file_info.get("size", 0))
- )
-
- return True
-
- else: # File not in content.json
- raise VerifyError("File not in content.json")
-
# Verify file validity
# Return: None = Same as before, False = Invalid, True = Valid
def verifyFile(self, inner_path, file, ignore_same=True):
- try:
- if inner_path.endswith("content.json"):
- return self.verifyContentFile(inner_path, file, ignore_same)
- else:
- return self.verifyOrdinaryFile(inner_path, file, ignore_same)
- except Exception as err:
- self.log.info("%s: verify error: %s" % (inner_path, Debug.formatException(err)))
- raise err
+ if inner_path.endswith("content.json"): # content.json: Check using sign
+ from Crypt import CryptBitcoin
+ try:
+ if type(file) is dict:
+ new_content = file
+ else:
+ try:
+ if sys.version_info.major == 3 and sys.version_info.minor < 6:
+ new_content = json.loads(file.read().decode("utf8"))
+ else:
+ new_content = json.load(file)
+ except Exception as err:
+ raise VerifyError("Invalid json file: %s" % err)
+ if inner_path in self.contents:
+ old_content = self.contents.get(inner_path, {"modified": 0})
+ # Check if it's newer than ours
+ if old_content["modified"] == new_content["modified"] and ignore_same: # Ignore, have the same content.json
+ return None
+ elif old_content["modified"] > new_content["modified"]: # We have newer
+ raise VerifyError(
+ "We have newer (Our: %s, Sent: %s)" %
+ (old_content["modified"], new_content["modified"])
+ )
+ if new_content["modified"] > time.time() + 60 * 60 * 24: # Content modified in the far future (allow 1 day+)
+ raise VerifyError("Modify timestamp is in the far future!")
+ if self.isArchived(inner_path, new_content["modified"]):
+ if inner_path in self.site.bad_files:
+ del self.site.bad_files[inner_path]
+ raise VerifyError("This file is archived!")
+ # Check sign
+ sign = new_content.get("sign")
+ signs = new_content.get("signs", {})
+ if "sign" in new_content:
+ del(new_content["sign"]) # The file signed without the sign
+ if "signs" in new_content:
+ del(new_content["signs"]) # The file signed without the signs
+
+ sign_content = json.dumps(new_content, sort_keys=True) # Dump the json to string to remove whitespace
+
+ # Fix float representation error on Android
+ modified = new_content["modified"]
+ if config.fix_float_decimals and type(modified) is float and not str(modified).endswith(".0"):
+ modified_fixed = "{:.6f}".format(modified).strip("0.")
+ sign_content = sign_content.replace(
+ '"modified": %s' % repr(modified),
+ '"modified": %s' % modified_fixed
+ )
+
+ if signs: # New style signing
+ valid_signers = self.getValidSigners(inner_path, new_content)
+ signs_required = self.getSignsRequired(inner_path, new_content)
+
+ if inner_path == "content.json" and len(valid_signers) > 1: # Check signers_sign on root content.json
+ signers_data = "%s:%s" % (signs_required, ",".join(valid_signers))
+ if not CryptBitcoin.verify(signers_data, self.site.address, new_content["signers_sign"]):
+ raise VerifyError("Invalid signers_sign!")
+
+ if inner_path != "content.json" and not self.verifyCert(inner_path, new_content): # Check if cert valid
+ raise VerifyError("Invalid cert!")
+
+ valid_signs = []
+ for address in valid_signers:
+ if address in signs:
+ result = CryptBitcoin.verify(sign_content, address, signs[address])
+ if result:
+ valid_signs.append(address)
+ if len(valid_signs) >= signs_required:
+ break # Break if we have enough signs
+ if len(valid_signs) < signs_required:
+ raise VerifyError("Valid signs: %s/%s, Valid Signers : %s" % (len(valid_signs), signs_required, valid_signs))
+ else:
+ return self.verifyContent(inner_path, new_content)
+ else: # Old style signing
+ raise VerifyError("Invalid old-style sign")
+
+ except Exception as err:
+ self.log.warning("%s: verify sign error: %s" % (inner_path, Debug.formatException(err)))
+ raise err
+
+ else: # Check using sha512 hash
+ file_info = self.getFileInfo(inner_path)
+ if file_info:
+ if CryptHash.sha512sum(file) != file_info.get("sha512", ""):
+ raise VerifyError("Invalid hash")
+
+ if file_info.get("size", 0) != file.tell():
+ raise VerifyError(
+ "File size does not match %s <> %s" %
+ (inner_path, file.tell(), file_info.get("size", 0))
+ )
+
+ return True
+
+ else: # File not in content.json
+ raise VerifyError("File not in content.json")
def optionalDelete(self, inner_path):
self.site.storage.delete(inner_path)
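
The multisig changes to sign() and verifyFile() reduce to one rule: collect a signature per valid signer over the canonical JSON (with sign/signs stripped) and require at least signs_required of them to verify. A simplified sketch of that check, with verify standing in for CryptBitcoin.verify (not the exact code above):

    import json

    def collect_valid_signs(content, signs, valid_signers, signs_required, verify):
        sign_content = json.dumps(content, sort_keys=True)  # sign/signs already removed
        valid = []
        for address in valid_signers:
            if address in signs and verify(sign_content, address, signs[address]):
                valid.append(address)
                if len(valid) >= signs_required:
                    break  # enough signatures collected
        return valid
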
diff --git a/src/Crypt/CryptConnection.py b/src/Crypt/CryptConnection.py
index ebbc6295..c0903e84 100644
--- a/src/Crypt/CryptConnection.py
+++ b/src/Crypt/CryptConnection.py
@@ -127,6 +127,10 @@ class CryptConnectionManager:
"/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Domain Validation Secure Server CA"
]
self.openssl_env['CN'] = random.choice(self.fakedomains)
+ environ = os.environ
+ environ['OPENSSL_CONF'] = self.openssl_env['OPENSSL_CONF']
+ environ['RANDFILE'] = self.openssl_env['RANDFILE']
+ environ['CN'] = self.openssl_env['CN']
if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
self.createSslContexts()
@@ -152,7 +156,7 @@ class CryptConnectionManager:
self.log.debug("Running: %s" % cmd)
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
@@ -175,7 +179,7 @@ class CryptConnectionManager:
self.log.debug("Generating certificate key and signing request...")
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
@@ -194,7 +198,7 @@ class CryptConnectionManager:
self.log.debug("Generating RSA cert...")
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, env=self.openssl_env
+ stdout=subprocess.PIPE, env=environ
)
back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
proc.wait()
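
Note that the change above mutates os.environ in place, so OPENSSL_CONF, RANDFILE and CN leak into every subprocess started later by the application. If that side effect is unwanted, a copied environment gives the openssl calls the same variables without polluting the process (a sketch; openssl_env stands for the existing dict):

    import os
    import subprocess

    env = os.environ.copy()   # inherit PATH etc. without touching os.environ
    env.update(openssl_env)   # OPENSSL_CONF, RANDFILE, CN
    proc = subprocess.Popen(
        cmd, shell=True, stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE, env=env
    )
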
diff --git a/src/Debug/DebugHook.py b/src/Debug/DebugHook.py
index c11185da..d100a3b8 100644
--- a/src/Debug/DebugHook.py
+++ b/src/Debug/DebugHook.py
@@ -10,33 +10,19 @@ from Config import config
from . import Debug
last_error = None
-thread_shutdown = None
-
-thread_shutdown = None
-
-def shutdownThread():
- import main
- try:
- if "file_server" in dir(main):
- thread = gevent.spawn(main.file_server.stop)
- thread.join(timeout=60)
- if "ui_server" in dir(main):
- thread = gevent.spawn(main.ui_server.stop)
- thread.join(timeout=10)
- except Exception as err:
- print("Error in shutdown thread: %s" % err)
- sys.exit(0)
- else:
- sys.exit(0)
def shutdown(reason="Unknown"):
- global thread_shutdown
logging.info("Shutting down (reason: %s)..." % reason)
- try:
- if not thread_shutdown:
- thread_shutdown = gevent.spawn(shutdownThread)
- except Exception as err:
- print("Proper shutdown error: %s" % err)
+ import main
+ if "file_server" in dir(main):
+ try:
+ gevent.spawn(main.file_server.stop)
+ if "ui_server" in dir(main):
+ gevent.spawn(main.ui_server.stop)
+ except Exception as err:
+ print("Proper shutdown error: %s" % err)
+ sys.exit(0)
+ else:
sys.exit(0)
# Store last error, ignore notify, allow manual error logging
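
The rewritten shutdown() spawns the stop greenlets and calls sys.exit(0) without waiting for them, so the process can exit before file_server.stop() finishes. If a bounded wait is preferred, the greenlets can be joined with a timeout first, as the removed shutdownThread() did (a sketch):

    import sys
    import gevent

    thread = gevent.spawn(main.file_server.stop)
    thread.join(timeout=60)  # give the file server up to 60 seconds to stop
    sys.exit(0)
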
diff --git a/src/File/FileRequest.py b/src/File/FileRequest.py
index d2dd1346..c082c378 100644
--- a/src/File/FileRequest.py
+++ b/src/File/FileRequest.py
@@ -33,7 +33,7 @@ class FileRequest(object):
self.connection = connection
self.req_id = None
- self.sites = self.server.getSites()
+ self.sites = self.server.sites
self.log = server.log
self.responded = False # Responded to the request
@@ -109,31 +109,35 @@ class FileRequest(object):
return False
inner_path = params.get("inner_path", "")
- current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
- body = params["body"]
-
if not inner_path.endswith("content.json"):
self.response({"error": "Only content.json update allowed"})
self.connection.badAction(5)
return
+ current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
should_validate_content = True
if "modified" in params and params["modified"] <= current_content_modified:
should_validate_content = False
valid = None # Same or earlier content as we have
- elif not body: # No body sent, we have to download it first
+
+ body = params["body"]
+ if not body: # No body sent, we have to download it first
site.log.debug("Missing body from update for file %s, downloading ..." % inner_path)
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
try:
body = peer.getFile(site.address, inner_path).read()
except Exception as err:
site.log.debug("Can't download updated file %s: %s" % (inner_path, err))
- self.response({"error": "File invalid update: Can't download updaed file"})
+ self.response({"error": "Invalid File update: Failed to download updated file content"})
self.connection.badAction(5)
return
if should_validate_content:
try:
+ if type(body) is str:
+ body = body.encode()
content = json.loads(body.decode())
except Exception as err:
site.log.debug("Update for %s is invalid JSON: %s" % (inner_path, err))
@@ -161,21 +165,19 @@ class FileRequest(object):
site.onFileDone(inner_path) # Trigger filedone
- if inner_path.endswith("content.json"): # Download every changed file from peer
- peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
- # On complete publish to other peers
- diffs = params.get("diffs", {})
- site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs, limit=6), "publish_%s" % inner_path)
+ # Download every changed file from peer
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update") # Add or get peer
+ # On complete publish to other peers
+ diffs = params.get("diffs", {})
+ site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs, limit=6), "publish_%s" % inner_path)
- # Load new content file and download changed files in new thread
- def downloader():
- site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
- del self.server.files_parsing[file_uri]
-
- gevent.spawn(downloader)
- else:
+ # Load new content file and download changed files in new thread
+ def downloader():
+ site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
del self.server.files_parsing[file_uri]
+ gevent.spawn(downloader)
+
self.response({"ok": "Thanks, file %s updated!" % inner_path})
self.connection.goodAction()
@@ -376,7 +378,7 @@ class FileRequest(object):
for hash_id, peers in found.items():
for peer in peers:
- ip_type = self.server.getIpType(peer.ip)
+ ip_type = helper.getIpType(peer.ip)
if len(back[ip_type][hash_id]) < 20:
back[ip_type][hash_id].append(peer.packMyAddress())
return back
@@ -430,7 +432,7 @@ class FileRequest(object):
# Check requested port of the other peer
def actionCheckport(self, params):
- if self.server.getIpType(self.connection.ip) == "ipv6":
+ if helper.getIpType(self.connection.ip) == "ipv6":
sock_address = (self.connection.ip, params["port"], 0, 0)
else:
sock_address = (self.connection.ip, params["port"])
diff --git a/src/File/FileServer.py b/src/File/FileServer.py
index 7114849b..b7a942fc 100644
--- a/src/File/FileServer.py
+++ b/src/File/FileServer.py
@@ -3,7 +3,6 @@ import time
import random
import socket
import sys
-import weakref
import gevent
import gevent.pool
@@ -19,13 +18,6 @@ from Connection import ConnectionServer
from Plugin import PluginManager
from Debug import Debug
-log = logging.getLogger("FileServer")
-
-class FakeThread(object):
- def __init__(self):
- pass
- def ready(self):
- return False
@PluginManager.acceptPlugins
class FileServer(ConnectionServer):
@@ -33,31 +25,12 @@ class FileServer(ConnectionServer):
def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
self.site_manager = SiteManager.site_manager
self.portchecker = PeerPortchecker.PeerPortchecker(self)
+ self.log = logging.getLogger("FileServer")
self.ip_type = ip_type
self.ip_external_list = []
- # This is wrong:
- # self.log = logging.getLogger("FileServer")
- # The value of self.log will be overwritten in ConnectionServer.__init__()
-
- self.recheck_port = True
-
- self.active_mode_thread_pool = gevent.pool.Pool(None)
- self.site_pool = gevent.pool.Pool(None)
-
- self.update_pool = gevent.pool.Pool(10)
- self.update_start_time = 0
- self.update_sites_task_next_nr = 1
-
- self.update_threads = weakref.WeakValueDictionary()
-
- self.passive_mode = None
- self.active_mode = None
- self.active_mode_threads = {}
-
-
self.supported_ip_types = ["ipv4"] # Outgoing ip_type support
- if self.getIpType(ip) == "ipv6" or self.isIpv6Supported():
+ if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
self.supported_ip_types.append("ipv6")
if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
@@ -79,50 +52,33 @@ class FileServer(ConnectionServer):
config.arguments.fileserver_port = port
ConnectionServer.__init__(self, ip, port, self.handleRequest)
- log.debug("Supported IP types: %s" % self.supported_ip_types)
-
- self.managed_pools["active_mode_thread"] = self.active_mode_thread_pool
- self.managed_pools["update"] = self.update_pool
- self.managed_pools["site"] = self.site_pool
+ self.log.debug("Supported IP types: %s" % self.supported_ip_types)
if ip_type == "dual" and ip == "::":
# Also bind to ipv4 address in dual mode
try:
- log.debug("Binding proxy to %s:%s" % ("::", self.port))
+ self.log.debug("Binding proxy to %s:%s" % ("::", self.port))
self.stream_server_proxy = StreamServer(
("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
)
except Exception as err:
- log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
+ self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err))
self.port_opened = {}
+ self.sites = self.site_manager.sites
self.last_request = time.time()
self.files_parsing = {}
self.ui_server = None
- def getSites(self):
- sites = self.site_manager.list()
- # We need to keep self.sites for the backward compatibility with plugins.
- # Never. Ever. Use it.
- # TODO: fix plugins
- self.sites = sites
- return sites
-
- def getSite(self, address):
- return self.getSites().get(address, None)
-
- def getSiteAddresses(self):
- # Avoid saving the site list on the stack, since a site may be deleted
- # from the original list while iterating.
- # Use the list of addresses instead.
- return [
- site.address for site in
- sorted(list(self.getSites().values()), key=lambda site: site.settings.get("modified", 0), reverse=True)
- ]
-
def getRandomPort(self, ip, port_range_from, port_range_to):
- log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
+ """Generates Random Port from given range
+ Args:
+ ip: IP Address
+ port_range_from: From Range
+ port_range_to: to Range
+ """
+ self.log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
tried = []
for bind_retry in range(100):
port = random.randint(port_range_from, port_range_to)
@@ -134,14 +90,14 @@ class FileServer(ConnectionServer):
sock.bind((ip, port))
success = True
except Exception as err:
- log.warning("Error binding to port %s: %s" % (port, err))
+ self.log.warning("Error binding to port %s: %s" % (port, err))
success = False
sock.close()
if success:
- log.info("Found unused random port: %s" % port)
+ self.log.info("Found unused random port: %s" % port)
return port
else:
- self.sleep(0.1)
+ time.sleep(0.1)
return False
def isIpv6Supported(self):
@@ -154,16 +110,16 @@ class FileServer(ConnectionServer):
sock.connect((ipv6_testip, 80))
local_ipv6 = sock.getsockname()[0]
if local_ipv6 == "::1":
- log.debug("IPv6 not supported, no local IPv6 address")
+ self.log.debug("IPv6 not supported, no local IPv6 address")
return False
else:
- log.debug("IPv6 supported on IP %s" % local_ipv6)
+ self.log.debug("IPv6 supported on IP %s" % local_ipv6)
return True
except socket.error as err:
- log.warning("IPv6 not supported: %s" % err)
+ self.log.warning("IPv6 not supported: %s" % err)
return False
except Exception as err:
- log.error("IPv6 check error: %s" % err)
+ self.log.error("IPv6 check error: %s" % err)
return False
def listenProxy(self):
@@ -171,34 +127,29 @@ class FileServer(ConnectionServer):
self.stream_server_proxy.serve_forever()
except Exception as err:
if err.errno == 98: # Address already in use error
- log.debug("StreamServer proxy listen error: %s" % err)
+ self.log.debug("StreamServer proxy listen error: %s" % err)
else:
- log.info("StreamServer proxy listen error: %s" % err)
+ self.log.info("StreamServer proxy listen error: %s" % err)
# Handle request to fileserver
def handleRequest(self, connection, message):
if config.verbose:
if "params" in message:
- log.debug(
+ self.log.debug(
"FileRequest: %s %s %s %s" %
(str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
)
else:
- log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
+ self.log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
req = FileRequest(self, connection)
req.route(message["cmd"], message.get("req_id"), message.get("params"))
- if not connection.is_private_ip:
- self.setInternetStatus(True)
+ if not self.has_internet and not connection.is_private_ip:
+ self.has_internet = True
+ self.onInternetOnline()
def onInternetOnline(self):
- log.info("Internet online")
- invalid_interval=(
- self.internet_offline_since - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
- time.time()
- )
- self.invalidateUpdateTime(invalid_interval)
- self.recheck_port = True
- self.spawn(self.updateSites)
+ self.log.info("Internet online")
+ gevent.spawn(self.checkSites, check_files=False, force_port_check=True)
# Reload the FileRequest class to prevent restarts in debug mode
def reload(self):
@@ -207,8 +158,8 @@ class FileServer(ConnectionServer):
FileRequest = imp.load_source("FileRequest", "src/File/FileRequest.py").FileRequest
def portCheck(self):
- if self.isOfflineMode():
- log.info("Offline mode: port check disabled")
+ if config.offline:
+ self.log.info("Offline mode: port check disabled")
res = {"ipv4": None, "ipv6": None}
self.port_opened = res
return res
@@ -217,14 +168,14 @@ class FileServer(ConnectionServer):
for ip_external in config.ip_external:
SiteManager.peer_blacklist.append((ip_external, self.port)) # Add myself to peer blacklist
- ip_external_types = set([self.getIpType(ip) for ip in config.ip_external])
+ ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
res = {
"ipv4": "ipv4" in ip_external_types,
"ipv6": "ipv6" in ip_external_types
}
self.ip_external_list = config.ip_external
self.port_opened.update(res)
- log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
+ self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
return res
self.port_opened = {}
@@ -232,7 +183,7 @@ class FileServer(ConnectionServer):
self.ui_server.updateWebsocket()
if "ipv6" in self.supported_ip_types:
- res_ipv6_thread = self.spawn(self.portchecker.portCheck, self.port, "ipv6")
+ res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
else:
res_ipv6_thread = None
@@ -245,8 +196,8 @@ class FileServer(ConnectionServer):
res_ipv6 = {"ip": None, "opened": None}
else:
res_ipv6 = res_ipv6_thread.get()
- if res_ipv6["opened"] and not self.getIpType(res_ipv6["ip"]) == "ipv6":
- log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
+ if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
+ self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
res_ipv6["opened"] = False
self.ip_external_list = []
@@ -255,7 +206,7 @@ class FileServer(ConnectionServer):
self.ip_external_list.append(res_ip["ip"])
SiteManager.peer_blacklist.append((res_ip["ip"], self.port))
- log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
+ self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))
res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
@@ -266,9 +217,9 @@ class FileServer(ConnectionServer):
for ip in interface_ips:
if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
self.ip_external_list.append(ip)
- res[self.getIpType(ip)] = True # We have opened port if we have external ip
+ res[helper.getIpType(ip)] = True # We have opened port if we have external ip
SiteManager.peer_blacklist.append((ip, self.port))
- log.debug("External ip found on interfaces: %s" % ip)
+ self.log.debug("External ip found on interfaces: %s" % ip)
self.port_opened.update(res)
@@ -277,381 +228,131 @@ class FileServer(ConnectionServer):
return res
- @util.Noparallel(queue=True)
- def recheckPort(self):
- if self.recheck_port:
- self.portCheck()
- self.recheck_port = False
+ # Check site file integrity
+ def checkSite(self, site, check_files=False):
+ if site.isServing():
+ site.announce(mode="startup") # Announce site to tracker
+ site.update(check_files=check_files) # Update site's content.json and download changed files
+ site.sendMyHashfield()
+ site.updateHashfield()
- # Returns False if Internet is immediately available
- # Returns True if we've spent some time waiting for Internet
- # Returns None if FileServer is stopping or the Offline mode is enabled
+ # Check sites integrity
@util.Noparallel()
- def waitForInternetOnline(self):
- if self.isOfflineMode() or self.stopping:
- return None
+ def checkSites(self, check_files=False, force_port_check=False):
+ self.log.debug("Checking sites...")
+ s = time.time()
+ sites_checking = False
+ if not self.port_opened or force_port_check: # Test and open port if not tested yet
+ if len(self.sites) <= 2: # Don't wait for port opening on first startup
+ sites_checking = True
+ for address, site in list(self.sites.items()):
+ gevent.spawn(self.checkSite, site, check_files)
- if self.isInternetOnline():
- return False
+ self.portCheck()
- while not self.isInternetOnline():
- self.sleep(30)
- if self.isOfflineMode() or self.stopping:
- return None
- if self.isInternetOnline():
- break
- if len(self.update_pool) == 0:
- log.info("Internet connection seems to be broken. Running an update for a random site to check if we are able to connect to any peer.")
- thread = self.thread_pool.spawn(self.updateRandomSite)
- thread.join()
+ if not self.port_opened["ipv4"]:
+ self.tor_manager.startOnions()
- self.recheckPort()
- return True
-
- def updateRandomSite(self, site_addresses=None, force=False):
- if not site_addresses:
- site_addresses = self.getSiteAddresses()
-
- site_addresses = random.sample(site_addresses, 1)
- if len(site_addresses) < 1:
- return
-
- address = site_addresses[0]
- site = self.getSite(address)
-
- if not site:
- return
-
- log.info("Randomly chosen site: %s", site.address_short)
-
- self.spawnUpdateSite(site).join()
-
- def updateSite(self, site, check_files=False, verify_files=False):
- if not site:
- return
- if verify_files:
- mode = 'verify'
- elif check_files:
- mode = 'check'
- else:
- mode = 'update'
- log.info("running <%s> for %s" % (mode, site.address_short))
- site.update2(check_files=check_files, verify_files=verify_files)
-
- def spawnUpdateSite(self, site, check_files=False, verify_files=False):
- fake_thread = FakeThread()
- self.update_threads[site.address] = fake_thread
- thread = self.update_pool.spawn(self.updateSite, site,
- check_files=check_files, verify_files=verify_files)
- self.update_threads[site.address] = thread
- return thread
-
- def lookupInUpdatePool(self, site_address):
- thread = self.update_threads.get(site_address, None)
- if not thread or thread.ready():
- return None
- return thread
-
- def siteIsInUpdatePool(self, site_address):
- return self.lookupInUpdatePool(site_address) is not None
-
- def invalidateUpdateTime(self, invalid_interval):
- for address in self.getSiteAddresses():
- site = self.getSite(address)
- if site:
- site.invalidateUpdateTime(invalid_interval)
-
- def isSiteUpdateTimeValid(self, site_address):
- site = self.getSite(site_address)
- if not site:
- return False
- return site.isUpdateTimeValid()
-
- def updateSites(self):
- task_nr = self.update_sites_task_next_nr
- self.update_sites_task_next_nr += 1
-
- task_description = "updateSites [#%d]" % task_nr
- log.info("%s: started", task_description)
-
- # Don't wait port opening on first startup. Do the instant check now.
- if len(self.getSites()) <= 2:
- for address, site in list(self.getSites().items()):
- self.updateSite(site, check_files=True)
-
- self.recheckPort()
-
- all_site_addresses = self.getSiteAddresses()
- site_addresses = [
- address for address in all_site_addresses
- if not self.isSiteUpdateTimeValid(address)
- ]
-
- log.info("%s: chosen %d sites (of %d)", task_description, len(site_addresses), len(all_site_addresses))
-
- sites_processed = 0
- sites_skipped = 0
- start_time = time.time()
- self.update_start_time = start_time
- progress_print_time = time.time()
-
- # Check sites integrity
- for site_address in site_addresses:
- site = None
- self.sleep(1)
- self.waitForInternetOnline()
-
- while self.isActiveMode() and self.shouldThrottleNewConnections():
- self.sleep(1)
-
- if not self.isActiveMode():
- break
-
- site = self.getSite(site_address)
- if not site or site.isUpdateTimeValid() or self.siteIsInUpdatePool(site_address):
- sites_skipped += 1
- continue
-
- sites_processed += 1
- thread = self.spawnUpdateSite(site)
-
- if not self.isActiveMode():
- break
-
- if time.time() - progress_print_time > 60:
- progress_print_time = time.time()
- time_spent = time.time() - start_time
- time_per_site = time_spent / float(sites_processed)
- sites_left = len(site_addresses) - sites_processed
- time_left = time_per_site * sites_left
- log.info("%s: DONE: %d sites in %.2fs (%.2fs per site); SKIPPED: %d sites; LEFT: %d sites in %.2fs",
- task_description,
- sites_processed,
- time_spent,
- time_per_site,
- sites_skipped,
- sites_left,
- time_left
- )
-
- if not self.isActiveMode():
- log.info("%s: stopped", task_description)
- else:
- log.info("%s: finished in %.2fs", task_description, time.time() - start_time)
-
- def peekSiteForVerification(self):
- check_files_interval = 60 * 60 * 24
- verify_files_interval = 60 * 60 * 24 * 10
- site_addresses = self.getSiteAddresses()
- random.shuffle(site_addresses)
- for site_address in site_addresses:
- site = self.getSite(site_address)
- if not site:
- continue
- mode = site.isFileVerificationExpired(check_files_interval, verify_files_interval)
- if mode:
- return (site_address, mode)
- return (None, None)
-
-
- def sitesVerificationThread(self):
- log.info("sitesVerificationThread started")
- short_timeout = 20
- long_timeout = 120
-
- self.sleep(long_timeout)
-
- while self.isActiveMode():
- site = None
- self.sleep(short_timeout)
- self.waitForInternetOnline()
-
- while self.isActiveMode() and self.shouldThrottleNewConnections():
- self.sleep(1)
-
- if not self.isActiveMode():
- break
-
- site_address, mode = self.peekSiteForVerification()
- if not site_address:
- self.sleep(long_timeout)
- continue
-
- while self.siteIsInUpdatePool(site_address) and self.isActiveMode():
- self.sleep(1)
-
- if not self.isActiveMode():
- break
-
- site = self.getSite(site_address)
- if not site:
- continue
-
- if mode == "verify":
- check_files = False
- verify_files = True
- elif mode == "check":
- check_files = True
- verify_files = False
- else:
- continue
-
- thread = self.spawnUpdateSite(site,
- check_files=check_files, verify_files=verify_files)
-
- log.info("sitesVerificationThread stopped")
-
- def sitesMaintenanceThread(self, mode="full"):
- log.info("sitesMaintenanceThread(%s) started" % mode)
+ if not sites_checking:
+ check_pool = gevent.pool.Pool(5)
+ # Check sites integrity
+ for site in sorted(list(self.sites.values()), key=lambda site: site.settings.get("modified", 0), reverse=True):
+ if not site.isServing():
+ continue
+ check_thread = check_pool.spawn(self.checkSite, site, check_files) # Check in new thread
+ time.sleep(2)
+ if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
+ check_thread.join(timeout=5)
+ self.log.debug("Checksites done in %.3fs" % (time.time() - s))
+ def cleanupSites(self):
+ import gc
startup = True
-
- short_timeout = 2
- min_long_timeout = 10
- max_long_timeout = 60 * 10
- long_timeout = min_long_timeout
- short_cycle_time_limit = 60 * 2
-
- while self.isActiveMode():
- self.sleep(long_timeout)
-
- while self.isActiveMode() and self.shouldThrottleNewConnections():
- self.sleep(1)
-
- if not self.isActiveMode():
- break
-
- start_time = time.time()
-
- log.debug(
- "Starting <%s> maintenance cycle: connections=%s, internet=%s",
- mode,
- len(self.connections), self.isInternetOnline()
+ time.sleep(5 * 60) # Sites already cleaned up on startup
+ peers_protected = set([])
+ while 1:
+ # Site health checks every 20 min
+ self.log.debug(
+ "Running site cleanup, connections: %s, internet: %s, protected peers: %s" %
+ (len(self.connections), self.has_internet, len(peers_protected))
)
- start_time = time.time()
- site_addresses = self.getSiteAddresses()
-
- sites_processed = 0
-
- for site_address in site_addresses:
- if not self.isActiveMode():
- break
-
- site = self.getSite(site_address)
- if not site:
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
continue
- log.debug("Running maintenance for site: %s", site.address_short)
+ if not startup:
+ site.cleanupPeers(peers_protected)
- done = site.runPeriodicMaintenance(startup=startup)
- site = None
- if done:
- sites_processed += 1
- self.sleep(short_timeout)
+ time.sleep(1) # Avoid sending requests too quickly
- # If we host hundreds of sites, the full maintenance cycle may take very
- # long time, especially on startup ( > 1 hour).
- # This means we are not able to run the maintenance procedure for active
- # sites frequently enough using just a single maintenance thread.
- # So we run 2 maintenance threads:
- # * One running full cycles.
- # * And one running short cycles for the most active sites.
- # When the short cycle runs out of the time limit, it restarts
- # from the beginning of the site list.
- if mode == "short" and time.time() - start_time > short_cycle_time_limit:
- break
+ peers_protected = set([])
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
+ continue
- log.debug("<%s> maintenance cycle finished in %.2fs. Total sites: %d. Processed sites: %d. Timeout: %d",
- mode,
- time.time() - start_time,
- len(site_addresses),
- sites_processed,
- long_timeout
- )
+ if site.peers:
+ with gevent.Timeout(10, exception=False):
+ site.announcer.announcePex()
- if sites_processed:
- long_timeout = max(int(long_timeout / 2), min_long_timeout)
- else:
- long_timeout = min(long_timeout + 1, max_long_timeout)
+ # Last check modification failed
+ if site.content_updated is False:
+ site.update()
+ elif site.bad_files:
+ site.retryBadFiles()
- site_addresses = None
+ if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
+ # Keep active connections if the site has been modified within 7 days
+ connected_num = site.needConnections(check_site_on_reconnect=True)
+
+ if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing
+ peers_protected.update([peer.key for peer in site.getConnectedPeers()])
+
+ time.sleep(1) # Avoid sending requests too quickly
+
+ site = None
+ gc.collect() # Implicit garbage collection
startup = False
- log.info("sitesMaintenanceThread(%s) stopped" % mode)
+ time.sleep(60 * 20)
- def keepAliveThread(self):
- # This thread is mostly useless on a system under load, since it never does
- # any works, if we have active traffic.
- #
- # We should initiate some network activity to detect the Internet outage
- # and avoid false positives. We normally have some network activity
- # initiated by various parts on the application as well as network peers.
- # So it's not a problem.
- #
- # However, if it actually happens that we have no network traffic for
- # some time (say, we host just a couple of inactive sites, and no peers
- # are interested in connecting to them), we initiate some traffic by
- # performing the update for a random site. It's way better than just
- # silly pinging a random peer for no profit.
- log.info("keepAliveThread started")
- while self.isActiveMode():
- self.waitForInternetOnline()
+ def announceSite(self, site):
+ site.announce(mode="update", pex=False)
+ active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
+ if site.settings["own"] or active_site:
+ # Check connections more frequently on own and active sites to speed up first connections
+ site.needConnections(check_site_on_reconnect=True)
+ site.sendMyHashfield(3)
+ site.updateHashfield(3)
- threshold = self.internet_outage_threshold / 2.0
-
- self.sleep(threshold / 2.0)
-
- while self.isActiveMode() and self.shouldThrottleNewConnections():
- self.sleep(1)
-
- if not self.isActiveMode():
- break
-
- last_activity_time = max(
- self.last_successful_internet_activity_time,
- self.last_outgoing_internet_activity_time)
- now = time.time()
- if not len(self.getSites()):
- continue
- if last_activity_time > now - threshold:
- continue
- if len(self.update_pool) != 0:
- continue
-
- log.info("No network activity for %.2fs. Running an update for a random site.",
- now - last_activity_time
- )
- self.update_pool.spawn(self.updateRandomSite, force=True)
- log.info("keepAliveThread stopped")
-
- # Periodic reloading of tracker files
- def reloadTrackerFilesThread(self):
- # TODO:
- # This should probably be more sophisticated.
- # We should check if the files have actually changed,
- # and do it more often.
- log.info("reloadTrackerFilesThread started")
- interval = 60 * 10
- while self.isActiveMode():
- self.sleep(interval)
- if not self.isActiveMode():
- break
+ # Announce sites every 20 min
+ def announceSites(self):
+ time.sleep(5 * 60) # Sites already announced on startup
+ while 1:
config.loadTrackersFile()
- log.info("reloadTrackerFilesThread stopped")
+ s = time.time()
+ for address, site in list(self.sites.items()):
+ if not site.isServing():
+ continue
+ gevent.spawn(self.announceSite, site).join(timeout=10)
+ time.sleep(1)
+ taken = time.time() - s
+
+ # Query all trackers one by one, evenly distributed over 20 minutes
+ sleep = max(0, 60 * 20 / len(config.trackers) - taken)
+
+ self.log.debug("Site announce tracker done in %.3fs, sleeping for %.3fs..." % (taken, sleep))
+ time.sleep(sleep)
# Detects if computer back from wakeup
- def wakeupWatcherThread(self):
- log.info("wakeupWatcherThread started")
+ def wakeupWatcher(self):
last_time = time.time()
last_my_ips = socket.gethostbyname_ex('')[2]
- while self.isActiveMode():
- self.sleep(30)
- if not self.isActiveMode():
- break
+ while 1:
+ time.sleep(30)
is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
if is_time_changed:
# If it took more than 3 minutes, the computer was in sleep mode
- log.info(
+ self.log.info(
"Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
(last_time, time.time(), time.time() - last_time)
)
@@ -659,130 +360,50 @@ class FileServer(ConnectionServer):
my_ips = socket.gethostbyname_ex('')[2]
is_ip_changed = my_ips != last_my_ips
if is_ip_changed:
- log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
+ self.log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))
if is_time_changed or is_ip_changed:
- invalid_interval=(
- last_time - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
- time.time()
- )
- self.invalidateUpdateTime(invalid_interval)
- self.recheck_port = True
- self.spawn(self.updateSites)
+ self.checkSites(check_files=False, force_port_check=True)
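+ # On wakeup or IP change, act like a fresh start: re-check the open
+ # port and update every site, but skip the full file check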
last_time = time.time()
last_my_ips = my_ips
- log.info("wakeupWatcherThread stopped")
-
- def setOfflineMode(self, offline_mode):
- ConnectionServer.setOfflineMode(self, offline_mode)
- self.setupActiveMode()
-
- def setPassiveMode(self, passive_mode):
- if self.passive_mode == passive_mode:
- return
- self.passive_mode = passive_mode
- if self.passive_mode:
- log.info("passive mode is ON");
- else:
- log.info("passive mode is OFF");
- self.setupActiveMode()
-
- def isPassiveMode(self):
- return self.passive_mode
-
- def setupActiveMode(self):
- active_mode = (not self.passive_mode) and (not self.isOfflineMode())
- if self.active_mode == active_mode:
- return
- self.active_mode = active_mode
- if self.active_mode:
- log.info("active mode is ON");
- self.enterActiveMode();
- else:
- log.info("active mode is OFF");
- self.leaveActiveMode();
-
- def killActiveModeThreads(self):
- for key, thread in list(self.active_mode_threads.items()):
- if thread:
- if not thread.ready():
- log.info("killing %s" % key)
- gevent.kill(thread)
- del self.active_mode_threads[key]
-
- def leaveActiveMode(self):
- pass
-
- def enterActiveMode(self):
- self.killActiveModeThreads()
- x = self.active_mode_threads
- p = self.active_mode_thread_pool
- x["thread_keep_alive"] = p.spawn(self.keepAliveThread)
- x["thread_wakeup_watcher"] = p.spawn(self.wakeupWatcherThread)
- x["thread_sites_verification"] = p.spawn(self.sitesVerificationThread)
- x["thread_reload_tracker_files"] = p.spawn(self.reloadTrackerFilesThread)
- x["thread_sites_maintenance_full"] = p.spawn(self.sitesMaintenanceThread, mode="full")
- x["thread_sites_maintenance_short"] = p.spawn(self.sitesMaintenanceThread, mode="short")
- x["thread_initial_site_updater"] = p.spawn(self.updateSites)
-
- # Returns True, if an active mode thread should keep going,
- # i.e active mode is enabled and the server not going to shutdown
- def isActiveMode(self):
- self.setupActiveMode()
- if not self.active_mode:
- return False
- if not self.running:
- return False
- if self.stopping:
- return False
- return True
# Bind and start serving sites
- # If passive_mode is False, FileServer starts the full-featured file serving:
- # * Checks for updates at startup.
- # * Checks site's integrity.
- # * Runs periodic update checks.
- # * Watches for internet being up or down and for computer to wake up and runs update checks.
- # If passive_mode is True, all the mentioned activity is disabled.
- def start(self, passive_mode=False, check_sites=None, check_connections=True):
-
- # Backward compatibility for a misnamed argument:
- if check_sites is not None:
- passive_mode = not check_sites
-
+ def start(self, check_sites=True):
if self.stopping:
return False
- ConnectionServer.start(self, check_connections=check_connections)
+ ConnectionServer.start(self)
try:
self.stream_server.start()
except Exception as err:
- log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
+ self.log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))
+ self.sites = self.site_manager.list()
if config.debug:
# Auto reload FileRequest on change
from Debug import DebugReloader
DebugReloader.watcher.addCallback(self.reload)
- # XXX: for initializing self.sites
- # Remove this line when self.sites gets completely unused
- self.getSites()
+ if check_sites: # Open port, update sites, check file integrity
+ gevent.spawn(self.checkSites)
- self.setPassiveMode(passive_mode)
+ thread_announce_sites = gevent.spawn(self.announceSites)
+ thread_cleanup_sites = gevent.spawn(self.cleanupSites)
+ thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
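+ # These greenlets run for the whole server lifetime; listen() below
+ # blocks until the server is stopped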
ConnectionServer.listen(self)
- log.info("Stopped.")
+ self.log.debug("Stopped.")
- def stop(self, ui_websocket=None):
+ def stop(self):
if self.running and self.portchecker.upnp_port_opened:
- log.debug('Closing port %d' % self.port)
+ self.log.debug('Closing port %d' % self.port)
try:
self.portchecker.portClose(self.port)
- log.info('Closed port via upnp.')
+ self.log.info('Closed port via upnp.')
except Exception as err:
- log.info("Failed at attempt to use upnp to close port: %s" % err)
+ self.log.info("Failed at attempt to use upnp to close port: %s" % err)
- return ConnectionServer.stop(self, ui_websocket=ui_websocket)
+ return ConnectionServer.stop(self)
diff --git a/src/Peer/Peer.py b/src/Peer/Peer.py
index 43c2932f..03cc1f47 100644
--- a/src/Peer/Peer.py
+++ b/src/Peer/Peer.py
@@ -20,135 +20,51 @@ if config.use_tempfiles:
# Communicate with remote peers
@PluginManager.acceptPlugins
class Peer(object):
+ __slots__ = (
+ "ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield",
+ "time_added", "has_hashfield", "is_tracker_connection", "time_my_hashfield_sent", "last_ping", "reputation",
+ "last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time"
+ )
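+ # __slots__ drops the per-instance __dict__ to save memory, since a busy
+ # node may hold thousands of Peer objects. "hashfield" must stay in the
+ # tuple because __getattr__ assigns it lazily.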
+
def __init__(self, ip, port, site=None, connection_server=None):
self.ip = ip
self.port = port
self.site = site
self.key = "%s:%s" % (ip, port)
- self.ip_type = None
-
- self.removed = False
-
- self.log_level = logging.DEBUG
- self.connection_error_log_level = logging.DEBUG
-
self.connection = None
self.connection_server = connection_server
self.has_hashfield = False # Lazy hashfield object not created yet
self.time_hashfield = None # Last time peer's hashfield was downloaded
self.time_my_hashfield_sent = None # Last time my hashfield sent to peer
self.time_found = time.time() # Time of last found in the torrent tracker
- self.time_response = 0 # Time of last successful response from peer
+ self.time_response = None # Time of last successful response from peer
self.time_added = time.time()
self.last_ping = None # Last response time for ping
- self.last_pex = 0 # Last query/response time for pex
self.is_tracker_connection = False # Tracker connection instead of normal peer
self.reputation = 0 # More likely to connect if larger
self.last_content_json_update = 0.0 # Modify date of last received content.json
- self.protected = 0
- self.reachable = None
self.connection_error = 0 # Series of connection error
self.hash_failed = 0 # Number of bad files from peer
self.download_bytes = 0 # Bytes downloaded
self.download_time = 0 # Time spent to download
- self.protectedRequests = ["getFile", "streamFile", "update", "listModified"]
-
def __getattr__(self, key):
if key == "hashfield":
self.has_hashfield = True
self.hashfield = PeerHashfield()
return self.hashfield
else:
- # Raise appropriately formatted attribute error
- return object.__getattribute__(self, key)
-
- def log(self, text, log_level = None):
- if log_level is None:
- log_level = self.log_level
- if log_level <= logging.DEBUG:
- if not config.verbose:
- return # Only log if we are in debug mode
-
- logger = None
+ # Use object.__getattribute__ to avoid infinite __getattr__
+ # recursion on missing attributes
+ return object.__getattribute__(self, key)
+ def log(self, text):
+ if not config.verbose:
+ return # Only log if we are in debug mode
if self.site:
- logger = self.site.log
+ self.site.log.debug("%s:%s %s" % (self.ip, self.port, text))
else:
- logger = logging.getLogger()
-
- logger.log(log_level, "%s:%s %s" % (self.ip, self.port, text))
-
- # Protect connection from being closed by site.cleanupPeers()
- def markProtected(self, interval=60*2):
- self.protected = max(self.protected, time.time() + interval)
-
- def isProtected(self):
- if self.protected > 0:
- if self.protected < time.time():
- self.protected = 0
- return self.protected > 0
-
- def isTtlExpired(self, ttl):
- last_activity = max(self.time_found, self.time_response)
- return (time.time() - last_activity) > ttl
-
- # Since 0.8.0
- def isConnected(self):
- if self.connection and not self.connection.connected:
- self.connection = None
- return self.connection and self.connection.connected
-
- # Peer proved to be connectable recently
- # Since 0.8.0
- def isConnectable(self):
- if self.connection_error >= 1: # The last connection attempt failed
- return False
- if time.time() - self.time_response > 60 * 60 * 2: # Last successful response more than 2 hours ago
- return False
- return self.isReachable()
-
- # Since 0.8.0
- def isReachable(self):
- if self.reachable is None:
- self.updateCachedState()
- return self.reachable
-
- # Since 0.8.0
- def getIpType(self):
- if not self.ip_type:
- self.updateCachedState()
- return self.ip_type
-
- # We cache some ConnectionServer-related state for better performance.
- # This kind of state currently doesn't change during a program session,
- # and it's safe to read and cache it just once. But future versions
- # may bring more pieces of dynamic configuration. So we update the state
- # on each peer.found().
- def updateCachedState(self):
- connection_server = self.getConnectionServer()
- if not self.port or self.port == 1: # Port 1 considered as "no open port"
- self.reachable = False
- else:
- self.reachable = connection_server.isIpReachable(self.ip)
- self.ip_type = connection_server.getIpType(self.ip)
-
-
- # FIXME:
- # This should probably be changed.
- # When creating a peer object, the caller must provide either `connection_server`,
- # or `site`, so Peer object is able to use `site.connection_server`.
- def getConnectionServer(self):
- if self.connection_server:
- connection_server = self.connection_server
- elif self.site:
- connection_server = self.site.connection_server
- else:
- import main
- connection_server = main.file_server
- return connection_server
+ logging.debug("%s:%s %s" % (self.ip, self.port, text))
# Connect to host
def connect(self, connection=None):
@@ -171,30 +87,29 @@ class Peer(object):
self.connection = None
try:
- connection_server = self.getConnectionServer()
+ if self.connection_server:
+ connection_server = self.connection_server
+ elif self.site:
+ connection_server = self.site.connection_server
+ else:
+ import main
+ connection_server = main.file_server
self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
- if self.connection and self.connection.connected:
- self.reputation += 1
- self.connection.sites += 1
+ self.reputation += 1
+ self.connection.sites += 1
except Exception as err:
self.onConnectionError("Getting connection error")
self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
- (Debug.formatException(err), self.connection_error, self.hash_failed),
- log_level=self.connection_error_log_level)
+ (Debug.formatException(err), self.connection_error, self.hash_failed))
self.connection = None
return self.connection
- def disconnect(self, reason="Unknown"):
- if self.connection:
- self.connection.close(reason)
- self.connection = None
-
# Check if we have connection to peer
def findConnection(self):
if self.connection and self.connection.connected: # We have connection to peer
return self.connection
else: # Try to find from other sites connections
- self.connection = self.getConnectionServer().getConnection(self.ip, self.port, create=False, site=self.site)
+ self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site)
if self.connection:
self.connection.sites += 1
return self.connection
@@ -228,13 +143,9 @@ class Peer(object):
if source in ("tracker", "local"):
self.site.peers_recent.appendleft(self)
self.time_found = time.time()
- self.updateCachedState()
# Send a command to peer and return response value
def request(self, cmd, params={}, stream_to=None):
- if self.removed:
- return False
-
if not self.connection or self.connection.closed:
self.connect()
if not self.connection:
@@ -245,8 +156,6 @@ class Peer(object):
for retry in range(1, 4): # Retry 3 times
try:
- if cmd in self.protectedRequests:
- self.markProtected()
if not self.connection:
raise Exception("No connection found")
res = self.connection.request(cmd, params, stream_to)
@@ -279,9 +188,6 @@ class Peer(object):
# Get a file content from peer
def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
- if self.removed:
- return False
-
if file_size and file_size > 5 * 1024 * 1024:
max_read_size = 1024 * 1024
else:
@@ -335,14 +241,11 @@ class Peer(object):
return buff
# Send a ping request
- def ping(self, timeout=10.0, tryes=3):
- if self.removed:
- return False
-
+ def ping(self):
response_time = None
- for retry in range(1, tryes): # Retry 3 times
+ for retry in range(1, 3): # Retry up to 2 times (range end is exclusive)
s = time.time()
- with gevent.Timeout(timeout, False):
+ with gevent.Timeout(10.0, False): # 10 sec timeout, don't raise exception
res = self.request("ping")
if res and "body" in res and res["body"] == b"Pong!":
@@ -361,18 +264,10 @@ class Peer(object):
return response_time
# Request peer exchange from peer
- def pex(self, site=None, need_num=5, request_interval=60*2):
- if self.removed:
- return False
-
+ def pex(self, site=None, need_num=5):
if not site:
site = self.site # If no site defined request peers for this site
- if self.last_pex + request_interval >= time.time():
- return False
-
- self.last_pex = time.time()
-
# Give back 5 connectable peers
packed_peers = helper.packPeers(self.site.getConnectablePeers(5, allow_private=False))
request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
@@ -381,7 +276,6 @@ class Peer(object):
if packed_peers["ipv6"]:
request["peers_ipv6"] = packed_peers["ipv6"]
res = self.request("pex", request)
- self.last_pex = time.time()
if not res or "error" in res:
return False
added = 0
@@ -413,14 +307,9 @@ class Peer(object):
# List modified files since the date
# Return: {inner_path: modification date,...}
def listModified(self, since):
- if self.removed:
- return False
return self.request("listModified", {"since": since, "site": self.site.address})
def updateHashfield(self, force=False):
- if self.removed:
- return False
-
# Don't update hashfield again in 5 min
if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
return False
@@ -436,9 +325,6 @@ class Peer(object):
# Find peers for hashids
# Return: {hash1: ["ip:port", "ip:port",...],...}
def findHashIds(self, hash_ids):
- if self.removed:
- return False
-
res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
if not res or "error" in res or type(res) is not dict:
return False
@@ -482,9 +368,6 @@ class Peer(object):
return True
def publish(self, address, inner_path, body, modified, diffs=[]):
- if self.removed:
- return False
-
if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
# To save bw we don't push big content.json to peers
body = b""
@@ -499,22 +382,20 @@ class Peer(object):
# Stop and remove from site
def remove(self, reason="Removing"):
- self.removed = True
- self.log("Removing peer with reason: <%s>. Connection error: %s, Hash failed: %s" % (reason, self.connection_error, self.hash_failed))
- if self.site:
- self.site.deregisterPeer(self)
- # No way: self.site = None
- # We don't assign None to self.site here because it leads to random exceptions in various threads,
- # that hold references to the peer and still believe it belongs to the site.
+ self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed))
+ if self.site and self.key in self.site.peers:
+ del(self.site.peers[self.key])
- self.disconnect(reason)
+ if self.site and self in self.site.peers_recent:
+ self.site.peers_recent.remove(self)
+
+ if self.connection:
+ self.connection.close(reason)
# - EVENTS -
# On connection error
def onConnectionError(self, reason="Unknown"):
- if not self.getConnectionServer().isInternetOnline():
- return
self.connection_error += 1
if self.site and len(self.site.peers) > 200:
limit = 3
@@ -522,7 +403,7 @@ class Peer(object):
limit = 6
self.reputation -= 1
if self.connection_error >= limit: # Dead peer
- self.remove("Connection error limit reached: %s. Provided message: %s" % (limit, reason))
+ self.remove("Peer connection: %s" % reason)
# Done working with peer
def onWorkerDone(self):
diff --git a/src/Plugin/PluginManager.py b/src/Plugin/PluginManager.py
index dbafa98f..56540e60 100644
--- a/src/Plugin/PluginManager.py
+++ b/src/Plugin/PluginManager.py
@@ -16,7 +16,9 @@ import plugins
class PluginManager:
def __init__(self):
self.log = logging.getLogger("PluginManager")
- self.path_plugins = os.path.abspath(os.path.dirname(plugins.__file__))
+ self.path_plugins = None
+ if plugins.__file__:
+ self.path_plugins = os.path.dirname(os.path.abspath(plugins.__file__))
self.path_installed_plugins = config.data_dir + "/__plugins__"
self.plugins = defaultdict(list) # Registered plugins (key: class name, value: list of plugins for class)
self.subclass_order = {} # Record the load order of the plugins, to keep it after reload
@@ -32,7 +34,8 @@ class PluginManager:
self.config.setdefault("builtin", {})
- sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
+ if self.path_plugins:
+ sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
self.migratePlugins()
if config.debug: # Auto reload Plugins on file change
@@ -127,6 +130,8 @@ class PluginManager:
def loadPlugins(self):
all_loaded = True
s = time.time()
+ if self.path_plugins is None:
+ return
for plugin in self.listPlugins():
self.log.debug("Loading plugin: %s (%s)" % (plugin["name"], plugin["source"]))
if plugin["source"] != "builtin":
diff --git a/src/Site/Site.py b/src/Site/Site.py
index 46e19169..d6179307 100644
--- a/src/Site/Site.py
+++ b/src/Site/Site.py
@@ -6,13 +6,11 @@ import time
import random
import sys
import hashlib
-import itertools
import collections
import base64
import gevent
import gevent.pool
-import gevent.lock
import util
from Config import config
@@ -29,125 +27,6 @@ from Plugin import PluginManager
from File import FileServer
from .SiteAnnouncer import SiteAnnouncer
from . import SiteManager
-from . import SiteHelpers
-
-def lerp(val_min, val_max, scale):
- return scale * (val_max - val_min) + val_min
-
-class ScaledTimeoutHandler:
- def __init__(self, val_min, val_max, handler=None, scaler=None):
- self.val_min = val_min
- self.val_max = val_max
- self.timestamp = 0
- self.handler = handler
- self.scaler = scaler
- self.log = logging.getLogger("ScaledTimeoutHandler")
-
- def isExpired(self, scale):
- interval = lerp(self.val_min, self.val_max, scale)
- expired_at = self.timestamp + interval
- now = time.time()
- expired = (now > expired_at)
- if expired:
- self.log.debug(
- "Expired: [%d..%d]: scale=%f, interval=%f",
- self.val_min, self.val_max, scale, interval)
- return expired
-
- def done(self):
- self.timestamp = time.time()
-
- def run(self, *args, **kwargs):
- do_run = kwargs["force"] or self.isExpired(self.scaler())
- if do_run:
- result = self.handler(*args, **kwargs)
- if result:
- self.done()
- return result
- else:
- return None
-
-class BackgroundPublisher:
- def __init__(self, site, published=[], limit=5, inner_path="content.json", diffs={}):
- self.site = site
- self.threads = gevent.pool.Pool(None)
- self.inner_path = inner_path
- self.stages = [
- {
- "interval": ScaledTimeoutHandler(60, 60),
- "max_tries": 2,
- "tries": 0,
- "limit": 0,
- "done": False
- },
- {
- "interval": ScaledTimeoutHandler(60 * 10, 60 * 10),
- "max_tries": 5,
- "tries": 0,
- "limit": 0,
- "done": False
- }
- ]
- self.reinit(published=published, limit=limit, diffs=diffs)
-
- def reinit(self, published=[], limit=5, diffs={}):
- self.threads.kill()
- self.published = published
- self.diffs = diffs
-
- i = 0
- for stage in self.stages:
- stage["nr"] = i
- stage["limit"] = limit * (2 + i)
- stage["tries"] = 0
- stage["done"] = False
- stage["thread"] = None
- if i > 0:
- stage["interval"].done()
- i += 1
-
- def isStageComplete(self, stage):
- if not stage["done"]:
- stage["done"] = len(self.published) >= stage["limit"]
- if not stage["done"]:
- stage["done"] = stage["tries"] >= stage["max_tries"]
- return stage["done"]
-
- def isComplete(self):
- for stage in self.stages:
- if not self.isStageComplete(stage):
- return False
- return True
-
- def process(self):
- for stage in self.stages:
- if not self.isStageComplete(stage):
- self.processStage(stage)
- break
- return self.isComplete()
-
- def processStage(self, stage):
- if not stage["interval"].isExpired(0):
- return
-
- if len(self.site.peers) < stage["limit"]:
- self.site.announce(mode="more")
-
- if not stage["thread"]:
- peers = list(self.site.peers.values())
- random.shuffle(peers)
- stage["thread"] = self.threads.spawn(self.site.publisher,
- self.inner_path, peers, self.published, stage["limit"], diffs=self.diffs, max_retries=1)
-
- stage["tries"] += 1
- stage["interval"].done()
-
- self.site.log.info("Background publisher: Stage #%s: %s published to %s/%s peers",
- stage["nr"], self.inner_path, len(self.published), stage["limit"])
-
- def finalize(self):
- self.threads.kill()
- self.site.log.info("Background publisher: Published %s to %s peers", self.inner_path, len(self.published))
@PluginManager.acceptPlugins
@@ -161,35 +40,23 @@ class Site(object):
self.log = logging.getLogger("Site:%s" % self.address_short)
self.addEventListeners()
- self.periodic_maintenance_handlers = [
- ScaledTimeoutHandler(
- config.site_announce_interval_max * 60,
- config.site_announce_interval_min * 60,
- handler=self.periodicMaintenanceHandler_announce,
- scaler=self.getAnnounceRating),
- ScaledTimeoutHandler(
- config.site_peer_check_interval_max * 60,
- config.site_peer_check_interval_min * 60,
- handler=self.periodicMaintenanceHandler_peer_check,
- scaler=self.getAnnounceRating),
- ScaledTimeoutHandler(
- config.site_update_check_interval_max * 60,
- config.site_update_check_interval_min * 60,
- handler=self.periodicMaintenanceHandler_general,
- scaler=self.getActivityRating)
- ]
+ self.content = None # Load content.json
+ self.peers = {} # Key: ip:port, Value: Peer.Peer
+ self.peers_recent = collections.deque(maxlen=150)
+ self.peer_blacklist = SiteManager.peer_blacklist # Ignore these peers (e.g. myself)
+ self.greenlet_manager = GreenletManager.GreenletManager() # Running greenlets
+ self.worker_manager = WorkerManager(self) # Handle site download from other peers
+ self.bad_files = {} # SHA check failed files, need to redownload {"inner.content": 1} (key: file, value: failed accept)
+ self.content_updated = None # content.json update time
+ self.notifications = [] # Pending notifications displayed once on page load [error|ok|info, message, timeout]
+ self.page_requested = False # Page viewed in browser
+ self.websockets = [] # Active site websocket connections
- self.background_publishers = {}
- self.background_publishers_lock = gevent.lock.RLock()
-
- # FZS = forced zero "since"
- self.fzs_range = 20
- self.fzs_interval = 30 * 60
- self.fzs_count = random.randint(0, self.fzs_range / 4)
- self.fzs_timestamp = 0
-
- ##############################################
self.connection_server = None
+ self.loadSettings(settings) # Load settings from sites.json
+ self.storage = SiteStorage(self, allow_create=allow_create) # Save and load site files
+ self.content_manager = ContentManager(self)
+ self.content_manager.loadContents() # Load content.json files
if "main" in sys.modules: # import main has side-effects, breaks tests
import main
if "file_server" in dir(main): # Use global file server by default if possible
@@ -199,32 +66,9 @@ class Site(object):
self.connection_server = main.file_server
else:
self.connection_server = FileServer()
- ##############################################
-
- self.content = None # Load content.json
- self.peers = {} # Key: ip:port, Value: Peer.Peer
- self.peers_recent = collections.deque(maxlen=150)
- self.peer_blacklist = SiteManager.peer_blacklist # Ignore these peers (e.g. myself)
- self.greenlet_manager = GreenletManager.GreenletManager(self.connection_server.site_pool) # Running greenlets
- self.worker_manager = WorkerManager(self) # Handle site download from other peers
- self.bad_files = {} # SHA check failed files, need to redownload {"inner.content": 1} (key: file, value: failed accept)
- self.content_updated = None # Content.js update time
- self.last_online_update = 0
- self.startup_announce_done = 0
- self.notifications = [] # Pending notifications displayed once on page load [error|ok|info, message, timeout]
- self.page_requested = False # Page viewed in browser
- self.websockets = [] # Active site websocket connections
-
- self.loadSettings(settings) # Load settings from sites.json
- self.storage = SiteStorage(self, allow_create=allow_create) # Save and load site files
- self.content_manager = ContentManager(self)
- self.content_manager.loadContents() # Load content.json files
self.announcer = SiteAnnouncer(self) # Announce and get peer list from other nodes
- self.peer_connector = SiteHelpers.PeerConnector(self) # Connect more peers in background by request
- self.persistent_peer_req = None # The persistent peer requirement, managed by maintenance handler
-
if not self.settings.get("wrapper_key"): # To auth websocket permissions
self.settings["wrapper_key"] = CryptHash.random()
self.log.debug("New wrapper key: %s" % self.settings["wrapper_key"])
@@ -245,10 +89,6 @@ class Site(object):
settings = json.load(open("%s/sites.json" % config.data_dir)).get(self.address)
if settings:
self.settings = settings
- if "check_files_timestamp" not in settings:
- settings["check_files_timestamp"] = 0
- if "verify_files_timestamp" not in settings:
- settings["verify_files_timestamp"] = 0
if "cache" not in settings:
settings["cache"] = {}
if "size_files_optional" not in settings:
@@ -264,17 +104,8 @@ class Site(object):
self.bad_files[inner_path] = min(self.bad_files[inner_path], 20)
else:
self.settings = {
- "check_files_timestamp": 0,
- "verify_files_timestamp": 0,
- "own": False,
- "serving": True,
- "permissions": [],
- "cache": {"bad_files": {}},
- "size_files_optional": 0,
- "added": int(time.time()),
- "downloaded": None,
- "optional_downloaded": 0,
- "size_optional": 0
+ "own": False, "serving": True, "permissions": [], "cache": {"bad_files": {}}, "size_files_optional": 0,
+ "added": int(time.time()), "downloaded": None, "optional_downloaded": 0, "size_optional": 0
} # Default
if config.download_optional == "auto":
self.settings["autodownloadoptional"] = True
@@ -294,38 +125,12 @@ class Site(object):
SiteManager.site_manager.load(False)
SiteManager.site_manager.saveDelayed()
- # Returns True if any site-related activity should be interrupted
- # due to connection server being stopped or site being deleted
- def isStopping(self):
- return self.connection_server.stopping or self.settings.get("deleting", False)
-
- # Returns False if any network activity for the site should not happen
def isServing(self):
- if self.connection_server.isOfflineMode():
- return False
- elif self.isStopping():
+ if config.offline:
return False
else:
return self.settings["serving"]
- # Spawns a thread that will be waited for on server being stopped (and killed after a timeout).
- # Short cut to self.greenlet_manager.spawn()
- def spawn(self, *args, **kwargs):
- thread = self.greenlet_manager.spawn(*args, **kwargs)
- return thread
-
- # Spawns a thread that will be waited for on server being stopped (and killed after a timeout).
- # Short cut to self.greenlet_manager.spawnLater()
- def spawnLater(self, *args, **kwargs):
- thread = self.greenlet_manager.spawnLater(*args, **kwargs)
- return thread
-
- def checkSendBackLRU(self, peer, inner_path, remote_modified):
- return SiteManager.site_manager.checkSendBackLRU(self, peer, inner_path, remote_modified)
-
- def addToSendBackLRU(self, peer, inner_path, modified):
- return SiteManager.site_manager.addToSendBackLRU(self, peer, inner_path, modified)
-
def getSettingsCache(self):
back = {}
back["bad_files"] = self.bad_files
@@ -336,32 +141,9 @@ class Site(object):
def getSizeLimit(self):
return self.settings.get("size_limit", int(config.size_limit))
- def isFileVerificationExpired(self, check_files_interval, verify_files_interval):
- now = time.time()
- check_files_timestamp = self.settings.get("check_files_timestamp", 0)
- verify_files_timestamp = self.settings.get("verify_files_timestamp", 0)
-
- if check_files_interval is None:
- check_files_expiration = now + 1
- else:
- check_files_expiration = check_files_timestamp + check_files_interval
-
- if verify_files_interval is None:
- verify_files_expiration = now + 1
- else:
- verify_files_expiration = verify_files_timestamp + verify_files_interval
-
- if verify_files_expiration < now:
- return "verify"
-
- if check_files_expiration < now:
- return "check"
-
- return False
-
# Next size limit based on current size
def getNextSizeLimit(self):
- size_limits = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]
+ size_limits = [25, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]
size = self.settings.get("size", 0)
for size_limit in size_limits:
if size * 1.2 < size_limit * 1024 * 1024:
@@ -371,7 +153,7 @@ class Site(object):
def isAddedRecently(self):
return time.time() - self.settings.get("added", 0) < 60 * 60 * 24
- # Download all files from content.json
+ # Download all files from content.json
def downloadContent(self, inner_path, download_files=True, peer=None, check_modifications=False, diffs={}):
s = time.time()
if config.verbose:
@@ -461,7 +243,7 @@ class Site(object):
# Optionals files
if inner_path == "content.json":
- self.spawn(self.updateHashfield)
+ gevent.spawn(self.updateHashfield)
for file_relative_path in list(self.content_manager.contents[inner_path].get("files_optional", {}).keys()):
file_inner_path = content_inner_dir + file_relative_path
@@ -480,7 +262,7 @@ class Site(object):
include_threads = []
for file_relative_path in list(self.content_manager.contents[inner_path].get("includes", {}).keys()):
file_inner_path = content_inner_dir + file_relative_path
- include_thread = self.spawn(self.downloadContent, file_inner_path, download_files=download_files, peer=peer)
+ include_thread = gevent.spawn(self.downloadContent, file_inner_path, download_files=download_files, peer=peer)
include_threads.append(include_thread)
if config.verbose:
@@ -500,11 +282,6 @@ class Site(object):
inner_path, time.time() - s, len(self.worker_manager.tasks)
))
-
- # If no file tasks have been started, worker_manager.checkComplete()
- # never called. So call it explicitly.
- self.greenlet_manager.spawn(self.worker_manager.checkComplete)
-
return True
# Return bad files with less than 3 retry
@@ -560,9 +337,9 @@ class Site(object):
)
if self.isAddedRecently():
- self.spawn(self.announce, mode="start", force=True)
+ gevent.spawn(self.announce, mode="start", force=True)
else:
- self.spawn(self.announce, mode="update")
+ gevent.spawn(self.announce, mode="update")
if check_size: # Check the size first
valid = self.downloadContent("content.json", download_files=False) # Just download content.json files
@@ -614,18 +391,14 @@ class Site(object):
self.log.debug("Ended downloadFile pool len: %s, skipped: %s" % (len(inner_paths), num_skipped))
# Update worker, try to find client that supports listModifications command
- def updater(self, peers_try, queried, need_queries, since):
+ def updater(self, peers_try, queried, since):
threads = []
while 1:
- if not peers_try or len(queried) >= need_queries: # Stop after 3 successful query
+ if not peers_try or len(queried) >= 3: # Stop after 3 successful queries
break
peer = peers_try.pop(0)
-
- if peer in queried:
- continue
-
if config.verbose:
- self.log.debug("CheckModifications: Trying to get updates from: %s Left: %s" % (peer, peers_try))
+ self.log.debug("CheckModifications: Try to get updates from: %s Left: %s" % (peer, peers_try))
res = None
with gevent.Timeout(20, exception=False):
@@ -635,94 +408,37 @@ class Site(object):
continue # Failed query
queried.append(peer)
-
-
modified_contents = []
- send_back = []
- send_back_limit = config.send_back_limit
- send_back_skipped = 0
- peer_modified_files = res["modified_files"]
- my_modified_files = self.content_manager.listModified(since)
-
- inner_paths = itertools.chain(peer_modified_files.keys(), my_modified_files.keys())
- seen_inner_paths = {}
- for inner_path in inner_paths: # Check if the peer has newer files than we have
- if seen_inner_paths.get(inner_path, False):
- continue
- seen_inner_paths[inner_path] = True
-
- peer_modified = int(peer_modified_files.get(inner_path, 0))
- my_modified = int(my_modified_files.get(inner_path, 0))
-
- diff = peer_modified - my_modified
- if diff == 0:
- continue
- has_newer = diff > 0
- has_older = diff < 0
-
- if inner_path not in self.bad_files and not self.content_manager.isArchived(inner_path, peer_modified):
- if has_newer: # We don't have this file or we have older version
+ my_modified = self.content_manager.listModified(since)
+ num_old_files = 0
+ for inner_path, modified in res["modified_files"].items(): # Check if the peer has newer files than we have
+ has_newer = int(modified) > my_modified.get(inner_path, 0)
+ has_older = int(modified) < my_modified.get(inner_path, 0)
+ if inner_path not in self.bad_files and not self.content_manager.isArchived(inner_path, modified):
+ if has_newer:
+ # We don't have this file, or ours is older
modified_contents.append(inner_path)
- self.bad_files[inner_path] = self.bad_files.get(inner_path, 1)
- if has_older: # The remote peer doesn't have this file or it has older version
- if self.checkSendBackLRU(peer, inner_path, peer_modified):
- send_back_skipped += 1
- else:
- send_back.append(inner_path)
-
- inner_paths = None
- seen_inner_paths = None
-
+ self.bad_files[inner_path] = self.bad_files.get(inner_path, 0) + 1
+ if has_older and num_old_files < 5:
+ num_old_files += 1
+ self.log.debug("CheckModifications: %s client has older version of %s, publishing there (%s/5)..." % (peer, inner_path, num_old_files))
+ gevent.spawn(self.publisher, inner_path, [peer], [], 1)
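+ # Push our newer copy back to the stale peer, limited to 5
+ # such peers per query to bound the upload cost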
if modified_contents:
- self.log.info("CheckModifications: %s new modified files from %s" % (len(modified_contents), peer))
- modified_contents.sort(key=lambda inner_path: 0 - peer_modified_files[inner_path]) # Download newest first
- for inner_path in modified_contents:
- self.log.info("CheckModifications: %s: %s > %s" % (
- inner_path, peer_modified_files.get(inner_path, 0), my_modified_files.get(inner_path, 0)
- ))
- t = self.spawn(self.pooledDownloadContent, modified_contents, only_if_bad=True)
+ self.log.debug("CheckModifications: %s new modified file from %s" % (len(modified_contents), peer))
+ modified_contents.sort(key=lambda inner_path: 0 - res["modified_files"][inner_path]) # Download newest first
+ t = gevent.spawn(self.pooledDownloadContent, modified_contents, only_if_bad=True)
threads.append(t)
-
- if send_back:
- self.log.info("CheckModifications: %s has older versions of %s files" % (peer, len(send_back)))
- if len(send_back) > send_back_limit:
- self.log.info("CheckModifications: choosing %s random files to publish back" % (send_back_limit))
- random.shuffle(send_back)
- send_back = send_back[0:send_back_limit]
- for inner_path in send_back:
- self.log.info("CheckModifications: %s: %s < %s" % (
- inner_path, peer_modified_files.get(inner_path, 0), my_modified_files.get(inner_path, 0)
- ))
- self.spawn(self.publisher, inner_path, [peer], [], 1, save_to_send_back_lru=True)
-
- if send_back_skipped:
- self.log.info("CheckModifications: %s has older versions of %s files, skipped according to send back LRU" % (peer, send_back_skipped))
-
- self.log.debug("CheckModifications: Waiting for %s pooledDownloadContent" % len(threads))
+ if config.verbose:
+ self.log.debug("CheckModifications: Waiting for %s pooledDownloadContent" % len(threads))
gevent.joinall(threads)
- # We need, with some rate, to perform the full check of modifications,
- # "since the beginning of time", instead of the partial one.
- def getForcedZeroSince(self):
- now = time.time()
- if self.fzs_timestamp + self.fzs_interval > now:
- return False
- self.fzs_count -= 1
- if self.fzs_count < 1:
- self.fzs_count = random.randint(0, self.fzs_range)
- self.fzs_timestamp = now
- return True
- return False
-
# Check modified content.json files from peers and add modified files to bad_files
# Return: Successfully queried peers [Peer, Peer...]
def checkModifications(self, since=None):
s = time.time()
peers_try = [] # Try these peers
queried = [] # Successfully queried from these peers
- peer_limit = 10
- updater_limit = 3
- need_queries = 3
+ limit = 5
# Wait for peers
if not self.peers:
@@ -733,116 +449,58 @@ class Site(object):
if self.peers:
break
- if since is None:
- if self.getForcedZeroSince():
- since = 0
- else:
- margin = 60 * 60 * 24
- since = self.settings.get("modified", margin) - margin
+ peers_try = self.getConnectedPeers()
+ peers_connected_num = len(peers_try)
+ if peers_connected_num < limit * 2: # Add more, non-connected peers if necessary
+ peers_try += self.getRecentPeers(limit * 5)
- if since == 0:
- peer_limit *= 4
- need_queries *= 4
+ if since is None: # No since defined: check from the last modification time minus 1 day
+ since = self.settings.get("modified", 60 * 60 * 24) - 60 * 60 * 24
- peers_try = self.getConnectedPeers() + self.getConnectablePeers(peer_limit)
-
- self.log.debug(
- "CheckModifications: Trying to get listModifications from %s peers, %s connected, since: %s" %
- (len(peers_try), len(self.getConnectedPeers()), since)
- )
+ if config.verbose:
+ self.log.debug(
+ "CheckModifications: Try to get listModifications from peers: %s, connected: %s, since: %s" %
+ (peers_try, peers_connected_num, since)
+ )
updaters = []
- for i in range(updater_limit):
- updaters.append(self.spawn(self.updater, peers_try, queried, need_queries, since))
+ for i in range(3):
+ updaters.append(gevent.spawn(self.updater, peers_try, queried, since))
- for r in range(10):
- gevent.joinall(updaters, timeout=5+r)
- if len(queried) >= need_queries or len(peers_try) == 0:
- break
- self.log.debug("CheckModifications: Waiting... (%s) succesfully queried: %s, left: %s" %
- (r + 1, len(queried), len(peers_try)))
+ gevent.joinall(updaters, timeout=10) # Wait up to 10 sec for the updaters to finish querying modifications
- self.log.debug("CheckModifications: Queried listModifications from %s peers in %.3fs since %s" % (
- len(queried), time.time() - s, since))
+ if not queried: # Nothing answered yet: feed connected peers to the stuck updaters and keep waiting
+ peers_try[0:0] = [peer for peer in self.getConnectedPeers() if peer.connection.connected] # Add connected peers
+ for _ in range(10):
+ gevent.joinall(updaters, timeout=10) # Wait up to another 10 sec per round until an updater finishes
+ if queried:
+ break
+
+ self.log.debug("CheckModifications: Queried listModifications from: %s in %.3fs since %s" % (queried, time.time() - s, since))
time.sleep(0.1)
return queried
- def invalidateUpdateTime(self, invalid_interval):
- a, b = invalid_interval
- if b is None:
- b = time.time()
- if a is None:
- a = b
- if a <= self.last_online_update and self.last_online_update <= b:
- self.last_online_update = 0
- self.log.debug("Update time invalidated")
-
- def isUpdateTimeValid(self):
- if not self.last_online_update:
- return False
- expirationThreshold = 60 * 60 * 6
- return self.last_online_update > time.time() - expirationThreshold
-
- def refreshUpdateTime(self, valid=True):
- if valid:
- self.last_online_update = time.time()
- else:
- self.last_online_update = 0
-
# Update content.json from peers and download changed files
# Return: None
@util.Noparallel()
- def update(self, announce=False, check_files=False, verify_files=False, since=None):
- online = self.connection_server.isInternetOnline()
-
+ def update(self, announce=False, check_files=False, since=None):
self.content_manager.loadContent("content.json", load_includes=False) # Reload content.json
self.content_updated = None # Reset content updated time
- if verify_files:
- check_files = True
-
- if verify_files:
- self.updateWebsocket(verifying=True)
- elif check_files:
- self.updateWebsocket(checking=True)
if check_files:
- if verify_files:
- self.storage.updateBadFiles(quick_check=False) # Full-featured checksum verification
- else:
- self.storage.updateBadFiles(quick_check=True) # Quick check and mark bad files based on file size
- # Don't update the timestamps in case of the application being shut down,
- # so we can make another try next time.
- if not self.isStopping():
- self.settings["check_files_timestamp"] = time.time()
- if verify_files:
- self.settings["verify_files_timestamp"] = time.time()
-
- if verify_files:
- self.updateWebsocket(verified=True)
- elif check_files:
- self.updateWebsocket(checked=True)
+ self.storage.updateBadFiles(quick_check=True) # Quick check and mark bad files based on file size
if not self.isServing():
return False
- if announce:
- self.updateWebsocket(updating=True)
- self.announce(mode="update", force=True)
-
- reqs = [
- self.peer_connector.newReq(4, 4, 30),
- self.peer_connector.newReq(2, 2, 60),
- self.peer_connector.newReq(1, 1, 120)
- ]
- nr_connected_peers = self.waitForPeers(reqs);
- if nr_connected_peers < 1:
- return
-
self.updateWebsocket(updating=True)
# Remove files that no longer in content.json
self.checkBadFiles()
+ if announce:
+ self.announce(mode="update", force=True)
+
# Full update, we can reset bad files
if check_files and since == 0:
self.bad_files = {}
@@ -853,7 +511,7 @@ class Site(object):
if self.bad_files:
self.log.debug("Bad files: %s" % self.bad_files)
- self.spawn(self.retryBadFiles, force=True)
+ gevent.spawn(self.retryBadFiles, force=True)
if len(queried) == 0:
# Failed to query modifications
@@ -861,19 +519,8 @@ class Site(object):
else:
self.content_updated = time.time()
- self.sendMyHashfield()
- self.updateHashfield()
-
- online = online and self.connection_server.isInternetOnline()
- self.refreshUpdateTime(valid=online)
-
self.updateWebsocket(updated=True)
- # To be called from FileServer
- @util.Noparallel(queue=True, ignore_args=True)
- def update2(self, check_files=False, verify_files=False):
- self.update(check_files=check_files, verify_files=verify_files)
-
# Update site by redownload all content.json
def redownloadContents(self):
# Download all content.json again
@@ -885,7 +532,7 @@ class Site(object):
gevent.joinall(content_threads)
# Publish worker
- def publisher(self, inner_path, peers, published, limit, diffs={}, event_done=None, cb_progress=None, max_retries=2, save_to_send_back_lru=False):
+ def publisher(self, inner_path, peers, published, limit, diffs={}, event_done=None, cb_progress=None):
file_size = self.storage.getSize(inner_path)
content_json_modified = self.content_manager.contents[inner_path]["modified"]
body = self.storage.read(inner_path)
@@ -910,7 +557,7 @@ class Site(object):
timeout = 10 + int(file_size / 1024)
result = {"exception": "Timeout"}
- for retry in range(max_retries):
+ for retry in range(2):
try:
with gevent.Timeout(timeout, False):
result = peer.publish(self.address, inner_path, body, content_json_modified, diffs)
@@ -920,12 +567,6 @@ class Site(object):
self.log.error("Publish error: %s" % Debug.formatException(err))
result = {"exception": Debug.formatException(err)}
- # We add to the send back lru not only on success, but also on errors.
- # Some peers returns None. (Why?)
- # Anyway, we tried our best in delivering possibly lost updates.
- if save_to_send_back_lru:
- self.addToSendBackLRU(peer, inner_path, content_json_modified)
-
if result and "ok" in result:
published.append(peer)
if cb_progress and len(published) <= limit:
@@ -937,75 +578,40 @@ class Site(object):
self.log.info("[FAILED] %s: %s" % (peer.key, result))
time.sleep(0.01)
- def addBackgroundPublisher(self, published=[], limit=5, inner_path="content.json", diffs={}):
- with self.background_publishers_lock:
- if self.background_publishers.get(inner_path, None):
- background_publisher = self.background_publishers[inner_path]
- background_publisher.reinit(published=published, limit=limit, diffs=diffs)
- else:
- background_publisher = BackgroundPublisher(self, published=published, limit=limit, inner_path=inner_path, diffs=diffs)
- self.background_publishers[inner_path] = background_publisher
-
- self.spawn(background_publisher.process)
-
- def processBackgroundPublishers(self):
- with self.background_publishers_lock:
- for inner_path, background_publisher in list(self.background_publishers.items()):
- background_publisher.process()
- if background_publisher.isComplete():
- background_publisher.finalize()
- del self.background_publishers[inner_path]
-
- def getPeersForForegroundPublishing(self, limit):
- # Wait for some peers to appear
- reqs = [
- self.peer_connector.newReq(limit, limit / 2, 10), # some of them...
- self.peer_connector.newReq(1, 1, 60) # or at least one...
- ]
- self.waitForPeers(reqs, update_websocket=False)
-
- peers = self.getConnectedPeers()
- random.shuffle(peers)
-
- # Prefer newer clients.
- # Trying to deliver foreground updates to the latest version clients,
- # expecting that they have better networking facilities.
- # Note: background updates SHOULD NOT discriminate peers by their rev number,
- # otherwise it can cause troubles in delivering updates to older versions.
- peers = sorted(peers, key=lambda peer: peer.connection.handshake.get("rev", 0) < config.rev - 100)
-
- # Add more, non-connected peers if necessary
- if len(peers) < limit * 2 and len(self.peers) > len(peers):
- peers += self.getRecentPeers(limit * 2)
- peers = set(peers)
-
- return peers
-
# Update content.json on peers
@util.Noparallel()
def publish(self, limit="default", inner_path="content.json", diffs={}, cb_progress=None):
published = [] # Successfully published (Peer)
publishers = [] # Publisher threads
+ if not self.peers:
+ self.announce(mode="more")
+
if limit == "default":
limit = 5
threads = limit
- peers = self.getPeersForForegroundPublishing(limit)
+ peers = self.getConnectedPeers()
+ num_connected_peers = len(peers)
+
+ random.shuffle(peers)
+ peers = sorted(peers, key=lambda peer: peer.connection.handshake.get("rev", 0) < config.rev - 100) # Prefer newer clients
+
+ if len(peers) < limit * 2 and len(self.peers) > len(peers): # Add more, non-connected peers if necessary
+ peers += self.getRecentPeers(limit * 2)
+
+ peers = set(peers)
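+ # set() removes duplicates: recently seen peers may already be in the
+ # connected list gathered above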
self.log.info("Publishing %s to %s/%s peers (connected: %s) diffs: %s (%.2fk)..." % (
- inner_path,
- limit, len(self.peers), len(self.getConnectedPeers()),
- list(diffs.keys()), float(len(str(diffs))) / 1024
+ inner_path, limit, len(self.peers), num_connected_peers, list(diffs.keys()), float(len(str(diffs))) / 1024
))
if not peers:
- self.addBackgroundPublisher(published=published, limit=limit, inner_path=inner_path, diffs=diffs)
return 0 # No peers found
event_done = gevent.event.AsyncResult()
for i in range(min(len(peers), limit, threads)):
- publisher = self.spawn(self.publisher, inner_path, peers, published, limit, diffs, event_done, cb_progress)
+ publisher = gevent.spawn(self.publisher, inner_path, peers, published, limit, diffs, event_done, cb_progress)
publishers.append(publisher)
event_done.get() # Wait for done
@@ -1014,16 +620,17 @@ class Site(object):
if len(published) == 0:
gevent.joinall(publishers) # No successful publish, wait for all publisher
- # Publish to more peers in the background
+ # Publish to more peers in the background
self.log.info(
- "Published %s to %s peers, publishing to more peers in the background" %
- (inner_path, len(published))
+ "Published %s to %s peers, publishing to %s more peers in the background" %
+ (inner_path, len(published), limit)
)
- self.addBackgroundPublisher(published=published, limit=limit, inner_path=inner_path, diffs=diffs)
+ for _ in range(2):
+ gevent.spawn(self.publisher, inner_path, peers, published, limit=limit * 2, diffs=diffs)
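+ # The two background workers share the same peer set and published
+ # list, so they cooperate instead of duplicating uploads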
# Send my hashfield to every connected peer if changed
- self.spawn(self.sendMyHashfield, 100)
+ gevent.spawn(self.sendMyHashfield, 100)
return len(published)
@@ -1186,7 +793,7 @@ class Site(object):
if not self.content_manager.contents.get("content.json"): # No content.json, download it first!
self.log.debug("Need content.json first (inner_path: %s, priority: %s)" % (inner_path, priority))
if priority > 0:
- self.spawn(self.announce)
+ gevent.spawn(self.announce)
if inner_path != "content.json": # Prevent double download
task = self.worker_manager.addTask("content.json", peer)
task["evt"].get()
@@ -1212,7 +819,7 @@ class Site(object):
self.log.debug("%s: Download not allowed" % inner_path)
return False
- self.bad_files[inner_path] = self.bad_files.get(inner_path, 1) # Mark as bad file
+ self.bad_files[inner_path] = self.bad_files.get(inner_path, 0) + 1 # Mark as bad file
task = self.worker_manager.addTask(inner_path, peer, priority=priority, file_info=file_info)
if blocking:
@@ -1240,239 +847,118 @@ class Site(object):
peer = Peer(ip, port, self)
self.peers[key] = peer
peer.found(source)
-
- self.peer_connector.processReqs()
- self.peer_connector.addPeer(peer)
-
return peer
- # Called from peer.remove to erase links to peer
- def deregisterPeer(self, peer):
- self.peers.pop(peer.key, None)
- try:
- self.peers_recent.remove(peer)
- except:
- pass
- self.peer_connector.deregisterPeer(peer)
-
def announce(self, *args, **kwargs):
if self.isServing():
self.announcer.announce(*args, **kwargs)
- def getActivityRating(self, force_safe=False):
- age = time.time() - self.settings.get("modified", 0)
-
- if age < 60 * 60:
- rating = 1.0
- elif age < 60 * 60 * 5:
- rating = 0.8
- elif age < 60 * 60 * 24:
- rating = 0.6
- elif age < 60 * 60 * 24 * 3:
- rating = 0.4
- elif age < 60 * 60 * 24 * 7:
- rating = 0.2
- else:
- rating = 0.0
-
- force_safe = force_safe or config.expose_no_ownership
-
- if (not force_safe) and self.settings["own"]:
- rating = min(rating, 0.6)
-
- if self.content_updated is False: # Last check modification failed
- rating += 0.1
- elif self.bad_files:
- rating += 0.1
-
- if rating > 1.0:
- rating = 1.0
-
- return rating
-
- def getAnnounceRating(self):
- # rare frequent
- # announces announces
- # 0 ------------------- 1
- # activity -------------> -- active site ==> frequent announces
- # <---------------- peers -- many peers ==> rare announces
- # trackers -------------> -- many trackers ==> frequent announces to iterate over more trackers
-
- activity_rating = self.getActivityRating(force_safe=True)
-
- peer_count = len(self.peers)
- peer_rating = 1.0 - min(peer_count, 50) / 50.0
-
- tracker_count = self.announcer.getSupportedTrackerCount()
- tracker_count = max(tracker_count, 1)
- tracker_rating = 1.0 - (1.0 / tracker_count)
-
- v = [activity_rating, peer_rating, tracker_rating]
- return sum(v) / float(len(v))
-
- def getPreferableConnectablePeerCount(self):
- if not self.isServing():
- return 0
-
- count = lerp(
- config.site_connectable_peer_count_min,
- config.site_connectable_peer_count_max,
- self.getActivityRating(force_safe=True))
- return count
-
- # The engine tries to maintain the number of active connections:
- # >= getPreferableActiveConnectionCount()
- # and
- # <= getActiveConnectionCountLimit()
-
- def getPreferableActiveConnectionCount(self):
- if not self.isServing():
- return 0
-
- age = time.time() - self.settings.get("modified", 0)
- count = int(10 * self.getActivityRating(force_safe=True))
-
- if len(self.peers) < 50:
- count = max(count, 5)
-
- count = min(count, config.connected_limit)
-
- return count
-
- def getActiveConnectionCountLimit(self):
- count_above_preferable = 2
- limit = self.getPreferableActiveConnectionCount() + count_above_preferable
- limit = min(limit, config.connected_limit)
- return limit
-
- ############################################################################
-
- # Returns the maximum value of current reqs for connections
- def waitingForConnections(self):
- self.peer_connector.processReqs()
- return self.peer_connector.need_nr_connected_peers
-
- def needConnections(self, num=None, update_site_on_reconnect=False):
- if not self.connection_server.allowsCreatingConnections():
- return
-
+ # Keep connections to get the updates
+ def needConnections(self, num=None, check_site_on_reconnect=False):
if num is None:
- num = self.getPreferableActiveConnectionCount()
- num = min(len(self.peers), num)
+ if len(self.peers) < 50:
+ num = 3
+ else:
+ num = 6
+ need = min(len(self.peers), num, config.connected_limit) # Want num connections, capped by known peers and the global limit
- req = self.peer_connector.newReq(0, num)
- return req
+ connected = len(self.getConnectedPeers())
- # Wait for peers to be discovered and/or connected according to reqs
- # and send updates to the UI
- def waitForPeers(self, reqs, update_websocket=True):
- if not reqs:
- return 0
- i = 0
- nr_connected_peers = -1
- while self.isServing():
- ready_reqs = list(filter(lambda req: req.ready(), reqs))
- if len(ready_reqs) == len(reqs):
- if nr_connected_peers < 0:
- nr_connected_peers = ready_reqs[0].nr_connected_peers
- break
- waiting_reqs = list(filter(lambda req: not req.ready(), reqs))
- if not waiting_reqs:
- break
- waiting_req = waiting_reqs[0]
- #self.log.debug("waiting_req: %s %s %s", waiting_req.need_nr_connected_peers, waiting_req.nr_connected_peers, waiting_req.expiration_interval)
- waiting_req.waitHeartbeat(timeout=1.0)
- if i > 0 and nr_connected_peers != waiting_req.nr_connected_peers:
- nr_connected_peers = waiting_req.nr_connected_peers
- if update_websocket:
- self.updateWebsocket(connecting_to_peers=nr_connected_peers)
- i += 1
- if update_websocket:
- self.updateWebsocket(connected_to_peers=max(nr_connected_peers, 0))
- if i > 1:
- # If we waited some time, pause now for displaying connected_to_peers message in the UI.
- # This sleep is solely needed for site status updates on ZeroHello to be more cool-looking.
- gevent.sleep(1)
- return nr_connected_peers
+ connected_before = connected
- ############################################################################
+ self.log.debug("Need connections: %s, Current: %s, Total: %s" % (need, connected, len(self.peers)))
- # Return: Peers verified to be connectable recently, or if not enough, other peers as well
+ if connected < need: # Need more than we have
+ for peer in self.getRecentPeers(30):
+ if not peer.connection or not peer.connection.connected: # No peer connection or disconnected
+ peer.pex() # Initiate peer exchange
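+ # pex() transparently opens a connection to the peer if none
+ # exists, so it doubles as a reconnect attempt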
+ if peer.connection and peer.connection.connected:
+ connected += 1 # Successfully connected
+ if connected >= need:
+ break
+ self.log.debug(
+ "Connected before: %s, after: %s. Check site: %s." %
+ (connected_before, connected, check_site_on_reconnect)
+ )
+
+ if check_site_on_reconnect and connected_before == 0 and connected > 0 and self.connection_server.has_internet:
+ gevent.spawn(self.update, check_files=False)
+
+ return connected
+
+ # Return: Peers recently verified as probably connectable
def getConnectablePeers(self, need_num=5, ignore=[], allow_private=True):
peers = list(self.peers.values())
- random.shuffle(peers)
- connectable_peers = []
- reachable_peers = []
+ found = []
for peer in peers:
+ if peer.key.endswith(":0"):
+ continue # Not connectable
+ if not peer.connection:
+ continue # No connection
+ if peer.ip.endswith(".onion") and not self.connection_server.tor_manager.enabled:
+ continue # Onion not supported
if peer.key in ignore:
+ continue # The requester has this peer
+ if time.time() - peer.connection.last_recv_time > 60 * 60 * 2: # Last message more than 2 hours ago
+ peer.connection = None # Cleanup: Dead connection
continue
if not allow_private and helper.isPrivateIp(peer.ip):
continue
- if peer.isConnectable():
- connectable_peers.append(peer)
- elif peer.isReachable():
- reachable_peers.append(peer)
- if len(connectable_peers) >= need_num:
+ found.append(peer)
+ if len(found) >= need_num:
break # Found requested number of peers
- if len(connectable_peers) < need_num: # Return not that good peers
- connectable_peers += reachable_peers[0:need_num - len(connectable_peers)]
+ if len(found) < need_num: # Fall back to less reliable peers
+ found += [
+ peer for peer in peers
+ if not peer.key.endswith(":0") and
+ peer.key not in ignore and
+ (allow_private or not helper.isPrivateIp(peer.ip))
+ ][0:need_num - len(found)]
- return connectable_peers
+ return found
# Return: Recently found peers
- def getReachablePeers(self):
- return [peer for peer in self.peers.values() if peer.isReachable()]
-
- # Return: Recently found peers, sorted by reputation.
- # If there not enough recently found peers, adds other known peers with highest reputation
def getRecentPeers(self, need_num):
- need_num = int(need_num)
- found = set(self.peers_recent)
+ found = list(set(self.peers_recent))
self.log.debug(
"Recent peers %s of %s (need: %s)" %
(len(found), len(self.peers), need_num)
)
- if len(found) < need_num and len(found) < len(self.peers):
- # Add random peers
- peers = self.getReachablePeers()
- peers = sorted(
- list(peers),
+ if len(found) >= need_num or len(found) >= len(self.peers):
+ return sorted(
+ found,
key=lambda peer: peer.reputation,
reverse=True
- )
- while len(found) < need_num and len(peers) > 0:
- found.add(peers.pop())
+ )[0:need_num]
- return sorted(
- list(found),
+ # Add random peers
+ need_more = need_num - len(found)
+ if not self.connection_server.tor_manager.enabled:
+ peers = [peer for peer in self.peers.values() if not peer.ip.endswith(".onion")]
+ else:
+ peers = list(self.peers.values())
+
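+ # Heuristic (assumed): rank a slice of up to need_more * 50 known peers by reputation and keep the best need_more * 2 as extra candidates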
+ found_more = sorted(
+ peers[0:need_more * 50],
key=lambda peer: peer.reputation,
reverse=True
- )[0:need_num]
+ )[0:need_more * 2]
+
+ found += found_more
return found[0:need_num]
- # Returns the list of connected peers
- # By default the result may contain peers chosen optimistically:
- # If the connection is being established and 20 seconds have not yet passed
- # since the connection start time, those peers are included in the result.
- # Set only_fully_connected=True for restricting only by fully connected peers.
- def getConnectedPeers(self, only_fully_connected=False):
+ def getConnectedPeers(self):
back = []
if not self.connection_server:
return []
tor_manager = self.connection_server.tor_manager
for connection in self.connection_server.connections:
- if len(back) >= len(self.peers): # short cut for breaking early; no peers to check left
- break
-
if not connection.connected and time.time() - connection.start_time > 20: # Still not connected after 20s
continue
- if not connection.connected and only_fully_connected: # Only fully connected peers
- continue
-
peer = self.peers.get("%s:%s" % (connection.ip, connection.port))
if peer:
if connection.ip.endswith(".onion") and connection.target_onion and tor_manager.start_onions:
@@ -1485,158 +971,61 @@ class Site(object):
back.append(peer)
return back
- def removeDeadPeers(self):
- peers = list(self.peers.values())
- if len(peers) <= 20:
- return
-
- removed = 0
- if len(peers) > 10000:
- ttl = 60 * 2
- elif len(peers) > 1000:
- ttl = 60 * 60 * 1
- elif len(peers) > 100:
- ttl = 60 * 60 * 4
- else:
- ttl = 60 * 60 * 8
-
- for peer in peers:
- if peer.isConnected() or peer.isProtected():
- continue
- if peer.isTtlExpired(ttl):
- peer.remove("TTL expired")
- removed += 1
- if removed > len(peers) * 0.1: # Don't remove too much at once
- break
-
- if removed:
- self.log.debug("Cleanup peers result: Removed %s, left: %s" % (removed, len(self.peers)))
-
# Cleanup probably dead peers and close connection if too much
- def cleanupPeers(self):
- self.removeDeadPeers()
+ def cleanupPeers(self, peers_protected=[]):
+ peers = list(self.peers.values())
+ if len(peers) > 20:
+ # Cleanup old peers
+ removed = 0
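+ # TTL is scaled with swarm size (assumed heuristic): expire peers sooner when many are known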
+ if len(peers) > 1000:
+ ttl = 60 * 60 * 1
+ else:
+ ttl = 60 * 60 * 4
- limit = max(self.getActiveConnectionCountLimit(), self.waitingForConnections())
- connected_peers = self.getConnectedPeers(only_fully_connected=True)
- need_to_close = len(connected_peers) - limit
-
- if need_to_close > 0:
- closed = 0
- for peer in sorted(connected_peers, key=lambda peer: min(peer.connection.sites, 5)):
- if not peer.isConnected():
+ for peer in peers:
+ if peer.connection and peer.connection.connected:
continue
- if peer.isProtected():
+ if peer.connection and not peer.connection.connected:
+ peer.connection = None # Dead connection
+ if time.time() - peer.time_found > ttl: # Not found via tracker or pex within the TTL window
+ peer.remove("Time found expired")
+ removed += 1
+ if removed > len(peers) * 0.1: # Don't remove too much at once
+ break
+
+ if removed:
+ self.log.debug("Cleanup peers result: Removed %s, left: %s" % (removed, len(self.peers)))
+
+ # Close peers over the limit
+ closed = 0
+ connected_peers = [peer for peer in self.getConnectedPeers() if peer.connection.connected] # Only fully connected peers
+ need_to_close = len(connected_peers) - config.connected_limit
+
+ if closed < need_to_close:
+ # Try to keep connections with more sites
+ for peer in sorted(connected_peers, key=lambda peer: min(peer.connection.sites, 5)):
+ if not peer.connection:
+ continue
+ if peer.key in peers_protected:
continue
if peer.connection.sites > 5:
break
- peer.disconnect("Cleanup peers")
+ peer.connection.close("Cleanup peers")
+ peer.connection = None
closed += 1
if closed >= need_to_close:
break
- self.log.debug("Connected: %s, Need to close: %s, Closed: %s" % (
- len(connected_peers), need_to_close, closed))
-
- def lookForConnectablePeers(self):
- num_tries = 2
- need_connectable_peers = self.getPreferableConnectablePeerCount()
-
- connectable_peers = 0
- reachable_peers = []
-
- for peer in list(self.peers.values()):
- if peer.isConnected() or peer.isConnectable():
- connectable_peers += 1
- elif peer.isReachable():
- reachable_peers.append(peer)
- if connectable_peers >= need_connectable_peers:
- return True
-
- random.shuffle(reachable_peers)
-
- for peer in reachable_peers:
- if peer.isConnected() or peer.isConnectable() or peer.removed:
- continue
- peer.ping()
- if peer.isConnected():
- peer.pex()
- num_tries -= 1
- if num_tries < 1:
- break
-
- @util.Noparallel(queue=True)
- def runPeriodicMaintenance(self, startup=False, force=False):
- if not self.isServing():
- return False
-
- self.log.debug("runPeriodicMaintenance: startup=%s, force=%s" % (startup, force))
-
- result = False
-
- for handler in self.periodic_maintenance_handlers:
- result = result | bool(handler.run(startup=startup, force=force))
-
- return result
-
- def periodicMaintenanceHandler_general(self, startup=False, force=False):
- if not self.isServing():
- return False
-
- if not self.peers:
- return False
-
- self.log.debug("periodicMaintenanceHandler_general: startup=%s, force=%s" % (startup, force))
-
- #self.persistent_peer_req = self.needConnections(update_site_on_reconnect=True)
- #self.persistent_peer_req.result_connected.wait(timeout=2.0)
-
- #self.announcer.announcePex()
-
- self.processBackgroundPublishers()
-
- self.update()
-
- return True
-
- def periodicMaintenanceHandler_peer_check(self, startup=False, force=False):
- if not self.isServing():
- return False
-
- if not self.peers:
- return False
-
- self.log.debug("periodicMaintenanceHandler_peer_check: startup=%s, force=%s" % (startup, force))
-
- if not startup:
- self.cleanupPeers()
-
- self.lookForConnectablePeers()
-
- return True
-
- def periodicMaintenanceHandler_announce(self, startup=False, force=False):
- if not self.isServing():
- return False
-
- self.log.debug("periodicMaintenanceHandler_announce: startup=%s, force=%s" % (startup, force))
-
- if startup and len(self.peers) < 10:
- self.announce(mode="startup")
- else:
- self.announce(mode="update", pex=False)
-
- return True
+ if need_to_close > 0:
+ self.log.debug("Connected: %s, Need to close: %s, Closed: %s" % (len(connected_peers), need_to_close, closed))
# Send hashfield to peers
def sendMyHashfield(self, limit=5):
- if not self.isServing():
- return False
-
if not self.content_manager.hashfield: # No optional files
return False
sent = 0
- connected_peers = self.getConnectedPeers(only_fully_connected=True)
+ connected_peers = self.getConnectedPeers()
for peer in connected_peers:
if peer.sendMyHashfield():
sent += 1
@@ -1649,16 +1038,13 @@ class Site(object):
# Update hashfield
def updateHashfield(self, limit=5):
- if not self.isServing():
- return False
-
# Return if no optional files
if not self.content_manager.hashfield and not self.content_manager.has_optional_files:
return False
s = time.time()
queried = 0
- connected_peers = self.getConnectedPeers(only_fully_connected=True)
+ connected_peers = self.getConnectedPeers()
for peer in connected_peers:
if peer.time_hashfield:
continue
@@ -1696,10 +1082,10 @@ class Site(object):
# Add event listeners
def addEventListeners(self):
- self.onFileStart = util.Event() # If WorkerManager added new task
+ self.onFileStart = util.Event() # If WorkerManager added new task
self.onFileDone = util.Event() # If WorkerManager successfully downloaded a file
self.onFileFail = util.Event() # If WorkerManager failed to download a file
- self.onComplete = util.Event() # All files finished
+ self.onComplete = util.Event() # All files finished
self.onFileStart.append(lambda inner_path: self.fileStarted()) # No parameters to make Noparallel batching working
self.onFileDone.append(lambda inner_path: self.fileDone(inner_path))
@@ -1713,7 +1099,6 @@ class Site(object):
param = None
for ws in self.websockets:
ws.event("siteChanged", self, param)
- time.sleep(0.001)
def messageWebsocket(self, message, type="info", progress=None):
for ws in self.websockets:
@@ -1728,7 +1113,7 @@ class Site(object):
time.sleep(0.001) # Wait for other files adds
self.updateWebsocket(file_started=True)
- # File downloaded successfully
+ # File downloaded successfully
def fileDone(self, inner_path):
# File downloaded, remove it from bad files
if inner_path in self.bad_files:
diff --git a/src/Site/SiteAnnouncer.py b/src/Site/SiteAnnouncer.py
index 1baf39af..2fd63e82 100644
--- a/src/Site/SiteAnnouncer.py
+++ b/src/Site/SiteAnnouncer.py
@@ -1,7 +1,6 @@
import random
import time
import hashlib
-import logging
import re
import collections
@@ -13,7 +12,6 @@ from Debug import Debug
from util import helper
from greenlet import GreenletExit
import util
-from util import CircularIterator
class AnnounceError(Exception):
@@ -26,20 +24,11 @@ global_stats = collections.defaultdict(lambda: collections.defaultdict(int))
class SiteAnnouncer(object):
def __init__(self, site):
self.site = site
- self.log = logging.getLogger("Site:%s SiteAnnouncer" % self.site.address_short)
-
self.stats = {}
self.fileserver_port = config.fileserver_port
self.peer_id = self.site.connection_server.peer_id
- self.tracker_circular_iterator = CircularIterator()
+ self.last_tracker_id = random.randint(0, 10)
self.time_last_announce = 0
- self.supported_tracker_count = 0
-
- # Returns connection_server rela
- # Since 0.8.0
- @property
- def connection_server(self):
- return self.site.connection_server
def getTrackers(self):
return config.trackers
@@ -47,76 +36,25 @@ class SiteAnnouncer(object):
def getSupportedTrackers(self):
trackers = self.getTrackers()
- if not self.connection_server.tor_manager.enabled:
+ if not self.site.connection_server.tor_manager.enabled:
trackers = [tracker for tracker in trackers if ".onion" not in tracker]
trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)] # Remove trackers with unknown address
- if "ipv6" not in self.connection_server.supported_ip_types:
- trackers = [tracker for tracker in trackers if self.connection_server.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]
+ if "ipv6" not in self.site.connection_server.supported_ip_types:
+ trackers = [tracker for tracker in trackers if helper.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]
return trackers
- # Returns a cached value of len(self.getSupportedTrackers()), which can be
- # inacurate.
- # To be used from Site for estimating available tracker count.
- def getSupportedTrackerCount(self):
- return self.supported_tracker_count
-
- def shouldTrackerBeTemporarilyIgnored(self, tracker, mode, force):
- if not tracker:
- return True
-
- if force:
- return False
-
- now = time.time()
-
- # Throttle accessing unresponsive trackers
- tracker_stats = global_stats[tracker]
- delay = min(30 * tracker_stats["num_error"], 60 * 10)
- time_announce_allowed = tracker_stats["time_request"] + delay
- if now < time_announce_allowed:
- return True
-
- return False
-
- def getAnnouncingTrackers(self, mode, force):
+ def getAnnouncingTrackers(self, mode):
trackers = self.getSupportedTrackers()
- self.supported_tracker_count = len(trackers)
-
- if trackers and (mode == "update" or mode == "more"):
-
- # Choose just 2 trackers to announce to
-
- trackers_announcing = []
-
- # One is the next in sequence
-
- self.tracker_circular_iterator.resetSuccessiveCount()
- while 1:
- tracker = self.tracker_circular_iterator.next(trackers)
- if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
- trackers_announcing.append(tracker)
- break
- if self.tracker_circular_iterator.isWrapped():
- break
-
- # And one is just random
-
- shuffled_trackers = random.sample(trackers, len(trackers))
- for tracker in shuffled_trackers:
- if tracker in trackers_announcing:
- continue
- if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
- trackers_announcing.append(tracker)
- break
+ if trackers and (mode == "update" or mode == "more"): # Announce to just one tracker, advancing the queried tracker id
+ self.last_tracker_id += 1
+ self.last_tracker_id = self.last_tracker_id % len(trackers)
+ trackers_announcing = [trackers[self.last_tracker_id]] # We're only going to use this one
else:
- trackers_announcing = [
- tracker for tracker in trackers
- if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force)
- ]
+ trackers_announcing = trackers
return trackers_announcing
@@ -124,32 +62,94 @@ class SiteAnnouncer(object):
back = []
# Type of addresses they can reach me
if config.trackers_proxy == "disable" and config.tor != "always":
- for ip_type, opened in list(self.connection_server.port_opened.items()):
+ for ip_type, opened in list(self.site.connection_server.port_opened.items()):
if opened:
back.append(ip_type)
- if self.connection_server.tor_manager.start_onions:
+ if self.site.connection_server.tor_manager.start_onions:
back.append("onion")
return back
- @util.Noparallel()
+ @util.Noparallel(blocking=False)
def announce(self, force=False, mode="start", pex=True):
- if not self.site.isServing():
- return
-
if time.time() - self.time_last_announce < 30 and not force:
return # No reannouncing within 30 secs
-
- self.log.debug("announce: force=%s, mode=%s, pex=%s" % (force, mode, pex))
+ if force:
+ self.site.log.debug("Force reannounce in mode %s" % mode)
self.fileserver_port = config.fileserver_port
self.time_last_announce = time.time()
- trackers = self.getAnnouncingTrackers(mode, force)
- self.log.debug("Chosen trackers: %s" % trackers)
- self.announceToTrackers(trackers, force=force, mode=mode)
+ trackers = self.getAnnouncingTrackers(mode)
+
+ if config.verbose:
+ self.site.log.debug("Tracker announcing, trackers: %s" % trackers)
+
+ errors = []
+ slow = []
+ s = time.time()
+ threads = []
+ num_announced = 0
+
+ for tracker in trackers: # Start announce threads
+ tracker_stats = global_stats[tracker]
+ # Back off from trackers that look unreliable
+ time_announce_allowed = time.time() - 60 * min(30, tracker_stats["num_error"])
+ if tracker_stats["num_error"] > 5 and tracker_stats["time_request"] > time_announce_allowed and not force:
+ if config.verbose:
+ self.site.log.debug("Tracker %s looks unreliable, announce skipped (error: %s)" % (tracker, tracker_stats["num_error"]))
+ continue
+ thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
+ threads.append(thread)
+ thread.tracker = tracker
+
+ time.sleep(0.01)
+ self.updateWebsocket(trackers="announcing")
+
+ gevent.joinall(threads, timeout=20) # Wait for announce finish
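+ # Greenlets still running after the timeout are not killed; they show up below as "30s+" slow trackers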
+
+ for thread in threads:
+ if thread.value is None:
+ continue
+ if thread.value is not False:
+ if thread.value > 1.0: # Takes more than 1 second to announce
+ slow.append("%.2fs %s" % (thread.value, thread.tracker))
+ num_announced += 1
+ else:
+ if thread.ready():
+ errors.append(thread.tracker)
+ else: # Still running
+ slow.append("30s+ %s" % thread.tracker)
+
+ # Save peers num
+ self.site.settings["peers"] = len(self.site.peers)
+
+ if len(errors) < len(threads): # At least one tracker finished
+ if len(trackers) == 1:
+ announced_to = trackers[0]
+ else:
+ announced_to = "%s/%s trackers" % (num_announced, len(threads))
+ if mode != "update" or config.verbose:
+ self.site.log.debug(
+ "Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
+ (mode, announced_to, time.time() - s, errors, slow)
+ )
+ else:
+ if len(threads) > 1:
+ self.site.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
+ if len(threads) == 1 and mode != "start": # Move to next tracker
+ self.site.log.debug("Tracker failed, skipping to next one...")
+ self.site.greenlet_manager.spawnLater(1.0, self.announce, force=force, mode=mode, pex=pex)
+
+ self.updateWebsocket(trackers="announced")
if pex:
- self.announcePex()
+ self.updateWebsocket(pex="announcing")
+ if mode == "more": # Need more peers
+ self.announcePex(need_num=10)
+ else:
+ self.announcePex()
+
+ self.updateWebsocket(pex="announced")
def getTrackerHandler(self, protocol):
return None
@@ -177,7 +177,7 @@ class SiteAnnouncer(object):
s = time.time()
address_parts = self.getAddressParts(tracker)
if not address_parts:
- self.log.warning("Tracker %s error: Invalid address" % tracker)
+ self.site.log.warning("Tracker %s error: Invalid address" % tracker)
return False
if tracker not in self.stats:
@@ -188,7 +188,7 @@ class SiteAnnouncer(object):
self.stats[tracker]["time_request"] = time.time()
global_stats[tracker]["time_request"] = time.time()
if config.verbose:
- self.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
+ self.site.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
if mode == "update":
num_want = 10
else:
@@ -202,7 +202,7 @@ class SiteAnnouncer(object):
else:
raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"])
except Exception as err:
- self.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
+ self.site.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
error = err
if error:
@@ -210,11 +210,11 @@ class SiteAnnouncer(object):
self.stats[tracker]["time_status"] = time.time()
self.stats[tracker]["last_error"] = str(error)
self.stats[tracker]["time_last_error"] = time.time()
- if self.connection_server.has_internet:
+ if self.site.connection_server.has_internet:
self.stats[tracker]["num_error"] += 1
self.stats[tracker]["num_request"] += 1
global_stats[tracker]["num_request"] += 1
- if self.connection_server.has_internet:
+ if self.site.connection_server.has_internet:
global_stats[tracker]["num_error"] += 1
self.updateWebsocket(tracker="error")
return False
@@ -249,106 +249,39 @@ class SiteAnnouncer(object):
self.site.updateWebsocket(peers_added=added)
if config.verbose:
- self.log.debug(
+ self.site.log.debug(
"Tracker result: %s://%s (found %s peers, new: %s, total: %s)" %
(address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers))
)
return time.time() - s
- def announceToTrackers(self, trackers, force=False, mode="start"):
- errors = []
- slow = []
- s = time.time()
- threads = []
- num_announced = 0
-
- for tracker in trackers: # Start announce threads
- thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
- threads.append(thread)
- thread.tracker = tracker
-
- time.sleep(0.01)
- self.updateWebsocket(trackers="announcing")
-
- gevent.joinall(threads, timeout=20) # Wait for announce finish
-
- for thread in threads:
- if thread.value is None:
- continue
- if thread.value is not False:
- if thread.value > 1.0: # Takes more than 1 second to announce
- slow.append("%.2fs %s" % (thread.value, thread.tracker))
- num_announced += 1
- else:
- if thread.ready():
- errors.append(thread.tracker)
- else: # Still running
- slow.append("30s+ %s" % thread.tracker)
-
- # Save peers num
- self.site.settings["peers"] = len(self.site.peers)
-
- if len(errors) < len(threads): # At least one tracker finished
- if len(trackers) == 1:
- announced_to = trackers[0]
- else:
- announced_to = "%s/%s trackers" % (num_announced, len(threads))
- if mode != "update" or config.verbose:
- self.log.debug(
- "Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
- (mode, announced_to, time.time() - s, errors, slow)
- )
- else:
- if len(threads) > 1:
- self.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
- if len(threads) > 1 and mode != "start": # Move to next tracker
- self.log.debug("Tracker failed, skipping to next one...")
- self.site.greenlet_manager.spawnLater(5.0, self.announce, force=force, mode=mode, pex=False)
-
- self.updateWebsocket(trackers="announced")
-
@util.Noparallel(blocking=False)
- def announcePex(self, query_num=2, need_num=10, establish_connections=True):
- peers = []
- try:
- peer_count = 20 + query_num * 2
+ def announcePex(self, query_num=2, need_num=5):
+ peers = self.site.getConnectedPeers()
+ if len(peers) == 0: # Wait 3s for connections
+ time.sleep(3)
+ peers = self.site.getConnectedPeers()
- # Wait for some peers to connect
- for _ in range(5):
- if not self.site.isServing():
- return
- peers = self.site.getConnectedPeers(only_fully_connected=True)
- if len(peers) > 0:
- break
- time.sleep(2)
+ if len(peers) == 0: # Still no connected peers for this site; fall back to recently seen peers
+ peers = list(self.site.getRecentPeers(20))
+ need_num = 10
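+ # Raising need_num here (assumed intent) lets the first peers that answer refill the pool faster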
- if len(peers) < peer_count and establish_connections:
- # Small number of connected peers for this site, connect to any
- peers = list(self.site.getRecentPeers(peer_count))
-
- if len(peers) > 0:
- self.updateWebsocket(pex="announcing")
-
- random.shuffle(peers)
- done = 0
- total_added = 0
- for peer in peers:
- if not establish_connections and not peer.isConnected():
- continue
- num_added = peer.pex(need_num=need_num)
- if num_added is not False:
- done += 1
- total_added += num_added
- if num_added:
- self.site.worker_manager.onPeers()
- self.site.updateWebsocket(peers_added=num_added)
- if done == query_num:
- break
+ random.shuffle(peers)
+ done = 0
+ total_added = 0
+ for peer in peers:
+ num_added = peer.pex(need_num=need_num)
+ if num_added is not False:
+ done += 1
+ total_added += num_added
+ if num_added:
+ self.site.worker_manager.onPeers()
+ self.site.updateWebsocket(peers_added=num_added)
+ else:
time.sleep(0.1)
- self.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
- finally:
- if len(peers) > 0:
- self.updateWebsocket(pex="announced")
+ if done == query_num:
+ break
+ self.site.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
def updateWebsocket(self, **kwargs):
if kwargs:
diff --git a/src/Site/SiteHelpers.py b/src/Site/SiteHelpers.py
deleted file mode 100644
index 90a298cf..00000000
--- a/src/Site/SiteHelpers.py
+++ /dev/null
@@ -1,256 +0,0 @@
-import time
-import weakref
-import gevent
-
-class ConnectRequirement(object):
- next_id = 1
- def __init__(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
- self.need_nr_peers = need_nr_peers # how many total peers we need
- self.need_nr_connected_peers = need_nr_connected_peers # how many connected peers we need
- self.result = gevent.event.AsyncResult() # resolves on need_nr_peers condition
- self.result_connected = gevent.event.AsyncResult() # resolves on need_nr_connected_peers condition
-
- self.expiration_interval = expiration_interval
- self.expired = False
- if expiration_interval:
- self.expire_at = time.time() + expiration_interval
- else:
- self.expire_at = None
-
- self.nr_peers = -1 # updated by PeerConnector()
- self.nr_connected_peers = -1 # updated by PeerConnector()
-
- self.heartbeat = gevent.event.AsyncResult()
-
- self.id = type(self).next_id
- type(self).next_id += 1
-
- def fulfilled(self):
- return self.result.ready() and self.result_connected.ready()
-
- def ready(self):
- return self.expired or self.fulfilled()
-
- # Heartbeat sent when any of the following happens:
- # * self.result is set
- # * self.result_connected is set
- # * self.nr_peers changed
- # * self.nr_peers_connected changed
- # * self.expired is set
- def waitHeartbeat(self, timeout=None):
- if self.heartbeat.ready():
- self.heartbeat = gevent.event.AsyncResult()
- return self.heartbeat.wait(timeout=timeout)
-
- def sendHeartbeat(self):
- self.heartbeat.set_result()
- if self.heartbeat.ready():
- self.heartbeat = gevent.event.AsyncResult()
-
-class PeerConnector(object):
-
- def __init__(self, site):
- self.site = site
-
- self.peer_reqs = weakref.WeakValueDictionary() # How many connected peers we need.
- # Separate entry for each requirement.
- # Objects of type ConnectRequirement.
- self.peer_connector_controller = None # Thread doing the orchestration in background.
- self.peer_connector_workers = dict() # Threads trying to connect to individual peers.
- self.peer_connector_worker_limit = 5 # Max nr of workers.
- self.peer_connector_announcer = None # Thread doing announces in background.
-
- # Max effective values. Set by processReqs().
- self.need_nr_peers = 0
- self.need_nr_connected_peers = 0
- self.nr_peers = 0 # set by processReqs()
- self.nr_connected_peers = 0 # set by processReqs2()
-
- # Connector Controller state
- self.peers = list()
-
- def addReq(self, req):
- self.peer_reqs[req.id] = req
- self.processReqs()
-
- def newReq(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
- req = ConnectRequirement(need_nr_peers, need_nr_connected_peers, expiration_interval=expiration_interval)
- self.addReq(req)
- return req
-
- def processReqs(self, nr_connected_peers=None):
- nr_peers = len(self.site.peers)
- self.nr_peers = nr_peers
-
- need_nr_peers = 0
- need_nr_connected_peers = 0
-
- items = list(self.peer_reqs.items())
- for key, req in items:
- send_heartbeat = False
-
- if req.expire_at and req.expire_at < time.time():
- req.expired = True
- self.peer_reqs.pop(key, None)
- send_heartbeat = True
- elif req.result.ready() and req.result_connected.ready():
- pass
- else:
- if nr_connected_peers is not None:
- if req.need_nr_peers <= nr_peers and req.need_nr_connected_peers <= nr_connected_peers:
- req.result.set_result(nr_peers)
- req.result_connected.set_result(nr_connected_peers)
- send_heartbeat = True
- if req.nr_peers != nr_peers or req.nr_connected_peers != nr_connected_peers:
- req.nr_peers = nr_peers
- req.nr_connected_peers = nr_connected_peers
- send_heartbeat = True
-
- if not (req.result.ready() and req.result_connected.ready()):
- need_nr_peers = max(need_nr_peers, req.need_nr_peers)
- need_nr_connected_peers = max(need_nr_connected_peers, req.need_nr_connected_peers)
-
- if send_heartbeat:
- req.sendHeartbeat()
-
- self.need_nr_peers = need_nr_peers
- self.need_nr_connected_peers = need_nr_connected_peers
-
- if nr_connected_peers is None:
- nr_connected_peers = 0
- if need_nr_peers > nr_peers:
- self.spawnPeerConnectorAnnouncer();
- if need_nr_connected_peers > nr_connected_peers:
- self.spawnPeerConnectorController();
-
- def processReqs2(self):
- self.nr_connected_peers = len(self.site.getConnectedPeers(only_fully_connected=True))
- self.processReqs(nr_connected_peers=self.nr_connected_peers)
-
- # For adding new peers when ConnectorController is working.
- # While it is iterating over a cached list of peers, there can be a significant lag
- # for a newly discovered peer to get in sight of the controller.
- # Suppose most previously known peers are dead and we've just get a few
- # new peers from a tracker.
- # So we mix the new peer to the cached list.
- # When ConnectorController is stopped (self.peers is empty), we just do nothing here.
- def addPeer(self, peer):
- if not self.peers:
- return
- if peer not in self.peers:
- self.peers.append(peer)
-
- def deregisterPeer(self, peer):
- try:
- self.peers.remove(peer)
- except:
- pass
-
- def sleep(self, t):
- self.site.connection_server.sleep(t)
-
- def keepGoing(self):
- return self.site.isServing() and self.site.connection_server.allowsCreatingConnections()
-
- def peerConnectorWorker(self, peer):
- if not peer.isConnected():
- peer.connect()
- if peer.isConnected():
- peer.ping()
- self.processReqs2()
-
- def peerConnectorController(self):
- self.peers = list()
- addendum = 20
- while self.keepGoing():
-
- no_peers_loop = 0
- while len(self.site.peers) < 1:
- # No peers at all.
- # Waiting for the announcer to discover some peers.
- self.sleep(10 + no_peers_loop)
- no_peers_loop += 1
- if not self.keepGoing() or no_peers_loop > 60:
- break
-
- self.processReqs2()
-
- if self.need_nr_connected_peers <= self.nr_connected_peers:
- # Ok, nobody waits for connected peers.
- # Done.
- break
-
- if len(self.site.peers) < 1:
- break
-
- if len(self.peers) < 1:
- # refill the peer list
- self.peers = self.site.getRecentPeers(self.need_nr_connected_peers * 2 + self.nr_connected_peers + addendum)
- addendum = min(addendum * 2 + 50, 10000)
- if len(self.peers) <= self.nr_connected_peers:
- # Looks like all known peers are connected.
- # Waiting for the announcer to discover some peers.
- self.site.announcer.announcePex(establish_connections=False)
- self.sleep(10)
- continue
-
- added = 0
-
- # try connecting to peers
- while self.keepGoing() and len(self.peer_connector_workers) < self.peer_connector_worker_limit:
- if len(self.peers) < 1:
- break
-
- peer = self.peers.pop(0)
-
- if peer.isConnected():
- continue
-
- thread = self.peer_connector_workers.get(peer, None)
- if thread:
- continue
-
- thread = self.site.spawn(self.peerConnectorWorker, peer)
- self.peer_connector_workers[peer] = thread
- thread.link(lambda thread, peer=peer: self.peer_connector_workers.pop(peer, None))
- added += 1
-
- if not self.keepGoing():
- break
-
- if not added:
- # Looks like all known peers are either connected or being connected,
- # so we weren't able to start connecting any peer in this iteration.
- # Waiting for the announcer to discover some peers.
- self.sleep(20)
-
- # wait for more room in self.peer_connector_workers
- while self.keepGoing() and len(self.peer_connector_workers) >= self.peer_connector_worker_limit:
- self.sleep(2)
-
- if not self.site.connection_server.isInternetOnline():
- self.sleep(30)
-
- self.peers = list()
- self.peer_connector_controller = None
-
- def peerConnectorAnnouncer(self):
- while self.keepGoing():
- if self.need_nr_peers <= self.nr_peers:
- break
- self.site.announce(mode="more")
- self.processReqs2()
- if self.need_nr_peers <= self.nr_peers:
- break
- self.sleep(10)
- if not self.site.connection_server.isInternetOnline():
- self.sleep(20)
- self.peer_connector_announcer = None
-
- def spawnPeerConnectorController(self):
- if self.peer_connector_controller is None or self.peer_connector_controller.ready():
- self.peer_connector_controller = self.site.spawn(self.peerConnectorController)
-
- def spawnPeerConnectorAnnouncer(self):
- if self.peer_connector_announcer is None or self.peer_connector_announcer.ready():
- self.peer_connector_announcer = self.site.spawn(self.peerConnectorAnnouncer)
diff --git a/src/Site/SiteManager.py b/src/Site/SiteManager.py
index 8175a1f5..684d69fc 100644
--- a/src/Site/SiteManager.py
+++ b/src/Site/SiteManager.py
@@ -4,7 +4,6 @@ import re
import os
import time
import atexit
-import collections
import gevent
@@ -28,21 +27,6 @@ class SiteManager(object):
gevent.spawn(self.saveTimer)
atexit.register(lambda: self.save(recalculate_size=True))
- # ZeroNet has a bug of desyncing between:
- # * time sent in a response of listModified
- # and
- # * time checked on receiving a file.
- # This leads to the following scenario:
- # * Request listModified.
- # * Detect that the remote peer missing an update
- # * Send a newer version of the file back to the peer.
- # * The peer responses "ok: File not changed"
- # .....
- # * Request listModified the next time and do all the same again.
- # So we keep the list of sent back entries to prevent sending multiple useless updates:
- # "{site.address} - {peer.key} - {inner_path}" -> mtime
- self.send_back_lru = collections.OrderedDict()
-
# Load all sites from data/sites.json
@util.Noparallel()
def load(self, cleanup=True, startup=False):
@@ -171,11 +155,6 @@ class SiteManager(object):
def resolveDomainCached(self, domain):
return self.resolveDomain(domain)
- # Checks if the address is blocked. To be implemented in content filter plugins.
- # Since 0.8.0
- def isAddressBlocked(self, address):
- return False
-
# Return: Site object or None if not found
def get(self, address):
if self.isDomainCached(address):
@@ -237,23 +216,6 @@ class SiteManager(object):
self.load(startup=True)
return self.sites
- # Return False if we never sent <inner_path> to <peer>
- # or if the file that was sent was older than <remote_modified>
- # so that send back logic is suppressed for <inner_path>.
- # True if <inner_path> can be sent back to <peer>.
- def checkSendBackLRU(self, site, peer, inner_path, remote_modified):
- key = site.address + ' - ' + peer.key + ' - ' + inner_path
- sent_modified = self.send_back_lru.get(key, 0)
- return remote_modified < sent_modified
-
- def addToSendBackLRU(self, site, peer, inner_path, modified):
- key = site.address + ' - ' + peer.key + ' - ' + inner_path
- if self.send_back_lru.get(key, None) is None:
- self.send_back_lru[key] = modified
- while len(self.send_back_lru) > config.send_back_lru_size:
- self.send_back_lru.popitem(last=False)
- else:
- self.send_back_lru.move_to_end(key, last=True)
site_manager = SiteManager() # Singletone
diff --git a/src/Site/SiteStorage.py b/src/Site/SiteStorage.py
index d5df8e95..27032e79 100644
--- a/src/Site/SiteStorage.py
+++ b/src/Site/SiteStorage.py
@@ -24,25 +24,6 @@ thread_pool_fs_read = ThreadPool.ThreadPool(config.threads_fs_read, name="FS rea
thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write")
thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")
-class VerifyFiles_Notificator(object):
- def __init__(self, site, quick_check):
- self.site = site
- self.quick_check = quick_check
- self.scanned_files = 0
- self.websocket_update_interval = 0.25
- self.websocket_update_time = time.time()
-
- def inc(self):
- self.scanned_files += 1
- if self.websocket_update_time + self.websocket_update_interval < time.time():
- self.send()
-
- def send(self):
- self.websocket_update_time = time.time()
- if self.quick_check:
- self.site.updateWebsocket(checking=self.scanned_files)
- else:
- self.site.updateWebsocket(verifying=self.scanned_files)
@PluginManager.acceptPlugins
class SiteStorage(object):
@@ -279,7 +260,7 @@ class SiteStorage(object):
# Open file object
@thread_pool_fs_read.wrap
def read(self, inner_path, mode="rb"):
- return open(self.getPath(inner_path), mode).read()
+ return self.open(inner_path, mode).read()
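+ # Going through self.open rather than the builtin open is assumed to let SiteStorage plugins hook reads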
@thread_pool_fs_write.wrap
def writeThread(self, inner_path, content):
@@ -375,7 +356,7 @@ class SiteStorage(object):
# Reopen DB to check changes
if self.has_db:
self.closeDb("New dbschema")
- self.site.spawn(self.getDb)
+ gevent.spawn(self.getDb)
elif not config.disable_db and should_load_to_db and self.has_db: # Load json file to db
if config.verbose:
self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
@@ -388,11 +369,11 @@ class SiteStorage(object):
# Load and parse json file
@thread_pool_fs_read.wrap
def loadJson(self, inner_path):
- try :
- with self.open(inner_path) as file:
+ try:
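+ # Open as utf-8 text so json.load receives str on Python 3 (assumed rationale for the explicit mode/encoding)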
+ with self.open(inner_path, "r", encoding="utf8") as file:
return json.load(file)
except Exception as err:
- self.log.error("Json load error: %s" % Debug.formatException(err))
+ self.log.warning("Json load error: %s" % Debug.formatException(err))
return None
# Write formatted json file
@@ -443,8 +424,6 @@ class SiteStorage(object):
return inner_path
# Verify all files sha512sum using content.json
- # The result may not be accurate if self.site.isStopping().
- # verifyFiles() return immediately in that case.
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
bad_files = []
back = defaultdict(int)
@@ -456,55 +435,17 @@ class SiteStorage(object):
self.log.debug("VerifyFile content.json not exists")
self.site.needFile("content.json", update=True) # Force update to fix corrupt file
self.site.content_manager.loadContent() # Reload content.json
-
- # Trying to read self.site.content_manager.contents without being stuck
- # on reading the long file list and also without getting
- # "RuntimeError: dictionary changed size during iteration"
- # We can't use just list(iteritems()) since it loads all the contents files
- # at once and gets unresponsive.
- contents = {}
- notificator = None
- tries = 0
- max_tries = 40
- stop = False
- while not stop:
- try:
- contents = {}
- notificator = VerifyFiles_Notificator(self.site, quick_check)
- for content_inner_path, content in self.site.content_manager.contents.iteritems():
- notificator.inc()
- contents[content_inner_path] = content
- if self.site.isStopping():
- stop = True
- break
- stop = True
- except RuntimeError as err:
- if "changed size during iteration" in str(err):
- tries += 1
- if tries >= max_tries:
- self.log.info("contents.json file list changed during iteration. %s tries done. Giving up.", tries)
- stop = True
- self.log.info("contents.json file list changed during iteration. Trying again... (%s)", tries)
- time.sleep(2 * tries)
- else:
- stop = True
-
- for content_inner_path, content in contents.items():
+ for content_inner_path, content in list(self.site.content_manager.contents.items()):
back["num_content"] += 1
i += 1
if i % 50 == 0:
time.sleep(0.001) # Context switch to avoid gevent hangs
-
- if self.site.isStopping():
- break
-
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
back["num_content_missing"] += 1
self.log.debug("[MISSING] %s" % content_inner_path)
bad_files.append(content_inner_path)
for file_relative_path in list(content.get("files", {}).keys()):
- notificator.inc()
back["num_file"] += 1
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
@@ -515,19 +456,15 @@ class SiteStorage(object):
bad_files.append(file_inner_path)
continue
- err = None
-
if quick_check:
- file_size = os.path.getsize(file_path)
- expected_size = content["files"][file_relative_path]["size"]
- ok = file_size == expected_size
+ ok = os.path.getsize(file_path) == content["files"][file_relative_path]["size"]
if not ok:
- err = "Invalid size: %s - actual, %s - expected" % (file_size, expected_size)
+ err = "Invalid size"
else:
try:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
- except Exception as err2:
- err = err2
+ except Exception as _err:
+ err = _err
ok = False
if not ok:
@@ -540,7 +477,6 @@ class SiteStorage(object):
optional_added = 0
optional_removed = 0
for file_relative_path in list(content.get("files_optional", {}).keys()):
- notificator.inc()
back["num_optional"] += 1
file_node = content["files_optional"][file_relative_path]
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
@@ -585,8 +521,6 @@ class SiteStorage(object):
(content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
)
- notificator.send()
-
self.site.content_manager.contents.db.processDelayed()
time.sleep(0.001) # Context switch to avoid gevent hangs
return back
diff --git a/src/Test/TestFileRequest.py b/src/Test/TestFileRequest.py
index 51dad600..3fabc271 100644
--- a/src/Test/TestFileRequest.py
+++ b/src/Test/TestFileRequest.py
@@ -16,7 +16,7 @@ class TestFileRequest:
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Normal request
response = connection.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
@@ -61,7 +61,7 @@ class TestFileRequest:
file_server.ip_incoming = {} # Reset flood protection
client = ConnectionServer(file_server.ip, 1545)
connection = client.getConnection(file_server.ip, 1544)
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
buff = io.BytesIO()
response = connection.request("streamFile", {"site": site.address, "inner_path": "content.json", "location": 0}, buff)
@@ -89,7 +89,7 @@ class TestFileRequest:
client.stop()
def testPex(self, file_server, site, site_temp):
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
diff --git a/src/Test/TestPeer.py b/src/Test/TestPeer.py
index 2d587ef4..f57e046e 100644
--- a/src/Test/TestPeer.py
+++ b/src/Test/TestPeer.py
@@ -13,7 +13,7 @@ from . import Spy
@pytest.mark.usefixtures("resetTempSettings")
class TestPeer:
def testPing(self, file_server, site, site_temp):
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
@@ -32,7 +32,7 @@ class TestPeer:
client.stop()
def testDownloadFile(self, file_server, site, site_temp):
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
@@ -77,11 +77,11 @@ class TestPeer:
def testHashfieldExchange(self, file_server, site, site_temp):
server1 = file_server
- server1.getSites()[site.address] = site
+ server1.sites[site.address] = site
site.connection_server = server1
server2 = FileServer(file_server.ip, 1545)
- server2.getSites()[site_temp.address] = site_temp
+ server2.sites[site_temp.address] = site_temp
site_temp.connection_server = server2
site.storage.verifyFiles(quick_check=True) # Find what optional files we have
@@ -127,7 +127,7 @@ class TestPeer:
server2.stop()
def testFindHash(self, file_server, site, site_temp):
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
client = FileServer(file_server.ip, 1545)
client.sites = {site_temp.address: site_temp}
site_temp.connection_server = client
diff --git a/src/Test/TestSiteDownload.py b/src/Test/TestSiteDownload.py
index 37fe796b..cd0a4c9f 100644
--- a/src/Test/TestSiteDownload.py
+++ b/src/Test/TestSiteDownload.py
@@ -23,7 +23,7 @@ class TestSiteDownload:
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -74,7 +74,7 @@ class TestSiteDownload:
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -130,7 +130,7 @@ class TestSiteDownload:
def testArchivedDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -178,7 +178,7 @@ class TestSiteDownload:
def testArchivedBeforeDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -229,7 +229,7 @@ class TestSiteDownload:
def testOptionalDownload(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = ConnectionServer(file_server.ip, 1545)
@@ -271,7 +271,7 @@ class TestSiteDownload:
def testFindOptional(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init full source server (has optional files)
site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
@@ -284,7 +284,7 @@ class TestSiteDownload:
gevent.spawn(listen)
time.sleep(0.001) # Port opening
- file_server_full.getSites()[site_full.address] = site_full # Add site
+ file_server_full.sites[site_full.address] = site_full # Add site
site_full.storage.verifyFiles(quick_check=True) # Check optional files
site_full_peer = site.addPeer(file_server.ip, 1546) # Add it to source server
hashfield = site_full_peer.updateHashfield() # Update hashfield
@@ -342,7 +342,7 @@ class TestSiteDownload:
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -423,7 +423,7 @@ class TestSiteDownload:
def testBigUpdate(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -476,7 +476,7 @@ class TestSiteDownload:
def testHugeContentSiteUpdate(self, file_server, site, site_temp):
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
@@ -524,7 +524,7 @@ class TestSiteDownload:
# Init source server
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Init client server
client = FileServer(file_server.ip, 1545)
diff --git a/src/Test/TestTor.py b/src/Test/TestTor.py
index 120cd53e..e6b82c1a 100644
--- a/src/Test/TestTor.py
+++ b/src/Test/TestTor.py
@@ -75,7 +75,7 @@ class TestTor:
assert file_server.getConnection(address + ".onion", 1544, site=site) != file_server.getConnection(address + ".onion", 1544, site=site_temp)
# Only allow to query from the locked site
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
connection_locked = file_server.getConnection(address + ".onion", 1544, site=site)
assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site"
@@ -83,11 +83,11 @@ class TestTor:
def testPex(self, file_server, site, site_temp):
# Register site to currently running fileserver
site.connection_server = file_server
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
# Create a new file server to emulate new peer connecting to our peer
file_server_temp = FileServer(file_server.ip, 1545)
site_temp.connection_server = file_server_temp
- file_server_temp.getSites()[site_temp.address] = site_temp
+ file_server_temp.sites[site_temp.address] = site_temp
# We will request peers from this
peer_source = site_temp.addPeer(file_server.ip, 1544)
@@ -113,7 +113,7 @@ class TestTor:
def testFindHash(self, tor_manager, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
- file_server.getSites()[site.address] = site
+ file_server.sites[site.address] = site
file_server.tor_manager = tor_manager
client = FileServer(file_server.ip, 1545)
diff --git a/src/Test/testdata/chart.db-shm b/src/Test/testdata/chart.db-shm
deleted file mode 100644
index 7c822be1..00000000
Binary files a/src/Test/testdata/chart.db-shm and /dev/null differ
diff --git a/src/Test/testdata/chart.db-wal b/src/Test/testdata/chart.db-wal
deleted file mode 100644
index dbb007b0..00000000
Binary files a/src/Test/testdata/chart.db-wal and /dev/null differ
diff --git a/src/Test/testdata/content.db-shm b/src/Test/testdata/content.db-shm
deleted file mode 100644
index dfcb5880..00000000
Binary files a/src/Test/testdata/content.db-shm and /dev/null differ
diff --git a/src/Test/testdata/content.db-wal b/src/Test/testdata/content.db-wal
deleted file mode 100644
index a7892237..00000000
Binary files a/src/Test/testdata/content.db-wal and /dev/null differ
diff --git a/src/Test/testdata/filters.json b/src/Test/testdata/filters.json
deleted file mode 100644
index 9e26dfee..00000000
--- a/src/Test/testdata/filters.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/src/Test/testdata/openssl.cnf b/src/Test/testdata/openssl.cnf
deleted file mode 100644
index ff59f6a4..00000000
--- a/src/Test/testdata/openssl.cnf
+++ /dev/null
@@ -1,58 +0,0 @@
-[ req ]
-default_bits = 2048
-default_keyfile = server-key.pem
-distinguished_name = subject
-req_extensions = req_ext
-x509_extensions = x509_ext
-string_mask = utf8only
-
-# The Subject DN can be formed using X501 or RFC 4514 (see RFC 4519 for a description).
-# Its sort of a mashup. For example, RFC 4514 does not provide emailAddress.
-[ subject ]
-countryName = US
-stateOrProvinceName = NY
-localityName = New York
-organizationName = Example, LLC
-
-# Use a friendly name here because its presented to the user. The server's DNS
-# names are placed in Subject Alternate Names. Plus, DNS names here is deprecated
-# by both IETF and CA/Browser Forums. If you place a DNS name here, then you
-# must include the DNS name in the SAN too (otherwise, Chrome and others that
-# strictly follow the CA/Browser Baseline Requirements will fail).
-commonName = Example Company
-
-emailAddress = test@example.com
-
-# Section x509_ext is used when generating a self-signed certificate. I.e., openssl req -x509 ...
-[ x509_ext ]
-
-subjectKeyIdentifier = hash
-authorityKeyIdentifier = keyid,issuer
-
-basicConstraints = CA:FALSE
-keyUsage = digitalSignature, keyEncipherment
-extendedKeyUsage = clientAuth, serverAuth
-subjectAltName = @alternate_names
-
-# RFC 5280, Section 4.2.1.12 makes EKU optional
-# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
-# extendedKeyUsage = serverAuth, clientAuth
-
-# Section req_ext is used when generating a certificate signing request. I.e., openssl req ...
-[ req_ext ]
-
-subjectKeyIdentifier = hash
-
-basicConstraints = CA:FALSE
-keyUsage = digitalSignature, keyEncipherment
-extendedKeyUsage = clientAuth, serverAuth
-subjectAltName = @alternate_names
-
-# RFC 5280, Section 4.2.1.12 makes EKU optional
-# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
-# extendedKeyUsage = serverAuth, clientAuth
-
-[ alternate_names ]
-
-DNS.1 = nazwa.pl
-DNS.2 = www.nazwa.pl
\ No newline at end of file
diff --git a/src/Test/testdata/sites.json b/src/Test/testdata/sites.json
deleted file mode 100644
index 9e26dfee..00000000
--- a/src/Test/testdata/sites.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/src/Test/testdata/trackers.json b/src/Test/testdata/trackers.json
deleted file mode 100644
index 9e26dfee..00000000
--- a/src/Test/testdata/trackers.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/src/Test/testdata/users.json b/src/Test/testdata/users.json
deleted file mode 100644
index fdadced4..00000000
--- a/src/Test/testdata/users.json
+++ /dev/null
@@ -1,9 +0,0 @@
-
- {
- "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
- "certs": {},
- "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
- "sites": {}
- }
- }
-
\ No newline at end of file
diff --git a/src/Ui/UiRequest.py b/src/Ui/UiRequest.py
index dbd3ca67..4a4e0545 100644
--- a/src/Ui/UiRequest.py
+++ b/src/Ui/UiRequest.py
@@ -543,18 +543,37 @@ class UiRequest(object):
if show_loadingscreen:
meta_tags += '';
-
+
+ def xescape(s):
+ '''combines parts from re.escape & html.escape'''
+ # https://github.com/python/cpython/blob/3.10/Lib/re.py#L267
+ # '&' is handled otherwise
+ re_chars = {i: '\\' + chr(i) for i in b'()[]{}*+-|^$\\.~# \t\n\r\v\f'}
+ # https://github.com/python/cpython/blob/3.10/Lib/html/__init__.py#L12
+ html_chars = {
+ '<' : '&lt;',
+ '>' : '&gt;',
+ '"' : '&quot;',
+ "'" : '&#x27;',
+ }
+ # we can't replace '&' because it makes certain zites work incorrectly
+ # it should however in no way interfere with re.sub in render
+ repl = {}
+ repl.update(re_chars)
+ repl.update(html_chars)
+ return s.translate(repl)
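+ # Illustrative example (not part of the change itself): xescape('<a href="#x">') == '&lt;a\ href=&quot;\#x&quot;&gt;'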
+
return self.render(
"src/Ui/template/wrapper.html",
server_url=server_url,
inner_path=inner_path,
- file_url=re.escape(file_url),
- file_inner_path=re.escape(file_inner_path),
+ file_url=xescape(file_url),
+ file_inner_path=xescape(file_inner_path),
address=site.address,
- title=html.escape(title),
+ title=xescape(title),
body_style=body_style,
meta_tags=meta_tags,
- query_string=re.escape(inner_query_string),
+ query_string=xescape(inner_query_string),
wrapper_key=site.settings["wrapper_key"],
ajax_key=site.settings["ajax_key"],
wrapper_nonce=wrapper_nonce,
@@ -730,7 +749,10 @@ class UiRequest(object):
def replaceHtmlVariables(self, block, path_parts):
user = self.getCurrentUser()
- themeclass = "theme-%-6s" % re.sub("[^a-z]", "", user.settings.get("theme", "light"))
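+ # A request can arrive with no logged-in user (assumed possible here); guard before reading settings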
+ if user and user.settings:
+ themeclass = "theme-%-6s" % re.sub("[^a-z]", "", user.settings.get("theme", "light"))
+ else:
+ themeclass = "theme-light"
block = block.replace(b"{themeclass}", themeclass.encode("utf8"))
if path_parts:
diff --git a/src/Ui/UiServer.py b/src/Ui/UiServer.py
index bda1daa0..61943ada 100644
--- a/src/Ui/UiServer.py
+++ b/src/Ui/UiServer.py
@@ -167,7 +167,7 @@ class UiServer:
self.log.error("Web interface bind error, must be running already, exiting.... %s" % err)
import main
main.file_server.stop()
- self.log.info("Stopped.")
+ self.log.debug("Stopped.")
def stop(self):
self.log.debug("Stopping...")
diff --git a/src/Ui/UiWebsocket.py b/src/Ui/UiWebsocket.py
index 60d746db..2f982e1d 100644
--- a/src/Ui/UiWebsocket.py
+++ b/src/Ui/UiWebsocket.py
@@ -318,7 +318,6 @@ class UiWebsocket(object):
back["updatesite"] = config.updatesite
back["dist_type"] = config.dist_type
back["lib_verify_best"] = CryptBitcoin.lib_verify_best
- back["passive_mode"] = file_server.passive_mode
return back
def formatAnnouncerInfo(self, site):
@@ -328,7 +327,10 @@ class UiWebsocket(object):
def actionAs(self, to, address, cmd, params=[]):
if not self.hasSitePermission(address, cmd=cmd):
+ # TODO: return this as an error?
return self.response(to, "No permission for site %s" % address)
+ if not self.server.sites.get(address):
+ return self.response(to, {"error": "Site does not exist: %s" % address})
req_self = copy.copy(self)
req_self.site = self.server.sites.get(address)
req_self.hasCmdPermission = self.hasCmdPermission # Use the same permissions as current site
@@ -420,10 +422,15 @@ class UiWebsocket(object):
is_user_content = file_info and ("cert_signers" in file_info or "cert_signers_pattern" in file_info)
if is_user_content and privatekey is None:
cert = self.user.getCert(self.site.address)
- extend["cert_auth_type"] = cert["auth_type"]
- extend["cert_user_id"] = self.user.getCertUserId(site.address)
- extend["cert_sign"] = cert["cert_sign"]
- self.log.debug("Extending content.json with cert %s" % extend["cert_user_id"])
+ if not cert:
+ error = "Site sign failed: no certificate selected for site %s, so signing %s failed. Add or select a user cert via the site login." % (self.site.address, inner_path)
+ self.log.error(error)
+ return self.response(to, {"error": error})
+ else:
+ extend["cert_auth_type"] = cert["auth_type"]
+ extend["cert_user_id"] = self.user.getCertUserId(site.address)
+ extend["cert_sign"] = cert["cert_sign"]
+ self.log.debug("Extending content.json with cert %s" % extend["cert_user_id"])
if not self.hasFilePermission(inner_path):
self.log.error("SiteSign error: you don't own this site & site owner doesn't allow you to do so.")
@@ -913,9 +920,9 @@ class UiWebsocket(object):
self.response(to, "ok")
# Update site content.json
- def actionSiteUpdate(self, to, address, check_files=False, verify_files=False, since=None, announce=False):
+ def actionSiteUpdate(self, to, address, check_files=False, since=None, announce=False):
def updateThread():
- site.update(announce=announce, check_files=check_files, verify_files=verify_files, since=since)
+ site.update(announce=announce, check_files=check_files, since=since)
self.response(to, "Updated")
site = self.server.sites.get(address)
@@ -1165,32 +1172,6 @@ class UiWebsocket(object):
file_server.portCheck()
self.response(to, file_server.port_opened)
- @flag.admin
- @flag.no_multiuser
- def actionServerSetPassiveMode(self, to, passive_mode=False):
- import main
- file_server = main.file_server
- if file_server.isPassiveMode() != passive_mode:
- file_server.setPassiveMode(passive_mode)
- if file_server.isPassiveMode():
- self.cmd("notification", ["info", _["Passive mode enabled"], 5000])
- else:
- self.cmd("notification", ["info", _["Passive mode disabled"], 5000])
- self.server.updateWebsocket()
-
- @flag.admin
- @flag.no_multiuser
- def actionServerSetOfflineMode(self, to, offline_mode=False):
- import main
- file_server = main.file_server
- if file_server.isOfflineMode() != offline_mode:
- file_server.setOfflineMode(offline_mode)
- if file_server.isOfflineMode():
- self.cmd("notification", ["info", _["Offline mode enabled"], 5000])
- else:
- self.cmd("notification", ["info", _["Offline mode disabled"], 5000])
- self.server.updateWebsocket()
-
@flag.admin
@flag.no_multiuser
def actionServerShutdown(self, to, restart=False):
@@ -1201,7 +1182,7 @@ class UiWebsocket(object):
return False
if restart:
main.restart_after_shutdown = True
- main.file_server.stop(ui_websocket=self)
+ main.file_server.stop()
main.ui_server.stop()
if restart:
diff --git a/src/loglevel_overrides.py b/src/loglevel_overrides.py
deleted file mode 100644
index 5622e523..00000000
--- a/src/loglevel_overrides.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# This file is for adding rules for selectively enabling debug logging
-# when working on the code.
-# Add your rules here and skip this file when committing changes.
-
-#import re
-#from util import SelectiveLogger
-#
-#SelectiveLogger.addLogLevelRaisingRule("ConnServer")
-#SelectiveLogger.addLogLevelRaisingRule(re.compile(r'^Site:'))
diff --git a/src/main.py b/src/main.py
index 21424d47..ec90f4d9 100644
--- a/src/main.py
+++ b/src/main.py
@@ -4,7 +4,6 @@ import sys
import stat
import time
import logging
-import loglevel_overrides
startup_errors = []
def startupError(msg):
@@ -155,7 +154,7 @@ class Actions(object):
logging.info("Starting servers....")
gevent.joinall([gevent.spawn(ui_server.start), gevent.spawn(file_server.start)])
- logging.info("All servers stopped")
+ logging.info("All server stopped")
# Site commands
@@ -255,8 +254,9 @@ class Actions(object):
file_correct = site.content_manager.verifyFile(
content_inner_path, site.storage.open(content_inner_path, "rb"), ignore_same=False
)
- except Exception as err:
+ except Exception as exp:
file_correct = False
+ err = exp
if file_correct is True:
logging.info("[OK] %s (Done in %.3fs)" % (content_inner_path, time.time() - s))
diff --git a/src/util/CircularIterator.py b/src/util/CircularIterator.py
deleted file mode 100644
index 3466092e..00000000
--- a/src/util/CircularIterator.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import random
-
-class CircularIterator:
- def __init__(self):
- self.successive_count = 0
- self.last_size = 0
- self.index = -1
-
- def next(self, items):
- self.last_size = len(items)
-
- if self.last_size == 0:
- return None
-
- if self.index < 0:
- self.index = random.randint(0, self.last_size)
- else:
- self.index += 1
-
- self.index = self.index % self.last_size
-
- self.successive_count += 1
-
- return items[self.index]
-
- def resetSuccessiveCount(self):
- self.successive_count = 0
-
- def getSuccessiveCount(self):
- return self.successive_count
-
- def isWrapped(self):
- return self.successive_count >= self.last_size
-
diff --git a/src/util/Diff.py b/src/util/Diff.py
index 8281188b..53b82c5a 100644
--- a/src/util/Diff.py
+++ b/src/util/Diff.py
@@ -42,6 +42,8 @@ def patch(old_f, actions):
continue
elif action == "+": # Add lines
for add_line in param:
+ if type(add_line) is str:
+ add_line = add_line.encode()
new_f.write(add_line)
else:
raise "Unknown action: %s" % action
diff --git a/src/util/GreenletManager.py b/src/util/GreenletManager.py
index d711d09a..e024233d 100644
--- a/src/util/GreenletManager.py
+++ b/src/util/GreenletManager.py
@@ -3,37 +3,17 @@ from Debug import Debug
class GreenletManager:
- # pool is either gevent.pool.Pool or GreenletManager.
- # if pool is None, new gevent.pool.Pool() is created.
- def __init__(self, pool=None):
+ def __init__(self):
self.greenlets = set()
- if not pool:
- pool = gevent.pool.Pool(None)
- self.pool = pool
-
- def _spawn_later(self, seconds, *args, **kwargs):
- # If pool is another GreenletManager, delegate to it.
- if hasattr(self.pool, 'spawnLater'):
- return self.pool.spawnLater(seconds, *args, **kwargs)
-
- # There's gevent.spawn_later(), but there isn't gevent.pool.Pool.spawn_later().
- # Doing manually.
- greenlet = self.pool.greenlet_class(*args, **kwargs)
- self.pool.add(greenlet)
- greenlet.start_later(seconds)
- return greenlet
-
- def _spawn(self, *args, **kwargs):
- return self.pool.spawn(*args, **kwargs)
def spawnLater(self, *args, **kwargs):
- greenlet = self._spawn_later(*args, **kwargs)
+ greenlet = gevent.spawn_later(*args, **kwargs)
greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
self.greenlets.add(greenlet)
return greenlet
def spawn(self, *args, **kwargs):
- greenlet = self._spawn(*args, **kwargs)
+ greenlet = gevent.spawn(*args, **kwargs)
greenlet.link(lambda greenlet: self.greenlets.remove(greenlet))
self.greenlets.add(greenlet)
return greenlet
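After the rewrite, the manager is a thin wrapper over `gevent.spawn` / `gevent.spawn_later` that tracks live greenlets in a set; each greenlet unlinks itself on completion via `link()`. A rough usage sketch (import path per this repo's `src/` layout, assumed to be on `sys.path`):

```python
import gevent
from util.GreenletManager import GreenletManager

manager = GreenletManager()
g1 = manager.spawn(gevent.sleep, 0.1)            # runs immediately
g2 = manager.spawnLater(0.2, gevent.sleep, 0.1)  # first arg is the delay

gevent.joinall([g1, g2])
gevent.sleep(0)                # give the link() callbacks a turn to fire
assert not manager.greenlets   # both removed themselves from the set
```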
diff --git a/src/util/SafeRe.py b/src/util/SafeRe.py
index 8c394a84..6018e2d3 100644
--- a/src/util/SafeRe.py
+++ b/src/util/SafeRe.py
@@ -1,16 +1,10 @@
import re
-import logging
-
-log = logging.getLogger("SafeRe")
-
class UnsafePatternError(Exception):
pass
-max_cache_size = 1000
cached_patterns = {}
-old_cached_patterns = {}
def isSafePattern(pattern):
@@ -21,78 +15,18 @@ def isSafePattern(pattern):
if unsafe_pattern_match:
raise UnsafePatternError("Potentially unsafe part of the pattern: %s in %s" % (unsafe_pattern_match.group(0), pattern))
- repetitions1 = re.findall(r"\.[\*\{\+]", pattern)
- repetitions2 = re.findall(r"[^(][?]", pattern)
- if len(repetitions1) + len(repetitions2) >= 10:
- raise UnsafePatternError("More than 10 repetitions in %s" % pattern)
+ repetitions = re.findall(r"\.[\*\{\+]", pattern)
+ if len(repetitions) >= 10:
+ raise UnsafePatternError("More than 10 repetitions of %s in %s" % (repetitions[0], pattern))
return True
-def compilePattern(pattern):
- global cached_patterns
- global old_cached_patterns
-
+def match(pattern, *args, **kwargs):
cached_pattern = cached_patterns.get(pattern)
if cached_pattern:
- return cached_pattern
-
- cached_pattern = old_cached_patterns.get(pattern)
- if cached_pattern:
- del old_cached_patterns[pattern]
- cached_patterns[pattern] = cached_pattern
- return cached_pattern
-
- if isSafePattern(pattern):
- cached_pattern = re.compile(pattern)
- cached_patterns[pattern] = cached_pattern
- log.debug("Compiled new pattern: %s" % pattern)
- log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
-
- if len(cached_patterns) > max_cache_size:
- old_cached_patterns = cached_patterns
- cached_patterns = {}
- log.debug("Size limit reached. Rotating cache.")
- log.debug("Cache size: %d + %d" % (len(cached_patterns), len(old_cached_patterns)))
-
- return cached_pattern
-
-
-def match(pattern, *args, **kwargs):
- cached_pattern = compilePattern(pattern)
- return cached_pattern.match(*args, **kwargs)
-
-################################################################################
-
-# TESTS
-
-def testSafePattern(pattern):
- try:
- return isSafePattern(pattern)
- except UnsafePatternError as err:
- return False
-
-
-# Some real examples to make sure it works as expected
-assert testSafePattern('(data/mp4/.*|updater/.*)')
-assert testSafePattern('((js|css)/(?!all.(js|css)))|.git')
-
-
-# Unsafe cases:
-
-# ((?!json).)*$ not allowed, because of ) before the * character. Possible fix: .*(?!json)$
-assert not testSafePattern('((?!json).)*$')
-assert testSafePattern('.*(?!json)$')
-
-# (.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot) not allowed, because it has 12 .* repetition patterns. Possible fix: .*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)
-assert not testSafePattern('(.*.epub|.*.jpg|.*.jpeg|.*.png|data/.*.gif|.*.avi|.*.ogg|.*.webm|.*.mp4|.*.mp3|.*.mkv|.*.eot)')
-assert testSafePattern('.*(epub|jpg|jpeg|png|data/gif|avi|ogg|webm|mp4|mp3|mkv|eot)')
-
-# https://github.com/HelloZeroNet/ZeroNet/issues/2757
-assert not testSafePattern('a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
-assert not testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}x.{0,1}')
-assert testSafePattern('a?a?a?a?a?a?a?x.{0,1}x.{0,1}')
-assert not testSafePattern('a?a?a?a?a?a?a?x.*x.*x.*')
-assert testSafePattern('a?a?a?a?a?a?a?x.*x.*')
-
-################################################################################
+ return cached_pattern.match(*args, **kwargs)
+ else:
+ if isSafePattern(pattern):
+ cached_patterns[pattern] = re.compile(pattern)
+ return cached_patterns[pattern].match(*args, **kwargs)
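The simplified module keeps the same public surface: `match()` compiles a pattern on first use, caches it by pattern string (now without the size-capped, two-generation cache), and raises `UnsafePatternError` for patterns the safety checks flag. Usage, roughly (assuming `src/` is on `sys.path`; the specific unsafe example relies on the repetition check partly elided above):

```python
from util import SafeRe

# Safe pattern: compiled once, cached, and matched like re.match().
assert SafeRe.match(r"data/users/.*", "data/users/content.json")

try:
    SafeRe.match(r"(a+)+$", "aaaa")  # nested quantifier trips the unsafe-part check
except SafeRe.UnsafePatternError as err:
    print(err)
```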
diff --git a/src/util/SelectiveLogger.py b/src/util/SelectiveLogger.py
deleted file mode 100644
index fcdcba0a..00000000
--- a/src/util/SelectiveLogger.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import re
-
-log_level_raising_rules = []
-
-def addLogLevelRaisingRule(rule, level=None):
- if level is None:
- level = logging.INFO
- log_level_raising_rules.append({
- "rule": rule,
- "level": level
- })
-
-def matchLogLevelRaisingRule(name):
- for rule in log_level_raising_rules:
- if isinstance(rule["rule"], re.Pattern):
- if rule["rule"].search(name):
- return rule["level"]
- else:
- if rule["rule"] == name:
- return rule["level"]
- return None
-
-class SelectiveLogger(logging.getLoggerClass()):
- def __init__(self, name, level=logging.NOTSET):
- return super().__init__(name, level)
-
- def raiseLevel(self, level):
- raised_level = matchLogLevelRaisingRule(self.name)
- if raised_level is not None:
- if level < raised_level:
- level = raised_level
- return level
-
- def isEnabledFor(self, level):
- level = self.raiseLevel(level)
- return super().isEnabledFor(level)
-
- def _log(self, level, msg, args, **kwargs):
- level = self.raiseLevel(level)
- return super()._log(level, msg, args, **kwargs)
-
-logging.setLoggerClass(SelectiveLogger)
diff --git a/src/util/__init__.py b/src/util/__init__.py
index f00c1459..ab8a8b88 100644
--- a/src/util/__init__.py
+++ b/src/util/__init__.py
@@ -1,5 +1,4 @@
from .Cached import Cached
-from .CircularIterator import CircularIterator
from .Event import Event
from .Noparallel import Noparallel
from .Pooled import Pooled
diff --git a/src/util/helper.py b/src/util/helper.py
index f44bcfce..61455b08 100644
--- a/src/util/helper.py
+++ b/src/util/helper.py
@@ -290,8 +290,7 @@ local_ip_pattern = re.compile(r"^127\.|192\.168\.|10\.|172\.1[6-9]\.|172\.2[0-9]
def isPrivateIp(ip):
return local_ip_pattern.match(ip)
-# XXX: Deprecated. Use ConnectionServer.getIpType() instead.
-# To be removed in 0.9.0
+
def getIpType(ip):
if ip.endswith(".onion"):
return "onion"
diff --git a/trackers.txt b/trackers.txt
new file mode 100644
index 00000000..a42f8ca4
--- /dev/null
+++ b/trackers.txt
@@ -0,0 +1,142 @@
+udp://tracker.opentrackr.org:1337/announce
+udp://explodie.org:6969/announce
+udp://open.stealth.si:80/announce
+http://tracker.ipv6tracker.ru:80/announce
+udp://tracker.birkenwald.de:6969/announce
+udp://tracker.moeking.me:6969/announce
+http://tracker.bt4g.com:2095/announce
+https://tracker.nanoha.org:443/announce
+http://tracker.files.fm:6969/announce
+http://open.acgnxtracker.com:80/announce
+udp://tracker.army:6969/announce
+udp://fe.dealclub.de:6969/announce
+udp://tracker.leech.ie:1337/announce
+udp://tracker.altrosky.nl:6969/announce
+https://tracker.cyber-hub.net:443/announce
+https://tracker.lilithraws.cf:443/announce
+http://bt.okmp3.ru:2710/announce
+udp://vibe.sleepyinternetfun.xyz:1738/announce
+udp://open.publictracker.xyz:6969/announce
+udp://tracker.bitsearch.to:1337/announce
+udp://tracker.pomf.se:80/announce
+https://tr.burnabyhighstar.com:443/announce
+https://tr.abiir.top:443/announce
+udp://open.free-tracker.ga:6969/announce
+http://i-p-v-6.tk:6969/announce
+http://open-v6.demonoid.ch:6969/announce
+udp://aarsen.me:6969/announce
+udp://htz3.noho.st:6969/announce
+udp://uploads.gamecoast.net:6969/announce
+udp://mail.zasaonsk.ga:6969/announce
+udp://tracker.joybomb.tw:6969/announce
+udp://tracker.jonaslsa.com:6969/announce
+udp://leefafa.tk:6969/announce
+udp://carr.codes:6969/announce
+https://tr.fuckbitcoin.xyz:443/announce
+udp://tracker.cubonegro.xyz:6969/announce
+udp://tracker.skynetcloud.site:6969/announce
+http://tracker4.itzmx.com:2710/announce
+https://tracker.lilithraws.org:443/announce
+udp://tracker.novaopcj.eu.org:6969/announce
+udp://exodus.desync.com:6969/announce
+http://t.acg.rip:6699/announce
+udp://tracker2.dler.com:80/announce
+udp://6ahddutb1ucc3cp.ru:6969/announce
+udp://tracker.blacksparrowmedia.net:6969/announce
+http://fxtt.ru:80/announce
+udp://tracker.auctor.tv:6969/announce
+udp://torrentclub.space:6969/announce
+udp://zecircle.xyz:6969/announce
+udp://psyco.fr:6969/announce
+udp://fh2.cmp-gaming.com:6969/announce
+udp://new-line.net:6969/announce
+udp://torrents.artixlinux.org:6969/announce
+udp://bt.ktrackers.com:6666/announce
+udp://static.54.161.216.95.clients.your-server.de:6969/announce
+udp://cpe-104-34-3-152.socal.res.rr.com:6969/announce
+http://t.overflow.biz:6969/announce
+udp://tracker1.myporn.club:9337/announce
+udp://moonburrow.club:6969/announce
+udp://tracker.artixlinux.org:6969/announce
+https://t1.hloli.org:443/announce
+udp://bt1.archive.org:6969/announce
+udp://tracker.theoks.net:6969/announce
+udp://tracker.4.babico.name.tr:3131/announce
+udp://buddyfly.top:6969/announce
+udp://ipv6.tracker.harry.lu:80/announce
+udp://public.publictracker.xyz:6969/announce
+udp://mail.artixlinux.org:6969/announce
+udp://v1046920.hosted-by-vdsina.ru:6969/announce
+udp://tracker.cyberia.is:6969/announce
+udp://tracker.beeimg.com:6969/announce
+udp://creative.7o7.cx:6969/announce
+udp://open.dstud.io:6969/announce
+udp://laze.cc:6969/announce
+udp://download.nerocloud.me:6969/announce
+udp://cutscloud.duckdns.org:6969/announce
+https://tracker.jiesen.life:8443/announce
+udp://jutone.com:6969/announce
+udp://wepzone.net:6969/announce
+udp://ipv4.tracker.harry.lu:80/announce
+udp://tracker.tcp.exchange:6969/announce
+udp://f1sh.de:6969/announce
+udp://movies.zsw.ca:6969/announce
+https://tracker1.ctix.cn:443/announce
+udp://sanincode.com:6969/announce
+udp://www.torrent.eu.org:451/announce
+udp://open.4ever.tk:6969/announce
+https://tracker2.ctix.cn:443/announce
+udp://bt2.archive.org:6969/announce
+http://t.nyaatracker.com:80/announce
+udp://yahor.ftp.sh:6969/announce
+udp://tracker.openbtba.com:6969/announce
+udp://tracker.dler.com:6969/announce
+udp://tracker-udp.gbitt.info:80/announce
+udp://tracker.srv00.com:6969/announce
+udp://tracker.pimpmyworld.to:6969/announce
+http://tracker.gbitt.info:80/announce
+udp://tracker6.lelux.fi:6969/announce
+http://tracker.vrpnet.org:6969/announce
+http://00.xxtor.com:443/announce
+http://vps02.net.orel.ru:80/announce
+udp://tracker.yangxiaoguozi.cn:6969/announce
+udp://rep-art.ynh.fr:6969/announce
+https://tracker.imgoingto.icu:443/announce
+udp://mirror.aptus.co.tz:6969/announce
+udp://tracker.lelux.fi:6969/announce
+udp://tracker.torrent.eu.org:451/announce
+udp://admin.52ywp.com:6969/announce
+udp://thouvenin.cloud:6969/announce
+http://vps-dd0a0715.vps.ovh.net:6969/announce
+udp://bubu.mapfactor.com:6969/announce
+udp://94-227-232-84.access.telenet.be:6969/announce
+udp://epider.me:6969/announce
+udp://camera.lei001.com:6969/announce
+udp://tamas3.ynh.fr:6969/announce
+https://tracker.tamersunion.org:443/announce
+udp://ftp.pet:2710/announce
+udp://p4p.arenabg.com:1337/announce
+http://tracker.mywaifu.best:6969/announce
+udp://tracker.monitorit4.me:6969/announce
+udp://ipv6.tracker.monitorit4.me:6969/announce
+zero://k5w77dozo3hy5zualyhni6vrh73iwfkaofa64abbilwyhhd3wgenbjqd.onion:15441
+zero://2kcb2fqesyaevc4lntogupa4mkdssth2ypfwczd2ov5a3zo6ytwwbayd.onion:15441
+zero://5vczpwawviukvd7grfhsfxp7a6huz77hlis4fstjkym5kmf4pu7i7myd.onion:15441
+zero://pn4q2zzt2pw4nk7yidxvsxmydko7dfibuzxdswi6gu6ninjpofvqs2id.onion:15441
+zero://6i54dd5th73oelv636ivix6sjnwfgk2qsltnyvswagwphub375t3xcad.onion:15441
+zero://tl74auz4tyqv4bieeclmyoe4uwtoc2dj7fdqv4nc4gl5j2bwg2r26bqd.onion:15441
+zero://wlxav3szbrdhest4j7dib2vgbrd7uj7u7rnuzg22cxbih7yxyg2hsmid.onion:15441
+zero://zy7wttvjtsijt5uwmlar4yguvjc2gppzbdj4v6bujng6xwjmkdg7uvqd.onion:15441
+zero://rlcjomszyitxpwv7kzopmqgzk3bdpsxeull4c3s6goszkk6h2sotfoad.onion:15441
+zero://gugt43coc5tkyrhrc3esf6t6aeycvcqzw7qafxrjpqbwt4ssz5czgzyd.onion:15441
+zero://ow7in4ftwsix5klcbdfqvfqjvimqshbm2o75rhtpdnsderrcbx74wbad.onion:15441
+zero://57hzgtu62yzxqgbvgxs7g3lfck3za4zrda7qkskar3tlak5recxcebyd.onion:15445
+zero://hb6ozikfiaafeuqvgseiik4r46szbpjfu66l67wjinnyv6dtopuwhtqd.onion:15445
+zero://qn65si4gtcwdiliq7vzrwu62qrweoxb6tx2cchwslaervj6szuje66qd.onion:26117
+zero://s3j2s5pjdfesbsmaqx6alsumaxxdxibmhv4eukmqpv3vqj6f627qx5yd.onion:15441
+zero://agufghdtniyfwty3wk55drxxwj2zxgzzo7dbrtje73gmvcpxy4ngs4ad.onion:15441
+zero://kgsvasoakvj4gnjiy7zemu34l3hq46dn5eauqkn76jpowmilci5t2vqd.onion:15445
+zero://dslesoe72bdfwfu4cfqa2wpd4hr3fhlu4zv6mfsjju5xlpmssouv36qd.onion:15441
+zero://f2hnjbggc3c2u2apvxdugirnk6bral54ibdoul3hhvu7pd4fso5fq3yd.onion:15441
+zero://skdeywpgm5xncpxbbr4cuiip6ey4dkambpanog6nruvmef4f3e7o47qd.onion:15441
+zero://tqmo2nffqo4qc5jgmz3me5eri3zpgf3v2zciufzmhnvznjve5c3argad.onion:15441
\ No newline at end of file
diff --git a/zeronet.py b/zeronet.py
index dacd2096..457efb19 100755
--- a/zeronet.py
+++ b/zeronet.py
@@ -66,7 +66,7 @@ def displayErrorMessage(err, error_log_path):
res = ctypes.windll.user32.MessageBoxW(0, err_title, "ZeroNet error", MB_YESNOCANCEL | MB_ICONEXCLAIMATION)
if res == ID_YES:
import webbrowser
- report_url = "https://github.com/HelloZeroNet/ZeroNet/issues/new?assignees=&labels=&template=bug-report.md&title=%s"
+ report_url = "https://github.com/ZeroNetX/ZeroNet/issues/new?assignees=&labels=&template=bug-report.md&title=%s"
webbrowser.open(report_url % urllib.parse.quote("Unhandled exception: %s" % err_message))
if res in [ID_YES, ID_NO]:
subprocess.Popen(['notepad.exe', error_log_path])