Version 0.3.5, Rev830, Full Tor mode support with hidden services, Onion stats in Sidebar, GeoDB download fix using Tor, Gray out disabled sites in Stats page, Tor hidden service status in stat page, Benchmark sha256, Skyts tracker out expodie in, 2 new tracker using ZeroNet protocol, Keep SSL cert option between restarts, SSL Certificate pinning support for connections, Site lock support for connections, Certificate pinned connections using implicit SSL, Flood protection whitelist support, Foreign keys support for DB layer, Not support for SQL query helper, 0 length file get bugfix, Pex onion address support, Faster port testing, Faster uPnP port opening, Need connections more often on owned sites, Delay ZeroHello startup message if port check or Tor manager not ready yet, Use lockfiles to avoid double start, Save original socket on proxy monkey patching to get ability to connect localhost directly, Handle atomic write errors, Broken gevent https workaround helper, Rsa crypt functions, Plugin to Bootstrap using ZeroNet protocol
This commit is contained in:
parent
c9578e9037
commit
e9d2cdfd37
99 changed files with 9476 additions and 267 deletions
118
plugins/AnnounceZero/AnnounceZeroPlugin.py
Normal file
118
plugins/AnnounceZero/AnnounceZeroPlugin.py
Normal file
|
@ -0,0 +1,118 @@
|
|||
import hashlib
|
||||
import time
|
||||
|
||||
from Plugin import PluginManager
|
||||
from Peer import Peer
|
||||
from util import helper
|
||||
from Crypt import CryptRsa
|
||||
|
||||
allow_reload = False  # No source reload supported in this plugin
time_full_announced = {}  # Tracker address -> time.time() of the last "announce all sites" to that tracker
connection_pool = {}  # Tracker address -> Peer object, kept alive so connections can be re-used
|
||||
|
||||
|
||||
def processPeerRes(site, peers):
    """
    Merge one tracker response entry into a site's peer list.

    peers: dict with packed "ip4" and "onion" address lists as returned by
    the tracker for a single site.
    Notifies the worker manager and the site websocket when new peers appear.
    """
    num_ip4 = len(peers["ip4"])
    num_onion = len(peers["onion"])
    added = 0

    # Unpack and register ip4 peers
    for packed_address in peers["ip4"]:
        address, port = helper.unpackAddress(packed_address)
        if site.addPeer(address, port):
            added += 1

    # Unpack and register onion peers
    for packed_address in peers["onion"]:
        address, port = helper.unpackOnionAddress(packed_address)
        if site.addPeer(address, port):
            added += 1

    if added:
        site.worker_manager.onPeers()
        site.updateWebsocket(peers_added=added)
    site.log.debug("Found %s ip4, %s onion peers, new: %s" % (num_ip4, num_onion, added))
|
||||
|
||||
|
||||
@PluginManager.registerTo("Site")
class SitePlugin(object):
    def announceTracker(self, tracker_protocol, tracker_address, fileserver_port=0, add_types=None, my_peer_id="", mode="start"):
        """
        Announce site(s) to a tracker speaking the ZeroNet "zero" protocol.

        Non-"zero" protocols are delegated to the original implementation.
        Returns the announce duration in seconds on success, False on failure,
        or True when a full announce was skipped because one happened recently.
        """
        if add_types is None:  # Avoid the shared-mutable-default-argument pitfall
            add_types = []
        if tracker_protocol != "zero":
            return super(SitePlugin, self).announceTracker(
                tracker_protocol, tracker_address, fileserver_port, add_types, my_peer_id, mode
            )

        s = time.time()

        # Peer address types we can accept back from the tracker
        need_types = ["ip4"]
        if self.connection_server and self.connection_server.tor_manager.enabled:
            need_types.append("onion")

        if mode == "start" or mode == "more":  # Single: Announce only this site
            sites = [self]
            full_announce = False
        else:  # Multi: Announce all currently serving site
            full_announce = True
            if time.time() - time_full_announced.get(tracker_address, 0) < 60 * 5:  # No reannounce all sites within 5 minute
                return True
            time_full_announced[tracker_address] = time.time()
            from Site import SiteManager
            sites = [site for site in SiteManager.site_manager.sites.values() if site.settings["serving"]]

        # Create request
        request = {
            "hashes": [], "onions": [], "port": fileserver_port, "need_types": need_types, "need_num": 20, "add": add_types
        }
        for site in sites:
            if "onion" in add_types:
                onion = self.connection_server.tor_manager.getOnion(site.address)
                request["onions"].append(onion)
            request["hashes"].append(hashlib.sha256(site.address).digest())

        # Tracker can remove sites that we don't announce
        if full_announce:
            request["delete"] = True

        # Sent request to tracker
        tracker = connection_pool.get(tracker_address)  # Re-use tracker connection if possible
        if not tracker:
            tracker_ip, tracker_port = tracker_address.split(":")
            tracker = Peer(tracker_ip, tracker_port, connection_server=self.connection_server)
            connection_pool[tracker_address] = tracker
        res = tracker.request("announce", request)

        if not res or "peers" not in res:
            self.log.debug("Announce to %s failed: %s" % (tracker_address, res))
            if full_announce:
                time_full_announced[tracker_address] = 0  # Allow an immediate full-announce retry
            return False

        # Add peers from response to site.
        # zip also protects against a malformed response carrying more
        # entries than the number of sites we announced.
        for site, site_res in zip(sites, res["peers"]):
            processPeerRes(site, site_res)

        # Check if we need to sign prove the onion addresses
        if "onion_sign_this" in res:
            self.log.debug("Signing %s for %s to add %s onions" % (res["onion_sign_this"], tracker_address, len(sites)))
            request["onion_signs"] = {}
            request["onion_sign_this"] = res["onion_sign_this"]
            request["need_num"] = 0  # Peers already received; this round only proves onion ownership
            for site in sites:
                onion = self.connection_server.tor_manager.getOnion(site.address)
                sign = CryptRsa.sign(res["onion_sign_this"], self.connection_server.tor_manager.getPrivatekey(onion))
                request["onion_signs"][self.connection_server.tor_manager.getPublickey(onion)] = sign
            res = tracker.request("announce", request)
            if not res or "onion_sign_this" in res:
                self.log.debug("Announce onion address to %s failed: %s" % (tracker_address, res))
                if full_announce:
                    time_full_announced[tracker_address] = 0
                return False

        if full_announce:
            tracker.remove()  # Close connection, we don't need it in next 5 minute

        return time.time() - s
|
1
plugins/AnnounceZero/__init__.py
Normal file
1
plugins/AnnounceZero/__init__.py
Normal file
|
@ -0,0 +1 @@
|
|||
import AnnounceZeroPlugin
|
|
@ -60,6 +60,7 @@ class UiWebsocketPlugin(object):
|
|||
def sidebarRenderPeerStats(self, body, site):
|
||||
connected = len([peer for peer in site.peers.values() if peer.connection and peer.connection.connected])
|
||||
connectable = len([peer_id for peer_id in site.peers.keys() if not peer_id.endswith(":0")])
|
||||
onion = len([peer_id for peer_id in site.peers.keys() if ".onion" in peer_id])
|
||||
peers_total = len(site.peers)
|
||||
if peers_total:
|
||||
percent_connected = float(connected) / peers_total
|
||||
|
@ -77,6 +78,7 @@ class UiWebsocketPlugin(object):
|
|||
<ul class='graph-legend'>
|
||||
<li class='color-green'><span>connected:</span><b>{connected}</b></li>
|
||||
<li class='color-blue'><span>Connectable:</span><b>{connectable}</b></li>
|
||||
<li class='color-purple'><span>Onion:</span><b>{onion}</b></li>
|
||||
<li class='color-black'><span>Total:</span><b>{peers_total}</b></li>
|
||||
</ul>
|
||||
</li>
|
||||
|
@ -201,7 +203,6 @@ class UiWebsocketPlugin(object):
|
|||
</li>
|
||||
""".format(**locals()))
|
||||
|
||||
|
||||
def sidebarRenderOptionalFileStats(self, body, site):
|
||||
size_total = 0.0
|
||||
size_downloaded = 0.0
|
||||
|
@ -213,7 +214,6 @@ class UiWebsocketPlugin(object):
|
|||
if site.content_manager.hashfield.hasHash(file_details["sha512"]):
|
||||
size_downloaded += file_details["size"]
|
||||
|
||||
|
||||
if not size_total:
|
||||
return False
|
||||
|
||||
|
@ -365,30 +365,43 @@ class UiWebsocketPlugin(object):
|
|||
import urllib
|
||||
import gzip
|
||||
import shutil
|
||||
from util import helper
|
||||
|
||||
self.log.info("Downloading GeoLite2 City database...")
|
||||
self.cmd("notification", ["geolite-info", "Downloading GeoLite2 City database (one time only, ~15MB)...", 0])
|
||||
try:
|
||||
# Download
|
||||
file = urllib.urlopen("http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz")
|
||||
data = StringIO.StringIO()
|
||||
while True:
|
||||
buff = file.read(1024 * 16)
|
||||
if not buff:
|
||||
break
|
||||
data.write(buff)
|
||||
self.log.info("GeoLite2 City database downloaded (%s bytes), unpacking..." % data.tell())
|
||||
data.seek(0)
|
||||
db_urls = [
|
||||
"http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz",
|
||||
"https://raw.githubusercontent.com/texnikru/GeoLite2-Database/master/GeoLite2-City.mmdb.gz"
|
||||
]
|
||||
for db_url in db_urls:
|
||||
try:
|
||||
# Download
|
||||
response = helper.httpRequest(db_url)
|
||||
|
||||
# Unpack
|
||||
with gzip.GzipFile(fileobj=data) as gzip_file:
|
||||
shutil.copyfileobj(gzip_file, open(db_path, "wb"))
|
||||
data = StringIO.StringIO()
|
||||
while True:
|
||||
buff = response.read(1024 * 512)
|
||||
if not buff:
|
||||
break
|
||||
data.write(buff)
|
||||
self.log.info("GeoLite2 City database downloaded (%s bytes), unpacking..." % data.tell())
|
||||
data.seek(0)
|
||||
|
||||
self.cmd("notification", ["geolite-done", "GeoLite2 City database downloaded!", 5000])
|
||||
time.sleep(2) # Wait for notify animation
|
||||
except Exception, err:
|
||||
self.cmd("notification", ["geolite-error", "GeoLite2 City database download error: %s!" % err, 0])
|
||||
raise err
|
||||
# Unpack
|
||||
with gzip.GzipFile(fileobj=data) as gzip_file:
|
||||
shutil.copyfileobj(gzip_file, open(db_path, "wb"))
|
||||
|
||||
self.cmd("notification", ["geolite-done", "GeoLite2 City database downloaded!", 5000])
|
||||
time.sleep(2) # Wait for notify animation
|
||||
return True
|
||||
except Exception, err:
|
||||
self.log.error("Error downloading %s: %s" % (db_url, err))
|
||||
pass
|
||||
self.cmd("notification", [
|
||||
"geolite-error",
|
||||
"GeoLite2 City database download error: %s!<br>Please download and unpack to data dir:<br>%s" % (err, db_urls[0]),
|
||||
0
|
||||
])
|
||||
|
||||
def actionSidebarGetPeers(self, to):
|
||||
permissions = self.getPermissions(to)
|
||||
|
@ -397,8 +410,9 @@ class UiWebsocketPlugin(object):
|
|||
try:
|
||||
import maxminddb
|
||||
db_path = config.data_dir + '/GeoLite2-City.mmdb'
|
||||
if not os.path.isfile(db_path):
|
||||
self.downloadGeoLiteDb(db_path)
|
||||
if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0:
|
||||
if not self.downloadGeoLiteDb(db_path):
|
||||
return False
|
||||
geodb = maxminddb.open_database(db_path)
|
||||
|
||||
peers = self.site.peers.values()
|
||||
|
@ -426,7 +440,10 @@ class UiWebsocketPlugin(object):
|
|||
if peer.ip in loc_cache:
|
||||
loc = loc_cache[peer.ip]
|
||||
else:
|
||||
loc = geodb.get(peer.ip)
|
||||
try:
|
||||
loc = geodb.get(peer.ip)
|
||||
except:
|
||||
loc = None
|
||||
loc_cache[peer.ip] = loc
|
||||
if not loc or "location" not in loc:
|
||||
continue
|
||||
|
@ -458,7 +475,6 @@ class UiWebsocketPlugin(object):
|
|||
return self.response(to, "You don't have permission to run this command")
|
||||
self.site.settings["own"] = bool(owned)
|
||||
|
||||
|
||||
def actionSiteSetAutodownloadoptional(self, to, owned):
|
||||
permissions = self.getPermissions(to)
|
||||
if "ADMIN" not in permissions:
|
||||
|
|
|
@ -53,6 +53,7 @@ class UiRequestPlugin(object):
|
|||
<style>
|
||||
* { font-family: monospace }
|
||||
table td, table th { text-align: right; padding: 0px 10px }
|
||||
.serving-False { color: gray }
|
||||
</style>
|
||||
"""
|
||||
|
||||
|
@ -113,15 +114,20 @@ class UiRequestPlugin(object):
|
|||
])
|
||||
yield "</table>"
|
||||
|
||||
# Tor hidden services
|
||||
yield "<br><br><b>Tor hidden services (status: %s):</b><br>" % main.file_server.tor_manager.status
|
||||
for site_address, onion in main.file_server.tor_manager.site_onions.items():
|
||||
yield "- %-34s: %s<br>" % (site_address, onion)
|
||||
|
||||
# Sites
|
||||
yield "<br><br><b>Sites</b>:"
|
||||
yield "<table>"
|
||||
yield "<tr><th>address</th> <th>connected</th> <th title='connected/good/total'>peers</th> <th>content.json</th> <th>out</th> <th>in</th> </tr>"
|
||||
for site in self.server.sites.values():
|
||||
for site in sorted(self.server.sites.values(), lambda a, b: cmp(a.address,b.address)):
|
||||
yield self.formatTableRow([
|
||||
(
|
||||
"""<a href='#' onclick='document.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
|
||||
(site.address, site.address)
|
||||
"""<a href='#' class='serving-%s' onclick='document.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
|
||||
(site.settings["serving"], site.address, site.address)
|
||||
),
|
||||
("%s", [peer.connection.id for peer in site.peers.values() if peer.connection and peer.connection.connected]),
|
||||
("%s/%s/%s", (
|
||||
|
@ -133,10 +139,10 @@ class UiRequestPlugin(object):
|
|||
("%.0fkB", site.settings.get("bytes_sent", 0) / 1024),
|
||||
("%.0fkB", site.settings.get("bytes_recv", 0) / 1024),
|
||||
])
|
||||
yield "<tr><td id='peers_%s' style='display: none; white-space: pre' colspan=2>" % site.address
|
||||
yield "<tr><td id='peers_%s' style='display: none; white-space: pre' colspan=6>" % site.address
|
||||
for key, peer in site.peers.items():
|
||||
if peer.time_found:
|
||||
time_found = int(time.time()-peer.time_found)/60
|
||||
time_found = int(time.time() - peer.time_found) / 60
|
||||
else:
|
||||
time_found = "--"
|
||||
if peer.connection:
|
||||
|
@ -145,7 +151,7 @@ class UiRequestPlugin(object):
|
|||
connection_id = None
|
||||
if site.content_manager.hashfield:
|
||||
yield "Optional files: %4s " % len(peer.hashfield)
|
||||
yield "(#%4s, err: %s, found: %5s min ago) %22s -<br>" % (connection_id, peer.connection_error, time_found, key)
|
||||
yield "(#%4s, err: %s, found: %5s min ago) %30s -<br>" % (connection_id, peer.connection_error, time_found, key)
|
||||
yield "<br></td></tr>"
|
||||
yield "</table>"
|
||||
|
||||
|
@ -155,7 +161,6 @@ class UiRequestPlugin(object):
|
|||
|
||||
# Object types
|
||||
|
||||
|
||||
obj_count = {}
|
||||
for obj in gc.get_objects():
|
||||
obj_type = str(type(obj))
|
||||
|
@ -325,9 +330,12 @@ class UiRequestPlugin(object):
|
|||
]
|
||||
if not refs:
|
||||
continue
|
||||
yield "%.1fkb <span title=\"%s\">%s</span>... " % (
|
||||
float(sys.getsizeof(obj)) / 1024, cgi.escape(str(obj)), cgi.escape(str(obj)[0:100].ljust(100))
|
||||
)
|
||||
try:
|
||||
yield "%.1fkb <span title=\"%s\">%s</span>... " % (
|
||||
float(sys.getsizeof(obj)) / 1024, cgi.escape(str(obj)), cgi.escape(str(obj)[0:100].ljust(100))
|
||||
)
|
||||
except:
|
||||
continue
|
||||
for ref in refs:
|
||||
yield " ["
|
||||
if "object at" in str(ref) or len(str(ref)) > 100:
|
||||
|
@ -445,12 +453,21 @@ class UiRequestPlugin(object):
|
|||
from cStringIO import StringIO
|
||||
|
||||
data = StringIO("Hello" * 1024 * 1024) # 5m
|
||||
with benchmark("sha512 x 100 000", 1):
|
||||
with benchmark("sha256 5M x 10", 0.6):
|
||||
for i in range(10):
|
||||
for y in range(10000):
|
||||
hash = CryptHash.sha512sum(data)
|
||||
data.seek(0)
|
||||
hash = CryptHash.sha256sum(data)
|
||||
yield "."
|
||||
valid = "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"
|
||||
valid = "8cd629d9d6aff6590da8b80782a5046d2673d5917b99d5603c3dcb4005c45ffa"
|
||||
assert hash == valid, "%s != %s" % (hash, valid)
|
||||
|
||||
data = StringIO("Hello" * 1024 * 1024) # 5m
|
||||
with benchmark("sha512 5M x 10", 0.6):
|
||||
for i in range(10):
|
||||
data.seek(0)
|
||||
hash = CryptHash.sha512sum(data)
|
||||
yield "."
|
||||
valid = "9ca7e855d430964d5b55b114e95c6bbb114a6d478f6485df93044d87b108904d"
|
||||
assert hash == valid, "%s != %s" % (hash, valid)
|
||||
|
||||
with benchmark("os.urandom(256) x 100 000", 0.65):
|
||||
|
|
157
plugins/disabled-Bootstrapper/BootstrapperDb.py
Normal file
157
plugins/disabled-Bootstrapper/BootstrapperDb.py
Normal file
|
@ -0,0 +1,157 @@
|
|||
import time
|
||||
import re
|
||||
|
||||
import gevent
|
||||
|
||||
from Config import config
|
||||
from Db import Db
|
||||
from util import helper
|
||||
|
||||
|
||||
class BootstrapperDb(Db):
    # SQLite-backed peer store for the Bootstrapper tracker plugin.
    # Note: execute() uses the project Db helper's "?" placeholder syntax,
    # which expands a dict into column=value pairs — not raw sqlite3 qmarks.

    def __init__(self):
        self.version = 6  # Schema version; bumping it recreates all tables
        self.hash_ids = {}  # hash -> id cache
        super(BootstrapperDb, self).__init__({"db_name": "Bootstrapper"}, "%s/bootstrapper.db" % config.data_dir)
        self.foreign_keys = True  # Needed so peer deletion cascades to peer_to_hash
        self.checkTables()
        self.updateHashCache()
        gevent.spawn(self.cleanup)  # Background greenlet; runs for process lifetime

    def cleanup(self):
        # Periodically drop peers that have not re-announced within 40 minutes
        while 1:
            self.execute("DELETE FROM peer WHERE date_announced < DATETIME('now', '-40 minute')")
            time.sleep(4*60)

    def updateHashCache(self):
        # Preload the hash -> hash_id mapping so getHashId can avoid a SELECT per call
        res = self.execute("SELECT * FROM hash")
        self.hash_ids = {str(row["hash"]): row["hash_id"] for row in res}
        self.log.debug("Loaded %s hash_ids" % len(self.hash_ids))

    def checkTables(self):
        # Recreate the schema when the stored version is older than self.version
        version = int(self.execute("PRAGMA user_version").fetchone()[0])
        self.log.debug("Db version: %s, needed: %s" % (version, self.version))
        if version < self.version:
            self.createTables()
        else:
            self.execute("VACUUM")

    def createTables(self):
        # Delete all tables
        self.execute("PRAGMA writable_schema = 1")
        self.execute("DELETE FROM sqlite_master WHERE type IN ('table', 'index', 'trigger')")
        self.execute("PRAGMA writable_schema = 0")
        self.execute("VACUUM")
        self.execute("PRAGMA INTEGRITY_CHECK")
        # Create new tables
        self.execute("""
            CREATE TABLE peer (
                peer_id        INTEGER  PRIMARY KEY ASC AUTOINCREMENT NOT NULL UNIQUE,
                port           INTEGER NOT NULL,
                ip4            TEXT,
                onion          TEXT,
                date_added     DATETIME DEFAULT (CURRENT_TIMESTAMP),
                date_announced DATETIME DEFAULT (CURRENT_TIMESTAMP)
            );
        """)

        self.execute("""
            CREATE TABLE peer_to_hash (
                peer_to_hash_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                peer_id         INTEGER REFERENCES peer (peer_id) ON DELETE CASCADE,
                hash_id         INTEGER REFERENCES hash (hash_id)
            );
        """)
        self.execute("CREATE INDEX peer_id ON peer_to_hash (peer_id);")
        self.execute("CREATE INDEX hash_id ON peer_to_hash (hash_id);")

        self.execute("""
            CREATE TABLE hash (
                hash_id    INTEGER  PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                hash       BLOB     UNIQUE NOT NULL,
                date_added DATETIME DEFAULT (CURRENT_TIMESTAMP)
            );
        """)
        self.execute("PRAGMA user_version = %s" % self.version)

    def getHashId(self, hash):
        # Return the numeric id for a site hash, inserting it on first sight.
        # buffer() stores the raw bytes as a BLOB (Python 2).
        if hash not in self.hash_ids:
            self.log.debug("New hash: %s" % repr(hash))
            self.execute("INSERT OR IGNORE INTO hash ?", {"hash": buffer(hash)})
            self.hash_ids[hash] = self.cur.cursor.lastrowid
        return self.hash_ids[hash]

    def peerAnnounce(self, ip4=None, onion=None, port=None, hashes=[], onion_signed=False, delete_missing_hashes=False):
        # Record that a peer (by ip4 or onion) is serving the given site hashes.
        # Returns the number of hash associations changed; an unsigned onion
        # announce returns len(hashes) to signal that signing is still required.
        hashes_ids_announced = []
        for hash in hashes:
            hashes_ids_announced.append(self.getHashId(hash))

        if not ip4 and not onion:
            return 0

        # Check user
        if onion:
            res = self.execute("SELECT * FROM peer WHERE ? LIMIT 1", {"onion": onion})
        else:
            res = self.execute("SELECT * FROM peer WHERE ? LIMIT 1", {"ip4": ip4, "port": port})

        user_row = res.fetchone()
        if user_row:
            peer_id = user_row["peer_id"]
            self.execute("UPDATE peer SET date_announced = DATETIME('now') WHERE ?", {"peer_id": peer_id})
        else:
            self.log.debug("New peer: %s %s signed: %s" % (ip4, onion, onion_signed))
            if onion and not onion_signed:
                return len(hashes)  # Don't store unproven onions; caller must request a sign round
            self.execute("INSERT INTO peer ?", {"ip4": ip4, "onion": onion, "port": port})
            peer_id = self.cur.cursor.lastrowid

        # Check user's hashes
        res = self.execute("SELECT * FROM peer_to_hash WHERE ?", {"peer_id": peer_id})
        hash_ids_db = [row["hash_id"] for row in res]
        if hash_ids_db != hashes_ids_announced:
            hash_ids_added = set(hashes_ids_announced) - set(hash_ids_db)
            hash_ids_removed = set(hash_ids_db) - set(hashes_ids_announced)
            if not onion or onion_signed:  # Only proven peers may change associations
                for hash_id in hash_ids_added:
                    self.execute("INSERT INTO peer_to_hash ?", {"peer_id": peer_id, "hash_id": hash_id})
                if hash_ids_removed and delete_missing_hashes:
                    self.execute("DELETE FROM peer_to_hash WHERE ?", {"peer_id": peer_id, "hash_id": list(hash_ids_removed)})

            return len(hash_ids_added) + len(hash_ids_removed)
        else:
            return 0

    def peerList(self, hash, ip4=None, onions=[], port=None, limit=30, need_types=["ip4", "onion"]):
        # Return up to `limit` packed peer addresses serving `hash`,
        # excluding the requester's own ip4/onion address(es).
        hash_peers = {"ip4": [], "onion": []}
        if limit == 0:
            return hash_peers
        hashid = self.getHashId(hash)

        where = "hash_id = :hashid"
        if onions:
            # Inlined because the Db helper can't bind a list; input is
            # sanitized to [a-z0-9,] before quoting
            onions_escaped = ["'%s'" % re.sub("[^a-z0-9,]", "", onion) for onion in onions]
            where += " AND (onion NOT IN (%s) OR onion IS NULL)" % ",".join(onions_escaped)
        elif ip4:
            where += " AND (NOT (ip4 = :ip4 AND port = :port) OR ip4 IS NULL)"

        query = """
            SELECT ip4, port, onion
            FROM peer_to_hash
            LEFT JOIN peer USING (peer_id)
            WHERE %s
            LIMIT :limit
        """ % where
        res = self.execute(query, {"hashid": hashid, "ip4": ip4, "onions": onions, "port": port, "limit": limit})

        for row in res:
            if row["ip4"] and "ip4" in need_types:
                hash_peers["ip4"].append(
                    helper.packAddress(row["ip4"], row["port"])
                )
            if row["onion"] and "onion" in need_types:
                hash_peers["onion"].append(
                    helper.packOnionAddress(row["onion"], row["port"])
                )

        return hash_peers
|
105
plugins/disabled-Bootstrapper/BootstrapperPlugin.py
Normal file
105
plugins/disabled-Bootstrapper/BootstrapperPlugin.py
Normal file
|
@ -0,0 +1,105 @@
|
|||
import time
|
||||
|
||||
from Plugin import PluginManager
|
||||
from BootstrapperDb import BootstrapperDb
|
||||
from Crypt import CryptRsa
|
||||
|
||||
# Keep the same db instance across plugin reloads (this module may be re-executed)
if "db" not in locals():  # Membership test works on the dict directly; .keys() was redundant
    db = BootstrapperDb()
|
||||
|
||||
|
||||
@PluginManager.registerTo("FileRequest")
class FileRequestPlugin(object):
    def _checkOnionSigns(self, params, hashes):
        # Return True when every announced onion address is proven by a valid
        # RSA sign of the nonce we sent to the peer earlier.
        if "onion_signs" not in params or len(params["onion_signs"]) != len(hashes):
            return False  # Incorrect number of signs
        if time.time() - float(params["onion_sign_this"]) >= 3 * 60:
            return False  # Peer only has 3 minutes to sign the nonce
        # Verify each sign and recover the onion address from its public key
        onions_signed = []
        for onion_publickey, onion_sign in params["onion_signs"].items():
            if CryptRsa.verify(params["onion_sign_this"], onion_publickey, onion_sign):
                onions_signed.append(CryptRsa.publickeyToOnion(onion_publickey))
            else:
                break  # One bad sign invalidates the whole announce
        # The signed onion addresses have to match the announced ones
        return sorted(onions_signed) == sorted(params["onions"])

    def actionAnnounce(self, params):
        """
        Handle an "announce" request: store the peer's addresses for the
        announced site hashes, then respond with known peers for each hash.
        """
        hashes = params["hashes"]
        all_onions_signed = self._checkOnionSigns(params, hashes)

        # Only a routable, non-onion connection address can be stored as ip4
        if "ip4" in params["add"] and self.connection.ip != "127.0.0.1" and not self.connection.ip.endswith(".onion"):
            ip4 = self.connection.ip
        else:
            ip4 = None

        # Separately add onions to sites or at once if no onions present
        hashes_changed = 0
        for i, onion in enumerate(params.get("onions", [])):
            hashes_changed += db.peerAnnounce(
                onion=onion,
                port=params["port"],
                hashes=[hashes[i]],
                onion_signed=all_onions_signed
            )
        # Announce all sites if ip4 defined
        if ip4:
            hashes_changed += db.peerAnnounce(
                ip4=ip4,
                port=params["port"],
                hashes=hashes,
                delete_missing_hashes=params.get("delete")
            )

        # Query sites
        back = {}
        peers = []
        if params.get("onions") and not all_onions_signed and hashes_changed:
            back["onion_sign_this"] = "%.0f" % time.time()  # Send back nonce for signing

        for hash in hashes:
            hash_peers = db.peerList(
                hash,
                ip4=self.connection.ip, onions=params.get("onions"), port=params["port"],
                limit=min(30, params["need_num"]), need_types=params["need_types"]
            )
            peers.append(hash_peers)

        back["peers"] = peers
        self.response(back)
|
||||
|
||||
|
||||
@PluginManager.registerTo("UiRequest")
class UiRequestPlugin(object):
    def actionStatsBootstrapper(self):
        # Debug page: dump every tracked site hash and the peers stored for it
        self.sendHeader()

        # Style
        yield """
        <style>
         * { font-family: monospace; white-space: pre }
         table td, table th { text-align: right; padding: 0px 10px }
        </style>
        """

        hash_rows = db.execute("SELECT * FROM hash").fetchall()
        for hash_row in hash_rows:
            # All peers announcing this hash (LEFT JOIN keeps rows even without peer data)
            peer_rows = db.execute(
                "SELECT * FROM peer LEFT JOIN peer_to_hash USING (peer_id) WHERE hash_id = :hash_id",
                {"hash_id": hash_row["hash_id"]}
            ).fetchall()

            yield "<br>%s (added: %s, peers: %s)<br>" % (
                str(hash_row["hash"]).encode("hex"), hash_row["date_added"], len(peer_rows)
            )
            for peer_row in peer_rows:
                yield " - {ip4: <30} {onion: <30} added: {date_added}, announced: {date_announced}<br>".format(**dict(peer_row))
|
179
plugins/disabled-Bootstrapper/Test/TestBootstrapper.py
Normal file
179
plugins/disabled-Bootstrapper/Test/TestBootstrapper.py
Normal file
|
@ -0,0 +1,179 @@
|
|||
import hashlib
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from Bootstrapper import BootstrapperPlugin
|
||||
from Bootstrapper.BootstrapperDb import BootstrapperDb
|
||||
from Peer import Peer
|
||||
from Crypt import CryptRsa
|
||||
from util import helper
|
||||
|
||||
|
||||
@pytest.fixture()
def bootstrapper_db(request):
    """Replace the plugin's shared db with a fresh one; remove its file afterwards."""
    BootstrapperPlugin.db.close()
    BootstrapperPlugin.db = BootstrapperDb()
    fresh_db = BootstrapperPlugin.db
    fresh_db.createTables()  # Start every test from an empty schema
    fresh_db.cur.logging = True

    def cleanup():
        BootstrapperPlugin.db.close()
        os.unlink(BootstrapperPlugin.db.db_path)
    request.addfinalizer(cleanup)

    return fresh_db
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("resetSettings")
class TestBootstrapper:
    def testIp4(self, file_server, bootstrapper_db):
        """Announce/query cycle for ip4 peers, including hash add/remove and db cleanup."""
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("site1").digest()
        hash2 = hashlib.sha256("site2").digest()
        hash3 = hashlib.sha256("site3").digest()

        # Verify empty result
        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })

        assert len(res["peers"][0]["ip4"]) == 0  # Empty result

        # Verify added peer on previous request
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2], delete_missing_hashes=True)

        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 1

        # hash2 deleted from 1.2.3.4
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1], delete_missing_hashes=True)
        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 0

        # Announce 3 hash again
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2, hash3], delete_missing_hashes=True)
        res = peer.request("announce", {
            "hashes": [hash1, hash2, hash3],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 1
        assert len(res["peers"][2]["ip4"]) == 1

        # Single hash announce
        res = peer.request("announce", {
            "hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1

        # Test DB cleanup
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 1  # 127.0.0.1 never get added to db

        # Delete peers
        bootstrapper_db.execute("DELETE FROM peer WHERE ip4 = '1.2.3.4'")
        # Foreign-key cascade should have removed the peer's hash associations
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer_to_hash").fetchone()["num"] == 0

        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM hash").fetchone()["num"] == 3  # 3 sites
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 0  # 0 peer

    def testPassive(self, file_server, bootstrapper_db):
        """An announce with no usable address stores nothing and returns no peers."""
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("hash1").digest()

        bootstrapper_db.peerAnnounce(ip4=None, port=15441, hashes=[hash1])
        res = peer.request("announce", {
            "hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": []
        })

        assert len(res["peers"][0]["ip4"]) == 0  # Empty result

    def testAddOnion(self, file_server, site, bootstrapper_db, tor_manager):
        """Onion announces require a valid RSA sign of the tracker's nonce before being stored."""
        onion1 = tor_manager.addOnion()
        onion2 = tor_manager.addOnion()
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("site1").digest()
        hash2 = hashlib.sha256("site2").digest()

        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash1, hash2])
        res = peer.request("announce", {
            "onions": [onion1, onion2],
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert "onion_sign_this" in res

        # Onion address not added yet
        site_peers = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers["onion"]) == 0
        assert "onion_sign_this" in res

        # Sign the nonces
        sign1 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion1))
        sign2 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion2))

        # Bad sign (different address)
        res = peer.request("announce", {
            "onions": [onion1], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion2): sign2},
            "hashes": [hash1], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" in res
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 0  # Not added

        # Bad sign (missing one)
        res = peer.request("announce", {
            "onions": [onion1, onion2], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion1): sign1},
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" in res
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 0  # Not added

        # Good sign
        res = peer.request("announce", {
            "onions": [onion1, onion2], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion1): sign1, tor_manager.getPublickey(onion2): sign2},
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" not in res

        # Onion addresses added
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 1
        site_peers2 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash2)
        assert len(site_peers2["onion"]) == 1

        # Each site gets its own onion address
        assert site_peers1["onion"][0] != site_peers2["onion"][0]
        assert helper.unpackOnionAddress(site_peers1["onion"][0])[0] == onion1+".onion"
        assert helper.unpackOnionAddress(site_peers2["onion"][0])[0] == onion2+".onion"

        tor_manager.delOnion(onion1)
        tor_manager.delOnion(onion2)

    def testRequestPeers(self, file_server, site, bootstrapper_db, tor_manager):
        """Site.announceTracker with the "zero" protocol pulls ip4 and onion peers from the tracker."""
        site.connection_server = file_server
        hash = hashlib.sha256(site.address).digest()

        # Request peers from tracker
        assert len(site.peers) == 0
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash])
        site.announceTracker("zero", "127.0.0.1:1544")
        assert len(site.peers) == 1

        # Test onion address store
        bootstrapper_db.peerAnnounce(onion="bka4ht2bzxchy44r", port=1234, hashes=[hash], onion_signed=True)
        site.announceTracker("zero", "127.0.0.1:1544")
        assert len(site.peers) == 2
        assert "bka4ht2bzxchy44r.onion:1234" in site.peers
|
1
plugins/disabled-Bootstrapper/Test/conftest.py
Normal file
1
plugins/disabled-Bootstrapper/Test/conftest.py
Normal file
|
@ -0,0 +1 @@
|
|||
from src.Test.conftest import *
|
5
plugins/disabled-Bootstrapper/Test/pytest.ini
Normal file
5
plugins/disabled-Bootstrapper/Test/pytest.ini
Normal file
|
@ -0,0 +1,5 @@
|
|||
[pytest]
|
||||
python_files = Test*.py
|
||||
addopts = -rsxX -v --durations=6
|
||||
markers =
|
||||
webtest: mark a test as a webtest.
|
1
plugins/disabled-Bootstrapper/__init__.py
Normal file
1
plugins/disabled-Bootstrapper/__init__.py
Normal file
|
@ -0,0 +1 @@
|
|||
import BootstrapperPlugin
|
Loading…
Add table
Add a link
Reference in a new issue