Resolve merge conflict
Signed-off-by: Marek Küthe <m.k@mk16.de>
Commit 5ded3ef494
18 changed files with 1112 additions and 18 deletions
@@ -22,7 +22,9 @@ Decentralized websites using Bitcoin crypto and the BitTorrent network - https:/
 * Password-less [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
   based authorization: Your account is protected by the same cryptography as your Bitcoin wallet
 * Built-in SQL server with P2P data synchronization: Allows easier site development and faster page load times
-* Anonymity: Full Tor network support with .onion hidden services instead of IPv4 addresses
+* Anonymity:
+  * Full Tor network support with .onion hidden services instead of IPv4 addresses
+  * Full I2P network support with I2P Destinations instead of IPv4 addresses
 * TLS encrypted connections
 * Automatic uPnP port opening
 * Plugin for multiuser (openproxy) support
@@ -132,6 +134,7 @@ https://zeronet.ipfsscan.io/

* File transactions are not compressed
* No private sites
* ~~No more anonymous than Bittorrent~~ (built-in full Tor and I2P support added)

## How can I create a ZeroNet site?
Vagrantfile (vendored, 2 changed lines)
@@ -40,6 +40,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.vm.provision "shell",
     inline: "sudo apt-get install msgpack-python python-gevent python-pip python-dev -y"
   config.vm.provision "shell",
-    inline: "sudo pip install msgpack --upgrade"
+    inline: "sudo pip install -r requirements.txt --upgrade"

 end
plugins (submodule, 1 changed line)

@@ -1 +0,0 @@
-Subproject commit 689d9309f73371f4681191b125ec3f2e14075eeb
@@ -11,3 +11,4 @@ websocket_client
 gevent-ws
 coincurve
 maxminddb
+i2p.socket>=0.3.1
@@ -87,7 +87,7 @@ class Config(object):
             "http://tracker.files.fm:6969/announce",
             "http://t.publictracker.xyz:6969/announce",
             "https://tracker.lilithraws.cf:443/announce",
-            "https://tracker.babico.name.tr:443/announce",
+            "https://tracker.babico.name.tr:443/announce"
         ]
         # Platform specific
         if sys.platform.startswith("win"):
@@ -312,6 +312,9 @@ class Config(object):
         self.parser.add_argument('--tor_hs_limit', help='Maximum number of hidden services in Tor always mode', metavar='limit', type=int, default=10)
         self.parser.add_argument('--tor_hs_port', help='Hidden service port in Tor always mode', metavar='limit', type=int, default=15441)

+        self.parser.add_argument('--i2p', help='enable: Use only for I2P peers, always: Use I2P for every connection', choices=["disable", "enable", "always"], default='enable')
+        self.parser.add_argument('--i2p_sam', help='I2P SAM API address', metavar='ip:port', default='127.0.0.1:7656')
+
         self.parser.add_argument('--version', action='version', version='ZeroNet %s r%s' % (self.version, self.rev))
         self.parser.add_argument('--end', help='Stop multi value argument parsing', action='store_true')

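A hedged, self-contained sketch (illustrative only, not part of the diff) of how the two new options are meant to be consumed: --i2p gates whether I2P support is wired up at all, and --i2p_sam names the SAM API endpoint, which gets split into host and port, mirroring ConnectionServer and I2PManager further down in this commit.

```python
import argparse

# Same flag names, choices and defaults as the add_argument calls above
parser = argparse.ArgumentParser()
parser.add_argument('--i2p', choices=["disable", "enable", "always"], default='enable')
parser.add_argument('--i2p_sam', metavar='ip:port', default='127.0.0.1:7656')
args = parser.parse_args([])  # defaults: i2p="enable", i2p_sam="127.0.0.1:7656"

if args.i2p != "disable":
    sam_ip, sam_port = args.i2p_sam.split(":")
    print("I2P enabled, SAM API expected at %s:%d" % (sam_ip, int(sam_port)))
else:
    print("I2P support disabled")
```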
@@ -133,6 +133,10 @@ class Connection(object):
             self.sock = socks.socksocket()
             proxy_ip, proxy_port = config.trackers_proxy.split(":")
             self.sock.set_proxy(socks.PROXY_TYPE_SOCKS5, proxy_ip, int(proxy_port))
+        elif self.ip.endswith(".i2p"):
+            if not self.server.i2p_manager or not self.server.i2p_manager.enabled:
+                raise Exception("Can't connect to I2P addresses, no SAM API present")
+            self.sock = self.server.i2p_manager.createSocket(self.ip, self.port)
         else:
             self.sock = self.createSocket()

@@ -344,14 +348,14 @@ class Connection(object):
     # My handshake info
     def getHandshakeInfo(self):
         # No TLS for onion connections
-        if self.ip_type == "onion":
+        if self.ip_type == "onion" or self.ip_type == "i2p":
             crypt_supported = []
         elif self.ip in self.server.broken_ssl_ips:
             crypt_supported = []
         else:
             crypt_supported = CryptConnection.manager.crypt_supported
         # No peer id for onion connections
-        if self.ip_type == "onion" or self.ip in config.ip_local:
+        if self.ip_type == "onion" or self.ip_type == "i2p" or self.ip in config.ip_local:
            peer_id = ""
         else:
             peer_id = self.server.peer_id
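For clarity, a hedged restatement of the policy above as a standalone helper (illustrative only, not part of the diff; the broken_ssl_ips branch is omitted): peers reached over an anonymity overlay are offered no TLS ciphers and receive an empty peer_id.

```python
def handshake_privacy(ip_type, ip, ip_local, my_peer_id, supported_crypt):
    # TLS ciphers are only offered on clearnet connections; .onion and .i2p
    # streams are already encrypted by their overlay networks.
    crypt_supported = [] if ip_type in ("onion", "i2p") else list(supported_crypt)
    # The peer_id is withheld on anonymous overlays and on local addresses.
    peer_id = "" if ip_type in ("onion", "i2p") or ip in ip_local else my_peer_id
    return crypt_supported, peer_id

assert handshake_privacy("i2p", "example.i2p", [], "PEERID", ["tls-rsa"]) == ([], "")
assert handshake_privacy("ipv4", "1.2.3.4", [], "PEERID", ["tls-rsa"]) == (["tls-rsa"], "PEERID")
```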
@@ -360,6 +364,17 @@ class Connection(object):
-            self.target_onion = self.handshake.get("target_ip").replace(".onion", "")  # My onion address
-            if not self.server.tor_manager.site_onions.values():
-                self.server.log.warning("Unknown target onion address: %s" % self.target_onion)
+        # Setup peer lock from requested onion address or I2P Destination
+        if self.handshake:
+            if self.handshake.get("target_ip", "").endswith(".onion") and self.server.tor_manager.start_onions:
+                self.target_onion = self.handshake.get("target_ip").replace(".onion", "")  # My onion address
+                if not self.server.tor_manager.site_onions.values():
+                    self.server.log.warning("Unknown target onion address: %s" % self.target_onion)
+            elif self.handshake.get("target_ip", "").endswith(".i2p"):
+                self.target_dest = self.handshake.get("target_ip").replace(".i2p", "")  # My I2P Destination
+                if not self.server.i2p_manager.site_dests.values():
+                    self.server.log.error("Unknown target I2P Destination: %s" % self.target_dest)
+                    self.site_lock = "unknown"

         handshake = {
             "version": config.version,
@@ -374,10 +389,13 @@ class Connection(object):
             "crypt": self.crypt,
             "time": int(time.time())
         }

         if self.target_onion:
             handshake["onion"] = self.target_onion
         elif self.ip_type == "onion":
             handshake["onion"] = self.server.tor_manager.getOnion("global")
+        elif self.ip.endswith(".i2p"):
+            handshake["i2p"] = self.server.i2p_manager.getDest("global").base64()

         if self.is_tracker_connection:
             handshake["tracker_connection"] = True
@@ -397,7 +415,8 @@ class Connection(object):
             return False

         self.handshake = handshake
-        if handshake.get("port_opened", None) is False and "onion" not in handshake and not self.is_private_ip:  # Not connectable
+        if handshake.get("port_opened", None) is False and "onion" not in handshake and \
+                "i2p" not in handshake and not self.is_private_ip:  # Not connectable
             self.port = 0
         else:
             self.port = int(handshake["fileserver_port"])  # Set peer fileserver port
@@ -411,12 +430,16 @@ class Connection(object):
         if unprocessed_bytes:
             self.unpacker.feed(unprocessed_bytes)

+        if handshake.get("i2p") and not self.ip.endswith(".i2p"):  # Set incoming connection's I2P Destination
+            self.ip = handshake["i2p"] + ".i2p"
+            self.updateName()
+
         # Check if we can encrypt the connection
         if handshake.get("crypt_supported") and self.ip not in self.server.broken_ssl_ips:
             if type(handshake["crypt_supported"][0]) is bytes:
                 handshake["crypt_supported"] = [item.decode() for item in handshake["crypt_supported"]]  # Backward compatibility

-            if self.ip_type == "onion" or self.ip in config.ip_local:
+            if self.ip_type == "onion" or self.ip_type == "i2p" or self.ip in config.ip_local:
                 crypt = None
             elif handshake.get("crypt"):  # Recommended crypt by server
                 crypt = handshake["crypt"]
@@ -16,6 +16,7 @@ from .Connection import Connection
 from Config import config
 from Crypt import CryptConnection
 from Crypt import CryptHash
+from I2P import I2PManager
 from Tor import TorManager
 from Site import SiteManager

@@ -38,6 +39,11 @@ class ConnectionServer(object):
         self.peer_blacklist = SiteManager.peer_blacklist

         self.tor_manager = TorManager(self.ip, self.port)
+        if config.i2p != "disable":
+            self.i2p_manager = I2PManager(self.handleIncomingConnection)
+        else:
+            self.i2p_manager = None
+
         self.connections = []  # Connections
         self.whitelist = config.ip_local  # No flood protection on this ips
         self.ip_incoming = {}  # Incoming connections from ip in the last minute to avoid connection flood
@@ -171,10 +177,13 @@ class ConnectionServer(object):

     def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
         ip_type = helper.getIpType(ip)
-        has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
-        if has_per_site_onion:  # Site-unique connection for Tor
+        has_per_site_onion = (((ip.endswith(".onion") or self.port_opened.get("onion", None) == False) and self.tor_manager.start_onions) or
+            ((ip.endswith(".i2p") or self.port_opened.get("i2p", None) == False) and self.i2p_manager.start_dests)) and site
+        if has_per_site_onion:  # Site-unique connection for Tor or I2P
             if ip.endswith(".onion"):
                 site_onion = self.tor_manager.getOnion(site.address)
+            elif ip.endswith(".i2p"):
+                site_onion = self.i2p_manager.getDest(site.address).base64()
             else:
                 site_onion = self.tor_manager.getOnion("global")
             key = ip + site_onion
@@ -196,7 +205,8 @@ class ConnectionServer(object):
             if connection.ip == ip:
                 if peer_id and connection.handshake.get("peer_id") != peer_id:  # Does not match
                     continue
-                if ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion:
+                if (ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion) or \
+                        (ip.endswith(".i2p") and self.i2p_manager.start_dests and ip.replace(".i2p", "") != connection.target_dest):
                     # For different site
                     continue
                 if not connection.connected and create:
@@ -321,6 +321,13 @@ class FileRequest(object):
             if site.addPeer(*address, source="pex"):
                 added += 1

+        # Add sent I2P peers to site
+        for packed_address in params.get("peers_i2p", []):
+            address = helper.unpackI2PAddress(packed_address)
+            got_peer_keys.append("%s:%s" % address)
+            if site.addPeer(*address, source="pex"):
+                added += 1
+
         # Send back peers that is not in the sent list and connectable (not port 0)
         packed_peers = helper.packPeers(site.getConnectablePeers(params["need"], ignore=got_peer_keys, allow_private=False))
@@ -335,7 +342,8 @@ class FileRequest(object):
         back = {
             "peers": packed_peers["ipv4"],
             "peers_ipv6": packed_peers["ipv6"],
-            "peers_onion": packed_peers["onion"]
+            "peers_onion": packed_peers["onion"],
+            "peers_i2p": packed_peers["i2p"]
         }

         self.response(back)
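A hedged illustration (not part of the diff) of the pex reply shape after this change. The packed entry formats follow helper.py further down in this commit: 4-byte IP plus 2-byte port for IPv4, 16 plus 2 for IPv6, base32-decoded onion plus 2-byte port, and a serialized Destination of 389+ bytes plus 2-byte port for I2P.

```python
# Values are placeholders; real entries come from helper.packPeers()
back = {
    "peers": [],        # packed IPv4 peers
    "peers_ipv6": [],   # packed IPv6 peers
    "peers_onion": [],  # packed .onion peers
    "peers_i2p": [],    # packed .i2p peers, unpacked on the other side with helper.unpackI2PAddress()
}
```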
@@ -410,7 +418,7 @@ class FileRequest(object):
             "Found: %s for %s hashids in %.3fs" %
             ({key: len(val) for key, val in back.items()}, len(params["hash_ids"]), time.time() - s)
         )
-        self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_ipv6": back["ipv6"], "my": my_hashes})
+        self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_i2p": back["i2p"], "peers_ipv6": back["ipv6"], "my": my_hashes})

     def actionSetHashfield(self, params):
         site = self.sites.get(params["site"])
@@ -252,6 +252,7 @@ class FileServer(ConnectionServer):

         if not self.port_opened["ipv4"]:
             self.tor_manager.startOnions()
+            self.i2p_manager.startDests()

         if not sites_checking:
             check_pool = gevent.pool.Pool(5)
src/I2P/I2PManager.py (new file, 176 lines)
@ -0,0 +1,176 @@
|
|||
import logging
|
||||
|
||||
from gevent.coros import RLock
|
||||
from gevent.server import StreamServer
|
||||
from gevent.pool import Pool
|
||||
from httplib import HTTPConnection
|
||||
import urllib2
|
||||
|
||||
from i2p import socket
|
||||
from i2p.datatypes import Destination
|
||||
|
||||
from Config import config
|
||||
from Site import SiteManager
|
||||
from Debug import Debug
|
||||
|
||||
|
||||
class I2PHTTPConnection(HTTPConnection):
|
||||
def __init__(self, i2p_manager, site_address, *args, **kwargs):
|
||||
HTTPConnection.__init__(self, *args, **kwargs)
|
||||
self.i2p_manager = i2p_manager
|
||||
self.site_address = site_address
|
||||
self._create_connection = self._create_i2p_connection
|
||||
|
||||
def _create_i2p_connection(self, address, timeout=60,
|
||||
source_address=None):
|
||||
return self.i2p_manager.createSocket(self.site_address, *address)
|
||||
|
||||
class I2PHTTPHandler(urllib2.HTTPHandler):
|
||||
def __init__(self, i2p_manager, site_address, *args, **kwargs):
|
||||
urllib2.HTTPHandler.__init__(self, *args, **kwargs)
|
||||
self.i2p_manager = i2p_manager
|
||||
self.site_address = site_address
|
||||
|
||||
def http_open(self, req):
|
||||
return self.do_open(self._createI2PHTTPConnection, req)
|
||||
|
||||
def _createI2PHTTPConnection(self, *args, **kwargs):
|
||||
return I2PHTTPConnection(self.i2p_manager, self.site_address, *args, **kwargs)
|
||||
|
||||
class I2PManager:
|
||||
def __init__(self, fileserver_handler=None):
|
||||
self.dest_conns = {} # Destination: SAM connection
|
||||
self.dest_servs = {} # Destination: StreamServer
|
||||
self.site_dests = {} # Site address: Destination
|
||||
self.log = logging.getLogger("I2PManager")
|
||||
self.start_dests = None
|
||||
self.lock = RLock()
|
||||
|
||||
if config.i2p == "disable":
|
||||
self.enabled = False
|
||||
self.start_dests = False
|
||||
self.status = "Disabled"
|
||||
else:
|
||||
self.enabled = True
|
||||
self.status = "Waiting"
|
||||
|
||||
if fileserver_handler:
|
||||
self.fileserver_handler = fileserver_handler
|
||||
else:
|
||||
self.fileserver_handler = lambda sock, addr: None
|
||||
|
||||
self.sam_ip, self.sam_port = config.i2p_sam.split(":")
|
||||
self.sam_port = int(self.sam_port)
|
||||
|
||||
# Test SAM port
|
||||
if config.i2p != "disable":
|
||||
try:
|
||||
assert self.connect(), "No connection"
|
||||
self.log.debug("I2P SAM port %s check ok" % config.i2p_sam)
|
||||
except Exception, err:
|
||||
self.log.debug("I2P SAM port %s check error: %s" % (config.i2p_sam, err))
|
||||
self.enabled = False
|
||||
|
||||
def connect(self):
|
||||
if not self.enabled:
|
||||
return False
|
||||
self.site_dests = {}
|
||||
self.dest_conns = {}
|
||||
self.dest_servs = {}
|
||||
|
||||
self.log.debug("Connecting to %s:%s" % (self.sam_ip, self.sam_port))
|
||||
with self.lock:
|
||||
try:
|
||||
socket.checkAPIConnection((self.sam_ip, self.sam_port))
|
||||
self.status = u"Connected"
|
||||
return True
|
||||
except Exception, err:
|
||||
self.status = u"Error (%s)" % err
|
||||
self.log.error("I2P SAM connect error: %s" % Debug.formatException(err))
|
||||
self.enabled = False
|
||||
return False
|
||||
|
||||
def disconnect(self):
|
||||
for server in self.dest_servs:
|
||||
server.stop()
|
||||
self.dest_conns = {}
|
||||
self.dest_servs = {}
|
||||
|
||||
def startDests(self):
|
||||
if self.enabled:
|
||||
self.log.debug("Start Destinations")
|
||||
self.start_dests = True
|
||||
|
||||
def addDest(self, site_address=None):
|
||||
sock = socket.socket(socket.AF_I2P, socket.SOCK_STREAM,
|
||||
samaddr=(self.sam_ip, self.sam_port))
|
||||
try:
|
||||
sock.setblocking(0)
|
||||
sock.bind(None, site_address) # Transient Destination, tied to site address
|
||||
sock.listen()
|
||||
server = StreamServer(
|
||||
sock, self.fileserver_handler, spawn=Pool(1000)
|
||||
)
|
||||
server.start()
|
||||
dest = sock.getsockname()
|
||||
self.dest_conns[dest] = sock
|
||||
self.dest_servs[dest] = server
|
||||
self.status = u"OK (%s Destinations running)" % len(self.dest_conns)
|
||||
SiteManager.peer_blacklist.append((dest.base64()+".i2p", 0))
|
||||
return dest
|
||||
except Exception, err:
|
||||
self.status = u"SESSION CREATE error (%s)" % err
|
||||
self.log.error("I2P SESSION CREATE error: %s" % Debug.formatException(err))
|
||||
return False
|
||||
|
||||
def delDest(self, dest):
|
||||
if dest in self.dest_servs:
|
||||
self.dest_servs[dest].stop()
|
||||
del self.dest_conns[dest]
|
||||
del self.dest_servs[dest]
|
||||
self.status = "OK (%s Destinations running)" % len(self.dest_conns)
|
||||
return True
|
||||
else:
|
||||
self.status = u"Tried to delete non-existent Destination"
|
||||
self.log.error("I2P error: Tried to delete non-existent")
|
||||
self.disconnect()
|
||||
return False
|
||||
|
||||
def getDest(self, site_address):
|
||||
with self.lock:
|
||||
if not self.enabled:
|
||||
return None
|
||||
if self.start_dests: # Different Destination for every site
|
||||
dest = self.site_dests.get(site_address)
|
||||
else: # Same Destination for every site
|
||||
dest = self.site_dests.get("global")
|
||||
site_address = "global"
|
||||
if not dest:
|
||||
self.site_dests[site_address] = self.addDest(site_address)
|
||||
dest = self.site_dests[site_address]
|
||||
self.log.debug("Created new Destination for %s: %s" % (site_address, dest))
|
||||
return dest
|
||||
|
||||
def getPrivateDest(self, addr):
|
||||
dest = addr if isinstance(addr, Destination) else self.getDest(addr)
|
||||
return self.dest_conns[dest].getPrivateDest()
|
||||
|
||||
def createSocket(self, site_address, dest, port):
|
||||
if not self.enabled:
|
||||
return False
|
||||
if dest.endswith(".i2p") and not dest.endswith(".b32.i2p"):
|
||||
dest = Destination(raw=dest[:-4], b64=True)
|
||||
self.log.debug("Creating new socket to %s:%s" %
|
||||
(dest.base32() if isinstance(dest, Destination) else dest, port))
|
||||
sock = socket.socket(socket.AF_I2P, socket.SOCK_STREAM,
|
||||
samaddr=(self.sam_ip, self.sam_port))
|
||||
sock.connect((dest, int(port)), site_address)
|
||||
return sock
|
||||
|
||||
def lookup(self, name):
|
||||
return socket.lookup(name, (self.sam_ip, self.sam_port))
|
||||
|
||||
def urlopen(self, site_address, url, timeout):
|
||||
handler = I2PHTTPHandler(self, site_address)
|
||||
opener = urllib2.build_opener(handler)
|
||||
return opener.open(url, timeout=timeout)
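A minimal usage sketch for the new manager (illustrative only, not part of the diff). It assumes an I2P router with the SAM API reachable at config.i2p_sam (127.0.0.1:7656 by default), the i2p.socket package from requirements.txt, src/ on sys.path, and a made-up site address; only methods defined in the file above are used.

```python
from I2P import I2PManager

i2p_manager = I2PManager()         # no fileserver handler given: incoming streams are ignored
if i2p_manager.enabled:            # stays False if the SAM port check above failed
    # One shared Destination while start_dests is off ("global" key)
    dest = i2p_manager.getDest("global")
    print("My I2P address: %s.i2p" % dest.base64())

    # After startDests() every site address gets its own transient Destination
    i2p_manager.startDests()
    site_dest = i2p_manager.getDest("1SiteAddressPlaceholder")  # hypothetical site address
    assert site_dest != dest

    i2p_manager.disconnect()
```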
src/I2P/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
+from I2PManager import I2PManager
@@ -126,6 +126,8 @@ class Peer(object):
     def packMyAddress(self):
         if self.ip.endswith(".onion"):
             return helper.packOnionAddress(self.ip, self.port)
+        elif self.ip.endswith(".i2p"):
+            return helper.packI2PAddress(self.ip, self.port)
         else:
             return helper.packAddress(self.ip, self.port)

@ -273,6 +275,8 @@ class Peer(object):
|
|||
request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
|
||||
if packed_peers["onion"]:
|
||||
request["peers_onion"] = packed_peers["onion"]
|
||||
if packed_peers["i2p"]:
|
||||
request["peers_i2p"] = packed_peers["i2p"]
|
||||
if packed_peers["ipv6"]:
|
||||
request["peers_ipv6"] = packed_peers["ipv6"]
|
||||
res = self.request("pex", request)
|
||||
|
@ -298,6 +302,11 @@ class Peer(object):
|
|||
address = helper.unpackOnionAddress(peer)
|
||||
if site.addPeer(*address, source="pex"):
|
||||
added += 1
|
||||
# I2P
|
||||
for peer in res.get("peers_i2p", []):
|
||||
address = helper.unpackI2PAddress(peer)
|
||||
if site.addPeer(*address):
|
||||
added += 1
|
||||
|
||||
if added:
|
||||
self.log("Added peers using pex: %s" % added)
|
||||
|
@ -331,7 +340,7 @@ class Peer(object):
|
|||
|
||||
back = collections.defaultdict(list)
|
||||
|
||||
for ip_type in ["ipv4", "ipv6", "onion"]:
|
||||
for ip_type in ["ipv4", "ipv6", "onion", "i2p"]:
|
||||
if ip_type == "ipv4":
|
||||
key = "peers"
|
||||
else:
|
||||
|
@ -339,6 +348,8 @@ class Peer(object):
|
|||
for hash, peers in list(res.get(key, {}).items())[0:30]:
|
||||
if ip_type == "onion":
|
||||
unpacker_func = helper.unpackOnionAddress
|
||||
elif ip_type == "i2p":
|
||||
unpacker_func = helper.unpackI2PAddress
|
||||
else:
|
||||
unpacker_func = helper.unpackAddress
|
||||
|
||||
|
src/Site/Site.py (235 changed lines)
@ -537,6 +537,27 @@ class Site(object):
|
|||
content_json_modified = self.content_manager.contents[inner_path]["modified"]
|
||||
body = self.storage.read(inner_path)
|
||||
|
||||
# Do we need this part? Removed in ZeroNetX, included in I2P Patch
|
||||
# Find out my ip and port
|
||||
tor_manager = self.connection_server.tor_manager
|
||||
i2p_manager = self.connection_server.i2p_manager
|
||||
if tor_manager and tor_manager.enabled and tor_manager.start_onions:
|
||||
my_ip = tor_manager.getOnion(self.address)
|
||||
if my_ip:
|
||||
my_ip += ".onion"
|
||||
my_port = config.fileserver_port
|
||||
elif i2p_manager and i2p_manager.enabled and i2p_manager.start_dests:
|
||||
my_ip = i2p_manager.getDest(self.address)
|
||||
if my_ip:
|
||||
my_ip += ".i2p"
|
||||
my_port = 0
|
||||
else:
|
||||
my_ip = config.ip_external
|
||||
if self.connection_server.port_opened:
|
||||
my_port = config.fileserver_port
|
||||
else:
|
||||
my_port = 0
|
||||
|
||||
while 1:
|
||||
if not peers or len(published) >= limit:
|
||||
if event_done:
|
||||
|
@ -849,10 +870,224 @@ class Site(object):
|
|||
peer.found(source)
|
||||
return peer
|
||||
|
||||
|
||||
def announce(self, *args, **kwargs):
|
||||
if self.isServing():
|
||||
self.announcer.announce(*args, **kwargs)
|
||||
|
||||
# Part from I2P Patch
|
||||
# Gather peer from connected peers
|
||||
@util.Noparallel(blocking=False)
|
||||
def announcePex(self, query_num=2, need_num=5):
|
||||
peers = [peer for peer in self.peers.values() if peer.connection and peer.connection.connected] # Connected peers
|
||||
if len(peers) == 0: # Small number of connected peers for this site, connect to any
|
||||
self.log.debug("Small number of peers detected...query all of peers using pex")
|
||||
peers = self.peers.values()
|
||||
need_num = 10
|
||||
|
||||
random.shuffle(peers)
|
||||
done = 0
|
||||
added = 0
|
||||
for peer in peers:
|
||||
res = peer.pex(need_num=need_num)
|
||||
if type(res) == int: # We have result
|
||||
done += 1
|
||||
added += res
|
||||
if res:
|
||||
self.worker_manager.onPeers()
|
||||
self.updateWebsocket(peers_added=res)
|
||||
if done == query_num:
|
||||
break
|
||||
self.log.debug("Queried pex from %s peers got %s new peers." % (done, added))
|
||||
|
||||
# Gather peers from tracker
|
||||
# Return: Complete time or False on error
|
||||
def announceTracker(self, tracker_protocol, tracker_address, fileserver_port=0, add_types=[], my_peer_id="", mode="start"):
|
||||
is_i2p = ".i2p" in tracker_address
|
||||
i2p_manager = self.connection_server.i2p_manager
|
||||
if is_i2p and not (i2p_manager and i2p_manager.enabled):
|
||||
return False
|
||||
|
||||
s = time.time()
|
||||
if "ip4" not in add_types:
|
||||
fileserver_port = 0
|
||||
|
||||
if tracker_protocol == "udp": # Udp tracker
|
||||
if config.disable_udp:
|
||||
return False # No udp supported
|
||||
ip, port = tracker_address.split(":")
|
||||
tracker = UdpTrackerClient(ip, int(port))
|
||||
tracker.peer_port = fileserver_port
|
||||
try:
|
||||
tracker.connect()
|
||||
tracker.poll_once()
|
||||
tracker.announce(info_hash=hashlib.sha1(self.address).hexdigest(), num_want=50)
|
||||
back = tracker.poll_once()
|
||||
peers = back["response"]["peers"]
|
||||
except Exception, err:
|
||||
return False
|
||||
|
||||
elif tracker_protocol == "http": # Http tracker
|
||||
params = {
|
||||
'info_hash': hashlib.sha1(self.address).digest(),
|
||||
'peer_id': my_peer_id, 'port': fileserver_port,
|
||||
'uploaded': 0, 'downloaded': 0, 'left': 0, 'compact': 1, 'numwant': 30,
|
||||
'event': 'started'
|
||||
}
|
||||
if is_i2p:
|
||||
params['ip'] = i2p_manager.getDest(self.address).base64()
|
||||
req = None
|
||||
try:
|
||||
url = "http://" + tracker_address + "?" + urllib.urlencode(params)
|
||||
timeout = 60 if is_i2p else 30
|
||||
# Load url
|
||||
with gevent.Timeout(timeout, False): # Make sure of timeout
|
||||
if is_i2p:
|
||||
req = i2p_manager.urlopen(self.address, url, timeout=50)
|
||||
else:
|
||||
req = urllib2.urlopen(url, timeout=25)
|
||||
response = req.read()
|
||||
req.fp._sock.recv = None # Hacky avoidance of memory leak for older python versions
|
||||
req.close()
|
||||
req = None
|
||||
if not response:
|
||||
self.log.debug("Http tracker %s response error" % url)
|
||||
return False
|
||||
# Decode peers
|
||||
peer_data = bencode.decode(response)["peers"]
|
||||
response = None
|
||||
peers = []
|
||||
if isinstance(peer_data, str):
|
||||
# Compact response
|
||||
peer_length = 32 if is_i2p else 6
|
||||
peer_count = len(peer_data) / peer_length
|
||||
for peer_offset in xrange(peer_count):
|
||||
off = peer_length * peer_offset
|
||||
peer = peer_data[off:off + peer_length]
|
||||
if is_i2p:
|
||||
# TODO measure whether non-compact is faster than compact+lookup
|
||||
try:
|
||||
dest = i2p_manager.lookup(peer+".b32.i2p")
|
||||
peers.append({"addr": dest.base64()+".i2p", "port": 6881})
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
addr, port = struct.unpack('!LH', peer)
|
||||
peers.append({"addr": socket.inet_ntoa(struct.pack('!L', addr)), "port": port})
|
||||
else:
|
||||
# Non-compact response
|
||||
for peer in peer_data:
|
||||
if is_i2p:
|
||||
peers.append({"addr": peer["ip"]+".i2p", "port": peer["port"]})
|
||||
else:
|
||||
peers.append({"addr": peer["ip"], "port": peer["port"]})
|
||||
except Exception, err:
|
||||
self.log.debug("Http tracker %s error: %s" % (url, err))
|
||||
if req:
|
||||
req.close()
|
||||
req = None
|
||||
return False
|
||||
else:
|
||||
peers = []
|
||||
|
||||
# Adding peers
|
||||
added = 0
|
||||
for peer in peers:
|
||||
if not peer["port"]:
|
||||
continue # Dont add peers with port 0
|
||||
if self.addPeer(peer["addr"], peer["port"]):
|
||||
added += 1
|
||||
if added:
|
||||
self.worker_manager.onPeers()
|
||||
self.updateWebsocket(peers_added=added)
|
||||
self.log.debug("Found %s peers, new: %s, total: %s" % (len(peers), added, len(self.peers)))
|
||||
return time.time() - s
|
||||
|
||||
# Add myself and get other peers from tracker
|
||||
def announce(self, force=False, mode="start", pex=True):
|
||||
if time.time() < self.time_announce + 30 and not force:
|
||||
return # No reannouncing within 30 secs
|
||||
self.time_announce = time.time()
|
||||
|
||||
trackers = config.trackers
|
||||
# Filter trackers based on supported networks
|
||||
if config.disable_udp:
|
||||
trackers = [tracker for tracker in trackers if not tracker.startswith("udp://")]
|
||||
if self.connection_server and not self.connection_server.tor_manager.enabled:
|
||||
trackers = [tracker for tracker in trackers if ".onion" not in tracker]
|
||||
if self.connection_server and not self.connection_server.i2p_manager.enabled:
|
||||
trackers = [tracker for tracker in trackers if ".i2p" not in tracker]
|
||||
|
||||
if mode == "update" or mode == "more": # Only announce on one tracker, increment the queried tracker id
|
||||
self.last_tracker_id += 1
|
||||
self.last_tracker_id = self.last_tracker_id % len(trackers)
|
||||
trackers = [trackers[self.last_tracker_id]] # We only going to use this one
|
||||
|
||||
errors = []
|
||||
slow = []
|
||||
add_types = []
|
||||
if self.connection_server:
|
||||
my_peer_id = self.connection_server.peer_id
|
||||
|
||||
# Type of addresses they can reach me
|
||||
if self.connection_server.port_opened:
|
||||
add_types.append("ip4")
|
||||
if self.connection_server.tor_manager.enabled and self.connection_server.tor_manager.start_onions:
|
||||
add_types.append("onion")
|
||||
if self.connection_server.i2p_manager.enabled and self.connection_server.i2p_manager.start_dests:
|
||||
add_types.append("i2p")
|
||||
else:
|
||||
my_peer_id = ""
|
||||
|
||||
s = time.time()
|
||||
announced = 0
|
||||
threads = []
|
||||
fileserver_port = config.fileserver_port
|
||||
|
||||
for tracker in trackers: # Start announce threads
|
||||
tracker_protocol, tracker_address = tracker.split("://")
|
||||
thread = gevent.spawn(
|
||||
self.announceTracker, tracker_protocol, tracker_address, fileserver_port, add_types, my_peer_id, mode
|
||||
)
|
||||
threads.append(thread)
|
||||
thread.tracker_address = tracker_address
|
||||
thread.tracker_protocol = tracker_protocol
|
||||
|
||||
gevent.joinall(threads, timeout=10) # Wait for announce finish
|
||||
|
||||
for thread in threads:
|
||||
if thread.value:
|
||||
if thread.value > 1:
|
||||
slow.append("%.2fs %s://%s" % (thread.value, thread.tracker_protocol, thread.tracker_address))
|
||||
announced += 1
|
||||
else:
|
||||
if thread.ready():
|
||||
errors.append("%s://%s" % (thread.tracker_protocol, thread.tracker_address))
|
||||
else: # Still running
|
||||
slow.append("10s+ %s://%s" % (thread.tracker_protocol, thread.tracker_address))
|
||||
|
||||
# Save peers num
|
||||
self.settings["peers"] = len(self.peers)
|
||||
|
||||
if len(errors) < len(threads): # Less errors than total tracker nums
|
||||
self.log.debug(
|
||||
"Announced types %s in mode %s to %s trackers in %.3fs, errors: %s, slow: %s" %
|
||||
(add_types, mode, announced, time.time() - s, errors, slow)
|
||||
)
|
||||
else:
|
||||
if mode != "update":
|
||||
self.log.error("Announce to %s trackers in %.3fs, failed" % (announced, time.time() - s))
|
||||
|
||||
if pex:
|
||||
if not [peer for peer in self.peers.values() if peer.connection and peer.connection.connected]:
|
||||
# If no connected peer yet then wait for connections
|
||||
gevent.spawn_later(3, self.announcePex, need_num=10) # Spawn 3 secs later
|
||||
else: # Else announce immediately
|
||||
if mode == "more": # Need more peers
|
||||
self.announcePex(need_num=10)
|
||||
else:
|
||||
self.announcePex()
|
||||
|
||||
# Keep connections to get the updates
|
||||
def needConnections(self, num=None, check_site_on_reconnect=False):
|
||||
if num is None:
|
||||
|
|
src/Test/TestI2P.py (new file, 135 lines)
@ -0,0 +1,135 @@
|
|||
import pytest
|
||||
import time
|
||||
|
||||
from File import FileServer
|
||||
|
||||
# stats.i2p
|
||||
TEST_B64 = 'Okd5sN9hFWx-sr0HH8EFaxkeIMi6PC5eGTcjM1KB7uQ0ffCUJ2nVKzcsKZFHQc7pLONjOs2LmG5H-2SheVH504EfLZnoB7vxoamhOMENnDABkIRGGoRisc5AcJXQ759LraLRdiGSR0WTHQ0O1TU0hAz7vAv3SOaDp9OwNDr9u902qFzzTKjUTG5vMTayjTkLo2kOwi6NVchDeEj9M7mjj5ySgySbD48QpzBgcqw1R27oIoHQmjgbtbmV2sBL-2Tpyh3lRe1Vip0-K0Sf4D-Zv78MzSh8ibdxNcZACmZiVODpgMj2ejWJHxAEz41RsfBpazPV0d38Mfg4wzaS95R5hBBo6SdAM4h5vcZ5ESRiheLxJbW0vBpLRd4mNvtKOrcEtyCvtvsP3FpA-6IKVswyZpHgr3wn6ndDHiVCiLAQZws4MsIUE1nkfxKpKtAnFZtPrrB8eh7QO9CkH2JBhj7bG0ED6mV5~X5iqi52UpsZ8gnjZTgyG5pOF8RcFrk86kHxAAAA'
|
||||
|
||||
@pytest.mark.usefixtures("resetSettings")
|
||||
@pytest.mark.usefixtures("resetTempSettings")
|
||||
class TestI2P:
|
||||
def testAddDest(self, i2p_manager):
|
||||
# Add
|
||||
dest = i2p_manager.addDest()
|
||||
assert dest
|
||||
assert dest in i2p_manager.dest_conns
|
||||
|
||||
# Delete
|
||||
assert i2p_manager.delDest(dest)
|
||||
assert dest not in i2p_manager.dest_conns
|
||||
|
||||
def testSignDest(self, i2p_manager):
|
||||
dest = i2p_manager.addDest()
|
||||
|
||||
# Sign
|
||||
sign = i2p_manager.getPrivateDest(dest).sign("hello")
|
||||
assert len(sign) == dest.signature_size()
|
||||
|
||||
# Verify
|
||||
assert dest.verify("hello", sign)
|
||||
assert not dest.verify("not hello", sign)
|
||||
|
||||
# Delete
|
||||
i2p_manager.delDest(dest)
|
||||
|
||||
@pytest.mark.skipif(not pytest.config.getvalue("slow"), reason="--slow not requested (takes around ~ 1min)")
|
||||
def testConnection(self, i2p_manager, file_server, site, site_temp):
|
||||
file_server.i2p_manager.start_dests = True
|
||||
dest = file_server.i2p_manager.getDest(site.address)
|
||||
assert dest
|
||||
print "Connecting to", dest.base32()
|
||||
for retry in range(5): # Wait for Destination creation
|
||||
time.sleep(10)
|
||||
try:
|
||||
connection = file_server.getConnection(dest.base64()+".i2p", 1544)
|
||||
if connection:
|
||||
break
|
||||
except Exception, err:
|
||||
continue
|
||||
assert connection.handshake
|
||||
assert not connection.handshake["peer_id"] # No peer_id for I2P connections
|
||||
|
||||
# Return the same connection without site specified
|
||||
assert file_server.getConnection(dest.base64()+".i2p", 1544) == connection
|
||||
# No reuse for different site
|
||||
assert file_server.getConnection(dest.base64()+".i2p", 1544, site=site) != connection
|
||||
assert file_server.getConnection(dest.base64()+".i2p", 1544, site=site) == file_server.getConnection(dest.base64()+".i2p", 1544, site=site)
|
||||
site_temp.address = "1OTHERSITE"
|
||||
assert file_server.getConnection(dest.base64()+".i2p", 1544, site=site) != file_server.getConnection(dest.base64()+".i2p", 1544, site=site_temp)
|
||||
|
||||
# Only allow to query from the locked site
|
||||
file_server.sites[site.address] = site
|
||||
connection_locked = file_server.getConnection(dest.base64()+".i2p", 1544, site=site)
|
||||
assert "body" in connection_locked.request("getFile", {"site": site.address, "inner_path": "content.json", "location": 0})
|
||||
assert connection_locked.request("getFile", {"site": "1OTHERSITE", "inner_path": "content.json", "location": 0})["error"] == "Invalid site"
|
||||
|
||||
def testPex(self, file_server, site, site_temp):
|
||||
# Register site to currently running fileserver
|
||||
site.connection_server = file_server
|
||||
file_server.sites[site.address] = site
|
||||
# Create a new file server to emulate new peer connecting to our peer
|
||||
file_server_temp = FileServer("127.0.0.1", 1545)
|
||||
site_temp.connection_server = file_server_temp
|
||||
file_server_temp.sites[site_temp.address] = site_temp
|
||||
# We will request peers from this
|
||||
peer_source = site_temp.addPeer("127.0.0.1", 1544)
|
||||
|
||||
# Get ip4 peers from source site
|
||||
assert peer_source.pex(need_num=10) == 1 # Need >5 to return also return non-connected peers
|
||||
assert len(site_temp.peers) == 2 # Me, and the other peer
|
||||
site.addPeer("1.2.3.4", 1555) # Add peer to source site
|
||||
assert peer_source.pex(need_num=10) == 1
|
||||
assert len(site_temp.peers) == 3
|
||||
assert "1.2.3.4:1555" in site_temp.peers
|
||||
|
||||
# Get I2P peers from source site
|
||||
site.addPeer(TEST_B64+".i2p", 1555)
|
||||
assert TEST_B64+".i2p:1555" not in site_temp.peers
|
||||
assert peer_source.pex(need_num=10) == 1 # Need >5 to return also return non-connected peers
|
||||
assert TEST_B64+".i2p:1555" in site_temp.peers
|
||||
|
||||
def testFindHash(self, i2p_manager, file_server, site, site_temp):
|
||||
file_server.ip_incoming = {} # Reset flood protection
|
||||
file_server.sites[site.address] = site
|
||||
assert file_server.i2p_manager == None
|
||||
file_server.i2p_manager = i2p_manager
|
||||
|
||||
client = FileServer("127.0.0.1", 1545)
|
||||
client.sites[site_temp.address] = site_temp
|
||||
site_temp.connection_server = client
|
||||
|
||||
# Add file_server as peer to client
|
||||
peer_file_server = site_temp.addPeer("127.0.0.1", 1544)
|
||||
|
||||
assert peer_file_server.findHashIds([1234]) == {}
|
||||
|
||||
# Add fake peer with requred hash
|
||||
fake_peer_1 = site.addPeer(TEST_B64+".i2p", 1544)
|
||||
fake_peer_1.hashfield.append(1234)
|
||||
fake_peer_2 = site.addPeer("1.2.3.5", 1545)
|
||||
fake_peer_2.hashfield.append(1234)
|
||||
fake_peer_2.hashfield.append(1235)
|
||||
fake_peer_3 = site.addPeer("1.2.3.6", 1546)
|
||||
fake_peer_3.hashfield.append(1235)
|
||||
fake_peer_3.hashfield.append(1236)
|
||||
|
||||
assert peer_file_server.findHashIds([1234, 1235]) == {
|
||||
1234: [('1.2.3.5', 1545), (TEST_B64+".i2p", 1544)],
|
||||
1235: [('1.2.3.6', 1546), ('1.2.3.5', 1545)]
|
||||
}
|
||||
|
||||
# Test my address adding
|
||||
site.content_manager.hashfield.append(1234)
|
||||
my_i2p_address = i2p_manager.getDest(site_temp.address).base64()+".i2p"
|
||||
|
||||
res = peer_file_server.findHashIds([1234, 1235])
|
||||
assert res[1234] == [('1.2.3.5', 1545), (TEST_B64+".i2p", 1544), (my_i2p_address, 1544)]
|
||||
assert res[1235] == [('1.2.3.6', 1546), ('1.2.3.5', 1545)]
|
||||
|
||||
# Reset
|
||||
file_server.i2p_manager = None
|
||||
|
||||
def testSiteDest(self, i2p_manager):
|
||||
assert i2p_manager.getDest("address1") != i2p_manager.getDest("address2")
|
||||
assert i2p_manager.getDest("address1") == i2p_manager.getDest("address1")
|
|
@ -74,6 +74,7 @@ config.debug = True
|
|||
config.debug_socket = True # Use test data for unittests
|
||||
config.verbose = True # Use test data for unittests
|
||||
config.tor = "disable" # Don't start Tor client
|
||||
config.i2p = "disable" # Don't start I2P client
|
||||
config.trackers = []
|
||||
config.data_dir = TEST_DATA_PATH # Use test data for unittests
|
||||
if "ZERONET_LOG_DIR" in os.environ:
|
||||
|
@ -126,6 +127,7 @@ from Crypt import CryptConnection
|
|||
from Crypt import CryptBitcoin
|
||||
from Ui import UiWebsocket
|
||||
from Tor import TorManager
|
||||
from I2P import I2PManager
|
||||
from Content import ContentDb
|
||||
from util import RateLimit
|
||||
from Db import Db
|
||||
|
@ -397,7 +399,6 @@ def tor_manager():
|
|||
raise pytest.skip("Test requires Tor with ControlPort: %s, %s" % (config.tor_controller, err))
|
||||
return tor_manager
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def db(request):
|
||||
db_path = "%s/zeronet.db" % config.data_dir
|
||||
|
@ -495,3 +496,13 @@ def disableLog():
|
|||
yield None # Wait until all test done
|
||||
logging.getLogger('').setLevel(logging.getLevelName(logging.CRITICAL))
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def i2p_manager():
|
||||
try:
|
||||
i2p_manager = I2PManager()
|
||||
i2p_manager.enabled = True
|
||||
assert i2p_manager.connect(), "No connection"
|
||||
i2p_manager.startDests()
|
||||
except Exception, err:
|
||||
raise pytest.skip("Test requires I2P with SAM port: %s, %s" % (config.i2p_sam, err))
|
||||
return i2p_manager
|
||||
|
|
|
@ -47,7 +47,7 @@ class UiWebsocket(object):
|
|||
self.site.page_requested = True # Dont add connection notification anymore
|
||||
import main
|
||||
file_server = main.file_server
|
||||
if not file_server.port_opened or file_server.tor_manager.start_onions is None:
|
||||
if not file_server.port_opened or file_server.tor_manager.start_onions is None or file_server.i2p_manager.start_dests is None:
|
||||
self.site.page_requested = False # Not ready yet, check next time
|
||||
else:
|
||||
try:
|
||||
|
@ -302,6 +302,8 @@ class UiWebsocket(object):
|
|||
"tor_status": file_server.tor_manager.status,
|
||||
"tor_has_meek_bridges": file_server.tor_manager.has_meek_bridges,
|
||||
"tor_use_bridges": config.tor_use_bridges,
|
||||
"i2p_enabled": file_server.i2p_manager.enabled,
|
||||
"i2p_status": file_server.i2p_manager.status,
|
||||
"ui_ip": config.ui_ip,
|
||||
"ui_port": config.ui_port,
|
||||
"version": config.version,
|
||||
|
@ -537,7 +539,7 @@ class UiWebsocket(object):
|
|||
else:
|
||||
if len(site.peers) == 0:
|
||||
import main
|
||||
if any(main.file_server.port_opened.values()) or main.file_server.tor_manager.start_onions:
|
||||
if any(main.file_server.port_opened.values()) or main.file_server.tor_manager.start_onions or sys.modules["main"].file_server.i2p_manager.start_dests:
|
||||
if notification:
|
||||
self.cmd("notification", ["info", _["No peers found, but your content is ready to access."]])
|
||||
if callback:
|
||||
|
|
src/lib/pyelliptic/openssl.py (new file, 460 lines)
@ -0,0 +1,460 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
|
||||
# See LICENSE for details.
|
||||
#
|
||||
# Software slightly changed by Jonathan Warren <bitmessage at-symbol jonwarren.org>
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
import ctypes.util
|
||||
import logging
|
||||
import os
|
||||
|
||||
OpenSSL = None
|
||||
|
||||
|
||||
class CipherName:
|
||||
def __init__(self, name, pointer, blocksize):
|
||||
self._name = name
|
||||
self._pointer = pointer
|
||||
self._blocksize = blocksize
|
||||
|
||||
def __str__(self):
|
||||
return "Cipher : " + self._name + " | Blocksize : " + str(self._blocksize) + " | Fonction pointer : " + str(self._pointer)
|
||||
|
||||
def get_pointer(self):
|
||||
return self._pointer()
|
||||
|
||||
def get_name(self):
|
||||
return self._name
|
||||
|
||||
def get_blocksize(self):
|
||||
return self._blocksize
|
||||
|
||||
|
||||
class _OpenSSL:
|
||||
"""
|
||||
Wrapper for OpenSSL using ctypes
|
||||
"""
|
||||
def __init__(self, library):
|
||||
"""
|
||||
Build the wrapper
|
||||
"""
|
||||
self._lib = ctypes.CDLL(library)
|
||||
|
||||
self.pointer = ctypes.pointer
|
||||
self.c_int = ctypes.c_int
|
||||
self.byref = ctypes.byref
|
||||
self.create_string_buffer = ctypes.create_string_buffer
|
||||
|
||||
self.BN_new = self._lib.BN_new
|
||||
self.BN_new.restype = ctypes.c_void_p
|
||||
self.BN_new.argtypes = []
|
||||
|
||||
self.BN_free = self._lib.BN_free
|
||||
self.BN_free.restype = None
|
||||
self.BN_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_num_bits = self._lib.BN_num_bits
|
||||
self.BN_num_bits.restype = ctypes.c_int
|
||||
self.BN_num_bits.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_bn2bin = self._lib.BN_bn2bin
|
||||
self.BN_bn2bin.restype = ctypes.c_int
|
||||
self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.BN_bin2bn = self._lib.BN_bin2bn
|
||||
self.BN_bin2bn.restype = ctypes.c_void_p
|
||||
self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_free = self._lib.EC_KEY_free
|
||||
self.EC_KEY_free.restype = None
|
||||
self.EC_KEY_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name
|
||||
self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
|
||||
self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
|
||||
|
||||
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key
|
||||
self.EC_KEY_generate_key.restype = ctypes.c_int
|
||||
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_check_key = self._lib.EC_KEY_check_key
|
||||
self.EC_KEY_check_key.restype = ctypes.c_int
|
||||
self.EC_KEY_check_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key
|
||||
self.EC_KEY_get0_private_key.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key
|
||||
self.EC_KEY_get0_public_key.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group
|
||||
self.EC_KEY_get0_group.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_get_affine_coordinates_GFp = self._lib.EC_POINT_get_affine_coordinates_GFp
|
||||
self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
|
||||
self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key
|
||||
self.EC_KEY_set_public_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_group = self._lib.EC_KEY_set_group
|
||||
self.EC_KEY_set_group.restype = ctypes.c_int
|
||||
self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_set_affine_coordinates_GFp = self._lib.EC_POINT_set_affine_coordinates_GFp
|
||||
self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_new = self._lib.EC_POINT_new
|
||||
self.EC_POINT_new.restype = ctypes.c_void_p
|
||||
self.EC_POINT_new.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_free = self._lib.EC_POINT_free
|
||||
self.EC_POINT_free.restype = None
|
||||
self.EC_POINT_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_CTX_free = self._lib.BN_CTX_free
|
||||
self.BN_CTX_free.restype = None
|
||||
self.BN_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_mul = self._lib.EC_POINT_mul
|
||||
self.EC_POINT_mul.restype = None
|
||||
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
|
||||
self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
|
||||
self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL
|
||||
self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p
|
||||
self._lib.ECDH_OpenSSL.argtypes = []
|
||||
|
||||
self.BN_CTX_new = self._lib.BN_CTX_new
|
||||
self._lib.BN_CTX_new.restype = ctypes.c_void_p
|
||||
self._lib.BN_CTX_new.argtypes = []
|
||||
|
||||
self.ECDH_set_method = self._lib.ECDH_set_method
|
||||
self._lib.ECDH_set_method.restype = ctypes.c_int
|
||||
self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.ECDH_compute_key = self._lib.ECDH_compute_key
|
||||
self.ECDH_compute_key.restype = ctypes.c_int
|
||||
self.ECDH_compute_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex
|
||||
self.EVP_CipherInit_ex.restype = ctypes.c_int
|
||||
self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new
|
||||
self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p
|
||||
self.EVP_CIPHER_CTX_new.argtypes = []
|
||||
|
||||
# Cipher
|
||||
self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128
|
||||
self.EVP_aes_128_cfb128.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_cfb128.argtypes = []
|
||||
|
||||
self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128
|
||||
self.EVP_aes_256_cfb128.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_cfb128.argtypes = []
|
||||
|
||||
self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc
|
||||
self.EVP_aes_128_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_cbc.argtypes = []
|
||||
|
||||
self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc
|
||||
self.EVP_aes_256_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_cbc.argtypes = []
|
||||
|
||||
#self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr
|
||||
#self.EVP_aes_128_ctr.restype = ctypes.c_void_p
|
||||
#self.EVP_aes_128_ctr.argtypes = []
|
||||
|
||||
#self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr
|
||||
#self.EVP_aes_256_ctr.restype = ctypes.c_void_p
|
||||
#self.EVP_aes_256_ctr.argtypes = []
|
||||
|
||||
self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb
|
||||
self.EVP_aes_128_ofb.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_ofb.argtypes = []
|
||||
|
||||
self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb
|
||||
self.EVP_aes_256_ofb.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_ofb.argtypes = []
|
||||
|
||||
self.EVP_bf_cbc = self._lib.EVP_bf_cbc
|
||||
self.EVP_bf_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_bf_cbc.argtypes = []
|
||||
|
||||
self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64
|
||||
self.EVP_bf_cfb64.restype = ctypes.c_void_p
|
||||
self.EVP_bf_cfb64.argtypes = []
|
||||
|
||||
self.EVP_rc4 = self._lib.EVP_rc4
|
||||
self.EVP_rc4.restype = ctypes.c_void_p
|
||||
self.EVP_rc4.argtypes = []
|
||||
|
||||
self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup
|
||||
self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int
|
||||
self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free
|
||||
self.EVP_CIPHER_CTX_free.restype = None
|
||||
self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate
|
||||
self.EVP_CipherUpdate.restype = ctypes.c_int
|
||||
self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex
|
||||
self.EVP_CipherFinal_ex.restype = ctypes.c_int
|
||||
self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestInit = self._lib.EVP_DigestInit
|
||||
self.EVP_DigestInit.restype = ctypes.c_int
|
||||
self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex
|
||||
self.EVP_DigestInit_ex.restype = ctypes.c_int
|
||||
self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate
|
||||
self.EVP_DigestUpdate.restype = ctypes.c_int
|
||||
self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
self.EVP_DigestFinal = self._lib.EVP_DigestFinal
|
||||
self.EVP_DigestFinal.restype = ctypes.c_int
|
||||
self.EVP_DigestFinal.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex
|
||||
self.EVP_DigestFinal_ex.restype = ctypes.c_int
|
||||
self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_ecdsa = self._lib.EVP_ecdsa
|
||||
self._lib.EVP_ecdsa.restype = ctypes.c_void_p
|
||||
self._lib.EVP_ecdsa.argtypes = []
|
||||
|
||||
self.ECDSA_sign = self._lib.ECDSA_sign
|
||||
self.ECDSA_sign.restype = ctypes.c_int
|
||||
self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.ECDSA_verify = self._lib.ECDSA_verify
|
||||
self.ECDSA_verify.restype = ctypes.c_int
|
||||
self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create
|
||||
self.EVP_MD_CTX_create.restype = ctypes.c_void_p
|
||||
self.EVP_MD_CTX_create.argtypes = []
|
||||
|
||||
self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init
|
||||
self.EVP_MD_CTX_init.restype = None
|
||||
self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy
|
||||
self.EVP_MD_CTX_destroy.restype = None
|
||||
self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.RAND_bytes = self._lib.RAND_bytes
|
||||
self.RAND_bytes.restype = ctypes.c_int
|
||||
self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
|
||||
self.EVP_sha256 = self._lib.EVP_sha256
|
||||
self.EVP_sha256.restype = ctypes.c_void_p
|
||||
self.EVP_sha256.argtypes = []
|
||||
|
||||
self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey
|
||||
self.i2o_ECPublicKey.restype = ctypes.c_void_p
|
||||
self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_sha512 = self._lib.EVP_sha512
|
||||
self.EVP_sha512.restype = ctypes.c_void_p
|
||||
self.EVP_sha512.argtypes = []
|
||||
|
||||
self.HMAC = self._lib.HMAC
|
||||
self.HMAC.restype = ctypes.c_void_p
|
||||
self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
try:
|
||||
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC
|
||||
except:
|
||||
# The above is not compatible with all versions of OSX.
|
||||
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1
|
||||
|
||||
self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
|
||||
self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
self._set_ciphers()
|
||||
self._set_curves()
|
||||
|
||||
def _set_ciphers(self):
|
||||
self.cipher_algo = {
|
||||
'aes-128-cbc': CipherName('aes-128-cbc', self.EVP_aes_128_cbc, 16),
|
||||
'aes-256-cbc': CipherName('aes-256-cbc', self.EVP_aes_256_cbc, 16),
|
||||
'aes-128-cfb': CipherName('aes-128-cfb', self.EVP_aes_128_cfb128, 16),
|
||||
'aes-256-cfb': CipherName('aes-256-cfb', self.EVP_aes_256_cfb128, 16),
|
||||
'aes-128-ofb': CipherName('aes-128-ofb', self._lib.EVP_aes_128_ofb, 16),
|
||||
'aes-256-ofb': CipherName('aes-256-ofb', self._lib.EVP_aes_256_ofb, 16),
|
||||
#'aes-128-ctr': CipherName('aes-128-ctr', self._lib.EVP_aes_128_ctr, 16),
|
||||
#'aes-256-ctr': CipherName('aes-256-ctr', self._lib.EVP_aes_256_ctr, 16),
|
||||
'bf-cfb': CipherName('bf-cfb', self.EVP_bf_cfb64, 8),
|
||||
'bf-cbc': CipherName('bf-cbc', self.EVP_bf_cbc, 8),
|
||||
'rc4': CipherName('rc4', self.EVP_rc4, 128), # 128 is the initialisation size not block size
|
||||
}
|
||||
|
||||
def _set_curves(self):
|
||||
self.curves = {
|
||||
'secp112r1': 704,
|
||||
'secp112r2': 705,
|
||||
'secp128r1': 706,
|
||||
'secp128r2': 707,
|
||||
'secp160k1': 708,
|
||||
'secp160r1': 709,
|
||||
'secp160r2': 710,
|
||||
'secp192k1': 711,
|
||||
'secp224k1': 712,
|
||||
'secp224r1': 713,
|
||||
'secp256k1': 714,
|
||||
'secp384r1': 715,
|
||||
'secp521r1': 716,
|
||||
'sect113r1': 717,
|
||||
'sect113r2': 718,
|
||||
'sect131r1': 719,
|
||||
'sect131r2': 720,
|
||||
'sect163k1': 721,
|
||||
'sect163r1': 722,
|
||||
'sect163r2': 723,
|
||||
'sect193r1': 724,
|
||||
'sect193r2': 725,
|
||||
'sect233k1': 726,
|
||||
'sect233r1': 727,
|
||||
'sect239k1': 728,
|
||||
'sect283k1': 729,
|
||||
'sect283r1': 730,
|
||||
'sect409k1': 731,
|
||||
'sect409r1': 732,
|
||||
'sect571k1': 733,
|
||||
'sect571r1': 734,
|
||||
}
|
||||
|
||||
def BN_num_bytes(self, x):
|
||||
"""
|
||||
returns the length of a BN (OpenSSl API)
|
||||
"""
|
||||
return int((self.BN_num_bits(x) + 7) / 8)
|
||||
|
||||
def get_cipher(self, name):
|
||||
"""
|
||||
returns the OpenSSL cipher instance
|
||||
"""
|
||||
if name not in self.cipher_algo:
|
||||
raise Exception("Unknown cipher")
|
||||
return self.cipher_algo[name]
|
||||
|
||||
def get_curve(self, name):
|
||||
"""
|
||||
returns the id of a elliptic curve
|
||||
"""
|
||||
if name not in self.curves:
|
||||
raise Exception("Unknown curve")
|
||||
return self.curves[name]
|
||||
|
||||
def get_curve_by_id(self, id):
|
||||
"""
|
||||
returns the name of a elliptic curve with his id
|
||||
"""
|
||||
res = None
|
||||
for i in self.curves:
|
||||
if self.curves[i] == id:
|
||||
res = i
|
||||
break
|
||||
if res is None:
|
||||
raise Exception("Unknown curve")
|
||||
return res
|
||||
|
||||
def rand(self, size):
|
||||
"""
|
||||
OpenSSL random function
|
||||
"""
|
||||
buffer = self.malloc(0, size)
|
||||
# This pyelliptic library, by default, didn't check the return value of RAND_bytes. It is
|
||||
# evidently possible that it returned an error and not-actually-random data. However, in
|
||||
# tests on various operating systems, while generating hundreds of gigabytes of random
|
||||
# strings of various sizes I could not get an error to occur. Also Bitcoin doesn't check
|
||||
# the return value of RAND_bytes either.
|
||||
# Fixed in Bitmessage version 0.4.2 (in source code on 2013-10-13)
|
||||
while self.RAND_bytes(buffer, size) != 1:
|
||||
import time
|
||||
time.sleep(1)
|
||||
return buffer.raw
|
||||
|
||||
def malloc(self, data, size):
|
||||
"""
|
||||
returns a create_string_buffer (ctypes)
|
||||
"""
|
||||
buffer = None
|
||||
if data != 0:
|
||||
if sys.version_info.major == 3 and isinstance(data, type('')):
|
||||
data = data.encode()
|
||||
buffer = self.create_string_buffer(data, size)
|
||||
else:
|
||||
buffer = self.create_string_buffer(size)
|
||||
return buffer
|
||||
|
||||
|
||||
def openLibrary():
|
||||
global OpenSSL
|
||||
try:
|
||||
if sys.platform.startswith("win"):
|
||||
dll_path = "src/lib/opensslVerify/libeay32.dll"
|
||||
elif sys.platform == "cygwin":
|
||||
dll_path = "/bin/cygcrypto-1.0.0.dll"
|
||||
elif os.path.isfile("../lib/libcrypto.so"): # ZeroBundle
|
||||
dll_path = "../lib/libcrypto.so"
|
||||
else:
|
||||
dll_path = "/usr/local/ssl/lib/libcrypto.so"
|
||||
ssl = _OpenSSL(dll_path)
|
||||
assert ssl
|
||||
except Exception, err:
|
||||
ssl = _OpenSSL(ctypes.util.find_library('ssl') or ctypes.util.find_library('crypto') or ctypes.util.find_library('libcrypto') or 'libeay32')
|
||||
OpenSSL = ssl
|
||||
logging.debug("pyelliptic loaded: %s", ssl._lib)
|
||||
|
||||
|
||||
def closeLibrary():
|
||||
import _ctypes
|
||||
if "FreeLibrary" in dir(_ctypes):
|
||||
_ctypes.FreeLibrary(OpenSSL._lib._handle)
|
||||
else:
|
||||
_ctypes.dlclose(OpenSSL._lib._handle)
|
||||
|
||||
openLibrary()
|
|
@@ -13,6 +13,8 @@ import gevent

 from Config import config

+from i2p.datatypes import Destination
+

 def atomicWrite(dest, content, mode="wb"):
     try:
@@ -115,7 +117,7 @@ def shellquote(*args):


 def packPeers(peers):
-    packed_peers = {"ipv4": [], "ipv6": [], "onion": []}
+    packed_peers = {"ipv4": [], "ipv6": [], "onion": [], "i2p": []}
     for peer in peers:
         try:
             ip_type = getIpType(peer.ip)
@@ -155,6 +157,19 @@ def unpackOnionAddress(packed):
     return base64.b32encode(packed[0:-2]).lower().decode() + ".onion", struct.unpack("H", packed[-2:])[0]


+# Destination, port to packed (389+)-byte format
+def packI2PAddress(dest, port):
+    if not isinstance(dest, Destination):
+        dest = dest.replace(".i2p", "")
+        dest = Destination(raw=dest, b64=True)
+    return dest.serialize() + struct.pack("H", port)
+
+
+# From (389+)-byte format to Destination, port
+def unpackI2PAddress(packed):
+    return Destination(raw=packed[0:-2]).base64() + ".i2p", struct.unpack("H", packed[-2:])[0]
+
+
 # Get dir from file
 # Return: data/site/content.json -> data/site/
 def getDirname(path):
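A hedged round-trip check for the two helpers above (illustrative only, not part of the diff). It borrows TEST_B64, the sample Destination defined in src/Test/TestI2P.py earlier in this commit, and assumes src/ is on sys.path with the i2p.socket package installed.

```python
from util import helper
from Test.TestI2P import TEST_B64

# Pack: serialized Destination followed by a 2-byte port, then unpack it again
packed = helper.packI2PAddress(TEST_B64 + ".i2p", 15441)
addr, port = helper.unpackI2PAddress(packed)
assert addr == TEST_B64 + ".i2p"
assert port == 15441
```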