Fix typos

Dimitris Apostolou 2022-01-02 13:59:17 +02:00
parent 0bbf19aab9
commit 6f76383c56
No known key found for this signature in database
GPG key ID: 4B5D20E938204A8A
56 changed files with 131 additions and 131 deletions

@@ -188,7 +188,7 @@ Private key (Приватный ключ) (input hidden):
 $ zeronet.py sitePublish 13DNDkMUExRf9Xa9ogwPKqp7zyHFEqbhC2
 ...
 Site:13DNDk..bhC2 Publishing to 3/10 peers...
-Site:13DNDk..bhC2 Successfuly published to 3 peers
+Site:13DNDk..bhC2 Successfully published to 3 peers
 - Serving files....
 ```

Vagrantfile (vendored)
@@ -22,7 +22,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 config.vm.synced_folder ".", "/vagrant",
 :nfs => !Vagrant::Util::Platform.windows?
-#Virtal Box settings
+#Virtual Box settings
 config.vm.provider "virtualbox" do |vb|
 # Don't boot with headless mode
 #vb.gui = true

@@ -80,7 +80,7 @@ class LocalAnnouncer(BroadcastServer.BroadcastServer):
 back = []
 sites = list(self.server.sites.values())
-# Split adresses to group of 100 to avoid UDP size limit
+# Split addresses to group of 100 to avoid UDP size limit
 site_groups = [sites[i:i + 100] for i in range(0, len(sites), 100)]
 for site_group in site_groups:
 res = {}

@@ -18,7 +18,7 @@ class TestAnnounceShare:
 tracker_storage.onTrackerFound("zero://%s:15441" % file_server.ip)
 assert peer.request("getTrackers")["trackers"] == []
-# It needs to have at least one successfull announce to be shared to other peers
+# It needs to have at least one successful announce to be shared to other peers
 tracker_storage.onTrackerSuccess("zero://%s:15441" % file_server.ip, 1.0)
 assert peer.request("getTrackers")["trackers"] == ["zero://%s:15441" % file_server.ip]

@@ -89,7 +89,7 @@ class TestBigfile:
 site.storage.delete(inner_path)
-# Writing to end shold not take much longer, than writing to start
+# Writing to end should not take much longer, than writing to start
 assert time_write_end <= max(0.1, time_write_start * 1.1)
 def testRangedFileRequest(self, file_server, site, site_temp):

@@ -202,9 +202,9 @@ class UiWebsocketPlugin(object):
 class SiteStoragePlugin(object):
 def updateDbFile(self, inner_path, file=None, cur=None):
 if file is not False: # File deletion always allowed
-# Find for bitcoin addresses in file path
+# Find bitcoin addresses in file path
 matches = re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path)
-# Check if any of the adresses are in the mute list
+# Check if any of the addresses are in the mute list
 for auth_address in matches:
 if filter_storage.isMuted(auth_address):
 self.log.debug("Mute match: %s, ignoring %s" % (auth_address, inner_path))

@@ -144,7 +144,7 @@ class ContentFilterStorage(object):
 return details
-# Search and remove or readd files of an user
+# Search and remove or re-add files of a user
 def changeDbs(self, auth_address, action):
 self.log.debug("Mute action %s on user %s" % (action, auth_address))
 res = list(self.site_manager.list().values())[0].content_manager.contents.db.execute(

@@ -27,7 +27,7 @@ class TestCrypt:
 pub = ui_websocket.testAction("UserPublickey", 0)
 assert len(pub) == 44 # Compressed, b64 encoded publickey
-# Different pubkey for specificed index
+# Different pubkey for specified index
 assert ui_websocket.testAction("UserPublickey", 1) != ui_websocket.testAction("UserPublickey", 0)
 # Same publickey for same index

@@ -7,7 +7,7 @@ from Plugin import PluginManager
 from Config import config
 from Debug import Debug
-# Keep archive open for faster reponse times for large sites
+# Keep archive open for faster response times for large sites
 archive_cache = {}
@@ -55,7 +55,7 @@ class UiRequestPlugin(object):
 if not os.path.isfile(archive_path):
 # Wait until file downloads
 result = site.needFile(archive_inner_path, priority=10)
-# Send virutal file path download finished event to remove loading screen
+# Send virtual file path download finished event to remove loading screen
 site.updateWebsocket(file_done=archive_inner_path)
 if not result:
 return self.error404(archive_inner_path)

@@ -81,7 +81,7 @@ class UiWebsocketPlugin(object):
 if not isinstance(row["date_added"], (int, float, complex)):
 self.log.debug("Invalid date_added from site %s: %r" % (address, row["date_added"]))
 continue
-if row["date_added"] > 1000000000000: # Formatted as millseconds
+if row["date_added"] > 1000000000000: # Formatted as milliseconds
 row["date_added"] = row["date_added"] / 1000
 if "date_added" not in row or row["date_added"] > time.time() + 120:
 self.log.debug("Newsfeed item from the future from from site %s" % address)

@@ -266,7 +266,7 @@ class ContentDbPlugin(object):
 self.log.debug("%s/%s peer number for %s site updated in %.3fs" % (num_updated, num_file, num_site, time.time() - s))
 def queryDeletableFiles(self):
-# First return the files with atleast 10 seeder and not accessed in last week
+# First return the files with at least 10 seeders and not accessed in last week
 query = """
 SELECT * FROM file_optional
 WHERE peer > 10 AND %s
@@ -285,7 +285,7 @@ class ContentDbPlugin(object):
 self.log.debug("queryDeletableFiles returning less-seeded files")
-# Then return files less seeder but still not accessed in last week
+# Then return files with less seeders but still not accessed in last week
 query = """
 SELECT * FROM file_optional
 WHERE peer <= 10 AND %s
@@ -302,7 +302,7 @@ class ContentDbPlugin(object):
 break
 limit_start += 50
-self.log.debug("queryDeletableFiles returning everyting")
+self.log.debug("queryDeletableFiles returning everything")
 # At the end return all files
 query = """

@@ -398,7 +398,7 @@ class UiRequestPlugin(object):
 for part in self.renderMemory():
 yield part
-gc.collect() # Implicit grabage collection
+gc.collect() # Implicit garbage collection
 yield "Done in %.1f" % (time.time() - s)
 @helper.encodeResponse
@@ -437,7 +437,7 @@ class UiRequestPlugin(object):
 yield "- %s: %s<br>" % (attr, html.escape(str(getattr(obj, attr))))
 yield "<br>"
-gc.collect() # Implicit grabage collection
+gc.collect() # Implicit garbage collection
 @helper.encodeResponse
 def actionListobj(self):
@@ -505,7 +505,7 @@ class UiRequestPlugin(object):
 for obj, stat in sorted(list(ref_count.items()), key=lambda x: x[1][0], reverse=True)[0:30]: # Sorted by count
 yield " - %.1fkb = %s x %s<br>" % (stat[1], stat[0], html.escape(str(obj)))
-gc.collect() # Implicit grabage collection
+gc.collect() # Implicit garbage collection
 @helper.encodeResponse
 def actionGcCollect(self):

@@ -18,7 +18,7 @@
 "Use Tor bridges": "Tor bridge-ek használata",
 "Use obfuscated bridge relays to avoid network level Tor block (even slower)": "Tor elrejtő bridge-ek használata a hálózat szintű Tor tiltás megkerüléséhez (még lassabb)",
-"Discover new peers using these adresses": "Új kapcsolat felfedezése ezen címek használatával",
+"Discover new peers using these addresses": "Új kapcsolat felfedezése ezen címek használatával",
 "Trackers files": "Tracker file-ok",
 "Load additional list of torrent trackers dynamically, from a file": "További trackerek felfedezése dinamikusan, ezen file használatával",

@@ -26,7 +26,7 @@
 "Use Tor bridges": "Użyj Tor bridges",
 "Use obfuscated bridge relays to avoid network level Tor block (even slower)": "Użyj obfuskacji, aby uniknąć blokowania Tor na poziomie sieci (jeszcze wolniejsze działanie)",
 "Trackers": "Trackery",
-"Discover new peers using these adresses": "Wykryj użytkowników korzystając z tych adresów trackerów",
+"Discover new peers using these addresses": "Wykryj użytkowników korzystając z tych adresów trackerów",
 "Trackers files": "Pliki trackerów",
 "Load additional list of torrent trackers dynamically, from a file": "Dynamicznie wczytaj dodatkową listę trackerów z pliku .json",

@@ -18,7 +18,7 @@
 "Use Tor bridges": "Usar pontes do Tor",
 "Use obfuscated bridge relays to avoid network level Tor block (even slower)": "Usar relays de ponte ofuscados para evitar o bloqueio de Tor de nível de rede (ainda mais lento)",
-"Discover new peers using these adresses": "Descobrir novos peers usando estes endereços",
+"Discover new peers using these addresses": "Descobrir novos peers usando estes endereços",
 "Trackers files": "Arquivos de trackers",
 "Load additional list of torrent trackers dynamically, from a file": "Carregar lista adicional de trackers de torrent dinamicamente, à partir de um arquivo",

@@ -26,7 +26,7 @@
 "Use Tor bridges": "使用Tor网桥",
 "Use obfuscated bridge relays to avoid network level Tor block (even slower)": "使用混淆网桥中继,从而避免网络层Tor阻碍 (超级慢)",
-"Discover new peers using these adresses": "使用这些地址发现新节点",
+"Discover new peers using these addresses": "使用这些地址发现新节点",
 "Trackers files": "Trackers文件",
 "Load additional list of torrent trackers dynamically, from a file": "从一个文件中,动态加载额外的种子Trackers列表",

@@ -111,7 +111,7 @@ class ConfigStorage extends Class
 title: "Trackers"
 key: "trackers"
 type: "textarea"
-description: "Discover new peers using these adresses"
+description: "Discover new peers using these addresses"
 section.items.push
 title: "Trackers files"

@@ -1427,7 +1427,7 @@
 title: "Trackers",
 key: "trackers",
 type: "textarea",
-description: "Discover new peers using these adresses"
+description: "Discover new peers using these addresses"
 });
 section.items.push({
 title: "Trackers files",

@@ -5299,7 +5299,7 @@
 var hist = doc.history, event, selAfter = doc.sel;
 var source = type == "undo" ? hist.done : hist.undone, dest = type == "undo" ? hist.undone : hist.done;
-// Verify that there is a useable event (so that ctrl-z won't
+// Verify that there is a usable event (so that ctrl-z won't
 // needlessly clear selection events)
 var i = 0;
 for (; i < source.length; i++) {
@@ -11370,7 +11370,7 @@
 });
 } else {
 if (startIndent.length > nextIndent.length) return;
-// This doesn't run if the next line immediatley indents, as it is
+// This doesn't run if the next line immediately indents, as it is
 // not clear of the users intention (new indented item or same level)
 if ((startIndent.length < nextIndent.length) && (lookAhead === 1)) return;
 skipCount += 1;
@@ -14492,7 +14492,7 @@ var jsonlint=function(){var a=!0,b=!1,c={},d=function(){var a={trace:function(){
 // highlighted only if the selected text is a word. showToken, when enabled,
 // will cause the current token to be highlighted when nothing is selected.
 // delay is used to specify how much time to wait, in milliseconds, before
-// highlighting the matches. If annotateScrollbar is enabled, the occurences
+// highlighting the matches. If annotateScrollbar is enabled, the occurrences
 // will be highlighted on the scrollbar via the matchesonscrollbar addon.
 (function(mod) {

@@ -5295,7 +5295,7 @@
 var hist = doc.history, event, selAfter = doc.sel;
 var source = type == "undo" ? hist.done : hist.undone, dest = type == "undo" ? hist.undone : hist.done;
-// Verify that there is a useable event (so that ctrl-z won't
+// Verify that there is a usable event (so that ctrl-z won't
 // needlessly clear selection events)
 var i = 0;
 for (; i < source.length; i++) {

@@ -90,7 +90,7 @@
 });
 } else {
 if (startIndent.length > nextIndent.length) return;
-// This doesn't run if the next line immediatley indents, as it is
+// This doesn't run if the next line immediately indents, as it is
 // not clear of the users intention (new indented item or same level)
 if ((startIndent.length < nextIndent.length) && (lookAhead === 1)) return;
 skipCount += 1;

@@ -16,7 +16,7 @@
 // highlighted only if the selected text is a word. showToken, when enabled,
 // will cause the current token to be highlighted when nothing is selected.
 // delay is used to specify how much time to wait, in milliseconds, before
-// highlighting the matches. If annotateScrollbar is enabled, the occurences
+// highlighting the matches. If annotateScrollbar is enabled, the occurrences
 // will be highlighted on the scrollbar via the matchesonscrollbar addon.
 (function(mod) {

@@ -90,7 +90,7 @@ class Text
 next_find = s2[next_find_i]
 if extra_parts[next_find_i]
-extra_parts[next_find_i] = "" # Extra chars on the end doesnt matter
+extra_parts[next_find_i] = "" # Extra chars at the end don't matter
 extra_parts = (val for key, val of extra_parts)
 if next_find_i >= s2.length
 return extra_parts.length + extra_parts.join("").length

@@ -86,7 +86,7 @@ class PluginList extends Class
 )
 else
-tag_update = h("span.version-missing", "(unable to get latest vesion: update site missing)")
+tag_update = h("span.version-missing", "(unable to get latest version: update site missing)")
 tag_version = h("span.version",[
 "rev#{plugin.info.rev} ",

@@ -1400,7 +1400,7 @@
 }, "Update to rev" + plugin.site_info.rev);
 }
 } else {
-tag_update = h("span.version-missing", "(unable to get latest vesion: update site missing)");
+tag_update = h("span.version-missing", "(unable to get latest version: update site missing)");
 }
 tag_version = h("span.version", ["rev" + plugin.info.rev + " ", tag_update]);
 tag_source = h("div.source", [

@@ -14,7 +14,7 @@ class SiteManagerPlugin(object):
 dns_cache_path = "%s/dns_cache.json" % config.data_dir
 dns_cache = None
-# Checks if its a valid address
+# Checks if it's a valid address
 def isAddress(self, address):
 if self.isDomain(address):
 return True
@@ -92,27 +92,27 @@ class SiteManagerPlugin(object):
 domain = domain.lower()
 if self.dns_cache == None:
 self.loadDnsCache()
-if domain.count(".") < 2: # Its a topleved request, prepend @. to it
+if domain.count(".") < 2: # It's a top-level request, prepend @. to it
 domain = "@."+domain
 domain_details = self.dns_cache.get(domain)
-if domain_details and time.time() < domain_details[1]: # Found in cache and its not expired
+if domain_details and time.time() < domain_details[1]: # Found in cache and it's not expired
 return domain_details[0]
 else:
-# Resovle dns using dnschain
+# Resolve dns using dnschain
 thread_dnschain_info = gevent.spawn(self.resolveDomainDnschainInfo, domain)
 thread_dnschain_net = gevent.spawn(self.resolveDomainDnschainNet, domain)
 gevent.joinall([thread_dnschain_net, thread_dnschain_info]) # Wait for finish
-if thread_dnschain_info.value and thread_dnschain_net.value: # Booth successfull
+if thread_dnschain_info.value and thread_dnschain_net.value: # Booth successful
 if thread_dnschain_info.value == thread_dnschain_net.value: # Same returned value
 return thread_dnschain_info.value
 else:
-log.error("Dns %s missmatch: %s != %s" % (domain, thread_dnschain_info.value, thread_dnschain_net.value))
+log.error("Dns %s mismatch: %s != %s" % (domain, thread_dnschain_info.value, thread_dnschain_net.value))
 # Problem during resolve
 if domain_details: # Resolve failed, but we have it in the cache
-domain_details[1] = time.time()+60*60 # Dont try again for 1 hour
+domain_details[1] = time.time()+60*60 # Don't try again for 1 hour
 return domain_details[0]
 else: # Not found in cache
 self.dns_cache[domain] = [None, time.time()+60] # Don't check again for 1 min
@@ -136,7 +136,7 @@ class SiteManagerPlugin(object):
 def get(self, address):
 if self.sites == None: # Not loaded yet
 self.load()
-if self.isDomain(address): # Its looks like a domain
+if self.isDomain(address): # It looks like a domain
 address_resolved = self.resolveDomain(address)
 if address_resolved: # Domain found
 site = self.sites.get(address_resolved)

@@ -1,5 +1,5 @@
 {
 "name": "UiPassword",
-"description": "Password based autentication on the web interface.",
+"description": "Password based authentication on the web interface.",
 "default": "disabled"
 }

@@ -61,7 +61,7 @@ class Config(object):
 elif this_file.endswith("/Contents/Resources/core/src/Config.py"):
 # Running as ZeroNet.app
 if this_file.startswith("/Application") or this_file.startswith("/private") or this_file.startswith(os.path.expanduser("~/Library")):
-# Runnig from non-writeable directory, put data to Application Support
+# Running from non-writeable directory, put data to Application Support
 start_dir = os.path.expanduser("~/Library/Application Support/ZeroNet")
 else:
 # Running from writeable directory put data next to .app
@@ -444,7 +444,7 @@ class Config(object):
 # Parse command line arguments
 def parseCommandline(self, argv, silent=False):
-# Find out if action is specificed on start
+# Find out if action is specified on start
 action = self.getAction(argv)
 if not action:
 argv.append("--end")

@@ -27,7 +27,7 @@ class Connection(object):
 self.cert_pin = None
 if "#" in ip:
 ip, self.cert_pin = ip.split("#")
-self.target_onion = target_onion # Requested onion adress
+self.target_onion = target_onion # Requested onion address
 self.id = server.last_connection_id
 server.last_connection_id += 1
 self.protocol = "?"
@@ -456,7 +456,7 @@ class Connection(object):
 if config.debug_socket:
 self.log("Handshake response: %s, ping: %s" % (message, ping))
 self.last_ping_delay = ping
-# Server switched to crypt, lets do it also if not crypted already
+# Server switched to crypt, let's do it also if not crypted already
 if message.get("crypt") and not self.sock_wrapped:
 self.crypt = message["crypt"]
 server = (self.type == "in")

@@ -965,7 +965,7 @@ class ContentManager(object):
 if "signs" in new_content:
 del(new_content["signs"]) # The file signed without the signs
-sign_content = json.dumps(new_content, sort_keys=True) # Dump the json to string to remove whitepsace
+sign_content = json.dumps(new_content, sort_keys=True) # Dump the json to string to remove whitespace
 # Fix float representation error on Android
 modified = new_content["modified"]

@@ -78,7 +78,7 @@ class Db(object):
 self.id = next_db_id
 next_db_id += 1
 self.progress_sleeping = False
-self.commiting = False
+self.committing = False
 self.log = logging.getLogger("Db#%s:%s" % (self.id, schema["db_name"]))
 self.table_names = None
 self.collect_stats = False
@@ -159,15 +159,15 @@ class Db(object):
 self.log.debug("Commit ignored: No connection")
 return False
-if self.commiting:
-self.log.debug("Commit ignored: Already commiting")
+if self.committing:
+self.log.debug("Commit ignored: Already committing")
 return False
 try:
 s = time.time()
-self.commiting = True
+self.committing = True
 self.conn.commit()
-self.log.debug("Commited in %.3fs (reason: %s)" % (time.time() - s, reason))
+self.log.debug("Committed in %.3fs (reason: %s)" % (time.time() - s, reason))
 return True
 except Exception as err:
 if "SQL statements in progress" in str(err):
@@ -176,7 +176,7 @@ class Db(object):
 self.log.error("Commit error: %s (reason: %s)" % (Debug.formatException(err), reason))
 return False
 finally:
-self.commiting = False
+self.committing = False
 def insertOrUpdate(self, *args, **kwargs):
 if not self.conn:
@@ -383,7 +383,7 @@ class Db(object):
 self.log.debug("Json file %s load error: %s" % (file_path, err))
 data = {}
-# No cursor specificed
+# No cursor specified
 if not cur:
 cur = self.getSharedCursor()
 cur.logging = False
@@ -432,7 +432,7 @@ class Db(object):
 # Insert data to tables
 for table_settings in dbmap.get("to_table", []):
 if isinstance(table_settings, dict): # Custom settings
-table_name = table_settings["table"] # Table name to insert datas
+table_name = table_settings["table"] # Table name to insert data
 node = table_settings.get("node", table_name) # Node keyname in data json file
 key_col = table_settings.get("key_col") # Map dict key as this col
 val_col = table_settings.get("val_col") # Map dict value as this col

@@ -84,7 +84,7 @@ class DbCursor:
 def execute(self, query, params=None):
 query = query.strip()
-while self.db.progress_sleeping or self.db.commiting:
+while self.db.progress_sleeping or self.db.committing:
 time.sleep(0.1)
 self.db.last_query_time = time.time()
@@ -133,7 +133,7 @@ class DbCursor:
 return res
 def executemany(self, query, params):
-while self.db.progress_sleeping or self.db.commiting:
+while self.db.progress_sleeping or self.db.committing:
 time.sleep(0.1)
 self.db.last_query_time = time.time()

@@ -68,7 +68,7 @@ def merge(merged_path):
 return # Assets not changed, nothing to do
 old_parts = {}
-if os.path.isfile(merged_path): # Find old parts to avoid unncessary recompile
+if os.path.isfile(merged_path): # Find old parts to avoid unnecessary recompile
 merged_old = open(merged_path, "rb").read()
 for match in re.findall(rb"(/\* ---- (.*?) ---- \*/(.*?)(?=/\* ----|$))", merged_old, re.DOTALL):
 old_parts[match[1].decode()] = match[2].strip(b"\n\r")

@@ -55,7 +55,7 @@ class FileServer(ConnectionServer):
 self.log.debug("Supported IP types: %s" % self.supported_ip_types)
 if ip_type == "dual" and ip == "::":
-# Also bind to ipv4 addres in dual mode
+# Also bind to ipv4 address in dual mode
 try:
 self.log.debug("Binding proxy to %s:%s" % ("::", self.port))
 self.stream_server_proxy = StreamServer(
@@ -296,7 +296,7 @@ class FileServer(ConnectionServer):
 site.retryBadFiles()
 if time.time() - site.settings.get("modified", 0) < 60 * 60 * 24 * 7:
-# Keep active connections if site has been modified witin 7 days
+# Keep active connections if site has been modified within 7 days
 connected_num = site.needConnections(check_site_on_reconnect=True)
 if connected_num < config.connected_limit: # This site has small amount of peers, protect them from closing

@@ -169,7 +169,7 @@ class Site(object):
 content_inner_dir = helper.getDirname(inner_path)
 if not found:
 self.log.debug("DownloadContent %s: Download failed, check_modifications: %s" % (inner_path, check_modifications))
-if check_modifications: # Download failed, but check modifications if its succed later
+if check_modifications: # Download failed, but check modifications if it's succeeded later
 self.onFileDone.once(lambda file_name: self.checkModifications(0), "check_modifications")
 return False # Could not download content.json
@@ -193,7 +193,7 @@ class Site(object):
 site_size_limit = self.getSizeLimit() * 1024 * 1024
 content_size = len(json.dumps(self.content_manager.contents[inner_path], indent=1)) + sum([file["size"] for file in list(self.content_manager.contents[inner_path].get("files", {}).values()) if file["size"] >= 0]) # Size of new content
 if site_size_limit < content_size:
-# Not enought don't download anything
+# Not enough don't download anything
 self.log.debug("DownloadContent Size limit reached (site too big please increase limit): %.2f MB > %.2f MB" % (content_size / 1024 / 1024, site_size_limit / 1024 / 1024))
 return False
@@ -236,7 +236,7 @@ class Site(object):
 diff_success = False
 if not diff_success:
-# Start download and dont wait for finish, return the event
+# Start download and don't wait for finish, return the event
 res = self.needFile(file_inner_path, blocking=False, update=self.bad_files.get(file_inner_path), peer=peer)
 if res is not True and res is not False: # Need downloading and file is allowed
 file_threads.append(res) # Append evt
@@ -251,7 +251,7 @@ class Site(object):
 continue
 if not self.isDownloadable(file_inner_path):
 continue
-# Start download and dont wait for finish, return the event
+# Start download and don't wait for finish, return the event
 res = self.pooledNeedFile(
 file_inner_path, blocking=False, update=self.bad_files.get(file_inner_path), peer=peer
 )
@@ -344,7 +344,7 @@ class Site(object):
 if check_size: # Check the size first
 valid = self.downloadContent("content.json", download_files=False) # Just download content.json files
 if not valid:
-return False # Cant download content.jsons or size is not fits
+return False # Can't download content.jsons or size is not fits
 # Download everything
 valid = self.downloadContent("content.json", check_modifications=blind_includes)
@@ -1033,7 +1033,7 @@ class Site(object):
 break
 if sent:
 my_hashfield_changed = self.content_manager.hashfield.time_changed
-self.log.debug("Sent my hashfield (chaged %.3fs ago) to %s peers" % (time.time() - my_hashfield_changed, sent))
+self.log.debug("Sent my hashfield (changed %.3fs ago) to %s peers" % (time.time() - my_hashfield_changed, sent))
 return sent
 # Update hashfield
@@ -1121,7 +1121,7 @@ class Site(object):
 self.log.debug("Bad file solved: %s" % inner_path)
 del(self.bad_files[inner_path])
-# Update content.json last downlad time
+# Update content.json last download time
 if inner_path == "content.json":
 if not self.settings.get("downloaded"):
 self.settings["downloaded"] = int(time.time())

@@ -37,7 +37,7 @@ class SiteManager(object):
 address_found = []
 added = 0
 load_s = time.time()
-# Load new adresses
+# Load new addresses
 try:
 json_path = "%s/sites.json" % config.data_dir
 data = json.load(open(json_path))
@@ -68,14 +68,14 @@ class SiteManager(object):
 address_found.append(address)
-# Remove deleted adresses
+# Remove deleted addresses
 if cleanup:
 for address in list(self.sites.keys()):
 if address not in address_found:
 del(self.sites[address])
 self.log.debug("Removed site: %s" % address)
-# Remove orpan sites from contentdb
+# Remove orphan sites from contentdb
 content_db = ContentDb.getContentDb()
 for row in content_db.execute("SELECT * FROM site").fetchall():
 address = row["address"]
@@ -172,7 +172,7 @@ class SiteManager(object):
 def add(self, address, all_file=True, settings=None, **kwargs):
 from .Site import Site
 self.sites_changed = int(time.time())
-# Try to find site with differect case
+# Try to find site with different case
 for recover_address, recover_site in list(self.sites.items()):
 if recover_address.lower() == address.lower():
 return recover_site

@@ -29,7 +29,7 @@ thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")
 class SiteStorage(object):
 def __init__(self, site, allow_create=True):
 self.site = site
-self.directory = "%s/%s" % (config.data_dir, self.site.address) # Site data diretory
+self.directory = "%s/%s" % (config.data_dir, self.site.address) # Site data directory
 self.allowed_dir = os.path.abspath(self.directory) # Only serve file within this dir
 self.log = site.log
 self.db = None # Db class
@@ -94,7 +94,7 @@ class SiteStorage(object):
 try:
 changed_tables = self.db.checkTables()
 if changed_tables:
-self.rebuildDb(delete_db=False, reason="Changed tables") # TODO: only update the changed table datas
+self.rebuildDb(delete_db=False, reason="Changed tables") # TODO: only update the changed table data
 except sqlite3.OperationalError:
 pass
@@ -102,7 +102,7 @@ class SiteStorage(object):
 @util.Noparallel()
 def getDb(self):
 if self.event_db_busy: # Db not ready for queries
-self.log.debug("Wating for db...")
+self.log.debug("Waiting for db...")
 self.event_db_busy.get() # Wait for event
 if not self.db:
 self.loadDb()
@@ -130,7 +130,7 @@ class SiteStorage(object):
 content_inner_path_dir = helper.getDirname(content_inner_path) # Content.json dir relative to site
 for file_relative_path in list(content.get("files", {}).keys()) + list(content.get("files_optional", {}).keys()):
 if not file_relative_path.endswith(".json") and not file_relative_path.endswith("json.gz"):
-continue # We only interesed in json files
+continue # We are only interested in json files
 file_inner_path = content_inner_path_dir + file_relative_path # File Relative to site dir
 file_inner_path = file_inner_path.strip("/") # Strip leading /
 if self.isFile(file_inner_path):
@@ -297,7 +297,7 @@ class SiteStorage(object):
 def rename(self, inner_path_before, inner_path_after):
 for retry in range(3):
 rename_err = None
-# To workaround "The process cannot access the file beacause it is being used by another process." error
+# To workaround "The process cannot access the file because it is being used by another process." error
 try:
 os.rename(self.getPath(inner_path_before), self.getPath(inner_path_after))
 break
@@ -425,7 +425,7 @@ class SiteStorage(object):
 back = defaultdict(int)
 back["bad_files"] = bad_files
 i = 0
-self.log.debug("Verifing files...")
+self.log.debug("Verifying files...")
 if not self.site.content_manager.contents.get("content.json"): # No content.json, download it first
 self.log.debug("VerifyFile content.json not exists")

@@ -119,7 +119,7 @@ def info():
 else:
 memory_info = process.get_memory_info
 while 1:
-print(total_num, "req", (total_bytes / 1024), "kbytes", "transfered in", time.time() - s, end=' ')
+print(total_num, "req", (total_bytes / 1024), "kbytes", "transferred in", time.time() - s, end=' ')
 print("using", clipher, "Mem:", memory_info()[0] / float(2 ** 20))
 time.sleep(1)
@@ -132,31 +132,31 @@ for test in range(1):
 gevent.joinall(clients)
-print(total_num, "req", (total_bytes / 1024), "kbytes", "transfered in", time.time() - s)
+print(total_num, "req", (total_bytes / 1024), "kbytes", "transferred in", time.time() - s)
 # Separate client/server process:
 # 10*10*100:
-# Raw: 10000 req 1000009 kbytes transfered in 5.39999985695
-# RSA 2048: 10000 req 1000009 kbytes transfered in 27.7890000343 using ('ECDHE-RSA-AES256-SHA', 'TLSv1/SSLv3', 256)
-# ECC: 10000 req 1000009 kbytes transfered in 26.1959998608 using ('ECDHE-ECDSA-AES256-SHA', 'TLSv1/SSLv3', 256)
-# ECC: 10000 req 1000009 kbytes transfered in 28.2410001755 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 13.3828125
+# Raw: 10000 req 1000009 kbytes transferred in 5.39999985695
+# RSA 2048: 10000 req 1000009 kbytes transferred in 27.7890000343 using ('ECDHE-RSA-AES256-SHA', 'TLSv1/SSLv3', 256)
+# ECC: 10000 req 1000009 kbytes transferred in 26.1959998608 using ('ECDHE-ECDSA-AES256-SHA', 'TLSv1/SSLv3', 256)
+# ECC: 10000 req 1000009 kbytes transferred in 28.2410001755 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 13.3828125
 #
 # 10*100*10:
-# Raw: 10000 req 1000009 kbytes transfered in 7.02700018883 Mem: 14.328125
-# RSA 2048: 10000 req 1000009 kbytes transfered in 44.8860001564 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.078125
-# ECC: 10000 req 1000009 kbytes transfered in 37.9430000782 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.0234375
+# Raw: 10000 req 1000009 kbytes transferred in 7.02700018883 Mem: 14.328125
+# RSA 2048: 10000 req 1000009 kbytes transferred in 44.8860001564 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.078125
+# ECC: 10000 req 1000009 kbytes transferred in 37.9430000782 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 20.0234375
 #
 # 1*100*100:
-# Raw: 10000 req 1000009 kbytes transfered in 4.64400005341 Mem: 14.06640625
-# RSA: 10000 req 1000009 kbytes transfered in 24.2300000191 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 19.7734375
-# ECC: 10000 req 1000009 kbytes transfered in 22.8849999905 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 17.8125
-# AES128: 10000 req 1000009 kbytes transfered in 21.2839999199 using ('AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.1328125
-# ECC+128: 10000 req 1000009 kbytes transfered in 20.496999979 using ('ECDHE-ECDSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.40234375
+# Raw: 10000 req 1000009 kbytes transferred in 4.64400005341 Mem: 14.06640625
+# RSA: 10000 req 1000009 kbytes transferred in 24.2300000191 using ('ECDHE-RSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 19.7734375
+# ECC: 10000 req 1000009 kbytes transferred in 22.8849999905 using ('ECDHE-ECDSA-AES256-GCM-SHA384', 'TLSv1/SSLv3', 256) Mem: 17.8125
+# AES128: 10000 req 1000009 kbytes transferred in 21.2839999199 using ('AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.1328125
+# ECC+128: 10000 req 1000009 kbytes transferred in 20.496999979 using ('ECDHE-ECDSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.40234375
 #
 #
 # Single process:
 # 1*100*100
-# RSA: 10000 req 1000009 kbytes transfered in 41.7899999619 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 26.91015625
+# RSA: 10000 req 1000009 kbytes transferred in 41.7899999619 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 26.91015625
 #
 # 10*10*100
-# RSA: 10000 req 1000009 kbytes transfered in 40.1640000343 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.94921875
+# RSA: 10000 req 1000009 kbytes transferred in 40.1640000343 using ('ECDHE-RSA-AES128-GCM-SHA256', 'TLSv1/SSLv3', 128) Mem: 14.94921875

@@ -60,7 +60,7 @@ class TestDb:
 {"test_id": [1, 2, 3], "title": ["Test #2", "Test #3", "Test #4"]}
 ).fetchone()["num"] == 2
-# Large ammount of IN values
+# Large amount of IN values
 assert db.execute(
 "SELECT COUNT(*) AS num FROM test WHERE ?",
 {"not__test_id": list(range(2, 3000))}

@@ -21,8 +21,8 @@ class TestDebug:
 (["/root/main.py:17"], ["/root/main.py line 17"]),
 (["{gevent}:13"], ["<gevent>/__init__.py line 13"]), # modules
 (["{os}:13"], ["<os> line 13"]), # python builtin modules
-(["src/gevent/event.py:17"], ["<gevent>/event.py line 17"]), # gevent-overriden __file__
-(["@/src/Db/Db.py:17", "@/src/Db/DbQuery.py:1"], ["Db.py line 17", "DbQuery.py line 1"]), # mutliple args
+(["src/gevent/event.py:17"], ["<gevent>/event.py line 17"]), # gevent-overridden __file__
+(["@/src/Db/Db.py:17", "@/src/Db/DbQuery.py:1"], ["Db.py line 17", "DbQuery.py line 1"]), # multiple args
 (["@/src/Db/Db.py:17", "@/src/Db/Db.py:1"], ["Db.py line 17", "1"]), # same file
 (["{os}:1", "@/src/Db/Db.py:17"], ["<os> line 1", "Db.py line 17"]), # builtins
 (["{gevent}:1"] + ["{os}:3"] * 4 + ["@/src/Db/Db.py:17"], ["<gevent>/__init__.py line 1", "...", "Db.py line 17"])

@@ -61,7 +61,7 @@ class TestPeer:
 assert site.content_manager.hashfield
 assert len(site.content_manager.hashfield) > 0
-# Check exsist hash
+# Check existing hash
 assert site.content_manager.hashfield.getHashId(sample_hash) in site.content_manager.hashfield
 # Add new hash
@@ -137,7 +137,7 @@ class TestPeer:
 assert peer_file_server.findHashIds([1234]) == {}
-# Add fake peer with requred hash
+# Add fake peer with required hash
 fake_peer_1 = site.addPeer(file_server.ip_external, 1544)
 fake_peer_1.hashfield.append(1234)
 fake_peer_2 = site.addPeer("1.2.3.5", 1545)

@@ -500,7 +500,7 @@ class TestSiteDownload:
 site.storage.writeJson("content.json", content_json)
 changed, deleted = site.content_manager.loadContent("content.json", force=True)
-# Make sure we have 2 differents content.json
+# Make sure we have 2 different content.json
 assert site_temp.storage.open("content.json").read() != site.storage.open("content.json").read()
 # Generate diff

@@ -125,7 +125,7 @@ class TestTor:
 assert peer_file_server.findHashIds([1234]) == {}
-# Add fake peer with requred hash
+# Add fake peer with required hash
 fake_peer_1 = site.addPeer("bka4ht2bzxchy44r.onion", 1544)
 fake_peer_1.hashfield.append(1234)
 fake_peer_2 = site.addPeer("1.2.3.5", 1545)

@@ -22,13 +22,13 @@
 "post_id": 39,
 "title": "Changelog: May 25, 2015",
 "date_published": 1432511642.167,
-"body": "- Version 0.3.0, rev187\n- Trusted authorization provider support: Easier multi-user sites by allowing site owners to define tusted third-party user certificate signers. (more info about it in the next days)\n- `--publish` option to siteSign to publish automatically after the new files signed.\n- `cryptSign` command line command to sign message using private key.\n- New, more stable OpenSSL layer that also works on OSX.\n- New json table format support.\n- DbCursor SELECT parameters bugfix.\n- Faster multi-threaded peer discovery from trackers.\n- New http trackers added.\n- Wait for dbschema.json file to execute query.\n- Handle json import errors.\n- More compact json writeJson storage command output.\n- Workaround to make non target=_top links work.\n- Cleaner UiWebsocket command router.\n- Notify other local users on local file changes.\n- Option to wait file download before execute query.\n- fileRules, certAdd, certSelect, certSet websocket API commands.\n- Allow more file errors on big sites.\n- On stucked downloads skip worker's current file instead of stopping it.\n- NoParallel parameter bugfix.\n- RateLimit interval bugfix.\n- Updater skips non-writeable files.\n- Try to close OpenSSL dll before update.\n\nZeroBlog:\n- Rewritten to use SQL database\n- Commenting on posts (**Please note: The comment publishing and distribution can be slow until most of the clients is not updated to version 0.3.0**)\n\n![comments](data/img/zeroblog-comments.png)\n\nZeroID\n- Sample Trusted authorization provider site with Bitmessage registration support\n\n![comments](data/img/zeroid.png)"
+"body": "- Version 0.3.0, rev187\n- Trusted authorization provider support: Easier multi-user sites by allowing site owners to define trusted third-party user certificate signers. (more info about it in the next days)\n- `--publish` option to siteSign to publish automatically after the new files signed.\n- `cryptSign` command line command to sign message using private key.\n- New, more stable OpenSSL layer that also works on OSX.\n- New json table format support.\n- DbCursor SELECT parameters bugfix.\n- Faster multi-threaded peer discovery from trackers.\n- New http trackers added.\n- Wait for dbschema.json file to execute query.\n- Handle json import errors.\n- More compact json writeJson storage command output.\n- Workaround to make non target=_top links work.\n- Cleaner UiWebsocket command router.\n- Notify other local users on local file changes.\n- Option to wait file download before execute query.\n- fileRules, certAdd, certSelect, certSet websocket API commands.\n- Allow more file errors on big sites.\n- On stuck downloads skip worker's current file instead of stopping it.\n- NoParallel parameter bugfix.\n- RateLimit interval bugfix.\n- Updater skips non-writeable files.\n- Try to close OpenSSL dll before update.\n\nZeroBlog:\n- Rewritten to use SQL database\n- Commenting on posts (**Please note: The comment publishing and distribution can be slow until most of the clients is not updated to version 0.3.0**)\n\n![comments](data/img/zeroblog-comments.png)\n\nZeroID\n- Sample Trusted authorization provider site with Bitmessage registration support\n\n![comments](data/img/zeroid.png)"
 },
 {
 "post_id": 38,
 "title": "Status report: Trusted authorization providers",
 "date_published": 1431286381.226,
-"body": "Currently working on a new feature that allows to create multi-user sites more easily. For example it will allows us to have comments on ZeroBlog (without contacting the site owner).\n\nCurrent status:\n\n - Sign/verification process: 90%\n - Sample trusted authorization provider site: 70%\n - ZeroBlog modifications: 30%\n - Authorization UI enhacements: 10%\n - Total progress: 60%\n \nEta.: 1-2weeks\n\n### Update: May 18, 2015:\n\nThings left:\n - More ZeroBlog modifications on commenting interface\n - Bitmessage support in Sample trusted authorization provider site\n - Test everything on multiple platform/browser and machine\n - Total progress: 80%\n\nIf no major flaw discovered it should be out this week."
+"body": "Currently working on a new feature that allows to create multi-user sites more easily. For example it will allows us to have comments on ZeroBlog (without contacting the site owner).\n\nCurrent status:\n\n - Sign/verification process: 90%\n - Sample trusted authorization provider site: 70%\n - ZeroBlog modifications: 30%\n - Authorization UI enhancements: 10%\n - Total progress: 60%\n \nEta.: 1-2weeks\n\n### Update: May 18, 2015:\n\nThings left:\n - More ZeroBlog modifications on commenting interface\n - Bitmessage support in Sample trusted authorization provider site\n - Test everything on multiple platform/browser and machine\n - Total progress: 80%\n\nIf no major flaw discovered it should be out this week."
 },
 {
 "post_id": 37,
@@ -58,7 +58,7 @@
 "post_id": 33,
 "title": "Changelog: Apr 24, 2014",
 "date_published": 1429873756.187,
-"body": " - Revision 120\n - Batched publishing to avoid update flood: Only send one update in every 7 seconds\n - Protection against update flood by adding update queue: Only allows 1 update in every 10 second for the same file\n - Fix stucked notification icon\n - Fix websocket error when writing to not-owned sites"
+"body": " - Revision 120\n - Batched publishing to avoid update flood: Only send one update in every 7 seconds\n - Protection against update flood by adding update queue: Only allows 1 update in every 10 second for the same file\n - Fix stuck notification icon\n - Fix websocket error when writing to not-owned sites"
 },
 {
 "post_id": 32,
@@ -76,7 +76,7 @@
 "post_id": 30,
 "title": "Changelog: Apr 16, 2015",
 "date_published": 1429135541.581,
-"body": "Apr 15:\n\n - Version 0.2.9\n - To get rid of dead ips only send peers over pex that messaged within 2 hour\n - Only ask peers from 2 sources using pex every 20 min\n - Fixed mysterious notification icon disappearings\n - Mark peers as bad if publish is timed out (5s+)"
+"body": "Apr 15:\n\n - Version 0.2.9\n - To get rid of dead ips only send peers over pex that messaged within 2 hour\n - Only ask peers from 2 sources using pex every 20 min\n - Fixed mysterious notification icon disappearances\n - Mark peers as bad if publish is timed out (5s+)"
 },
 {
 "post_id": 29,
@@ -112,13 +112,13 @@
 "post_id": 24,
 "title": "Changelog: Mar 29, 2015",
 "date_published": 1427758356.109,
-"body": " - Version 0.2.8\n - Namecoin (.bit) domain support!\n - Possible to disable backward compatibility with old version to save some memory\n - Faster content publishing (commenting, posting etc.)\n - Display error on internal server errors\n - Better progress bar\n - Crash and bugfixes\n - Removed coppersurfer tracker (its down atm), added eddie4\n - Sorry, the auto updater broken for this version: please overwrite your current `update.py` file with the [latest one from github](https://raw.githubusercontent.com/HelloZeroNet/ZeroNet/master/update.py), run it and restart ZeroNet.\n - Fixed updater\n\n![domain](data/img/domain.png)\n\nZeroName\n\n - New site for resolving namecoin domains and display registered ones\n\n![ZeroName](data/img/zeroname.png)\nZeroHello\n\n - Automatically links to site's domain names if its specificed in content.json `domain` field\n\n"
+"body": " - Version 0.2.8\n - Namecoin (.bit) domain support!\n - Possible to disable backward compatibility with old version to save some memory\n - Faster content publishing (commenting, posting etc.)\n - Display error on internal server errors\n - Better progress bar\n - Crash and bugfixes\n - Removed coppersurfer tracker (its down atm), added eddie4\n - Sorry, the auto updater broken for this version: please overwrite your current `update.py` file with the [latest one from github](https://raw.githubusercontent.com/HelloZeroNet/ZeroNet/master/update.py), run it and restart ZeroNet.\n - Fixed updater\n\n![domain](data/img/domain.png)\n\nZeroName\n\n - New site for resolving namecoin domains and display registered ones\n\n![ZeroName](data/img/zeroname.png)\nZeroHello\n\n - Automatically links to site's domain names if its specified in content.json `domain` field\n\n"
 },
 {
 "post_id": 22,
 "title": "Changelog: Mar 23, 2015",
 "date_published": 1427159576.994,
-"body": " - Version 0.2.7\n - Plugin system: Allows extend ZeroNet without modify the core source\n - Comes with 3 plugin:\n - Multiuser: User login/logout based on BIP32 master seed, generate new master seed on visit (disabled by default to enable it just remove the disabled- from the directory name)\n - Stats: /Stats url moved to separate plugin for demonstration reasons\n - DonationMessage: Puts a little donation link to the bottom of every page (disabled by default)\n - Reworked module import system\n - Lazy user auth_address generatation\n - Allow to send prompt dialog to user from server-side\n - Update script remembers plugins enabled/disabled status\n - Multiline notifications\n - Cookie parser\n\nZeroHello in multiuser mode:\n\n - Logout button\n - Identicon generated based on logined user xpub address\n\n![Multiuser](data/img/multiuser.png)"
+"body": " - Version 0.2.7\n - Plugin system: Allows extend ZeroNet without modify the core source\n - Comes with 3 plugin:\n - Multiuser: User login/logout based on BIP32 master seed, generate new master seed on visit (disabled by default to enable it just remove the disabled- from the directory name)\n - Stats: /Stats url moved to separate plugin for demonstration reasons\n - DonationMessage: Puts a little donation link to the bottom of every page (disabled by default)\n - Reworked module import system\n - Lazy user auth_address generatation\n - Allow to send prompt dialog to user from server-side\n - Update script remembers plugins enabled/disabled status\n - Multiline notifications\n - Cookie parser\n\nZeroHello in multiuser mode:\n\n - Logout button\n - Identicon generated based on logged-in user xpub address\n\n![Multiuser](data/img/multiuser.png)"
}, },
{ {
"post_id": 21, "post_id": 21,
@ -232,13 +232,13 @@
"post_id": 2, "post_id": 2,
"title": "Changelog: Jan 24, 2015", "title": "Changelog: Jan 24, 2015",
"date_published": 1422105774.057, "date_published": 1422105774.057,
"body": "* Version 0.1.6\n* Only serve .html files with wrapper frame\n* Http parameter support in url\n* Customizable background-color for wrapper in content.json\n* New Websocket API commands (only allowed on own sites):\n - fileWrite: Modify site's files in hdd from javascript\n - sitePublish: Sign new content and Publish to peers\n* Prompt value support in ZeroFrame (used for prompting privatekey for publishing in ZeroBlog)\n\n---\n\n## Previous changes:\n\n### Jan 20, 2014\n- Version 0.1.5\n- Detect computer wakeup from sleep and acts as startup (check open port, site changes)\n- Announce interval changed from 10min to 20min\n- Delete site files command support\n- Stop unfinished downloads on pause, delete\n- Confirm dialog support to WrapperApi\n\nZeroHello\n- Site Delete menuitem\n- Browser back button doesn't jumps to top\n\n### Jan 19, 2014:\n- Version 0.1.4\n- WIF compatible new private addresses\n- Proper bitcoin address verification, vanity address support: http://127.0.0.1:43110/1ZEro9ZwiZeEveFhcnubFLiN3v7tDL4bz\n- No hash error on worker kill\n- Have you secured your private key? confirmation\n\n### Jan 18, 2014:\n- Version 0.1.3\n- content.json hashing changed from sha1 to sha512 (trimmed to 256bits) for better security, keep hasing to sha1 for backward compatiblility yet\n- Fixed fileserver_port argument parsing\n- Try to ping peer before asking any command if no communication for 20min\n- Ping timeout / retry\n- Reduce websocket bw usage\n- Separate wrapper_key for websocket auth and auth_key to identify user\n- Removed unnecessary from wrapper iframe url\n\nZeroHello:\n- Compatiblilty with 0.1.3 websocket changes while maintaining backward compatibility\n- Better error report on file update fail\n\nZeroBoard:\n- Support for sha512 hashed auth_key, but keeping md5 key support for older versions yet\n\n### Jan 17, 2014:\n- Version 0.1.2\n- Better error message logging\n- Kill workers on download done\n- Retry on socket error\n- Timestamping console messages\n\n### Jan 16:\n- Version to 0.1.1\n- Version info to websocket api\n- Add publisher's zeronet version to content.json\n- Still chasing network publish problems, added more debug info\n\nZeroHello:\n- Your and the latest ZeroNet version added to top right corner (please update if you dont see it)\n" "body": "* Version 0.1.6\n* Only serve .html files with wrapper frame\n* Http parameter support in url\n* Customizable background-color for wrapper in content.json\n* New Websocket API commands (only allowed on own sites):\n - fileWrite: Modify site's files in hdd from javascript\n - sitePublish: Sign new content and Publish to peers\n* Prompt value support in ZeroFrame (used for prompting privatekey for publishing in ZeroBlog)\n\n---\n\n## Previous changes:\n\n### Jan 20, 2014\n- Version 0.1.5\n- Detect computer wakeup from sleep and acts as startup (check open port, site changes)\n- Announce interval changed from 10min to 20min\n- Delete site files command support\n- Stop unfinished downloads on pause, delete\n- Confirm dialog support to WrapperApi\n\nZeroHello\n- Site Delete menuitem\n- Browser back button doesn't jumps to top\n\n### Jan 19, 2014:\n- Version 0.1.4\n- WIF compatible new private addresses\n- Proper bitcoin address verification, vanity address support: http://127.0.0.1:43110/1ZEro9ZwiZeEveFhcnubFLiN3v7tDL4bz\n- No hash error on worker kill\n- Have you secured your private key? 
confirmation\n\n### Jan 18, 2014:\n- Version 0.1.3\n- content.json hashing changed from sha1 to sha512 (trimmed to 256bits) for better security, keep hashing to sha1 for backward compatibility yet\n- Fixed fileserver_port argument parsing\n- Try to ping peer before asking any command if no communication for 20min\n- Ping timeout / retry\n- Reduce websocket bw usage\n- Separate wrapper_key for websocket auth and auth_key to identify user\n- Removed unnecessary from wrapper iframe url\n\nZeroHello:\n- Compatibility with 0.1.3 websocket changes while maintaining backward compatibility\n- Better error report on file update fail\n\nZeroBoard:\n- Support for sha512 hashed auth_key, but keeping md5 key support for older versions yet\n\n### Jan 17, 2014:\n- Version 0.1.2\n- Better error message logging\n- Kill workers on download done\n- Retry on socket error\n- Timestamping console messages\n\n### Jan 16:\n- Version to 0.1.1\n- Version info to websocket api\n- Add publisher's zeronet version to content.json\n- Still chasing network publish problems, added more debug info\n\nZeroHello:\n- Your and the latest ZeroNet version added to top right corner (please update if you dont see it)\n"
}, },
{ {
"post_id": 1, "post_id": 1,
"title": "ZeroBlog features", "title": "ZeroBlog features",
"date_published": 1422105061, "date_published": 1422105061,
"body": "Initial version (Jan 24, 2014):\n\n* Site avatar generated by site address\n* Distraction-free inline edit: Post title, date, body, Site title, description, links\n* Post format using [markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)\n* Code block [syntax highlight](#code-highlight-demos) using [highlight.js](https://highlightjs.org/)\n* Create & Delete post\n* Sign & Publish from web\n* Fold blog post: Content after first `---` won't appear at listing\n* Shareable, friendly post urls\n\n\nTodo:\n\n* ~~Better content editor (contenteditable seemed like a good idea, but tricky support of copy/paste makes it more pain than gain)~~\n* Image upload to post & blog avatar\n* Paging\n* Searching\n* ~~Quick cheat-sheet using markdown~~\n\n---\n\n## Code highlight demos\n### Server-side site publishing (UiWebsocket.py):\n```py\ndef actionSitePublish(self, to, params):\n\tsite = self.site\n\tif not site.settings[\"own\"]: return self.response(to, \"Forbidden, you can only modify your own sites\")\n\n\t# Signing\n\tsite.loadContent(True) # Reload content.json, ignore errors to make it up-to-date\n\tsigned = site.signContent(params[0]) # Sign using private key sent by user\n\tif signed:\n\t\tself.cmd(\"notification\", [\"done\", \"Private key correct, site signed!\", 5000]) # Display message for 5 sec\n\telse:\n\t\tself.cmd(\"notification\", [\"error\", \"Site sign failed: invalid private key.\"])\n\t\tself.response(to, \"Site sign failed\")\n\t\treturn\n\tsite.loadContent(True) # Load new content.json, ignore errors\n\n\t# Publishing\n\tif not site.settings[\"serving\"]: # Enable site if paused\n\t\tsite.settings[\"serving\"] = True\n\t\tsite.saveSettings()\n\t\tsite.announce()\n\n\tpublished = site.publish(5) # Publish to 5 peer\n\n\tif published>0: # Successfuly published\n\t\tself.cmd(\"notification\", [\"done\", \"Site published to %s peers.\" % published, 5000])\n\t\tself.response(to, \"ok\")\n\t\tsite.updateWebsocket() # Send updated site data to local websocket clients\n\telse:\n\t\tif len(site.peers) == 0:\n\t\t\tself.cmd(\"notification\", [\"info\", \"No peers found, but your site is ready to access.\"])\n\t\t\tself.response(to, \"No peers found, but your site is ready to access.\")\n\t\telse:\n\t\t\tself.cmd(\"notification\", [\"error\", \"Site publish failed.\"])\n\t\t\tself.response(to, \"Site publish failed.\")\n```\n\n\n### Client-side site publish (ZeroBlog.coffee)\n```coffee\n# Sign and Publish site\npublish: =>\n\tif not @server_info.ip_external # No port open\n\t\t@cmd \"wrapperNotification\", [\"error\", \"To publish the site please open port <b>#{@server_info.fileserver_port}</b> on your router\"]\n\t\treturn false\n\t@cmd \"wrapperPrompt\", [\"Enter your private key:\", \"password\"], (privatekey) => # Prompt the private key\n\t\t$(\".publishbar .button\").addClass(\"loading\")\n\t\t@cmd \"sitePublish\", [privatekey], (res) =>\n\t\t\t$(\".publishbar .button\").removeClass(\"loading\")\n\t\t\t@log \"Publish result:\", res\n\n\treturn false # Ignore link default event\n```\n\n" "body": "Initial version (Jan 24, 2014):\n\n* Site avatar generated by site address\n* Distraction-free inline edit: Post title, date, body, Site title, description, links\n* Post format using [markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)\n* Code block [syntax highlight](#code-highlight-demos) using [highlight.js](https://highlightjs.org/)\n* Create & Delete post\n* Sign & Publish from web\n* Fold blog post: Content after first `---` won't 
appear at listing\n* Shareable, friendly post urls\n\n\nTodo:\n\n* ~~Better content editor (contenteditable seemed like a good idea, but tricky support of copy/paste makes it more pain than gain)~~\n* Image upload to post & blog avatar\n* Paging\n* Searching\n* ~~Quick cheat-sheet using markdown~~\n\n---\n\n## Code highlight demos\n### Server-side site publishing (UiWebsocket.py):\n```py\ndef actionSitePublish(self, to, params):\n\tsite = self.site\n\tif not site.settings[\"own\"]: return self.response(to, \"Forbidden, you can only modify your own sites\")\n\n\t# Signing\n\tsite.loadContent(True) # Reload content.json, ignore errors to make it up-to-date\n\tsigned = site.signContent(params[0]) # Sign using private key sent by user\n\tif signed:\n\t\tself.cmd(\"notification\", [\"done\", \"Private key correct, site signed!\", 5000]) # Display message for 5 sec\n\telse:\n\t\tself.cmd(\"notification\", [\"error\", \"Site sign failed: invalid private key.\"])\n\t\tself.response(to, \"Site sign failed\")\n\t\treturn\n\tsite.loadContent(True) # Load new content.json, ignore errors\n\n\t# Publishing\n\tif not site.settings[\"serving\"]: # Enable site if paused\n\t\tsite.settings[\"serving\"] = True\n\t\tsite.saveSettings()\n\t\tsite.announce()\n\n\tpublished = site.publish(5) # Publish to 5 peer\n\n\tif published>0: # Successfully published\n\t\tself.cmd(\"notification\", [\"done\", \"Site published to %s peers.\" % published, 5000])\n\t\tself.response(to, \"ok\")\n\t\tsite.updateWebsocket() # Send updated site data to local websocket clients\n\telse:\n\t\tif len(site.peers) == 0:\n\t\t\tself.cmd(\"notification\", [\"info\", \"No peers found, but your site is ready to access.\"])\n\t\t\tself.response(to, \"No peers found, but your site is ready to access.\")\n\t\telse:\n\t\t\tself.cmd(\"notification\", [\"error\", \"Site publish failed.\"])\n\t\t\tself.response(to, \"Site publish failed.\")\n```\n\n\n### Client-side site publish (ZeroBlog.coffee)\n```coffee\n# Sign and Publish site\npublish: =>\n\tif not @server_info.ip_external # No port open\n\t\t@cmd \"wrapperNotification\", [\"error\", \"To publish the site please open port <b>#{@server_info.fileserver_port}</b> on your router\"]\n\t\treturn false\n\t@cmd \"wrapperPrompt\", [\"Enter your private key:\", \"password\"], (privatekey) => # Prompt the private key\n\t\t$(\".publishbar .button\").addClass(\"loading\")\n\t\t@cmd \"sitePublish\", [privatekey], (res) =>\n\t\t\t$(\".publishbar .button\").removeClass(\"loading\")\n\t\t\t@log \"Publish result:\", res\n\n\treturn false # Ignore link default event\n```\n\n"
} }
] ]
} }

@@ -103,7 +103,7 @@ class TorManager(object):
         self.enabled = True
         if self.connect():
             if self.isSubprocessRunning():
-                self.request("TAKEOWNERSHIP")  # Shut down Tor client when controll connection closed
+                self.request("TAKEOWNERSHIP")  # Shut down Tor client when controller connection closed
             break
         # Terminate on exit
         atexit.register(self.stopTor)
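For context, `TAKEOWNERSHIP` is a standard Tor control-port command: once it has been issued, Tor shuts itself down as soon as the control connection that sent it closes, which is why the client keeps that connection open for its whole lifetime. A minimal sketch of the exchange over a raw socket (the 127.0.0.1:9051 address and the unauthenticated `AUTHENTICATE` call are illustrative assumptions, not ZeroNet's actual connection handling):

```py
import socket

# Tie a local Tor daemon's lifetime to this control connection.
# Assumes ControlPort 9051 with no authentication, purely for illustration;
# real deployments use cookie or password authentication.
sock = socket.create_connection(("127.0.0.1", 9051))
sock.sendall(b"AUTHENTICATE\r\n")
assert sock.recv(1024).startswith(b"250")  # "250 OK" on success
sock.sendall(b"TAKEOWNERSHIP\r\n")
assert sock.recv(1024).startswith(b"250")
# From here on, Tor exits automatically once `sock` is closed.
```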

@@ -56,7 +56,7 @@ class UiRequest(object):
         self.server = server
         self.log = server.log
         self.get = get  # Get parameters
-        self.env = env  # Enviroment settings
+        self.env = env  # Environment settings
         # ['CONTENT_LENGTH', 'CONTENT_TYPE', 'GATEWAY_INTERFACE', 'HTTP_ACCEPT', 'HTTP_ACCEPT_ENCODING', 'HTTP_ACCEPT_LANGUAGE',
         # 'HTTP_COOKIE', 'HTTP_CACHE_CONTROL', 'HTTP_HOST', 'HTTP_HTTPS', 'HTTP_ORIGIN', 'HTTP_PROXY_CONNECTION', 'HTTP_REFERER',
         # 'HTTP_USER_AGENT', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_PORT', 'REQUEST_METHOD', 'SCRIPT_NAME',
@@ -127,8 +127,8 @@ class UiRequest(object):
         # Prepend .bit host for transparent proxy
         if self.isDomain(self.env.get("HTTP_HOST")):
             path = re.sub("^/", "/" + self.env.get("HTTP_HOST") + "/", path)
-        path = re.sub("^http://zero[/]+", "/", path)  # Remove begining http://zero/ for chrome extension
-        path = re.sub("^http://", "/", path)  # Remove begining http for chrome extension .bit access
+        path = re.sub("^http://zero[/]+", "/", path)  # Remove beginning http://zero/ for chrome extension
+        path = re.sub("^http://", "/", path)  # Remove beginning http for chrome extension .bit access

         # Sanitize request url
         path = path.replace("\\", "/")
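The order of the rewrites above matters: the virtual host is mapped into the path first, then leftover scheme prefixes from the Chrome extension are stripped, and backslashes are sanitized last. A self-contained sketch of that pipeline (the standalone function, the `.bit` suffix test standing in for `isDomain()`, and the sample inputs are all illustrative assumptions):

```py
import re

def normalize_path(path, http_host):
    if http_host and http_host.endswith(".bit"):  # stand-in for isDomain()
        path = re.sub("^/", "/" + http_host + "/", path)  # map host into path
    path = re.sub("^http://zero[/]+", "/", path)  # http://zero/X -> /X
    path = re.sub("^http://", "/", path)          # http://X -> /X
    return path.replace("\\", "/")                # sanitize backslashes

print(normalize_path("/index.html", "example.bit"))       # /example.bit/index.html
print(normalize_path("http://zero/1Addr/", "127.0.0.1"))  # /1Addr/
```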

@@ -1043,7 +1043,7 @@ class UiWebsocket(object):
     def actionSiteListModifiedFiles(self, to, content_inner_path="content.json"):
         content = self.site.content_manager.contents.get(content_inner_path)
         if not content:
-            return {"error": "content file not avaliable"}
+            return {"error": "content file not available"}

         min_mtime = content.get("modified", 0)
         site_path = self.site.storage.directory

@@ -471,7 +471,7 @@ class Wrapper
         if @inner_loaded  # Update site info
             @reloadSiteInfo()

-        # If inner frame not loaded for 2 sec show peer informations on loading screen by loading site info
+        # If inner frame not loaded for 2 sec show peer information on loading screen by loading site info
         setTimeout (=>
             if not @site_info then @reloadSiteInfo()
         ), 2000

@@ -37,7 +37,7 @@ class UserManager(object):
                 added += 1
             user_found.append(master_address)

-        # Remove deleted adresses
+        # Remove deleted addresses
        for master_address in list(self.users.keys()):
            if master_address not in user_found:
                del(self.users[master_address])
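The `list(...)` snapshot around `self.users.keys()` is what makes the in-loop `del` safe: iterating the live keys view while deleting from the dict raises `RuntimeError: dictionary changed size during iteration` on Python 3. A toy illustration with made-up data:

```py
users = {"1Abc": "kept user", "1Def": "stale user"}
user_found = ["1Abc"]

# Iterate over a snapshot of the keys so entries can be deleted as we go
for master_address in list(users.keys()):
    if master_address not in user_found:
        del users[master_address]

print(users)  # {'1Abc': 'kept user'}
```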

@@ -52,7 +52,7 @@ class WorkerManager(object):
             if not self.tasks:
                 continue

-            tasks = self.tasks[:]  # Copy it so removing elements wont cause any problem
+            tasks = self.tasks[:]  # Copy it so removing elements won't cause any problem
             num_tasks_started = len([task for task in tasks if task["time_started"]])

             self.log.debug(

@@ -183,12 +183,12 @@ iv = "InitializationVe"

 ciphertext = ''

-# We can encrypt one line at a time, regardles of length
+# We can encrypt one line at a time, regardless of length
 encrypter = pyaes.Encrypter(pyaes.AESModeOfOperationCBC(key, iv))
 for line in file('/etc/passwd'):
     ciphertext += encrypter.feed(line)

-# Make a final call to flush any remaining bytes and add paddin
+# Make a final call to flush any remaining bytes and add padding
 ciphertext += encrypter.feed()

 # We can decrypt the cipher text in chunks (here we split it in half)
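Note that the snippet above comes from the vendored pyaes documentation and is Python 2 code (`file()` no longer exists in Python 3). A self-contained Python 3 round trip in the same style, using toy key/IV values chosen purely for illustration:

```py
import pyaes

key = b"ThisIsA16ByteKey"  # demo value; AES-128 needs exactly 16 bytes
iv = b"InitializationVe"   # demo value; CBC needs a 16-byte IV

encrypter = pyaes.Encrypter(pyaes.AESModeOfOperationCBC(key, iv))
ciphertext = b""
for chunk in (b"line one\n", b"line two\n"):  # encrypt piece by piece
    ciphertext += encrypter.feed(chunk)
ciphertext += encrypter.feed()  # flush: emits the final padded block

decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv))
plaintext = b""
half = len(ciphertext) // 2
for chunk in (ciphertext[:half], ciphertext[half:]):  # decrypt in chunks
    plaintext += decrypter.feed(chunk)
plaintext += decrypter.feed()  # flush: strips the padding

assert plaintext == b"line one\nline two\n"
```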

@@ -519,7 +519,7 @@ class AESModeOfOperationOFB(AESStreamModeOfOperation):
         return _bytes_to_string(encrypted)

     def decrypt(self, ciphertext):
-        # AES-OFB is symetric
+        # AES-OFB is symmetric
         return self.encrypt(ciphertext)

@@ -530,9 +530,9 @@ class AESModeOfOperationCTR(AESStreamModeOfOperation):
        o A stream-cipher, so input does not need to be padded to blocks,
          allowing arbitrary length data.
        o The counter must be the same size as the key size (ie. len(key))
-       o Each block independant of the other, so a corrupt byte will not
+       o Each block independent of the other, so a corrupt byte will not
          damage future blocks.
-       o Each block has a uniue counter value associated with it, which
+       o Each block has a unique counter value associated with it, which
          contributes to the encrypted value, so no data patterns are
          leaked.
        o Also known as: Counter Mode (CM), Integer Counter Mode (ICM) and
@@ -575,7 +575,7 @@ class AESModeOfOperationCTR(AESStreamModeOfOperation):
         return _bytes_to_string(encrypted)

     def decrypt(self, crypttext):
-        # AES-CTR is symetric
+        # AES-CTR is symmetric
         return self.encrypt(crypttext)
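"Symmetric" here means decryption runs the exact same keystream operation as encryption, so a decryptor is simply an encryptor seeded with an identical counter. A short pyaes sketch (toy key and counter value, for illustration only):

```py
import pyaes

key = b"ThisIsA16ByteKey"  # demo value; AES-128 needs exactly 16 bytes

enc = pyaes.AESModeOfOperationCTR(key, counter=pyaes.Counter(initial_value=100))
ciphertext = enc.encrypt(b"any length works in CTR mode")  # no padding needed

# decrypt() just calls encrypt(); only the counter seed has to match
dec = pyaes.AESModeOfOperationCTR(key, counter=pyaes.Counter(initial_value=100))
assert dec.decrypt(ciphertext) == b"any length works in CTR mode"
```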

@@ -242,13 +242,13 @@ class Actions(object):
         SiteManager.site_manager.load()

         s = time.time()
-        logging.info("Verifing site: %s..." % address)
+        logging.info("Verifying site: %s..." % address)
         site = Site(address)
         bad_files = []

         for content_inner_path in site.content_manager.contents:
             s = time.time()
-            logging.info("Verifing %s signature..." % content_inner_path)
+            logging.info("Verifying %s signature..." % content_inner_path)
             err = None
             try:
                 file_correct = site.content_manager.verifyFile(
@@ -427,7 +427,7 @@ class Actions(object):
         # Started fileserver
         file_server.portCheck()

-        if peer_ip:  # Announce ip specificed
+        if peer_ip:  # Announce ip specified
             site.addPeer(peer_ip, peer_port)
         else:  # Just ask the tracker
             logging.info("Gathering peers from tracker")
@@ -563,7 +563,7 @@ class Actions(object):
         test_names = [funcToName(name) for name in dir(self) if name.startswith("test") and name != "test"]

         if not test_name:
-            # No test specificed, list tests
+            # No test specified, list tests
             print("\nNo test specified, possible tests:")
             for test_name in test_names:
                 func_name = "test" + test_name[0].upper() + test_name[1:]

@@ -1,6 +1,6 @@
 # CoffeeScript compiler for Windows

-A simple command-line utilty for Windows that will compile `*.coffee` files to JavaScript `*.js` files using [CoffeeScript](http://jashkenas.github.com/coffee-script/) and the venerable Windows Script Host, ubiquitous on Windows since the 90s.
+A simple command-line utility for Windows that will compile `*.coffee` files to JavaScript `*.js` files using [CoffeeScript](http://jashkenas.github.com/coffee-script/) and the venerable Windows Script Host, ubiquitous on Windows since the 90s.

 ## Usage