Separate site cleanup and announce thread
This commit is contained in:
parent
353ddd3105
commit
ebd00bfb9f
1 changed files with 54 additions and 29 deletions
|
@ -201,6 +201,45 @@ class FileServer(ConnectionServer):
|
||||||
if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
|
if site.settings.get("modified", 0) < time.time() - 60 * 60 * 24: # Not so active site, wait some sec to finish
|
||||||
check_thread.join(timeout=10)
|
check_thread.join(timeout=10)
|
||||||
|
|
||||||
|
def cleanupSites(self):
    """Background maintenance loop for served sites, run every 20 minutes.

    Two passes over self.sites per cycle:
      1. peer cleanup (skipped on the first cycle, since startup already did it),
      2. PEX announce, bad-file retry and connection keep-alive.

    Runs forever; intended to be launched with gevent.spawn. Returns nothing.
    """
    import gc
    startup = True
    time.sleep(5 * 60)  # Sites already cleaned up on startup
    while 1:
        # Sites health care every 20 min
        self.log.debug(
            "Running site cleanup, connections: %s, internet: %s" %
            (len(self.connections), self.has_internet)
        )

        for address, site in self.sites.items():
            if not site.settings["serving"]:
                continue

            if not startup:
                site.cleanupPeers()

            time.sleep(1)  # Prevent too quick request

        for address, site in self.sites.items():
            if not site.settings["serving"]:
                continue

            if site.peers:
                # Bounded PEX so one slow site can't stall the whole cycle;
                # exception=False makes the timeout silent instead of raising.
                with gevent.Timeout(10, exception=False):
                    site.announcePex()

            # Retry failed files
            if site.bad_files:
                site.retryBadFiles()

            if not startup:  # Don't do it at start up because checkSite already has needConnections at start up.
                site.needConnections(check_site_on_reconnect=True)  # Keep active peer connection to get the updates

            time.sleep(1)  # Prevent too quick request

        site = None  # Drop the loop reference so gc can reclaim the last site
        gc.collect()  # Implicit garbage collection
        startup = False
        time.sleep(60 * 20)
||||||
def trackersFileReloader(self):
|
def trackersFileReloader(self):
|
||||||
while 1:
|
while 1:
|
||||||
config.loadTrackersFile()
|
config.loadTrackersFile()
|
||||||
|
@ -208,42 +247,27 @@ class FileServer(ConnectionServer):
|
||||||
|
|
||||||
# Announce sites every 20 min
def announceSites(self):
    """Tracker-announce loop: one announce pass per tracker slot, paced so
    that all trackers in config.trackers are queried once per 20 minutes,
    evenly distributed.

    Also spawns the trackers-file reloader (if configured) and refreshes
    hashfields / connections for own and recently-modified sites.
    Runs forever; intended to be launched with gevent.spawn. Returns nothing.
    """
    if config.trackers_file:
        gevent.spawn(self.trackersFileReloader)

    while 1:
        s = time.time()
        for address, site in self.sites.items():
            if not site.settings["serving"]:
                continue

            site.announce(mode="update", pex=False)

            # "Active" = modified within the last 24h
            active_site = time.time() - site.settings.get("modified", 0) < 24 * 60 * 60
            if site.settings["own"] or active_site:
                # Check connections more frequently on own and active sites to speed-up first connections
                site.needConnections(check_site_on_reconnect=True)

            site.sendMyHashfield(3)
            site.updateHashfield(3)
            time.sleep(1)  # Prevent too quick request

        taken = time.time() - s

        # Query all trackers one-by-one in 20 minutes evenly distributed;
        # subtract the time the pass itself took, never sleep negative.
        sleep = max(0, 60 * 20 / len(config.trackers) - taken)

        self.log.debug("Site announce tracker done in %.3fs, sleeping for %ss..." % (taken, sleep))
        time.sleep(sleep)
|
||||||
# Detects if computer back from wakeup
|
# Detects if computer back from wakeup
|
||||||
def wakeupWatcher(self):
|
def wakeupWatcher(self):
|
||||||
|
@ -273,6 +297,7 @@ class FileServer(ConnectionServer):
|
||||||
gevent.spawn(self.checkSites)
|
gevent.spawn(self.checkSites)
|
||||||
|
|
||||||
thread_announce_sites = gevent.spawn(self.announceSites)
|
thread_announce_sites = gevent.spawn(self.announceSites)
|
||||||
|
thread_cleanup_sites = gevent.spawn(self.cleanupSites)
|
||||||
thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
|
thread_wakeup_watcher = gevent.spawn(self.wakeupWatcher)
|
||||||
|
|
||||||
ConnectionServer.start(self)
|
ConnectionServer.start(self)
|
||||||
|
|
Loading…
Reference in a new issue