summaryrefslogtreecommitdiffstats
path: root/NohGooee
diff options
context:
space:
mode:
Diffstat (limited to 'NohGooee')
-rw-r--r--NohGooee/BeautifulSupe.py132
-rw-r--r--NohGooee/Choker.py157
-rw-r--r--NohGooee/ClientIdentifier.py157
-rw-r--r--NohGooee/Connecter.py334
-rw-r--r--NohGooee/ConvertedMetainfo.py283
-rw-r--r--NohGooee/CurrentRateMeasure.py48
-rw-r--r--NohGooee/Desktop.py33
-rw-r--r--NohGooee/Downloader.py363
-rw-r--r--NohGooee/DownloaderFeedback.py139
-rw-r--r--NohGooee/Encoder.py286
-rw-r--r--NohGooee/GUI.py679
-rw-r--r--NohGooee/GetTorrent.py94
-rw-r--r--NohGooee/IPC.py441
-rw-r--r--NohGooee/LaunchPath.py54
-rw-r--r--NohGooee/NatCheck.py146
-rw-r--r--NohGooee/NatTraversal.py757
-rw-r--r--NohGooee/NewVersion.py281
-rw-r--r--NohGooee/PeerID.py28
-rw-r--r--NohGooee/PiecePicker.py138
-rw-r--r--NohGooee/RateLimiter.py190
-rw-r--r--NohGooee/RateMeasure.py63
-rw-r--r--NohGooee/Rerequester.py293
-rw-r--r--NohGooee/StatusLight.py108
-rw-r--r--NohGooee/Storage.py274
-rw-r--r--NohGooee/StorageWrapper.py408
-rw-r--r--NohGooee/TorrentQueue.py848
-rw-r--r--NohGooee/TrayIcon.py95
-rw-r--r--NohGooee/Uploader.py97
-rw-r--r--NohGooee/__init__.py49
-rw-r--r--NohGooee/bencode.py134
-rw-r--r--NohGooee/bitfield.py75
-rw-r--r--NohGooee/btformats.py140
-rw-r--r--NohGooee/configfile.py222
-rw-r--r--NohGooee/defaultargs.py306
-rw-r--r--NohGooee/defer.py56
-rw-r--r--NohGooee/download.py591
-rw-r--r--NohGooee/language.py202
-rw-r--r--NohGooee/launchmanycore.py261
-rw-r--r--NohGooee/makemetafile.py260
-rw-r--r--NohGooee/parseargs.py178
-rw-r--r--NohGooee/parsedir.py150
-rw-r--r--NohGooee/platform.py390
-rw-r--r--NohGooee/prefs.py89
-rw-r--r--NohGooee/selectpoll.py68
-rw-r--r--NohGooee/track.py876
-rw-r--r--NohGooee/zurllib.py269
46 files changed, 11242 insertions, 0 deletions
diff --git a/NohGooee/BeautifulSupe.py b/NohGooee/BeautifulSupe.py
new file mode 100644
index 0000000..79072d4
--- /dev/null
+++ b/NohGooee/BeautifulSupe.py
@@ -0,0 +1,132 @@
+# A very very minimal BeautifulSoup imitation.
+#
+# BS uses SGMLlib to parse, which converts everything to lower case.
+# This uses real xml parsing to mimic the parts of BS we use.
+
+import xml.dom.minidom
+
+def _getText(node):
+ nodelist = node.childNodes
+ rc = []
+ for node in nodelist:
+ if node.nodeType == node.TEXT_NODE:
+ rc.append(str(node.data))
+ return rc
+
+def _getNodesAsTags(root):
+ nodelist = root.childNodes
+ tags = []
+ for node in nodelist:
+ if node.nodeType == node.ELEMENT_NODE:
+ tags.append(Tag(node))
+ return tags
+
+class Tag(object):
+ def __init__(self, node):
+ self.node = node
+ self.name = node.nodeName
+ self.contents = _getNodesAsTags(self.node)
+ text = _getText(self.node)
+ self.contents += text
+ self.text = ''.join(text)
+
+ def child_elements(self):
+ children = []
+ for tag in self.contents:
+ if isinstance(tag, Tag):
+ children.append(tag)
+ return children
+
+ def get(self, tagname):
+ got = self.first(tagname)
+ if got:
+ return got.text
+
+ def first(self, tagname):
+ found = None
+
+ for tag in self.contents:
+ if isinstance(tag, Tag):
+ if tag.name == tagname:
+ found = tag
+ break
+
+ return found
+
+class BeautifulSupe(object):
+
+ def __init__(self, data):
+ #please don't give us your null terminators
+ data = data.strip(chr(0))
+ self.dom = xml.dom.minidom.parseString(data)
+
+ def first(self, tagname, root = None):
+ found = None
+ if root == None:
+ e = self.dom.getElementsByTagName(tagname)
+ if len(e) > 0:
+ found = e[0]
+ else:
+ for node in root.childNodes:
+ if node.nodeName == tagname:
+ found = node
+ break
+
+ if not found:
+ return None
+
+ tag = Tag(found)
+ return tag
+
+ def fetch(self, tagname, restraints = {}):
+ e = self.dom.getElementsByTagName(tagname)
+
+ matches = []
+
+ for node in e:
+ match = 1
+
+ for restraint in restraints:
+ f = self.first(restraint, node)
+ if not f:
+ match = 0
+ break
+ text = restraints[restraint]
+ if not f.contents[0].startswith(text):
+ match = 0
+ break
+
+ if match:
+ tag = Tag(node)
+ matches.append(tag)
+
+ return matches
+
+
+ def scour(self, prefix, suffix = None, node = None):
+ if node is None:
+ root = self.dom.getElementsByTagName(self.dom.documentElement.tagName)[0]
+ node = root
+
+ matches = []
+
+ for node in node.childNodes:
+ match = 0
+
+ name = node.nodeName
+
+ if name.startswith(prefix):
+ if suffix:
+ if name.endswith(suffix):
+ match = 1
+ else:
+ match = 1
+
+ if match:
+ tag = Tag(node)
+ matches.append(tag)
+
+ matches += self.scour(prefix, suffix, node)
+
+ return matches
+
diff --git a/NohGooee/Choker.py b/NohGooee/Choker.py
new file mode 100644
index 0000000..ed3d500
--- /dev/null
+++ b/NohGooee/Choker.py
@@ -0,0 +1,157 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from random import randrange
+from math import sqrt
+
+class Choker(object):
+
+ def __init__(self, config, schedule, done = lambda: False):
+ self.config = config
+ self.schedule = schedule
+ self.connections = []
+ self.count = 0
+ self.done = done
+ self.unchokes_since_last = 0
+ schedule(self._round_robin, 10)
+
+ def _round_robin(self):
+ self.schedule(self._round_robin, 10)
+ self.count += 1
+ if self.done():
+ self._rechoke_seed(True)
+ return
+ if self.count % 3 == 0:
+ for i in xrange(len(self.connections)):
+ u = self.connections[i].upload
+ if u.choked and u.interested:
+ self.connections = self.connections[i:] + self.connections[:i]
+ break
+ self._rechoke()
+
+ def _rechoke(self):
+ if self.done():
+ self._rechoke_seed()
+ return
+ preferred = []
+ for i in xrange(len(self.connections)):
+ c = self.connections[i]
+ if c.upload.interested and not c.download.is_snubbed() and c.download.have.numfalse:
+ preferred.append((-c.download.get_rate(), i))
+ preferred.sort()
+ prefcount = min(len(preferred), self._max_uploads() -1)
+ mask = [0] * len(self.connections)
+ for _, i in preferred[:prefcount]:
+ mask[i] = 1
+ count = max(1, self.config['min_uploads'] - prefcount)
+ for i in xrange(len(self.connections)):
+ c = self.connections[i]
+ u = c.upload
+ if mask[i]:
+ u.unchoke(self.count)
+ elif count > 0 and c.download.have.numfalse:
+ u.unchoke(self.count)
+ if u.interested:
+ count -= 1
+ else:
+ u.choke()
+
+ def _rechoke_seed(self, force_new_unchokes = False):
+ if force_new_unchokes:
+ # number of unchokes per 30 second period
+ i = (self._max_uploads() + 2) // 3
+ # this is called 3 times in 30 seconds, if i==4 then unchoke 1+1+2
+        # and so on; subtract unchokes recently triggered by disconnects
+ num_force_unchokes = max(0, (i + self.count % 3) // 3 - \
+ self.unchokes_since_last)
+ else:
+ num_force_unchokes = 0
+ preferred = []
+ new_limit = self.count - 3
+ for i in xrange(len(self.connections)):
+ c = self.connections[i]
+ u = c.upload
+ if not u.choked and u.interested and c.download.have.numfalse:
+ if u.unchoke_time > new_limit or (
+ u.buffer and c.connection.is_flushed()):
+ preferred.append((-u.unchoke_time, -u.get_rate(), i))
+ else:
+ preferred.append((1, -u.get_rate(), i))
+ num_kept = self._max_uploads() - num_force_unchokes
+ assert num_kept >= 0
+ preferred.sort()
+ preferred = preferred[:num_kept]
+ mask = [0] * len(self.connections)
+ for _, _, i in preferred:
+ mask[i] = 1
+ num_nonpref = self._max_uploads() - len(preferred)
+ if force_new_unchokes:
+ self.unchokes_since_last = 0
+ else:
+ self.unchokes_since_last += num_nonpref
+ last_unchoked = None
+ for i in xrange(len(self.connections)):
+ c = self.connections[i]
+ u = c.upload
+ if not mask[i]:
+ if not u.interested:
+ u.choke()
+ elif u.choked:
+ if num_nonpref > 0 and c.connection.is_flushed() and c.download.have.numfalse:
+ u.unchoke(self.count)
+ num_nonpref -= 1
+ if num_nonpref == 0:
+ last_unchoked = i
+ else:
+ if num_nonpref == 0 or not c.download.have.numfalse:
+ u.choke()
+ else:
+ num_nonpref -= 1
+ if num_nonpref == 0:
+ last_unchoked = i
+ if last_unchoked is not None:
+ self.connections = self.connections[last_unchoked + 1:] + \
+ self.connections[:last_unchoked + 1]
+
+ def connection_made(self, connection):
+ p = randrange(len(self.connections) + 1)
+ self.connections.insert(p, connection)
+
+ def connection_lost(self, connection):
+ self.connections.remove(connection)
+ if connection.upload.interested and not connection.upload.choked:
+ self._rechoke()
+
+ def interested(self, connection):
+ if not connection.upload.choked:
+ self._rechoke()
+
+ def not_interested(self, connection):
+ if not connection.upload.choked:
+ self._rechoke()
+
+ def _max_uploads(self):
+ uploads = self.config['max_uploads']
+ rate = self.config['max_upload_rate']
+ if uploads > 0:
+ pass
+ elif rate <= 0:
+ uploads = 7 # unlimited, just guess something here...
+ elif rate < 9:
+ uploads = 2
+ elif rate < 15:
+ uploads = 3
+ elif rate < 42:
+ uploads = 4
+ else:
+ uploads = int(sqrt(rate * .6))
+ return uploads
diff --git a/NohGooee/ClientIdentifier.py b/NohGooee/ClientIdentifier.py
new file mode 100644
index 0000000..cec2fa5
--- /dev/null
+++ b/NohGooee/ClientIdentifier.py
@@ -0,0 +1,157 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# Written by Matt Chisholm
+# Client list updated by Ed Savage-Jones - May 28th 2005
+
+import re
+
+v64p = '[\da-zA-Z.-]{3}'
+
+matches = (
+ ('-AZ(?P<version>\d+)-+.+$' , "Azureus" ),
+ ('M(?P<version>\d-\d-\d)--.+$' , "BitTorrent" ),
+ ('T(?P<version>%s)0?-+.+$'%v64p , "BitTornado" ),
+ ('-UT(?P<version>[\dA-F]+)-+.+$' , u"\xb5Torrent" ),
+ ('-TS(?P<version>\d+)-+.+$' , "TorrentStorm" ),
+ ('exbc(?P<bcver>.+)LORD.+$' , "BitLord" ),
+ ('exbc(?P<bcver>[^-][^-]+)(?!---).+$', "BitComet" ),
+ ('-BC0(?P<version>\d+)-.+$' , "BitComet" ),
+ ('FUTB(?P<bcver>.+).+$' , "BitComet Mod1" ),
+ ('xUTB(?P<bcver>.+).+$' , "BitComet Mod2" ),
+ ('A(?P<version>%s)-+.+$'%v64p , "ABC" ),
+ ('S(?P<version>%s)-+.+$'%v64p , "Shadow's" ),
+ (chr(0)*12 + 'aa.+$' , "Experimental 3.2.1b2" ),
+ (chr(0)*12 + '.+$' , "BitTorrent (obsolete)"),
+ ('-G3.+$' , "G3Torrent" ),
+ ('-[Ll][Tt](?P<version>\d+)-+.+$' , "libtorrent" ),
+ ('Mbrst(?P<version>\d-\d-\d).+$' , "burst!" ),
+ ('eX.+$' , "eXeem" ),
+ ('\x00\x02BS.+(?P<strver>UDP0|HTTPBT)$', "BitSpirit v2" ),
+ ('\x00[\x02|\x00]BS.+$' , "BitSpirit v2" ),
+ ('.*(?P<strver>UDP0|HTTPBT)$' , "BitSpirit" ),
+ ('-BOWP?(?P<version>[\dA-F]+)-.+$', "Bits on Wheels" ),
+ ('(?P<rsver>.+)RSAnonymous.+$' , "Rufus Anonymous" ),
+ ('(?P<rsver>.+)RS.+$' , "Rufus" ),
+ ('-ML(?P<version>(\d\.)+\d)(?:\.(?P<strver>CVS))?-+.+$',"MLDonkey"),
+ ('346------.+$' , "TorrentTopia 1.70" ),
+ ('OP(?P<strver>\d{4}).+$' , "Opera" ),
+ ('-KT(?P<version>\d+)(?P<rc>R\d+)-+.+$', "KTorrent" ),
+# Unknown but seen in peer lists:
+ ('-S(?P<version>10059)-+.+$' , "S (unknown)" ),
+ ('-TR(?P<version>\d+)-+.+$' , "TR (unknown)" ),
+ ('S\x05\x07\x06\x00{7}.+' , "S 576 (unknown)" ),
+# Clients I've never actually seen in a peer list:
+ ('exbc..---.+$' , "BitVampire 1.3.1" ),
+ ('-BB(?P<version>\d+)-+.+$' , "BitBuddy" ),
+ ('-CT(?P<version>\d+)-+.+$' , "CTorrent" ),
+ ('-MT(?P<version>\d+)-+.+$' , "MoonlightTorrent" ),
+ ('-BX(?P<version>\d+)-+.+$' , "BitTorrent X" ),
+ ('-TN(?P<version>\d+)-+.+$' , "TorrentDotNET" ),
+ ('-SS(?P<version>\d+)-+.+$' , "SwarmScope" ),
+ ('-XT(?P<version>\d+)-+.+$' , "XanTorrent" ),
+ ('U(?P<version>\d+)-+.+$' , "UPnP NAT Bit Torrent" ),
+ ('-AR(?P<version>\d+)-+.+$' , "Arctic" ),
+ ('(?P<rsver>.+)BM.+$' , "BitMagnet" ),
+ ('BG(?P<version>\d+).+$' , "BTGetit" ),
+ ('-eX(?P<version>[\dA-Fa-f]+)-.+$',"eXeem beta" ),
+ ('Plus12(?P<rc>[\dR]+)-.+$' , "Plus! II" ),
+ ('XBT(?P<version>\d+)[d-]-.+$' , "XBT" ),
+ ('-ZT(?P<version>\d+)-+.+$' , "ZipTorrent" ),
+ ('-BitE\?(?P<version>\d+)-.+$' , "BitEruct" ),
+ ('O(?P<version>%s)-+.+$'%v64p , "Osprey Permaseed" ),
+# Guesses based on Rufus source code, never seen in the wild:
+ ('-BS(?P<version>\d+)-+.+$' , "BTSlave" ),
+ ('-SB(?P<version>\d+)-+.+$' , "SwiftBit" ),
+ ('-SN(?P<version>\d+)-+.+$' , "ShareNET" ),
+ ('-bk(?P<version>\d+)-+.+$' , "BitKitten" ),
+ ('-SZ(?P<version>\d+)-+.+$' , "Shareaza" ),
+ ('-MP(?P<version>\d+)-+.+$' , "MooPolice" ),
+ ('Deadman Walking-.+$' , "Deadman" ),
+ ('270------.+$' , "GreedBT 2.7.0" ),
+ ('XTORR302.+$' , "TorrenTres 0.0.2" ),
+ ('turbobt(?P<version>\d\.\d).+$' , "TurboBT" ),
+ ('DansClient.+$' , "XanTorrent" ),
+ ('-PO(?P<version>\d+)-+.+$' , "PO (unknown)" ),
+ ('-UR(?P<version>\d+)-+.+$' , "UR (unknown)" ),
+# Patterns that should be executed last
+ ('.*Azureus.*' , "Azureus 2.0.3.2" ),
+ )
+
+matches = [(re.compile(pattern, re.DOTALL), name) for pattern, name in matches]
+
+unknown_clients = {}
+
+def identify_client(peerid, log=None):
+ client = 'unknown'
+ version = ''
+ for pat, name in matches:
+ m = pat.match(peerid)
+ if m:
+ client = name
+ d = m.groupdict()
+ if d.has_key('version'):
+ version = d['version']
+ version = version.replace('-','.')
+ if version.find('.') >= 0:
+ version = ''.join(version.split('.'))
+
+ version = list(version)
+ for i,c in enumerate(version):
+ if '0' <= c <= '9':
+ version[i] = c
+ elif 'A' <= c <= 'Z':
+ version[i] = str(ord(c) - 55)
+ elif 'a' <= c <= 'z':
+ version[i] = str(ord(c) - 61)
+ elif c == '.':
+ version[i] = '62'
+ elif c == '-':
+ version[i] = '63'
+ else:
+ break
+ version = '.'.join(version)
+ elif d.has_key('bcver'):
+ bcver = d['bcver']
+ version += str(ord(bcver[0])) + '.'
+ if len(bcver) > 1:
+ version += str(ord(bcver[1])/10)
+ version += str(ord(bcver[1])%10)
+ elif d.has_key('rsver'):
+ rsver = d['rsver']
+ version += str(ord(rsver[0])) + '.'
+ if len(rsver) > 1:
+ version += str(ord(rsver[1])/10) + '.'
+ version += str(ord(rsver[1])%10)
+ if d.has_key('strver'):
+ if d['strver'] is not None:
+ version += d['strver']
+ if d.has_key('rc'):
+ rc = 'RC ' + d['rc'][1:]
+ if version:
+ version += ' '
+ version += rc
+ break
+ if client == 'unknown':
+ # identify Shareaza 2.0 - 2.1
+ if len(peerid) == 20 and chr(0) not in peerid[:15]:
+ for i in range(16,20):
+ if ord(peerid[i]) != (ord(peerid[i - 16]) ^ ord(peerid[31 - i])):
+ break
+ else:
+ client = "Shareaza"
+
+
+ if log is not None and 'unknown' in client:
+ if not unknown_clients.has_key(peerid):
+ unknown_clients[peerid] = True
+ log.write('%s\n'%peerid)
+ log.write('------------------------------\n')
+ return client, version
diff --git a/NohGooee/Connecter.py b/NohGooee/Connecter.py
new file mode 100644
index 0000000..830fd82
--- /dev/null
+++ b/NohGooee/Connecter.py
@@ -0,0 +1,334 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Originally written by Bram Cohen, heavily modified by Uoti Urpala
+
+from binascii import b2a_hex
+from struct import pack, unpack
+
+from NohGooee.RawServer_magic import Handler
+from NohGooee.bitfield import Bitfield
+
+def toint(s):
+ return unpack("!i", s)[0]
+
+def tobinary(i):
+ return pack("!i", i)
+
+CHOKE = chr(0)
+UNCHOKE = chr(1)
+INTERESTED = chr(2)
+NOT_INTERESTED = chr(3)
+# index
+HAVE = chr(4)
+# index, bitfield
+BITFIELD = chr(5)
+# index, begin, length
+REQUEST = chr(6)
+# index, begin, piece
+PIECE = chr(7)
+# index, begin, piece
+CANCEL = chr(8)
+
+# 2-byte port message
+PORT = chr(9)
+
+# reserved flags
+DHT = 1
+FLAGS = '\0' * 7 + '\1'
+protocol_name = 'BitTorrent protocol'
+
+
+class Connection(Handler):
+
+ def __init__(self, encoder, connection, id, is_local):
+ self.encoder = encoder
+ self.connection = connection
+ self.connection.handler = self
+ self.id = id
+ self.ip = connection.ip
+ self.locally_initiated = is_local
+ self.complete = False
+ self.closed = False
+ self.got_anything = False
+ self.next_upload = None
+ self.upload = None
+ self.download = None
+ self._buffer = []
+ self._buffer_len = 0
+ self._reader = self._read_messages()
+ self._next_len = self._reader.next()
+ self._partial_message = None
+ self._outqueue = []
+ self.choke_sent = True
+ self.uses_dht = False
+ self.dht_port = None
+ if self.locally_initiated:
+ connection.write(chr(len(protocol_name)) + protocol_name +
+ FLAGS + self.encoder.download_id)
+ if self.id is not None:
+ connection.write(self.encoder.my_id)
+
+ def close(self):
+ if not self.closed:
+ self.connection.close()
+ self._sever()
+
+ def send_interested(self):
+ self._send_message(INTERESTED)
+
+ def send_not_interested(self):
+ self._send_message(NOT_INTERESTED)
+
+ def send_choke(self):
+ if self._partial_message is None:
+ self._send_message(CHOKE)
+ self.choke_sent = True
+ self.upload.sent_choke()
+
+ def send_unchoke(self):
+ if self._partial_message is None:
+ self._send_message(UNCHOKE)
+ self.choke_sent = False
+
+ def send_port(self, port):
+ self._send_message(PORT+pack('!H', port))
+
+ def send_request(self, index, begin, length):
+ self._send_message(pack("!ciii", REQUEST, index, begin, length))
+
+ def send_cancel(self, index, begin, length):
+ self._send_message(pack("!ciii", CANCEL,index, begin, length))
+
+ def send_bitfield(self, bitfield):
+ self._send_message(BITFIELD + bitfield)
+
+ def send_have(self, index):
+ self._send_message(pack("!ci", HAVE, index))
+
+ def send_keepalive(self):
+ self._send_message('')
+
+ def send_partial(self, bytes):
+ if self.closed:
+ return 0
+ if self._partial_message is None:
+ s = self.upload.get_upload_chunk()
+ if s is None:
+ return 0
+ index, begin, piece = s
+ self._partial_message = pack("!icii%ss" % len(piece), len(piece) + 9, PIECE,
+ index, begin, piece)
+ if bytes < len(self._partial_message):
+ self.upload.update_rate(bytes)
+ self.connection.write(buffer(self._partial_message, 0, bytes))
+ self._partial_message = buffer(self._partial_message, bytes)
+ return bytes
+
+ queue = [str(self._partial_message)]
+ self._partial_message = None
+ if self.choke_sent != self.upload.choked:
+ if self.upload.choked:
+ self._outqueue.append(pack("!ic", 1, CHOKE))
+ self.upload.sent_choke()
+ else:
+ self._outqueue.append(pack("!ic", 1, UNCHOKE))
+ self.choke_sent = self.upload.choked
+ queue.extend(self._outqueue)
+ self._outqueue = []
+ queue = ''.join(queue)
+ self.upload.update_rate(len(queue))
+ self.connection.write(queue)
+ return len(queue)
+
+ # yields the number of bytes it wants next, gets those in self._message
+ def _read_messages(self):
+ yield 1 # header length
+ if ord(self._message) != len(protocol_name):
+ return
+
+ yield len(protocol_name)
+ if self._message != protocol_name:
+ return
+
+ yield 8 # reserved
+ # dht is on last reserved byte
+ if ord(self._message[7]) & DHT:
+ self.uses_dht = True
+
+ yield 20 # download id
+ if self.encoder.download_id is None: # incoming connection
+ # modifies self.encoder if successful
+ self.encoder.select_torrent(self, self._message)
+ if self.encoder.download_id is None:
+ return
+ elif self._message != self.encoder.download_id:
+ return
+ if not self.locally_initiated:
+ self.connection.write(chr(len(protocol_name)) + protocol_name +
+ FLAGS + self.encoder.download_id + self.encoder.my_id)
+
+ yield 20 # peer id
+ if not self.id:
+ self.id = self._message
+ if self.id == self.encoder.my_id:
+ return
+ for v in self.encoder.connections.itervalues():
+ if v is not self:
+ if v.id == self.id:
+ return
+ if self.encoder.config['one_connection_per_ip'] and \
+ v.ip == self.ip:
+ return
+ if self.locally_initiated:
+ self.connection.write(self.encoder.my_id)
+ else:
+ self.encoder.everinc = True
+ else:
+ if self._message != self.id:
+ return
+ self.complete = True
+ self.encoder.connection_completed(self)
+
+ while True:
+ yield 4 # message length
+ l = toint(self._message)
+ if l > self.encoder.config['max_message_length']:
+ return
+ if l > 0:
+ yield l
+ self._got_message(self._message)
+
+ def _got_message(self, message):
+ t = message[0]
+ if t == BITFIELD and self.got_anything:
+ self.close()
+ return
+ self.got_anything = True
+ if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
+ len(message) != 1):
+ self.close()
+ return
+ if t == CHOKE:
+ self.download.got_choke()
+ elif t == UNCHOKE:
+ self.download.got_unchoke()
+ elif t == INTERESTED:
+ self.upload.got_interested()
+ elif t == NOT_INTERESTED:
+ self.upload.got_not_interested()
+ elif t == HAVE:
+ if len(message) != 5:
+ self.close()
+ return
+ i = unpack("!xi", message)[0]
+ if i >= self.encoder.numpieces:
+ self.close()
+ return
+ self.download.got_have(i)
+ elif t == BITFIELD:
+ try:
+ b = Bitfield(self.encoder.numpieces, message[1:])
+ except ValueError:
+ self.close()
+ return
+ self.download.got_have_bitfield(b)
+ elif t == REQUEST:
+ if len(message) != 13:
+ self.close()
+ return
+ i, a, b = unpack("!xiii", message)
+ if i >= self.encoder.numpieces:
+ self.close()
+ return
+ self.upload.got_request(i, a, b)
+ elif t == CANCEL:
+ if len(message) != 13:
+ self.close()
+ return
+ i, a, b = unpack("!xiii", message)
+ if i >= self.encoder.numpieces:
+ self.close()
+ return
+ self.upload.got_cancel(i, a, b)
+ elif t == PIECE:
+ if len(message) <= 9:
+ self.close()
+ return
+ n = len(message) - 9
+ i, a, b = unpack("!xii%ss" % n, message)
+ if i >= self.encoder.numpieces:
+ self.close()
+ return
+ if self.download.got_piece(i, a, b):
+ for co in self.encoder.complete_connections:
+ co.send_have(i)
+ elif t == PORT:
+ if len(message) != 3:
+ self.close()
+ return
+ self.dht_port = unpack('!H', message[1:3])[0]
+ self.encoder.got_port(self)
+ else:
+ self.close()
+
+ def _sever(self):
+ self.closed = True
+ self._reader = None
+ del self.encoder.connections[self.connection]
+ self.connection = None
+ self.encoder.replace_connection()
+ if self.complete:
+ del self.encoder.complete_connections[self]
+ self.download.disconnected()
+ self.encoder.choker.connection_lost(self)
+ self.upload = self.download = None
+
+ def _send_message(self, message):
+ s = tobinary(len(message)) + message
+ if self._partial_message is not None:
+ self._outqueue.append(s)
+ else:
+ self.connection.write(s)
+
+ def data_came_in(self, conn, s):
+ while True:
+ if self.closed:
+ return
+ i = self._next_len - self._buffer_len
+ if i > len(s):
+ self._buffer.append(s)
+ self._buffer_len += len(s)
+ return
+ m = s[:i]
+ if self._buffer_len > 0:
+ self._buffer.append(m)
+ m = ''.join(self._buffer)
+ self._buffer = []
+ self._buffer_len = 0
+ s = s[i:]
+ self._message = m
+ try:
+ self._next_len = self._reader.next()
+ except StopIteration:
+ self.close()
+ return
+
+ def connection_lost(self, conn):
+ if self.connection is None:
+ assert self.closed
+ else:
+ assert conn is self.connection
+ self._sever()
+
+ def connection_flushed(self, connection):
+ if self.complete and self.next_upload is None and (self._partial_message is not None
+ or (self.upload and self.upload.buffer)):
+ self.encoder.ratelimiter.queue(self, self.encoder.context.rlgroup)
diff --git a/NohGooee/ConvertedMetainfo.py b/NohGooee/ConvertedMetainfo.py
new file mode 100644
index 0000000..9b2459f
--- /dev/null
+++ b/NohGooee/ConvertedMetainfo.py
@@ -0,0 +1,283 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Uoti Urpala
+
+import os
+import sys
+from sha import sha
+
+from NohGooee.bencode import bencode
+from NohGooee import btformats
+from NohGooee import BTFailure, WARNING, ERROR
+
+
+WINDOWS_UNSUPPORTED_CHARS ='"*/:<>?\|'
+windows_translate = [chr(i) for i in range(256)]
+for x in WINDOWS_UNSUPPORTED_CHARS:
+ windows_translate[ord(x)] = '-'
+windows_translate = ''.join(windows_translate)
+
+noncharacter_translate = {}
+for i in range(0xD800, 0xE000):
+ noncharacter_translate[i] = ord('-')
+for i in range(0xFDD0, 0xFDF0):
+ noncharacter_translate[i] = ord('-')
+for i in (0xFFFE, 0xFFFF):
+ noncharacter_translate[i] = ord('-')
+
+del x, i
+
+def set_filesystem_encoding(encoding, errorfunc):
+ global filesystem_encoding
+ filesystem_encoding = 'ascii'
+ if encoding == '':
+ try:
+ sys.getfilesystemencoding
+ except AttributeError:
+ errorfunc(WARNING,
+ _("This seems to be an old Python version which "
+ "does not support detecting the filesystem "
+ "encoding. Assuming 'ascii'."))
+ return
+ encoding = sys.getfilesystemencoding()
+ if encoding is None:
+ errorfunc(WARNING,
+ _("Python failed to autodetect filesystem encoding. "
+ "Using 'ascii' instead."))
+ return
+ try:
+ 'a1'.decode(encoding)
+ except:
+ errorfunc(ERROR,
+ _("Filesystem encoding '%s' is not supported. "
+ "Using 'ascii' instead.") % encoding)
+ return
+ filesystem_encoding = encoding
+
+
+def generate_names(name, is_dir):
+ if is_dir:
+ prefix = name + '.'
+ suffix = ''
+ else:
+ pos = name.rfind('.')
+ if pos == -1:
+ pos = len(name)
+ prefix = name[:pos] + '.'
+ suffix = name[pos:]
+ i = 0
+ while True:
+ yield prefix + str(i) + suffix
+ i += 1
+
+
+class ConvertedMetainfo(object):
+
+ def __init__(self, metainfo):
+ self.bad_torrent_wrongfield = False
+ self.bad_torrent_unsolvable = False
+ self.bad_torrent_noncharacter = False
+ self.bad_conversion = False
+ self.bad_windows = False
+ self.bad_path = False
+ self.reported_errors = False
+ self.is_batch = False
+ self.orig_files = None
+ self.files_fs = None
+ self.total_bytes = 0
+ self.sizes = []
+ self.comment = None
+
+ btformats.check_message(metainfo, check_paths=False)
+ info = metainfo['info']
+ if info.has_key('length'):
+ self.total_bytes = info['length']
+ self.sizes.append(self.total_bytes)
+ else:
+ self.is_batch = True
+ r = []
+ self.orig_files = []
+ self.sizes = []
+ i = 0
+ for f in info['files']:
+ l = f['length']
+ self.total_bytes += l
+ self.sizes.append(l)
+ path = self._get_attr_utf8(f, 'path')
+ for x in path:
+ if not btformats.allowed_path_re.match(x):
+ if l > 0:
+ raise BTFailure(_("Bad file path component: ")+x)
+ # BitComet makes bad .torrent files with empty
+ # filename part
+ self.bad_path = True
+ break
+ else:
+ p = []
+ for x in path:
+ p.append((self._enforce_utf8(x), x))
+ path = p
+ self.orig_files.append('/'.join([x[0] for x in path]))
+ k = []
+ for u,o in path:
+ tf2 = self._to_fs_2(u)
+ k.append((tf2, u, o))
+ r.append((k,i))
+ i += 1
+ # If two or more file/subdirectory names in the same directory
+ # would map to the same name after encoding conversions + Windows
+ # workarounds, change them. Files are changed as
+ # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
+ # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
+ # names was a "clean" conversion, that one is always unchanged
+ # and the rest are adjusted.
+ r.sort()
+ self.files_fs = [None] * len(r)
+ prev = [None]
+ res = []
+ stack = [{}]
+ for x in r:
+ j = 0
+ x, i = x
+ while x[j] == prev[j]:
+ j += 1
+ del res[j:]
+ del stack[j+1:]
+ name = x[j][0][1]
+ if name in stack[-1]:
+ for name in generate_names(x[j][1], j != len(x) - 1):
+ name = self._to_fs(name)
+ if name not in stack[-1]:
+ break
+ stack[-1][name] = None
+ res.append(name)
+ for j in range(j + 1, len(x)):
+ name = x[j][0][1]
+ stack.append({name: None})
+ res.append(name)
+ self.files_fs[i] = os.path.join(*res)
+ prev = x
+
+ self.name = self._get_field_utf8(info, 'name')
+ self.name_fs = self._to_fs(self.name)
+ self.piece_length = info['piece length']
+ self.is_trackerless = False
+ if metainfo.has_key('announce'):
+ self.announce = metainfo['announce']
+ elif metainfo.has_key('nodes'):
+ self.is_trackerless = True
+ self.nodes = metainfo['nodes']
+
+ if metainfo.has_key('comment'):
+ self.comment = metainfo['comment']
+
+ self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
+ len(info['pieces']), 20)]
+ self.infohash = sha(bencode(info)).digest()
+
+ def show_encoding_errors(self, errorfunc):
+ self.reported_errors = True
+ if self.bad_torrent_unsolvable:
+ errorfunc(ERROR,
+ _("This .torrent file has been created with a broken "
+ "tool and has incorrectly encoded filenames. Some or "
+ "all of the filenames may appear different from what "
+ "the creator of the .torrent file intended."))
+ elif self.bad_torrent_noncharacter:
+ errorfunc(ERROR,
+ _("This .torrent file has been created with a broken "
+ "tool and has bad character values that do not "
+ "correspond to any real character. Some or all of the "
+ "filenames may appear different from what the creator "
+ "of the .torrent file intended."))
+ elif self.bad_torrent_wrongfield:
+ errorfunc(ERROR,
+ _("This .torrent file has been created with a broken "
+ "tool and has incorrectly encoded filenames. The "
+ "names used may still be correct."))
+ elif self.bad_conversion:
+ errorfunc(WARNING,
+ _('The character set used on the local filesystem ("%s") '
+ 'cannot represent all characters used in the '
+ 'filename(s) of this torrent. Filenames have been '
+ 'changed from the original.') % filesystem_encoding)
+ elif self.bad_windows:
+ errorfunc(WARNING,
+ _("The Windows filesystem cannot handle some "
+ "characters used in the filename(s) of this torrent. "
+ "Filenames have been changed from the original."))
+ elif self.bad_path:
+ errorfunc(WARNING,
+ _("This .torrent file has been created with a broken "
+ "tool and has at least 1 file with an invalid file "
+ "or directory name. However since all such files "
+ "were marked as having length 0 those files are "
+ "just ignored."))
+
+ # At least BitComet seems to make bad .torrent files that have
+ # fields in an arbitrary encoding but separate 'field.utf-8' attributes
+ def _get_attr_utf8(self, d, attrib):
+ v = d.get(attrib + '.utf-8')
+ if v is not None:
+ if v != d[attrib]:
+ self.bad_torrent_wrongfield = True
+ else:
+ v = d[attrib]
+ return v
+
+ def _enforce_utf8(self, s):
+ try:
+ s = s.decode('utf-8')
+ except:
+ self.bad_torrent_unsolvable = True
+ s = s.decode('utf-8', 'replace')
+ t = s.translate(noncharacter_translate)
+ if t != s:
+ self.bad_torrent_noncharacter = True
+ return t.encode('utf-8')
+
+ def _get_field_utf8(self, d, attrib):
+ r = self._get_attr_utf8(d, attrib)
+ return self._enforce_utf8(r)
+
+ def _fix_windows(self, name, t=windows_translate):
+ bad = False
+ r = name.translate(t)
+ # for some reason name cannot end with '.' or space
+ if r[-1] in '. ':
+ r = r + '-'
+ if r != name:
+ self.bad_windows = True
+ bad = True
+ return (r, bad)
+
+ def _to_fs(self, name):
+ return self._to_fs_2(name)[1]
+
+ def _to_fs_2(self, name):
+ bad = False
+ if sys.platform.startswith('win'):
+ name, bad = self._fix_windows(name)
+ name = name.decode('utf-8')
+ try:
+ r = name.encode(filesystem_encoding)
+ except:
+ self.bad_conversion = True
+ bad = True
+ r = name.encode(filesystem_encoding, 'replace')
+
+ if sys.platform.startswith('win'):
+ # encoding to mbcs with or without 'replace' will make the
+ # name unsupported by windows again because it adds random
+ # '?' characters which are invalid windows filesystem
+            # characters
+ r, bad = self._fix_windows(r)
+ return (bad, r)
diff --git a/NohGooee/CurrentRateMeasure.py b/NohGooee/CurrentRateMeasure.py
new file mode 100644
index 0000000..86e16da
--- /dev/null
+++ b/NohGooee/CurrentRateMeasure.py
@@ -0,0 +1,48 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from NohGooee.platform import bttime
+
+
class Measure(object):
    """Transfer-rate estimator over a sliding window of max_rate_period
    seconds, as used throughout the client for up/down rate display."""

    def __init__(self, max_rate_period, fudge=5):
        self.max_rate_period = max_rate_period
        # pretend measurement started `fudge` seconds ago to damp startup
        self.ratesince = bttime() - fudge
        self.last = self.ratesince
        self.rate = 0.0
        self.total = 0

    def update_rate(self, amount):
        """Fold *amount* new bytes into the windowed rate."""
        self.total += amount
        now = bttime()
        weight = self.last - self.ratesince
        self.rate = (self.rate * weight + amount) / (now - self.ratesince)
        self.last = now
        # keep the window from growing beyond max_rate_period
        self.ratesince = max(self.ratesince, now - self.max_rate_period)

    def get_rate(self):
        """Current rate, refreshed to this instant."""
        self.update_rate(0)
        return self.rate

    def get_rate_noupdate(self):
        """Most recently computed rate, without advancing the window."""
        return self.rate

    def time_until_rate(self, newrate):
        """Seconds of idleness needed for the rate to decay to *newrate*."""
        if self.rate <= newrate:
            return 0
        elapsed = bttime() - self.ratesince
        return ((self.rate * elapsed) / newrate) - elapsed

    def get_total(self):
        """Total bytes ever recorded."""
        return self.total
diff --git a/NohGooee/Desktop.py b/NohGooee/Desktop.py
new file mode 100644
index 0000000..e8955c3
--- /dev/null
+++ b/NohGooee/Desktop.py
@@ -0,0 +1,33 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# written by Matt Chisholm
+
+import os
+import sys
+
+from NohGooee.platform import get_home_dir, get_shell_dir
+if os.name == 'nt':
+ from win32com.shell import shellcon
+
desktop = None

# Resolve the user's desktop directory: the shell folder on Windows,
# ~/Desktop on mac/posix when writable, the home directory otherwise,
# and /tmp/ as a last resort.
if os.name == 'nt':
    desktop = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)
else:
    homedir = get_home_dir()
    if homedir is None:
        desktop = '/tmp/'
    else:
        desktop = homedir
        if os.name in ('mac', 'posix'):
            tmp_desktop = os.path.join(homedir, 'Desktop')
            if os.access(tmp_desktop, os.R_OK|os.W_OK):
                desktop = tmp_desktop + os.sep
diff --git a/NohGooee/Downloader.py b/NohGooee/Downloader.py
new file mode 100644
index 0000000..ca5f973
--- /dev/null
+++ b/NohGooee/Downloader.py
@@ -0,0 +1,363 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen, Uoti Urpala
+
+from random import shuffle
+
+from NohGooee.platform import bttime
+from NohGooee.CurrentRateMeasure import Measure
+from NohGooee.bitfield import Bitfield
+
+
class PerIPStats(object):
    """Per-IP peer bookkeeping, retained across reconnects from one host."""

    def __init__(self):
        # piece index -> number of times this IP sent bad data for it
        self.bad = {}
        # count of good pieces this IP contributed to
        self.numgood = 0
        # currently open connections from this IP
        self.numconnections = 0
        # most recent SingleDownload from this IP (None once it goes away)
        self.lastdownload = None
        # peer id observed on the latest connection
        self.peerid = None
+
+
class BadDataGuard(object):
    """Tracks hash-check results attributable to one peer connection and
    decides when to kick the connection or ban its IP entirely."""

    def __init__(self, download):
        self.download = download
        self.ip = download.connection.ip
        self.downloader = download.downloader
        self.stats = self.downloader.perip[self.ip]
        self.lastindex = None

    def bad(self, index, bump = False):
        """Record that this IP contributed to piece *index* failing its hash.

        Kicks the offending connection, and bans the IP once bad pieces
        outnumber 1/30th of its good pieces (minimum of 3).
        """
        self.stats.bad.setdefault(index, 0)
        self.stats.bad[index] += 1
        if self.ip not in self.downloader.bad_peers:
            self.downloader.bad_peers[self.ip] = (False, self.stats)
        if self.download is not None:
            self.downloader.kick(self.download)
            self.download = None
        elif len(self.stats.bad) > 1 and self.stats.numconnections == 1 and \
                 self.stats.lastdownload is not None:
            # kick new connection from same IP if previous one sent bad data,
            # mainly to give the algorithm time to find other bad pieces
            # in case the peer is sending a lot of bad data
            self.downloader.kick(self.stats.lastdownload)
        if len(self.stats.bad) >= 3 and len(self.stats.bad) > \
               self.stats.numgood // 30:
            self.downloader.ban(self.ip)
        elif bump:
            self.downloader.picker.bump(index)

    def good(self, index):
        # lastindex is a hack to only increase numgood by one for each good
        # piece, however many chunks came from the connection(s) from this IP
        if index != self.lastindex:
            self.stats.numgood += 1
            self.lastindex = index
+
+
class SingleDownload(object):
    """Download side of one peer connection: tracks the peer's bitfield,
    choke/interest state, the outstanding request pipeline, and rate
    measurement, including endgame-mode behavior."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        # list of outstanding (index, begin, length) requests
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                       20))
        self.have = Bitfield(downloader.numpieces)
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)

    def _backlog(self):
        """Recompute the request-pipeline depth from the current rate."""
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        """Tear down state when the peer connection closes."""
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Return outstanding requests to storage and offer the affected
        pieces to other peers."""
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        # give unchoked peers (in random order) a chance at the freed pieces
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # choked-and-uninterested peers may now have something to want
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle a received block.

        Returns True only when the whole piece is now complete and
        hash-checked.
        """
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # a block we never asked for (or already cancelled)
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            # piece not complete yet: re-queue the remaining requests
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            # cancel this block from every other peer that requested it
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # we are a seed now; drop connections to other seeds
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        """Fill the request pipeline up to the backlog limit.

        If *indices* is given, only those pieces are considered; otherwise
        the piece picker chooses.
        """
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            # pieces whose requests we exhausted: other peers whose interest
            # hinged on them may need to re-evaluate
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            # just entered endgame: gather every outstanding request so all
            # peers can race for the remaining blocks
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame mode: request any still-needed blocks this peer has."""
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        """Peer announced it now has piece *index*."""
        if self.have[index]:
            return
        # track the peer's own download speed (last piece may be short)
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
                (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # seed-to-seed connection: nothing to exchange
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        """Peer sent its full bitfield at handshake time."""
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            # seed-to-seed connection: nothing to exchange
            self.connection.close()
            return
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        # the peer counts as snubbing us if it hasn't sent data recently
        return bttime() - self.last > self.downloader.snub_time
+
+
class Downloader(object):
    """Owns all SingleDownloads for one torrent plus per-IP statistics.

    Acts as the factory for SingleDownload instances and as the central
    point for kicking/banning peers that send garbled data.
    """

    def __init__(self, config, storage, picker, numpieces, downmeasure,
                 measurefunc, kickfunc, banfunc):
        self.config = config
        self.storage = storage
        self.picker = picker
        self.chunksize = config['download_slice_size']
        self.downmeasure = downmeasure
        self.numpieces = numpieces
        self.snub_time = config['snub_time']
        self.measurefunc = measurefunc
        self.kickfunc = kickfunc
        self.banfunc = banfunc
        self.downloads = []
        # ip -> PerIPStats; kept even after all connections close
        self.perip = {}
        # ip -> (banned?, PerIPStats) for peers that sent bad data
        self.bad_peers = {}
        self.discarded_bytes = 0

    def make_download(self, connection):
        """Create and register a SingleDownload for a new connection."""
        ip = connection.ip
        perip = self.perip.get(ip)
        if perip is None:
            perip = PerIPStats()
            self.perip[ip] = perip
        perip.numconnections += 1
        d = SingleDownload(self, connection)
        perip.lastdownload = d
        perip.peerid = connection.id
        self.downloads.append(d)
        return d

    def lost_peer(self, download):
        """Forget a closed connection's download (per-IP stats persist)."""
        self.downloads.remove(download)
        ip = download.connection.ip
        self.perip[ip].numconnections -= 1
        if self.perip[ip].lastdownload == download:
            self.perip[ip].lastdownload = None

    def kick(self, download):
        """Disconnect a misbehaving peer, if retaliation is enabled."""
        if not self.config['retaliate_to_garbled_data']:
            return
        # kickfunc will schedule connection.close() to be executed later; we
        # might now be inside RawServer event loop with events from that
        # connection already queued, and trying to handle them after doing
        # close() now could cause problems.
        self.kickfunc(download.connection)

    def ban(self, ip):
        """Blacklist an IP that repeatedly sent bad data."""
        if not self.config['retaliate_to_garbled_data']:
            return
        self.banfunc(ip)
        self.bad_peers[ip] = (True, self.perip[ip])
diff --git a/NohGooee/DownloaderFeedback.py b/NohGooee/DownloaderFeedback.py
new file mode 100644
index 0000000..6c16c90
--- /dev/null
+++ b/NohGooee/DownloaderFeedback.py
@@ -0,0 +1,139 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen, Uoti Urpala
+
+from __future__ import division
+
+
class DownloaderFeedback(object):
    """Collects live statistics about a running download for display.

    Numbers come from the choker, downloader, storage and rerequester via
    the objects and callables handed to __init__.
    """

    def __init__(self, choker, upfunc, upfunc2, downfunc, uptotal, downtotal,
                 remainingfunc, leftfunc, file_length, finflag, downloader,
                 files, ever_got_incoming, rerequester):
        self.downloader = downloader
        self.picker = downloader.picker
        self.storage = downloader.storage
        self.choker = choker
        self.upfunc = upfunc
        self.upfunc2 = upfunc2
        self.downfunc = downfunc
        self.uptotal = uptotal
        self.downtotal = downtotal
        self.remainingfunc = remainingfunc
        self.leftfunc = leftfunc
        self.file_length = file_length
        self.finflag = finflag
        self.files = files
        self.ever_got_incoming = ever_got_incoming
        self.rerequester = rerequester
        self.lastids = []

    def _rotate(self):
        # start the spew list after the last peer reported previously, so
        # repeated calls cycle fairly through all connections
        cs = self.choker.connections
        for peerid in self.lastids:
            for i in xrange(len(cs)):
                if cs[i].id == peerid:
                    return cs[i:] + cs[:i]
        return cs

    def collect_spew(self):
        """Build the per-peer statistics list (the 'spew')."""
        l = [ ]
        cs = self._rotate()
        self.lastids = [c.id for c in cs]
        for c in cs:
            rec = {}
            rec['id'] = c.id
            rec["ip"] = c.ip
            rec["is_optimistic_unchoke"] = (c is self.choker.connections[0])
            if c.locally_initiated:
                rec["initiation"] = "L"
            else:
                rec["initiation"] = "R"
            u = c.upload
            rec["upload"] = (u.measure.get_total(), int(u.measure.get_rate()),
                             u.interested, u.choked)

            d = c.download
            rec["download"] = (d.measure.get_total(),int(d.measure.get_rate()),
                               d.interested, d.choked, d.is_snubbed())
            rec['completed'] = 1 - d.have.numfalse / len(d.have)
            rec['speed'] = d.connection.download.peermeasure.get_rate()
            l.append(rec)
        return l

    def get_statistics(self, spewflag=False, fileflag=False):
        """Return a dict of current transfer statistics.

        spewflag adds per-peer details; fileflag adds per-file progress.
        """
        status = {}
        numSeeds = 0
        numPeers = 0
        for d in self.downloader.downloads:
            if d.have.numfalse == 0:
                numSeeds += 1
            else:
                numPeers += 1
        status['numSeeds'] = numSeeds
        status['numPeers'] = numPeers
        status['trackerSeeds'] = self.rerequester.tracker_num_seeds
        status['trackerPeers'] = self.rerequester.tracker_num_peers
        status['upRate'] = self.upfunc()
        status['upRate2'] = self.upfunc2()
        status['upTotal'] = self.uptotal()
        status['ever_got_incoming'] = self.ever_got_incoming()
        # estimate the number of full distributed copies from the piece
        # picker's availability cross-counts
        missingPieces = 0
        numCopyList = []
        numCopies = 0
        for i in self.picker.crosscount:
            missingPieces += i
            if missingPieces == 0:
                numCopies += 1
            else:
                fraction = 1 - missingPieces / self.picker.numpieces
                numCopyList.append(fraction)
                if fraction == 0 or len(numCopyList) >= 3:
                    break
        numCopies -= numSeeds
        if self.picker.numgot == self.picker.numpieces:
            numCopies -= 1
        status['numCopies'] = numCopies
        status['numCopyList'] = numCopyList
        status['discarded'] = self.downloader.discarded_bytes
        status['storage_numcomplete'] = self.storage.stat_numfound + \
                                        self.storage.stat_numdownloaded
        status['storage_dirty'] = len(self.storage.stat_dirty)
        status['storage_active'] = len(self.storage.stat_active)
        status['storage_new'] = len(self.storage.stat_new)
        status['storage_numflunked'] = self.storage.stat_numflunked

        if spewflag:
            status['spew'] = self.collect_spew()
            status['bad_peers'] = self.downloader.bad_peers
        if fileflag:
            undl = self.storage.storage.undownloaded
            unal = self.storage.storage.unallocated
            status['files_left'] = [undl[fname] for fname in self.files]
            status['files_allocated'] = [not unal[fn] for fn in self.files]
        if self.finflag.isSet():
            # download finished: rate/fraction are fixed
            status['downRate'] = 0
            status['downTotal'] = self.downtotal()
            status['fractionDone'] = 1
            return status
        timeEst = self.remainingfunc()
        status['timeEst'] = timeEst

        if self.file_length > 0:
            fractionDone = 1 - self.leftfunc() / self.file_length
        else:
            fractionDone = 1
        status.update({
            "fractionDone" : fractionDone,
            "downRate" : self.downfunc(),
            "downTotal" : self.downtotal()
            })
        return status
diff --git a/NohGooee/Encoder.py b/NohGooee/Encoder.py
new file mode 100644
index 0000000..8675823
--- /dev/null
+++ b/NohGooee/Encoder.py
@@ -0,0 +1,286 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from socket import error as socketerror
+
+from NohGooee import BTFailure
+from NohGooee.RawServer_magic import Handler
+from NohGooee.NatTraversal import UPNPError
+from NohGooee.Connecter import Connection
+from NohGooee.platform import is_frozen_exe
+from NohGooee.ClientIdentifier import identify_client
+
+# header, reserved, download id, my id, [length, message]
+
class InitialConnectionHandler(Handler):
    """Transport-level handler for an outgoing connection attempt.

    Once the transport is up (or fails), hands control back to the parent
    Encoder, which tracks the attempt in its pending_connections map.
    """

    def __init__(self, parent, id):
        self.parent = parent
        self.id = id
        self.accept = True

    def connection_started(self, s):
        del self.parent.pending_connections[(s.ip, s.port)]

        # prevents connections we no longer care about from being accepted
        if not self.accept:
            return

        self.parent.connections[s] = Connection(self.parent, s, self.id, True)

        # it might not be obvious why this is here.
        # if the pending queue filled and put the remaining connections
        # into the spare list, this will push more connections in to pending
        self.parent.replace_connection()

    def connection_failed(self, addr, exception):
        del self.parent.pending_connections[addr]

        # no need to rotate the spares with replace_connection() when the
        # Encoder object has stopped all connections
        if self.accept:
            self.parent.replace_connection()
+
+
class Encoder(object):
    """Initiates and accepts peer connections for one torrent, wiring each
    completed connection to the uploader, downloader and choker."""

    def __init__(self, make_upload, downloader, choker, numpieces, ratelimiter,
                 rawserver, config, my_id, schedulefunc, download_id, context, addcontactfunc, reported_port):
        self.make_upload = make_upload
        self.downloader = downloader
        self.choker = choker
        self.numpieces = numpieces
        self.ratelimiter = ratelimiter
        self.rawserver = rawserver
        self.my_id = my_id
        self.config = config
        self.schedulefunc = schedulefunc
        self.download_id = download_id
        self.context = context
        self.addcontact = addcontactfunc
        self.reported_port = reported_port
        self.everinc = False

        # submitted
        self.pending_connections = {}
        # transport connected
        self.connections = {}
        # protocol active
        self.complete_connections = {}

        # addresses we would like to connect to once a slot frees up
        self.spares = {}

        self.banned = {}
        schedulefunc(self.send_keepalives, config['keepalive_interval'])

    def send_keepalives(self):
        """Periodic task: keep all completed connections alive."""
        self.schedulefunc(self.send_keepalives,
                          self.config['keepalive_interval'])
        for c in self.complete_connections:
            c.send_keepalive()

    # returns False if the connection has been pushed on to self.spares
    # other filters and a successful connection return True
    def start_connection(self, dns, id):
        if dns[0] in self.banned:
            return True
        if id == self.my_id:
            return True
        for v in self.connections.values():
            if id and v.id == id:
                return True
            if self.config['one_connection_per_ip'] and v.ip == dns[0]:
                return True

        total_outstanding = len(self.connections)
        # it's possible the pending connections could eventually complete,
        # so we have to account for those when enforcing max_initiate
        total_outstanding += len(self.pending_connections)

        if total_outstanding >= self.config['max_initiate']:
            self.spares[dns] = 1
            return False

        # if these fail, I'm getting a very weird dns object
        assert isinstance(dns, tuple)
        assert isinstance(dns[0], str)
        assert isinstance(dns[1], int)

        # looks like we connect to the same peer several times in a row.
        # we should probably stop doing that, but this prevents it from crashing
        if dns in self.pending_connections:
            return True

        handler = InitialConnectionHandler(self, id)
        self.pending_connections[dns] = handler
        started = self.rawserver.async_start_connection(dns, handler, self.context)

        if not started:
            del self.pending_connections[dns]
            self.spares[dns] = 1
            return False

        return True

    def connection_completed(self, c):
        """Handshake finished: hook the connection into upload/download."""
        self.complete_connections[c] = 1
        c.upload = self.make_upload(c)
        c.download = self.downloader.make_download(c)
        self.choker.connection_made(c)
        if c.uses_dht:
            c.send_port(self.reported_port)

    def got_port(self, c):
        """Peer announced its DHT port; hand it to the DHT if enabled."""
        if self.addcontact and c.uses_dht and c.dht_port is not None:
            self.addcontact(c.connection.ip, c.dht_port)

    def ever_got_incoming(self):
        return self.everinc

    def how_many_connections(self):
        return len(self.complete_connections)

    def replace_connection(self):
        """Promote spare addresses into pending connections."""
        while self.spares:
            started = self.start_connection(self.spares.popitem()[0], None)
            if not started:
                # start_connection decided to push this connection back on to
                # self.spares because a limit was hit. break now or loop forever
                break

    def close_connections(self):
        # drop connections which could be made after we're not interested
        for c in self.pending_connections.itervalues():
            c.accept = False

        for c in self.connections.itervalues():
            if not c.closed:
                c.connection.close()
                c.closed = True

    def singleport_connection(self, listener, con):
        """Adopt an incoming connection from the shared-port listener."""
        if con.ip in self.banned:
            return
        m = self.config['max_allow_in']
        if m and len(self.connections) >= m:
            return
        self.connections[con.connection] = con
        del listener.connections[con.connection]
        con.encoder = self
        con.connection.context = self.context

    def ban(self, ip):
        self.banned[ip] = None
+
+
class SingleportListener(Handler):
    """Accepts incoming peer connections on one shared listening port and
    routes each to the Encoder of the torrent it requests."""

    def __init__(self, rawserver, nattraverser):
        self.rawserver = rawserver
        self.nattraverser = nattraverser
        self.port = 0
        # port -> [serversocket, {callback: refcount}]
        self.ports = {}
        self.port_change_notification = None
        self.torrents = {}
        self.connections = {}
        self.download_id = None

    def _close(self, port):
        """Stop listening on *port* and release its NAT mapping."""
        serversocket = self.ports[port][0]
        self.nattraverser.unregister_port(port, "TCP")
        self.rawserver.stop_listening(serversocket)
        serversocket.close()

    def _check_close(self, port):
        # only close a socket that is no longer current and has no
        # remaining callback references
        if not port or self.port == port or len(self.ports[port][1]) > 0:
            return
        self._close(port)
        del self.ports[port]

    def open_port(self, port, config):
        """Start listening on *port*, closing the old port if unused."""
        if port in self.ports:
            self.port = port
            return
        serversocket = self.rawserver.create_serversocket(
            port, config['bind'], reuse=True, tos=config['peer_socket_tos'])
        try:
            d = self.nattraverser.register_port(port, port, "TCP", config['bind'])
            d.addCallback(self._change_port)
        except Exception, e:
            # blanket, just in case - we don't want to interrupt things
            # maybe we should log it, maybe not
            #print "UPnP registration error", e
            pass
        self.rawserver.start_listening(serversocket, self)
        oldport = self.port
        self.port = port
        self.ports[port] = [serversocket, {}]
        self._check_close(oldport)

    def _change_port(self, port):
        """NAT traversal callback: the externally mapped port changed."""
        if self.port == port:
            return
        [serversocket, callbacks] = self.ports[self.port]
        self.ports[port] = [serversocket, callbacks]
        del self.ports[self.port]
        self.port = port
        for callback in callbacks:
            if callback:
                callback(port)

    def get_port(self, callback = None):
        """Return the current port, taking a reference for *callback*."""
        if self.port:
            callbacks = self.ports[self.port][1]
            if not callbacks.has_key(callback):
                callbacks[callback] = 1
            else:
                callbacks[callback] += 1
        return self.port

    def release_port(self, port, callback = None):
        """Drop one reference to *port*, closing it when unused."""
        callbacks = self.ports[port][1]
        callbacks[callback] -= 1
        if callbacks[callback] == 0:
            del callbacks[callback]
        self._check_close(port)

    def close_sockets(self):
        for port in self.ports.iterkeys():
            self._close(port)

    def add_torrent(self, infohash, encoder):
        """Register the Encoder handling *infohash* on this port."""
        if infohash in self.torrents:
            raise BTFailure(_("Can't start two separate instances of the same "
                              "torrent"))
        self.torrents[infohash] = encoder

    def remove_torrent(self, infohash):
        del self.torrents[infohash]

    def select_torrent(self, conn, infohash):
        # hand the connection to the torrent it asked for, if we have it
        if infohash in self.torrents:
            self.torrents[infohash].singleport_connection(self, conn)

    def connection_made(self, connection):
        con = Connection(self, connection, None, False)
        self.connections[connection] = con

    def replace_connection(self):
        pass
diff --git a/NohGooee/GUI.py b/NohGooee/GUI.py
new file mode 100644
index 0000000..a4a677c
--- /dev/null
+++ b/NohGooee/GUI.py
@@ -0,0 +1,679 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# written by Matt Chisholm
+
+from __future__ import division
+
+import gtk
+import pango
+import gobject
+import os
+import threading
+
# Fail fast on unsupported toolkit versions. `_` is the gettext hook the
# application installs globally before importing this module.
assert gtk.gtk_version >= (2, 6), _( "GTK %s or newer required") % '2.6'
assert gtk.pygtk_version >= (2, 6), _("PyGTK %s or newer required") % '2.6'
+
+from NohGooee import app_name, FAQ_URL, languages, language_names
+from NohGooee.platform import image_root, read_language_file, write_language_file
+
def lock_wrap(function, *args):
    """Call *function* while holding the GDK thread lock."""
    gtk.gdk.threads_enter()
    function(*args)
    gtk.gdk.threads_leave()

def gtk_wrap(function, *args):
    """Schedule *function* to run on the GTK main loop, lock held."""
    gobject.idle_add(lock_wrap, function, *args)
+
SPACING = 8
WINDOW_TITLE_LENGTH = 128 # do we need this?
WINDOW_WIDTH = 600

# Derive the usable window size from the default GDK screen.
d = gtk.gdk.display_get_default()
s = d.get_default_screen()
MAX_WINDOW_HEIGHT = s.get_height()
MAX_WINDOW_WIDTH = s.get_width()
if os.name == 'nt':
    MAX_WINDOW_HEIGHT -= 32 # leave room for start bar (exact)
    MAX_WINDOW_HEIGHT -= 32 # and window decorations (depends on windows theme)
else:
    MAX_WINDOW_HEIGHT -= 32 # leave room for window decorations (could be any size)


MIN_MULTI_PANE_HEIGHT = 107

# Drag-and-drop target type ids used in the tuples below.
BT_TARGET_TYPE = 0
EXTERNAL_FILE_TYPE = 1
EXTERNAL_STRING_TYPE = 2

BT_TARGET = ("application/x-bittorrent" , gtk.TARGET_SAME_APP, BT_TARGET_TYPE )
EXTERNAL_FILE = ("text/uri-list" , 0 , EXTERNAL_FILE_TYPE )

#gtk (gdk actually) is unable to receive text drags of any sort on
#Windows because it does not use OLE. This list is every atom known to
#make URL drag-and-drop work on Linux from any browser.
EXTERNAL_TEXTPLAIN = ("text/plain" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_TEXT = ("TEXT" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_COMPOUND_TEXT = ("COMPOUND_TEXT" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_MOZILLA = ("text/x-moz-url" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_NETSCAPE = ("_NETSCAPE_URL" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_HTML = ("text/html" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UNICODE = ("text/unicode" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UTF8 = ("text/plain;charset=utf-8" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_UTF8_STRING = ("UTF8_STRING" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_STRING = ("STRING" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_OLE2_DND = ("OLE2_DND" , 0 , EXTERNAL_STRING_TYPE)
EXTERNAL_RTF = ("Rich Text Format" , 0 , EXTERNAL_STRING_TYPE)
#there should also be text/plain;charset={current charset}

TARGET_EXTERNAL = [EXTERNAL_FILE,
                   EXTERNAL_TEXTPLAIN,
                   EXTERNAL_TEXT,
                   EXTERNAL_COMPOUND_TEXT,
                   EXTERNAL_MOZILLA,
                   EXTERNAL_NETSCAPE,
                   EXTERNAL_HTML,
                   EXTERNAL_UNICODE,
                   EXTERNAL_UTF8,
                   EXTERNAL_UTF8_STRING,
                   EXTERNAL_STRING,
                   EXTERNAL_OLE2_DND,
                   EXTERNAL_RTF]

TARGET_ALL = [BT_TARGET,
              EXTERNAL_FILE,
              EXTERNAL_TEXTPLAIN,
              EXTERNAL_TEXT,
              EXTERNAL_COMPOUND_TEXT,
              EXTERNAL_MOZILLA,
              EXTERNAL_NETSCAPE,
              EXTERNAL_HTML,
              EXTERNAL_UNICODE,
              EXTERNAL_UTF8,
              EXTERNAL_UTF8_STRING,
              EXTERNAL_STRING,
              EXTERNAL_OLE2_DND,
              EXTERNAL_RTF]

# a slightly hackish but very reliable way to get OS scrollbar width
sw = gtk.ScrolledWindow()
SCROLLBAR_WIDTH = sw.size_request()[0] - 48
del sw
+
def align(obj,x,y):
    """Align *obj* at fraction (x, y) of its allocation and return it.

    Labels align natively via set_alignment; anything else is wrapped in
    a gtk.Alignment. isinstance (instead of an exact type() comparison)
    also catches gtk.Label subclasses such as FancyLabel below, which
    previously fell through to the Alignment branch.
    """
    if isinstance(obj, gtk.Label):
        obj.set_alignment(x,y)
        return obj
    a = gtk.Alignment(x,y,0,0)
    a.add(obj)
    return a
+
def halign(obj, amt):
    """Horizontal alignment at *amt*, vertically centered."""
    return align(obj,amt,0.5)

def lalign(obj):
    """Left-align, vertically centered."""
    return halign(obj,0)

def ralign(obj):
    """Right-align, vertically centered."""
    return halign(obj,1)

def valign(obj, amt):
    """Vertical alignment at *amt*, horizontally centered."""
    return align(obj,0.5,amt)

def malign(obj):
    """Center both horizontally and vertically."""
    return valign(obj, 0.5)
+
# Register the application's stock icons as 'bt-<name>' so widgets can
# refer to them by stock id.
factory = gtk.IconFactory()

# these don't seem to be documented anywhere:
# ICON_SIZE_BUTTON = 20x20
# ICON_SIZE_LARGE_TOOLBAR = 24x24

for n in 'abort broken finished info pause paused play queued running remove status-running status-starting status-pre-natted status-natted status-stopped status-broken'.split():
    fn = os.path.join(image_root, 'icons', 'default', ('%s.png'%n))
    if os.access(fn, os.F_OK):
        pixbuf = gtk.gdk.pixbuf_new_from_file(fn)
        # NOTE: 'set' shadows the builtin; harmless at module scope here.
        set = gtk.IconSet(pixbuf)
        factory.add('bt-%s'%n, set)
    # maybe we should load a default icon if none exists

factory.add_default()

def get_logo(size=32):
    """Return a gtk.Image holding the bittorrent logo at *size* pixels."""
    fn = os.path.join(image_root, 'logo', 'bittorrent_%d.png'%size)
    logo = gtk.Image()
    logo.set_from_file(fn)
    return logo
+
class Size(long):
    """A byte count that renders itself human-readably (e.g. '1.4 MB')."""
    __slots__ = []
    size_labels = ['','K','M','G','T','P','E','Z','Y']
    radix = 2**10
    def __new__(cls, value):
        self = long.__new__(cls, value)
        return self

    def __init__(self, value):
        long.__init__(self, value)

    def __str__(self, precision=None):
        # *precision* is a threshold below which one decimal is shown;
        # it is scaled down alongside the value (true division is in
        # effect via the __future__ import at the top of this module).
        if precision is None:
            precision = 0
        value = self
        for unitname in self.size_labels:
            if value < self.radix and precision < self.radix:
                break
            value /= self.radix
            precision /= self.radix
        if unitname and value < 10 and precision < 1:
            return '%.1f %sB' % (value, unitname)
        else:
            return '%.0f %sB' % (value, unitname)
+
+
class Rate(Size):
    """A transfer rate rendered as '<size>/s'."""
    __slots__ = []
    def __init__(self, value):
        Size.__init__(self, value)

    def __str__(self, precision=2**10):
        return '%s/s'% Size.__str__(self, precision=precision)


class Duration(float):
    """A number of seconds rendered human-readably; >1 year shows '?'."""
    __slots__ = []
    def __str__(self):
        if self > 365 * 24 * 60 * 60:
            return '?'
        elif self >= 172800:
            return _("%d days") % (self//86400) # 2 days or longer
        elif self >= 86400:
            return _("1 day %d hours") % ((self-86400)//3600) # 1-2 days
        elif self >= 3600:
            return _("%d:%02d hours") % (self//3600, (self%3600)//60) # 1 h - 1 day
        elif self >= 60:
            return _("%d:%02d minutes") % (self//60, self%60) # 1 minute to 1 hour
        elif self >= 0:
            return _("%d seconds") % int(self)
        else:
            # negative durations collapse to zero
            return _("0 seconds")
+
+
class FancyLabel(gtk.Label):
    """A label built from a %-format string that can be re-filled later."""
    def __init__(self, label_string, *values):
        self.label_string = label_string
        gtk.Label.__init__(self, label_string%values)

    def set_value(self, *values):
        self.set_text(self.label_string%values)


class IconButton(gtk.Button):
    """A button with an icon from either a stock id or an image file."""
    def __init__(self, label, iconpath=None, stock=None):
        gtk.Button.__init__(self, label)

        self.icon = gtk.Image()
        if stock is not None:
            self.icon.set_from_stock(stock, gtk.ICON_SIZE_BUTTON)
        elif iconpath is not None:
            self.icon.set_from_file(iconpath)
        else:
            raise TypeError, 'IconButton needs iconpath or stock'
        self.set_image(self.icon)
+
+
class LanguageChooser(gtk.Frame):
    """A combo box that persists the UI language via write_language_file.

    Takes effect on next application start (see the label below).
    """
    def __init__(self):
        gtk.Frame.__init__(self, "Translate %s into:" % app_name)
        self.set_border_width(SPACING)

        # column 0: display name, column 1: language code ('' = default)
        model = gtk.ListStore(*[gobject.TYPE_STRING] * 2)
        default = model.append(("System default", ''))

        lang = read_language_file()
        for l in languages:
            it = model.append((language_names[l].encode('utf8'), l))
            if l == lang:
                default = it

        self.combo = gtk.ComboBox(model)
        cell = gtk.CellRendererText()
        self.combo.pack_start(cell, True)
        self.combo.add_attribute(cell, 'text', 0)

        if default is not None:
            self.combo.set_active_iter(default)

        self.combo.connect('changed', self.changed)
        box = gtk.VBox(spacing=SPACING)
        box.set_border_width(SPACING)
        box.pack_start(self.combo, expand=False, fill=False)
        l = gtk.Label("You must restart %s for the\nlanguage "
                      "setting to take effect." % app_name)
        l.set_alignment(0,1)
        l.set_line_wrap(True)
        box.pack_start(l, expand=False, fill=False)
        self.add(box)

    def changed(self, *a):
        # Persist the selected language code immediately.
        it = self.combo.get_active_iter()
        model = self.combo.get_model()
        code = model.get(it, 1)[0]
        write_language_file(code)
+
class IconMixin(object):
    """Mixin that installs the application icon on any gtk window/dialog."""
    def __init__(self):
        iconname = os.path.join(image_root,'bittorrent.ico')
        icon16 = gtk.gdk.pixbuf_new_from_file_at_size(iconname, 16, 16)
        icon32 = gtk.gdk.pixbuf_new_from_file_at_size(iconname, 32, 32)
        self.set_icon_list(icon16, icon32)

class Window(IconMixin, gtk.Window):
    """gtk.Window with the application icon pre-set."""
    def __init__(self, *args):
        gtk.Window.__init__(self, *args)
        IconMixin.__init__(self)
+
class HelpWindow(Window):
    """Help dialog: FAQ link plus a scrollable command-line help text.

    If *main* is None the window runs its own gtk main loop and quits it
    when closed (standalone mode).
    """
    def __init__(self, main, helptext):
        Window.__init__(self)
        self.set_title(_("%s Help")%app_name)
        self.main = main
        self.set_border_width(SPACING)

        self.vbox = gtk.VBox(spacing=SPACING)

        self.faq_box = gtk.HBox(spacing=SPACING)
        self.faq_box.pack_start(gtk.Label(_("Frequently Asked Questions:")), expand=False, fill=False)
        self.faq_url = gtk.Entry()
        self.faq_url.set_text(FAQ_URL)
        self.faq_url.set_editable(False)
        self.faq_box.pack_start(self.faq_url, expand=True, fill=True)
        self.faq_button = gtk.Button(_("Go"))
        self.faq_button.connect('clicked', lambda w: self.main.visit_url(FAQ_URL) )
        self.faq_box.pack_start(self.faq_button, expand=False, fill=False)
        self.vbox.pack_start(self.faq_box, expand=False, fill=False)

        self.cmdline_args = gtk.Label(helptext)

        self.cmdline_sw = ScrolledWindow()
        self.cmdline_sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
        self.cmdline_sw.add_with_viewport(self.cmdline_args)

        # width of the text plus the scrollbar, fixed 200px height
        self.cmdline_sw.set_size_request(self.cmdline_args.size_request()[0]+SCROLLBAR_WIDTH, 200)

        self.vbox.pack_start(self.cmdline_sw)

        self.add(self.vbox)

        self.show_all()

        if self.main is not None:
            self.connect('destroy', lambda w: self.main.window_closed('help'))
        else:
            self.connect('destroy', lambda w: gtk.main_quit())
            gtk.main()



    def close(self, widget=None):
        self.destroy()
+
+
class ScrolledWindow(gtk.ScrolledWindow):
    """gtk.ScrolledWindow with programmatic vertical scrolling helpers."""
    def scroll_to_bottom(self):
        child_height = self.child.child.size_request()[1]
        self.scroll_to(0, child_height)

    def scroll_by(self, dx=0, dy=0):
        # dx is currently ignored; only vertical scrolling is implemented.
        v = self.get_vadjustment()
        new_y = min(v.upper, v.value + dy)
        self.scroll_to(0, new_y)

    def scroll_to(self, x=0, y=0):
        # Replaces the vertical adjustment wholesale to force the position.
        v = self.get_vadjustment()
        child_height = self.child.child.size_request()[1]
        new_adj = gtk.Adjustment(y, 0, child_height)
        self.set_vadjustment(new_adj)
+
+
class AutoScrollingWindow(ScrolledWindow):
    """ScrolledWindow that auto-scrolls while a drag hovers near its
    top or bottom edge (10px hot zones)."""
    def __init__(self):
        ScrolledWindow.__init__(self)
        self.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
                           gtk.DEST_DEFAULT_DROP,
                           TARGET_ALL,
                           gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
        self.connect('drag_motion' , self.drag_motion )
#        self.connect('drag_data_received', self.drag_data_received)
        self.vscrolltimeout = None

#    def drag_data_received(self, widget, context, x, y, selection, targetType, time):
#        print _("AutoScrollingWindow.drag_data_received("), widget

    def drag_motion(self, widget, context, x, y, time):
        # Scroll speed grows with how deep the pointer is in the hot zone.
        v = self.get_vadjustment()
        if v.page_size - y <= 10:
            amount = (10 - int(v.page_size - y)) * 2
            self.start_scrolling(amount)
        elif y <= 10:
            amount = (y - 10) * 2
            self.start_scrolling(amount)
        else:
            self.stop_scrolling()
        return True

    def scroll_and_wait(self, amount, lock_held):
        # Re-arms itself every 100ms until stop_scrolling() is called.
        if not lock_held:
            gtk.gdk.threads_enter()
        self.scroll_by(0, amount)
        if not lock_held:
            gtk.gdk.threads_leave()
        if self.vscrolltimeout is not None:
            gobject.source_remove(self.vscrolltimeout)
        self.vscrolltimeout = gobject.timeout_add(100, self.scroll_and_wait, amount, False)
        #print "adding timeout", self.vscrolltimeout, amount

    def start_scrolling(self, amount):
        if self.vscrolltimeout is not None:
            gobject.source_remove(self.vscrolltimeout)
        self.scroll_and_wait(amount, True)

    def stop_scrolling(self):
        if self.vscrolltimeout is not None:
            #print "removing timeout", self.vscrolltimeout
            gobject.source_remove(self.vscrolltimeout)
            self.vscrolltimeout = None
+
class MessageDialog(IconMixin, gtk.MessageDialog):
    """Modal message dialog that routes OK/Yes to *yesfunc* and
    Cancel/No to *nofunc*, then destroys itself."""
    flags = gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT

    def __init__(self, parent, title, message,
                 type=gtk.MESSAGE_ERROR,
                 buttons=gtk.BUTTONS_OK,
                 yesfunc=None, nofunc=None,
                 default=gtk.RESPONSE_OK
                 ):
        gtk.MessageDialog.__init__(self, parent,
                                   self.flags,
                                   type, buttons, message)
        IconMixin.__init__(self)

        self.set_size_request(-1, -1)
        self.set_resizable(False)
        self.set_title(title)
        if default is not None:
            self.set_default_response(default)

        self.label.set_line_wrap(True)

        self.connect('response', self.callback)

        self.yesfunc = yesfunc
        self.nofunc = nofunc
        if os.name == 'nt':
            # raise the parent first so the dialog appears in front on Windows
            parent.present()
        self.show_all()

    def callback(self, widget, response_id, *args):
        if ((response_id == gtk.RESPONSE_OK or
             response_id == gtk.RESPONSE_YES) and
            self.yesfunc is not None):
            self.yesfunc()
        if ((response_id == gtk.RESPONSE_CANCEL or
             response_id == gtk.RESPONSE_NO )
            and self.nofunc is not None):
            self.nofunc()
        self.destroy()

class ErrorMessageDialog(MessageDialog):
    # Same dialog, but non-modal.
    flags = gtk.DIALOG_DESTROY_WITH_PARENT
+
+
class FileSelection(IconMixin, gtk.FileChooserDialog):
    """File chooser dialog that reports its result through callbacks:
    *got_location_func* (single path), *got_multiple_location_func*
    (list of paths, enables multi-select), or *no_location_func* on
    cancel. CREATE_FOLDER mode gets a toggle button to switch to
    choosing an existing folder."""

    def __init__(self, action, main, title='', fullname='',
                 got_location_func=None, no_location_func=None,
                 got_multiple_location_func=None, show=True):
        gtk.FileChooserDialog.__init__(self, action=action, title=title,
                                       buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                                gtk.STOCK_OK, gtk.RESPONSE_OK))
        IconMixin.__init__(self)
        from NohGooee.ConvertedMetainfo import filesystem_encoding
        self.fsenc = filesystem_encoding
        # Normalize *fullname* to unicode: try utf8 first, then the
        # filesystem encoding.
        try:
            fullname.decode('utf8')
        except:
            fullname = fullname.decode(self.fsenc)
        self.set_default_response(gtk.RESPONSE_OK)
        if action == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER:
            self.convert_button_box = gtk.HBox()
            self.convert_button = gtk.Button(_("Choose an existing folder..."))
            self.convert_button.connect('clicked', self.change_action)
            self.convert_button_box.pack_end(self.convert_button,
                                             expand=False,
                                             fill=False)
            self.convert_button_box.show_all()
            self.set_extra_widget(self.convert_button_box)
        elif action == gtk.FILE_CHOOSER_ACTION_OPEN:
            self.all_filter = gtk.FileFilter()
            self.all_filter.add_pattern('*')
            self.all_filter.set_name(_("All Files"))
            self.add_filter(self.all_filter)
            self.torrent_filter = gtk.FileFilter()
            self.torrent_filter.add_pattern('*.torrent')
            self.torrent_filter.add_mime_type('application/x-bittorrent')
            self.torrent_filter.set_name(_("Torrents"))
            self.add_filter(self.torrent_filter)
            self.set_filter(self.torrent_filter)

        self.main = main
        self.set_modal(True)
        self.set_destroy_with_parent(True)
        if fullname:
            if action == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
                self.set_filename(fullname)
            elif action == gtk.FILE_CHOOSER_ACTION_OPEN:
                if fullname[-1] != os.sep:
                    fullname = fullname + os.sep
                path, filename = os.path.split(fullname)
                self.set_current_folder(path)
            else:
                if fullname[-1] == os.sep:
                    fullname = fullname[:-1]
                path, filename = os.path.split(fullname)
                if gtk.gtk_version < (2,8):
                    # older gtk wants the folder in the filesystem encoding
                    path = path.encode(self.fsenc)
                self.set_current_folder(path)
                self.set_current_name(filename)
        if got_multiple_location_func is not None:
            self.got_multiple_location_func = got_multiple_location_func
            self.set_select_multiple(True)
        self.got_location_func = got_location_func
        self.no_location_func = no_location_func
        self.connect('response', self.got_response)
        # destroying the window counts as a cancel
        self.d_handle = self.connect('destroy', self.got_response,
                                     gtk.RESPONSE_CANCEL)
        if show:
            self.show()

    def change_action(self, widget):
        # Toggle between "create new folder" and "choose existing folder".
        if self.get_action() == gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER:
            self.convert_button.set_label(_("Create a new folder..."))
            self.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
        elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
            self.convert_button.set_label(_("Choose an existing folder..."))
            self.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)

    def got_response(self, widget, response):
        if response == gtk.RESPONSE_OK:
            if self.get_select_multiple():
                if self.got_multiple_location_func is not None:
                    self.got_multiple_location_func(self.get_filenames())
            elif self.got_location_func is not None:
                fn = self.get_filename()
                if fn:
                    self.got_location_func(fn)
                else:
                    self.no_location_func()
        else:
            if self.no_location_func is not None:
                self.no_location_func()
        self.disconnect(self.d_handle)
        self.destroy()

    def done(self, widget=None):
        # NOTE(review): got_multiple_location/got_location are not defined
        # anywhere in this class (the attributes are *_func); this method
        # looks dead or broken -- confirm before relying on it.
        if self.get_select_multiple():
            self.got_multiple_location()
        else:
            self.got_location()
        self.disconnect(self.d_handle)
        self.destroy()

    def close_child_windows(self):
        self.destroy()

    def close(self, widget=None):
        self.destroy()
+
+
# Thin convenience subclasses that fix the chooser action.

class OpenFileSelection(FileSelection):

    def __init__(self, *args, **kwargs):
        FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, *args,
                               **kwargs)


class SaveFileSelection(FileSelection):

    def __init__(self, *args, **kwargs):
        FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, *args,
                               **kwargs)


class ChooseFolderSelection(FileSelection):

    def __init__(self, *args, **kwargs):
        FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                               *args, **kwargs)

class CreateFolderSelection(FileSelection):

    def __init__(self, *args, **kwargs):
        FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER,
                               *args, **kwargs)
+
+
class FileOrFolderSelection(FileSelection):
    """Chooser that can be toggled between picking a file and picking a
    folder via an extra button; title and button label track the mode."""
    def __init__(self, *args, **kwargs):
        FileSelection.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, *args,
                               **kwargs)
        self.select_file = _("Select a file" )
        self.select_folder = _("Select a folder")
        self.convert_button_box = gtk.HBox()
        self.convert_button = gtk.Button(self.select_folder)
        self.convert_button.connect('clicked', self.change_action)
        self.convert_button_box.pack_end(self.convert_button,
                                         expand=False,
                                         fill=False)
        self.convert_button_box.show_all()
        self.set_extra_widget(self.convert_button_box)
        self.reset_by_action()
        self.set_filter(self.all_filter)


    def change_action(self, widget):
        if self.get_action() == gtk.FILE_CHOOSER_ACTION_OPEN:
            self.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
        elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
            self.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
        self.reset_by_action()

    def reset_by_action(self):
        # The button always offers the *other* mode.
        if self.get_action() == gtk.FILE_CHOOSER_ACTION_OPEN:
            self.convert_button.set_label(self.select_folder)
            self.set_title(self.select_file)
        elif self.get_action() == gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER:
            self.convert_button.set_label(self.select_file)
            self.set_title(self.select_folder)

    def set_title(self, title):
        mytitle = title + ':'
        FileSelection.set_title(self, mytitle)
+
+
class PaddedHSeparator(gtk.VBox):
    """An HSeparator with vertical padding on both sides."""
    def __init__(self, spacing=SPACING):
        gtk.VBox.__init__(self)
        self.sep = gtk.HSeparator()
        self.pack_start(self.sep, expand=False, fill=False, padding=spacing)
        self.show_all()
+
+
class HSeparatedBox(gtk.VBox):
    """VBox that automatically interleaves a separator between children.

    Internally the real child list alternates [child, sep, child, sep,
    child, ...]; the public API hides the separators.
    """

    def new_separator(self):
        return PaddedHSeparator()

    def _get_children(self):
        # raw children, separators included
        return gtk.VBox.get_children(self)

    def get_children(self):
        # every other raw child is a real child (even indices)
        return self._get_children()[0::2]

    def _reorder_child(self, child, index):
        gtk.VBox.reorder_child(self, child, index)

    def reorder_child(self, child, index):
        # Move the child together with its adjacent separator: the last
        # child's separator precedes it, every other child's follows it.
        children = self._get_children()
        oldindex = children.index(child)
        sep = None
        if oldindex == len(children) - 1:
            sep = children[oldindex-1]
        else:
            sep = children[oldindex+1]

        newindex = index*2
        if newindex == len(children) -1:
            self._reorder_child(sep, newindex-1)
            self._reorder_child(child, newindex)
        else:
            self._reorder_child(child, newindex)
            self._reorder_child(sep, newindex+1)

    def pack_start(self, widget, *args, **kwargs):
        if len(self._get_children()):
            s = self.new_separator()
            gtk.VBox.pack_start(self, s, *args, **kwargs)
            s.show()
        gtk.VBox.pack_start(self, widget, *args, **kwargs)

    def pack_end(self, widget, *args, **kwargs):
        if len(self._get_children()):
            s = self.new_separator()
            gtk.VBox.pack_start(self, s, *args, **kwargs)
            s.show()
        gtk.VBox.pack_end(self, widget, *args, **kwargs)

    def remove(self, widget):
        # Remove the widget and the separator next to it (if any remain).
        children = self._get_children()
        if len(children) > 1:
            index = children.index(widget)
            if index == 0:
                sep = children[index+1]
            else:
                sep = children[index-1]
            sep.destroy()
        gtk.VBox.remove(self, widget)
diff --git a/NohGooee/GetTorrent.py b/NohGooee/GetTorrent.py
new file mode 100644
index 0000000..dcbcb50
--- /dev/null
+++ b/NohGooee/GetTorrent.py
@@ -0,0 +1,94 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# GetTorrent -- abstraction which can get a .torrent file from multiple
+# sources: local file, url, etc.
+
+# written by Matt Chisholm
+
+import os
+import re
+import zurllib
+from bencode import bdecode
+from NohGooee.platform import get_cache_dir
+
# Anything that looks like '<scheme>://...' is treated as a URL; the
# torrent:// and bittorrent:// prefixes are stripped in get_url().
urlpat = re.compile('^\w+://')
urlpat_torrent = re.compile('^torrent://')
urlpat_bittorrent = re.compile('^bittorrent://')
+
def get_quietly(arg):
    """Like get(), but errors on files under the IE cache directory are
    suppressed -- a failed cache read is treated as "no file at all"."""
    data, errors = get(arg)
    if not errors:
        return data, errors
    cache = get_cache_dir()
    if cache is not None and cache in arg:
        errors = []
    return data, errors
+
def get(arg):
    """Fetch raw .torrent data from a local path or a URL.

    Returns (data, errors): *data* is the file contents or None,
    *errors* is a list of human-readable error strings.
    """
    data = None
    errors = []
    if os.access(arg, os.F_OK):
        data, errors = get_file(arg)
    elif urlpat.match(arg):
        data, errors = get_url(arg)
    else:
        errors.append(_("Could not read %s") % arg)
    return data, errors
+
+
def get_url(url):
    """Download a .torrent from *url*; returns (data, errors).

    The payload is bdecoded purely as validation -- invalid torrents are
    reported as download errors and data is reset to None.
    """
    data = None
    errors = []
    err_str = _("Could not download or open \n%s\n"
                "Try using a web browser to download the torrent file.") % url
    u = None

    # pending protocol changes, convert:
    #   torrent://http://path.to/file
    # and:
    #   bittorrent://http://path.to/file
    # to:
    #   http://path.to/file
    url = urlpat_torrent.sub('', url)
    url = urlpat_bittorrent.sub('', url)

    try:
        u = zurllib.urlopen(url)
        data = u.read()
        u.close()
        # validation only; the decoded value is unused
        b = bdecode(data)
    except Exception, e:
        if u is not None:
            u.close()
        errors.append(err_str + "\n(%s)" % e)
        data = None
    else:
        if u is not None:
            u.close()

    return data, errors


def get_file(filename):
    """Read *filename* in binary mode; returns (data, errors)."""
    data = None
    errors = []
    f = None
    try:
        f = file(filename, 'rb')
        data = f.read()
        f.close()
    except Exception, e:
        if f is not None:
            f.close()
        errors.append((_("Could not read %s") % filename) + (': %s' % str(e)))

    return data, errors
diff --git a/NohGooee/IPC.py b/NohGooee/IPC.py
new file mode 100644
index 0000000..11d910b
--- /dev/null
+++ b/NohGooee/IPC.py
@@ -0,0 +1,441 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Greg Hazel
+# based on code by Uoti Urpala
+
+import os
+import socket
+import sys
+import traceback
+if os.name == 'nt':
+ import win32api
+ import win32event
+ import winerror
+ import win32ui
+ import dde
+ import pywin.mfc.object
+
+from binascii import b2a_hex
+
+from NohGooee.RawServer_magic import RawServer, Handler
+from NohGooee.platform import get_home_dir, get_config_dir
+from NohGooee import INFO, WARNING, ERROR, CRITICAL, BTFailure, app_name
+
def toint(s):
    """Decode a big-endian byte string into an unsigned integer."""
    return int(b2a_hex(s), 16)
+
def tobinary(i):
    """Serialize *i* as a 4-character big-endian string (inverse of
    toint for 32-bit values)."""
    parts = [chr(i >> 24),
             chr((i >> 16) & 0xFF),
             chr((i >> 8) & 0xFF),
             chr(i & 0xFF)]
    return ''.join(parts)

# well-known local port the single-instance control channel listens on
CONTROL_SOCKET_PORT = 46881
+
class ControlsocketListener(Handler):
    """Accept handler that attaches a MessageReceiver to each incoming
    control-socket connection."""

    def __init__(self, callback):
        self.callback = callback

    def connection_made(self, connection):
        connection.handler = MessageReceiver(self.callback)
+
+
class MessageReceiver(Handler):
    """Parses the length-prefixed control-socket protocol.

    The wire format is a sequence of (4-byte big-endian length, payload)
    frames: an action name, then depending on the action zero, one
    ('show_error') or two ('start_torrent') argument frames.  Parsing is
    driven by the _read_messages generator, which yields the number of
    bytes it needs next.
    """

    def __init__(self, callback):
        self.callback = callback
        self._buffer = []
        self._buffer_len = 0
        self._reader = self._read_messages()
        self._next_len = self._reader.next()

    def _read_messages(self):
        while True:
            yield 4
            l = toint(self._message)
            yield l
            action = self._message

            if action in ('no-op',):
                self.callback(action, None)
            else:
                yield 4
                l = toint(self._message)
                yield l
                data = self._message
                if action in ('show_error',):
                    self.callback(action, data)
                else:
                    yield 4
                    l = toint(self._message)
                    yield l
                    path = self._message
                    # BUG FIX: was `in ('start_torrent')` -- a substring
                    # test against the *string*, so e.g. action == 'start'
                    # matched.  A one-tuple restores exact matching.
                    if action in ('start_torrent',):
                        self.callback(action, data, path)

    # copied from Connecter.py: feed bytes to the generator in
    # exactly the chunk sizes it asks for
    def data_came_in(self, conn, s):
        while True:
            i = self._next_len - self._buffer_len
            if i > len(s):
                self._buffer.append(s)
                self._buffer_len += len(s)
                return
            m = s[:i]
            if self._buffer_len > 0:
                self._buffer.append(m)
                m = ''.join(self._buffer)
                self._buffer = []
                self._buffer_len = 0
            s = s[i:]
            self._message = m
            try:
                self._next_len = self._reader.next()
            except StopIteration:
                self._reader = None
                conn.close()
                return

    def connection_lost(self, conn):
        # drop the generator so no further frames are processed
        self._reader = None

    def connection_flushed(self, conn):
        pass
+
class IPC(object):
    """Base class for the single-instance IPC mechanisms below.

    Subclasses implement create()/send_command()/stop(); incoming
    commands are dispatched to the callback registered via start().
    """
    def __init__(self, config, log):
        self.config = config
        self.log = log
        self.rawserver = None
        self.callback = None

    def create(self):
        pass

    def start(self, callback):
        self.callback = callback

    def send_command(self, command, *args):
        pass

    def handle_command(self, command, *args):
        if callable(self.callback):
            return self.callback(command, *args)
        # gettext fix: translate the format string, then interpolate --
        # formatting before _() made catalog lookup impossible.
        self.log(WARNING, _("Unhandled command: %s %s") % (str(command), str(args)))

    def set_rawserver(self, rawserver):
        self.rawserver = rawserver

    def stop(self):
        pass
+
class IPCSocketBase(IPC):
    """Shared plumbing for socket-backed IPC (unix socket / win32 TCP)."""

    def __init__(self, *args):
        IPC.__init__(self, *args)
        # default TCP port; the win32 variant probes upward from here
        self.port = CONTROL_SOCKET_PORT

        self.controlsocket = None

    def start(self, callback):
        IPC.start(self, callback)
        self.rawserver.start_listening(self.controlsocket,
                                       ControlsocketListener(self.handle_command))

    def stop(self):
        # safe double-stop, since TorrentQueue seems to be prone to do so
        if self.controlsocket:
            # it's possible we're told to stop after controlsocket creation but
            # before rawserver registration
            if self.rawserver:
                self.rawserver.stop_listening(self.controlsocket)
            self.controlsocket.close()
            self.controlsocket = None
+
class IPCUnixSocket(IPCSocketBase):
    """IPC over a unix-domain socket in the data directory.

    create() probes an existing socket with a 'no-op' to decide whether
    another live instance owns it; a stale socket file is removed.
    """

    def __init__(self, *args):
        IPCSocketBase.__init__(self, *args)
        self.socket_filename = os.path.join(self.config['data_dir'], 'ui_socket')

    def create(self):
        filename = self.socket_filename
        if os.path.exists(filename):
            try:
                self.send_command('no-op')
            except BTFailure:
                # connect failed -> stale socket, fall through and unlink
                pass
            else:
                raise BTFailure(_("Could not create control socket: already in use"))

            try:
                os.unlink(filename)
            except OSError, e:
                raise BTFailure(_("Could not remove old control socket filename:")
                                + str(e))
        try:
            controlsocket = RawServer.create_unixserversocket(filename)
        except socket.error, e:
            raise BTFailure(_("Could not create control socket: ")+str(e))

        self.controlsocket = controlsocket

    # blocking version without rawserver
    def send_command(self, command, *args):
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        filename = self.socket_filename
        try:
            s.connect(filename)
            s.send(tobinary(len(command)))
            s.send(command)
            for arg in args:
                s.send(tobinary(len(arg)))
                s.send(arg)
            s.close()
        except socket.error, e:
            s.close()
            raise BTFailure(_("Could not send command: ") + str(e))
+
+
+class IPCWin32Socket(IPCSocketBase):
+ def __init__(self, *args):
+ IPCSocketBase.__init__(self, *args)
+ self.socket_filename = os.path.join(self.config['data_dir'], 'ui_socket')
+ self.mutex = None
+ self.master = 0
+
+ def _get_sic_path(self):
+ directory = get_config_dir()
+ configdir = os.path.join(directory, '.bittorrent')
+ filename = os.path.join(configdir, ".btcontrol")
+ return filename
+
+ def create(self):
+ obtain_mutex = 1
+ mutex = win32event.CreateMutex(None, obtain_mutex, app_name)
+
+ # prevent the PyHANDLE from going out of scope, ints are fine
+ self.mutex = int(mutex)
+ mutex.Detach()
+
+ lasterror = win32api.GetLastError()
+
+ if lasterror == winerror.ERROR_ALREADY_EXISTS:
+ takeover = 0
+
+ try:
+ # if the mutex already exists, discover which port to connect to.
+ # if something goes wrong with that, tell us to take over the
+ # role of master
+ takeover = self.discover_sic_socket()
+ except:
+ pass
+
+ if not takeover:
+ raise BTFailure(_("Global mutex already created."))
+
+ self.master = 1
+
+ # lazy free port code
+ port_limit = 50000
+ while self.port < port_limit:
+ try:
+ controlsocket = RawServer.create_serversocket(self.port,
+ '127.0.0.1', reuse=True)
+ self.controlsocket = controlsocket
+ break
+ except socket.error, e:
+ self.port += 1
+
+ if self.port >= port_limit:
+ raise BTFailure(_("Could not find an open port!"))
+
+ filename = self._get_sic_path()
+ (path, name) = os.path.split(filename)
+ try:
+ os.makedirs(path)
+ except OSError, e:
+ # 17 is dir exists
+ if e.errno != 17:
+ BTFailure(_("Could not create application data directory!"))
+ f = open(filename, "w")
+ f.write(str(self.port))
+ f.close()
+
+ # we're done writing the control file, release the mutex so other instances can lock it and read the file
+ # but don't destroy the handle until the application closes, so that the named mutex is still around
+ win32event.ReleaseMutex(self.mutex)
+
+ def discover_sic_socket(self):
+ takeover = 0
+
+ # mutex exists and has been opened (not created, not locked).
+ # wait for it so we can read the file
+ r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
+
+ # WAIT_OBJECT_0 means the mutex was obtained
+ # WAIT_ABANDONED means the mutex was obtained, and it had previously been abandoned
+ if (r != win32event.WAIT_OBJECT_0) and (r != win32event.WAIT_ABANDONED):
+ raise BTFailure(_("Could not acquire global mutex lock for controlsocket file!"))
+
+ filename = self._get_sic_path()
+ try:
+ f = open(filename, "r")
+ self.port = int(f.read())
+ f.close()
+ except:
+ if (r == win32event.WAIT_ABANDONED):
+ self.log(WARNING, _("A previous instance of BT was not cleaned up properly. Continuing."))
+ # take over the role of master
+ takeover = 1
+ else:
+ self.log(WARNING, (_("Another instance of BT is running, but \"%s\" does not exist.\n") % filename)+
+ _("I'll guess at the port."))
+ try:
+ self.port = CONTROL_SOCKET_PORT
+ self.send_command('no-op')
+ self.log(WARNING, _("Port found: %d") % self.port)
+ try:
+ f = open(filename, "w")
+ f.write(str(self.port))
+ f.close()
+ except:
+ traceback.print_exc()
+ except:
+ # this is where this system falls down.
+ # There's another copy of BitTorrent running, or something locking the mutex,
+ # but I can't communicate with it.
+ self.log(WARNING, _("Could not find port."))
+
+
+ # we're done reading the control file, release the mutex so other instances can lock it and read the file
+ win32event.ReleaseMutex(self.mutex)
+
+ return takeover
+
+ #blocking version without rawserver
+ def send_command(self, command, *datas):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ s.connect(('127.0.0.1', self.port))
+ s.send(tobinary(len(command)))
+ s.send(command)
+ for data in datas:
+ s.send(tobinary(len(data)))
+ s.send(data)
+ s.close()
+ except socket.error, e:
+ try:
+ s.close()
+ except:
+ pass
+ raise BTFailure(_("Could not send command: ") + str(e))
+
+
+ def stop(self):
+ if self.master:
+ r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
+ filename = self._get_sic_path()
+ try:
+ os.remove(filename)
+ except OSError, e:
+ # print, but continue
+ traceback.print_exc()
+ self.master = 0
+ win32event.ReleaseMutex(self.mutex)
+ # close it so the named mutex goes away
+ win32api.CloseHandle(self.mutex)
+ self.mutex = None
+
if os.name == 'nt':
    # MFC/DDE glue, only importable on Windows.
    class HandlerObject(pywin.mfc.object.Object):
        """Binds a command handler to an MFC-wrapped DDE object."""
        def __init__(self, handler, target):
            self.handler = handler
            pywin.mfc.object.Object.__init__(self, target)

    class Topic(HandlerObject):
        """DDE topic whose Request payload is '|'-separated command args."""
        def __init__(self, handler, target):
            target.AddItem(dde.CreateStringItem(""))
            HandlerObject.__init__(self, handler, target)

        def Request(self, x):
            # null byte hack: see send_command in IPCWin32DDE
            x = x.replace("\\**0", "\0")
            items = x.split("|")
            self.handler(items[0], *items[1:])
            return ("OK")

        # remote procedure call
        #def Exec(self, x):
        #    exec x

    class Server(HandlerObject):
        """DDE server that spawns Topic objects for its system topic."""
        def __init__(self, log, *args):
            self.log = log
            HandlerObject.__init__(self, *args)

        def CreateSystemTopic(self):
            return Topic(self.handler, dde.CreateServerSystemTopic())

        def Status(self, s):
            #if self.log:
            #    self.log(INFO, _("IPC Status: %s") % s)
            pass

        def stop(self):
            self.Shutdown()
            self.Destroy()
+
class IPCWin32DDE(IPC):
    """IPC over Windows DDE under the app_name service name."""
    def create(self):
        self.server = None

        # First try to connect as a client; if a conversation can be
        # established, another instance is already running and the
        # BTFailure below deliberately aborts startup.
        self.client = Server(None, None, dde.CreateServer())
        self.client.Create(app_name, dde.CBF_FAIL_SELFCONNECTIONS|dde.APPCMD_CLIENTONLY)
        self.conversation = dde.CreateConversation(self.client)
        try:
            self.conversation.ConnectTo(app_name, "controlsocket")
            raise BTFailure(_("DDE Conversation connected."))
        except dde.error, e:
            # no one is listening
            pass

        # clean up
        self.client.stop()
        del self.client
        del self.conversation

        # start server
        self.server = Server(self.log, self.handle_command, dde.CreateServer())
        self.server.Create(app_name, dde.CBF_FAIL_SELFCONNECTIONS|dde.APPCLASS_STANDARD)
        self.server.AddTopic(Topic(self.handle_command, dde.CreateTopic("controlsocket")))

    def send_command(self, command, *args):
        s = '|'.join([command, ] + list(args))
        # null byte hack: DDE strings cannot carry NULs, so escape them
        if s.count("\0") > 0:
            self.log(WARNING, "IPC: String with null byte(s):" + s)
            s = s.replace("\0", "\\**0")
        result = self.conversation.Request(s)

    def stop(self):
        if self.server:
            server = self.server
            self.server = None
            server.stop()
+
# Pick the platform's IPC implementation; DDE is preferred over the
# TCP-socket variant on Windows.
if os.name == 'nt':
    #ipc_interface = IPCWin32Socket
    ipc_interface = IPCWin32DDE
else:
    ipc_interface = IPCUnixSocket
+
diff --git a/NohGooee/LaunchPath.py b/NohGooee/LaunchPath.py
new file mode 100644
index 0000000..3c78845
--- /dev/null
+++ b/NohGooee/LaunchPath.py
@@ -0,0 +1,54 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# LaunchPath -- a cross platform way to "open," "launch," or "start"
+# files and directories
+
+# written by Matt Chisholm
+
+import os
+
+can_launch_files = False
+posix_browsers = ('gnome-open','konqueror',) #gmc, gentoo only work on dirs
+default_posix_browser = ''
+
def launchpath_nt(path):
    """Open *path* with its associated application (Windows only)."""
    os.startfile(path)
+
def launchpath_mac(path):
    """Open *path* via the Mac OS 'open' command without waiting."""
    # BUG: this is untested
    os.spawnlp(os.P_NOWAIT, 'open', 'open', path)
+
def launchpath_posix(path):
    """Open *path* with the browser detected at import time; silently
    does nothing when no usable browser was found."""
    if default_posix_browser:
        os.spawnlp(os.P_NOWAIT, default_posix_browser,
                   default_posix_browser, path)
+
def launchpath(path):
    """No-op default; rebound to one of the launchpath_* implementations
    below when the current platform supports launching files."""
    pass
+
def launchdir(path):
    """Launch *path* in the platform's file manager, but only when it is
    an existing directory; anything else is silently ignored."""
    if not os.path.isdir(path):
        return
    launchpath(path)
+
# Bind launchpath/can_launch_files to the right implementation for this
# platform; on posix, probe candidate browsers with `which` and use the
# first one found.
if os.name == 'nt':
    can_launch_files = True
    launchpath = launchpath_nt
elif os.name == 'mac':
    can_launch_files = True
    launchpath = launchpath_mac
elif os.name == 'posix':
    # NOTE(review): the "\\'" escape is ineffective inside single quotes
    # in sh, but the candidate names contain no quotes, so it is harmless.
    for b in posix_browsers:
        if os.system("which '%s' >/dev/null 2>&1" % b.replace("'","\\'")) == 0:
            can_launch_files = True
            default_posix_browser = b
            launchpath = launchpath_posix
            break
+
diff --git a/NohGooee/NatCheck.py b/NohGooee/NatCheck.py
new file mode 100644
index 0000000..fe59a15
--- /dev/null
+++ b/NohGooee/NatCheck.py
@@ -0,0 +1,146 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from cStringIO import StringIO
+from socket import error as socketerror
+
+protocol_name = 'BitTorrent protocol'
+
+# header, reserved, download id, my id, [length, message]
+
+from twisted.internet.protocol import Protocol, ClientFactory
+from twisted.internet import reactor
+from twisted.python import log
+
class NatCheck(object):
    """Actively verify that a peer is reachable (i.e. not NATed or
    firewalled) by opening a TCP connection to ip:port and starting a
    BitTorrent handshake.

    `resultfunc` is invoked exactly once with
    (result, downloadid, peerid, ip, port).
    """

    def __init__(self, resultfunc, downloadid, peerid, ip, port):
        self.resultfunc = resultfunc
        self.downloadid = downloadid
        self.peerid = peerid
        self.ip = ip
        self.port = port
        # guards against resultfunc being called more than once
        self.answered = False

        factory = NatCheckProtocolFactory(self, downloadid, peerid)

        reactor.connectTCP(ip, port, factory)

    def answer(self, result):
        # First answer wins; subsequent connection-lost callbacks from
        # the factory are ignored.
        if not self.answered:
            self.answered = True
            log.msg('NAT check for %s:%i is %s' % (self.ip, self.port, result))
            self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)
+
class NatCheckProtocolFactory(ClientFactory):
    """Twisted factory producing a single NatCheckProtocol; any
    connection failure or loss is reported back to the owning NatCheck
    as a negative answer (NatCheck.answer de-duplicates)."""
    def __init__(self, natcheck, downloadid, peerid):
        self.natcheck = natcheck
        self.downloadid = downloadid
        self.peerid = peerid

    def startedConnecting(self, connector):
        log.msg('Started to connect.')

    def buildProtocol(self, addr):
        return NatCheckProtocol(self, self.downloadid, self.peerid)

    def clientConnectionLost(self, connector, reason):
        # Closed before a positive answer => treated as unreachable.
        self.natcheck.answer(False)
        log.msg('Lost connection. Reason: %s' % reason)

    def clientConnectionFailed(self, connector, reason):
        self.natcheck.answer(False)
        log.msg('Connection failed. Reason: %s' % reason)
+
class NatCheckProtocol(Protocol):
    """Speaks just enough of the BitTorrent handshake to decide whether
    the remote peer is genuinely reachable.

    connectionMade sends <len><protocol name><8 reserved bytes><infohash>;
    dataReceived then incrementally parses the peer's reply field by
    field, caching each completed field in a received_* attribute so
    parsing can resume as more bytes trickle in.  Any mismatch yields a
    negative answer and drops the connection.
    """
    def __init__(self, factory, downloadid, peerid):
        self.factory = factory
        self.downloadid = downloadid
        self.peerid = peerid
        # unconsumed bytes accumulated across dataReceived calls
        self.data = ''
        self.received_protocol_name_len = None
        self.received_protocol_name = None
        self.received_reserved = None
        self.received_downloadid = None
        self.received_peerid = None

    def connectionMade(self):
        # our half of the handshake
        self.transport.write(chr(len(protocol_name)))
        self.transport.write(protocol_name)
        self.transport.write(chr(0) * 8)
        self.transport.write(self.downloadid)

    def dataReceived(self, data):
        # Each stage below returns early until enough bytes are buffered
        # for its field, then consumes them and falls through to the next.
        self.data += data

        if self.received_protocol_name_len is None:
            if len(self.data) >= 1:
                self.received_protocol_name_len = ord(self.data[0])
                self.data = self.data[1:]
                if self.received_protocol_name_len != len(protocol_name):
                    self.factory.natcheck.answer(False)
                    self.transport.loseConnection()
                    return
            else:
                return

        if self.received_protocol_name is None:
            if len(self.data) >= self.received_protocol_name_len:
                self.received_protocol_name = self.data[:self.received_protocol_name_len]
                self.data = self.data[self.received_protocol_name_len:]
                if self.received_protocol_name != protocol_name:
                    log.err('Received protocol name did not match!')
                    self.factory.natcheck.answer(False)
                    self.transport.loseConnection()
                    return
            else:
                return

        if self.received_reserved is None:
            # 8 reserved/extension bytes; accepted without inspection
            if len(self.data) >= 8:
                self.received_reserved = self.data[:8]
                self.data = self.data[8:]
            else:
                return

        if self.received_downloadid is None:
            if len(self.data) >= 20:
                self.received_downloadid = self.data[:20]
                self.data = self.data[20:]
                if self.received_downloadid != self.downloadid:
                    log.err('Received download id did not match!')
                    self.factory.natcheck.answer(False)
                    self.transport.loseConnection()
                    return
            else:
                return

        if self.received_peerid is None:
            if len(self.data) >= 20:
                log.msg('Peerid length: %i' % len(self.peerid))
                self.received_peerid = self.data[:20]
                self.data = self.data[20:]
                log.msg('Received: %s' % self.received_peerid.encode('hex'))
                log.msg('Received: %s' % self.received_peerid.encode('quoted-printable'))
                log.msg('Expected: %s' % self.peerid.encode('hex'))
                log.msg('Expected: %s' % self.peerid.encode('quoted-printable'))
                if self.received_peerid != self.peerid:
                    log.err('Received peer id did not match!')
                    self.factory.natcheck.answer(False)
                    self.transport.loseConnection()
                    return
            else:
                return

        # full handshake parsed and every field matched: peer reachable
        if self.received_protocol_name == protocol_name and self.received_downloadid == self.downloadid and self.received_peerid == self.peerid:
            self.factory.natcheck.answer(True)
            self.transport.loseConnection()
diff --git a/NohGooee/NatTraversal.py b/NohGooee/NatTraversal.py
new file mode 100644
index 0000000..a6c96e3
--- /dev/null
+++ b/NohGooee/NatTraversal.py
@@ -0,0 +1,757 @@
+# someday: http://files.dns-sd.org/draft-nat-port-mapping.txt
+# today: http://www.upnp.org/
+
+debug = False
+
+import sys
+import socket
+import os
+if os.name == 'nt':
+ import pywintypes
+ import win32com.client
+
+has_set = False
+try:
+ # python 2.4
+ s = set()
+ del s
+ has_set = True
+except NameError:
+ try:
+ # python 2.3
+ from sets import Set
+ set = Set
+ has_set = True
+ except ImportError:
+ # python 2.2
+ pass
+
+from NohGooee import app_name, defer
+from NohGooee import INFO, WARNING, ERROR
+from NohGooee.platform import os_version
+from NohGooee.RawServer_magic import RawServer, Handler
+from NohGooee.BeautifulSupe import BeautifulSupe, Tag
from urllib2 import URLError, HTTPError, Request

#bleh
from urllib import urlopen, FancyURLopener, addinfourl
from httplib import HTTPResponse, BadStatusLine

import threading
import Queue
import urlparse
import random

from traceback import print_stack, print_tb, print_exc
+
def UnsupportedWarning(logfunc, s):
    """Log a non-fatal NAT-traversal warning, tagged with the OS version."""
    logfunc(WARNING, "NAT Traversal warning " + ("(%s: %s)." % (os_version, s)))
+
def UPNPError(logfunc, s):
    """Log a UPnP error, tagged with the OS version."""
    logfunc(ERROR, "UPnP ERROR: " + ("(%s: %s)." % (os_version, s)))
+
class UPnPException(Exception):
    """Base class for UPnP-specific failures in this module."""
    pass
+
+__host_ip = None
+
+import thread
+
def get_host_ip():
    """Return (and cache in module-global __host_ip) the local IP used
    to reach the internet.

    Connects a TCP socket to a well-known host and reads the socket's
    own address; falls back to gethostbyname(gethostname()) on error.
    """
    global __host_ip

    if __host_ip is not None:
        return __host_ip

    #try:
    #    ip = socket.gethostbyname(socket.gethostname())
    #except socket.error, e:
    # mac sometimes throws an error, so they can just wait.
    # plus, complicated /etc/hosts will return invalid IPs

    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("bittorrent.com", 80))
        endpoint = s.getsockname()
        __host_ip = endpoint[0]
    except socket.error, e:
        __host_ip = socket.gethostbyname(socket.gethostname())

    return __host_ip
+
+
class InfoFileHandle(object):
    """Minimal file-like adapter so traceback printers (print_exc etc.)
    can write into the application log at INFO level."""
    def __init__(self, logfunc):
        self.logfunc = logfunc
    def write(self, s):
        self.logfunc(INFO, s)
+
class NATEventLoop(threading.Thread):
    """Daemon worker thread that serially executes queued
    (func, args, kwargs) tasks, keeping blocking UPnP/SOAP traffic off
    the main loop.  Exceptions are logged and never kill the thread."""
    def __init__(self, logfunc):
        threading.Thread.__init__(self)
        self.log = InfoFileHandle(logfunc)
        self.queue = Queue.Queue()
        # non-zero stops the loop after the current task
        self.killswitch = 0
        self.setDaemon(True)

    def run(self):

        while (self.killswitch == 0):

            event = self.queue.get()

            try:
                (f, a, kw) = event
                f(*a, **kw)
            except:
                # sys can be None during interpreter shutdown
                if sys is None:
                    break
                # this prints the whole thing.
                print_exc(file = self.log)
                # this prints just the traceback to the application log
                #print_tb(sys.exc_info()[2], file = self.log)
                # this just prints the exception
                #self.logfunc(INFO, str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1].__str__()))
+
+
class NatTraverser(object):
    """Coordinates NAT port mapping across the available UPnP backends.

    Queues register/unregister/list requests and replays them once a
    backend (WindowsUPnP or ManualUPnP) successfully attaches; all
    blocking work is pushed onto a NATEventLoop worker thread.
    """
    def __init__(self, rawserver, logfunc):
        self.rawserver = rawserver

        def log_severity_filter(level, s, optional=True):
            # suppress chatty "optional" messages unless debugging
            global debug
            if level >= ERROR or debug or not optional:
                logfunc(level, s)
        self.logfunc = log_severity_filter

        # requests parked here until a service attaches
        self.register_requests = []
        self.unregister_requests = []
        self.list_requests = []

        self.service = None
        self.services = []
        # index of the next backend to try in init_services
        self.current_service = 0

        if self.rawserver.config['upnp']:
            if os.name == 'nt':
                self.services.append(WindowsUPnP)
            self.services.append(ManualUPnP)

        self.event_loop = NATEventLoop(self.logfunc)
        self.event_loop.start()

        self.resume_init_services()

    def add_task(self, f, *a, **kw):
        # run f(*a, **kw) on the worker thread
        self.event_loop.queue.put((f, a, kw))

    def init_services(self):
        # this loop is a little funny so a service can resume the init if it fails later
        if not self.rawserver.config['upnp']:
            return
        while self.current_service < len(self.services):
            service = self.services[self.current_service]
            self.current_service += 1
            try:
                self.logfunc(INFO, ("Trying: %s" % service.__name__))
                service(self)
                break
            except Exception, e:
                self.logfunc(WARNING, str(e))
        else:
            UnsupportedWarning(self.logfunc, "Unable to map a port using any service.")

    def resume_init_services(self):
        # re-enter init_services from the worker thread
        self.add_task(self.init_services)

    def attach_service(self, service):
        self.logfunc(INFO, ("Using: %s" % type(service).__name__))
        self.service = service
        self.add_task(self._flush_queue)

    def detach_service(self, service):
        if service != self.service:
            self.logfunc(ERROR, ("Service: %s is not in use!" % type(service).__name__))
            return
        self.logfunc(INFO, ("Detached: %s" % type(service).__name__))
        self.service = None

    def _flush_queue(self):
        # replay queued requests against the attached service, if any
        if self.service:
            for mapping in self.register_requests:
                self.add_task(self.service.safe_register_port, mapping)
            self.register_requests = []

            for request in self.unregister_requests:
                # unregisters can block, because they occur at shutdown
                self.service.unregister_port(*request)
            self.unregister_requests = []

            for request in self.list_requests:
                self.add_task(self._list_ports, request)
            self.list_requests = []

    def register_port(self, external_port, internal_port, protocol,
                      host = None, service_name = None):
        """Queue a mapping request; returns a Deferred fired with the
        external port actually granted."""
        mapping = UPnPPortMapping(external_port, internal_port, protocol,
                                  host, service_name)
        self.register_requests.append(mapping)

        self.add_task(self._flush_queue)

        return mapping.d

    def unregister_port(self, external_port, protocol):
        self.unregister_requests.append((external_port, protocol))

        # unregisters can block, because they occur at shutdown
        self._flush_queue()

    def _list_ports(self, d):
        matches = self.service._list_ports()
        d.callback(matches)

    def list_ports(self):
        """Return a Deferred fired with the current list of mappings."""
        d = defer.Deferred()
        self.list_requests.append(d)
        self.add_task(self._flush_queue)
        return d
+
+
+
+
class NATBase(object):
    """Common port-mapping logic shared by the UPnP backends.

    Subclasses override register_port/unregister_port/_list_ports;
    safe_register_port adds collision handling on top of them.
    """
    def __init__(self, logfunc):
        self.logfunc = logfunc
        self.log = InfoFileHandle(logfunc)

    def safe_register_port(self, new_mapping):
        """Register new_mapping with the gateway, re-using an equivalent
        existing mapping if present, or picking an unused external port
        if the requested one is already taken.  Fires new_mapping.d with
        the external port granted."""

        # check for the host now, while we're in the thread and before
        # we need to read it.
        new_mapping.populate_host()

        self.logfunc(INFO, "You asked for: " + str(new_mapping))
        # remember the request so a failing backend can retry it verbatim
        new_mapping.original_external_port = new_mapping.external_port
        mappings = self._list_ports()

        used_ports = []
        for mapping in mappings:
            #only consider ports which match the same protocol
            if mapping.protocol == new_mapping.protocol:
                # look for exact matches
                if (mapping.host == new_mapping.host and
                    mapping.internal_port == new_mapping.internal_port):
                    # the service name could not match, that's ok.
                    new_mapping.d.callback(mapping.external_port)
                    self.logfunc(INFO, "Already effectively mapped: " + str(new_mapping), optional=False)
                    return
                # otherwise, add it to the list of used external ports
                used_ports.append(mapping.external_port)

        if has_set:
            used_ports = set(used_ports)

        if (not has_set) or (len(used_ports) < 1000):
            # for small sets we can just guess around a little
            while new_mapping.external_port in used_ports:
                new_mapping.external_port += random.randint(1, 10)
                # maybe this happens, I really doubt it
                if new_mapping.external_port > 65535:
                    new_mapping.external_port = 1025
        else:
            # for larger sets we don't want to guess forever
            all_ports = set(range(1024, 65535))
            free_ports = all_ports - used_ports
            # BUGFIX: random.choice requires an indexable sequence;
            # passing the set directly raised TypeError on this path.
            new_mapping.external_port = random.choice(list(free_ports))

        self.logfunc(INFO, "I'll give you: " + str(new_mapping))
        self.register_port(new_mapping)

    def register_port(self, port):
        # overridden by subclasses
        pass
    def unregister_port(self, external_port, protocol):
        # overridden by subclasses
        pass
    def _list_ports(self):
        # overridden by subclasses; returns a list of UPnPPortMapping
        pass
+
class UPnPPortMapping(object):
    """Value object describing one NAT port-mapping request/result.

    Holds external/internal ports (coerced to int), protocol, LAN host
    and a human-readable service label; `d` is a Deferred fired with the
    external port actually granted.
    """
    def __init__(self, external_port, internal_port, protocol,
                 host = None, service_name = None):
        self.external_port = int(external_port)
        self.internal_port = int(internal_port)
        self.protocol = protocol
        self.host = host
        # fall back to the application name when no label was supplied
        self.service_name = service_name or app_name
        self.d = defer.Deferred()

    def populate_host(self):
        """Replace a missing or implausible host with this machine's IP.

        Anything that is not a string containing at least three dots is
        treated as invalid."""
        candidate = self.host
        looks_valid = isinstance(candidate, str) and candidate.count('.') >= 3
        if not looks_valid:
            self.host = get_host_ip()

    def __str__(self):
        template = "%s %s external:%d %s:%d"
        return template % (self.service_name, self.protocol,
                           self.external_port,
                           self.host, self.internal_port)
+
def VerifySOAPResponse(request, response):
    """Validate a SOAP HTTP response and return its *Response element.

    Raises HTTPError if the status is not 200 or if no m:*Response /
    u:*Response element is present in the body (devices vary in which
    namespace prefix they use).
    """
    if response.code != 200:
        raise HTTPError(request.get_full_url(),
                        response.code, str(response.msg) + " (unexpected SOAP response code)",
                        response.info(), response)

    data = response.read()
    bs = BeautifulSupe(data)
    soap_response = bs.scour("m:", "Response")
    if not soap_response:
        # maybe I should read the SOAP spec.
        soap_response = bs.scour("u:", "Response")
        if not soap_response:
            raise HTTPError(request.get_full_url(),
                            response.code, str(response.msg) +
                            " (incorrect SOAP response method)",
                            response.info(), response)
    return soap_response[0]
+
def SOAPResponseToDict(soap_response):
    """Flatten a SOAP *Response element into {tag name: first content
    string or None} for easy field access."""
    result = {}
    for element in soap_response.child_elements():
        if element.contents:
            result[element.name] = str(element.contents[0])
        else:
            result[element.name] = None
    return result
+
def SOAPErrorToString(response):
    """Produce a human-readable message from a SOAP failure.

    `response` is either an exception or an HTTP response body; in the
    latter case try to pull out the <errorDescription> element, falling
    back to str() of the object either way."""
    if isinstance(response, Exception):
        return str(response)
    payload = response.read()
    soup = BeautifulSupe(payload)
    description = soup.first('errorDescription')
    if description:
        return str(description.contents[0])
    return str(response)
+
# Shared FancyURLopener instance for the plain-string (GET by URL) path.
_urlopener = None
def urlopen_custom(req, rawserver):
    """Fetch a urllib2 Request (or plain URL string) using a hand-rolled
    HTTP/1.0 exchange.

    Returns an addinfourl-style response object with .code and .msg set.
    The manual path exists because some routers reject requests carrying
    the extra headers urllib/httplib add automatically (see comment
    below); a plain string URL falls through to FancyURLopener.
    """
    global _urlopener

    if not _urlopener:
        opener = FancyURLopener()
        _urlopener = opener
        #remove User-Agent
        del _urlopener.addheaders[:]

    if not isinstance(req, str):
        #for header in r.headers:
        #    _urlopener.addheaders.append((header, r.headers[header]))
        #return _urlopener.open(r.get_full_url(), r.data)

        # All this has to be done manually, since httplib and urllib 1 and 2
        # add headers to the request that some routers do not accept.
        # A minimal, functional request includes the headers:
        # Content-Length
        # Soapaction
        # I have found the following to be specifically disallowed:
        # User-agent
        # Connection
        # Accept-encoding

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(req.get_full_url())

        if not scheme.startswith("http"):
            raise ValueError("UPnP URL scheme is not http: " + req.get_full_url())

        if len(path) == 0:
            path = '/'

        if netloc.count(":") > 0:
            host, port = netloc.split(':', 1)
            try:
                port = int(port)
            except:
                raise ValueError("UPnP URL port is not int: " + req.get_full_url())
        else:
            host = netloc
            port = 80

        header_str = ''
        data = ''
        method = ''
        header_str = " " + path + " HTTP/1.0\r\n"
        if req.has_data():
            method = 'POST'
            header_str = method + header_str
            header_str += "Content-Length: " + str(len(req.data)) + "\r\n"
            data = req.data + "\r\n"
        else:
            method = 'GET'
            header_str = method + header_str

        header_str += "Host: " + host + ":" + str(port) + "\r\n"

        for header in req.headers:
            header_str += header + ": " + str(req.headers[header]) + "\r\n"

        header_str += "\r\n"
        data = header_str + data

        # register with rawserver so shutdown can account for the socket
        try:
            rawserver._add_pending_connection(host)
            s.connect((host, port))
        finally:
            rawserver._remove_pending_connection(host)

        s.send(data)
        # Let httplib parse the reply off the raw socket, then dress it
        # up as a urllib-style response object.
        r = HTTPResponse(s, method=method)
        r.begin()

        r.recv = r.read
        fp = socket._fileobject(r)

        resp = addinfourl(fp, r.msg, req.get_full_url())
        resp.code = r.status
        resp.msg = r.reason

        return resp
    return _urlopener.open(req)
+
+
+class ManualUPnP(NATBase, Handler):
+
+ upnp_addr = ('239.255.255.250', 1900)
+
+ search_string = ('M-SEARCH * HTTP/1.1\r\n' +
+ 'Host:239.255.255.250:1900\r\n' +
+ 'ST:urn:schemas-upnp-org:device:InternetGatewayDevice:1\r\n' +
+ 'Man:"ssdp:discover"\r\n' +
+ 'MX:3\r\n' +
+ '\r\n')
+
+ # if you think for one second that I'm going to implement SOAP in any fashion, you're crazy
+
+ get_mapping_template = ('<?xml version="1.0"?>' +
+ '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"' +
+ 's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">' +
+ '<s:Body>' +
+ '<u:GetGenericPortMappingEntry xmlns:u=' +
+ '"urn:schemas-upnp-org:service:WANIPConnection:1">' +
+ '<NewPortMappingIndex>%d</NewPortMappingIndex>' +
+ '</u:GetGenericPortMappingEntry>' +
+ '</s:Body>' +
+ '</s:Envelope>')
+
+ add_mapping_template = ('<?xml version="1.0"?>' +
+ '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle=' +
+ '"http://schemas.xmlsoap.org/soap/encoding/">' +
+ '<s:Body>' +
+ '<u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">' +
+ '<NewEnabled>1</NewEnabled>' +
+ '<NewRemoteHost></NewRemoteHost>' +
+ '<NewLeaseDuration>0</NewLeaseDuration>' +
+ '<NewInternalPort>%d</NewInternalPort>' +
+ '<NewExternalPort>%d</NewExternalPort>' +
+ '<NewProtocol>%s</NewProtocol>' +
+ '<NewInternalClient>%s</NewInternalClient>' +
+ '<NewPortMappingDescription>%s</NewPortMappingDescription>' +
+ '</u:AddPortMapping>' +
+ '</s:Body>' +
+ '</s:Envelope>')
+
+ delete_mapping_template = ('<?xml version="1.0"?>' +
+ '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle=' +
+ '"http://schemas.xmlsoap.org/soap/encoding/">' +
+ '<s:Body>' +
+ '<u:DeletePortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">' +
+ '<NewRemoteHost></NewRemoteHost>' +
+ '<NewExternalPort>%d</NewExternalPort>' +
+ '<NewProtocol>%s</NewProtocol>' +
+ '</u:DeletePortMapping>' +
+ '</s:Body>' +
+ '</s:Envelope>')
+
+ def _pretify(self, body):
+ # I actually found a router that needed one tag per line
+ body = body.replace('><', '>\r\n<')
+ body = body.encode('utf-8')
+ return body
+
+ def _build_get_mapping_request(self, pmi):
+ body = (self.get_mapping_template % (pmi))
+ body = self._pretify(body)
+ headers = {'SOAPAction': '"urn:schemas-upnp-org:service:WANIPConnection:1#' +
+ 'GetGenericPortMappingEntry"'}
+ return Request(self.controlURL, body, headers)
+
+ def _build_add_mapping_request(self, mapping):
+ body = (self.add_mapping_template % (mapping.internal_port, mapping.external_port, mapping.protocol,
+ mapping.host, mapping.service_name))
+ body = self._pretify(body)
+ headers = {'SOAPAction': '"urn:schemas-upnp-org:service:WANIPConnection:1#' +
+ 'AddPortMapping"'}
+ return Request(self.controlURL, body, headers)
+
+ def _build_delete_mapping_request(self, external_port, protocol):
+ body = (self.delete_mapping_template % (external_port, protocol))
+ body = self._pretify(body)
+ headers = {'SOAPAction': '"urn:schemas-upnp-org:service:WANIPConnection:1#' +
+ 'DeletePortMapping"'}
+ return Request(self.controlURL, body, headers)
+
+ def __init__(self, traverser):
+ NATBase.__init__(self, traverser.logfunc)
+
+ self.controlURL = None
+ self.transport = None
+ self.traverser = traverser
+ self.rawserver = traverser.rawserver
+
+ # this service can only be provided if rawserver supports multicast
+ if not hasattr(self.rawserver, "create_multicastsocket"):
+ raise AttributeError, "rawserver.create_multicastsocket"
+
+ self.rawserver.external_add_task(self.begin_discovery, 0, ())
+
+ def begin_discovery(self):
+
+ # bind to an available port, and join the multicast group
+ for p in xrange(self.upnp_addr[1], self.upnp_addr[1]+5000):
+ try:
+ # Original RawServer cannot do this!
+ s = self.rawserver.create_multicastsocket(p, get_host_ip())
+ self.transport = s
+ self.rawserver.start_listening_multicast(s, self)
+ s.listening_port.joinGroup(self.upnp_addr[0], socket.INADDR_ANY)
+ break
+ except socket.error, e:
+ pass
+
+ if not self.transport:
+ # resume init services, because we couldn't bind to a port
+ self.traverser.resume_init_services()
+ else:
+ self.transport.sendto(self.search_string, 0, self.upnp_addr)
+ self.transport.sendto(self.search_string, 0, self.upnp_addr)
+ self.rawserver.add_task(self._discovery_timedout, 6, ())
+
+ def _discovery_timedout(self):
+ if self.transport:
+ self.logfunc(WARNING, "Discovery timed out")
+ self.rawserver.stop_listening_multicast(self.transport)
+ self.transport = None
+ # resume init services, because we know we've failed
+ self.traverser.resume_init_services()
+
+ def register_port(self, mapping):
+ request = self._build_add_mapping_request(mapping)
+
+ try:
+ response = urlopen_custom(request, self.rawserver)
+ response = VerifySOAPResponse(request, response)
+ mapping.d.callback(mapping.external_port)
+ self.logfunc(INFO, "registered: " + str(mapping), optional=False)
+ except Exception, e: #HTTPError, URLError, BadStatusLine, you name it.
+ error = SOAPErrorToString(e)
+ mapping.d.errback(error)
+
+
+ def unregister_port(self, external_port, protocol):
+ request = self._build_delete_mapping_request(external_port, protocol)
+
+ try:
+ response = urlopen_custom(request, self.rawserver)
+ response = VerifySOAPResponse(request, response)
+ self.logfunc(INFO, ("unregisterd: %s, %s" % (external_port, protocol)), optional=False)
+ except Exception, e: #HTTPError, URLError, BadStatusLine, you name it.
+ error = SOAPErrorToString(e)
+ self.logfunc(ERROR, error)
+
+ def data_came_in(self, addr, datagram):
+ if self.transport is None:
+ return
+ statusline, response = datagram.split('\r\n', 1)
+ httpversion, statuscode, reasonline = statusline.split(None, 2)
+ if (not httpversion.startswith('HTTP')) or (statuscode != '200'):
+ return
+ headers = response.split('\r\n')
+ location = None
+ for header in headers:
+ prefix = 'location:'
+ if header.lower().startswith(prefix):
+ location = header[len(prefix):]
+ location = location.strip()
+ if location:
+ self.rawserver.stop_listening_multicast(self.transport)
+ self.transport = None
+
+ self.traverser.add_task(self._got_location, location)
+
+ def _got_location(self, location):
+ if self.controlURL is not None:
+ return
+
+ URLBase = location
+
+ data = urlopen_custom(location, self.rawserver).read()
+ bs = BeautifulSupe(data)
+
+ URLBase_tag = bs.first('URLBase')
+ if URLBase_tag and URLBase_tag.contents:
+ URLBase = str(URLBase_tag.contents[0])
+
+ wanservices = bs.fetch('service', dict(serviceType=
+ 'urn:schemas-upnp-org:service:WANIPConnection:'))
+ wanservices += bs.fetch('service', dict(serviceType=
+ 'urn:schemas-upnp-org:service:WANPPPConnection:'))
+ for service in wanservices:
+ controlURL = service.get('controlURL')
+ if controlURL:
+ self.controlURL = urlparse.urljoin(URLBase, controlURL)
+ break
+
+ if self.controlURL is None:
+ # resume init services, because we know we've failed
+ self.traverser.resume_init_services()
+ return
+
+ # attach service, so the queue gets flushed
+ self.traverser.attach_service(self)
+
+ def _list_ports(self):
+ mappings = []
+ index = 0
+
+ if self.controlURL is None:
+ raise UPnPException("ManualUPnP is not prepared")
+
+ while True:
+ request = self._build_get_mapping_request(index)
+
+ try:
+ response = urlopen_custom(request, self.rawserver)
+ soap_response = VerifySOAPResponse(request, response)
+ results = SOAPResponseToDict(soap_response)
+ mapping = UPnPPortMapping(results['NewExternalPort'], results['NewInternalPort'],
+ results['NewProtocol'], results['NewInternalClient'],
+ results['NewPortMappingDescription'])
+ mappings.append(mapping)
+ index += 1
+ except URLError, e:
+ # SpecifiedArrayIndexInvalid, for example
+ break
+ except (HTTPError, BadStatusLine), e:
+ self.logfunc(ERROR, ("list_ports failed with: %s" % (e)))
+
+
+ return mappings
+
class WindowsUPnPException(UPnPException):
    """UPnPException whose message is suffixed with the OS version, to
    make Windows-specific failure reports more diagnosable."""
    def __init__(self, msg, *args):
        msg += " (%s)" % os_version
        a = [msg] + list(args)
        UPnPException.__init__(self, *a)
+
class WindowsUPnP(NATBase):
    """UPnP backend that drives Windows' built-in HNetCfg.NATUPnP COM
    object instead of talking SOAP directly.

    Raises WindowsUPnPException from __init__ when the COM class or its
    StaticPortMappingCollection is unavailable, letting NatTraverser
    fall through to ManualUPnP.  COM failure codes below are matched by
    the numeric HRESULT in e[2][5].
    """
    def __init__(self, traverser):
        NATBase.__init__(self, traverser.logfunc)

        self.upnpnat = None
        self.port_collection = None
        self.traverser = traverser

        win32com.client.pythoncom.CoInitialize()

        try:
            self.upnpnat = win32com.client.Dispatch("HNetCfg.NATUPnP")
        except pywintypes.com_error, e:
            if (e[2][5] == -2147221005):
                raise WindowsUPnPException("invalid class string")
            else:
                raise

        try:
            self.port_collection = self.upnpnat.StaticPortMappingCollection
            if self.port_collection is None:
                raise WindowsUPnPException("none port_collection")
        except pywintypes.com_error, e:
            #if e[1].lower() == "exception occurred.":
            if (e[2][5] == -2147221164):
                #I think this is Class Not Registered
                #it happens on Windows 98 after the XP ICS wizard has been run
                raise WindowsUPnPException("exception occurred, class not registered")
            else:
                raise

        # attach service, so the queue gets flushed
        self.traverser.attach_service(self)


    def register_port(self, mapping):
        # Add the mapping via COM; on failure, detach and requeue so a
        # later backend (ManualUPnP) can retry the original request.
        try:
            self.port_collection.Add(mapping.external_port, mapping.protocol,
                                     mapping.internal_port, mapping.host,
                                     True, mapping.service_name)
            self.logfunc(INFO, "registered: " + str(mapping), optional=False)
            mapping.d.callback(mapping.external_port)
        except pywintypes.com_error, e:
            # host == 'fake' or address already bound
            #if (e[2][5] == -2147024726):
            # host == '', or I haven't a clue
            #e.args[0] == -2147024894

            #mapping.d.errback(e)

            # detach self so the queue isn't flushed
            self.traverser.detach_service(self)

            # restore the port the caller originally asked for before retrying
            if hasattr(mapping, 'original_external_port'):
                mapping.external_port = mapping.original_external_port
                del mapping.original_external_port

            # push this mapping back on the queue
            self.traverser.register_requests.append(mapping)

            # resume init services, because we know we've failed
            self.traverser.resume_init_services()

    def unregister_port(self, external_port, protocol):
        # Known benign failures are logged; anything else propagates.
        try:
            self.port_collection.Remove(external_port, protocol)
            self.logfunc(INFO, ("unregisterd: %s, %s" % (external_port, protocol)), optional=False)
        except pywintypes.com_error, e:
            if (e[2][5] == -2147352567):
                UPNPError(self.logfunc, ("Port %d:%s not bound" % (external_port, protocol)))
            elif (e[2][5] == -2147221008):
                UPNPError(self.logfunc, ("Port %d:%s is bound and is not ours to remove" % (external_port, protocol)))
            elif (e[2][5] == -2147024894):
                UPNPError(self.logfunc, ("Port %d:%s not bound (2)" % (external_port, protocol)))
            else:
                raise

    def _list_ports(self):
        """Return the gateway's current mappings as UPnPPortMapping
        objects; an unreadable collection yields an empty list."""
        mappings = []

        try:
            for mp in self.port_collection:
                mapping = UPnPPortMapping(mp.ExternalPort, mp.InternalPort, mp.Protocol,
                                          mp.InternalClient, mp.Description)
                mappings.append(mapping)
        except pywintypes.com_error, e:
            # it's the "for mp in self.port_collection" iter that can throw
            # an exception.
            # com_error: (-2147220976, 'The owner of the PerUser subscription is
            # not logged on to the system specified',
            # None, None)
            pass

        return mappings
+
diff --git a/NohGooee/NewVersion.py b/NohGooee/NewVersion.py
new file mode 100644
index 0000000..96ed2f5
--- /dev/null
+++ b/NohGooee/NewVersion.py
@@ -0,0 +1,281 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# written by Matt Chisholm
+
+import os
+import sys
+import zurllib
+import pickle
+import threading
+from sha import sha
+
+DEBUG = False
+
+from NohGooee import ERROR, WARNING, BTFailure, version, app_name
+from NohGooee import GetTorrent
+from NohGooee.bencode import bdecode, bencode
+from NohGooee.platform import os_version, spawn, get_temp_dir, doc_root, is_frozen_exe, osx
+from NohGooee.ConvertedMetainfo import ConvertedMetainfo
+
+if osx:
+ from Foundation import NSAutoreleasePool
+
+if is_frozen_exe or DEBUG:
+ # needed for py2exe to include the public key lib
+ from Crypto.PublicKey import DSA
+
# Base URL queried (per-platform suffixes appended) for the latest
# released version number.
version_host = 'http://version.bittorrent.com/'
# Page users are pointed at when a newer release is available.
download_url = 'http://bittorrent.com/download.html'
+
+# based on Version() class from ShellTools package by Matt Chisholm,
+# used with permission
class Version(list):
    """A dotted version number stored as a list of integers.

    An odd second component (e.g. 4.21.x) denotes a beta release;
    even means stable.
    """

    def __str__(self):
        return '.'.join([str(part) for part in self])

    def is_beta(self):
        # odd minor number == beta release
        return self[1] % 2 != 0

    def from_str(self, text):
        return Version(map(int, text.split('.')))

    def name(self):
        """Return the release channel name, 'beta' or 'stable'."""
        if not self.is_beta():
            return 'stable'
        return 'beta'

    from_str = classmethod(from_str)
+
# Version of the running client, parsed once at import time.
currentversion = Version.from_str(version)

# Filled in when a newer release is discovered by Updater.
availableversion = None
+
class Updater(object):
    """Background version checker/downloader for new client releases.

    Network work runs on a daemon thread (see check()); all results are
    delivered to the UI through the callbacks passed to __init__, wrapped
    with `threadwrap` so they execute on the UI thread.
    """

    def __init__(self, threadwrap, newversionfunc, startfunc, installfunc,
                 errorfunc, test_new_version='', test_current_version=''):
        self.threadwrap = threadwrap # for calling back to UI from thread
        self.newversionfunc = newversionfunc # alert to new version UI function
        self.startfunc = startfunc # start torrent UI function
        self.installfunc = installfunc # install torrent UI function
        self.errorfunc = errorfunc # report error UI function
        self.infohash = None
        self.version = currentversion
        self.currentversion = currentversion
        self.asked_for_install = False
        self.version_site = version_host
        # choose the per-platform subdirectory of the version server
        if os.name == 'nt':
            self.version_site += 'win32/'
            if os_version not in ('XP', '2000', '2003'):
                self.version_site += 'legacy/'
        elif osx:
            self.version_site += 'osx/'
        self.debug_mode = DEBUG
        if test_new_version:
            # test hook: pretend the server reports this version
            test_new_version = Version.from_str(test_new_version)
            self.debug_mode = True
            def _hack_get_available(url):
                return test_new_version
            # instance attribute shadows the _get_available method
            self._get_available = _hack_get_available
        if test_current_version:
            # test hook: pretend we are running this version
            self.debug_mode = True
            self.currentversion = Version.from_str(test_current_version)


    def debug(self, message):
        # debug messages surface in the UI as WARNINGs when enabled
        if self.debug_mode:
            self.threadwrap(self.errorfunc, WARNING, message)


    def _get_available(self, url):
        """Fetch and parse the version string served at `url`."""
        self.debug('Updater.get_available() hitting url %s' % url)
        try:
            u = zurllib.urlopen(url)
            s = u.read()
            s = s.strip()
        except:
            raise BTFailure(_("Could not get latest version from %s")%url)
        try:
            # expected payload is exactly five characters, e.g. '4.2.1'
            assert len(s) == 5
            availableversion = Version.from_str(s)
        except:
            raise BTFailure(_("Could not parse new version string from %s")%url)
        return availableversion


    def get_available(self):
        """Determine the newest applicable version, honoring beta rules."""
        url = self.version_site + self.currentversion.name()
        availableversion = self._get_available(url)
        if availableversion.is_beta():
            # only offer betas from our own beta series
            if availableversion[1] != self.currentversion[1]:
                availableversion = self.currentversion
        if self.currentversion.is_beta():
            # a newer stable release beats any beta
            stable_url = self.version_site + 'stable'
            available_stable_version = self._get_available(stable_url)
            if available_stable_version > availableversion:
                availableversion = available_stable_version
        self.version = availableversion
        self.debug('Updater.get_available() got %s' % str(self.version))
        return self.version


    def get(self):
        # NOTE(review): the early return below appears to deliberately
        # disable updating for this (RPM-packaged) build; everything after
        # it is unreachable.  Confirm before re-enabling.
        self.debug('Version check skipped for RPM package')
        return

        try:
            self.get_available()
        except BTFailure, e:
            self.threadwrap(self.errorfunc, WARNING, e)
            return

        if self.version <= self.currentversion:
            self.debug('Updater.get() not updating old version %s' % str(self.version))
            return

        if not self.can_install():
            self.debug('Updater.get() cannot install on this os')
            return

        self.installer_name = self.calc_installer_name()
        self.installer_url = self.version_site + self.installer_name + '.torrent'
        self.installer_dir = self.calc_installer_dir()

        self.torrentfile = None
        torrentfile, terrors = GetTorrent.get_url(self.installer_url)
        signature = None
        try:
            # detached, pickled DSA signature of the .torrent file
            signfile = zurllib.urlopen(self.installer_url + '.sign')
        except:
            self.debug('Updater.get() failed to get signfile %s.sign' % self.installer_url)
        else:
            try:
                signature = pickle.load(signfile)
            except:
                self.debug('Updater.get() failed to load signfile %s' % signfile)

        if terrors:
            self.threadwrap(self.errorfunc, WARNING, '\n'.join(terrors))

        if torrentfile and signature:
            # '@@PKIDIR@@' is presumably substituted at package-build time
            public_key_file = open('@@PKIDIR@@/bittorrent/public.key', 'rb')
            public_key = pickle.load(public_key_file)
            h = sha(torrentfile).digest()
            if public_key.verify(h, signature):
                # signature is valid: accept the torrent as the installer
                self.torrentfile = torrentfile
                b = bdecode(torrentfile)
                self.infohash = sha(bencode(b['info'])).digest()
                self.total_size = b['info']['length']
                self.debug('Updater.get() got torrent file and signature')
            else:
                self.debug('Updater.get() torrent file signature failed to verify.')
                pass
        else:
            self.debug('Updater.get() doesn\'t have torrentfile %s and signature %s' %
                       (str(type(torrentfile)), str(type(signature))))

    def installer_path(self):
        # full path of the downloaded installer, or None if no temp dir
        if self.installer_dir is not None:
            return os.path.join(self.installer_dir,
                                self.installer_name)
        else:
            return None

    def check(self):
        """Start a version check on a background daemon thread."""
        t = threading.Thread(target=self._check,
                             args=())
        t.setDaemon(True)
        t.start()

    def _check(self):
        if osx:
            # the ObjC runtime needs an autorelease pool on worker threads
            pool = NSAutoreleasePool.alloc().init()
        self.get()
        if self.version > self.currentversion:
            self.threadwrap(self.newversionfunc, self.version, download_url)

    def can_install(self):
        # automatic install is only supported on Windows and OS X
        if self.debug_mode:
            return True
        if os.name == 'nt':
            return True
        elif osx:
            return True
        else:
            return False

    def calc_installer_name(self):
        """Build the installer filename, e.g. 'AppName-4.2.1-Beta.exe'."""
        if os.name == 'nt':
            ext = 'exe'
        elif osx:
            ext = 'dmg'
        elif os.name == 'posix' and self.debug_mode:
            ext = 'tar.gz'
        else:
            # unsupported platform: implicitly returns None
            return

        parts = [app_name, str(self.version)]
        if self.version.is_beta():
            parts.append('Beta')
        name = '-'.join(parts)
        name += '.' + ext
        return name

    def set_installer_dir(self, path):
        self.installer_dir = path

    def calc_installer_dir(self):
        # an explicitly-set directory wins over the system temp dir
        if hasattr(self, 'installer_dir'):
            return self.installer_dir

        temp_dir = get_temp_dir()
        if temp_dir is not None:
            return temp_dir
        else:
            # no usable location: report and implicitly return None
            self.errorfunc(WARNING,
                           _("Could not find a suitable temporary location to "
                             "save the %s %s installer.") % (app_name, self.version))

    def installer_downloaded(self):
        """Return True when the installer exists with the expected size."""
        if self.installer_path() and os.access(self.installer_path(), os.F_OK):
            size = os.stat(self.installer_path())[6]  # index 6 == st_size
            if size == self.total_size:
                return True
            else:
                #print 'installer is wrong size, is', size, 'should be', self.total_size
                return False
        else:
            #print 'installer does not exist'
            return False

    def download(self):
        """Ask the UI to start downloading the installer torrent."""
        if self.torrentfile is not None:
            self.startfunc(self.torrentfile, self.installer_path())
        else:
            self.errorfunc(WARNING, _("No torrent file available for %s %s "
                                      "installer.")%(app_name, self.version))

    def start_install(self):
        """Launch the installer once, if fully downloaded."""
        if not self.asked_for_install:
            if self.installer_downloaded():
                self.asked_for_install = True
                self.installfunc()
            else:
                self.errorfunc(WARNING,
                               _("%s %s installer appears to be incomplete, "
                                 "missing, or corrupt.")%(app_name,
                                                          self.version))

    def launch_installer(self, torrentqueue):
        if os.name == 'nt':
            # "/S" runs the Windows installer silently
            spawn(torrentqueue, self.installer_path(), "/S")
        else:
            self.errorfunc(WARNING, _("Cannot launch installer on this OS"))
diff --git a/NohGooee/PeerID.py b/NohGooee/PeerID.py
new file mode 100644
index 0000000..d116abe
--- /dev/null
+++ b/NohGooee/PeerID.py
@@ -0,0 +1,28 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.0 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Matt Chisholm
+
+import os
+from sha import sha
+from time import time
+try:
+ getpid = os.getpid
+except AttributeError:
+ def getpid():
+ return 1
+
+from NohGooee import version
+
def make_id():
    """Build a 20-byte peer id.

    Layout: 'M' + client version with dots replaced by dashes, dash-padded
    to 8 bytes, followed by 12 hex characters derived from the current
    time and process id.
    """
    prefix = 'M' + version.split()[0].replace('.', '-')
    prefix = prefix + ('-' * (8 - len(prefix)))
    entropy = sha(repr(time()) + ' ' + str(getpid())).digest()
    return prefix + entropy[-6:].encode('hex')
diff --git a/NohGooee/PiecePicker.py b/NohGooee/PiecePicker.py
new file mode 100644
index 0000000..e22b13a
--- /dev/null
+++ b/NohGooee/PiecePicker.py
@@ -0,0 +1,138 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from random import randrange, shuffle, choice
+
+
class PiecePicker(object):
    """Chooses which piece to download next (rarest-first strategy).

    Pieces are bucketed by rarity: interests[n] lists the pieces that
    exactly n connected peers have (and we do not).  pos_in_interests
    maps each piece to its index inside its bucket so that moves between
    buckets are O(1) swap-with-last operations.
    """

    def __init__(self, numpieces, config):
        self.config = config
        self.numpieces = numpieces
        # interests[n]: pieces advertised by exactly n peers
        self.interests = [range(numpieces)]
        # pos_in_interests[p]: index of piece p inside its interests bucket
        self.pos_in_interests = range(numpieces)
        # numinterests[p]: number of connected peers that have piece p
        self.numinterests = [0] * numpieces
        # have[p]: whether we already completed piece p
        self.have = [False] * numpieces
        # crosscount[n]: number of pieces whose (peer count + have) == n;
        # crosscount[0] > 0 means some piece is available nowhere
        self.crosscount = [numpieces]
        self.started = []      # pieces with outstanding requests
        self.seedstarted = []  # pieces requested from seed connections
        self.numgot = 0
        # random order used until rarest-first kicks in
        self.scrambled = range(numpieces)
        shuffle(self.scrambled)

    def got_have(self, piece):
        """A peer announced it has `piece`; update rarity bookkeeping."""
        numint = self.numinterests[piece]
        self.crosscount[numint + self.have[piece]] -= 1
        self.numinterests[piece] += 1
        try:
            self.crosscount[numint + 1 + self.have[piece]] += 1
        except IndexError:
            # first piece to reach this availability level
            self.crosscount.append(1)
        if self.have[piece]:
            # completed pieces are not tracked in the interests buckets
            return
        if numint == len(self.interests) - 1:
            self.interests.append([])
        self._shift_over(piece, self.interests[numint], self.interests[numint + 1])

    def lost_have(self, piece):
        """A peer that had `piece` went away; reverse of got_have()."""
        numint = self.numinterests[piece]
        self.crosscount[numint + self.have[piece]] -= 1
        self.numinterests[piece] -= 1
        self.crosscount[numint - 1 + self.have[piece]] += 1
        if self.have[piece]:
            return
        self._shift_over(piece, self.interests[numint], self.interests[numint - 1])

    def _shift_over(self, piece, l1, l2):
        # Move `piece` from bucket l1 to a random position in bucket l2,
        # keeping pos_in_interests consistent.  Removal from l1 is done
        # by swapping with the last element (O(1)).
        p = self.pos_in_interests[piece]
        l1[p] = l1[-1]
        self.pos_in_interests[l1[-1]] = p
        del l1[-1]
        newp = randrange(len(l2) + 1)
        if newp == len(l2):
            self.pos_in_interests[piece] = len(l2)
            l2.append(piece)
        else:
            # displace the current occupant of the random slot to the end
            old = l2[newp]
            self.pos_in_interests[old] = len(l2)
            l2.append(old)
            l2[newp] = piece
            self.pos_in_interests[piece] = newp

    def requested(self, piece, seed = False):
        """Record that a request for `piece` has been sent out."""
        if piece not in self.started:
            self.started.append(piece)
        if seed and piece not in self.seedstarted:
            self.seedstarted.append(piece)

    def complete(self, piece):
        """Mark `piece` as fully downloaded and remove it from tracking."""
        assert not self.have[piece]
        self.have[piece] = True
        self.crosscount[self.numinterests[piece]] -= 1
        try:
            self.crosscount[self.numinterests[piece] + 1] += 1
        except IndexError:
            self.crosscount.append(1)
        self.numgot += 1
        # drop the piece from its interests bucket (swap-with-last)
        l = self.interests[self.numinterests[piece]]
        p = self.pos_in_interests[piece]
        l[p] = l[-1]
        self.pos_in_interests[l[-1]] = p
        del l[-1]
        try:
            # NOTE(review): if the first remove() succeeds and the second
            # raises, the piece is removed from started only -- appears to
            # be the intended best-effort behavior
            self.started.remove(piece)
            self.seedstarted.remove(piece)
        except ValueError:
            pass

    def next(self, havefunc, seed = False):
        """Return the next piece to request from a peer, or None.

        havefunc -- predicate: does this peer have piece i?
        seed     -- True when picking for a seed connection.

        Preference order: rarest among already-started pieces, then (early
        in the download) random pieces, then rarest-first over all pieces.
        """
        bests = None
        bestnum = 2 ** 30
        if seed:
            s = self.seedstarted
        else:
            s = self.started
        # prefer finishing the rarest partially-downloaded piece
        for i in s:
            if havefunc(i):
                if self.numinterests[i] < bestnum:
                    bests = [i]
                    bestnum = self.numinterests[i]
                elif self.numinterests[i] == bestnum:
                    bests.append(i)
        if bests:
            return choice(bests)
        if self.numgot < self.config['rarest_first_cutoff']:
            # early in the download: random order gets pieces flowing fast
            for i in self.scrambled:
                if havefunc(i):
                    return i
            return None
        # rarest-first: scan buckets from rarest upward
        for i in xrange(1, min(bestnum, len(self.interests))):
            for j in self.interests[i]:
                if havefunc(j):
                    return j
        return None

    def am_I_complete(self):
        # True once every piece has been completed
        return self.numgot == self.numpieces

    def bump(self, piece):
        """Deprioritize `piece` by moving it to the end of its bucket."""
        l = self.interests[self.numinterests[piece]]
        pos = self.pos_in_interests[piece]
        del l[pos]
        l.append(piece)
        # reindex everything after the removal point
        for i in range(pos,len(l)):
            self.pos_in_interests[l[i]] = i
        try:
            self.started.remove(piece)
            self.seedstarted.remove(piece)
        except ValueError:
            pass
diff --git a/NohGooee/RateLimiter.py b/NohGooee/RateLimiter.py
new file mode 100644
index 0000000..12d3205
--- /dev/null
+++ b/NohGooee/RateLimiter.py
@@ -0,0 +1,190 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Uoti Urpala and Andrew Loewenstern
+
+from NohGooee.platform import bttime
+
def minctx(a, b):
    """Return whichever rate context must wait less before sending.

    A context's wait estimate is offset_amount / rate; a disabled limit
    (rate <= 0) counts as no wait.  Ties go to `a`.
    """
    wait_a = 0
    wait_b = 0
    if a.rate > 0:
        wait_a = a.offset_amount / a.rate
    if b.rate > 0:
        wait_b = b.offset_amount / b.rate
    if wait_a > wait_b:
        return b
    return a
+
class Dummy(object):
    """Placeholder for a closed upload in the round-robin send chain.

    Keeps the circular linked list intact while never sending anything.
    """

    closed = False

    def __init__(self, next):
        # link to the following upload in the chain
        self.next_upload = next

    def send_partial(self, size):
        """A dummy never transmits; always report zero bytes sent."""
        return 0
+
class RateLimitedGroup(object):
    """Rate-limit bookkeeping shared by a group of connections.

    The limit is supplied in KiB/s but held internally in bytes/s;
    offset_amount tracks the group's outstanding byte "debt".
    """

    def __init__(self, rate, got_exception):
        self.got_exception = got_exception
        # limiting state
        self.check_time = 0
        self.lasttime = bttime()
        self.offset_amount = 0
        self.set_rate(rate)
        # accounting state
        self.count = 0
        self.counts = []

    def set_rate(self, new_rate):
        """Change the limit (KiB/s); resets debt so it applies at once."""
        self.rate = new_rate * 1024
        self.check_time = 0
        self.offset_amount = 0
+
class MultiRateLimiter(object):
    """Round-robin upload scheduler enforcing a global rate limit plus
    per-context (RateLimitedGroup) limits.

    Uploads form a circular linked list through conn.next_upload with
    self.last pointing at the tail.  offset_amount is a byte "debt":
    sending pauses while it is positive and the debt decays over time
    at the configured rate.
    """

    def __init__(self, sched):
        self.sched = sched        # scheduler: sched(callable, delay_seconds)
        self.last = None          # tail of the circular upload list
        self.upload_rate = 0      # global limit in bytes/s (0 = unlimited)
        self.unitsize = 17000
        self.offset_amount = 0    # global byte debt
        self.ctxs = [] # list of contexts with connections in the queue
        self.ctx_counts = {} # dict ctx -> how many queued connections it has

    def set_parameters(self, rate, unitsize):
        """Set the global limit; `rate` in KiB/s, `unitsize` in bytes."""
        if unitsize > 17000:
            # Since data is sent to peers in a round-robin fashion, max one
            # full request at a time, setting this higher would send more data
            # to peers that use request sizes larger than standard 16 KiB.
            # 17000 instead of 16384 to allow room for metadata messages.
            unitsize = 17000
        self.upload_rate = rate * 1024
        self.unitsize = unitsize
        self.lasttime = bttime()
        self.offset_amount = 0

    def queue(self, conn, ctx):
        """Add `conn` (in rate context `ctx`) to the send rotation."""
        assert conn.next_upload is None
        if ctx not in self.ctxs:
            ctx.check_time = 1
            self.ctxs.append(ctx)
            self.ctx_counts[ctx] = 1
        else:
            self.ctx_counts[ctx] += 1

        if self.last is None:
            # first connection: one-element ring, start sending immediately
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            # splice conn in as the new tail
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            self.last = conn

    def increase_offset(self, bytes):
        # externally add to the global debt (e.g. for protocol overhead)
        self.offset_amount += bytes

    def try_send(self, check_time = False):
        """Send around the ring until some rate debt stops us.

        Decays debts by elapsed time, then repeatedly sends unitsize
        chunks while both the global debt and the smallest per-context
        debt permit; when throttled, reschedules itself for when the
        debt should be paid off.
        """
        t = bttime()
        cur = self.last.next_upload

        # decay the global debt by elapsed time
        if self.upload_rate > 0:
            self.offset_amount -= (t - self.lasttime) * self.upload_rate
            if check_time:
                # cap accumulated credit so idle time can't cause a burst
                self.offset_amount = max(self.offset_amount, -1 * self.unitsize)
        else:
            self.offset_amount = 0

        self.lasttime = t

        # decay each context's debt the same way
        for ctx in self.ctxs:
            if ctx.rate == 0:
                ctx.offset_amount = 0
                ctx.lasttime = t
            elif ctx.lasttime != t:
                ctx.offset_amount -=(t - ctx.lasttime) * ctx.rate
                ctx.lasttime = t
                if ctx.check_time:
                    ctx.offset_amount = max(ctx.offset_amount, -1 * self.unitsize)

        min_offset = reduce(minctx, self.ctxs)
        ctx = cur.encoder.context.rlgroup
        while self.offset_amount <= 0 and min_offset.offset_amount <= 0:
            # global quota and at least one context quota are available
            if ctx.offset_amount <= 0:
                try:
                    bytes = cur.send_partial(self.unitsize)
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    # report to the owning context; treat as nothing sent
                    cur.encoder.context.rlgroup.got_exception(e)
                    cur = self.last.next_upload
                    bytes = 0

                if self.upload_rate > 0:
                    self.offset_amount += bytes
                if ctx.rate > 0:
                    ctx.offset_amount += bytes

                ctx.count += bytes  # accounting

                if bytes == 0 or not cur.connection.is_flushed():
                    # connection finished or socket buffer full: unlink it
                    if self.last is cur:
                        # it was the only ring member; ring is now empty
                        self.last = None
                        cur.next_upload = None
                        self.ctx_counts = {}
                        self.ctxs = []
                        break
                    else:
                        self.last.next_upload = cur.next_upload
                        cur.next_upload = None
                        old = ctx
                        cur = self.last.next_upload
                        ctx = cur.encoder.context.rlgroup
                        self.ctx_counts[old] -= 1
                        if self.ctx_counts[old] == 0:
                            del(self.ctx_counts[old])
                            self.ctxs.remove(old)
                            if min_offset == old:
                                min_offset = reduce(minctx, self.ctxs)
                else:
                    # sent successfully and still flushed: advance the ring
                    self.last = cur
                    cur = cur.next_upload
                    ctx = cur.encoder.context.rlgroup
                    min_offset = reduce(minctx, self.ctxs)
            else:
                # this context is over quota: skip to the next connection
                self.last = cur
                cur = self.last.next_upload
                ctx = cur.encoder.context.rlgroup
        else:
            # while-else: loop ended because a quota ran out (no break);
            # schedule another attempt for when the debt should be repaid
            myDelay = minCtxDelay = 0
            if self.upload_rate > 0:
                myDelay = 1.0 * self.offset_amount / self.upload_rate
            if min_offset.rate > 0:
                minCtxDelay = 1.0 * min_offset.offset_amount / min_offset.rate
            delay = max(myDelay, minCtxDelay)
            self.sched(self.try_send, delay)


    def clean_closed(self):
        """Replace closed connections in the ring with Dummy placeholders."""
        if self.last is None:
            return
        orig = self.last
        if self.last.closed:
            self.last = Dummy(self.last.next_upload)
            self.last.encoder = orig.encoder
        c = self.last
        while True:
            if c.next_upload is orig:
                # completed the loop around the ring
                c.next_upload = self.last
                break
            if c.next_upload.closed:
                o = c.next_upload
                c.next_upload = Dummy(c.next_upload.next_upload)
                c.next_upload.encoder = o.encoder
            c = c.next_upload
+
diff --git a/NohGooee/RateMeasure.py b/NohGooee/RateMeasure.py
new file mode 100644
index 0000000..e5101ff
--- /dev/null
+++ b/NohGooee/RateMeasure.py
@@ -0,0 +1,63 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from NohGooee.platform import bttime
+
+
class RateMeasure(object):
    """Tracks transfer rate and estimates time remaining for `left` bytes."""

    def __init__(self, left):
        self.left = left           # bytes still to transfer
        self.start = None          # start of the measurement window
        self.last = None           # time of the last update
        self.rate = 0
        self.remaining = None      # estimated seconds left, or None
        self.broke = False         # True once the window exceeded 20s
        self.got_anything = False  # has any data arrived yet?

    def data_came_in(self, amount):
        if self.got_anything:
            self.update(bttime(), amount)
            return
        # First data ever: backdate the window start slightly so the
        # initial rate estimate is finite.
        self.got_anything = True
        self.start = bttime() - 2
        self.last = self.start
        self.left -= amount

    def data_rejected(self, amount):
        # data failed verification: it still needs to be transferred
        self.left += amount

    def get_time_left(self):
        """Return the current ETA in seconds, or None if unknown."""
        if not self.got_anything:
            return None
        now = bttime()
        if now - self.last > 15:
            # stalled: decay the estimate with a zero-byte update
            self.update(now, 0)
        return self.remaining

    def get_size_left(self):
        return self.left

    def update(self, t, amount):
        self.left -= amount
        try:
            # time-weighted running average of the rate
            self.rate = ((self.rate * (self.last - self.start)) + amount) / (t - self.start)
            self.last = t
            self.remaining = self.left / self.rate
            if self.start < self.last - self.remaining:
                # keep the window no longer than the projected remainder
                self.start = self.last - self.remaining
        except ZeroDivisionError:
            self.remaining = None
        if self.broke and self.last - self.start < 20:
            self.start = self.last - 20
        if self.last - self.start > 20:
            self.broke = True
diff --git a/NohGooee/Rerequester.py b/NohGooee/Rerequester.py
new file mode 100644
index 0000000..b899ca9
--- /dev/null
+++ b/NohGooee/Rerequester.py
@@ -0,0 +1,293 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen, Uoti Urpala
+
+from threading import Thread
+from socket import error, gethostbyname
+from random import random, randrange
+from binascii import b2a_hex
+
+from NohGooee import version
+from NohGooee.platform import bttime
+from NohGooee.zurllib import urlopen, quote, Request
+from NohGooee.btformats import check_peers
+from NohGooee.bencode import bencode, bdecode
+from NohGooee import BTFailure, INFO, WARNING, ERROR, CRITICAL
+
+
class Rerequester(object):
    """Periodic HTTP tracker announcer for one torrent.

    The actual HTTP request runs on a worker thread (_rerequest); the
    result is handed back to the main thread via `externalsched` and
    processed in _postrequest.  `begin()` reschedules `_check()` every
    60 seconds to decide whether an announce is due.
    """

    def __init__(self, url, config, sched, howmany, connect, externalsched,
                 amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
                 upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc):
        self.baseurl = url
        self.infohash = infohash
        self.peerid = None            # peer id currently announced
        self.wanted_peerid = myid     # peer id we want to announce
        self.port = port
        self.url = None               # base announce URL incl. peer id/port
        self.config = config
        self.last = None              # tracker-supplied 'last' token
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany        # callable: current peer count
        self.connect = connect        # callable: open connection to a peer
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.errorfunc = errorfunc
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.ever_got_incoming = ever_got_incoming
        self.diefunc = diefunc
        self.successfunc = sfunc
        self.finish = False           # 'completed' event pending?
        self.current_started = None   # bttime() when in-flight announce began
        self.fail_wait = None         # backoff delay after failures
        self.last_time = None         # bttime() of last announce
        self.previous_down = 0
        self.previous_up = 0
        self.tracker_num_peers = None
        self.tracker_num_seeds = None

    def _makeurl(self, peerid, port):
        # the random 4-byte 'key' lets the tracker recognize us across
        # IP changes
        return ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
                (self.baseurl, quote(self.infohash), quote(peerid), str(port),
                 b2a_hex(''.join([chr(randrange(256)) for i in xrange(4)]))))

    def change_port(self, peerid, port):
        """Switch to a new peer id/port; triggers stop+restart announces."""
        self.wanted_peerid = peerid
        self.port = port
        self.last = None
        self.trackerid = None
        self._check()

    def begin(self):
        # reschedules itself every 60 seconds
        if self.sched:
            self.sched(self.begin, 60)
        self._check()

    def announce_finish(self):
        self.finish = True
        self._check()

    def announce_stop(self):
        self._announce(2)  # event index 2 == 'stopped'

    def _check(self):
        """Decide whether an announce is due and fire it if so."""
        if self.current_started is not None:
            # an announce is already in flight; warn if it looks stuck
            if self.current_started <= bttime() - 58:
                self.errorfunc(WARNING,
                               _("Tracker announce still not complete "
                                 "%d seconds after starting it") %
                               int(bttime() - self.current_started))
            return
        if self.peerid is None:
            # first announce with the wanted peer id
            self.peerid = self.wanted_peerid
            self.url = self._makeurl(self.peerid, self.port)
            self._announce(0)  # 'started'
            return
        if self.peerid != self.wanted_peerid:
            # peer id changed (e.g. port change): send 'stopped' for the
            # old identity and reset the transfer counters
            self._announce(2)
            self.peerid = None
            self.previous_up = self.up()
            self.previous_down = self.down()
            return
        if self.finish:
            self.finish = False
            self._announce(1)  # 'completed'
            return
        if self.fail_wait is not None:
            # in failure backoff: retry only when the wait has elapsed
            if self.last_time + self.fail_wait <= bttime():
                self._announce()
            return
        if self.last_time > bttime() - self.config['rerequest_interval']:
            return
        if self.ever_got_incoming():
            # we are reachable from outside: need fewer outgoing peers
            getmore = self.howmany() <= self.config['min_peers'] / 3
        else:
            getmore = self.howmany() < self.config['min_peers']
        if getmore or bttime() - self.last_time > self.announce_interval:
            self._announce()

    def _announce(self, event=None):
        """Build the announce URL and fire the HTTP request thread."""
        self.current_started = bttime()
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
             (self.url, str(self.up() - self.previous_up),
              str(self.down() - self.previous_down), str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.config['max_initiate']:
            # enough peers already; ask the tracker for none
            s += '&numwant=0'
        else:
            s += '&compact=1'
        if event is not None:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        t = Thread(target=self._rerequest, args=[s, self.peerid])
        t.setDaemon(True)
        t.start()

    # Must destroy all references that could cause reference circles
    def cleanup(self):
        self.sched = None
        self.howmany = None
        self.connect = None
        self.externalsched = lambda *args: None
        self.amount_left = None
        self.up = None
        self.down = None
        self.errorfunc = None
        self.upratefunc = None
        self.downratefunc = None
        self.ever_got_incoming = None
        self.diefunc = None
        self.successfunc = None

    def _rerequest(self, url, peerid):
        """Worker thread: perform the HTTP announce, then schedule the
        result handler back on the main thread."""
        if self.config['ip']:
            try:
                url += '&ip=' + gethostbyname(self.config['ip'])
            except Exception, e:
                self.errorfunc(WARNING, _("Problem connecting to tracker, gethostbyname failed - ") + str(e))
        request = Request(url)
        request.add_header('User-Agent', 'BitTorrent/' + version)
        if self.config['tracker_proxy']:
            request.set_proxy(self.config['tracker_proxy'], 'http')
        try:
            h = urlopen(request)
            data = h.read()
            h.close()
        # urllib2 can raise various crap that doesn't have a common base
        # exception class especially when proxies are used, at least
        # ValueError and stuff from httplib
        except Exception, e:
            def f(r=_("Problem connecting to tracker - ") + str(e)):
                self._postrequest(errormsg=r, peerid=peerid)
        else:
            def f():
                self._postrequest(data, peerid=peerid)
        self.externalsched(f, 0)

    def _fail(self):
        # multiplicative backoff with jitter, capped by config
        if self.fail_wait is None:
            self.fail_wait = 50
        else:
            self.fail_wait *= 1.4 + random() * .2
        self.fail_wait = min(self.fail_wait,
                             self.config['max_announce_retry_interval'])

    def _postrequest(self, data=None, errormsg=None, peerid=None):
        """Main thread: process an announce result (or failure)."""
        self.current_started = None
        self.last_time = bttime()
        if errormsg is not None:
            self.errorfunc(WARNING, errormsg)
            self._fail()
            return
        try:
            r = bdecode(data)
            check_peers(r)
        except BTFailure, e:
            if data != '':
                self.errorfunc(ERROR, _("bad data from tracker - ") + str(e))
            self._fail()
            return
        if type(r.get('complete')) in (int, long) and \
           type(r.get('incomplete')) in (int, long):
            self.tracker_num_seeds = r['complete']
            self.tracker_num_peers = r['incomplete']
        else:
            self.tracker_num_seeds = self.tracker_num_peers = None
        if r.has_key('failure reason'):
            if self.howmany() > 0:
                self.errorfunc(ERROR, _("rejected by tracker - ") +
                               r['failure reason'])
            else:
                # rejected with no peers connected: abort the torrent
                # sched shouldn't be strictly necessary
                def die():
                    self.diefunc(CRITICAL,
                                 _("Aborting the torrent as it was rejected by "
                                   "the tracker while not connected to any peers. ") +
                                 _(" Message from the tracker: ") + r['failure reason'])
                self.sched(die, 0)
            self._fail()
        else:
            self.fail_wait = None
            if r.has_key('warning message'):
                self.errorfunc(ERROR, _("warning from tracker - ") +
                               r['warning message'])
            self.announce_interval = r.get('interval', self.announce_interval)
            self.config['rerequest_interval'] = r.get('min interval',
                                                      self.config['rerequest_interval'])
            self.trackerid = r.get('tracker id', self.trackerid)
            self.last = r.get('last')
            p = r['peers']
            peers = []
            if type(p) == str:
                # compact format: 6 bytes per peer (4 IP + 2 port, big-endian)
                for x in xrange(0, len(p), 6):
                    ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                    port = (ord(p[x+4]) << 8) | ord(p[x+5])
                    peers.append((ip, port, None))
            else:
                for x in p:
                    peers.append((x['ip'], x['port'], x.get('peer id')))
            ps = len(peers) + self.howmany()
            if ps < self.config['max_initiate']:
                # still short of peers: clear 'last' so the tracker keeps
                # treating us as wanting more
                if self.doneflag.isSet():
                    if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                        self.last = None
                else:
                    if r.get('num peers', 1000) > ps * 1.2:
                        self.last = None
            for x in peers:
                self.connect((x[0], x[1]), x[2])
            if peerid == self.wanted_peerid:
                self.successfunc()
            self._check()
+
+class DHTRerequester(Rerequester):
+ def __init__(self, config, sched, howmany, connect, externalsched,
+ amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
+ upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc, dht):
+ self.dht = dht
+ Rerequester.__init__(self, "http://localhost/announce", config, sched, howmany, connect, externalsched,
+ amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
+ upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc)
+
+ def _announce(self, event=None):
+ self.current_started = bttime()
+ self._rerequest("", self.peerid)
+
+ def _rerequest(self, url, peerid):
+ self.peers = ""
+ try:
+ self.dht.getPeersAndAnnounce(self.infohash, self.port, self._got_peers)
+ except Exception, e:
+ self._postrequest(errormsg=_("Trackerless lookup failed: ") + str(e), peerid=self.wanted_peerid)
+
+ def _got_peers(self, peers):
+ if not self.howmany:
+ return
+ if not peers:
+ self._postrequest(bencode({'peers':''}), peerid=self.wanted_peerid)
+ else:
+ self._postrequest(bencode({'peers':peers[0]}), peerid=None)
+
+ def _announced_peers(self, nodes):
+ pass
+
+ def announce_stop(self):
+ # don't do anything
+ pass
diff --git a/NohGooee/StatusLight.py b/NohGooee/StatusLight.py
new file mode 100644
index 0000000..e1668e4
--- /dev/null
+++ b/NohGooee/StatusLight.py
@@ -0,0 +1,108 @@
+from NohGooee.platform import bttime
+
class StatusLight(object):
    """Small state machine behind the connectivity status icon.

    Events (messages) map to states via `messages`, optionally overridden
    per current state by `transitions`.  Staying in 'pre-natted' longer
    than `time_to_nat` degrades to 'natted'.
    """

    initial_state = 'stopped'

    states = {
        # state : (stock icon name, tool tip),
        'stopped' : ('bt-status-stopped',
                     _("Paused")),
        'empty' : ('bt-status-stopped',
                   _("No torrents")),
        'starting' : ('bt-status-starting',
                      _("Starting download")),
        'pre-natted': ('bt-status-pre-natted',
                       _("Starting download")),
        'running' : ('bt-status-running',
                     _("Running normally")),
        'natted' : ('bt-status-natted',
                    _("Downloads may be slow:\nProbably firewalled/NATted")),
        'broken' : ('bt-status-broken',
                    _("Check network connection")),
        }

    messages = {
        # message : default new state,
        'stop' : 'stopped' ,
        'empty' : 'empty' ,
        'start' : 'starting' ,
        'seen_peers' : 'pre-natted',
        'seen_remote_peers' : 'running' ,
        'broken' : 'broken' ,
        }

    transitions = {
        # state : { message : custom new state, },
        'pre-natted' : { 'start' : 'pre-natted',
                         'seen_peers' : 'pre-natted',},
        'running' : { 'start' : 'running' ,
                      'seen_peers' : 'running' ,},
        'natted' : { 'start' : 'natted' ,
                     'seen_peers' : 'natted' ,},
        'broken' : { 'start' : 'broken' ,},
        #TODO: add broken transitions
        }

    time_to_nat = 60 * 5 # 5 minutes

    def __init__(self):
        self.mystate = self.initial_state
        self.start_time = None  # when we entered 'pre-natted'

    def send_message(self, message):
        """Feed an event into the machine, possibly changing state."""
        if message not in self.messages:
            # unknown event: ignore silently
            return
        new_state = self.messages[message]
        overrides = self.transitions.get(self.mystate)
        if overrides and message in overrides:
            new_state = overrides[message]

        # special pre-natted timeout logic
        if new_state == 'pre-natted':
            if self.mystate != 'pre-natted':
                # entering pre-natted: start the NAT timer
                self.start_time = bttime()
            elif bttime() - self.start_time > self.time_to_nat:
                # stuck pre-natted too long: assume we are NATted
                new_state = 'natted'

        if new_state != self.mystate:
            self.mystate = new_state
            self.change_state()

    def change_state(self):
        """Hook for subclasses; invoked whenever mystate changes."""
        pass
+
+
+import gtk
+
class GtkStatusLight(gtk.EventBox, StatusLight):
    """Gtk widget that renders StatusLight states as stock icons."""

    def __init__(self, main):
        gtk.EventBox.__init__(self)
        StatusLight.__init__(self)
        self.main = main
        self.image = None
        # pre-build one gtk.Image per known state
        self.images = {}
        for state, (stock, tip) in self.states.items():
            img = gtk.Image()
            img.set_from_stock(stock, gtk.ICON_SIZE_LARGE_TOOLBAR)
            img.show()
            self.images[state] = img
        self.set_size_request(24, 24)
        self.main.tooltips.set_tip(self, 'tooltip')
        self.send_message('stop')

    def change_state(self):
        """Swap in the icon and tooltip for the current state."""
        state = self.mystate
        assert self.states.has_key(state)
        if self.image is not None:
            self.remove(self.image)
        self.image = self.images[state]
        self.add(self.image)
        stock, tooltip = self.states[state]
        self.main.tooltips.set_tip(self, tooltip)
diff --git a/NohGooee/Storage.py b/NohGooee/Storage.py
new file mode 100644
index 0000000..7a766bd
--- /dev/null
+++ b/NohGooee/Storage.py
@@ -0,0 +1,274 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+import os
+from bisect import bisect_right
+from array import array
+
+from NohGooee import BTFailure, app_name
+
+
class FilePool(object):
    """Process-wide cache of open file handles, bounded by max_files_open.

    One pool is shared by all running torrents.  handlebuffer, when not
    None, is an LRU list of currently-open filenames (oldest first); it is
    None whenever the number of known files fits under the limit, so no
    eviction bookkeeping is needed.
    """

    def __init__(self, max_files_open):
        # filename -> owning torrent object, across all running torrents
        self.allfiles = {}
        # LRU list of open filenames, or None when no limit is in force
        self.handlebuffer = None
        # filename -> open handle; whandles keys mark handles open for write
        self.handles = {}
        self.whandles = {}
        self.set_max_files_open(max_files_open)

    def close_all(self):
        """Close every cached handle, delivering any close errors to the
        owning torrents via got_exception() after the loop."""
        failures = {}
        for filename, handle in self.handles.iteritems():
            try:
                handle.close()
            except Exception, e:
                # keep (at most) one error per torrent
                failures[self.allfiles[filename]] = e
        self.handles.clear()
        self.whandles.clear()
        if self.handlebuffer is not None:
            del self.handlebuffer[:]
        for torrent, e in failures.iteritems():
            torrent.got_exception(e)

    def set_max_files_open(self, max_files_open):
        """Change the open-handle limit; <= 0 means effectively unlimited.
        Closes everything, then enables LRU tracking only if needed."""
        if max_files_open <= 0:
            max_files_open = 1e100
        self.max_files_open = max_files_open
        self.close_all()
        if len(self.allfiles) > self.max_files_open:
            self.handlebuffer = []
        else:
            self.handlebuffer = None

    def add_files(self, files, torrent):
        """Register a torrent's filenames.  Raises BTFailure if any file
        is already claimed by another running torrent."""
        for filename in files:
            if filename in self.allfiles:
                raise BTFailure(_("File %s belongs to another running torrent")
                                % filename)
        for filename in files:
            self.allfiles[filename] = torrent
        # crossing the limit for the first time: start LRU tracking,
        # seeded with whatever is currently open
        if self.handlebuffer is None and \
               len(self.allfiles) > self.max_files_open:
            self.handlebuffer = self.handles.keys()

    def remove_files(self, files):
        """Forget a torrent's filenames; drop LRU tracking once the
        remaining files fit under the limit again."""
        for filename in files:
            del self.allfiles[filename]
        if self.handlebuffer is not None and \
               len(self.allfiles) <= self.max_files_open:
            self.handlebuffer = None
+
+
+# Make this a separate function because having this code in Storage.__init__()
+# would make python print a SyntaxWarning (uses builtin 'file' before 'global')
+
def bad_libc_workaround():
    # Shadow the builtin 'file' (module-wide) with a wrapper that ignores
    # the buffering argument, for platforms whose libc misbehaves with
    # unbuffered opens.  Called from Storage.__init__ when the
    # 'bad_libc_workaround' config option is set.
    global file
    def file(name, mode = 'r', buffering = None):
        return open(name, mode)
+
class Storage(object):
    """Presents a torrent's files as one contiguous byte range, mapping
    (offset, length) requests onto per-file reads and writes through
    handles cached in the shared FilePool."""

    def __init__(self, config, filepool, files, check_only=False):
        """files: sequence of (filename, length) pairs in torrent order.

        Unless check_only is set, missing files are created (with parent
        directories) and over-long existing files are truncated.
        Raises BTFailure if a name exists but is not a regular file.
        """
        self.filepool = filepool
        self.config = config
        # (start, stop, filename) for every nonempty file, in order
        self.ranges = []
        self.myfiles = {}
        # filename -> bytes already present on disk at startup
        self.tops = {}
        self.undownloaded = {}
        self.unallocated = {}
        total = 0
        for filename, length in files:
            self.unallocated[filename] = length
            self.undownloaded[filename] = length
            if length > 0:
                self.ranges.append((total, total + length, filename))
            self.myfiles[filename] = None
            total += length
            if os.path.exists(filename):
                if not os.path.isfile(filename):
                    raise BTFailure(_("File %s already exists, but is not a "
                                      "regular file") % filename)
                l = os.path.getsize(filename)
                if l > length and not check_only:
                    # file longer than the torrent says: cut it back
                    h = file(filename, 'rb+')
                    h.truncate(length)
                    h.close()
                    l = length
                self.tops[filename] = l
            elif not check_only:
                f = os.path.split(filename)[0]
                if f != '' and not os.path.exists(f):
                    os.makedirs(f)
                file(filename, 'wb').close()
        # range start offsets, parallel to self.ranges, for bisecting
        self.begins = [i[0] for i in self.ranges]
        self.total_length = total
        if check_only:
            return
        self.handles = filepool.handles
        self.whandles = filepool.whandles

        # Rather implement this as an ugly hack here than change all the
        # individual calls. Affects all torrent instances using this module.
        if config['bad_libc_workaround']:
            bad_libc_workaround()

    def was_preallocated(self, pos, length):
        """True if every file touched by [pos, pos+length) already held
        that much data on disk at startup."""
        for filename, begin, end in self._intervals(pos, length):
            if self.tops.get(filename, 0) < end:
                return False
        return True

    def get_total_length(self):
        return self.total_length

    def _intervals(self, pos, amount):
        """Split the global range [pos, pos+amount) into a list of
        (filename, begin, end), with begin/end being offsets inside
        each file."""
        r = []
        stop = pos + amount
        p = bisect_right(self.begins, pos) - 1
        while p < len(self.ranges) and self.ranges[p][0] < stop:
            begin, end, filename = self.ranges[p]
            r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
            p += 1
        return r

    def _get_file_handle(self, filename, for_write):
        """Return a cached handle for filename, (re)opening in read/write
        mode if needed, keeping the pool's LRU list and open-handle cap
        up to date."""
        handlebuffer = self.filepool.handlebuffer
        if filename in self.handles:
            if for_write and filename not in self.whandles:
                # was open read-only: reopen for writing
                self.handles[filename].close()
                self.handles[filename] = file(filename, 'rb+', 0)
                self.whandles[filename] = None
            # move to most-recently-used position
            if handlebuffer is not None and handlebuffer[-1] != filename:
                handlebuffer.remove(filename)
                handlebuffer.append(filename)
        else:
            if for_write:
                self.handles[filename] = file(filename, 'rb+', 0)
                self.whandles[filename] = None
            else:
                self.handles[filename] = file(filename, 'rb', 0)
            if handlebuffer is not None:
                if len(handlebuffer) >= self.filepool.max_files_open:
                    # evict the least recently used handle
                    oldfile = handlebuffer.pop(0)
                    if oldfile in self.whandles: # .pop() in python 2.3
                        del self.whandles[oldfile]
                    self.handles[oldfile].close()
                    del self.handles[oldfile]
                handlebuffer.append(filename)
        return self.handles[filename]

    def read(self, pos, amount):
        """Read 'amount' bytes starting at global offset pos.
        Raises BTFailure on a short read."""
        r = []
        for filename, pos, end in self._intervals(pos, amount):
            h = self._get_file_handle(filename, False)
            h.seek(pos)
            r.append(h.read(end - pos))
        r = ''.join(r)
        if len(r) != amount:
            raise BTFailure(_("Short read - something truncated files?"))
        return r

    def write(self, pos, s):
        """Write string s starting at global offset pos."""
        # might raise an IOError
        total = 0
        for filename, begin, end in self._intervals(pos, len(s)):
            h = self._get_file_handle(filename, True)
            h.seek(begin)
            h.write(s[total: total + end - begin])
            total += end - begin

    def close(self):
        """Close and drop this torrent's handles from the shared pool.
        The last close error (if any) is re-raised after cleanup."""
        error = None
        for filename in self.handles.keys():
            if filename in self.myfiles:
                try:
                    self.handles[filename].close()
                except Exception, e:
                    error = e
                del self.handles[filename]
                if filename in self.whandles:
                    del self.whandles[filename]
        handlebuffer = self.filepool.handlebuffer
        if handlebuffer is not None:
            handlebuffer = [f for f in handlebuffer if f not in self.myfiles]
            self.filepool.handlebuffer = handlebuffer
        if error is not None:
            raise error

    def write_fastresume(self, resumefile, amount_done):
        """Append the version-1 header plus one 'size mtime' line per
        nonempty file to the open resumefile."""
        resumefile.write('BitTorrent resume state file, version 1\n')
        resumefile.write(str(amount_done) + '\n')
        for x, x, filename in self.ranges:
            resumefile.write(str(os.path.getsize(filename)) + ' ' +
                             str(int(os.path.getmtime(filename))) + '\n')

    def check_fastresume(self, resumefile, return_filelist=False,
                         piece_size=None, numpieces=None, allfiles=None):
        """Validate on-disk files against the fastresume data.

        Raises BTFailure when sizes/mtimes disagree with what was saved.
        Returns amount_done, or (amount_done, undownloaded-per-file,
        allocated-per-file) when return_filelist is set (None if there
        is no resumefile).
        # NOTE(review): with resumefile=None, size/mtime stay 0, so any
        # existing nonempty file fails the checks below — presumably this
        # path is only taken for fresh downloads; confirm against callers.
        """
        filenames = [name for x, x, name in self.ranges]
        if resumefile is not None:
            version = resumefile.readline()
            if version != 'BitTorrent resume state file, version 1\n':
                raise BTFailure(_("Unsupported fastresume file format, "
                                  "maybe from another client version?"))
            amount_done = int(resumefile.readline())
        else:
            amount_done = size = mtime = 0
        for filename in filenames:
            if resumefile is not None:
                line = resumefile.readline()
                size, mtime = line.split()[:2] # allow adding extra fields
                size = int(size)
                mtime = int(mtime)
            if os.path.exists(filename):
                fsize = os.path.getsize(filename)
            else:
                raise BTFailure(_("Another program appears to have moved, renamed, or deleted the file, "
                                  "or %s may have crashed last time it was run.") % app_name)
            if fsize > 0 and mtime != os.path.getmtime(filename):
                raise BTFailure(_("Another program appears to have modified the file, "
                                  "or %s may have crashed last time it was run.") % app_name)
            if size != fsize:
                raise BTFailure(_("Another program appears to have changed the file size, "
                                  "or %s may have crashed last time it was run.") % app_name)
        if not return_filelist:
            return amount_done
        if resumefile is None:
            return None
        # per-piece slot array written by StorageWrapper.write_fastresume
        if numpieces < 32768:
            typecode = 'h'
        else:
            typecode = 'l'
        try:
            r = array(typecode)
            r.fromfile(resumefile, numpieces)
        except Exception, e:
            raise BTFailure(_("Couldn't read fastresume data: ") + str(e) + '.')
        for i in range(numpieces):
            if r[i] >= 0:
                # last piece goes "past the end", doesn't matter
                self.downloaded(r[i] * piece_size, piece_size)
            if r[i] != -2:
                self.allocated(i * piece_size, piece_size)
        undl = self.undownloaded
        unal = self.unallocated
        return amount_done, [undl[x] for x in allfiles], \
               [not unal[x] for x in allfiles]

    def allocated(self, pos, length):
        """Mark [pos, pos+length) as allocated on disk."""
        for filename, begin, end in self._intervals(pos, length):
            self.unallocated[filename] -= end - begin

    def downloaded(self, pos, length):
        """Mark [pos, pos+length) as downloaded."""
        for filename, begin, end in self._intervals(pos, length):
            self.undownloaded[filename] -= end - begin
diff --git a/NohGooee/StorageWrapper.py b/NohGooee/StorageWrapper.py
new file mode 100644
index 0000000..7955f95
--- /dev/null
+++ b/NohGooee/StorageWrapper.py
@@ -0,0 +1,408 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from __future__ import division
+
+from sha import sha
+from array import array
+from binascii import b2a_hex
+
+from NohGooee.bitfield import Bitfield
+from NohGooee import BTFailure, INFO, WARNING, ERROR, CRITICAL
+
def toint(s):
    """Decode a big-endian byte string as a nonnegative integer."""
    hexdigits = b2a_hex(s)
    return int(hexdigits, 16)
+
def tobinary(i):
    """Encode integer i as a 4-character big-endian byte string."""
    return ''.join([chr(i >> 24),
                    chr((i >> 16) & 0xFF),
                    chr((i >> 8) & 0xFF),
                    chr(i & 0xFF)])
+
# places[piece] value: piece has no disk slot yet
NO_PLACE = -1

# rplaces[slot] values for slots not holding a verified piece:
ALLOCATED = -1           # slot exists on disk but holds no piece data
UNALLOCATED = -2         # slot not yet allocated on disk
FASTRESUME_PARTIAL = -3  # slot holds a partially downloaded piece
+
class StorageWrapper(object):
    """Piece-level layer over Storage: hash checking, fast-resume data,
    request bookkeeping and piece placement.

    places[piece] is the disk slot a piece currently occupies (NO_PLACE
    if nowhere yet); rplaces[slot] is the inverse mapping, or one of the
    negative markers ALLOCATED / UNALLOCATED / FASTRESUME_PARTIAL.
    """

    def __init__(self, storage, config, hashes, piece_size, finished,
                 statusfunc, flag, data_flunked, infohash, errorfunc, resumefile):
        self.numpieces = len(hashes)
        self.storage = storage
        self.config = config
        check_hashes = config['check_hashes']
        self.hashes = hashes
        self.piece_size = piece_size
        self.data_flunked = data_flunked
        self.errorfunc = errorfunc
        self.total_length = storage.get_total_length()
        self.amount_left = self.total_length
        # marker written into not-yet-downloaded regions so partially
        # downloaded pieces can be recognized on restart (see _initalloc
        # and _check_partial)
        self.partial_mark = "BitTorrent - this part has not been "+\
                            "downloaded yet."+infohash+\
                            tobinary(config['download_slice_size'])
        if self.total_length <= piece_size * (self.numpieces - 1):
            raise BTFailure, _("bad data in responsefile - total too small")
        if self.total_length > piece_size * self.numpieces:
            raise BTFailure, _("bad data in responsefile - total too big")
        self.finished = finished
        self.numactive = array('H', [0] * self.numpieces)
        # per piece: 1 = requests not yet generated, list = outstanding
        # (begin, length) pairs still to request, None = piece complete
        self.inactive_requests = [1] * self.numpieces
        self.amount_inactive = self.total_length
        self.endgame = False
        self.have = Bitfield(self.numpieces)
        self.waschecked = Bitfield(self.numpieces)
        # 'h' fits while piece indices stay below 32768
        if self.numpieces < 32768:
            typecode = 'h'
        else:
            typecode = 'l'
        self.places = array(typecode, [NO_PLACE] * self.numpieces)
        if not check_hashes:
            # trust the data blindly: piece i is assumed to live in slot i
            self.rplaces = array(typecode, range(self.numpieces))
            fastresume = True
        else:
            self.rplaces = self._load_fastresume(resumefile, typecode)
            if self.rplaces is not None:
                fastresume = True
            else:
                self.rplaces = array(typecode, [UNALLOCATED] * self.numpieces)
                fastresume = False
        self.holepos = 0
        self.stat_numfound = 0
        self.stat_numflunked = 0
        self.stat_numdownloaded = 0
        self.stat_active = {}
        self.stat_new = {}
        self.stat_dirty = {}
        self.download_history = {}
        self.failed_pieces = {}

        if self.numpieces == 0:
            return
        targets = {}
        total = 0
        if not fastresume:
            for i in xrange(self.numpieces):
                if self._waspre(i):
                    self.rplaces[i] = ALLOCATED
                    total += 1
                else:
                    targets[hashes[i]] = i
            if total and check_hashes:
                statusfunc(_("checking existing file"), 0)
        def markgot(piece, pos):
            # record that the data sitting in slot pos is (verified) piece
            if self.have[piece]:
                if piece != pos:
                    return
                self.rplaces[self.places[pos]] = ALLOCATED
                self.places[pos] = self.rplaces[pos] = pos
                return
            self.places[piece] = pos
            self.rplaces[pos] = piece
            self.have[piece] = True
            self.amount_left -= self._piecelen(piece)
            self.amount_inactive -= self._piecelen(piece)
            self.inactive_requests[piece] = None
            if not fastresume:
                self.waschecked[piece] = True
            self.stat_numfound += 1
        lastlen = self._piecelen(self.numpieces - 1)
        partials = {}
        for i in xrange(self.numpieces):
            if not self._waspre(i):
                if self.rplaces[i] != UNALLOCATED:
                    raise BTFailure(_("--check_hashes 0 or fastresume info "
                                      "doesn't match file state (missing data)"))
                continue
            elif fastresume:
                t = self.rplaces[i]
                if t >= 0:
                    markgot(t, i)
                    continue
                if t == UNALLOCATED:
                    raise BTFailure(_("Bad fastresume info (files contain more "
                                      "data)"))
                if t == ALLOCATED:
                    continue
                if t!= FASTRESUME_PARTIAL:
                    raise BTFailure(_("Bad fastresume info (illegal value)"))
                data = self.storage.read(self.piece_size * i,
                                         self._piecelen(i))
                self._check_partial(i, partials, data)
                self.rplaces[i] = ALLOCATED
            else:
                data = self.storage.read(piece_size * i, self._piecelen(i))
                # hash both the full slot and its last-piece-sized prefix,
                # so a misplaced final piece can also be recognized
                sh = sha(buffer(data, 0, lastlen))
                sp = sh.digest()
                sh.update(buffer(data, lastlen))
                s = sh.digest()
                if s == hashes[i]:
                    markgot(i, i)
                elif s in targets and self._piecelen(i) == self._piecelen(targets[s]):
                    markgot(targets[s], i)
                elif not self.have[self.numpieces - 1] and sp == hashes[-1] and (i == self.numpieces - 1 or not self._waspre(self.numpieces - 1)):
                    markgot(self.numpieces - 1, i)
                else:
                    self._check_partial(i, partials, data)
                statusfunc(fractionDone = 1 - self.amount_left /
                           self.total_length)
            if flag.isSet():
                return
        self.amount_left_with_partials = self.amount_left
        for piece in partials:
            if self.places[piece] < 0:
                pos = partials[piece][0]
                self.places[piece] = pos
                self.rplaces[pos] = piece
                self._make_partial(piece, partials[piece][1])
        for i in xrange(self.numpieces):
            if self.rplaces[i] != UNALLOCATED:
                self.storage.allocated(piece_size * i, self._piecelen(i))
            if self.have[i]:
                self.storage.downloaded(piece_size * i, self._piecelen(i))

    def _waspre(self, piece):
        # True if the piece's slot was fully present on disk at startup
        return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece))

    def _piecelen(self, piece):
        # all pieces are piece_size except a possibly-shorter final piece
        if piece < self.numpieces - 1:
            return self.piece_size
        else:
            return self.total_length - piece * self.piece_size

    def _check_partial(self, pos, partials, data):
        """Scan slot pos for partial_mark slices; when found, record
        (slot, undownloaded-slice-offsets) for that piece in partials."""
        index = None
        missing = False
        marklen = len(self.partial_mark)+4
        for i in xrange(0, len(data) - marklen,
                        self.config['download_slice_size']):
            if data[i:i+marklen-4] == self.partial_mark:
                # trailing 4 bytes encode the piece index the mark was
                # written for
                ind = toint(data[i+marklen-4:i+marklen])
                if index is None:
                    index = ind
                    parts = []
                if ind >= self.numpieces or ind != index:
                    return
                parts.append(i)
            else:
                missing = True
        if index is not None and missing:
            # the final slice has no room for a full mark; treat it as
            # still-needed too
            i += self.config['download_slice_size']
            if i < len(data):
                parts.append(i)
            partials[index] = (pos, parts)

    def _make_partial(self, index, parts):
        """Rebuild request bookkeeping for a partially downloaded piece:
        slices listed in parts still need downloading."""
        length = self._piecelen(index)
        l = []
        self.inactive_requests[index] = l
        x = 0
        self.amount_left_with_partials -= self._piecelen(index)
        self.download_history[index] = {}
        request_size = self.config['download_slice_size']
        for x in xrange(0, self._piecelen(index), request_size):
            partlen = min(request_size, length - x)
            if x in parts:
                l.append((x, partlen))
                self.amount_left_with_partials += partlen
            else:
                # slice already on disk from a previous run
                self.amount_inactive -= partlen
                self.download_history[index][x] = None
        self.stat_dirty[index] = 1

    def _initalloc(self, pos, piece):
        """Claim slot pos for piece and fill it with the partial mark so
        undownloaded slices are recognizable after a crash."""
        assert self.rplaces[pos] < 0
        assert self.places[piece] == NO_PLACE
        p = self.piece_size * pos
        length = self._piecelen(pos)
        if self.rplaces[pos] == UNALLOCATED:
            self.storage.allocated(p, length)
        self.places[piece] = pos
        self.rplaces[pos] = piece
        # "if self.rplaces[pos] != ALLOCATED:" to skip extra mark writes
        mark = self.partial_mark + tobinary(piece)
        mark += chr(0xff) * (self.config['download_slice_size'] - len(mark))
        mark *= (length - 1) // len(mark) + 1
        self.storage.write(p, buffer(mark, 0, length))

    def _move_piece(self, oldpos, newpos):
        """Copy the piece in slot oldpos into slot newpos and update the
        place maps; re-verifies completed pieces after the copy."""
        assert self.rplaces[newpos] < 0
        assert self.rplaces[oldpos] >= 0
        data = self.storage.read(self.piece_size * oldpos,
                                 self._piecelen(newpos))
        self.storage.write(self.piece_size * newpos, data)
        if self.rplaces[newpos] == UNALLOCATED:
            self.storage.allocated(self.piece_size * newpos, len(data))
        piece = self.rplaces[oldpos]
        self.places[piece] = newpos
        self.rplaces[oldpos] = ALLOCATED
        self.rplaces[newpos] = piece
        if not self.have[piece]:
            return
        data = data[:self._piecelen(piece)]
        if sha(data).digest() != self.hashes[piece]:
            raise BTFailure(_("data corrupted on disk - "
                              "maybe you have two copies running?"))

    def _get_free_place(self):
        # next slot not yet holding a verified piece; holepos only moves
        # forward because slots below it never become free again
        while self.rplaces[self.holepos] >= 0:
            self.holepos += 1
        return self.holepos

    def get_amount_left(self):
        return self.amount_left

    def do_I_have_anything(self):
        return self.amount_left < self.total_length

    def _make_inactive(self, index):
        # generate the full (begin, length) request list for a fresh piece
        length = self._piecelen(index)
        l = []
        x = 0
        request_size = self.config['download_slice_size']
        while x + request_size < length:
            l.append((x, request_size))
            x += request_size
        l.append((x, length - x))
        self.inactive_requests[index] = l

    def _load_fastresume(self, resumefile, typecode):
        """Read the per-slot array appended by write_fastresume; returns
        None (and logs a warning) if it can't be read."""
        if resumefile is not None:
            try:
                r = array(typecode)
                r.fromfile(resumefile, self.numpieces)
                return r
            except Exception, e:
                self.errorfunc(WARNING, _("Couldn't read fastresume data: ") +
                               str(e))
        return None

    def write_fastresume(self, resumefile):
        """Append the rplaces array to resumefile, downgrading slots of
        incomplete pieces to FASTRESUME_PARTIAL first."""
        for i in xrange(self.numpieces):
            if self.rplaces[i] >= 0 and not self.have[self.rplaces[i]]:
                self.rplaces[i] = FASTRESUME_PARTIAL
        self.rplaces.tofile(resumefile)

    def get_have_list(self):
        return self.have.tostring()

    def do_I_have(self, index):
        return self.have[index]

    def do_I_have_requests(self, index):
        # true when requests remain: 1 (not yet generated) or nonempty list
        return not not self.inactive_requests[index]

    def new_request(self, index):
        # returns (begin, length)
        if self.inactive_requests[index] == 1:
            self._make_inactive(index)
        self.numactive[index] += 1
        self.stat_active[index] = 1
        if index not in self.stat_dirty:
            self.stat_new[index] = 1
        rs = self.inactive_requests[index]
        # hand out the lowest-offset remaining slice
        r = min(rs)
        rs.remove(r)
        self.amount_inactive -= r[1]
        if self.amount_inactive == 0:
            self.endgame = True
        return r

    def piece_came_in(self, index, begin, piece, source = None):
        """Store one received slice; on the piece's final slice, hash-check
        it.  Returns False when the completed piece failed the hash (and
        blames a single sender when one can be identified)."""
        if self.places[index] < 0:
            # piece has no slot yet: allocate or steal one
            if self.rplaces[index] == ALLOCATED:
                self._initalloc(index, index)
            else:
                n = self._get_free_place()
                if self.places[n] >= 0:
                    # prefer giving each piece its own slot: relocate the
                    # occupant of slot n, then put this piece in place
                    oldpos = self.places[n]
                    self._move_piece(oldpos, n)
                    n = oldpos
                if self.rplaces[index] < 0 or index == n:
                    self._initalloc(n, index)
                else:
                    self._move_piece(index, n)
                    self._initalloc(index, index)

        if index in self.failed_pieces:
            # piece failed before: track which sender's data changed
            old = self.storage.read(self.places[index] * self.piece_size +
                                    begin, len(piece))
            if old != piece:
                self.failed_pieces[index][self.download_history[index][begin]]\
                    = None
        self.download_history.setdefault(index, {})
        self.download_history[index][begin] = source

        self.storage.write(self.places[index] * self.piece_size + begin, piece)
        self.stat_dirty[index] = 1
        self.numactive[index] -= 1
        if self.numactive[index] == 0:
            del self.stat_active[index]
        if index in self.stat_new:
            del self.stat_new[index]
        if not self.inactive_requests[index] and not self.numactive[index]:
            # last outstanding slice arrived: verify the whole piece
            del self.stat_dirty[index]
            if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() == self.hashes[index]:
                self.have[index] = True
                self.storage.downloaded(index * self.piece_size,
                                        self._piecelen(index))
                self.inactive_requests[index] = None
                self.waschecked[index] = True
                self.amount_left -= self._piecelen(index)
                self.stat_numdownloaded += 1
                for d in self.download_history[index].itervalues():
                    if d is not None:
                        d.good(index)
                del self.download_history[index]
                if index in self.failed_pieces:
                    for d in self.failed_pieces[index]:
                        if d is not None:
                            d.bad(index)
                    del self.failed_pieces[index]
                if self.amount_left == 0:
                    self.finished()
            else:
                self.data_flunked(self._piecelen(index), index)
                self.inactive_requests[index] = 1
                self.amount_inactive += self._piecelen(index)
                self.stat_numflunked += 1

                self.failed_pieces[index] = {}
                allsenders = {}
                for d in self.download_history[index].itervalues():
                    allsenders[d] = None
                if len(allsenders) == 1:
                    culprit = allsenders.keys()[0]
                    if culprit is not None:
                        culprit.bad(index, bump = True)
                    del self.failed_pieces[index] # found the culprit already
                return False
        return True

    def request_lost(self, index, begin, length):
        """Return an outstanding request to the inactive pool (peer went
        away or choked)."""
        self.inactive_requests[index].append((begin, length))
        self.amount_inactive += length
        self.numactive[index] -= 1
        if not self.numactive[index] and index in self.stat_active:
            del self.stat_active[index]
            if index in self.stat_new:
                del self.stat_new[index]

    def get_piece(self, index, begin, length):
        """Return 'length' bytes at 'begin' within piece 'index' for
        upload, or None if unavailable; lazily re-checks pieces that were
        trusted at startup."""
        if not self.have[index]:
            return None
        if not self.waschecked[index]:
            if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() != self.hashes[index]:
                raise BTFailure, _("told file complete on start-up, but piece failed hash check")
            self.waschecked[index] = True
        if begin + length > self._piecelen(index):
            return None
        return self.storage.read(self.piece_size * self.places[index] + begin, length)
diff --git a/NohGooee/TorrentQueue.py b/NohGooee/TorrentQueue.py
new file mode 100644
index 0000000..bf369be
--- /dev/null
+++ b/NohGooee/TorrentQueue.py
@@ -0,0 +1,848 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Uoti Urpala
+
+from __future__ import division
+
+import os
+import sys
+import threading
+import traceback
+
+
+from NohGooee import GetTorrent
+from NohGooee.platform import bttime
+from NohGooee.download import Feedback, Multitorrent
+from NohGooee.bencode import bdecode
+from NohGooee.ConvertedMetainfo import ConvertedMetainfo
+from NohGooee.prefs import Preferences
+from NohGooee import BTFailure, BTShutdown, INFO, WARNING, ERROR, CRITICAL
+from NohGooee import configfile
+from NohGooee import FAQ_URL
+import BitTorrent
+
+
# TorrentInfo.state values (queue position of a torrent):
RUNNING = 0           # actively downloading/seeding
RUN_QUEUED = 1        # should run; start_torrent not yet called
QUEUED = 2            # waiting for a running slot
KNOWN = 3             # known but not scheduled
ASKING_LOCATION = 4   # waiting for the user to pick a save path
+
+
class TorrentInfo(object):
    """Mutable record of everything the queue tracks for one torrent.

    The *_old totals hold the counts carried over from previous runs;
    the plain totals include the current session.  dlpath is stored in
    the per-torrent config under 'save_as'.
    """

    def __init__(self, config):
        self.config = config
        self.metainfo = None
        self.dl = None
        self.state = None
        self.completion = None
        self.finishtime = None
        self.uptotal = self.uptotal_old = 0
        self.downtotal = self.downtotal_old = 0

    def _get_dlpath(self):
        return self.config['save_as']

    def _set_dlpath(self, value):
        self.config['save_as'] = value

    dlpath = property(_get_dlpath, _set_dlpath)
+
+
def decode_position(l, pred, succ, default=None):
    """Find the index in list l between pred and succ.

    Returns 0 or len(l) when only one neighbor is given or when a
    neighbor sits at the matching end of l; returns index(pred)+1 when
    pred and succ are adjacent in l; otherwise falls back to 'default'
    (len(l) when default is None).
    """
    fallback = default
    if fallback is None:
        fallback = len(l)
    if pred is None and succ is None:
        return fallback
    if pred is None:
        return 0
    if succ is None:
        return len(l)
    try:
        if succ == l[0] and pred not in l:
            return 0
        if pred == l[-1] and succ not in l:
            return len(l)
        pos = l.index(pred)
        if l[pos + 1] == succ:
            return pos + 1
    except (ValueError, IndexError):
        pass
    return fallback
+
+
+class TorrentQueue(Feedback):
+
+ def __init__(self, config, ui_options, ipc):
+ self.ui_options = ui_options
+ self.ipc = ipc
+ self.config = config
+ self.config['def_running_torrents'] = 1 # !@# XXX
+ self.config['max_running_torrents'] = 100 # !@# XXX
+ self.doneflag = threading.Event()
+ self.torrents = {}
+ self.starting_torrent = None
+ self.running_torrents = []
+ self.queue = []
+ self.other_torrents = []
+ self.last_save_time = 0
+ self.last_version_check = 0
+ self.initialized = 0
+
    def run(self, ui, ui_wrap, startflag):
        """Main entry point: start networking and IPC, restore saved
        state, then hand control to the rawserver event loop.  startflag
        is set once startup succeeds or fails, so the GUI thread can stop
        waiting; self.initialized records the outcome (1 ok, -1 failed).
        """
        try:
            self.ui = ui
            self.run_ui_task = ui_wrap
            self.multitorrent = Multitorrent(self.config, self.doneflag,
                                             self.global_error, listen_fail_ok=True)
            self.rawserver = self.multitorrent.rawserver
            self.ipc.set_rawserver(self.rawserver)
            self.ipc.start(self.external_command)
            try:
                self._restore_state()
            except BTFailure, e:
                # saved state unusable: start with an empty queue
                self.torrents = {}
                self.running_torrents = []
                self.queue = []
                self.other_torrents = []
                self.global_error(ERROR, _("Could not load saved state: ")+str(e))
            else:
                # tell the UI about every restored torrent, in list order
                for infohash in self.running_torrents + self.queue + \
                        self.other_torrents:
                    t = self.torrents[infohash]
                    if t.dlpath is not None:
                        t.completion = self.multitorrent.get_completion(
                            self.config, t.metainfo, t.dlpath)
                    state = t.state
                    if state == RUN_QUEUED:
                        state = RUNNING
                    self.run_ui_task(self.ui.new_displayed_torrent, infohash,
                                     t.metainfo, t.dlpath, state, t.config,
                                     t.completion, t.uptotal, t.downtotal, )
            self._check_queue()
            self.initialized = 1
            startflag.set()
        except Exception, e:
            # dump a normal exception traceback
            traceback.print_exc()
            # set the error flag
            self.initialized = -1
            # signal the gui thread to stop waiting
            startflag.set()
            return

        self._queue_loop()
        self.multitorrent.rawserver.listen_forever()
        if self.doneflag.isSet():
            self.run_ui_task(self.ui.quit)
        # orderly shutdown: stop listening, stop IPC, stop torrents,
        # then persist final transfer totals
        self.multitorrent.close_listening_socket()
        self.ipc.stop()
        for infohash in list(self.running_torrents):
            t = self.torrents[infohash]
            if t.state == RUN_QUEUED:
                continue
            t.dl.shutdown()
            if t.dl is not None: # possibly set to none by failed()
                totals = t.dl.get_total_transfer()
                t.uptotal = t.uptotal_old + totals[0]
                t.downtotal = t.downtotal_old + totals[1]
        self._dump_state()
+
+ def _check_version(self):
+ now = bttime()
+ if self.last_version_check > 0 and \
+ self.last_version_check > now - 24*60*60:
+ return
+ self.last_version_check = now
+ self.run_ui_task(self.ui.check_version)
+
+ def _dump_config(self):
+ configfile.save_ui_config(self.config, 'bittorrent',
+ self.ui_options, self.global_error)
+ for infohash,t in self.torrents.items():
+ ec = lambda level, message: self.error(t.metainfo, level, message)
+ config = t.config.getDict()
+ if config:
+ configfile.save_torrent_config(self.config['data_dir'],
+ infohash, config, ec)
+
    def _dump_state(self):
        """Write the version-3 ui_state file (torrent order, per-torrent
        totals and save paths) to data_dir, via a temp file so an old
        copy survives a crash mid-write."""
        self.last_save_time = bttime()
        r = []
        def write_entry(infohash, t):
            if t.dlpath is None:
                # no path yet: only torrents awaiting a location
                assert t.state == ASKING_LOCATION
                r.append(infohash.encode('hex') + '\n')
            else:
                r.append(infohash.encode('hex') + ' ' + str(t.uptotal) + ' ' +
                         str(t.downtotal)+' '+t.dlpath.encode('string_escape')+'\n')
        r.append('BitTorrent UI state file, version 3\n')
        r.append('Running torrents\n')
        for infohash in self.running_torrents:
            write_entry(infohash, self.torrents[infohash])
        r.append('Queued torrents\n')
        for infohash in self.queue:
            write_entry(infohash, self.torrents[infohash])
        r.append('Known torrents\n')
        for infohash in self.other_torrents:
            write_entry(infohash, self.torrents[infohash])
        r.append('End\n')
        f = None
        try:
            filename = os.path.join(self.config['data_dir'], 'ui_state')
            f = file(filename + '.new', 'wb')
            f.write(''.join(r))
            f.close()
            if os.access(filename, os.F_OK):
                os.remove(filename) # no atomic rename on win32
            os.rename(filename + '.new', filename)
        except Exception, e:
            self.global_error(ERROR, _("Could not save UI state: ") + str(e))
            if f is not None:
                f.close()
+
    def _restore_state(self):
        """Reload the queue from the ui_state file in data_dir.

        Raises BTFailure on a structurally invalid file; individual
        torrents whose metainfo cannot be read are skipped with an error
        report instead of aborting the whole restore.
        """
        def decode_line(line):
            # Parse one state-file line: 40 hex chars of infohash,
            # optionally followed (format depends on 'version', bound in
            # the enclosing scope before any call) by totals and an
            # escaped save path.  Returns (infohash, TorrentInfo) or
            # None to skip the entry.
            hashtext = line[:40]
            try:
                infohash = hashtext.decode('hex')
            except:
                raise BTFailure(_("Invalid state file contents"))
            if len(infohash) != 20:
                raise BTFailure(_("Invalid state file contents"))
            try:
                # metainfo is stored per-torrent under data_dir/metainfo
                path = os.path.join(self.config['data_dir'], 'metainfo',
                                    hashtext)
                f = file(path, 'rb')
                data = f.read()
                f.close()
            except Exception, e:
                try:
                    f.close()
                except:
                    pass
                self.global_error(ERROR,
                                  (_("Error reading file \"%s\".") % path) +
                                  " (" + str(e)+ "), " +
                                  _("cannot restore state completely"))
                return None
            if infohash in self.torrents:
                raise BTFailure(_("Invalid state file (duplicate entry)"))
            t = TorrentInfo(Preferences(self.config))
            self.torrents[infohash] = t
            try:
                t.metainfo = ConvertedMetainfo(bdecode(data))
            except Exception, e:
                self.global_error(ERROR,
                                  (_("Corrupt data in \"%s\", cannot restore torrent.") % path) +
                                  '('+str(e)+')')
                return None
            t.metainfo.reported_errors = True # suppress redisplay on restart
            if infohash != t.metainfo.infohash:
                self.global_error(ERROR,
                                  (_("Corrupt data in \"%s\", cannot restore torrent.") % path) +
                                  _("(infohash mismatch)"))
                return None
            if len(line) == 41:
                # bare hash: torrent has no save path yet
                t.dlpath = None
                return infohash, t
            try:
                if version < 2:
                    t.dlpath = line[41:-1].decode('string_escape')
                elif version == 3:
                    up, down, dlpath = line[41:-1].split(' ', 2)
                    t.uptotal = t.uptotal_old = int(up)
                    t.downtotal = t.downtotal_old = int(down)
                    t.dlpath = dlpath.decode('string_escape')
                elif version >= 4:
                    up, down = line[41:-1].split(' ', 1)
                    t.uptotal = t.uptotal_old = int(up)
                    t.downtotal = t.downtotal_old = int(down)
            except ValueError: # unpack, int(), decode()
                raise BTFailure(_("Invalid state file (bad entry)"))
            config = configfile.read_torrent_config(self.config,
                                                    self.config['data_dir'],
                                                    infohash, self.global_error)
            t.config.update(config)
            return infohash, t
        filename = os.path.join(self.config['data_dir'], 'ui_state')
        if not os.path.exists(filename):
            return
        f = None
        try:
            f = file(filename, 'rb')
            lines = f.readlines()
            f.close()
        except Exception, e:
            if f is not None:
                f.close()
            raise BTFailure(str(e))
        i = iter(lines)
        try:
            txt = 'BitTorrent UI state file, version '
            version = i.next()
            if not version.startswith(txt):
                raise BTFailure(_("Bad UI state file"))
            try:
                version = int(version[len(txt):-1])
            except:
                raise BTFailure(_("Bad UI state file version"))
            if version > 4:
                raise BTFailure(_("Unsupported UI state file version (from "
                                  "newer client version?)"))
            # the three sections: running, queued, known/other
            if version < 3:
                if i.next() != 'Running/queued torrents\n':
                    raise BTFailure(_("Invalid state file contents"))
            else:
                if i.next() != 'Running torrents\n':
                    raise BTFailure(_("Invalid state file contents"))
            while True:
                line = i.next()
                if line == 'Queued torrents\n':
                    break
                t = decode_line(line)
                if t is None:
                    continue
                infohash, t = t
                if t.dlpath is None:
                    raise BTFailure(_("Invalid state file contents"))
                # restored running torrents start as RUN_QUEUED so
                # _check_queue can actually start them
                t.state = RUN_QUEUED
                self.running_torrents.append(infohash)
            while True:
                line = i.next()
                if line == 'Known torrents\n':
                    break
                t = decode_line(line)
                if t is None:
                    continue
                infohash, t = t
                if t.dlpath is None:
                    raise BTFailure(_("Invalid state file contents"))
                t.state = QUEUED
                self.queue.append(infohash)
            while True:
                line = i.next()
                if line == 'End\n':
                    break
                t = decode_line(line)
                if t is None:
                    continue
                infohash, t = t
                if t.dlpath is None:
                    t.state = ASKING_LOCATION
                else:
                    t.state = KNOWN
                self.other_torrents.append(infohash)
        except StopIteration:
            raise BTFailure(_("Invalid state file contents"))
+
    def _queue_loop(self):
        """Periodic (20 s) scheduler tick: refresh transfer totals, decide
        whether a finished running torrent should stop seeding (by seed
        time/ratio config), and save state every 5 minutes."""
        if self.doneflag.isSet():
            return
        self.rawserver.add_task(self._queue_loop, 20)
        now = bttime()
        self._check_version()
        if self.queue and self.starting_torrent is None:
            # others are waiting: apply the 'next torrent' limits
            mintime = now - self.config['next_torrent_time'] * 60
            minratio = self.config['next_torrent_ratio'] / 100
            if self.config['seed_forever']:
                minratio = 1e99
        else:
            # nothing queued: apply the 'last torrent' ratio only
            mintime = 0
            minratio = self.config['last_torrent_ratio'] / 100
            if self.config['seed_last_forever']:
                minratio = 1e99
        if minratio >= 1e99:
            return
        for infohash in self.running_torrents:
            t = self.torrents[infohash]
            myminratio = minratio
            if t.dl:
                # per-torrent 'seed forever' overrides the global setting
                if self.queue and t.dl.config['seed_last_forever']:
                    myminratio = 1e99
                elif t.dl.config['seed_forever']:
                    myminratio = 1e99
            if t.state == RUN_QUEUED:
                continue
            totals = t.dl.get_total_transfer()
            # not updated for remaining torrents if one is stopped, who cares
            t.uptotal = t.uptotal_old + totals[0]
            t.downtotal = t.downtotal_old + totals[1]
            if t.finishtime is None or t.finishtime > now - 120:
                # still downloading, or finished less than 2 minutes ago
                continue
            if t.finishtime > mintime:
                # within the seed-time window: keep seeding until the
                # ratio target is met
                if t.uptotal < t.metainfo.total_bytes * myminratio:
                    continue
            self.change_torrent_state(infohash, RUNNING, KNOWN)
            break
        if self.running_torrents and self.last_save_time < now - 300:
            self._dump_state()
+
    def _check_queue(self):
        """Start the next torrent if a slot is free.

        RUN_QUEUED torrents (counted as running but not yet started) take
        priority; otherwise the head of the queue starts, provided fewer
        than def_running_torrents are running.  At most one torrent is in
        startup at a time (tracked via self.starting_torrent).
        """
        if self.starting_torrent is not None or self.config['pause']:
            return
        for infohash in self.running_torrents:
            if self.torrents[infohash].state == RUN_QUEUED:
                self.starting_torrent = infohash
                t = self.torrents[infohash]
                t.state = RUNNING
                t.finishtime = None
                t.dl = self.multitorrent.start_torrent(t.metainfo, t.config,
                                                       self, t.dlpath)
                return
        if not self.queue or len(self.running_torrents) >= \
               self.config['def_running_torrents']:
            return
        infohash = self.queue.pop(0)
        self.starting_torrent = infohash
        t = self.torrents[infohash]
        assert t.state == QUEUED
        t.state = RUNNING
        t.finishtime = None
        self.running_torrents.append(infohash)
        t.dl = self.multitorrent.start_torrent(t.metainfo, t.config, self,
                                               t.dlpath)
        self._send_state(infohash)
+
+ def _send_state(self, infohash):
+ t = self.torrents[infohash]
+ state = t.state
+ if state == RUN_QUEUED:
+ state = RUNNING
+ pos = None
+ if state in (KNOWN, RUNNING, QUEUED):
+ l = self._get_list(state)
+ if l[-1] != infohash:
+ pos = l.index(infohash)
+ self.run_ui_task(self.ui.torrent_state_changed, infohash, t.dlpath,
+ state, t.completion, t.uptotal_old, t.downtotal_old, pos)
+
    def _stop_running(self, infohash):
        """Stop a running torrent; return True on clean shutdown.

        On failure the torrent may be left out of the running list (the
        caller decides where to park it) and the user is shown the log.
        """
        t = self.torrents[infohash]
        if t.state == RUN_QUEUED:
            # Never actually started; just demote it.
            self.running_torrents.remove(infohash)
            t.state = KNOWN
            return True
        assert t.state == RUNNING
        shutdown_succeded = t.dl.shutdown()
        if not shutdown_succeded:
            self.run_ui_task(self.ui.open_log)
            self.error(t.metainfo, ERROR, "Unable to stop torrent. Please send this application log to bugs@bittorrent.com .")
            return False
        if infohash == self.starting_torrent:
            self.starting_torrent = None
        try:
            self.running_torrents.remove(infohash)
        except ValueError:
            # Not in the running list after all; drop from the others.
            self.other_torrents.remove(infohash)
            return False
        else:
            t.state = KNOWN
            # Fold this session's transfer totals into the persistent totals.
            totals = t.dl.get_total_transfer()
            t.uptotal_old += totals[0]
            t.uptotal = t.uptotal_old
            t.downtotal_old += totals[1]
            t.downtotal = t.downtotal_old
            t.dl = None
            t.completion = self.multitorrent.get_completion(self.config,
                                                t.metainfo, t.dlpath)
        return True
+
+ def external_command(self, action, *datas):
+ if action == 'start_torrent':
+ assert len(datas) == 2
+ self.start_new_torrent_by_name(datas[0], save_as=datas[1])
+ elif action == 'show_error':
+ assert len(datas) == 1
+ self.global_error(ERROR, datas[0])
+ elif action == 'no-op':
+ pass
+
    def remove_torrent(self, infohash):
        """Forget a torrent entirely: stop it if needed, notify the UI, and
        delete its cached metainfo/resume/config files from disk."""
        if infohash not in self.torrents:
            return
        state = self.torrents[infohash].state
        if state == QUEUED:
            self.queue.remove(infohash)
        elif state in (RUNNING, RUN_QUEUED):
            self._stop_running(infohash)
            self._check_queue()
        else:
            self.other_torrents.remove(infohash)
        self.run_ui_task(self.ui.removed_torrent, infohash)
        del self.torrents[infohash]

        # Best-effort cleanup of the on-disk caches; failures are warnings.
        for d in ['metainfo', 'resume']:
            filename = os.path.join(self.config['data_dir'], d,
                                    infohash.encode('hex'))
            try:
                os.remove(filename)
            except Exception, e:
                self.global_error(WARNING,
                                  (_("Could not delete cached %s file:")%d) +
                                  str(e))
        ec = lambda level, message: self.global_error(level, message)
        configfile.remove_torrent_config(self.config['data_dir'],
                                         infohash, ec)
        self._dump_state()
+
+ def set_save_location(self, infohash, dlpath):
+ torrent = self.torrents.get(infohash)
+ if torrent is None or torrent.state == RUNNING:
+ return
+ torrent.dlpath = dlpath
+ self._dump_config()
+ torrent.completion = self.multitorrent.get_completion(self.config,
+ torrent.metainfo, dlpath)
+ if torrent.state == ASKING_LOCATION:
+ torrent.state = KNOWN
+ self.change_torrent_state(infohash, KNOWN, QUEUED)
+ else:
+ self._send_state(infohash)
+ self._dump_state()
+
+ def _get_torrent_then_callback(self, name, save_as=None):
+ data, errors = GetTorrent.get_quietly(name)
+
+ if data:
+ self.start_new_torrent(data, save_as)
+ for error in errors:
+ self.run_ui_task(self.ui.global_error, ERROR, error)
+
+ def start_new_torrent_by_name(self, name, save_as=None):
+ t = threading.Thread(target=self._get_torrent_then_callback,
+ args=(name, save_as,))
+ t.setDaemon(True)
+ t.start()
+
    def start_new_torrent(self, data, save_as=None):
        """Add a torrent from raw .torrent bytes and queue it for a save
        location.

        Duplicate torrents are reported (or requeued if merely KNOWN); the
        metainfo is cached on disk so the torrent survives client restart.
        """
        t = TorrentInfo(Preferences(self.config))
        try:
            t.metainfo = ConvertedMetainfo(bdecode(data))
        except Exception, e:
            self.global_error(ERROR, _("This is not a valid torrent file. (%s)")
                              % str(e))
            return
        infohash = t.metainfo.infohash
        if infohash in self.torrents:
            # Already have this content; report according to its state.
            real_state = self.torrents[infohash].state
            if real_state in (RUNNING, RUN_QUEUED):
                self.error(t.metainfo, ERROR,
                           _("This torrent (or one with the same contents) is "
                             "already running."))
            elif real_state == QUEUED:
                self.error(t.metainfo, ERROR,
                           _("This torrent (or one with the same contents) is "
                             "already waiting to run."))
            elif real_state == ASKING_LOCATION:
                pass
            elif real_state == KNOWN:
                self.change_torrent_state(infohash, KNOWN, newstate=QUEUED)
            else:
                raise BTFailure(_("Torrent in unknown state %d") % real_state)
            return

        # Cache the raw metainfo; write to a temp file then rename so a
        # crash cannot leave a truncated cache entry.
        path = os.path.join(self.config['data_dir'], 'metainfo',
                            infohash.encode('hex'))
        try:
            f = file(path+'.new', 'wb')
            f.write(data)
            f.close()
            if os.access(path, os.F_OK):
                os.remove(path) # no atomic rename on win32
            os.rename(path+'.new', path)
        except Exception, e:
            try:
                f.close()
            except:
                pass
            self.global_error(ERROR, _("Could not write file ") + path +
                              ' (' + str(e) + '), ' +
                              _("torrent will not be restarted "
                                "correctly on client restart"))

        # Merge any per-torrent config saved from a previous session.
        config = configfile.read_torrent_config(self.config,
                                                self.config['data_dir'],
                                                infohash, self.global_error)
        if config:
            t.config.update(config)
        if save_as:
            self.run_ui_task(self.ui.set_config, 'save_as', save_as)
        else:
            save_as = None

        self.torrents[infohash] = t
        t.state = ASKING_LOCATION
        self.other_torrents.append(infohash)
        self._dump_state()
        self.run_ui_task(self.ui.new_displayed_torrent, infohash,
                         t.metainfo, save_as, t.state, t.config)

        # Report filename-encoding problems found while converting metainfo.
        def show_error(level, text):
            self.run_ui_task(self.ui.error, infohash, level, text)
        t.metainfo.show_encoding_errors(show_error)
+
    def set_config(self, option, value, ihash=None):
        """Change a config option globally (ihash None) or for one torrent.

        Global changes are forwarded to multitorrent; toggling 'pause'
        stops or restarts the queue.  Per-torrent changes are pushed to the
        live download when it is running.
        """
        if not ihash:
            oldvalue = self.config[option]
            self.config[option] = value
            self.multitorrent.set_option(option, value)
            if option == 'pause':
                if value:# and not oldvalue:
                    self.set_zero_running_torrents()
                elif not value:# and oldvalue:
                    self._check_queue()
        else:
            torrent = self.torrents[ihash]
            if torrent.state == RUNNING:
                torrent.dl.set_option(option, value)
                # Port-related options require rebinding the listener.
                if option in ('forwarded_port', 'maxport'):
                    torrent.dl.change_port()
            torrent.config[option] = value
        self._dump_config()
+
    def request_status(self, infohash, want_spew, want_fileinfo):
        """Fetch live status for a running torrent and push it to the UI,
        adding an estimate of remaining seed time once finished."""
        torrent = self.torrents.get(infohash)
        if torrent is None or torrent.state != RUNNING:
            return
        status = torrent.dl.get_status(want_spew, want_fileinfo)
        if torrent.finishtime is not None:
            # Seeding: estimate time until the seed-ratio targets are met.
            now = bttime()
            uptotal = status['upTotal'] + torrent.uptotal_old
            downtotal = status['downTotal'] + torrent.downtotal_old
            ulspeed = status['upRate2']
            # NOTE(review): '/ 100' is integer division on Python 2 when the
            # config value is an int -- confirm intended granularity.
            if self.queue:
                ratio = torrent.dl.config['next_torrent_ratio'] / 100
                if torrent.dl.config['seed_forever']:
                    ratio = 1e99
            else:
                ratio = torrent.dl.config['last_torrent_ratio'] / 100
                if torrent.dl.config['seed_last_forever']:
                    ratio = 1e99
            if ulspeed <= 0 or ratio >= 1e99:
                rem = 1e99
            elif downtotal == 0:
                rem = (torrent.metainfo.total_bytes * ratio - uptotal) / ulspeed
            else:
                rem = (downtotal * ratio - uptotal) / ulspeed
            if self.queue and not torrent.dl.config['seed_forever']:
                rem = min(rem, torrent.finishtime +
                          torrent.dl.config['next_torrent_time'] * 60 - now)
            # Always seed at least two minutes past completion.
            rem = max(rem, torrent.finishtime + 120 - now)
            if rem <= 0:
                rem = 1
            if rem >= 1e99:
                rem = None
            status['timeEst'] = rem
        self.run_ui_task(self.ui.update_status, infohash, status)
+
+ def _get_list(self, state):
+ if state == KNOWN:
+ return self.other_torrents
+ elif state == QUEUED:
+ return self.queue
+ elif state in (RUNNING, RUN_QUEUED):
+ return self.running_torrents
+ assert False
+
    def change_torrent_state(self, infohash, oldstate, newstate=None,
            pred=None, succ=None, replaced=None, force_running=False):
        """Move a torrent between the KNOWN/QUEUED/RUNNING lists and/or
        reposition it within one.

        pred/succ name neighbors used to compute the insertion position;
        replaced names a running torrent to swap out when the running list
        is full; force_running allows exceeding def_running_torrents up to
        max_running_torrents.
        """
        t = self.torrents.get(infohash)
        # RUN_QUEUED counts as RUNNING for the caller's purposes.
        if t is None or (t.state != oldstate and not (t.state == RUN_QUEUED and
                                                      oldstate == RUNNING)):
            return
        if newstate is None:
            newstate = oldstate
        assert oldstate in (KNOWN, QUEUED, RUNNING)
        assert newstate in (KNOWN, QUEUED, RUNNING)
        pos = None
        if oldstate != RUNNING and newstate == RUNNING and replaced is None:
            if len(self.running_torrents) >= (force_running and self.config[
                'max_running_torrents'] or self.config['def_running_torrents']):
                if force_running:
                    self.global_error(ERROR,
                                      _("Can't run more than %d torrents "
                                        "simultaneously. For more info see the"
                                        " FAQ at %s.")%
                                      (self.config['max_running_torrents'],
                                       FAQ_URL))
                # No free slot: demote to the head of the queue instead.
                newstate = QUEUED
                pos = 0
        l = self._get_list(newstate)
        if newstate == oldstate:
            # Pure reorder within a single list.
            origpos = l.index(infohash)
            del l[origpos]
            if pos is None:
                pos = decode_position(l, pred, succ, -1)
            # NOTE(review): 'l == origpos' compares a list to an int and can
            # never be true -- was 'pos == origpos' intended?  Preserved as-is.
            if pos == -1 or l == origpos:
                l.insert(origpos, infohash)
                return
            l.insert(pos, infohash)
            self._dump_state()
            self.run_ui_task(self.ui.reorder_torrent, infohash, pos)
            return
        if pos is None:
            pos = decode_position(l, pred, succ)
        if newstate == RUNNING:
            newstate = RUN_QUEUED
            if replaced and len(self.running_torrents) >= \
               self.config['def_running_torrents']:
                # Swap out the named running torrent to make room.
                t2 = self.torrents.get(replaced)
                if t2 is None or t2.state not in (RUNNING, RUN_QUEUED):
                    return
                if self.running_torrents.index(replaced) < pos:
                    pos -= 1
                if self._stop_running(replaced):
                    t2.state = QUEUED
                    self.queue.insert(0, replaced)
                    self._send_state(replaced)
                else:
                    self.other_torrents.append(replaced)
        if oldstate == RUNNING:
            if newstate == QUEUED and len(self.running_torrents) <= \
               self.config['def_running_torrents'] and pos == 0:
                return
            if not self._stop_running(infohash):
                # Shutdown failed; park the torrent with the inactive ones.
                if newstate == KNOWN:
                    self.other_torrents.insert(pos, infohash)
                    self.run_ui_task(self.ui.reorder_torrent, infohash, pos)
                else:
                    self.other_torrents.append(infohash)
                return
        else:
            self._get_list(oldstate).remove(infohash)
        t.state = newstate
        l.insert(pos, infohash)
        self._check_queue() # sends state if it starts the torrent from queue
        if t.state != RUNNING or newstate == RUN_QUEUED:
            self._send_state(infohash)
        self._dump_state()
+
+ def set_zero_running_torrents(self):
+ newrun = []
+ for infohash in list(self.running_torrents):
+ t = self.torrents[infohash]
+ if self._stop_running(infohash):
+ newrun.append(infohash)
+ t.state = RUN_QUEUED
+ else:
+ self.other_torrents.append(infohash)
+ self.running_torrents = newrun
+
+ def check_completion(self, infohash, filelist=False):
+ t = self.torrents.get(infohash)
+ if t is None:
+ return
+ r = self.multitorrent.get_completion(self.config, t.metainfo,
+ t.dlpath, filelist)
+ if r is None or not filelist:
+ self.run_ui_task(self.ui.update_completion, infohash, r)
+ else:
+ self.run_ui_task(self.ui.update_completion, infohash, *r)
+
    def global_error(self, level, text):
        # Forward a non-torrent-specific error to the UI thread.
        self.run_ui_task(self.ui.global_error, level, text)
+
+ # callbacks from torrent instances
+
    def failed(self, torrent, is_external):
        """Callback from a torrent instance that died: fold its totals in,
        park it with the inactive torrents, and start the next one.

        is_external: the failure came from outside the client (e.g. disk),
        so completion can still be computed from what is on disk.
        """
        infohash = torrent.infohash
        if infohash == self.starting_torrent:
            self.starting_torrent = None
        self.running_torrents.remove(infohash)
        t = self.torrents[infohash]
        t.state = KNOWN
        if is_external:
            t.completion = self.multitorrent.get_completion(
                self.config, t.metainfo, t.dlpath)
        else:
            t.completion = None
        # Fold this session's transfer totals into the persistent totals.
        totals = t.dl.get_total_transfer()
        t.uptotal_old += totals[0]
        t.uptotal = t.uptotal_old
        t.downtotal_old += totals[1]
        t.downtotal = t.downtotal_old
        t.dl = None
        self.other_torrents.append(infohash)
        self._send_state(infohash)
        if not self.doneflag.isSet():
            self._check_queue()
            self._dump_state()
+
    def finished(self, torrent):
        """called when a download reaches 100%"""
        infohash = torrent.infohash
        t = self.torrents[infohash]
        totals = t.dl.get_total_transfer()
        if t.downtotal == 0 and t.downtotal_old == 0 and totals[1] == 0:
            # Nothing was ever downloaded (presumably we created this
            # torrent): seed it indefinitely.
            self.set_config('seed_forever', True, infohash)
            self.set_config('seed_last_forever', True, infohash)
            self.request_status(infohash, False, False)

        if infohash == self.starting_torrent:
            t = self.torrents[infohash]
            if self.queue:
                # NOTE(review): integer division on Python 2 if the config
                # value is an int.
                ratio = t.config['next_torrent_ratio'] / 100
                if t.config['seed_forever']:
                    ratio = 1e99
                msg = _("Not starting torrent as there are other torrents "
                        "waiting to run, and this one already meets the "
                        "settings for when to stop seeding.")
            else:
                ratio = t.config['last_torrent_ratio'] / 100
                if t.config['seed_last_forever']:
                    ratio = 1e99
                msg = _("Not starting torrent as it already meets the "
                        "settings for when to stop seeding the last "
                        "completed torrent.")
            if ratio < 1e99 and t.uptotal >= t.metainfo.total_bytes * ratio:
                # Abort the startup; the shutdown message explains why.
                raise BTShutdown(msg)
        self.torrents[torrent.infohash].finishtime = bttime()
+
    def started(self, torrent):
        # Startup handshake finished; allow the next queued start to proceed.
        infohash = torrent.infohash
        assert infohash == self.starting_torrent
        self.starting_torrent = None
        self._check_queue()
+
    def error(self, torrent, level, text):
        # Forward a per-torrent error to the UI thread.
        self.run_ui_task(self.ui.error, torrent.infohash, level, text)
+
+
class ThreadWrappedQueue(object):
    """Thread-safe facade over a TorrentQueue: every call is marshalled
    onto the queue's own rawserver thread (methods installed below)."""

    def __init__(self, wrapped):
        # The real TorrentQueue this proxy forwards to.
        self.wrapped = wrapped

    def set_done(self):
        """Signal the wrapped queue to shut down and wake its event loop."""
        self.wrapped.doneflag.set()
        # add a dummy task to make sure the thread wakes up and notices flag
        self.wrapped.rawserver.external_add_task(lambda: None, 0)
+
+# OW
+def _makemethod(methodname):
+ def wrapper(self, *args, **kws):
+ def f():
+ getattr(self.wrapped, methodname)(*args, **kws)
+ self.wrapped.rawserver.external_add_task(f, 0)
+ return wrapper
+
# also OW
# Install thread-marshalling proxies on ThreadWrappedQueue for each public
# TorrentQueue entry point below, then clean up the module namespace.
for methodname in ("request_status set_config start_new_torrent "
                   "start_new_torrent_by_name remove_torrent set_save_location "
                   "change_torrent_state check_completion").split():
    setattr(ThreadWrappedQueue, methodname, _makemethod(methodname))
del _makemethod, methodname
diff --git a/NohGooee/TrayIcon.py b/NohGooee/TrayIcon.py
new file mode 100644
index 0000000..d48dd2e
--- /dev/null
+++ b/NohGooee/TrayIcon.py
@@ -0,0 +1,95 @@
+import os
+
+from NohGooee import app_name
+from NohGooee.GUI import gtk_wrap
+from NohGooee.platform import image_root
+
+if os.name == 'nt':
+ from systray import systray
+
    class TrayIcon(systray.Control):
        """Windows system-tray icon with a show/hide toggle and quit item,
        built on pysystray."""

        def __init__(self, initial_state, toggle_func=None, quit_func=None):
            # initial_state: True when the main window is currently shown.
            iconpath = os.path.join(image_root, 'bittorrent.ico')

            systray.Control.__init__(self, app_name, iconpath)

            self.toggle_func = toggle_func
            self.quit_func = quit_func
            self.tooltip_text = None

            self.toggle_state = initial_state
            menu_text = self._get_text_for_state(self.toggle_state)

            self.toggle_item = systray.MenuItem(name='toggle',
                                                title=menu_text)

            self.toggle_item.onclick = self.toggle
            # Double-clicking the icon acts like the toggle menu item.
            self.on_double_click = self.toggle

            self.add_menuitem(self.toggle_item)
            self.default_menu_index = 1

        def get_tooltip(self):
            # Returns the cached tooltip, not whatever pysystray holds.
            return self.tooltip_text

        def set_tooltip(self, tooltip_text):
            # ow.
            if not hasattr(self, 'systray'):
                return

            # FIXME: pysystray bug means this might fail
            try:
                if self.tooltip_text != tooltip_text:
                    self.systray.text = tooltip_text
                    # we set our own cache after sending the value to
                    # pysystray, since it could fail
                    self.tooltip_text = tooltip_text
            except:
                pass

        def on_quit(self, *args):
            # Tray menu "Quit": forward to the app on the GTK thread.
            if self.quit_func is not None:
                self._callout(self.quit_func)

        def set_toggle_state(self, b):
            # ow.
            if not hasattr(self, "systray"):
                return

            s = self.systray
            self.toggle_state = b
            s.menu.items['toggle'].title = self._get_text_for_state(self.toggle_state)

        def _get_text_for_state(self, state):
            # The menu label names the action, not the current state.
            if state:
                text = _("Hide %s") % app_name
            else:
                text = _("Show %s") % app_name
            return text

        def toggle(self, s):
            # Menu click / icon double-click: show or hide the main window.
            if self.toggle_func is not None:
                self._callout(self.toggle_func)
            self.set_toggle_state(not self.toggle_state)

        def _callout(self, func):
            # Marshal the callback onto the GTK main loop.
            if callable(func):
                gtk_wrap(func)
+
+else:
    # No tray icon for *your* OS !
    class TrayIcon:
        """Do-nothing stand-in used on platforms without systray support;
        every public method is a shared no-op."""
        def func(*a, **kw):
            pass
        __init__ = enable = disable = get_tooltip = set_tooltip = set_toggle_state = func
+
+
if __name__ == '__main__':
    # Manual smoke test: show the tray icon for ten seconds.
    import threading
    from NohGooee.platform import install_translation
    install_translation()
    ti = TrayIcon(True)
    # NOTE(review): only the non-Windows stub defines enable(); on nt this
    # would raise AttributeError -- confirm intended platform for this test.
    th = threading.Thread(target=ti.enable, args=())
    th.start()
    from time import sleep
    sleep(10)
diff --git a/NohGooee/Uploader.py b/NohGooee/Uploader.py
new file mode 100644
index 0000000..ce98949
--- /dev/null
+++ b/NohGooee/Uploader.py
@@ -0,0 +1,97 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from NohGooee.CurrentRateMeasure import Measure
+
+
class Upload(object):
    """Server side of one peer connection.

    Queues incoming block requests, hands them to the rate limiter, and
    tracks the choke / interest state for this peer.
    """

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.totalup2 = totalup2
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        # Per the protocol, peers start out choked and uninterested.
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []  # pending (index, begin, length) requests
        self.measure = Measure(max_rate_period)
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_interested(self):
        """Peer declared interest; notify the choker."""
        if self.interested:
            return
        self.interested = True
        self.choker.interested(self.connection)

    def got_not_interested(self):
        """Peer withdrew interest; drop pending requests, notify choker."""
        if not self.interested:
            return
        self.interested = False
        del self.buffer[:]
        self.choker.not_interested(self.connection)

    def got_request(self, index, begin, length):
        """Queue a block request.  Oversized or unsolicited requests kill
        the connection; requests racing our choke are silently dropped."""
        if length > self.max_slice_length or not self.interested:
            self.connection.close()
            return
        if self.connection.choke_sent:
            return
        self.buffer.append((index, begin, length))
        conn = self.connection
        if conn.next_upload is None and conn.connection.is_flushed():
            self.ratelimiter.queue(conn, conn.encoder.context.rlgroup)

    def got_cancel(self, index, begin, length):
        """Remove a pending request, if it is still queued."""
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def get_upload_chunk(self):
        """Pop the oldest pending request and read its data.

        Returns (index, begin, data), or None when nothing is queued or
        the storage read fails (which also drops the connection).
        """
        if not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        data = self.storage.get_piece(index, begin, length)
        if data is None:
            self.connection.close()
            return None
        return (index, begin, data)

    def update_rate(self, bytes):
        """Credit sent bytes to this connection's and the global meters."""
        for meter in (self.measure, self.totalup, self.totalup2):
            meter.update_rate(bytes)

    def choke(self):
        """Choke the peer (idempotent)."""
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        """The choke actually went out; all pending requests die with it."""
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        """Unchoke the peer (idempotent), recording when it happened."""
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        """True when requests are waiting to be served."""
        return bool(self.buffer)

    def get_rate(self):
        """Current upload rate to this peer."""
        return self.measure.get_rate()
diff --git a/NohGooee/__init__.py b/NohGooee/__init__.py
new file mode 100644
index 0000000..5dc8996
--- /dev/null
+++ b/NohGooee/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
app_name = 'NoGooee'
version = '1.0.0'

import sys
# NOTE(review): assumes _() is already installed as a builtin by gettext
# before this module is imported -- confirm, otherwise this NameErrors.
assert sys.version_info >= (2, 2, 1), _("Python %s or newer required") % '2.2.1'
import os
import time

# When running from a Codeville checkout, record the branch directory name.
branch = None
if os.access('.cdv', os.F_OK):
    branch = os.path.split(os.path.realpath(os.path.split(sys.argv[0])[0]))[1]

from NohGooee.language import languages, language_names
from NohGooee.platform import get_home_dir, is_frozen_exe

# On OS X the translation must be installed at import time.
if os.name == 'posix':
    if os.uname()[0] == "Darwin":
        from NohGooee.platform import install_translation
        install_translation()

del sys, get_home_dir, is_frozen_exe

# Severity levels used throughout the client's error reporting.
INFO = 0
WARNING = 1
ERROR = 2
CRITICAL = 3

status_dict = {INFO: 'info',
               WARNING: 'warning',
               ERROR: 'error',
               CRITICAL: 'critical'}
+
class BTFailure(Exception):
    """Base exception for all BitTorrent-level errors in this package."""
    pass

class BTShutdown(BTFailure):
    """Raised when a torrent is deliberately shut down rather than failed."""
    pass
+
diff --git a/NohGooee/bencode.py b/NohGooee/bencode.py
new file mode 100644
index 0000000..f544286
--- /dev/null
+++ b/NohGooee/bencode.py
@@ -0,0 +1,134 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Petru Paler
+
+from NohGooee import BTFailure
+
def decode_int(x, f):
    """Decode a bencoded integer ("i<digits>e") starting at offset f.

    Returns (value, offset just past the terminating 'e').  Raises
    ValueError on leading zeros or negative zero, per the bencoding spec.
    """
    f += 1
    newf = x.index('e', f)
    n = int(x[f:newf])
    if x[f] == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and newf != f+1:
        raise ValueError
    return (n, newf+1)

def decode_string(x, f):
    """Decode a bencoded string ("<len>:<bytes>") starting at offset f.

    Returns (string, next offset).  The length may not have leading zeros.
    """
    colon = x.index(':', f)
    n = int(x[f:colon])
    if x[f] == '0' and colon != f+1:
        raise ValueError
    colon += 1
    return (x[colon:colon+n], colon+n)

def decode_list(x, f):
    """Decode a bencoded list ("l...e"); returns (list, next offset)."""
    r, f = [], f+1
    while x[f] != 'e':
        v, f = decode_func[x[f]](x, f)
        r.append(v)
    return (r, f + 1)

def decode_dict(x, f):
    """Decode a bencoded dict ("d...e"); returns (dict, next offset).

    Keys must be strings in strictly increasing order; a duplicate or
    misordered key raises ValueError.
    """
    r, f = {}, f+1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # Guard the None sentinel explicitly: the original relied on
        # "None >= str" comparing False, which is Python 2-only behavior.
        if lastkey is not None and lastkey >= k:
            raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)

# Dispatch table keyed on the first character of each bencoded value:
# 'l' list, 'd' dict, 'i' integer, any digit begins a string length.
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
for _digit in '0123456789':
    decode_func[_digit] = decode_string
del _digit
+
def bdecode(x):
    """Decode one complete bencoded value; reject trailing garbage."""
    try:
        value, consumed = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure(_("not a valid bencoded string"))
    if consumed != len(x):
        raise BTFailure(_("invalid bencoded value (data after valid prefix)"))
    return value
+
+from types import StringType, IntType, LongType, DictType, ListType, TupleType
+
+
class Bencached(object):
    """Wrapper marking a string as already bencoded, so bencode() splices
    it into the output verbatim instead of re-encoding it."""

    __slots__ = ['bencoded']

    def __init__(self, s):
        # s: the pre-encoded bencoded representation.
        self.bencoded = s
+
def encode_bencached(x, r):
    """Splice an already-encoded value straight into the output list."""
    r.append(x.bencoded)

def encode_int(x, r):
    """Encode an integer as i<value>e."""
    r.extend(('i', str(x), 'e'))

def encode_bool(x, r):
    """Encode a boolean as the bencoded integer 1 or 0."""
    if x:
        encode_int(1, r)
    else:
        encode_int(0, r)

def encode_string(x, r):
    """Encode a string as <length>:<bytes>."""
    r.extend((str(len(x)), ':', x))

def encode_list(x, r):
    """Encode a list or tuple as l<items>e, dispatching per element type."""
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')

def encode_dict(x, r):
    """Encode a dict as d<key><value>...e with keys in sorted order."""
    r.append('d')
    keys = x.keys()
    keys.sort()
    for key in keys:
        r.extend((str(len(key)), ':', key))
        encode_func[type(x[key])](x[key], r)
    r.append('e')
+
# Type-dispatch table used by bencode(); keyed on concrete Python types.
encode_func = {
    Bencached: encode_bencached,
    IntType: encode_int,
    LongType: encode_int,
    StringType: encode_string,
    ListType: encode_list,
    TupleType: encode_list,
    DictType: encode_dict,
    }

try:
    # bool does not exist on the oldest supported Pythons.
    from types import BooleanType
except ImportError:
    pass
else:
    encode_func[BooleanType] = encode_bool

def bencode(x):
    """Bencode any supported value and return the encoded string."""
    parts = []
    encode_func[type(x)](x, parts)
    return ''.join(parts)
diff --git a/NohGooee/bitfield.py b/NohGooee/bitfield.py
new file mode 100644
index 0000000..bede74f
--- /dev/null
+++ b/NohGooee/bitfield.py
@@ -0,0 +1,75 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen, Uoti Urpala, and John Hoffman
+
+from array import array
+
# counts[byte] == chr(number of set bits in that byte); stored as a 256-char
# string so str.translate() can popcount a whole bitfield in one pass.
counts = [chr(sum([(i >> j) & 1 for j in xrange(8)])) for i in xrange(256)]
counts = ''.join(counts)
+
+
class Bitfield:
    """Fixed-length bit array tracking which pieces are present.

    Bits are packed MSB-first into an array of bytes.  Once every bit is
    set the backing array is dropped (self.bits = None) and lookups
    short-circuit to 1.  Only setting bits to true is supported.
    """

    def __init__(self, length, bitstring=None):
        """length: number of bits.  bitstring: optional packed initial
        value; its trailing pad bits must be zero."""
        self.length = length
        rlen, extra = divmod(length, 8)
        if bitstring is None:
            self.numfalse = length
            if extra:
                self.bits = array('B', chr(0) * (rlen + 1))
            else:
                self.bits = array('B', chr(0) * rlen)
        else:
            if extra:
                if len(bitstring) != rlen + 1:
                    raise ValueError
                # Pad bits beyond `length` must be zero.
                if (ord(bitstring[-1]) << extra) & 0xFF != 0:
                    raise ValueError
            else:
                if len(bitstring) != rlen:
                    raise ValueError
            # Popcount via table translate: each byte maps to its bit count.
            # (Removed an unused local alias of `counts` from the original.)
            self.numfalse = length - sum(array('B',
                                               bitstring.translate(counts)))
            if self.numfalse != 0:
                self.bits = array('B', bitstring)
            else:
                self.bits = None

    def __setitem__(self, index, val):
        """Set bit *index* to true (val must be truthy; clearing a bit is
        not supported)."""
        assert val
        pos = index >> 3
        mask = 128 >> (index & 7)
        if self.bits[pos] & mask:
            return
        self.bits[pos] |= mask
        self.numfalse -= 1
        if self.numfalse == 0:
            # Everything present: release the backing store.
            self.bits = None

    def __getitem__(self, index):
        bits = self.bits
        if bits is None:
            # All bits set.
            return 1
        return bits[index >> 3] & 128 >> (index & 7)

    def __len__(self):
        return self.length

    def tostring(self):
        """Return the packed byte-string form, trailing pad bits zeroed."""
        if self.bits is None:
            rlen, extra = divmod(self.length, 8)
            r = chr(0xFF) * rlen
            if extra:
                r += chr((0xFF << (8 - extra)) & 0xFF)
            return r
        else:
            return self.bits.tostring()
diff --git a/NohGooee/btformats.py b/NohGooee/btformats.py
new file mode 100644
index 0000000..632fe27
--- /dev/null
+++ b/NohGooee/btformats.py
@@ -0,0 +1,140 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+import re
+
+from NohGooee import BTFailure
+
# Path components may not begin with '/', '\', '.', or '~' and may not
# contain a path separator anywhere -- blocks directory-traversal tricks.
allowed_path_re = re.compile(r'^[^/\\.~][^/\\]*$')

# Integer types acceptable for numeric metainfo fields.
ints = (long, int)
+
def check_info(info, check_paths=True):
    """Validate the 'info' dict of a metainfo file.

    Checks piece hashes, piece length, name, and the single-file /
    multi-file layout, raising BTFailure on any malformed or unsafe field.
    When check_paths is true, path components are also screened against
    allowed_path_re for security.
    """
    if type(info) != dict:
        raise BTFailure(_("bad metainfo - not a dictionary"))
    pieces = info.get('pieces')
    # One 20-byte SHA1 digest per piece.
    if type(pieces) != str or len(pieces) % 20 != 0:
        raise BTFailure(_("bad metainfo - bad pieces key"))
    piecelength = info.get('piece length')
    if type(piecelength) not in ints or piecelength <= 0:
        raise BTFailure(_("bad metainfo - illegal piece length"))
    name = info.get('name')
    if type(name) != str:
        raise BTFailure(_("bad metainfo - bad name"))
    if not allowed_path_re.match(name):
        raise BTFailure(_("name %s disallowed for security reasons") % name)
    # Exactly one of 'files' (multi-file) and 'length' (single-file).
    if info.has_key('files') == info.has_key('length'):
        raise BTFailure(_("single/multiple file mix"))
    if info.has_key('length'):
        length = info.get('length')
        if type(length) not in ints or length < 0:
            raise BTFailure(_("bad metainfo - bad length"))
    else:
        files = info.get('files')
        if type(files) != list:
            raise BTFailure(_('bad metainfo - "files" is not a list of files'))
        for f in files:
            if type(f) != dict:
                raise BTFailure(_("bad metainfo - file entry must be a dict"))
            length = f.get('length')
            if type(length) not in ints or length < 0:
                raise BTFailure(_("bad metainfo - bad length"))
            path = f.get('path')
            if type(path) != list or path == []:
                raise BTFailure(_("bad metainfo - bad path"))
            for p in path:
                if type(p) != str:
                    raise BTFailure(_("bad metainfo - bad path dir"))
                if check_paths and not allowed_path_re.match(p):
                    raise BTFailure(_("path %s disallowed for security reasons") % p)
        # Detect duplicate paths and file-vs-directory collisions by
        # scanning the sorted joined paths pairwise.
        f = ['/'.join(x['path']) for x in files]
        f.sort()
        i = iter(f)
        try:
            name2 = i.next()
            while True:
                name1 = name2
                name2 = i.next()
                if name2.startswith(name1):
                    if name1 == name2:
                        raise BTFailure(_("bad metainfo - duplicate path"))
                    elif name2[len(name1)] == '/':
                        # Fixed message: the original adjacent-string
                        # concatenation lacked the space before "file".
                        raise BTFailure(_("bad metainfo - name used as both "
                                          "file and subdirectory name"))
        except StopIteration:
            pass
+
def check_message(message, check_paths=True):
    """Validate a fully decoded .torrent: info dict plus announce/nodes."""
    if type(message) != dict:
        raise BTFailure(_("bad metainfo - wrong object type"))
    check_info(message.get('info'), check_paths)
    has_announce = type(message.get('announce')) == str
    has_nodes = type(message.get('nodes')) == list
    # Trackerless torrents may carry DHT nodes instead of an announce URL.
    if not has_announce and not has_nodes:
        raise BTFailure(_("bad metainfo - no announce URL string"))
    if message.has_key('nodes'):
        check_nodes(message.get('nodes'))
+
def check_nodes(nodes):
    """Validate DHT bootstrap nodes: a list of [host, port] pairs."""
    ## note, these strings need changing
    for entry in nodes:
        if type(entry) != list:
            raise BTFailure(_("bad metainfo - node is not a list"))
        if len(entry) != 2:
            raise BTFailure(_("bad metainfo - node list must have only two elements"))
        host, port = entry
        if type(host) != str:
            raise BTFailure(_("bad metainfo - node host must be a string"))
        if type(port) != int:
            raise BTFailure(_("bad metainfo - node port must be an integer"))
+
def check_peers(message):
    """Validate a tracker announce response.

    Accepts either a list of peer dicts or a compact 6-byte-per-peer
    string, plus the optional interval/count bookkeeping keys.  Raises
    BTFailure on any malformed field.
    """
    if type(message) != dict:
        raise BTFailure
    if message.has_key('failure reason'):
        if type(message['failure reason']) != str:
            raise BTFailure(_("failure reason must be a string"))
        return
    if message.has_key('warning message'):
        if type(message['warning message']) != str:
            raise BTFailure(_("warning message must be a string"))
    peers = message.get('peers')
    if type(peers) == list:
        for p in peers:
            if type(p) != dict:
                raise BTFailure(_("invalid entry in peer list - peer info must be a dict"))
            if type(p.get('ip')) != str:
                raise BTFailure(_("invalid entry in peer list - peer ip must be a string"))
            port = p.get('port')
            # Fixed: the original tested `p <= 0` (the peer dict itself)
            # instead of the port, so nonpositive ports slipped through.
            if type(port) not in ints or port <= 0:
                raise BTFailure(_("invalid entry in peer list - peer port must be an integer"))
            if p.has_key('peer id'):
                peerid = p.get('peer id')
                if type(peerid) != str or len(peerid) != 20:
                    raise BTFailure(_("invalid entry in peer list - invalid peerid"))
    elif type(peers) != str or len(peers) % 6 != 0:
        # Compact form: 4 bytes of IP plus 2 bytes of port per peer.
        raise BTFailure(_("invalid peer list"))
    interval = message.get('interval', 1)
    if type(interval) not in ints or interval <= 0:
        raise BTFailure(_("invalid announce interval"))
    minint = message.get('min interval', 1)
    if type(minint) not in ints or minint <= 0:
        raise BTFailure(_("invalid min announce interval"))
    if type(message.get('tracker id', '')) != str:
        raise BTFailure(_("invalid tracker id"))
    npeers = message.get('num peers', 0)
    if type(npeers) not in ints or npeers < 0:
        raise BTFailure(_("invalid peer count"))
    dpeers = message.get('done peers', 0)
    if type(dpeers) not in ints or dpeers < 0:
        raise BTFailure(_("invalid seed count"))
    last = message.get('last', 0)
    if type(last) not in ints or last < 0:
        raise BTFailure(_('invalid "last" entry'))
diff --git a/NohGooee/configfile.py b/NohGooee/configfile.py
new file mode 100644
index 0000000..49c4e4f
--- /dev/null
+++ b/NohGooee/configfile.py
@@ -0,0 +1,222 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Uoti Urpala and Matt Chisholm
+
+import os
+import sys
+import gettext
+import locale
+
+# Python 2.2 doesn't have RawConfigParser
+try:
+ from ConfigParser import RawConfigParser
+except ImportError:
+ from ConfigParser import ConfigParser as RawConfigParser
+
+from ConfigParser import MissingSectionHeaderError, ParsingError
+from NohGooee import parseargs
+from NohGooee import app_name, version, ERROR, BTFailure
+from NohGooee.platform import get_config_dir, locale_root, is_frozen_exe
+from NohGooee.defaultargs import MYTRUE
+from NohGooee.zurllib import bind_tracker_connection, set_zurllib_rawserver
+
+# file names (relative to data_dir) holding saved UI and per-torrent settings
+MAIN_CONFIG_FILE = 'ui_config'
+TORRENT_CONFIG_FILE = 'torrent_config'
+
+# legacy section names from older releases, checked as fallbacks
+alt_uiname = {'bittorrent':'btdownloadgui',
+              'maketorrent':'btmaketorrentgui',}
+
+def _read_config(filename):
+    """Read an INI-style config file, setting a corrupt one aside.
+
+    Always returns a RawConfigParser; a missing or unparsable file
+    yields an empty parser instead of raising.
+    """
+    # check for bad config files (Windows corrupts them all the time)
+    p = RawConfigParser()
+    fp = None
+    try:
+        fp = open(filename)
+    except IOError:
+        pass
+
+    if fp is not None:
+        try:
+            p.readfp(fp, filename=filename)
+        except MissingSectionHeaderError:
+            # rename the broken file aside so the next run starts clean
+            fp.close()
+            del fp
+            bad_config(filename)
+        except ParsingError:
+            fp.close()
+            del fp
+            bad_config(filename)
+        else:
+            fp.close()
+    return p
+
+
+def _write_config(error_callback, filename, p):
+    """Write parser *p* to *filename*; report failures via error_callback.
+
+    Never raises: any error (including failure to open the file, in
+    which case 'f' is unbound and the inner close raises NameError,
+    swallowed by the bare except) is turned into an ERROR callback.
+    """
+    try:
+        f = file(filename, 'w')
+        p.write(f)
+        f.close()
+    except Exception, e:
+        try:
+            f.close()
+        except:
+            pass
+        error_callback(ERROR, _("Could not permanently save options: ")+
+                       str(e))
+
+
+def bad_config(filename):
+    """Rename a corrupt config file to an unused '<name>.broken[N]' path.
+
+    Keeps the broken file for inspection and warns on stderr.
+    """
+    base_bad_filename = filename + '.broken'
+    bad_filename = base_bad_filename
+    i = 0
+    # probe .broken, .broken0, .broken1, ... until a free name is found
+    while os.access(bad_filename, os.F_OK):
+        bad_filename = base_bad_filename + str(i)
+        i+=1
+    os.rename(filename, bad_filename)
+    sys.stderr.write(_("Error reading config file. "
+                       "Old config file stored in \"%s\"") % bad_filename)
+
+
+def get_config(defaults, section):
+    """Load saved option values for *section* from ~/.bittorrent/config.
+
+    Only keys present in *defaults* are kept; the '[common]' section
+    supplies values not set in the UI-specific section.  Values are
+    type-converted in place via parseargs.parse_options.  Returns the
+    dict of loaded values ({} if no config dir is available).
+    """
+    dir_root = get_config_dir()
+
+    if dir_root is None:
+        return {}
+
+    configdir = os.path.join(dir_root, '.bittorrent')
+
+    if not os.path.isdir(configdir):
+        try:
+            os.mkdir(configdir, 0700)
+        except:
+            # best effort; a missing dir just means no persisted config
+            pass
+
+    p = _read_config(os.path.join(configdir, 'config'))
+    values = {}
+    if p.has_section(section):
+        for name, value in p.items(section):
+            if name in defaults:
+                values[name] = value
+    if p.has_section('common'):
+        for name, value in p.items('common'):
+            if name in defaults and name not in values:
+                values[name] = value
+    # default data_dir to <configdir>/data when the caller left it empty
+    if defaults.get('data_dir') == '' and \
+           'data_dir' not in values and os.path.isdir(configdir):
+        datadir = os.path.join(configdir, 'data')
+        values['data_dir'] = datadir
+    parseargs.parse_options(defaults, values)
+    return values
+
+
+def save_ui_config(defaults, section, save_options, error_callback):
+    """Persist the options named in *save_options* to MAIN_CONFIG_FILE.
+
+    Rewrites the *section* (and removes any legacy alt-named section)
+    from the current values in *defaults*.
+    """
+    filename = os.path.join(defaults['data_dir'], MAIN_CONFIG_FILE)
+    p = _read_config(filename)
+    p.remove_section(section)
+    if p.has_section(alt_uiname[section]):
+        p.remove_section(alt_uiname[section])
+    p.add_section(section)
+    for name in save_options:
+        if defaults.has_key(name):
+            p.set(section, name, defaults[name])
+        else:
+            # a requested option that doesn't exist indicates a broken install
+            err_str = _("Configuration option mismatch: '%s'") % name
+            if is_frozen_exe:
+                err_str = _("You must quit %s and reinstall it. (%s)") % (app_name, err_str)
+            error_callback(ERROR, err_str)
+    _write_config(error_callback, filename, p)
+
+
+def save_torrent_config(path, infohash, config, error_callback):
+    """Save per-torrent *config* under a hex-infohash section of
+    TORRENT_CONFIG_FILE in *path*, replacing any previous section."""
+    section = infohash.encode('hex')
+    filename = os.path.join(path, TORRENT_CONFIG_FILE)
+    p = _read_config(filename)
+    p.remove_section(section)
+    p.add_section(section)
+    for key, value in config.items():
+        p.set(section, key, value)
+    _write_config(error_callback, filename, p)
+
+def read_torrent_config(global_config, path, infohash, error_callback):
+    """Load per-torrent overrides for *infohash* from TORRENT_CONFIG_FILE.
+
+    Values are stored as strings; each one is coerced to the type of the
+    corresponding entry in *global_config* (with special handling for
+    bools, since bool('False') would be True).  Unknown keys are dropped.
+    Returns {} when no section for this torrent exists.
+    """
+    section = infohash.encode('hex')
+    filename = os.path.join(path, TORRENT_CONFIG_FILE)
+    p = _read_config(filename)
+    if not p.has_section(section):
+        return {}
+    else:
+        c = {}
+        for name, value in p.items(section):
+            if global_config.has_key(name):
+                t = type(global_config[name])
+                if t == bool:
+                    c[name] = value in ('1', 'True', MYTRUE, True)
+                else:
+                    c[name] = type(global_config[name])(value)
+        return c
+
+def remove_torrent_config(path, infohash, error_callback):
+    """Delete the saved config section for *infohash*, if present."""
+    section = infohash.encode('hex')
+    filename = os.path.join(path, TORRENT_CONFIG_FILE)
+    p = _read_config(filename)
+    if p.has_section(section):
+        p.remove_section(section)
+    _write_config(error_callback, filename, p)
+
+def parse_configuration_and_args(defaults, uiname, arglist=[], minargs=0,
+ maxargs=0):
+ defconfig = dict([(name, value) for (name, value, doc) in defaults])
+ if arglist[0:] == ['--version']:
+ print version
+ sys.exit(0)
+
+ if arglist[0:] in (['--help'], ['-h'], ['--usage'], ['-?']):
+ parseargs.printHelp(uiname, defaults)
+ sys.exit(0)
+
+ presets = get_config(defconfig, uiname)
+ config, args = parseargs.parseargs(arglist, defaults, minargs, maxargs,
+ presets)
+ datadir = config['data_dir']
+ if datadir:
+ if uiname in ('bittorrent', 'maketorrent'):
+ values = {}
+ p = _read_config(os.path.join(datadir, MAIN_CONFIG_FILE))
+ if not p.has_section(uiname) and p.has_section(alt_uiname[uiname]):
+ uiname = alt_uiname[uiname]
+ if p.has_section(uiname):
+ for name, value in p.items(uiname):
+ if name in defconfig:
+ values[name] = value
+ parseargs.parse_options(defconfig, values)
+ presets.update(values)
+ config, args = parseargs.parseargs(arglist, defaults, minargs,
+ maxargs, presets)
+
+ for d in ('', 'resume', 'metainfo'):
+ ddir = os.path.join(datadir, d)
+ try:
+ if not os.path.exists(ddir):
+ os.mkdir(ddir, 0700)
+ except:
+ pass
+
+ if config['language'] != '':
+ try:
+ lang = gettext.translation('bittorrent', locale_root,
+ languages=[config['language']])
+ lang.install()
+ except IOError:
+ # don't raise an error, just continue untranslated
+ sys.stderr.write(_('Could not find translation for language "%s"\n') %
+ config['language'])
+ if config.has_key('bind') and ['bind'] != '':
+ bind_tracker_connection(config['bind'])
+ return config, args
diff --git a/NohGooee/defaultargs.py b/NohGooee/defaultargs.py
new file mode 100644
index 0000000..dc9583a
--- /dev/null
+++ b/NohGooee/defaultargs.py
@@ -0,0 +1,306 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+
+# False and True are not distinct from 0 and 1 under Python 2.2,
+# and we want to handle boolean options differently.
+class MyBool(object):
+    """Boolean wrapper distinguishable from int (Python 2.2 lacks a
+    distinct bool type); truth-tests and repr like a real bool."""
+
+    def __init__(self, value):
+        self.value = value
+
+    def __repr__(self):
+        if self.value:
+            return 'True'
+        return 'False'
+
+    def __nonzero__(self):
+        # Python 2 truth protocol
+        return self.value
+
+# shared singletons used throughout the option tables below
+MYTRUE = MyBool(True)
+MYFALSE = MyBool(False)
+
+import os
+### add your favorite here
+# platforms whose libc needs the slow-read workaround enabled by default
+BAD_LIBC_WORKAROUND_DEFAULT = MYFALSE
+if os.name == 'posix':
+    if os.uname()[0] in ['Darwin']:
+        BAD_LIBC_WORKAROUND_DEFAULT = MYTRUE
+
+# default cap on half-open outgoing connections
+MIN_INCOMPLETE = 100
+if os.name == 'nt':
+    from NohGooee.platform import win_version_num
+    # starting in XP SP2 the incomplete outgoing connection limit was set to 10
+    if win_version_num >= (2, 5, 1, 2, 0):
+        MIN_INCOMPLETE = 10
+
+from NohGooee import languages
+
+# Option tables: (name, default value, help text) triples consumed by
+# get_defaults() below and by parseargs for --help output.
+
+# options shown to every UI
+basic_options = [
+    ('data_dir', '',
+     _("directory under which variable data such as fastresume information "
+       "and GUI state is saved. Defaults to subdirectory 'data' of the "
+       "bittorrent config directory.")),
+    ('filesystem_encoding', '',
+     _("character encoding used on the local filesystem. "
+       "If left empty, autodetected. "
+       "Autodetection doesn't work under python versions older than 2.3")),
+    ('language', '',
+     _("ISO Language code to use") + ': ' + ', '.join(languages)),
+    ]
+
+# options shared by the downloading UIs (bittorrent*, launchmany*)
+common_options = [
+    ('ip', '',
+     _("ip to report to the tracker (has no effect unless you are on the same "
+       "local network as the tracker)")),
+    ('forwarded_port', 0,
+     _("world-visible port number if it's different from the one the client "
+       "listens on locally")),
+    ('minport', 6881,
+     _("minimum port to listen on, counts up if unavailable")),
+    ('maxport', 6999,
+     _("maximum port to listen on")),
+    ('bind', '',
+     _("ip to bind to locally")),
+    ('display_interval', .5,
+     _("seconds between updates of displayed information")),
+    ('rerequest_interval', 5 * 60,
+     _("minutes to wait between requesting more peers")),
+    ('min_peers', 20,
+     _("minimum number of peers to not do rerequesting")),
+    ('max_initiate', 60,
+     _("number of peers at which to stop initiating new connections")),
+    ('max_incomplete', MIN_INCOMPLETE,
+     _("max number of outgoing incomplete connections")),
+    ('max_allow_in', 80,
+     _("maximum number of connections to allow, after this new incoming "
+       "connections will be immediately closed")),
+    ('check_hashes', MYTRUE,
+     _("whether to check hashes on disk")),
+    ('max_upload_rate', 20,
+     _("maximum kB/s to upload at, 0 means no limit")),
+    ('min_uploads', 2,
+     _("the number of uploads to fill out to with extra optimistic unchokes")),
+    ('max_files_open', 50,
+     _("the maximum number of files in a multifile torrent to keep open at a "
+       "time, 0 means no limit. Used to avoid running out of file descriptors.")),
+    ('start_trackerless_client', MYTRUE,
+     _("Initialize a trackerless client. This must be enabled in order to download trackerless torrents.")),
+    ('upnp', MYTRUE,
+     _("Enable automatic port mapping")+' (UPnP)'),
+    ]
+
+
+# tuning knobs most users never touch
+rare_options = [
+    ('keepalive_interval', 120.0,
+     _("number of seconds to pause between sending keepalives")),
+    ('download_slice_size', 2 ** 14,
+     _("how many bytes to query for per request.")),
+    ('max_message_length', 2 ** 23,
+     _("maximum length prefix encoding you'll accept over the wire - larger "
+       "values get the connection dropped.")),
+    ('socket_timeout', 300.0,
+     _("seconds to wait between closing sockets which nothing has been "
+       "received on")),
+    ('timeout_check_interval', 60.0,
+     _("seconds to wait between checking if any connections have timed out")),
+    ('max_slice_length', 16384,
+     _("maximum length slice to send to peers, close connection if a larger "
+       "request is received")),
+    ('max_rate_period', 20.0,
+     _("maximum time interval over which to estimate the current upload and download rates")),
+    ('max_rate_period_seedtime', 100.0,
+     _("maximum time interval over which to estimate the current seed rate")),
+    ('max_announce_retry_interval', 1800,
+     _("maximum time to wait between retrying announces if they keep failing")),
+    ('snub_time', 30.0,
+     _("seconds to wait for data to come in over a connection before assuming "
+       "it's semi-permanently choked")),
+    ('rarest_first_cutoff', 4,
+     _("number of downloads at which to switch from random to rarest first")),
+    ('upload_unit_size', 1380,
+     _("how many bytes to write into network buffers at once.")),
+    ('retaliate_to_garbled_data', MYTRUE,
+     _("refuse further connections from addresses with broken or intentionally "
+       "hostile peers that send incorrect data")),
+    ('one_connection_per_ip', MYTRUE,
+     _("do not connect to several peers that have the same IP address")),
+    ('peer_socket_tos', 8,
+     _("if nonzero, set the TOS option for peer connections to this value")),
+    ('bad_libc_workaround', BAD_LIBC_WORKAROUND_DEFAULT,
+     _("enable workaround for a bug in BSD libc that makes file reads very slow.")),
+    ('tracker_proxy', '',
+     _("address of HTTP proxy to use for tracker connections")),
+    ('close_with_rst', 0,
+     _("close connections with RST and avoid the TCP TIME_WAIT state")),
+    ('twisted', -1,
+     _("Use Twisted network libraries for network connections. 1 means use twisted, 0 means do not use twisted, -1 means autodetect, and prefer twisted")),
+    ]
+
+
+def get_defaults(ui):
+    """Assemble the (name, default, doc) option list for UI *ui*.
+
+    Starts from the shared tables above and appends/overrides options
+    specific to the given front end; the resulting list is what
+    parse_configuration_and_args() expects as *defaults*.
+    """
+    assert ui in ("bittorrent" , "bittorrent-curses", "bittorrent-console" ,
+                  "maketorrent", "maketorrent-console",
+                  "launchmany-curses", "launchmany-console" ,
+                  )
+    r = []
+
+    if ui.startswith('bittorrent') or ui.startswith('launchmany'):
+        r.extend(common_options)
+
+    if ui == 'bittorrent':
+        # GUI-only options
+        r.extend([
+            ('save_as', '',
+             _("file name (for single-file torrents) or directory name (for "
+               "batch torrents) to save the torrent as, overriding the default "
+               "name in the torrent. See also --save_in, if neither is "
+               "specified the user will be asked for save location")),
+            ('advanced', MYFALSE,
+             _("display advanced user interface")),
+            ('next_torrent_time', 300,
+             _("the maximum number of minutes to seed a completed torrent "
+               "before stopping seeding")),
+            ('next_torrent_ratio', 80,
+             _("the minimum upload/download ratio, in percent, to achieve "
+               "before stopping seeding. 0 means no limit.")),
+            ('last_torrent_ratio', 0,
+             _("the minimum upload/download ratio, in percent, to achieve "
+               "before stopping seeding the last torrent. 0 means no limit.")),
+            ('seed_forever', MYFALSE,
+             _("Seed each completed torrent indefinitely "
+               "(until the user cancels it)")),
+            ('seed_last_forever', MYTRUE,
+             _("Seed the last torrent indefinitely "
+               "(until the user cancels it)")),
+            ('pause', MYFALSE,
+             _("start downloader in paused state")),
+            ('start_torrent_behavior', 'replace',
+             _('specifies how the app should behave when the user manually '
+               'tries to start another torrent: "replace" means always replace '
+               'the running torrent with the new one, "add" means always add '
+               'the running torrent in parallel, and "ask" means ask the user '
+               'each time.')),
+            ('open_from', '',
+             'local directory to look in for .torrent files to open'),
+            ('ask_for_save', MYFALSE,
+             'whether or not to ask for a location to save downloaded files in'),
+            ('start_minimized', MYFALSE,
+             _("Start BitTorrent minimized")),
+            ('new_version', '',
+             _("override the version provided by the http version check "
+               "and enable version check debugging mode")),
+            ('current_version', '',
+             _("override the current version used in the version check "
+               "and enable version check debugging mode")),
+            ('geometry', '',
+             _("specify window size and position, in the format: "
+               "WIDTHxHEIGHT+XOFFSET+YOFFSET")),
+            ])
+
+        if os.name == 'nt':
+            # Windows-only GUI niceties
+            r.extend([
+                ('launch_on_startup', MYTRUE,
+                 _("Launch BitTorrent when Windows starts")),
+                ('minimize_to_tray', MYTRUE,
+                 _("Minimize to system tray")),
+                ])
+
+    if ui in ('bittorrent-console', 'bittorrent-curses'):
+        r.append(
+            ('save_as', '',
+             _("file name (for single-file torrents) or directory name (for "
+               "batch torrents) to save the torrent as, overriding the "
+               "default name in the torrent. See also --save_in")))
+
+    if ui.startswith('bittorrent'):
+        r.extend([
+            ('max_uploads', -1,
+             _("the maximum number of uploads to allow at once. -1 means a "
+               "(hopefully) reasonable number based on --max_upload_rate. "
+               "The automatic values are only sensible when running one "
+               "torrent at a time.")),
+            ('save_in', '',
+             _("local directory where the torrent contents will be saved. The "
+               "file (single-file torrents) or directory (batch torrents) will "
+               "be created under this directory using the default name "
+               "specified in the .torrent file. See also --save_as.")),
+            ('responsefile', '',
+             _("deprecated, do not use")),
+            ('url', '',
+             _("deprecated, do not use")),
+            ('ask_for_save', 0,
+             _("whether or not to ask for a location to save downloaded files in")),
+            ])
+
+    if ui.startswith('launchmany'):
+        r.extend([
+            ('max_uploads', 6,
+             _("the maximum number of uploads to allow at once. -1 means a "
+               "(hopefully) reasonable number based on --max_upload_rate. The "
+               "automatic values are only sensible when running one torrent at "
+               "a time.")),
+            ('save_in', '',
+             _("local directory where the torrents will be saved, using a "
+               "name determined by --saveas_style. If this is left empty "
+               "each torrent will be saved under the directory of the "
+               "corresponding .torrent file")),
+            ('parse_dir_interval', 60,
+              _("how often to rescan the torrent directory, in seconds") ),
+            ('launch_delay', 0,
+             _("wait this many seconds after noticing a torrent before starting it, to avoid race with tracker")),
+            ('saveas_style', 4,
+              _("How to name torrent downloads: "
+                "1: use name OF torrent file (minus .torrent); "
+                "2: use name encoded IN torrent file; "
+                "3: create a directory with name OF torrent file "
+                "(minus .torrent) and save in that directory using name "
+                "encoded IN torrent file; "
+                "4: if name OF torrent file (minus .torrent) and name "
+                "encoded IN torrent file are identical, use that "
+                "name (style 1/2), otherwise create an intermediate "
+                "directory as in style 3; "
+                "CAUTION: options 1 and 2 have the ability to "
+                "overwrite files without warning and may present "
+                "security issues."
+                ) ),
+            ('display_path', ui == 'launchmany-console' and MYTRUE or MYFALSE,
+              _("whether to display the full path or the torrent contents for "
+                "each torrent") ),
+            ])
+
+    if ui.startswith('launchmany') or ui == 'maketorrent':
+        r.append(
+            ('torrent_dir', '',
+             _("directory to look for .torrent files (semi-recursive)")),)
+
+    if ui in ('bittorrent-curses', 'bittorrent-console'):
+        r.append(
+            ('spew', MYFALSE,
+             _("whether to display diagnostic info to stdout")))
+
+    if ui.startswith('maketorrent'):
+        r.extend([
+            ('piece_size_pow2', 18,
+             _("which power of two to set the piece size to")),
+            ('tracker_name', 'http://my.tracker:6969/announce',
+             _("default tracker name")),
+            ('tracker_list', '', ''),
+            ('use_tracker', MYTRUE,
+             _("if false then make a trackerless torrent, instead of "
+               "announce URL, use reliable node in form of <ip>:<port> or an "
+               "empty string to pull some nodes from your routing table")),
+            ])
+
+    r.extend(basic_options)
+
+    if ui.startswith('bittorrent') or ui.startswith('launchmany'):
+        r.extend(rare_options)
+
+    return r
diff --git a/NohGooee/defer.py b/NohGooee/defer.py
new file mode 100644
index 0000000..4531271
--- /dev/null
+++ b/NohGooee/defer.py
@@ -0,0 +1,56 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+class Deferred(object):
+    """Minimal Twisted-style Deferred.
+
+    Unlike Twisted: callback and errback chains are independent, a
+    Deferred may fire more than once, and every result is remembered so
+    listeners added after firing are replayed all stored results.
+    """
+    def __init__(self):
+        self.callbacks = []    # (cb, args, kwargs) success listeners
+        self.errbacks = []     # (cb, args, kwargs) failure listeners
+        self.calledBack = False
+        self.erredBack = False
+        self.results = []      # all successful results seen so far
+        self.failures = []     # all failures seen so far
+
+    def addCallback(self, cb, args=(), kwargs={}):
+        assert callable(cb)
+        self.callbacks.append((cb, args, kwargs))
+        # replay stored results if we already fired
+        if self.calledBack:
+            self.doCallbacks(self.results, [(cb, args, kwargs)])
+        return self
+
+    def addErrback(self, cb, args=(), kwargs={}):
+        assert callable(cb)
+        self.errbacks.append((cb, args, kwargs))
+        if self.erredBack:
+            self.doCallbacks(self.failures, [(cb, args, kwargs)])
+        return self
+
+    def addCallbacks(self, cb, eb, args=(), kwargs={},
+                     ebargs=(), ebkwargs={}):
+        # NOTE(review): unlike addCallback/addErrback this returns None,
+        # so it cannot be chained
+        assert callable(cb)
+        assert callable(eb)
+        self.addCallback(cb, args, kwargs)
+        self.addErrback(eb, ebargs, ebkwargs)
+
+    def callback(self, result):
+        self.results.append(result)
+        self.calledBack = True
+        if self.callbacks:
+            self.doCallbacks([result], self.callbacks)
+
+    def errback(self, failed):
+        self.failures.append(failed)
+        self.erredBack = True
+        if self.errbacks:
+            self.doCallbacks([failed], self.errbacks)
+
+    def doCallbacks(self, results, callbacks):
+        # for each result, thread it through the callback chain: each
+        # callback's return value becomes the next callback's input
+        for result in results:
+            for cb, args, kwargs in callbacks:
+                result = cb(result, *args, **kwargs)
diff --git a/NohGooee/download.py b/NohGooee/download.py
new file mode 100644
index 0000000..e3248c4
--- /dev/null
+++ b/NohGooee/download.py
@@ -0,0 +1,591 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen and Uoti Urpala
+
+from __future__ import division
+
+import os
+import sys
+import threading
+import errno
+import gc
+from sha import sha
+from socket import error as socketerror
+from random import seed
+from time import time
+from cStringIO import StringIO
+from traceback import print_exc
+from math import sqrt
+
+from NohGooee.btformats import check_message
+from NohGooee.Choker import Choker
+from NohGooee.Storage import Storage, FilePool
+from NohGooee.StorageWrapper import StorageWrapper
+from NohGooee.Uploader import Upload
+from NohGooee.Downloader import Downloader
+from NohGooee.Encoder import Encoder, SingleportListener
+from NohGooee.zurllib import set_zurllib_rawserver, add_unsafe_thread
+from NohGooee import PeerID
+
+from NohGooee.RateLimiter import MultiRateLimiter as RateLimiter
+from NohGooee.RateLimiter import RateLimitedGroup
+
+from NohGooee.RawServer_magic import RawServer
+from NohGooee.NatTraversal import NatTraverser
+from NohGooee.Rerequester import Rerequester, DHTRerequester
+from NohGooee.DownloaderFeedback import DownloaderFeedback
+from NohGooee.RateMeasure import RateMeasure
+from NohGooee.CurrentRateMeasure import Measure
+from NohGooee.PiecePicker import PiecePicker
+from NohGooee.ConvertedMetainfo import set_filesystem_encoding
+from NohGooee import version
+from NohGooee import BTFailure, BTShutdown, INFO, WARNING, ERROR, CRITICAL
+
+from khashmir.utkhashmir import UTKhashmir
+from khashmir import const
+
+class Feedback(object):
+    """No-op observer interface for torrent lifecycle events; UIs
+    subclass this and override the callbacks they care about."""
+
+    def finished(self, torrent):
+        pass
+
+    def failed(self, torrent, is_external):
+        pass
+
+    def error(self, torrent, level, text):
+        pass
+
+    def exception(self, torrent, text):
+        # exceptions are reported through error() at CRITICAL level
+        self.error(torrent, CRITICAL, text)
+
+    def started(self, torrent):
+        pass
+
+
+class Multitorrent(object):
+    """Session-wide state shared by all running torrents: the RawServer
+    event loop, the single listening port, the upload rate limiter, the
+    open-file pool and (optionally) the DHT node."""
+
+    def __init__(self, config, doneflag, errorfunc, listen_fail_ok=False):
+        self.dht = None
+        self.config = config
+        self.errorfunc = errorfunc
+        self.rawserver = RawServer(doneflag, config, errorfunc=errorfunc,
+                                   tos=config['peer_socket_tos'])
+        set_zurllib_rawserver(self.rawserver)
+        add_unsafe_thread()
+        self.nattraverser = NatTraverser(self.rawserver, logfunc=errorfunc)
+        self.singleport_listener = SingleportListener(self.rawserver,
+                                                      self.nattraverser)
+        self.ratelimiter = RateLimiter(self.rawserver.add_task)
+        self.ratelimiter.set_parameters(config['max_upload_rate'],
+                                        config['upload_unit_size'])
+        self._find_port(listen_fail_ok)
+        self.filepool = FilePool(config['max_files_open'])
+        set_filesystem_encoding(config['filesystem_encoding'],
+                                errorfunc)
+
+
+    def _find_port(self, listen_fail_ok=True):
+        """Open the first free port in [minport, maxport]; also starts
+        the DHT node on it when trackerless support is enabled.
+        Reports (or raises, if not listen_fail_ok) on total failure."""
+        # default message in case the range below is empty
+        e = _("maxport less than minport - no ports to check")
+        if self.config['minport'] < 1024:
+            self.config['minport'] = 1024
+        for port in xrange(self.config['minport'], self.config['maxport'] + 1):
+            try:
+                self.singleport_listener.open_port(port, self.config)
+                if self.config['start_trackerless_client']:
+                    self.dht = UTKhashmir(self.config['bind'],
+                               self.singleport_listener.get_port(),
+                               self.config['data_dir'], self.rawserver,
+                               int(self.config['max_upload_rate'] * 1024 * 0.01),
+                               rlcount=self.ratelimiter.increase_offset,
+                               config=self.config)
+                break
+            except socketerror, e:
+                # port busy; try the next one
+                pass
+        else:
+            if not listen_fail_ok:
+                raise BTFailure, _("Could not open a listening port: %s.") % str(e)
+            self.errorfunc(CRITICAL,
+                           _("Could not open a listening port: %s. ") %
+                           str(e) +
+                           _("Check your port range settings."))
+
+    def close_listening_socket(self):
+        self.singleport_listener.close_sockets()
+
+    def start_torrent(self, metainfo, config, feedback, filename):
+        """Create a _SingleTorrent and schedule its download start on
+        the event loop; returns the torrent object immediately."""
+        torrent = _SingleTorrent(self.rawserver, self.singleport_listener,
+                                 self.ratelimiter, self.filepool, config, self.dht)
+        torrent.rlgroup = RateLimitedGroup(config['max_upload_rate'], torrent.got_exception)
+        self.rawserver.add_context(torrent)
+        def start():
+            torrent.start_download(metainfo, feedback, filename)
+        self.rawserver.external_add_task(start, 0, context=torrent)
+        return torrent
+
+    def set_option(self, option, value):
+        """Change a session-level option at runtime, propagating it to
+        the rate limiter, file pool or listener as needed."""
+        self.config[option] = value
+        if option in ['max_upload_rate', 'upload_unit_size']:
+            self.ratelimiter.set_parameters(self.config['max_upload_rate'],
+                                            self.config['upload_unit_size'])
+        elif option == 'max_files_open':
+            self.filepool.set_max_files_open(value)
+        elif option == 'maxport':
+            # re-pick a port if the current one fell outside the new range
+            if not self.config['minport'] <= self.singleport_listener.port <= \
+                   self.config['maxport']:
+                self._find_port()
+
+    def get_completion(self, config, metainfo, save_path, filelist=False):
+        """Return the completion fraction for a torrent on disk using
+        fastresume data, or None if it cannot be determined.  With
+        filelist=True returns (fraction, ...) tuple data from
+        check_fastresume instead of a bare fraction."""
+        if not config['data_dir']:
+            return None
+        infohash = metainfo.infohash
+        if metainfo.is_batch:
+            myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
+        else:
+            myfiles = [save_path]
+
+        if metainfo.total_bytes == 0:
+            # zero-length torrent is trivially complete
+            if filelist:
+                return None
+            return 1
+        try:
+            s = Storage(None, None, zip(myfiles, metainfo.sizes),
+                        check_only=True)
+        except:
+            return None
+        filename = os.path.join(config['data_dir'], 'resume',
+                                infohash.encode('hex'))
+        try:
+            f = file(filename, 'rb')
+        except:
+            f = None
+        try:
+            r = s.check_fastresume(f, filelist, metainfo.piece_length,
+                                   len(metainfo.hashes), myfiles)
+        except:
+            r = None
+        if f is not None:
+            f.close()
+        if r is None:
+            return None
+        if filelist:
+            return r[0] / metainfo.total_bytes, r[1], r[2]
+        return r / metainfo.total_bytes
+
+
+class _SingleTorrent(object):
+
+    def __init__(self, rawserver, singleport_listener, ratelimiter, filepool,
+                 config, dht):
+        """Store session-wide collaborators and initialize per-torrent
+        state; real work happens later in start_download()."""
+        self._rawserver = rawserver
+        self._singleport_listener = singleport_listener
+        self._ratelimiter = ratelimiter
+        self._filepool = filepool
+        self._dht = dht
+        # components created during _start_download
+        self._storage = None
+        self._storagewrapper = None
+        self._ratemeasure = None
+        self._upmeasure = None
+        self._downmeasure = None
+        self._encoder = None
+        self._rerequest = None
+        self._statuscollecter = None
+        self._announced = False
+        self._listening = False
+        self.reserved_ports = []
+        self.reported_port = None
+        self._myfiles = None
+        self.started = False
+        self.is_seed = False
+        self.closed = False
+        self.infohash = None
+        self.total_bytes = None
+        self._doneflag = threading.Event()
+        self.finflag = threading.Event()     # set once download completes
+        self._hashcheck_thread = None
+        self._contfunc = None                # resumes the startup generator
+        self._activity = (_("Initial startup"), 0)
+        self.feedback = None
+        self.errors = []
+        self.rlgroup = None
+        self.config = config
+
+    def start_download(self, *args, **kwargs):
+        """Drive the _start_download generator from the event loop.
+
+        Each call to self._contfunc schedules one more step of the
+        generator; it is cleared when the generator is exhausted.  This
+        lets the hash-check thread resume startup without blocking the
+        event loop.
+        """
+        it = self._start_download(*args, **kwargs)
+        def cont():
+            try:
+                it.next()
+            except StopIteration:
+                self._contfunc = None
+        def contfunc():
+            self._rawserver.external_add_task(cont, 0, context=self)
+        self._contfunc = contfunc
+        contfunc()
+
+ def _start_download(self, metainfo, feedback, save_path):
+ self.feedback = feedback
+ config = self.config
+
+ self.infohash = metainfo.infohash
+ self.total_bytes = metainfo.total_bytes
+ if not metainfo.reported_errors:
+ metainfo.show_encoding_errors(self._error)
+
+ myid = self._make_id()
+ seed(myid)
+ def schedfunc(func, delay):
+ self._rawserver.add_task(func, delay, context=self)
+ def externalsched(func, delay):
+ self._rawserver.external_add_task(func, delay, context=self)
+ if metainfo.is_batch:
+ myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
+ else:
+ myfiles = [save_path]
+ self._filepool.add_files(myfiles, self)
+ self._myfiles = myfiles
+ self._storage = Storage(config, self._filepool, zip(myfiles,
+ metainfo.sizes))
+ resumefile = None
+ if config['data_dir']:
+ filename = os.path.join(config['data_dir'], 'resume',
+ self.infohash.encode('hex'))
+ if os.path.exists(filename):
+ try:
+ resumefile = file(filename, 'rb')
+ if self._storage.check_fastresume(resumefile) == 0:
+ resumefile.close()
+ resumefile = None
+ except Exception, e:
+ self._error(WARNING,
+ _("Could not load fastresume data: %s") % str(e)
+ + ' ' + _("Will perform full hash check."))
+ if resumefile is not None:
+ resumefile.close()
+ resumefile = None
+ def data_flunked(amount, index):
+ self._ratemeasure.data_rejected(amount)
+ self._error(INFO,
+ _("piece %d failed hash check, re-downloading it")
+ % index)
+ backthread_exception = []
+ def errorfunc(level, text):
+ def e():
+ self._error(level, text)
+ externalsched(e, 0)
+ def hashcheck():
+ def statusfunc(activity = None, fractionDone = 0):
+ if activity is None:
+ activity = self._activity[0]
+ self._activity = (activity, fractionDone)
+ try:
+ self._storagewrapper = StorageWrapper(self._storage,
+ config, metainfo.hashes, metainfo.piece_length,
+ self._finished, statusfunc, self._doneflag, data_flunked,
+ self.infohash, errorfunc, resumefile)
+ except:
+ backthread_exception.append(sys.exc_info())
+ self._contfunc()
+ thread = threading.Thread(target = hashcheck)
+ thread.setDaemon(False)
+ self._hashcheck_thread = thread
+ thread.start()
+ yield None
+ self._hashcheck_thread = None
+ if resumefile is not None:
+ resumefile.close()
+ if backthread_exception:
+ a, b, c = backthread_exception[0]
+ raise a, b, c
+
+ if self._storagewrapper.amount_left == 0:
+ self._finished()
+ choker = Choker(config, schedfunc, self.finflag.isSet)
+ upmeasure = Measure(config['max_rate_period'])
+ upmeasure_seedtime = Measure(config['max_rate_period_seedtime'])
+ downmeasure = Measure(config['max_rate_period'])
+ self._upmeasure = upmeasure
+ self._upmeasure_seedtime = upmeasure_seedtime
+ self._downmeasure = downmeasure
+ self._ratemeasure = RateMeasure(self._storagewrapper.
+ amount_left_with_partials)
+ picker = PiecePicker(len(metainfo.hashes), config)
+ for i in xrange(len(metainfo.hashes)):
+ if self._storagewrapper.do_I_have(i):
+ picker.complete(i)
+ for i in self._storagewrapper.stat_dirty:
+ picker.requested(i)
+ def kickpeer(connection):
+ def kick():
+ connection.close()
+ schedfunc(kick, 0)
+ def banpeer(ip):
+ self._encoder.ban(ip)
+ downloader = Downloader(config, self._storagewrapper, picker,
+ len(metainfo.hashes), downmeasure, self._ratemeasure.data_came_in,
+ kickpeer, banpeer)
+ def make_upload(connection):
+ return Upload(connection, self._ratelimiter, upmeasure,
+ upmeasure_seedtime, choker, self._storagewrapper,
+ config['max_slice_length'], config['max_rate_period'])
+
+
+ self.reported_port = self.config['forwarded_port']
+ if not self.reported_port:
+ self.reported_port = self._singleport_listener.get_port(self.change_port)
+ self.reserved_ports.append(self.reported_port)
+
+ if self._dht:
+ addContact = self._dht.addContact
+ else:
+ addContact = None
+ self._encoder = Encoder(make_upload, downloader, choker,
+ len(metainfo.hashes), self._ratelimiter, self._rawserver,
+ config, myid, schedfunc, self.infohash, self, addContact, self.reported_port)
+
+ self._singleport_listener.add_torrent(self.infohash, self._encoder)
+ self._listening = True
+ if metainfo.is_trackerless:
+ if not self._dht:
+ self._error(self, CRITICAL, _("Attempt to download a trackerless torrent with trackerless client turned off."))
+ return
+ else:
+ if len(self._dht.table.findNodes(metainfo.infohash, invalid=False)) < const.K:
+ for host, port in metainfo.nodes:
+ self._dht.addContact(host, port)
+ self._rerequest = DHTRerequester(config,
+ schedfunc, self._encoder.how_many_connections,
+ self._encoder.start_connection, externalsched,
+ self._storagewrapper.get_amount_left, upmeasure.get_total,
+ downmeasure.get_total, self.reported_port, myid,
+ self.infohash, self._error, self.finflag, upmeasure.get_rate,
+ downmeasure.get_rate, self._encoder.ever_got_incoming,
+ self.internal_shutdown, self._announce_done, self._dht)
+ else:
+ self._rerequest = Rerequester(metainfo.announce, config,
+ schedfunc, self._encoder.how_many_connections,
+ self._encoder.start_connection, externalsched,
+ self._storagewrapper.get_amount_left, upmeasure.get_total,
+ downmeasure.get_total, self.reported_port, myid,
+ self.infohash, self._error, self.finflag, upmeasure.get_rate,
+ downmeasure.get_rate, self._encoder.ever_got_incoming,
+ self.internal_shutdown, self._announce_done)
+
+ self._statuscollecter = DownloaderFeedback(choker, upmeasure.get_rate,
+ upmeasure_seedtime.get_rate, downmeasure.get_rate,
+ upmeasure.get_total, downmeasure.get_total,
+ self._ratemeasure.get_time_left, self._ratemeasure.get_size_left,
+ self.total_bytes, self.finflag, downloader, self._myfiles,
+ self._encoder.ever_got_incoming, self._rerequest)
+
+ self._announced = True
+ if self._dht and len(self._dht.table.findNodes(self.infohash)) == 0:
+ self._rawserver.add_task(self._dht.findCloseNodes, 5)
+ self._rawserver.add_task(self._rerequest.begin, 20)
+ else:
+ self._rerequest.begin()
+ self.started = True
+ if not self.finflag.isSet():
+ self._activity = (_("downloading"), 0)
+ self.feedback.started(self)
+
    def got_exception(self, e):
        """Translate an exception raised while running this torrent into
        user-visible errors, close the torrent down, and notify feedback.

        BTShutdown means an externally requested stop; every other type
        marks the torrent as failed internally.
        """
        is_external = False
        if isinstance(e, BTShutdown):
            self._error(ERROR, str(e))
            is_external = True
        elif isinstance(e, BTFailure):
            self._error(CRITICAL, str(e))
            self._activity = ( _("download failed: ") + str(e), 0)
        elif isinstance(e, IOError):
            msg = 'IO Error ' + str(e)
            if e.errno == errno.ENOSPC:
                # Special-case disk-full so the user gets an actionable message.
                msg = _("IO Error: No space left on disk, "
                        "or cannot create a file that large:") + str(e)
            self._error(CRITICAL, msg)
            self._activity = (_("killed by IO error: ") + str(e), 0)
        elif isinstance(e, OSError):
            self._error(CRITICAL, 'OS Error ' + str(e))
            self._activity = (_("killed by OS error: ") + str(e), 0)
        else:
            # Unknown exception type: report the full traceback.
            data = StringIO()
            print_exc(file=data)
            self._error(CRITICAL, data.getvalue(), True)
            self._activity = (_("killed by internal exception: ") + str(e), 0)
        try:
            self._close()
        except Exception, e:
            self._error(ERROR,
                        _("Additional error when closing down due to error: ") +
                        str(e))
        if is_external:
            self.feedback.failed(self, True)
            return
        # Internal failure: remove the fastresume file so the next start does
        # a full hash check rather than trusting possibly-stale resume data.
        if self.config['data_dir'] and self._storage is not None:
            filename = os.path.join(self.config['data_dir'], 'resume',
                                    self.infohash.encode('hex'))
            if os.path.exists(filename):
                try:
                    os.remove(filename)
                except Exception, e:
                    self._error(WARNING,
                                _("Could not remove fastresume file after "
                                  "failure:")
                                + str(e))
        self.feedback.failed(self, False)
+
    def _finished(self):
        """Switch the torrent into seed mode once all data has arrived."""
        self.finflag.set()
        # Call self._storage.close() to flush buffers and change files to
        # read-only mode (when they're possibly reopened). Let exceptions
        # from self._storage.close() kill the torrent since files might not
        # be correct on disk if file.close() failed.
        self._storage.close()
        # If we haven't announced yet, normal first announce done later will
        # tell the tracker about seed status.
        self.is_seed = True
        if self._announced:
            self._rerequest.announce_finish()
        self._activity = (_("seeding"), 1)
        if self.config['check_hashes']:
            # Data was hash-verified, so the fastresume snapshot is trustworthy.
            self._save_fastresume(True)
        self.feedback.finished(self)
+
+ def _save_fastresume(self, on_finish=False):
+ if not on_finish and (self.finflag.isSet() or not self.started):
+ return
+ if not self.config['data_dir']:
+ return
+ if on_finish: # self._ratemeasure might not exist yet
+ amount_done = self.total_bytes
+ else:
+ amount_done = self.total_bytes - self._ratemeasure.get_size_left()
+ filename = os.path.join(self.config['data_dir'], 'resume',
+ self.infohash.encode('hex'))
+ resumefile = None
+ try:
+ resumefile = file(filename, 'wb')
+ self._storage.write_fastresume(resumefile, amount_done)
+ self._storagewrapper.write_fastresume(resumefile)
+ resumefile.close()
+ except Exception, e:
+ self._error(WARNING, _("Could not write fastresume data: ") + str(e))
+ if resumefile is not None:
+ resumefile.close()
+
    def shutdown(self):
        """Stop the torrent cleanly, saving fastresume state.

        Returns True on success (or if already closed), False if an error
        occurred while shutting down.
        """
        if self.closed:
            return True
        try:
            self._close()
            self._save_fastresume()
            self._activity = (_("shut down"), 0)
            return True
        except Exception, e:
            self.got_exception(e)
            return False
        except:
            # Non-Exception raises (e.g. old-style classes): report the
            # traceback directly instead of routing through got_exception.
            data = StringIO()
            print_exc(file=data)
            self._error(WARNING, 'Unable to shutdown:\n'+data.getvalue())
            return False
+
    def internal_shutdown(self, level, text):
        """Shut down after an internal failure and report the torrent as
        failed to the feedback object."""
        # This is only called when announce fails with no peers,
        # don't try to announce again telling we're leaving the torrent
        self._announced = False
        self._error(level, text)
        self.shutdown()
        self.feedback.failed(self, True)
+
    def _close(self):
        """Tear down all per-torrent machinery. Idempotent; the teardown
        order below is deliberate (stop announcing before releasing ports,
        join the hash-check thread after setting the done flag)."""
        if self.closed:
            return
        self.closed = True
        self._rawserver.remove_context(self)
        self._doneflag.set()
        if self._announced:
            self._rerequest.announce_stop()
            self._rerequest.cleanup()
        if self._hashcheck_thread is not None:
            self._hashcheck_thread.join() # should die soon after doneflag set
        if self._myfiles is not None:
            self._filepool.remove_files(self._myfiles)
        if self._listening:
            self._singleport_listener.remove_torrent(self.infohash)
        for port in self.reserved_ports:
            self._singleport_listener.release_port(port, self.change_port)
        if self._encoder is not None:
            self._encoder.close_connections()
        if self._storage is not None:
            self._storage.close()
        self._ratelimiter.clean_closed()
        # Reclaim the potentially large per-torrent structures soon.
        self._rawserver.add_task(gc.collect, 0)
+
+ def get_status(self, spew = False, fileinfo=False):
+ if self.started and not self.closed:
+ r = self._statuscollecter.get_statistics(spew, fileinfo)
+ r['activity'] = self._activity[0]
+ else:
+ r = dict(zip(('activity', 'fractionDone'), self._activity))
+ return r
+
+ def get_total_transfer(self):
+ if self._upmeasure is None:
+ return (0, 0)
+ return (self._upmeasure.get_total(), self._downmeasure.get_total())
+
+ def set_option(self, option, value):
+ if self.closed:
+ return
+ if self.config.has_key(option) and self.config[option] == value:
+ return
+ self.config[option] = value
+ if option == 'max_upload_rate':
+ # make sure counters get reset so new rate applies immediately
+ self.rlgroup.set_rate(value)
+
    def change_port(self, new_port = None):
        """React to a listening-port change: update reserved ports, pick a
        new peer id, and re-announce on the new port.

        new_port -- explicit replacement port (used when the currently
        reported port is being revoked); otherwise the forwarded port or
        the shared listener's port is used.
        """
        if not self._listening:
            return
        r = self.config['forwarded_port']
        if r:
            # A configured forwarded port overrides everything: free all
            # reserved ports.
            # NOTE(review): release_port() is called here without the
            # self.change_port callback passed at the other call sites
            # (_close, _announce_done) -- confirm the signature allows it.
            for port in self.reserved_ports:
                self._singleport_listener.release_port(port)
            del self.reserved_ports[:]
            if self.reported_port == r:
                return
        elif new_port is not None:
            r = new_port
            self.reserved_ports.remove(self.reported_port)
            self.reserved_ports.append(r)
        elif self._singleport_listener.port != self.reported_port:
            r = self._singleport_listener.get_port(self.change_port)
            self.reserved_ports.append(r)
        else:
            return
        self.reported_port = r
        myid = self._make_id()
        self._encoder.my_id = myid
        self._rerequest.change_port(myid, r)
+
+ def _announce_done(self):
+ for port in self.reserved_ports[:-1]:
+ self._singleport_listener.release_port(port, self.change_port)
+ del self.reserved_ports[:-1]
+
    def _make_id(self):
        # Delegate to the shared PeerID module so every torrent generates
        # peer ids in the same format.
        return PeerID.make_id()
+
+ def _error(self, level, text, exception=False):
+ self.errors.append((time(), level, text))
+ if exception:
+ self.feedback.exception(self, text)
+ else:
+ self.feedback.error(self, level, text)
diff --git a/NohGooee/language.py b/NohGooee/language.py
new file mode 100644
index 0000000..f7f72ef
--- /dev/null
+++ b/NohGooee/language.py
@@ -0,0 +1,202 @@
+# -*- coding: UTF-8 -*-
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# http://people.w3.org/rishida/names/languages.html
+
# Native-script display names for the translations shipped with the client.
# ('vi' was mojibake -- the replacement-character corruption "Tiê?ng Viê?t"
# is restored to the proper Vietnamese "Tiếng Việt".)
language_names = {
    'af'   :u'Afrikaans'           , 'bg'   :u'Български'           ,
    'da'   :u'Dansk'               , 'ca'   :u'Català'              ,
    'cs'   :u'Čeština'             , 'de'   :u'Deutsch'             ,
    'en'   :u'English'             , 'es'   :u'Español'             ,
    'es_MX':u'Español de Mexico '  , 'fr'   :u'Français'            ,
    'el'   :u'Ελληνικά'            , 'he'   :u'עברית'               ,
    'hu'   :u'Magyar'              , 'it'   :u'Italiano'            ,
    'is'   :u'Íslenska'            , 'ja'   :u'日本語'              ,
    'ko'   :u'한국어'              ,'nl'   :u'Nederlands'          ,
    'nb_NO':u'Norsk bokmål'        , 'pl'   :u'Polski'              ,
    'pt'   :u'Português'           , 'pt_BR':u'Português do Brasil' ,
    'ro'   :u'Română'              , 'ru'   :u'Русский'             ,
    'sk'   :u'Slovenský'           , 'sl'   :u'Slovensko'           ,
    'sv'   :u'Svenska'             , 'tr'   :u'Türkçe'              ,
    'vi'   :u'Tiếng Việt'          ,
    'zh_CN':u'简体中文'            , # Simplified
    'zh_TW':u'繁體中文'            , # Traditional
    }
+
# Translations that exist but are not complete enough to be offered in the
# UI (see the commented-out update() call below this dict).
unfinished_language_names = {
    'ar'   :u'العربية'             , 'bs'   :u'Bosanski'            ,
    'eo'   :u'Esperanto'           , 'eu'   :u'Euskara'             ,
    'et'   :u'Eesti'               , 'fi'   :u'Suomi'               ,
    'fa'   :u'فارسی'               , 'ga'   :u'Gaeilge'             ,
    'gl'   :u'Galego'              , 'hr'   :u'Hrvatski'            ,
    'hy'   :u'Հայերեն'             , 'in'   :u'Bahasa indonesia'    , # NOTE(review): 'in' is the obsolete code for Indonesian (now 'id') -- confirm which one the locale files use
    'ka'   :u'ქართული ენა', 'lt'   :u'Lietuvių'            ,
    'ms'   :u'Bahasa melayu'       , 'ml'   :u'Malayalam'           ,
    'sq'   :u'Shqipe'              , 'th'   :u'ภาษาไทย'          ,
    'tlh'  :u'tlhIngan-Hol'        , 'uk'   :u'Українська'          ,
    'hi'   :u'हिंदी'               , 'cy'   :u'Cymraeg'             ,
    'nn_NO':u'Norsk Nynorsk'       , 'te'   :u' తెలుగు'            , # NOTE(review): value has a leading space -- possibly unintentional
    }
+
#language_names.update(unfinished_language_names)
# Sorted list of codes with a finished translation. sorted() replaces the
# old keys()+sort() pair (same result, one expression, works on any mapping).
languages = sorted(language_names)
+
# windows codepage to locale mapping
# Maps Win32 LCID values (as returned by GetUserDefaultLangID) to POSIX-style
# locale codes. The 0x0444 entry below used an inconsistent '0X' prefix; it is
# normalized to lowercase like every other key.
locale_sucks = {
    0x0436: "af", # Afrikaans
    0x3801: "ar_ae", # Arabic - United Arab Emirates
    0x3C01: "ar_bh", # Arabic - Bahrain
    0x1401: "ar_dz", # Arabic - Algeria
    0x0C01: "ar_eg", # Arabic - Egypt
    0x0801: "ar_iq", # Arabic - Iraq
    0x2C01: "ar_jo", # Arabic - Jordan
    0x3401: "ar_kw", # Arabic - Kuwait
    0x3001: "ar_lb", # Arabic - Lebanon
    0x1001: "ar_ly", # Arabic - Libya
    0x1801: "ar_ma", # Arabic - Morocco
    0x2001: "ar_om", # Arabic - Oman
    0x4001: "ar_qa", # Arabic - Qatar
    0x0401: "ar_sa", # Arabic - Saudi Arabia
    0x2801: "ar_sy", # Arabic - Syria
    0x1C01: "ar_tn", # Arabic - Tunisia
    0x2401: "ar_ye", # Arabic - Yemen
    0x082C: "az_az", # Azeri - Cyrillic
    0x0423: "be", # Belarusian
    0x0402: "bg", # Bulgarian
    0x0403: "ca", # Catalan
    0x0405: "cs", # Czech
    0x0406: "da", # Danish
    0x0007: "de", # German
    0x0C07: "de_at", # German - Austria
    0x0807: "de_ch", # German - Switzerland
    0x0407: "de_de", # German - Germany
    0x1407: "de_li", # German - Liechtenstein
    0x1007: "de_lu", # German - Luxembourg
    0x0408: "el", # Greek
    0x0C09: "en_au", # English - Australia
    0x2809: "en_bz", # English - Belize
    0x1009: "en_ca", # English - Canada
    0x2409: "en_cb", # English - Carribbean
    0x0809: "en_gb", # English - United Kingdom
    0x1809: "en_ie", # English - Ireland
    0x2009: "en_jm", # English - Jamaica
    0x1409: "en_nz", # English - New Zealand
    0x3409: "en_ph", # English - Phillippines
    0x2C09: "en_tt", # English - Trinidad
    0x0409: "en_us", # English - United States
    0x1C09: "en_za", # English - South Africa
    0x000A: "es", # Spanish (added)
    0x2C0A: "es_ar", # Spanish - Argentina
    0x400A: "es_bo", # Spanish - Bolivia
    0x340A: "es_cl", # Spanish - Chile
    0x240A: "es_co", # Spanish - Colombia
    0x140A: "es_cr", # Spanish - Costa Rica
    0x1C0A: "es_do", # Spanish - Dominican Republic
    0x300A: "es_ec", # Spanish - Ecuador
    0x040a: "es_es", # Spanish - Spain
    0x100A: "es_gt", # Spanish - Guatemala
    0x480A: "es_hn", # Spanish - Honduras
    0x080A: "es_mx", # Spanish - Mexico
    0x4C0A: "es_ni", # Spanish - Nicaragua
    0x180A: "es_pa", # Spanish - Panama
    0x280A: "es_pe", # Spanish - Peru
    0x500A: "es_pr", # Spanish - Puerto Rico
    0x3C0A: "es_py", # Spanish - Paraguay
    0x440A: "es_sv", # Spanish - El Salvador
    0x380A: "es_uy", # Spanish - Uruguay
    0x200A: "es_ve", # Spanish - Venezuela
    0x0425: "et", # Estonian
    0x0009: "en", # English (added)
    0x042D: "eu", # Basque
    0x0429: "fa", # Farsi
    0x040B: "fi", # Finnish
    0x0438: "fo", # Faroese
    0x000C: "fr", # French (added)
    0x080C: "fr_be", # French - Belgium
    0x0C0C: "fr_ca", # French - Canada
    0x100C: "fr_ch", # French - Switzerland
    0x040C: "fr_fr", # French - France
    0x140C: "fr_lu", # French - Luxembourg
    0x043C: "gd", # Gaelic - Scotland
    0x083C: "gd_ie", # Gaelic - Ireland
    0x040D: "he", # Hebrew
    0x0439: "hi", # Hindi
    0x041A: "hr", # Croatian
    0x040E: "hu", # Hungarian
    0x042B: "hy", # Armenian
    0x0421: "id", # Indonesian
    0x040F: "is", # Icelandic
    0x0010: "it", # Italian (added)
    0x0810: "it_ch", # Italian - Switzerland
    0x0410: "it_it", # Italian - Italy
    0x0411: "ja", # Japanese
    0x0412: "ko", # Korean
    0x0427: "lt", # Lithuanian
    0x0426: "lv", # Latvian
    0x042F: "mk", # FYRO Macedonian
    0x044E: "mr", # Marathi
    0x083E: "ms_bn", # Malay - Brunei
    0x043E: "ms_my", # Malay - Malaysia
    0x043A: "mt", # Maltese
    0x0013: "nl", # Dutch (added)
    0x0813: "nl_be", # Dutch - Belgium
    0x0413: "nl_nl", # Dutch - The Netherlands
    0x0814: "no_no", # Norwegian - Nynorsk
    0x0414: "nb_no", # Norwegian - Bokmal (?)
    0x0415: "pl", # Polish
    0x0016: "pt", # Portuguese (added)
    0x0416: "pt_br", # Portuguese - Brazil
    0x0816: "pt_pt", # Portuguese - Portugal
    0x0417: "rm", # Raeto-Romance
    0x0418: "ro", # Romanian - Romania
    0x0818: "ro_mo", # Romanian - Moldova
    0x0419: "ru", # Russian
    0x0819: "ru_mo", # Russian - Moldova
    0x044F: "sa", # Sanskrit
    0x042E: "sb", # Sorbian
    0x041B: "sk", # Slovak
    0x0424: "sl", # Slovenian
    0x041C: "sq", # Albanian
    0x081A: "sr_sp", # Serbian - Latin
    0x001D: "sv", # Swedish (added)
    0x081D: "sv_fi", # Swedish - Finland
    0x041D: "sv_se", # Swedish - Sweden
    0x0441: "sw", # Swahili
    0x0430: "sx", # Sutu
    0x0449: "ta", # Tamil
    0x041E: "th", # Thai
    0x0432: "tn", # Setsuana
    0x041F: "tr", # Turkish
    0x0431: "ts", # Tsonga
    0x0444: "tt", # Tatar
    0x0422: "uk", # Ukrainian
    0x0420: "ur", # Urdu
    0x0443: "uz_uz", # Uzbek - Latin
    0x042A: "vi", # Vietnamese
    0x0434: "xh", # Xhosa
    0x043D: "yi", # Yiddish
    0x0804: "zh_cn", # Chinese - China
    0x0C04: "zh_hk", # Chinese - Hong Kong S.A.R.
    0x1404: "zh_mo", # Chinese - Macau S.A.R
    0x1004: "zh_sg", # Chinese - Singapore
    0x0404: "zh_tw", # Chinese - Taiwan
    0x0435: "zu", # Zulu
}
+
+if __name__ == '__main__':
+ from sets import Set
+ internal = Set([x.lower() for x in languages])
+ windows = Set(locale_sucks.values())
+ if not windows.issuperset(internal):
+ diff = list(internal.difference(windows))
+ diff.sort()
+ print diff
diff --git a/NohGooee/launchmanycore.py b/NohGooee/launchmanycore.py
new file mode 100644
index 0000000..bf83fea
--- /dev/null
+++ b/NohGooee/launchmanycore.py
@@ -0,0 +1,261 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Original version written by John Hoffman, heavily modified for different
+# multitorrent architecture by Uoti Urpala (over 40% shorter than original)
+
+import os
+from cStringIO import StringIO
+from traceback import print_exc
+
+from NohGooee import configfile
+from NohGooee.parsedir import parsedir
+from NohGooee.download import Multitorrent, Feedback
+from NohGooee.ConvertedMetainfo import ConvertedMetainfo
+from NohGooee import BTFailure
+
+from threading import Event
+from time import time
+
+
+class LaunchMany(Feedback):
+
+ def __init__(self, config, output, configfile_key):
+ try:
+ self.config = config
+ self.output = output
+ self.configfile_key = configfile_key
+
+ self.torrent_dir = config['torrent_dir']
+ self.torrent_cache = {}
+ self.file_cache = {}
+ self.blocked_files = {}
+
+ self.torrent_list = []
+ self.downloads = {}
+ self.doneflag = Event()
+
+ self.hashcheck_queue = []
+ self.hashcheck_store = {}
+ self.hashcheck_current = None
+
+ self.multitorrent = Multitorrent(config, self.doneflag,
+ self.global_error)
+ self.rawserver = self.multitorrent.rawserver
+
+ self.rawserver.add_task(self.scan, 0)
+ self.rawserver.add_task(self.stats, 0)
+
+ try:
+ import signal
+ def handler(signum, frame):
+ self.rawserver.external_add_task(self.read_config, 0)
+ signal.signal(signal.SIGHUP, handler)
+ self.rawserver.install_sigint_handler()
+ except Exception, e:
+ self.output.message(_("Could not set signal handler: ") + str(e))
+
+ self.rawserver.listen_forever()
+
+ self.output.message(_("shutting down"))
+ for infohash in self.torrent_list:
+ self.output.message(_('dropped "%s"') %
+ self.torrent_cache[infohash]['path'])
+ torrent = self.downloads[infohash]
+ if torrent is not None:
+ torrent.shutdown()
+ except:
+ data = StringIO()
+ print_exc(file = data)
+ output.exception(data.getvalue())
+
+ def scan(self):
+ self.rawserver.add_task(self.scan, self.config['parse_dir_interval'])
+
+ r = parsedir(self.torrent_dir, self.torrent_cache,
+ self.file_cache, self.blocked_files,
+ self.output.message)
+
+ ( self.torrent_cache, self.file_cache, self.blocked_files,
+ added, removed ) = r
+
+ for infohash, data in removed.items():
+ self.output.message(_('dropped "%s"') % data['path'])
+ self.remove(infohash)
+ for infohash, data in added.items():
+ self.output.message(_('added "%s"' ) % data['path'])
+ if self.config['launch_delay'] > 0:
+ self.rawserver.add_task(self.add, self.config['launch_delay'], (infohash, data))
+ else:
+ self.add(infohash, data)
+
+ def stats(self):
+ self.rawserver.add_task(self.stats, self.config['display_interval'])
+ data = []
+ for infohash in self.torrent_list:
+ cache = self.torrent_cache[infohash]
+ if self.config['display_path']:
+ name = cache['path']
+ else:
+ name = cache['name']
+ size = cache['length']
+ d = self.downloads[infohash]
+ progress = '0.0%'
+ peers = 0
+ seeds = 0
+ seedsmsg = "S"
+ dist = 0.0
+ uprate = 0.0
+ dnrate = 0.0
+ upamt = 0
+ dnamt = 0
+ t = 0
+ msg = ''
+ if d is None:
+ status = _("waiting for hash check")
+ else:
+ stats = d.get_status()
+ status = stats['activity']
+ progress = '%.1f%%' % (int(stats['fractionDone']*1000)/10.0)
+ if d.started and not d.closed:
+ s = stats
+ dist = s['numCopies']
+ if d.is_seed:
+ seeds = 0 # s['numOldSeeds']
+ seedsmsg = "s"
+ else:
+ if s['numSeeds'] + s['numPeers']:
+ t = stats['timeEst']
+ if t is None:
+ t = -1
+ if t == 0: # unlikely
+ t = 0.01
+ status = _("downloading")
+ else:
+ t = -1
+ status = _("connecting to peers")
+ seeds = s['numSeeds']
+ dnrate = stats['downRate']
+ peers = s['numPeers']
+ uprate = stats['upRate']
+ upamt = s['upTotal']
+ dnamt = s['downTotal']
+ if d.errors and (d.closed or d.errors[-1][0] + 300 > time()):
+ msg = d.errors[-1][2]
+
+ data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
+ uprate, dnrate, upamt, dnamt, size, t, msg ))
+ stop = self.output.display(data)
+ if stop:
+ self.doneflag.set()
+
+ def remove(self, infohash):
+ self.torrent_list.remove(infohash)
+ if self.downloads[infohash] is not None:
+ self.downloads[infohash].shutdown()
+ self.was_stopped(infohash)
+ del self.downloads[infohash]
+
+ def add(self, infohash, data):
+ self.torrent_list.append(infohash)
+ self.downloads[infohash] = None
+ self.hashcheck_queue.append(infohash)
+ self.hashcheck_store[infohash] = data['metainfo']
+ self.check_hashcheck_queue()
+
+ def check_hashcheck_queue(self):
+ if self.hashcheck_current is not None or not self.hashcheck_queue:
+ return
+ self.hashcheck_current = self.hashcheck_queue.pop(0)
+ metainfo = self.hashcheck_store[self.hashcheck_current]
+ del self.hashcheck_store[self.hashcheck_current]
+ filename = self.determine_filename(self.hashcheck_current)
+ self.downloads[self.hashcheck_current] = self.multitorrent. \
+ start_torrent(ConvertedMetainfo(metainfo),
+ self.config, self, filename)
+
+ def determine_filename(self, infohash):
+ x = self.torrent_cache[infohash]
+ name = x['name']
+ savein = self.config['save_in']
+ isdir = not x['metainfo']['info'].has_key('length')
+ style = self.config['saveas_style']
+ if style == 4:
+ torrentname = os.path.split(x['path'][:-8])[1]
+ suggestedname = name
+ if torrentname == suggestedname:
+ style = 1
+ else:
+ style = 3
+
+ if style == 1 or style == 3:
+ if savein:
+ saveas = os.path.join(savein,x['file'][:-8]) # strip '.torrent'
+ else:
+ saveas = x['path'][:-8] # strip '.torrent'
+ if style == 3 and not isdir:
+ saveas = os.path.join(saveas, name)
+ else:
+ if savein:
+ saveas = os.path.join(savein, name)
+ else:
+ saveas = os.path.join(os.path.split(x['path'])[0], name)
+ return saveas
+
+ def was_stopped(self, infohash):
+ try:
+ self.hashcheck_queue.remove(infohash)
+ except:
+ pass
+ else:
+ del self.hashcheck_store[infohash]
+ if self.hashcheck_current == infohash:
+ self.hashcheck_current = None
+ self.check_hashcheck_queue()
+
+ def global_error(self, level, text):
+ self.output.message(text)
+
+ def exchandler(self, s):
+ self.output.exception(s)
+
+ def read_config(self):
+ try:
+ newvalues = configfile.get_config(self.config, self.configfile_key)
+ except Exception, e:
+ self.output.message(_("Error reading config: ") + str(e))
+ return
+ self.output.message(_("Rereading config file"))
+ self.config.update(newvalues)
+ # The set_option call can potentially trigger something that kills
+ # the torrent (when writing this the only possibility is a change in
+ # max_files_open causing an IOError while closing files), and so
+ # the self.failed() callback can run during this loop.
+ for option, value in newvalues.iteritems():
+ self.multitorrent.set_option(option, value)
+ for torrent in self.downloads.values():
+ if torrent is not None:
+ for option, value in newvalues.iteritems():
+ torrent.set_option(option, value)
+
+ # rest are callbacks from torrent instances
+
+ def started(self, torrent):
+ self.hashcheck_current = None
+ self.check_hashcheck_queue()
+
+ def failed(self, torrent, is_external):
+ infohash = torrent.infohash
+ self.was_stopped(infohash)
+ if self.torrent_cache.has_key(infohash):
+ self.output.message('DIED: "'+self.torrent_cache[infohash]['path']+'"')
+
+ def exception(self, torrent, text):
+ self.exchandler(text)
diff --git a/NohGooee/makemetafile.py b/NohGooee/makemetafile.py
new file mode 100644
index 0000000..ccd721d
--- /dev/null
+++ b/NohGooee/makemetafile.py
@@ -0,0 +1,260 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from __future__ import division
+
+import os
+import sys
+from sha import sha
+from time import time
+from threading import Event
+
+from NohGooee.bencode import bencode, bdecode
+from NohGooee.btformats import check_info
+from NohGooee.parseargs import parseargs, printHelp
+from NohGooee import BTFailure
+
+from khashmir.node import Node
+from khashmir.ktable import KTable
+from khashmir.util import packPeers, compact_peer_info
+
# Directory entries never included when building a torrent.
ignore = ['core', 'CVS', 'Thumbs.db', 'desktop.ini']

# Translation table mapping every surrogate (U+D800-U+DFFF) and unicode
# noncharacter (U+FDD0-U+FDEF, U+FFFE, U+FFFF) code point to None;
# makeinfo() uses it to detect reserved values in file names.
noncharacter_translate = {}
for _bad_range in (range(0xD800, 0xE000),
                   range(0xFDD0, 0xFDF0),
                   (0xFFFE, 0xFFFF)):
    for _codepoint in _bad_range:
        noncharacter_translate[_codepoint] = None

del _bad_range, _codepoint
+
def dummy(v):
    """No-op default callback for progress/file notifications."""
    return None
+
def make_meta_files(url,
                    files,
                    flag=None,
                    progressfunc=dummy,
                    filefunc=dummy,
                    piece_len_pow2=None,
                    target=None,
                    comment=None,
                    filesystem_encoding=None,
                    use_tracker=True,
                    data_dir = None):
    """Create a .torrent file for each path in files.

    url -- announce URL (use_tracker=True) or comma-separated DHT node
           list (use_tracker=False)
    flag -- optional threading.Event; generation stops when it gets set
    progressfunc -- called with the overall completion fraction (0..1)
    filefunc -- called with each path as work on it starts
    target -- explicit output filename, only valid for a single input

    Raises BTFailure for multiple inputs with a target, or for an
    unsupported filesystem encoding.
    """
    # Fresh Event per call -- the old signature used the mutable-default
    # antipattern (flag=Event()), sharing one Event across all calls.
    if flag is None:
        flag = Event()
    if len(files) > 1 and target:
        raise BTFailure(_("You can't specify the name of the .torrent file "
                          "when generating multiple torrents at once"))

    if not filesystem_encoding:
        try:
            getattr(sys, 'getfilesystemencoding')
        except AttributeError:
            pass
        else:
            filesystem_encoding = sys.getfilesystemencoding()
        if not filesystem_encoding:
            filesystem_encoding = 'ascii'
    try:
        'a1'.decode(filesystem_encoding)
    except Exception:
        # Narrowed from a bare except: still catches codec lookup/decode
        # failures without swallowing KeyboardInterrupt/SystemExit.
        raise BTFailure(_('Filesystem encoding "%s" is not supported in this version')
                        % filesystem_encoding)
    files.sort()
    ext = '.torrent'

    # Skip inputs that are themselves .torrent files.
    togen = []
    for f in files:
        if not f.endswith(ext):
            togen.append(f)

    total = 0
    for f in togen:
        total += calcsize(f)

    subtotal = [0]
    def callback(x):
        subtotal[0] += x
        if total > 0:
            # Guard: an all-empty batch would otherwise divide by zero.
            progressfunc(subtotal[0] / total)
    for f in togen:
        if flag.isSet():
            break
        t = os.path.split(f)
        if t[1] == '':
            f = t[0]
        filefunc(f)
        if use_tracker:
            make_meta_file(f, url, flag=flag, progress=callback,
                           piece_len_exp=piece_len_pow2, target=target,
                           comment=comment, encoding=filesystem_encoding)
        else:
            make_meta_file_dht(f, url, flag=flag, progress=callback,
                               piece_len_exp=piece_len_pow2, target=target,
                               comment=comment, encoding=filesystem_encoding, data_dir=data_dir)
+
+
def make_meta_file(path, url, piece_len_exp, flag=None, progress=dummy,
                   comment=None, target=None, encoding='ascii'):
    """Write a tracker-based .torrent for path.

    Output goes to target, or <path>.torrent next to the input. Returns
    (writing nothing) if flag is set during hashing.
    """
    # Fresh Event per call instead of the old shared mutable default.
    if flag is None:
        flag = Event()
    data = {'announce': url.strip(),'creation date': int(time())}
    piece_length = 2 ** piece_len_exp
    a, b = os.path.split(path)
    if not target:
        if b == '':
            # path ended with a separator: name after the directory itself.
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    info = makeinfo(path, piece_length, flag, progress, encoding)
    if flag.isSet():
        return
    check_info(info)
    h = file(f, 'wb')

    data['info'] = info
    if comment:
        data['comment'] = comment
    h.write(bencode(data))
    h.close()
+
def make_meta_file_dht(path, nodes, piece_len_exp, flag=None, progress=dummy,
                       comment=None, target=None, encoding='ascii', data_dir=None):
    # if nodes is empty, then get them out of the routing table in data_dir
    # else, expect nodes to be a string of comma seperated <ip>:<port> pairs
    # this has a lot of duplicated code from make_meta_file
    """Write a trackerless (DHT) .torrent for path, embedding bootstrap
    nodes instead of an announce URL."""
    # Fresh Event per call instead of the old shared mutable default.
    if flag is None:
        flag = Event()
    piece_length = 2 ** piece_len_exp
    a, b = os.path.split(path)
    if not target:
        if b == '':
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    info = makeinfo(path, piece_length, flag, progress, encoding)
    if flag.isSet():
        return
    check_info(info)
    info_hash = sha(bencode(info)).digest()

    if not nodes:
        # Pull the closest known nodes out of the local DHT routing table.
        x = open(os.path.join(data_dir, 'routing_table'), 'rb')
        d = bdecode(x.read())
        x.close()
        t = KTable(Node().initWithDict({'id':d['id'], 'host':'127.0.0.1','port': 0}))
        for n in d['rt']:
            t.insertNode(Node().initWithDict(n))
        nodes = [(node.host, node.port) for node in t.findNodes(info_hash) if node.host != '127.0.0.1']
    else:
        # 'pair' instead of 'a', which shadowed the os.path.split result above.
        nodes = [(pair[0], int(pair[1])) for pair in [node.strip().split(":") for node in nodes.split(",")]]
    data = {'nodes': nodes, 'creation date': int(time())}
    h = file(f, 'wb')

    data['info'] = info
    if comment:
        data['comment'] = comment
    h.write(bencode(data))
    h.close()
+
+
def calcsize(path):
    """Return the combined size in bytes of every file under path."""
    return sum(os.path.getsize(location)
               for _parts, location in subfiles(os.path.abspath(path)))
+
+def makeinfo(path, piece_length, flag, progress, encoding):
+ def to_utf8(name):
+ try:
+ u = name.decode(encoding)
+ except Exception, e:
+ raise BTFailure(_('Could not convert file/directory name "%s" to '
+ 'utf-8 (%s). Either the assumed filesystem '
+ 'encoding "%s" is wrong or the filename contains '
+ 'illegal bytes.') % (name, str(e), encoding))
+ if u.translate(noncharacter_translate) != u:
+ raise BTFailure(_('File/directory name "%s" contains reserved '
+ 'unicode values that do not correspond to '
+ 'characters.') % name)
+ return u.encode('utf-8')
+ path = os.path.abspath(path)
+ if os.path.isdir(path):
+ subs = subfiles(path)
+ subs.sort()
+ pieces = []
+ sh = sha()
+ done = 0
+ fs = []
+ totalsize = 0.0
+ totalhashed = 0
+ for p, f in subs:
+ totalsize += os.path.getsize(f)
+
+ for p, f in subs:
+ pos = 0
+ size = os.path.getsize(f)
+ p2 = [to_utf8(name) for name in p]
+ fs.append({'length': size, 'path': p2})
+ h = file(f, 'rb')
+ while pos < size:
+ a = min(size - pos, piece_length - done)
+ sh.update(h.read(a))
+ if flag.isSet():
+ return
+ done += a
+ pos += a
+ totalhashed += a
+
+ if done == piece_length:
+ pieces.append(sh.digest())
+ done = 0
+ sh = sha()
+ progress(a)
+ h.close()
+ if done > 0:
+ pieces.append(sh.digest())
+ return {'pieces': ''.join(pieces),
+ 'piece length': piece_length, 'files': fs,
+ 'name': to_utf8(os.path.split(path)[1])}
+ else:
+ size = os.path.getsize(path)
+ pieces = []
+ p = 0
+ h = file(path, 'rb')
+ while p < size:
+ x = h.read(min(piece_length, size - p))
+ if flag.isSet():
+ return
+ pieces.append(sha(x).digest())
+ p += piece_length
+ if p > size:
+ p = size
+ progress(min(piece_length, size - p))
+ h.close()
+ return {'pieces': ''.join(pieces),
+ 'piece length': piece_length, 'length': size,
+ 'name': to_utf8(os.path.split(path)[1])}
+
def subfiles(d):
    """Return [(path_components, full_path)] for every file at or below d,
    skipping names in the module-level 'ignore' list and dotfiles.

    A non-directory argument yields a single ([], d) entry.
    """
    found = []
    pending = [([], d)]
    while pending:
        components, location = pending.pop()
        if not os.path.isdir(location):
            found.append((components, location))
            continue
        for entry in os.listdir(location):
            if entry in ignore or entry.startswith('.'):
                continue
            pending.append((components + [entry], os.path.join(location, entry)))
    return found
diff --git a/NohGooee/parseargs.py b/NohGooee/parseargs.py
new file mode 100644
index 0000000..e2f773a
--- /dev/null
+++ b/NohGooee/parseargs.py
@@ -0,0 +1,178 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bill Bumgarner and Bram Cohen
+
+from types import *
+from cStringIO import StringIO
+
+from NohGooee.defaultargs import MyBool, MYTRUE
+from NohGooee import BTFailure
+from NohGooee.bencode import bdecode
+from NohGooee.platform import is_frozen_exe
+
+def makeHelp(uiname, defaults):
+ ret = ''
+ ret += (_("Usage: %s ") % uiname)
+ if uiname.startswith('launchmany'):
+ ret += _("[OPTIONS] [TORRENTDIRECTORY]\n\n")
+ ret += _("If a non-option argument is present it's taken as the value\n"
+ "of the torrent_dir option.\n")
+ elif uiname == 'bittorrent':
+ ret += _("[OPTIONS] [TORRENTFILES]\n")
+ elif uiname.startswith('bittorrent'):
+ ret += _("[OPTIONS] [TORRENTFILE]\n")
+ elif uiname.startswith('maketorrent'):
+ ret += _("[OPTION] TRACKER_URL FILE [FILE]\n")
+ ret += '\n'
+ ret += _("arguments are -\n") + formatDefinitions(defaults, 80)
+ return ret
+
+def printHelp(uiname, defaults):
+ if uiname in ('bittorrent','maketorrent') and is_frozen_exe:
+ from NohGooee.GUI import HelpWindow
+ HelpWindow(None, makeHelp(uiname, defaults))
+ else:
+ print makeHelp(uiname, defaults)
+
+def formatDefinitions(options, COLS):
+    """Render option definitions as word-wrapped help text.
+
+    options -- sequence of (longname, default, doc) triples; entries with an
+               empty doc string are hidden from the output
+    COLS    -- terminal width to wrap the doc text to
+
+    Returns the formatted help text as a single string.
+    """
+    s = StringIO()
+    indent = " " * 10
+    width = COLS - 11
+
+    # very narrow terminal: fall back to a minimal indent
+    if width < 15:
+        width = COLS - 2
+        indent = " "
+
+    for option in options:
+        (longname, default, doc) = option
+        if doc == '':
+            continue
+        s.write('--' + longname)
+        is_boolean = type(default) is MyBool
+        if is_boolean:
+            # boolean options are switches; advertise the negated form too
+            s.write(', --no_' + longname)
+        else:
+            s.write(' <arg>')
+        s.write('\n')
+        if default is not None:
+            doc += _(" (defaults to ") + repr(default) + ')'
+        # greedy word-wrap; i tracks the length of the current output line
+        i = 0
+        for word in doc.split():
+            if i == 0:
+                s.write(indent + word)
+                i = len(word)
+            elif i + len(word) >= width:
+                s.write('\n' + indent + word)
+                i = len(word)
+            else:
+                s.write(' ' + word)
+                i += len(word) + 1
+        s.write('\n\n')
+    return s.getvalue()
+
+def usage(str):
+    """Abort argument processing by raising BTFailure with the given message."""
+    raise BTFailure(str)
+
+def format_key(key):
+ if len(key) == 1:
+ return '-%s'%key
+ else:
+ return '--%s'%key
+
+def parseargs(argv, options, minargs=None, maxargs=None, presets=None):
+    """Parse a command line against an options description.
+
+    argv    -- argument list (program name excluded)
+    options -- sequence of (longname, default, doc) triples; the default
+               value's type drives conversion (see parse_options)
+    minargs/maxargs -- optional bounds on the number of non-option arguments
+    presets -- optional dict of pre-set values (copied, never mutated)
+
+    Returns (config, args): config maps every option name to its final
+    value, args is the list of non-option arguments.  Raises BTFailure for
+    unknown keys, missing values, bad argument counts, or options left None.
+    """
+    config = {}
+    for option in options:
+        longname, default, doc = option
+        config[longname] = default
+    args = []
+    pos = 0
+    if presets is None:
+        presets = {}
+    else:
+        presets = presets.copy()
+    while pos < len(argv):
+        if argv[pos][:1] != '-': # not a cmdline option
+            args.append(argv[pos])
+            pos += 1
+        else:
+            key, value = None, None
+            if argv[pos].startswith('--'):        # --aaa 1
+                if argv[pos].startswith('--no_'):
+                    # negated boolean switch
+                    key = argv[pos][5:]
+                    boolval = False
+                else:
+                    key = argv[pos][2:]
+                    boolval = True
+                if key not in config:
+                    raise BTFailure(_("unknown key ") + format_key(key))
+                if type(config[key]) is MyBool: # boolean cmd line switch, no value
+                    value = boolval
+                    pos += 1
+                else: # --argument value
+                    if pos == len(argv) - 1:
+                        usage(_("parameter passed in at end with no value"))
+                    key, value = argv[pos][2:], argv[pos+1]
+                    pos += 2
+            elif argv[pos][:1] == '-':
+                key = argv[pos][1:2]
+                if len(argv[pos]) > 2: # -a1
+                    value = argv[pos][2:]
+                    pos += 1
+                else: # -a 1
+                    if pos == len(argv) - 1:
+                        usage(_("parameter passed in at end with no value"))
+                    value = argv[pos+1]
+                    pos += 2
+            else:
+                raise BTFailure(_("command line parsing failed at ")+argv[pos])
+
+            presets[key] = value
+    # convert collected string values to the types of the defaults
+    parse_options(config, presets)
+    config.update(presets)
+    for key, value in config.items():
+        if value is None:
+            usage(_("Option %s is required.") % format_key(key))
+    if minargs is not None and len(args) < minargs:
+        usage(_("Must supply at least %d arguments.") % minargs)
+    if maxargs is not None and len(args) > maxargs:
+        usage(_("Too many arguments - %d maximum.") % maxargs)
+
+    return (config, args)
+
+def parse_options(defaults, newvalues):
+ for key, value in newvalues.iteritems():
+ if not defaults.has_key(key):
+ raise BTFailure(_("unknown key ") + format_key(key))
+ try:
+ t = type(defaults[key])
+ if t is MyBool:
+ if value in ('True', '1', MYTRUE, True):
+ value = True
+ else:
+ value = False
+ newvalues[key] = value
+ elif t in (StringType, NoneType):
+ newvalues[key] = value
+ elif t in (IntType, LongType):
+ if value == 'False':
+ newvalues[key] == 0
+ elif value == 'True':
+ newvalues[key] == 1
+ else:
+ newvalues[key] = int(value)
+ elif t is FloatType:
+ newvalues[key] = float(value)
+ else:
+ raise TypeError, str(t)
+
+ except ValueError, e:
+ raise BTFailure(_("wrong format of %s - %s") % (format_key(key), str(e)))
+
diff --git a/NohGooee/parsedir.py b/NohGooee/parsedir.py
new file mode 100644
index 0000000..2ffe1c2
--- /dev/null
+++ b/NohGooee/parsedir.py
@@ -0,0 +1,150 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by John Hoffman and Uoti Urpala
+
+import os
+from sha import sha
+
+from NohGooee.bencode import bencode, bdecode
+from NohGooee.btformats import check_message
+
+NOISY = False
+
+def parsedir(directory, parsed, files, blocked, errfunc,
+             include_metainfo=True):
+    """Scan a directory tree for .torrent files and reconcile against the
+    previous scan's state.
+
+    parsed  -- previous infohash -> torrent-summary dict
+    files   -- previous path -> [(mtime, size), infohash-or-0] dict
+    blocked -- previous dict of unparseable/duplicate paths
+    errfunc -- callable for warnings (and progress notes when NOISY)
+
+    Returns (new_parsed, new_files, new_blocked, added, removed).
+    Subdirectories are only descended into when a directory contains no
+    .torrent files of its own.
+    """
+    if NOISY:
+        errfunc('checking dir')
+    dirs_to_check = [directory]
+    new_files = {}
+    new_blocked = {}
+    while dirs_to_check: # first, recurse directories and gather torrents
+        directory = dirs_to_check.pop()
+        newtorrents = False
+        try:
+            dir_contents = os.listdir(directory)
+        except (IOError, OSError), e:
+            errfunc(_("Could not read directory ") + directory)
+            continue
+        for f in dir_contents:
+            if f.endswith('.torrent'):
+                newtorrents = True
+                p = os.path.join(directory, f)
+                try:
+                    new_files[p] = [(os.path.getmtime(p),os.path.getsize(p)),0]
+                except (IOError, OSError), e:
+                    errfunc(_("Could not stat ") + p + " : " + str(e))
+        if not newtorrents:
+            for f in dir_contents:
+                p = os.path.join(directory, f)
+                if os.path.isdir(p):
+                    dirs_to_check.append(p)
+
+    new_parsed = {}
+    to_add = []
+    added = {}
+    removed = {}
+    # files[path] = [(modification_time, size), hash], hash is 0 if the file
+    # has not been successfully parsed
+    for p,v in new_files.items(): # re-add old items and check for changes
+        oldval = files.get(p)
+        if oldval is None: # new file
+            to_add.append(p)
+            continue
+        h = oldval[1]
+        if oldval[0] == v[0]: # file is unchanged from last parse
+            if h:
+                if p in blocked: # parseable + blocked means duplicate
+                    to_add.append(p) # other duplicate may have gone away
+                else:
+                    new_parsed[h] = parsed[h]
+                    new_files[p] = oldval
+            else:
+                new_blocked[p] = None # same broken unparseable file
+            continue
+        if p not in blocked and h in parsed: # modified; remove+add
+            if NOISY:
+                errfunc(_("removing %s (will re-add)") % p)
+            removed[h] = parsed[h]
+        to_add.append(p)
+
+    to_add.sort()
+    for p in to_add: # then, parse new and changed torrents
+        new_file = new_files[p]
+        v = new_file[0]
+        if new_file[1] in new_parsed: # duplicate
+            if p not in blocked or files[p][0] != v:
+                errfunc(_("**warning** %s is a duplicate torrent for %s") %
+                        (p, new_parsed[new_file[1]]['path']))
+            new_blocked[p] = None
+            continue
+
+        if NOISY:
+            errfunc('adding '+p)
+        try:
+            ff = open(p, 'rb')
+            d = bdecode(ff.read())
+            check_message(d)
+            h = sha(bencode(d['info'])).digest()
+            new_file[1] = h
+            if new_parsed.has_key(h):
+                errfunc(_("**warning** %s is a duplicate torrent for %s") %
+                        (p, new_parsed[h]['path']))
+                new_blocked[p] = None
+                continue
+
+            # build the summary record for this torrent
+            a = {}
+            a['path'] = p
+            f = os.path.basename(p)
+            a['file'] = f
+            i = d['info']
+            l = 0
+            nf = 0
+            if i.has_key('length'):
+                # single-file torrent
+                l = i.get('length',0)
+                nf = 1
+            elif i.has_key('files'):
+                # multi-file torrent: sum the per-file lengths
+                for li in i['files']:
+                    nf += 1
+                    if li.has_key('length'):
+                        l += li['length']
+            a['numfiles'] = nf
+            a['length'] = l
+            a['name'] = i.get('name', f)
+            def setkey(k, d = d, a = a):
+                # copy optional top-level key k through, if present
+                if d.has_key(k):
+                    a[k] = d[k]
+            setkey('failure reason')
+            setkey('warning message')
+            setkey('announce-list')
+            if include_metainfo:
+                a['metainfo'] = d
+        except:
+            errfunc(_("**warning** %s has errors") % p)
+            new_blocked[p] = None
+            continue
+        try:
+            ff.close()
+        except:
+            pass
+        if NOISY:
+            errfunc(_("... successful"))
+        new_parsed[h] = a
+        added[h] = a
+
+    for p,v in files.iteritems(): # and finally, mark removed torrents
+        if p not in new_files and p not in blocked:
+            if NOISY:
+                errfunc(_("removing %s") % p)
+            removed[v[1]] = parsed[v[1]]
+
+    if NOISY:
+        errfunc(_("done checking"))
+    return (new_parsed, new_files, new_blocked, added, removed)
diff --git a/NohGooee/platform.py b/NohGooee/platform.py
new file mode 100644
index 0000000..4749fd3
--- /dev/null
+++ b/NohGooee/platform.py
@@ -0,0 +1,390 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Matt Chisholm and Uoti Urpala
+
+# This module is strictly for cross platform compatibility items and
+# should not import anything from other BitTorrent modules.
+
+import os
+import re
+import sys
+import time
+import gettext
+import locale
+if os.name == 'nt':
+ import _winreg
+ import win32api
+ from win32com.shell import shellcon, shell
+ import win32com.client
+ import ctypes
+elif os.name == 'posix' and os.uname()[0] == 'Darwin':
+ has_pyobjc = False
+ try:
+ from Foundation import NSBundle
+ has_pyobjc = True
+ except ImportError:
+ pass
+
+from NohGooee import app_name, version
+from NohGooee.language import locale_sucks
+
+# module-level timer selection: time.clock has finer resolution on Windows;
+# elsewhere time.time is the wall clock
+if sys.platform.startswith('win'):
+    bttime = time.clock
+else:
+    bttime = time.time
+
+# True when running as a frozen (py2exe-style) Windows GUI executable
+is_frozen_exe = (os.name == 'nt') and hasattr(sys, 'frozen') and (sys.frozen == 'windows_exe')
+
+os_name = os.name
+os_version = None
+if os_name == 'nt':
+
+    # mirrors the Win32 OSVERSIONINFOEX struct consumed by GetVersionExA
+    class OSVERSIONINFOEX(ctypes.Structure):
+        _fields_ = [("dwOSVersionInfoSize", ctypes.c_ulong),
+                    ("dwMajorVersion", ctypes.c_ulong),
+                    ("dwMinorVersion", ctypes.c_ulong),
+                    ("dwBuildNumber", ctypes.c_ulong),
+                    ("dwPlatformId", ctypes.c_ulong),
+                    ("szCSDVersion", ctypes.c_char * 128),
+                    ("wServicePackMajor", ctypes.c_ushort),
+                    ("wServicePackMinor", ctypes.c_ushort),
+                    ("wSuiteMask", ctypes.c_ushort),
+                    ("wProductType", ctypes.c_byte),
+                    ("wReserved", ctypes.c_byte),
+                    ]
+
+    o = OSVERSIONINFOEX()
+    o.dwOSVersionInfoSize = 156 # sizeof(OSVERSIONINFOEX)
+
+    ctypes.windll.kernel32.GetVersionExA(ctypes.byref(o))
+
+    # (platform id, major, minor) -> Windows marketing name
+    wh = {(1, 4, 0): "95",
+          (1, 4, 10): "98",
+          (1, 4, 90): "ME",
+          (2, 4, 0): "NT",
+          (2, 5, 0): "2000",
+          (2, 5, 1): "XP" ,
+          (2, 5, 2): "2003",
+          }
+
+    win_version_num = (o.dwPlatformId, o.dwMajorVersion, o.dwMinorVersion,
+                       o.wServicePackMajor, o.wServicePackMinor, o.dwBuildNumber)
+
+    wk = (o.dwPlatformId, o.dwMajorVersion, o.dwMinorVersion)
+    if wh.has_key(wk):
+        os_version = wh[wk]
+    else:
+        # unrecognized (presumably newer) release: assume the newest known
+        os_version = wh[max(wh.keys())]
+        sys.stderr.write("Couldn't identify windows version: %s, "
+                         "assuming '%s'\n" % (str(wk), os_version))
+    del wh, wk
+
+elif os_name == 'posix':
+    os_version = os.uname()[0]
+
+
+def calc_unix_dirs():
+ appdir = '%s-%s'%('bittorrent', version)
+ ip = os.path.join('share', 'pixmaps', appdir)
+ dp = os.path.join('share', 'doc' , appdir)
+ lp = os.path.join('share', 'locale')
+ return ip, dp, lp
+
+# locate the application root (directory holding the executable/script) and
+# the resource roots derived from it
+if is_frozen_exe:
+    app_root = os.path.split(os.path.abspath(win32api.GetModuleFileName(0)))[0]
+else:
+    app_root = os.path.split(os.path.abspath(sys.argv[0]))[0]
+doc_root = app_root
+osx = False
+if os.name == 'posix':
+    if os.uname()[0] == "Darwin":
+        doc_root = app_root = app_root.encode('utf8')
+        if has_pyobjc:
+            # inside a .app bundle the resources live elsewhere
+            doc_root = NSBundle.mainBundle().resourcePath()
+            osx = True
+image_root = os.path.join(app_root, 'images')
+locale_root = os.path.join(app_root, 'locale')
+
+if not os.access(image_root, os.F_OK) or not os.access(locale_root, os.F_OK):
+    # we guess that probably we are installed on *nix in this case
+    # (I have no idea whether this is right or not -- matt)
+    if app_root[-4:] == '/bin':
+        # yep, installed on *nix
+        installed_prefix = app_root[:-4]
+        image_root, doc_root, locale_root = map(
+            lambda p: os.path.join(installed_prefix, p), calc_unix_dirs()
+        )
+
+# a cross-platform way to get user's config directory
+def get_config_dir():
+ shellvars = ['${APPDATA}', '${HOME}', '${USERPROFILE}']
+ dir_root = get_dir_root(shellvars)
+
+ if (dir_root is None) and (os.name == 'nt'):
+ app_dir = get_shell_dir(shellcon.CSIDL_APPDATA)
+ if app_dir is not None:
+ dir_root = app_dir
+
+ if dir_root is None and os.name == 'nt':
+ tmp_dir_root = os.path.split(sys.executable)[0]
+ if os.access(tmp_dir_root, os.R_OK|os.W_OK):
+ dir_root = tmp_dir_root
+
+ return dir_root
+
+def get_cache_dir():
+ dir = None
+ if os.name == 'nt':
+ dir = get_shell_dir(shellcon.CSIDL_INTERNET_CACHE)
+ return dir
+
+def get_home_dir():
+ shellvars = ['${HOME}', '${USERPROFILE}']
+ dir_root = get_dir_root(shellvars)
+
+ if (dir_root is None) and (os.name == 'nt'):
+ dir = get_shell_dir(shellcon.CSIDL_PROFILE)
+ if dir is None:
+ # there's no clear best fallback here
+ # MS discourages you from writing directly in the home dir,
+ # and sometimes (i.e. win98) there isn't one
+ dir = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)
+
+ dir_root = dir
+
+ return dir_root
+
+def get_temp_dir():
+ shellvars = ['${TMP}', '${TEMP}']
+ dir_root = get_dir_root(shellvars, default_to_home=False)
+
+ #this method is preferred to the envvars
+ if os.name == 'nt':
+ try_dir_root = win32api.GetTempPath()
+ if try_dir_root is not None:
+ dir_root = try_dir_root
+
+ if dir_root is None:
+ try_dir_root = None
+ if os.name == 'nt':
+ # this should basically never happen. GetTempPath always returns something
+ try_dir_root = r'C:\WINDOWS\Temp'
+ elif os.name == 'posix':
+ try_dir_root = '/tmp'
+ if (try_dir_root is not None and
+ os.path.isdir(try_dir_root) and
+ os.access(try_dir_root, os.R_OK|os.W_OK)):
+ dir_root = try_dir_root
+ return dir_root
+
+def get_dir_root(shellvars, default_to_home=True):
+ def check_sysvars(x):
+ y = os.path.expandvars(x)
+ if y != x and os.path.isdir(y):
+ return y
+ return None
+
+ dir_root = None
+ for d in shellvars:
+ dir_root = check_sysvars(d)
+ if dir_root is not None:
+ break
+ else:
+ if default_to_home:
+ dir_root = os.path.expanduser('~')
+ if dir_root == '~' or not os.path.isdir(dir_root):
+ dir_root = None
+ return dir_root
+
+# this function is the preferred way to get windows' paths
+def get_shell_dir(value):
+    """Return the Windows shell folder path for CSIDL constant `value`, or
+    None off-Windows or on lookup failure.
+
+    Note: the bare except covers both calls, so if SHGetFolderPath
+    succeeds but the mbcs encode fails, the unencoded path is returned
+    as-is rather than None.
+    """
+    dir = None
+    if os.name == 'nt':
+        try:
+            dir = shell.SHGetFolderPath(0, value, 0, 0)
+            dir = dir.encode('mbcs')
+        except:
+            pass
+    return dir
+
+def get_startup_dir():
+ dir = None
+ if os.name == 'nt':
+ dir = get_shell_dir(shellcon.CSIDL_STARTUP)
+ return dir
+
+def create_shortcut(source, dest, *args):
+    """Create a launcher for `source` at `dest`: a .lnk shortcut on Windows
+    (with optional command-line args), a symlink elsewhere (args are not
+    supported there)."""
+    if os.name == 'nt':
+        shell = win32com.client.Dispatch("WScript.Shell")
+        shortcut = shell.CreateShortCut(dest + ".lnk")
+        shortcut.Targetpath = source
+        shortcut.Arguments = ' '.join(args)
+        path, file = os.path.split(source)
+        shortcut.WorkingDirectory = path
+        shortcut.save()
+    else:
+        # some other os may not support this, but throwing an error is good since
+        # the function couldn't do what was requested
+        os.symlink(source, dest)
+        # linux also can't do args... maybe we should spit out a shell script?
+        # NOTE(review): this assert only fires after the symlink has already
+        # been created -- confirm that ordering is intended
+        assert not args;
+
+def remove_shortcut(dest):
+ if os.name == 'nt':
+ dest += ".lnk"
+ os.unlink(dest)
+
+def path_wrap(path):
+    # identity on non-Windows platforms
+    return path
+
+if os.name == 'nt':
+    # on Windows, normalize mbcs-encoded paths to utf-8
+    def path_wrap(path):
+        return path.decode('mbcs').encode('utf-8')
+
+def btspawn(torrentqueue, cmd, *args):
+ ext = ''
+ if is_frozen_exe:
+ ext = '.exe'
+ path = os.path.join(app_root, cmd+ext)
+ if not os.access(path, os.F_OK):
+ if os.access(path+'.py', os.F_OK):
+ path = path+'.py'
+ args = [path] + list(args) # $0
+ spawn(torrentqueue, *args)
+
+def spawn(torrentqueue, *args):
+ if os.name == 'nt':
+ # do proper argument quoting since exec/spawn on Windows doesn't
+ bargs = args
+ args = []
+ for a in bargs:
+ if not a.startswith("/"):
+ a.replace('"', '\"')
+ a = '"%s"' % a
+ args.append(a)
+
+ argstr = ' '.join(args[1:])
+ # use ShellExecute instead of spawn*() because we don't want
+ # handles (like the controlsocket) to be duplicated
+ win32api.ShellExecute(0, "open", args[0], argstr, None, 1) # 1 == SW_SHOW
+ else:
+ if os.access(args[0], os.X_OK):
+ forkback = os.fork()
+ if forkback == 0:
+ if torrentqueue is not None:
+ #BUG: should we do this?
+ #torrentqueue.set_done()
+ torrentqueue.wrapped.ipc.stop()
+ os.execl(args[0], *args)
+ else:
+ #BUG: what should we do here?
+ pass
+
+def _gettext_install(domain, localedir=None, languages=None, unicode=False):
+    """Install the gettext '_' function like gettext.install(), but on
+    Windows also consult locale.getdefaultlocale(), since the LANGUAGE/LC_*
+    environment variables are rarely set there."""
+    # gettext on win32 does not use locale.getdefaultlocale() by default
+    # other os's will fall through and gettext.find() will do this task
+    if os_name == 'nt':
+        # this code is straight out of gettext.find()
+        if languages is None:
+            languages = []
+            for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
+                val = os.environ.get(envar)
+                if val:
+                    languages = val.split(':')
+                    break
+
+        # this is the important addition - since win32 does not typically
+        # have any environment variable set, append the default locale before 'C'
+        languages.append(locale.getdefaultlocale()[0])
+
+        if 'C' not in languages:
+            languages.append('C')
+
+    # this code is straight out of gettext.install
+    t = gettext.translation(domain, localedir, languages=languages, fallback=True)
+    t.install(unicode)
+
+
+def language_path():
+ config_dir = get_config_dir()
+ lang_file_name = os.path.join(config_dir, '.bittorrent', 'data', 'language')
+ return lang_file_name
+
+
+def read_language_file():
+    """Return the user's saved language preference, or None.
+
+    Windows: looked up from the installer's registry key (an lcid mapped
+    through locale_sucks).  Elsewhere: the leading [A-Za-z_] characters
+    (at most 5, e.g. 'en' or 'pt_BR') of the language file's first line.
+    """
+    lang = None
+
+    if os.name == 'nt':
+        # this pulls user-preference language from the installer location
+        try:
+            regko = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\BitTorrent")
+            lang_num = _winreg.QueryValueEx(regko, "Language")[0]
+            lang_num = int(lang_num)
+            lang = locale_sucks[lang_num]
+        except:
+            pass
+    else:
+        lang_file_name = language_path()
+        if os.access(lang_file_name, os.F_OK|os.R_OK):
+            mode = 'r'
+            if sys.version_info >= (2, 3):
+                mode = 'U'  # universal newlines
+            lang_file = open(lang_file_name, mode)
+            lang_line = lang_file.readline()
+            lang_file.close()
+            if lang_line:
+                lang = ''
+                # keep only the leading letters/underscore of the code
+                for i in lang_line[:5]:
+                    if not i.isalpha() and i != '_':
+                        break
+                    lang += i
+                if lang == '':
+                    lang = None
+
+    return lang
+
+
+def write_language_file(lang):
+    """Persist the language preference: to the registry on Windows (empty
+    string deletes the value; otherwise the lcid is looked up in
+    locale_sucks, raising KeyError for unknown codes), to a plain file
+    elsewhere."""
+    if os.name == 'nt':
+        regko = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, "Software\\BitTorrent")
+        if lang == '':
+            _winreg.DeleteValue(regko, "Language")
+        else:
+            lcid = None
+
+            # I want two-way dicts
+            for id, code in locale_sucks.iteritems():
+                if code.lower() == lang.lower():
+                    lcid = id
+                    break
+            if not lcid:
+                raise KeyError(lang)
+
+            _winreg.SetValueEx(regko, "Language", 0, _winreg.REG_SZ, str(lcid))
+
+    else:
+        lang_file_name = language_path()
+        lang_file = open(lang_file_name, 'w')
+        lang_file.write(lang)
+        lang_file.close()
+
+
+def install_translation():
+    """Install the gettext '_' function globally, honoring any saved
+    language preference; falls back to environment/default locales."""
+    languages = None
+    try:
+        lang = read_language_file()
+        if lang is not None:
+            languages = [lang, ]
+    except:
+        #pass
+        # NOTE(review): printing the traceback here looks like leftover
+        # debugging (the intended 'pass' is commented out) -- confirm
+        from traceback import print_exc
+        print_exc()
+    _gettext_install('bittorrent', locale_root, languages=languages)
diff --git a/NohGooee/prefs.py b/NohGooee/prefs.py
new file mode 100644
index 0000000..37177c2
--- /dev/null
+++ b/NohGooee/prefs.py
@@ -0,0 +1,89 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+
+class Preferences(object):
+    """Layered option mapping: reads fall back to an optional parent
+    Preferences object, writes and deletions affect only this layer."""
+    def __init__(self, parent=None):
+        self._parent = None
+        self._options = {}
+        if parent:
+            self._parent = parent
+
+    def initWithDict(self, dict):
+        # adopt the given dict as this layer's option store; returns self
+        self._options = dict
+        return self
+
+    def getDict(self):
+        # shallow copy of only this layer (parent values excluded)
+        return dict(self._options)
+
+    def getDifference(self):
+        # options in this layer whose value differs from the parent's
+        if self._parent:
+            return dict([(x, y) for x, y in self._options.items() if y != self._parent.get(x, None)])
+        else:
+            return dict(self._options)
+
+    def __getitem__(self, option):
+        # local value wins; falls back to parent; missing keys yield None
+        if self._options.has_key(option):
+            return self._options[option]
+        elif self._parent:
+            return self._parent[option]
+        return None
+
+    def __setitem__(self, option, value):
+        self._options.__setitem__(option, value)
+
+    def __len__(self):
+        # NOTE(review): keys present in both layers are counted twice
+        l = len(self._options)
+        if self._parent:
+            return l + len(self._parent)
+        else:
+            return l
+
+    def __delitem__(self, option):
+        # removes only the local override; a parent value may still show through
+        del(self._options[option])
+
+    # clears only this layer; the parent is untouched
+    def clear(self): self._options.clear()
+
+    def has_key(self, option):
+        if self._options.has_key(option):
+            return True
+        elif self._parent:
+            return self._parent.has_key(option)
+        return False
+
+    def keys(self):
+        l = self._options.keys()
+        if self._parent:
+            l += [key for key in self._parent.keys() if key not in l]
+        return l
+
+    def values(self):
+        # NOTE(review): dedupes by value, so a parent value equal to any
+        # local value is dropped even when its key differs -- confirm intended
+        l = self._options.values()
+        if self._parent:
+            l += [value for value in self._parent.values() if value not in l]
+        return l
+
+    def items(self):
+        l = self._options.items()
+        if self._parent:
+            l += [item for item in self._parent.items() if item not in l]
+        return l
+
+    def __iter__(self): return self.iterkeys()
+    def __str__(self): return 'Preferences({%s})' % str(self.items())
+    def iteritems(self): return self.items().__iter__()
+    def iterkeys(self): return self.keys().__iter__()
+    def itervalues(self): return self.values().__iter__()
+    def update(self, dict): return self._options.update(dict)
+
+    def get(self, key, failobj=None):
+        if not self.has_key(key):
+            return failobj
+        return self[key]
diff --git a/NohGooee/selectpoll.py b/NohGooee/selectpoll.py
new file mode 100644
index 0000000..d01f1ef
--- /dev/null
+++ b/NohGooee/selectpoll.py
@@ -0,0 +1,68 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from select import select, error
+from time import sleep
+from types import IntType
+from bisect import bisect
+POLLIN = 1
+POLLOUT = 2
+POLLERR = 8
+POLLHUP = 16
+
+
+class poll(object):
+    """select()-based emulation of the poll object API for platforms that
+    lack poll; interest is tracked in sorted fd lists via insert/remove."""
+
+    def __init__(self):
+        # sorted lists of fds registered for read / write interest
+        self.rlist = []
+        self.wlist = []
+
+    def register(self, f, t):
+        """Register fd (or object with fileno()) for the events in bitmask
+        t; event bits absent from t clear any previous interest."""
+        if type(f) != IntType:
+            f = f.fileno()
+        if (t & POLLIN) != 0:
+            insert(self.rlist, f)
+        else:
+            remove(self.rlist, f)
+        if (t & POLLOUT) != 0:
+            insert(self.wlist, f)
+        else:
+            remove(self.wlist, f)
+
+    def unregister(self, f):
+        # drop all interest in fd (or object with fileno())
+        if type(f) != IntType:
+            f = f.fileno()
+        remove(self.rlist, f)
+        remove(self.wlist, f)
+
+    def poll(self, timeout = None):
+        """Wait up to timeout seconds and return (fd, event) pairs.
+        NOTE(review): with no registered fds, a timeout of None reaches
+        sleep(None), which raises TypeError -- confirm callers always pass
+        a numeric timeout.
+        """
+        if self.rlist != [] or self.wlist != []:
+            r, w, e = select(self.rlist, self.wlist, [], timeout)
+        else:
+            sleep(timeout)
+            return []
+        result = []
+        for s in r:
+            result.append((s, POLLIN))
+        for s in w:
+            result.append((s, POLLOUT))
+        return result
+
+def remove(list, item):
+ i = bisect(list, item)
+ if i > 0 and list[i-1] == item:
+ del list[i-1]
+
+def insert(list, item):
+ i = bisect(list, item)
+ if i == 0 or list[i-1] != item:
+ list.insert(i, item)
diff --git a/NohGooee/track.py b/NohGooee/track.py
new file mode 100644
index 0000000..ee1fe5e
--- /dev/null
+++ b/NohGooee/track.py
@@ -0,0 +1,876 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License). You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License. You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen and John Hoffman
+
+import sys
+import os
+import re
+from time import time, gmtime, strftime, localtime
+from random import shuffle
+from types import StringType, IntType, LongType, ListType, DictType
+
+from twisted.web import server
+from twisted.web.resource import Resource
+from twisted.internet import reactor
+from twisted.python import log
+
+from NohGooee.parseargs import parseargs, formatDefinitions
+from NohGooee.parsedir import parsedir
+from NohGooee.NatCheck import NatCheck
+from NohGooee.bencode import bencode, bdecode, Bencached
+from NohGooee.zurllib import quote, unquote
+from NohGooee import version
+
+
+defaults = [
+ ('port', 80,
+ _("Port to listen on.")),
+ ('dfile', '/tmp/dfile.txt',
+ _("file to store recent downloader info in")),
+ ('bind', '',
+ _("ip to bind to locally")),
+ ('socket_timeout', 15,
+ _("timeout for closing connections")),
+ ('close_with_rst', 0,
+ _("close connections with RST and avoid the TCP TIME_WAIT state")),
+ ('save_dfile_interval', 5 * 60,
+ _("seconds between saving dfile")),
+ ('timeout_downloaders_interval', 45 * 60,
+ _("seconds between expiring downloaders")),
+ ('reannounce_interval', 30 * 60,
+ _("seconds downloaders should wait between reannouncements")),
+ ('response_size', 50,
+ _("default number of peers to send an info message to if the "
+ "client does not specify a number")),
+ ('timeout_check_interval', 5,
+ _("time to wait between checking if any connections have timed out")),
+ ('nat_check', 3,
+ _("how many times to check if a downloader is behind a NAT "
+ "(0 = don't check)")),
+ ('log_nat_checks', 0,
+ _("whether to add entries to the log for nat-check results")),
+ ('min_time_between_log_flushes', 3.0,
+ _("minimum time it must have been since the last flush to do "
+ "another one")),
+ ('min_time_between_cache_refreshes', 600.0,
+ _("minimum time in seconds before a cache is considered stale "
+ "and is flushed")),
+ ('allowed_dir', '',
+ _("only allow downloads for .torrents in this dir (and recursively in "
+ "subdirectories of directories that have no .torrent files "
+ "themselves). If set, torrents in this directory show up on "
+ "infopage/scrape whether they have peers or not")),
+ ('parse_dir_interval', 60,
+ _("how often to rescan the torrent directory, in seconds")),
+ ('allowed_controls', 0,
+ _("allow special keys in torrents in the allowed_dir to affect "
+ "tracker access")),
+ ('show_infopage', 1,
+ _("whether to display an info page when the tracker's root dir "
+ "is loaded")),
+ ('infopage_redirect', '',
+ _("a URL to redirect the info page to")),
+ ('show_names', 1,
+ _("whether to display names from allowed dir")),
+ ('favicon', '',
+ _("file containing x-icon data to return when browser requests "
+ "favicon.ico")),
+ ('only_local_override_ip', 2,
+ _("ignore the ip GET parameter from machines which aren't on "
+ "local network IPs (0 = never, 1 = always, 2 = ignore if NAT "
+ "checking is not enabled). HTTP proxy headers giving address "
+ "of original client are treated the same as --ip.")),
+ ('allow_get', 0,
+ _("use with allowed_dir; adds a /file?hash={hash} url that "
+ "allows users to download the torrent file")),
+ ('keep_dead', 0,
+ _("keep dead torrents after they expire (so they still show up on your "
+ "/scrape and web page). Only matters if allowed_dir is not set")),
+ ('scrape_allowed', 'full',
+ _("scrape access allowed (can be none, specific or full)")),
+ ('max_give', 200,
+ _("maximum number of peers to give with any one request")),
+ ]
+
+def statefiletemplate(x):
+    """Validate the shape of a decoded tracker state file.
+
+    x is the bdecoded dfile contents.  Raises ValueError on the first
+    structural problem; returns None when the state looks sane.  Only
+    the keys used by Tracker ('peers', 'completed', 'allowed',
+    'allowed_dir_files') are checked; unknown keys are ignored.
+    """
+    if type(x) != DictType:
+        raise ValueError
+    for cname, cinfo in x.items():
+        if cname == 'peers':
+            for y in cinfo.values(): # The 'peers' key is a dictionary of SHA hashes (torrent ids)
+                if type(y) != DictType: # ... for the active torrents, and each is a dictionary
+                    raise ValueError
+                for peerid, info in y.items(): # ... of client ids interested in that torrent
+                    if (len(peerid) != 20):
+                        raise ValueError
+                    if type(info) != DictType: # ... each of which is also a dictionary
+                        raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
+                    if type(info.get('ip', '')) != StringType:
+                        raise ValueError
+                    port = info.get('port')
+                    if type(port) not in (IntType, LongType) or port < 0:
+                        raise ValueError
+                    left = info.get('left')
+                    if type(left) not in (IntType, LongType) or left < 0:
+                        raise ValueError
+        elif cname == 'completed':
+            if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
+                raise ValueError # ... for keeping track of the total completions per torrent
+            for y in cinfo.values(): # ... each torrent has an integer value
+                if type(y) not in (IntType,LongType):
+                    raise ValueError # ... for the number of reported completions for that torrent
+        elif cname == 'allowed':
+            if (type(cinfo) != DictType): # a list of info_hashes and included data
+                raise ValueError
+            if x.has_key('allowed_dir_files'):
+                adlist = [z[1] for z in x['allowed_dir_files'].values()]
+                for y in cinfo.keys(): # and each should have a corresponding key here
+                    if not y in adlist:
+                        raise ValueError
+        elif cname == 'allowed_dir_files':
+            if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
+                raise ValueError
+            dirkeys = {}
+            for y in cinfo.values(): # each entry should have a corresponding info_hash
+                if not y[1]:
+                    continue
+                if not x['allowed'].has_key(y[1]):
+                    raise ValueError
+                if dirkeys.has_key(y[1]): # and each should have a unique info_hash
+                    raise ValueError
+                dirkeys[y[1]] = 1
+
+alas = _("your file may exist elsewhere in the universe\nbut alas, not here\n")
+
+def isotime(secs = None):
+    """Return secs (epoch seconds; default: now) formatted as an
+    ISO-8601-style UTC timestamp, e.g. '2006-01-02 15:04 UTC'."""
+    if secs == None:
+        secs = time()
+    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
+
+http_via_filter = re.compile(' for ([0-9.]+)\Z')
+
+def _get_forwarded_ip(request):
+    """Extract the original client address from proxy headers, if any.
+
+    Checks X-Forwarded-For, Client-IP, Via and From, in that order.
+    Returns the raw header-derived string (unvalidated) or None.
+    """
+    header = request.getHeader('X-Forwarded-For')
+    if header:
+        try:
+            # NOTE(review): assumes at most one proxy hop -- a header
+            # with more than one comma falls through to returning the
+            # whole string, which get_forwarded_ip() then rejects as an
+            # invalid IPv4 address.
+            x,y = header.split(',')
+        except:
+            return header
+        if not is_local_ip(x):
+            return x
+        return y
+    header = request.getHeader('Client-IP')
+    if header:
+        return header
+    header = request.getHeader('Via')
+    if header:
+        # Via of the form '... for 1.2.3.4' (see http_via_filter above)
+        x = http_via_filter.search(header)
+        if x:
+            return x.group(1)
+
+    header = request.getHeader('From')
+    if header:
+        return header
+    return None
+
+def get_forwarded_ip(request):
+    """Return the proxy-forwarded client IP, or None if there is none,
+    it is not a valid dotted-quad IPv4 address, or it is a private /
+    loopback / link-local address (see is_local_ip)."""
+    x = _get_forwarded_ip(request)
+    if x is None or not is_valid_ipv4(x) or is_local_ip(x):
+        return None
+    return x
+
+def compact_peer_info(ip, port):
+    """Pack a dotted-quad IP and port into the 6-byte 'compact' peer
+    format (4 bytes IP, 2 bytes big-endian port).  Returns '' if ip is
+    not a valid dotted-quad (e.g. a hostname)."""
+    try:
+        s = ( ''.join([chr(int(i)) for i in ip.split('.')])
+              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
+        if len(s) != 6:
+            s = ''
+    except:
+        s = ''  # not a valid IP, must be a domain name
+    return s
+
+def is_valid_ipv4(ip):
+    """True if ip is a dotted-quad IPv4 string with each octet in
+    0..255 (chr() raises for values outside that range)."""
+    a = ip.split('.')
+    if len(a) != 4:
+        return False
+    try:
+        for x in a:
+            chr(int(x))
+        return True
+    except:
+        return False
+
+def is_local_ip(ip):
+    """True if ip is in a private (RFC 1918), loopback or link-local
+    range.
+
+    Returns None (falsy), not False, for non-private or unparsable
+    input -- all callers in this module only test truthiness.
+    """
+    try:
+        v = [int(x) for x in ip.split('.')]
+        if v[0] == 10 or v[0] == 127 or v[:2] in ([192, 168], [169, 254]):
+            return True
+        if v[0] == 172 and v[1] >= 16 and v[1] <= 31:
+            return True
+    except ValueError:
+        return False
+
+class Tracker(object):
+
+ def __init__(self):
+
+ config, files = parseargs([], defaults, 0, 0)
+ self.config = config
+ self.response_size = config['response_size']
+ self.max_give = config['max_give']
+ self.dfile = config['dfile']
+ self.natcheck = config['nat_check']
+ favicon = config['favicon']
+ self.favicon = None
+ if favicon:
+ try:
+ h = open(favicon,'r')
+ self.favicon = h.read()
+ h.close()
+ except:
+ log.msg(_("**warning** specified favicon file -- %s -- does not exist.") % favicon)
+ self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
+ self.cached_t = {} # format: infohash: [time, cache]
+ self.times = {}
+ self.state = {}
+ self.seedcount = {}
+
+ self.only_local_override_ip = config['only_local_override_ip']
+ if self.only_local_override_ip == 2:
+ self.only_local_override_ip = not config['nat_check']
+
+ if os.path.exists(self.dfile):
+ try:
+ h = open(self.dfile, 'rb')
+ ds = h.read()
+ h.close()
+ tempstate = bdecode(ds)
+ if not tempstate.has_key('peers'):
+ tempstate = {'peers': tempstate}
+ statefiletemplate(tempstate)
+ self.state = tempstate
+ except:
+ log.msg(_("**warning** statefile %s corrupt; resetting") % self.dfile)
+ self.downloads = self.state.setdefault('peers', {})
+ self.completed = self.state.setdefault('completed', {})
+
+ self.becache = {} # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
+ for infohash, ds in self.downloads.items():
+ self.seedcount[infohash] = 0
+ for x,y in ds.items():
+ if not y.get('nat',-1):
+ ip = y.get('given_ip')
+ if not (ip and self.allow_local_override(y['ip'], ip)):
+ ip = y['ip']
+ self.natcheckOK(infohash,x,ip,y['port'],y['left'])
+ if not y['left']:
+ self.seedcount[infohash] += 1
+
+ for infohash in self.downloads:
+ self.times[infohash] = {}
+ for peerid in self.downloads[infohash]:
+ self.times[infohash][peerid] = 0
+
+ self.reannounce_interval = config['reannounce_interval']
+ self.save_dfile_interval = config['save_dfile_interval']
+ self.show_names = config['show_names']
+ reactor.callLater(self.save_dfile_interval, self.save_dfile)
+ self.prevtime = time()
+ self.timeout_downloaders_interval = config['timeout_downloaders_interval']
+ reactor.callLater(self.timeout_downloaders_interval, self.expire_downloaders)
+
+ self.allow_get = config['allow_get']
+
+ if config['allowed_dir'] != '':
+ self.allowed_dir = config['allowed_dir']
+ self.parse_dir_interval = config['parse_dir_interval']
+ self.allowed = self.state.setdefault('allowed',{})
+ self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
+ self.allowed_dir_blocked = {}
+ self.parse_allowed()
+ else:
+ try:
+ del self.state['allowed']
+ except:
+ pass
+ try:
+ del self.state['allowed_dir_files']
+ except:
+ pass
+ self.allowed = None
+
+ self.uq_broken = unquote('+') != ' '
+ self.keep_dead = config['keep_dead']
+
+ def allow_local_override(self, ip, given_ip):
+ return is_valid_ipv4(given_ip) and (
+ not self.only_local_override_ip or is_local_ip(ip) )
+
+ def scrapedata(self, infohash, return_name = True):
+ l = self.downloads[infohash]
+ n = self.completed.get(infohash, 0)
+ c = self.seedcount[infohash]
+ d = len(l) - c
+ f = {'complete': c, 'incomplete': d, 'downloaded': n}
+ if return_name and self.show_names and self.allowed is not None:
+ f['name'] = self.allowed[infohash]['name']
+ return (f)
+
+ def add_data(self, infohash, event, ip, paramslist):
+ peers = self.downloads.setdefault(infohash, {})
+ ts = self.times.setdefault(infohash, {})
+ self.completed.setdefault(infohash, 0)
+ self.seedcount.setdefault(infohash, 0)
+
+ def params(key, default = None, l = paramslist):
+ if l.has_key(key):
+ return l[key][0]
+ return default
+
+ myid = params('peer_id','')
+ if len(myid) != 20:
+ raise ValueError, 'id not of length 20'
+ if event not in ['started', 'completed', 'stopped', 'snooped', None]:
+ raise ValueError, 'invalid event'
+ port = int(params('port',''))
+ if port < 0 or port > 65535:
+ raise ValueError, 'invalid port'
+ left = int(params('left',''))
+ if left < 0:
+ raise ValueError, 'invalid amount left'
+
+ peer = peers.get(myid)
+ mykey = params('key')
+ auth = not peer or peer.get('key', -1) == mykey or peer.get('ip') == ip
+
+ gip = params('ip')
+ local_override = gip and self.allow_local_override(ip, gip)
+ if local_override:
+ ip1 = gip
+ else:
+ ip1 = ip
+ if not auth and local_override and self.only_local_override_ip:
+ auth = True
+
+ if params('numwant') is not None:
+ rsize = min(int(params('numwant')), self.max_give)
+ else:
+ rsize = self.response_size
+
+ if event == 'stopped':
+ if peer and auth:
+ self.delete_peer(infohash,myid)
+
+ elif not peer:
+ ts[myid] = time()
+ peer = {'ip': ip, 'port': port, 'left': left}
+ if mykey:
+ peer['key'] = mykey
+ if gip:
+ peer['given ip'] = gip
+ if port:
+ if not self.natcheck or (local_override and self.only_local_override_ip):
+ peer['nat'] = 0
+ self.natcheckOK(infohash,myid,ip1,port,left)
+ else:
+ NatCheck(self.connectback_result,infohash,myid,ip1,port)
+ else:
+ peer['nat'] = 2**30
+ if event == 'completed':
+ self.completed[infohash] += 1
+ if not left:
+ self.seedcount[infohash] += 1
+
+ peers[myid] = peer
+
+ else:
+ if not auth:
+ return rsize # return w/o changing stats
+
+ ts[myid] = time()
+ if not left and peer['left']:
+ self.completed[infohash] += 1
+ self.seedcount[infohash] += 1
+ if not peer.get('nat', -1):
+ for bc in self.becache[infohash]:
+ bc[1][myid] = bc[0][myid]
+ del bc[0][myid]
+ if peer['left']:
+ peer['left'] = left
+
+ recheck = False
+ if ip != peer['ip']:
+ peer['ip'] = ip
+ recheck = True
+ if gip != peer.get('given ip'):
+ if gip:
+ peer['given ip'] = gip
+ elif peer.has_key('given ip'):
+ del peer['given ip']
+ if local_override:
+ if self.only_local_override_ip:
+ self.natcheckOK(infohash,myid,ip1,port,left)
+ else:
+ recheck = True
+
+ if port and self.natcheck:
+ if recheck:
+ if peer.has_key('nat'):
+ if not peer['nat']:
+ l = self.becache[infohash]
+ y = not peer['left']
+ for x in l:
+ del x[y][myid]
+ del peer['nat'] # restart NAT testing
+ else:
+ natted = peer.get('nat', -1)
+ if natted and natted < self.natcheck:
+ recheck = True
+
+ if recheck:
+ NatCheck(self.connectback_result,infohash,myid,ip1,port)
+
+ return rsize
+
+ def peerlist(self, infohash, stopped, is_seed, return_type, rsize):
+ data = {} # return data
+ seeds = self.seedcount[infohash]
+ data['complete'] = seeds
+ data['incomplete'] = len(self.downloads[infohash]) - seeds
+
+ if ( self.allowed is not None and self.config['allowed_controls'] and
+ self.allowed[infohash].has_key('warning message') ):
+ data['warning message'] = self.allowed[infohash]['warning message']
+
+ data['interval'] = self.reannounce_interval
+ if stopped or not rsize: # save some bandwidth
+ data['peers'] = []
+ return data
+
+ bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
+ len_l = len(bc[0][0])
+ len_s = len(bc[0][1])
+ if not (len_l+len_s): # caches are empty!
+ data['peers'] = []
+ return data
+ l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
+ cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
+ if cache:
+ if cache[0] + self.config['min_time_between_cache_refreshes'] < time():
+ cache = None
+ else:
+ if ( (is_seed and len(cache[1]) < rsize)
+ or len(cache[1]) < l_get_size or not cache[1] ):
+ cache = None
+ if not cache:
+ vv = [[],[],[]]
+ cache = [ time(),
+ bc[return_type][0].values()+vv[return_type],
+ bc[return_type][1].values() ]
+ shuffle(cache[1])
+ shuffle(cache[2])
+ self.cached[infohash][return_type] = cache
+ for rr in xrange(len(self.cached[infohash])):
+ if rr != return_type:
+ try:
+ self.cached[infohash][rr][1].extend(vv[rr])
+ except:
+ pass
+ if len(cache[1]) < l_get_size:
+ peerdata = cache[1]
+ if not is_seed:
+ peerdata.extend(cache[2])
+ cache[1] = []
+ cache[2] = []
+ else:
+ if not is_seed:
+ peerdata = cache[2][l_get_size-rsize:]
+ del cache[2][l_get_size-rsize:]
+ rsize -= len(peerdata)
+ else:
+ peerdata = []
+ if rsize:
+ peerdata.extend(cache[1][-rsize:])
+ del cache[1][-rsize:]
+ if return_type == 2:
+ peerdata = ''.join(peerdata)
+ data['peers'] = peerdata
+ return data
+
+
+ def natcheckOK(self, infohash, peerid, ip, port, not_seed):
+ bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
+ bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
+ 'peer id': peerid}))
+ bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
+ bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
+
+ def natchecklog(self, peerid, ip, port, result):
+ year, month, day, hour, minute, second, a, b, c = localtime(time())
+ log.msg('%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
+ ip, quote(peerid), day, months[month], year, hour, minute, second,
+ ip, port, result))
+
+ def connectback_result(self, result, downloadid, peerid, ip, port):
+ record = self.downloads.get(downloadid, {}).get(peerid)
+ if ( record is None
+ or (record['ip'] != ip and record.get('given ip') != ip)
+ or record['port'] != port ):
+ if self.config['log_nat_checks']:
+ self.natchecklog(peerid, ip, port, 404)
+ return
+ if self.config['log_nat_checks']:
+ if result:
+ x = 200
+ else:
+ x = 503
+ self.natchecklog(peerid, ip, port, x)
+ if not record.has_key('nat'):
+ record['nat'] = int(not result)
+ if result:
+ self.natcheckOK(downloadid,peerid,ip,port,record['left'])
+ elif result and record['nat']:
+ record['nat'] = 0
+ self.natcheckOK(downloadid,peerid,ip,port,record['left'])
+ elif not result:
+ record['nat'] += 1
+
+ def save_dfile(self):
+ # need to arrange for this to be called just before shutdown
+ log.msg('save_dfile')
+ reactor.callLater(self.save_dfile_interval, self.save_dfile)
+ h = open(self.dfile, 'wb')
+ h.write(bencode(self.state))
+ h.close()
+
+ def parse_allowed(self):
+ log.msg('parse_allowed')
+ reactor.callLater(self.parse_dir_interval, self.parse_allowed)
+
+ # logging broken .torrent files would be useful but could confuse
+ # programs parsing log files, so errors are just ignored for now
+ def ignore(message):
+ pass
+ r = parsedir(self.allowed_dir, self.allowed, self.allowed_dir_files,
+ self.allowed_dir_blocked, ignore,include_metainfo = False)
+ ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
+ added, garbage2 ) = r
+
+ for infohash in added:
+ self.downloads.setdefault(infohash, {})
+ self.completed.setdefault(infohash, 0)
+ self.seedcount.setdefault(infohash, 0)
+
+ self.state['allowed'] = self.allowed
+ self.state['allowed_dir_files'] = self.allowed_dir_files
+
+ def delete_peer(self, infohash, peerid):
+ dls = self.downloads[infohash]
+ peer = dls[peerid]
+ if not peer['left']:
+ self.seedcount[infohash] -= 1
+ if not peer.get('nat',-1):
+ l = self.becache[infohash]
+ y = not peer['left']
+ for x in l:
+ del x[y][peerid]
+ del self.times[infohash][peerid]
+ del dls[peerid]
+
+ def expire_downloaders(self):
+ log.msg('expire_downloaders')
+ reactor.callLater(self.timeout_downloaders_interval, self.expire_downloaders)
+ for infohash, peertimes in self.times.items():
+ for myid, t in peertimes.items():
+ if t < self.prevtime:
+ self.delete_peer(infohash, myid)
+ self.prevtime = time()
+ if (self.keep_dead != 1):
+ for key, peers in self.downloads.items():
+ if len(peers) == 0 and (self.allowed is None or
+ key not in self.allowed):
+ del self.times[key]
+ del self.downloads[key]
+ del self.seedcount[key]
+
+class InfoPage(Resource):
+    """Serves the tracker's HTML status page at the site root.
+
+    Honors show_infopage / infopage_redirect config, and renders a
+    per-torrent statistics table (with names and sizes when an
+    allowed_dir is configured).
+    """
+    def __init__(self, tracker):
+        Resource.__init__(self)
+        self.tracker = tracker
+
+    def getChild(self, name, request):
+        # treat /, /index.html and /index.htm as this page
+        if name in ['', 'index.html', 'index.htm']:
+            return self
+        return Resource.getChild(self, name, request)
+
+    def render_GET(self, request):
+        try:
+            if not self.tracker.config['show_infopage']:
+                request.setResponseCode(404, 'Not Found')
+                request.setHeader('Content-Type', 'text/plain')
+                request.setHeader('Pragma', 'no-cache')
+                return alas
+
+            red = self.tracker.config['infopage_redirect']
+            if red != '':
+                request.redirect(red)
+                request.finish()
+                return server.NOT_DONE_YET
+
+            request.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
+                '<html><head><title>BitTorrent download info</title>\n')
+            if self.tracker.favicon is not None:
+                request.write('<link rel="shortcut icon" href="/favicon.ico">\n')
+            request.write('</head>\n<body>\n' \
+                '<h3>BitTorrent download info</h3>\n'\
+                '<ul>\n'
+                '<li><strong>tracker version:</strong> %s</li>\n' \
+                '<li><strong>server time:</strong> %s</li>\n' \
+                '</ul>\n' % (version, isotime()))
+            # choose which torrents to list: allowed dir (with or
+            # without names) or everything currently tracked
+            if self.tracker.allowed is not None:
+                if self.tracker.show_names:
+                    names = [ (value['name'], infohash)
+                              for infohash, value in self.tracker.allowed.iteritems()]
+                else:
+                    names = [(None, infohash) for infohash in self.tracker.allowed]
+            else:
+                names = [ (None, infohash) for infohash in self.tracker.downloads]
+            if not names:
+                request.write('<p>not tracking any files yet...</p>\n')
+            else:
+                names.sort()
+                tn = 0  # total completed downloads
+                tc = 0  # total seeds
+                td = 0  # total leechers
+                tt = 0  # Total transferred
+                ts = 0  # Total size
+                nf = 0  # Number of files displayed
+                if self.tracker.allowed is not None and self.tracker.show_names:
+                    request.write('<table summary="files" border="1">\n' \
+                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
+                else:
+                    request.write('<table summary="files">\n' \
+                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
+                for name, infohash in names:
+                    l = self.tracker.downloads[infohash]
+                    n = self.tracker.completed.get(infohash, 0)
+                    tn = tn + n
+                    c = self.tracker.seedcount[infohash]
+                    tc = tc + c
+                    d = len(l) - c
+                    td = td + d
+                    nf = nf + 1
+                    if self.tracker.allowed is not None and self.tracker.show_names:
+                        if self.tracker.allowed.has_key(infohash):
+                            sz = self.tracker.allowed[infohash]['length']  # size
+                            ts = ts + sz
+                            szt = sz * n   # Transferred for this torrent
+                            tt = tt + szt
+                            if self.tracker.allow_get == 1:
+                                linkname = '<a href="/file?info_hash=' + quote(infohash) + '">' + name + '</a>'
+                            else:
+                                linkname = name
+                            request.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
+                                % (infohash.encode('hex'), linkname, size_format(sz), c, d, n, size_format(szt)))
+                    else:
+                        request.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
+                            % (infohash.encode('hex'), c, d, n))
+                ttn = 0  # all-time completions, including expired torrents
+                for i in self.tracker.completed.values():
+                    ttn = ttn + i
+                if self.tracker.allowed is not None and self.tracker.show_names:
+                    request.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
+                        % (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
+                else:
+                    request.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td></tr>\n'
+                        % (nf, tc, td, tn, ttn))
+                request.write('</table>\n' \
+                    '<ul>\n' \
+                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
+                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
+                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
+                    '<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
+                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
+                    '</ul>\n')
+
+            request.write('</body>\n' \
+                '</html>\n')
+            request.finish()
+            return server.NOT_DONE_YET
+
+        except:
+            # page generation is best-effort; never let an exception
+            # propagate into twisted's request machinery
+            request.setResponseCode(500, 'Internal Server Error')
+            log.err()
+            return 'Server Error'
+
+class Scrape(Resource):
+    """Handles /scrape: returns bencoded per-torrent statistics.
+
+    Behavior depends on the scrape_allowed config value ('none',
+    'specific' or 'full') and on whether info_hash parameters were
+    supplied.
+    """
+    isLeaf = True
+
+    def __init__(self, tracker):
+        Resource.__init__(self)
+        self.tracker = tracker
+
+    def render_GET(self, request):
+        fs = {}
+        if request.args.has_key('info_hash'):
+            # specific scrape: only the requested hashes
+            if self.tracker.config['scrape_allowed'] not in ['specific', 'full']:
+                request.setResponseCode(400, 'Not Authorized')
+                request.setHeader('Content-Type', 'text/plain')
+                request.setHeader('Pragma', 'no-cache')
+                return bencode({'failure reason':
+                    _("specific scrape function is not available with this tracker.")})
+            for infohash in request.args['info_hash']:
+                # silently skip hashes not in the allowed set
+                if self.tracker.allowed is not None and infohash not in self.tracker.allowed:
+                    continue
+                if infohash in self.tracker.downloads:
+                    fs[infohash] = self.tracker.scrapedata(infohash)
+        else:
+            # full scrape: everything allowed (or everything tracked)
+            if self.tracker.config['scrape_allowed'] != 'full':
+                request.setResponseCode(400, 'Not Authorized')
+                request.setHeader('Content-Type', 'text/plain')
+                request.setHeader('Pragma', 'no-cache')
+                return bencode({'failure reason':
+                    _("full scrape function is not available with this tracker.")})
+            if self.tracker.allowed is not None:
+                hashes = self.tracker.allowed
+            else:
+                hashes = self.tracker.downloads
+            for infohash in hashes:
+                fs[infohash] = self.tracker.scrapedata(infohash)
+
+        request.setHeader('Content-Type', 'text/plain')
+        return bencode({'files': fs})
+
+class File(Resource):
+ isLeaf = True
+
+ def __init__(self, tracker):
+ Resource.__init__(self)
+ self.tracker = tracker
+
+ def render_GET(self, request):
+ if not self.tracker.allow_get:
+ request.setResponseCode(400, 'Not Authorized')
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return _("get function is not available with this tracker.")
+
+ infohash = None
+ if request.args.has_key('info_hash'):
+ infohash = request.args['info_hash'][0]
+
+ if not self.tracker.allowed.has_key(infohash):
+ request.setResponseCode(400, 'Not Authorized')
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return alas
+
+ fname = self.tracker.allowed[infohash]['file']
+ fpath = self.tracker.allowed[infohash]['path']
+
+ request.setHeader('Content-Type', 'application/x-bittorrent')
+ reuqest.setHeader('Content-Disposition', 'attachment; filename=' + fname)
+ return open(fpath, 'rb').read()
+
+class Announce(Resource):
+ isLeaf = True
+
+ def __init__(self, tracker):
+ Resource.__init__(self)
+ self.tracker = tracker
+
+ def render_GET(self, request):
+ ip = request.getClientIP()
+
+ nip = get_forwarded_ip(request)
+ if nip and not self.tracker.only_local_override_ip:
+ ip = nip
+
+ infohash = request.args.get('info_hash', [None])[0]
+
+ if infohash is None:
+ request.setResponseCode(400, 'Bad Request')
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return 'info_hash not specified'
+
+ if self.tracker.allowed is not None:
+ if not self.tracker.allowed.has_key(infohash):
+ # is 200 really right?
+ request.setResponseCode(200, 'Not Authorized')
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return bencode({'failure reason':
+ _("Requested download is not authorized for use with this tracker.")})
+
+ if self.tracker.config['allowed_controls']:
+ if self.tracker.allowed[infohash].has_key('failure reason'):
+ # is 200 really right?
+ request.setResponseCode(200, 'Not Authorized')
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return bencode({'failure reason': self.tracker.allowed[infohash]['failure reason']})
+
+ event = request.args.get('event', [None])[0]
+
+ rsize = self.tracker.add_data(infohash, event, ip, request.args)
+
+ compact = request.args.get('compact', [None])[0]
+
+ no_peer_id = request.args.get('no_peer_id', [None])[0]
+
+ if compact:
+ return_type = 2
+ elif no_peer_id:
+ return_type = 1
+ else:
+ return_type = 0
+
+ left = request.args.get('left', [None])[0]
+
+ data = self.tracker.peerlist(infohash, event == 'stopped', not left, return_type, rsize)
+
+ if request.args.has_key('scrape'):
+ data['scrape'] = self.tracker.scrapedata(infohash, False)
+
+ request.setHeader('Content-Type', 'text/plain')
+ request.setHeader('Pragma', 'no-cache')
+ return bencode(data)
+
+class FavIcon(Resource):
+    """Serves /favicon.ico from the data preloaded by Tracker.__init__
+    (404 when no favicon file was configured or readable)."""
+    isLeaf = True
+
+    def __init__(self, tracker):
+        Resource.__init__(self)
+        self.tracker = tracker
+
+    def render_GET(self, request):
+        if self.tracker.favicon is None:
+            request.setResponseCode(404, 'Not Found')
+            request.setHeader('Content-Type', 'text/plain')
+            request.setHeader('Pragma', 'no-cache')
+            return 'Not Found!'
+
+        request.setHeader('Content-Type', 'image/x-icon')
+        return self.tracker.favicon
+
+def size_format(s):
+    """Render a byte count as a human-readable string with binary-unit
+    suffixes (B/KiB/MiB/GiB/TiB); whole numbers below GiB, two decimal
+    places at GiB and above."""
+    if (s < 1024):
+        r = str(s) + 'B'
+    elif (s < 1048576):
+        r = str(int(s/1024)) + 'KiB'
+    elif (s < 1073741824):
+        r = str(int(s/1048576)) + 'MiB'
+    elif (s < 1099511627776):
+        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
+    else:
+        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
+    return(r)
diff --git a/NohGooee/zurllib.py b/NohGooee/zurllib.py
new file mode 100644
index 0000000..399118f
--- /dev/null
+++ b/NohGooee/zurllib.py
@@ -0,0 +1,269 @@
+#
+# zurllib.py
+#
+# This is (hopefully) a drop-in for urllib which will request gzip
+# compression and then decompress the output if a gzip-compressed response
+# is received, while maintaining the urllib API.
+#
+# by Robert Stone 2/22/2003
+# extended by Matt Chisholm
+# tracker announce --bind support added by Jeremy Evans 11/2005
+
+import sys
+
+import threading
+import thread
+from NohGooee import PeerID
+user_agent = PeerID.make_id()
+del PeerID
+
+import urllib2
+OldOpenerDirector = urllib2.OpenerDirector
+
+class MyOpenerDirector(OldOpenerDirector):
+ def __init__(self):
+ OldOpenerDirector.__init__(self)
+ self.addheaders = [('User-agent', user_agent)]
+
+urllib2.OpenerDirector = MyOpenerDirector
+
+del urllib2
+
+from httplib import HTTPConnection, HTTP
+from urllib import *
+from urllib2 import *
+from gzip import GzipFile
+from StringIO import StringIO
+import pprint
+
+DEBUG=0
+
+http_bindaddr = None
+
+# ow ow ow.
+# this is here so we can track open http connections in our pending
+# connection count. we have to buffer because maybe urllib connections
+# start before rawserver does - hopefully not more than 10 of them!
+#
+# this can all go away when we use a reasonable http client library
+# and the connections are managed inside rawserver
# ow ow ow.
# this is here so we can track open http connections in our pending
# connection count. we have to buffer because maybe urllib connections
# start before rawserver does - hopefully not more than 10 of them!
#
# this can all go away when we use a reasonable http client library
# and the connections are managed inside rawserver
class PreRawServerBuffer(object):
    """Per-address pending-connection counter used before rawserver exists."""

    def __init__(self):
        # address -> number of in-flight connection attempts
        self.pending_sockets = {}
        self.pending_sockets_lock = threading.Lock()

    def _add_pending_connection(self, addr):
        # the XP connection rate limiting is unique at the IP level
        assert isinstance(addr, str)
        self.pending_sockets_lock.acquire()
        self.__add_pending_connection(addr)
        self.pending_sockets_lock.release()

    def __add_pending_connection(self, addr):
        # count per address; missing key starts from zero
        self.pending_sockets[addr] = self.pending_sockets.get(addr, 0) + 1

    def _remove_pending_connection(self, addr):
        self.pending_sockets_lock.acquire()
        self.__remove_pending_connection(addr)
        self.pending_sockets_lock.release()

    def __remove_pending_connection(self, addr):
        remaining = self.pending_sockets[addr] - 1
        if remaining <= 0:
            # last pending attempt for this address: drop the entry entirely
            del self.pending_sockets[addr]
        else:
            self.pending_sockets[addr] = remaining
rawserver = PreRawServerBuffer()
+
def bind_tracker_connection(bindaddr):
    """Record the local address that outgoing tracker HTTP sockets bind to."""
    global http_bindaddr
    http_bindaddr = bindaddr
+
def set_zurllib_rawserver(new_rawserver):
    """Hand buffered pending-connection counts over to the real rawserver.

    Drains every (address, count) entry out of the placeholder
    PreRawServerBuffer into new_rawserver, then rebinds the module-level
    'rawserver' name so future connections are tracked there directly.
    """
    global rawserver
    # Iterate over a snapshot: _remove_pending_connection deletes exhausted
    # addresses, which would otherwise mutate the dict we are iterating and
    # raise "dictionary changed size during iteration".
    for addr, count in list(rawserver.pending_sockets.items()):
        # Transfer the full per-address count, not just one connection per
        # address — otherwise the assert below fires whenever count > 1.
        for _ in range(count):
            # NOTE(review): this method name is plural while the buffer's own
            # method is _add_pending_connection (singular) — confirm against
            # the real rawserver's API.
            new_rawserver._add_pending_connections(addr)
            rawserver._remove_pending_connection(addr)
    assert len(rawserver.pending_sockets) == 0
    rawserver = new_rawserver
+
unsafe_threads = []

def add_unsafe_thread():
    """Record the calling thread's id; urlopen() must never run on it."""
    # list.append mutates in place, so no 'global' declaration is required
    unsafe_threads.append(thread.get_ident())
+
class BindingHTTPConnection(HTTPConnection):
    # HTTPConnection variant that (a) binds outgoing sockets to
    # http_bindaddr when one was set via bind_tracker_connection(), and
    # (b) registers every connection attempt in rawserver's per-address
    # pending-connection count.
    def connect(self):

        ident = thread.get_ident()
        # never, ever, ever call urlopen from any of these threads
        assert ident not in unsafe_threads

        """Connect to the host and port specified in __init__."""
        # NOTE: 'msg' serves double duty — it is the fallback error when
        # getaddrinfo yields nothing, and is rebound to the caught
        # socket.error of the most recent failed attempt.
        msg = "getaddrinfo returns an empty list"
        for res in socket.getaddrinfo(self.host, self.port, 0,
                                      socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res

            addr = sa[0]
            # the obvious multithreading problem is avoided by using locks.
            # the lock is only acquired during the function call, so there's
            # no danger of urllib blocking rawserver.
            rawserver._add_pending_connection(addr)
            try:
                self.sock = socket.socket(af, socktype, proto)
                if http_bindaddr:
                    # bind outgoing socket to the requested local address
                    self.sock.bind((http_bindaddr, 0))
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
            # attempt finished either way, so it is no longer "pending"
            rawserver._remove_pending_connection(addr)

            if self.sock:
                break

        # every address failed (or getaddrinfo returned none): raise the
        # last socket.error, or the fallback message string
        if not self.sock:
            raise socket.error, msg
+
class BindingHTTP(HTTP):
    # pre-2.4 urllib2 expects an httplib.HTTP-style wrapper class
    _connection_class = BindingHTTPConnection

# Python >= 2.4 urllib2 takes the HTTPConnection class directly, so hand it
# BindingHTTPConnection itself instead of the HTTP wrapper above.
if sys.version_info >= (2,4):
    BindingHTTP = BindingHTTPConnection
+
class HTTPContentEncodingHandler(HTTPHandler):
    """HTTPHandler that requests gzip responses and unpacks them on receipt."""

    def http_open(self, req):
        # advertise gzip support on the outgoing request (identity is assumed)
        req.add_header("Accept-Encoding", "gzip")
        if DEBUG:
            print("Sending:")
            print(req.headers)
            print("\n")
        fp = self.do_open(BindingHTTP, req)
        headers = fp.headers
        if DEBUG:
            pprint.pprint(headers.dict)
        # wrap the raw response so gzip bodies are transparently decompressed
        resp = addinfourldecompress(fp, headers, fp.url)
        # carry the HTTP status/message over when the response has them
        for attr in ('code', 'msg'):
            if hasattr(fp, attr):
                setattr(resp, attr, getattr(fp, attr))
        return resp
+
class addinfourldecompress(addinfourl):
    """addinfourl that transparently gunzips a gzip-encoded response body."""

    def __init__(self, fp, headers, url):
        # Deliberately naive header check: no multiple values, no q-values.
        # It only works for the simplest case, which is all the BT tracker
        # needs for now.
        gzipped = headers.has_key('content-encoding') and headers['content-encoding'] == 'gzip'
        if not gzipped:
            self.gzip = 0
        else:
            if DEBUG:
                print("Contents of Content-encoding: " + headers['Content-encoding'] + "\n")
            self.gzip = 1
            # keep the raw (compressed) stream so close() can release it too
            self.rawfp = fp
            fp = GzipStream(fp)
        return addinfourl.__init__(self, fp, headers, url)

    def close(self):
        self.fp.close()
        if self.gzip:
            self.rawfp.close()

    def iscompressed(self):
        """Return 1 if the body arrived gzip-compressed, else 0."""
        return self.gzip
+
class GzipStream(StringIO):
    """Present a gzip-compressed file object as a decompressed StringIO.

    GzipFile() wants a seekable file, which a socket stream is not, so the
    whole compressed body is buffered in memory, decompressed in one pass,
    and served back through the StringIO interface.  The disadvantage is
    memory use; the advantage is random access.
    """

    def __init__(self, fp):
        self.fp = fp

        # Buffer the entire compressed stream (sockets can't seek).
        compressed = StringIO()
        chunk = fp.read()
        while chunk:
            compressed.write(chunk)
            chunk = fp.read()
        compressed.seek(0, 0)

        # Decompress chunk by chunk, joining once at the end instead of the
        # original quadratic '+='; also avoids shadowing the builtin 'str'.
        gz = GzipFile(fileobj = compressed)
        pieces = []
        chunk = gz.read()
        while chunk:
            pieces.append(chunk)
            chunk = gz.read()

        # close our utility files
        compressed.close()
        gz.close()

        # become a StringIO over the decompressed contents
        StringIO.__init__(self, ''.join(pieces))

    def close(self):
        # close the underlying stream as well as our in-memory buffer
        self.fp.close()
        return StringIO.close(self)
+
+
def test():
    """Smoke-test this module by fetching two known URLs.

    Requires network access; results are printed, not asserted.
    At the moment this is lame.
    """

    print("Running unit tests.\n")

    def printcomp(fp):
        # report whether the response arrived gzip-compressed
        try:
            if fp.iscompressed():
                print("GET was compressed.\n")
            else:
                print("GET was uncompressed.\n")
        except AttributeError:
            # only our addinfourldecompress responses have iscompressed();
            # narrowed from a bare except that hid every other failure
            print("no iscompressed function! this shouldn't happen")

    print("Trying to GET a compressed document...\n")
    fp = urlopen('http://a.scarywater.net/hng/index.shtml')
    print(fp.read())
    printcomp(fp)
    fp.close()

    print("Trying to GET an unknown document...\n")
    fp = urlopen('http://www.otaku.org/')
    print(fp.read())
    printcomp(fp)
    fp.close()
+
+
#
# Install the HTTPContentEncodingHandler that we've defined above.
#
# Module import side effect: every urlopen() in this process now goes
# through the gzip-aware opener.  ProxyHandler({}) passes an explicit empty
# proxy map — presumably to bypass environment proxy settings; confirm.
install_opener(build_opener(HTTPContentEncodingHandler, ProxyHandler({})))

if __name__ == '__main__':
    test()