murder 0.0.0.pre
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +1 -0
- data/LICENSE +17 -0
- data/README +224 -0
- data/Rakefile +52 -0
- data/VERSION +1 -0
- data/dist/BitTornado/BT1/Choker.py +128 -0
- data/dist/BitTornado/BT1/Connecter.py +288 -0
- data/dist/BitTornado/BT1/Downloader.py +594 -0
- data/dist/BitTornado/BT1/DownloaderFeedback.py +155 -0
- data/dist/BitTornado/BT1/Encrypter.py +333 -0
- data/dist/BitTornado/BT1/FileSelector.py +245 -0
- data/dist/BitTornado/BT1/Filter.py +12 -0
- data/dist/BitTornado/BT1/HTTPDownloader.py +251 -0
- data/dist/BitTornado/BT1/NatCheck.py +95 -0
- data/dist/BitTornado/BT1/PiecePicker.py +320 -0
- data/dist/BitTornado/BT1/Rerequester.py +426 -0
- data/dist/BitTornado/BT1/Statistics.py +177 -0
- data/dist/BitTornado/BT1/Storage.py +584 -0
- data/dist/BitTornado/BT1/StorageWrapper.py +1045 -0
- data/dist/BitTornado/BT1/StreamCheck.py +135 -0
- data/dist/BitTornado/BT1/T2T.py +193 -0
- data/dist/BitTornado/BT1/Uploader.py +145 -0
- data/dist/BitTornado/BT1/__init__.py +1 -0
- data/dist/BitTornado/BT1/btformats.py +100 -0
- data/dist/BitTornado/BT1/fakeopen.py +89 -0
- data/dist/BitTornado/BT1/makemetafile.py +263 -0
- data/dist/BitTornado/BT1/track.py +1067 -0
- data/dist/BitTornado/ConfigDir.py +401 -0
- data/dist/BitTornado/ConfigReader.py +1068 -0
- data/dist/BitTornado/ConnChoice.py +31 -0
- data/dist/BitTornado/CreateIcons.py +105 -0
- data/dist/BitTornado/CurrentRateMeasure.py +37 -0
- data/dist/BitTornado/HTTPHandler.py +167 -0
- data/dist/BitTornado/PSYCO.py +5 -0
- data/dist/BitTornado/RateLimiter.py +153 -0
- data/dist/BitTornado/RateMeasure.py +75 -0
- data/dist/BitTornado/RawServer.py +195 -0
- data/dist/BitTornado/ServerPortHandler.py +188 -0
- data/dist/BitTornado/SocketHandler.py +375 -0
- data/dist/BitTornado/__init__.py +63 -0
- data/dist/BitTornado/bencode.py +319 -0
- data/dist/BitTornado/bitfield.py +162 -0
- data/dist/BitTornado/clock.py +27 -0
- data/dist/BitTornado/download_bt1.py +882 -0
- data/dist/BitTornado/inifile.py +169 -0
- data/dist/BitTornado/iprangeparse.py +194 -0
- data/dist/BitTornado/launchmanycore.py +381 -0
- data/dist/BitTornado/natpunch.py +254 -0
- data/dist/BitTornado/parseargs.py +137 -0
- data/dist/BitTornado/parsedir.py +150 -0
- data/dist/BitTornado/piecebuffer.py +86 -0
- data/dist/BitTornado/selectpoll.py +109 -0
- data/dist/BitTornado/subnetparse.py +218 -0
- data/dist/BitTornado/torrentlistparse.py +38 -0
- data/dist/BitTornado/zurllib.py +100 -0
- data/dist/murder_client.py +291 -0
- data/dist/murder_make_torrent.py +46 -0
- data/dist/murder_tracker.py +28 -0
- data/doc/examples/Capfile +28 -0
- data/lib/capistrano/recipes/deploy/strategy/murder.rb +52 -0
- data/lib/murder.rb +43 -0
- data/lib/murder/admin.rb +47 -0
- data/lib/murder/murder.rb +121 -0
- data/murder.gemspec +101 -0
- metadata +129 -0
--- /dev/null
+++ data/dist/BitTornado/natpunch.py
@@ -0,0 +1,254 @@
+# Written by John Hoffman
+# derived from NATPortMapping.py by Yejun Yang
+# and from example code by Myers Carpenter
+# see LICENSE.txt for license information
+
+import socket
+from traceback import print_exc
+from subnetparse import IP_List
+from clock import clock
+from __init__ import createPeerID
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+EXPIRE_CACHE = 30 # seconds
+ID = "BT-"+createPeerID()[-4:]
+
+try:
+    import pythoncom, win32com.client
+    _supported = 1
+except ImportError:
+    _supported = 0
+
+
+
+class _UPnP1:   # derived from Myers Carpenter's code
+                # seems to use the machine's local UPnP
+                # system for its operation. Runs fairly fast
+
+    def __init__(self):
+        self.map = None
+        self.last_got_map = -10e10
+
+    def _get_map(self):
+        if self.last_got_map + EXPIRE_CACHE < clock():
+            try:
+                dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP")
+                self.map = dispatcher.StaticPortMappingCollection
+                self.last_got_map = clock()
+            except:
+                self.map = None
+        return self.map
+
+    def test(self):
+        try:
+            assert self._get_map()     # make sure a map was found
+            success = True
+        except:
+            success = False
+        return success
+
+
+    def open(self, ip, p):
+        map = self._get_map()
+        try:
+            map.Add(p,'TCP',p,ip,True,ID)
+            if DEBUG:
+                print 'port opened: '+ip+':'+str(p)
+            success = True
+        except:
+            if DEBUG:
+                print "COULDN'T OPEN "+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def close(self, p):
+        map = self._get_map()
+        try:
+            map.Remove(p,'TCP')
+            success = True
+            if DEBUG:
+                print 'port closed: '+str(p)
+        except:
+            if DEBUG:
+                print 'ERROR CLOSING '+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def clean(self, retry = False):
+        if not _supported:
+            return
+        try:
+            map = self._get_map()
+            ports_in_use = []
+            for i in xrange(len(map)):
+                try:
+                    mapping = map[i]
+                    port = mapping.ExternalPort
+                    prot = str(mapping.Protocol).lower()
+                    desc = str(mapping.Description).lower()
+                except:
+                    port = None
+                if port and prot == 'tcp' and desc[:3] == 'bt-':
+                    ports_in_use.append(port)
+            success = True
+            for port in ports_in_use:
+                try:
+                    map.Remove(port,'TCP')
+                except:
+                    success = False
+            if not success and not retry:
+                self.clean(retry = True)
+        except:
+            pass
+
+
+class _UPnP2:   # derived from Yejun Yang's code
+                # apparently does a direct search for UPnP hardware
+                # may work in some cases where _UPnP1 won't, but is slow
+                # still need to implement "clean" method
+
+    def __init__(self):
+        self.services = None
+        self.last_got_services = -10e10
+
+    def _get_services(self):
+        if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
+            self.services = []
+            try:
+                f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder")
+                for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1",
+                           "urn:schemas-upnp-org:service:WANPPPConnection:1" ):
+                    try:
+                        conns = f.FindByType(t,0)
+                        for c in xrange(len(conns)):
+                            try:
+                                svcs = conns[c].Services
+                                for s in xrange(len(svcs)):
+                                    try:
+                                        self.services.append(svcs[s])
+                                    except:
+                                        pass
+                            except:
+                                pass
+                    except:
+                        pass
+            except:
+                pass
+            self.last_got_services = clock()
+        return self.services
+
+    def test(self):
+        try:
+            assert self._get_services()    # make sure some services can be found
+            success = True
+        except:
+            success = False
+        return success
+
+
+    def open(self, ip, p):
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('AddPortMapping',['',p,'TCP',p,ip,True,ID,0],'')
+                success = True
+            except:
+                pass
+        if DEBUG and not success:
+            print "COULDN'T OPEN "+str(p)
+            print_exc()
+        return success
+
+
+    def close(self, p):
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('DeletePortMapping', ['',p,'TCP'], '')
+                success = True
+            except:
+                pass
+        if DEBUG and not success:
+            print "COULDN'T OPEN "+str(p)
+            print_exc()
+        return success
+
+
+class _UPnP:    # master holding class
+    def __init__(self):
+        self.upnp1 = _UPnP1()
+        self.upnp2 = _UPnP2()
+        self.upnplist = (None, self.upnp1, self.upnp2)
+        self.upnp = None
+        self.local_ip = None
+        self.last_got_ip = -10e10
+
+    def get_ip(self):
+        if self.last_got_ip + EXPIRE_CACHE < clock():
+            local_ips = IP_List()
+            local_ips.set_intranet_addresses()
+            try:
+                for info in socket.getaddrinfo(socket.gethostname(),0,socket.AF_INET):
+                            # exception if socket library isn't recent
+                    self.local_ip = info[4][0]
+                    if local_ips.includes(self.local_ip):
+                        self.last_got_ip = clock()
+                        if DEBUG:
+                            print 'Local IP found: '+self.local_ip
+                        break
+                else:
+                    raise ValueError('couldn\'t find intranet IP')
+            except:
+                self.local_ip = None
+                if DEBUG:
+                    print 'Error finding local IP'
+                    print_exc()
+        return self.local_ip
+
+    def test(self, upnp_type):
+        if DEBUG:
+            print 'testing UPnP type '+str(upnp_type)
+        if not upnp_type or not _supported or self.get_ip() is None:
+            if DEBUG:
+                print 'not supported'
+            return 0
+        pythoncom.CoInitialize()                # leave initialized
+        self.upnp = self.upnplist[upnp_type]    # cache this
+        if self.upnp.test():
+            if DEBUG:
+                print 'ok'
+            return upnp_type
+        if DEBUG:
+            print 'tested bad'
+        return 0
+
+    def open(self, p):
+        assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.open(self.get_ip(), p)
+
+    def close(self, p):
+        assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.close(p)
+
+    def clean(self):
+        return self.upnp1.clean()
+
+_upnp_ = _UPnP()
+
+UPnP_test = _upnp_.test
+UPnP_open_port = _upnp_.open
+UPnP_close_port = _upnp_.close
+UPnP_reset = _upnp_.clean
+
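A hypothetical caller sketch, not part of the gem: it assumes the Windows-only pythoncom/win32com bindings that natpunch.py checks for are present, and the port number is purely illustrative. It shows how the wrapper functions exported at the bottom of the file are normally driven:

    from natpunch import UPnP_test, UPnP_open_port, UPnP_close_port

    # Try the fast local-UPnP path (type 1) first, then the slower device search (type 2).
    upnp_type = UPnP_test(1) or UPnP_test(2)
    if upnp_type and UPnP_open_port(6881):     # 6881 is an illustrative port
        print 'TCP port 6881 mapped on the gateway'
        # ... run the peer, then drop the mapping on shutdown ...
        UPnP_close_port(6881)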
--- /dev/null
+++ data/dist/BitTornado/parseargs.py
@@ -0,0 +1,137 @@
+# Written by Bill Bumgarner and Bram Cohen
+# see LICENSE.txt for license information
+
+from types import *
+from cStringIO import StringIO
+
+
+def splitLine(line, COLS=80, indent=10):
+    indent = " " * indent
+    width = COLS - (len(indent) + 1)
+    if indent and width < 15:
+        width = COLS - 2
+        indent = " "
+    s = StringIO()
+    i = 0
+    for word in line.split():
+        if i == 0:
+            s.write(indent+word)
+            i = len(word)
+            continue
+        if i + len(word) >= width:
+            s.write('\n'+indent+word)
+            i = len(word)
+            continue
+        s.write(' '+word)
+        i += len(word) + 1
+    return s.getvalue()
+
+def formatDefinitions(options, COLS, presets = {}):
+    s = StringIO()
+    for (longname, default, doc) in options:
+        s.write('--' + longname + ' <arg>\n')
+        default = presets.get(longname, default)
+        if type(default) in (IntType, LongType):
+            try:
+                default = int(default)
+            except:
+                pass
+        if default is not None:
+            doc += ' (defaults to ' + repr(default) + ')'
+        s.write(splitLine(doc,COLS,10))
+        s.write('\n\n')
+    return s.getvalue()
+
+
+def usage(str):
+    raise ValueError(str)
+
+
+def defaultargs(options):
+    l = {}
+    for (longname, default, doc) in options:
+        if default is not None:
+            l[longname] = default
+    return l
+
+
+def parseargs(argv, options, minargs = None, maxargs = None, presets = {}):
+    config = {}
+    longkeyed = {}
+    for option in options:
+        longname, default, doc = option
+        longkeyed[longname] = option
+        config[longname] = default
+    for longname in presets.keys():        # presets after defaults but before arguments
+        config[longname] = presets[longname]
+    options = []
+    args = []
+    pos = 0
+    while pos < len(argv):
+        if argv[pos][:2] != '--':
+            args.append(argv[pos])
+            pos += 1
+        else:
+            if pos == len(argv) - 1:
+                usage('parameter passed in at end with no value')
+            key, value = argv[pos][2:], argv[pos+1]
+            pos += 2
+            if not longkeyed.has_key(key):
+                usage('unknown key --' + key)
+            longname, default, doc = longkeyed[key]
+            try:
+                t = type(config[longname])
+                if t is NoneType or t is StringType:
+                    config[longname] = value
+                elif t in (IntType, LongType):
+                    config[longname] = long(value)
+                elif t is FloatType:
+                    config[longname] = float(value)
+                else:
+                    assert 0
+            except ValueError, e:
+                usage('wrong format of --%s - %s' % (key, str(e)))
+    for key, value in config.items():
+        if value is None:
+            usage("Option --%s is required." % key)
+    if minargs is not None and len(args) < minargs:
+        usage("Must supply at least %d args." % minargs)
+    if maxargs is not None and len(args) > maxargs:
+        usage("Too many args - %d max." % maxargs)
+    return (config, args)
+
+def test_parseargs():
+    assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f'])
+    assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, [])
+    assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, [])
+    try:
+        parseargs([], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'x'], [])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a'], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs([], [], 1, 2)
+    except ValueError:
+        pass
+    assert parseargs(['x'], [], 1, 2) == ({}, ['x'])
+    assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y'])
+    try:
+        parseargs(['x', 'y', 'z'], [], 1, 2)
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', '2.0'], [('a', 3, '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'z'], [('a', 2.1, '')])
+    except ValueError:
+        pass
+
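A hypothetical sketch, not part of the gem, of how a command-line tool could drive parseargs.py; the option names, defaults, and port value are invented for illustration. A None default marks an option as required, and the type of the default decides how the string value from argv is coerced:

    import sys
    from parseargs import parseargs, formatDefinitions

    # (longname, default, doc) triples; values here are illustrative only
    options = [
        ('port',    6881, 'port to listen on'),
        ('tracker', None, 'announce URL of the tracker'),
    ]

    try:
        # None default -> --tracker is required; int default -> --port coerced with long()
        config, args = parseargs(sys.argv[1:], options, minargs=1, maxargs=1)
    except ValueError, e:
        print 'error: ' + str(e)
        print formatDefinitions(options, 80)
        sys.exit(1)

    print config['tracker'], config['port'], args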
--- /dev/null
+++ data/dist/BitTornado/parsedir.py
@@ -0,0 +1,150 @@
+# Written by John Hoffman and Uoti Urpala
+# see LICENSE.txt for license information
+from bencode import bencode, bdecode
+from BT1.btformats import check_info
+from os.path import exists, isfile
+from sha import sha
+import sys, os
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+NOISY = False
+
+def _errfunc(x):
+    print ":: "+x
+
+def parsedir(directory, parsed, files, blocked,
+             exts = ['.torrent'], return_metainfo = False, errfunc = _errfunc):
+    if NOISY:
+        errfunc('checking dir')
+    dirs_to_check = [directory]
+    new_files = {}
+    new_blocked = {}
+    torrent_type = {}
+    while dirs_to_check:    # first, recurse directories and gather torrents
+        directory = dirs_to_check.pop()
+        newtorrents = False
+        for f in os.listdir(directory):
+            newtorrent = None
+            for ext in exts:
+                if f.endswith(ext):
+                    newtorrent = ext[1:]
+                    break
+            if newtorrent:
+                newtorrents = True
+                p = os.path.join(directory, f)
+                new_files[p] = [(os.path.getmtime(p), os.path.getsize(p)), 0]
+                torrent_type[p] = newtorrent
+        if not newtorrents:
+            for f in os.listdir(directory):
+                p = os.path.join(directory, f)
+                if os.path.isdir(p):
+                    dirs_to_check.append(p)
+
+    new_parsed = {}
+    to_add = []
+    added = {}
+    removed = {}
+    # files[path] = [(modification_time, size), hash], hash is 0 if the file
+    # has not been successfully parsed
+    for p,v in new_files.items():   # re-add old items and check for changes
+        oldval = files.get(p)
+        if not oldval:          # new file
+            to_add.append(p)
+            continue
+        h = oldval[1]
+        if oldval[0] == v[0]:   # file is unchanged from last parse
+            if h:
+                if blocked.has_key(p):  # parseable + blocked means duplicate
+                    to_add.append(p)    # other duplicate may have gone away
+                else:
+                    new_parsed[h] = parsed[h]
+                    new_files[p] = oldval
+            else:
+                new_blocked[p] = 1      # same broken unparseable file
+            continue
+        if parsed.has_key(h) and not blocked.has_key(p):
+            if NOISY:
+                errfunc('removing '+p+' (will re-add)')
+            removed[h] = parsed[h]
+        to_add.append(p)
+
+    to_add.sort()
+    for p in to_add:                # then, parse new and changed torrents
+        new_file = new_files[p]
+        v,h = new_file
+        if new_parsed.has_key(h):    # duplicate
+            if not blocked.has_key(p) or files[p][0] != v:
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+            new_blocked[p] = 1
+            continue
+
+        if NOISY:
+            errfunc('adding '+p)
+        try:
+            ff = open(p, 'rb')
+            d = bdecode(ff.read())
+            check_info(d['info'])
+            h = sha(bencode(d['info'])).digest()
+            new_file[1] = h
+            if new_parsed.has_key(h):
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+                new_blocked[p] = 1
+                continue
+
+            a = {}
+            a['path'] = p
+            f = os.path.basename(p)
+            a['file'] = f
+            a['type'] = torrent_type[p]
+            i = d['info']
+            l = 0
+            nf = 0
+            if i.has_key('length'):
+                l = i.get('length',0)
+                nf = 1
+            elif i.has_key('files'):
+                for li in i['files']:
+                    nf += 1
+                    if li.has_key('length'):
+                        l += li['length']
+            a['numfiles'] = nf
+            a['length'] = l
+            a['name'] = i.get('name', f)
+            def setkey(k, d = d, a = a):
+                if d.has_key(k):
+                    a[k] = d[k]
+            setkey('failure reason')
+            setkey('warning message')
+            setkey('announce-list')
+            if return_metainfo:
+                a['metainfo'] = d
+        except:
+            errfunc('**warning** '+p+' has errors')
+            new_blocked[p] = 1
+            continue
+        try:
+            ff.close()
+        except:
+            pass
+        if NOISY:
+            errfunc('... successful')
+        new_parsed[h] = a
+        added[h] = a
+
+    for p,v in files.items():       # and finally, mark removed torrents
+        if not new_files.has_key(p) and not blocked.has_key(p):
+            if NOISY:
+                errfunc('removing '+p)
+            removed[v[1]] = parsed[v[1]]
+
+    if NOISY:
+        errfunc('done checking')
+    return (new_parsed, new_files, new_blocked, added, removed)
+
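A hypothetical polling loop, not part of the gem, in the style of the BitTornado tracker and launchmany tools; the directory name and sleep interval are invented. The three state dicts returned by one call are threaded back into the next so parsedir() can report what was added or removed in between:

    import time
    from parsedir import parsedir

    # state from the previous scan is passed back in so changes can be detected
    parsed, files, blocked = {}, {}, {}
    while True:
        parsed, files, blocked, added, removed = parsedir(
            'torrents', parsed, files, blocked, ['.torrent'])   # 'torrents' is illustrative
        for a in added.values():
            print 'new torrent: ' + a['path']
        for a in removed.values():
            print 'removed torrent: ' + a['path']
        time.sleep(30)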