quartz_torrent 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/bin/quartztorrent_download +127 -0
- data/bin/quartztorrent_download_curses +841 -0
- data/bin/quartztorrent_magnet_from_torrent +32 -0
- data/bin/quartztorrent_show_info +62 -0
- data/lib/quartz_torrent.rb +2 -0
- data/lib/quartz_torrent/bitfield.rb +314 -0
- data/lib/quartz_torrent/blockstate.rb +354 -0
- data/lib/quartz_torrent/classifiedpeers.rb +95 -0
- data/lib/quartz_torrent/extension.rb +37 -0
- data/lib/quartz_torrent/filemanager.rb +543 -0
- data/lib/quartz_torrent/formatter.rb +92 -0
- data/lib/quartz_torrent/httptrackerclient.rb +121 -0
- data/lib/quartz_torrent/interruptiblesleep.rb +27 -0
- data/lib/quartz_torrent/log.rb +132 -0
- data/lib/quartz_torrent/magnet.rb +92 -0
- data/lib/quartz_torrent/memprofiler.rb +27 -0
- data/lib/quartz_torrent/metainfo.rb +221 -0
- data/lib/quartz_torrent/metainfopiecestate.rb +265 -0
- data/lib/quartz_torrent/peer.rb +145 -0
- data/lib/quartz_torrent/peerclient.rb +1627 -0
- data/lib/quartz_torrent/peerholder.rb +123 -0
- data/lib/quartz_torrent/peermanager.rb +170 -0
- data/lib/quartz_torrent/peermsg.rb +502 -0
- data/lib/quartz_torrent/peermsgserialization.rb +102 -0
- data/lib/quartz_torrent/piecemanagerrequestmetadata.rb +12 -0
- data/lib/quartz_torrent/rate.rb +58 -0
- data/lib/quartz_torrent/ratelimit.rb +48 -0
- data/lib/quartz_torrent/reactor.rb +949 -0
- data/lib/quartz_torrent/regionmap.rb +124 -0
- data/lib/quartz_torrent/semaphore.rb +43 -0
- data/lib/quartz_torrent/trackerclient.rb +271 -0
- data/lib/quartz_torrent/udptrackerclient.rb +70 -0
- data/lib/quartz_torrent/udptrackermsg.rb +250 -0
- data/lib/quartz_torrent/util.rb +100 -0
- metadata +195 -0
@@ -0,0 +1,95 @@
|
|
1
|
+
module QuartzTorrent
  # Classifies the peers of a torrent by their connection state.
  class ClassifiedPeers
    # Pass a list of Peer objects for a specific torrent.
    def initialize(peers)
      @disconnectedPeers = []
      @handshakingPeers = []
      @establishedPeers = []
      @interestedPeers = []
      @uninterestedPeers = []
      @chokedInterestedPeers = []
      @chokedUninterestedPeers = []
      @unchokedInterestedPeers = []
      @unchokedUninterestedPeers = []
      @requestablePeers = []

      peers.each do |peer|
        # If we come across ourself, ignore it.
        next if peer.isUs

        case peer.state
        when :disconnected
          @disconnectedPeers << peer
        when :handshaking
          @handshakingPeers << peer
        when :established
          classifyEstablished peer
        end
      end
    end

    # Peers that are disconnected. Either they have never been connected to, or
    # they were connected to and have been disconnected.
    attr_accessor :disconnectedPeers

    # Peers still performing a handshake.
    attr_accessor :handshakingPeers

    # Peers with an established connection. This is the union of
    # chokedInterestedPeers, chokedUninterestedPeers, and unchokedPeers.
    attr_accessor :establishedPeers

    # Peers that we have an established connection to, and are choked but are interested.
    attr_accessor :chokedInterestedPeers

    # Peers that we have an established connection to, and are choked and are not interested.
    attr_accessor :chokedUninterestedPeers

    # Peers that we have an established connection to, and are not choked and are interested.
    attr_accessor :unchokedInterestedPeers

    # Peers that we have an established connection to, and are not choked and are not interested.
    attr_accessor :unchokedUninterestedPeers

    # Peers that we have an established connection to, and are interested.
    attr_accessor :interestedPeers

    # Peers that we have an established connection to, and are not interested.
    attr_accessor :uninterestedPeers

    # Peers that we have an established connection to, that are not choking us, that we are interested in.
    attr_accessor :requestablePeers

    def to_s
      " Choked and interested #{chokedInterestedPeers.inspect}" \
      " Choked and uninterested #{chokedUninterestedPeers.inspect}" \
      " Unchoked and interested #{unchokedInterestedPeers.inspect}" \
      " Unchoked and uninterested #{unchokedUninterestedPeers.inspect}"
    end

    private

    # Bucket an established peer by its peerChoked/peerInterested flags, and
    # record it as requestable when the peer is not choking us.
    def classifyEstablished(peer)
      @establishedPeers << peer

      if peer.peerChoked
        bucket = peer.peerInterested ? @chokedInterestedPeers : @chokedUninterestedPeers
      else
        bucket = peer.peerInterested ? @unchokedInterestedPeers : @unchokedUninterestedPeers
      end
      bucket << peer
      (peer.peerInterested ? @interestedPeers : @uninterestedPeers) << peer

      @requestablePeers << peer unless peer.amChoked
    end
  end
end
|
@@ -0,0 +1,37 @@
|
|
1
|
+
require 'quartz_torrent/peermsg'
|
2
|
+
module QuartzTorrent
  # This class contains constants that represent our numbering of the Bittorrent
  # extensions we support. It also has some utility methods related to extensions.
  class Extension

    # Our id for the ut_metadata (metadata exchange) extension.
    MetadataExtensionId = 1

    # Build an ExtendedHandshake message advertising the extensions we support.
    # Parameter info should be the metadata info struct. It is used to determine
    # the size to send when negotiating the metadata extension.
    def self.createExtendedHandshake(info)
      handshake = ExtendedHandshake.new

      handshake.dict['m'] = { 'ut_metadata' => MetadataExtensionId }
      handshake.dict['metadata_size'] = info ? info.bencode.length : 0

      handshake
    end

    # Return the peer-message class used to parse messages for the named
    # extension, or nil if the extension name is unknown.
    def self.peerMsgClassForExtensionName(info)
      case info
      when 'ut_metadata' then ExtendedMetaInfo
      end
    end
  end
end
|
@@ -0,0 +1,543 @@
|
|
1
|
+
require 'digest/sha1'
|
2
|
+
require 'fileutils'
|
3
|
+
require 'thread'
|
4
|
+
require 'quartz_torrent/regionmap'
|
5
|
+
require 'quartz_torrent/bitfield'
|
6
|
+
require 'quartz_torrent/util'
|
7
|
+
require 'quartz_torrent/semaphore'
|
8
|
+
|
9
|
+
module QuartzTorrent
|
10
|
+
# A single block that has been requested from a peer, together with when the
# request was made. NOTE(review): presumably used to detect and time out stale
# requests — confirm against blockstate.rb.
class RequestedBlock
  # Index of the requested block.
  attr_accessor :index
  # Time at which the block was requested.
  attr_accessor :time
end
|
14
|
+
|
15
|
+
# Represents a piece as it is being downloaded.
class IncompletePiece
  # Which blocks have we downloaded of this piece.
  attr_accessor :completeBlockBitfield

  # Which blocks have been requested. List of RequestedBlock objects.
  attr_accessor :requests

  # Piece index inside the torrent.
  attr_accessor :index

  # Do we have all blocks of this piece?
  # NOTE(review): this method has no body and therefore always returns nil
  # (falsy). Confirm whether callers rely on that, or whether this is an
  # unimplemented stub.
  def complete?
  end
end
|
30
|
+
|
31
|
+
# Represents a unique region of a file: a filename, an offset within that
# file, and a length in bytes.
class FileRegion
  attr_accessor :path, :offset, :length

  def initialize(path = nil, offset = nil, length = nil)
    @path, @offset, @length = path, offset, length
  end
end
|
43
|
+
|
44
|
+
# Maps pieces to sections of files.
class PieceMapper
  # Create a new PieceMapper that will map to files inside 'baseDirectory'. Parameter 'torrinfo' should
  # be a Metainfo::Info object (the info part of the metainfo).
  def initialize(baseDirectory, torrinfo)
    @torrinfo = torrinfo
    @pieceSize = torrinfo.pieceLen
    @logger = LogManager.getLogger("piecemapper")

    # Map keyed by each file's inclusive end offset within the concatenated
    # torrent data; the value is that file's path.
    @fileRegionMap = RegionMap.new
    offset = 0
    @logger.debug "Map (offset to path):"
    torrinfo.files.each do |file|
      offset += file.length
      path = baseDirectory + File::SEPARATOR + file.path
      @fileRegionMap.add offset-1, path
      @logger.debug " #{offset-1}\t#{path}"
    end
  end

  # Return a list of FileRegion objects. The FileRegion offsets specify
  # in order which regions of files the piece covers.
  def findPiece(pieceIndex)
    leftOffset = @pieceSize*pieceIndex
    rightOffset = leftOffset + @pieceSize-1

    findPart(leftOffset, rightOffset)
  end

  # Return a list of FileRegion objects. The FileRegion offsets specify
  # in order which regions of files the block covers.
  def findBlock(pieceIndex, offset, length)
    leftOffset = @pieceSize*pieceIndex + offset
    rightOffset = leftOffset + length-1

    findPart(leftOffset, rightOffset)
  end

  private
  # Map the absolute byte range [leftOffset, rightOffset] of the torrent data
  # onto the file(s) it spans, returning an ordered list of FileRegion objects.
  def findPart(leftOffset, rightOffset)
    # RegionMap#find appears to return [index, value, left, right, offset]
    # (NOTE(review): inferred from the indexing below; confirm against regionmap.rb).
    leftData = @fileRegionMap.find(leftOffset)
    rightData = @fileRegionMap.find(rightOffset)
    if rightData.nil?
      # Right end is past the end of the rightmost limit. Scale it back.
      rightData = @fileRegionMap.last
      rightData.push rightData[3]-rightData[2]
    end
    raise "Offset #{leftOffset} is out of range" if leftData.nil?
    leftIndex = leftData[0]
    rightIndex = rightData[0]
    if leftIndex == rightIndex
      # The whole range falls within a single file.
      return [FileRegion.new(leftData[1], leftData[4], rightData[4]-leftData[4]+1)]
    end

    result = []
    (leftIndex..rightIndex).each do |i|
      if i == leftIndex
        # First file: from the offset within the file to the end of that file.
        result.push FileRegion.new(leftData[1], leftData[4], leftData[3]-leftData[4]-leftData[2]+1)
      elsif i == rightIndex
        # Last file: from the start of the file up to the right offset.
        result.push FileRegion.new(rightData[1], 0, rightData[4]+1)
      else
        # Files in the middle are covered entirely.
        value, left, right = @fileRegionMap[i]
        result.push FileRegion.new(value, 0, right-left+1)
      end
    end
    result
  end
end
|
113
|
+
|
114
|
+
# Basic IOManager that isn't used by a reactor. Caches open File handles
# keyed by path so that each file is opened at most once.
class IOManager
  def initialize
    # path => open File handle
    @io = {}
  end

  # Return the already-open IO for 'path', or nil if it hasn't been opened yet.
  def get(path)
    @io[path]
  end

  # Open the file at 'path' for binary read/write, cache the handle, and
  # return it. An existing file is opened as "rb+" so it is not truncated;
  # otherwise it is created with "wb+".
  def open(path)
    # File.exist? — File.exists? is deprecated and was removed in Ruby 3.2.
    mode = File.exist?(path) ? "rb+" : "wb+"
    io = File.open(path, mode)
    @io[path] = io
    io
  end

  # Flush every cached IO to disk.
  def flush
    @io.each_value(&:flush)
  end
end
|
143
|
+
|
144
|
+
# Can read and write pieces and blocks of a torrent.
class PieceIO
  # Create a new PieceIO that will map to files inside 'baseDirectory'. Parameter 'torrinfo' should
  # be a Metainfo::Info object (the info part of the metainfo).
  def initialize(baseDirectory, torrinfo, ioManager = IOManager.new)
    @baseDirectory = baseDirectory
    @torrinfo = torrinfo
    @pieceMapper = PieceMapper.new(baseDirectory, torrinfo)
    @ioManager = ioManager
    @logger = LogManager.getLogger("pieceio")
    @torrentDataLength = torrinfo.dataLength
  end

  # Get the overall length of the torrent data.
  attr_reader :torrentDataLength

  # Write a block to an in-progress piece. The block is written to
  # piece 'pieceIndex', at offset 'offset'. The block data is in block.
  # Raises if a file lacks write permission or cannot be opened; a failed
  # write is logged and aborts the remainder of the block.
  def writeBlock(pieceIndex, offset, block)
    regions = @pieceMapper.findBlock(pieceIndex, offset, block.length)
    indexInBlock = 0
    regions.each do |region|
      # Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
      # are not then this is a real IO.
      io = @ioManager.get(region.path)
      if ! io
        # No IO for this file.
        # (File.exist? — File.exists? is deprecated and removed in Ruby 3.2.)
        raise "This process doesn't have write permission for the file #{region.path}" if File.exist?(region.path) && ! File.writable?(region.path)

        # Ensure parent directories exist.
        dir = File.dirname region.path
        FileUtils.mkdir_p dir if ! File.directory?(dir)

        begin
          io = @ioManager.open(region.path)
        rescue
          @logger.error "Opening file #{region.path} failed: #{$!}"
          raise "Opening file #{region.path} failed"
        end
      end

      io.seek region.offset, IO::SEEK_SET
      begin
        io.write(block[indexInBlock, region.length])
        indexInBlock += region.length
      rescue
        # Error when writing: log and give up on the rest of the block.
        # (A dead 'piece = nil' assignment, copied from readRegions and never
        # read in this method, was removed here.)
        @logger.error "Writing block to file #{region.path} failed: #{$!}"
        break
      end

      break if indexInBlock >= block.length
    end
  end

  # Read a block from a completed piece. Returns nil if the block doesn't exist yet. Throws exceptions
  # on error (for example, opening a file failed).
  def readBlock(pieceIndex, offset, length)
    readRegions @pieceMapper.findBlock(pieceIndex, offset, length)
  end

  # Read a piece. Returns nil if the piece is not yet present.
  # NOTE: this method expects that if the ioManager is a reactor iomanager,
  # that the io was set with errorHandler=false so that we get the EOF errors.
  def readPiece(pieceIndex)
    readRegions @pieceMapper.findPiece(pieceIndex)
  end

  # Flush all cached IOs to disk.
  def flush
    @ioManager.flush
  end

  private
  # Pass an ordered list of FileRegions to load. Returns the concatenated
  # data, or nil if any region's file is missing or too short.
  def readRegions(regions)
    piece = ""
    regions.each do |region|
      # Get the IO for the file with path 'path'. If we are being used in a reactor, this is the IO facade. If we
      # are not then this is a real IO.
      io = @ioManager.get(region.path)
      if ! io
        # No IO for this file.
        if ! File.exist?(region.path)
          # This file hasn't been created yet by having blocks written to it.
          piece = nil
          break
        end

        raise "This process doesn't have read permission for the file #{region.path}" if ! File.readable?(region.path)

        begin
          io = @ioManager.open(region.path)
        rescue
          @logger.error "Opening file #{region.path} failed: #{$!}"
          raise "Opening file #{region.path} failed"
        end
      end
      io.seek region.offset, IO::SEEK_SET
      begin
        piece << io.read(region.length)
      rescue
        # Error when reading. Likely EOF, meaning this piece isn't all there yet.
        piece = nil
        break
      end
    end
    piece
  end
end
|
255
|
+
|
256
|
+
# A class that spawns a thread for performing PieceIO operations asynchronously. This class is what is used to
# read and write blocks of a torrent.
class PieceManager
  # The result of an asynchronous operation performed by the PieceManager.
  class Result
    def initialize(requestId, success, data, error = nil)
      @success = success
      @error = error
      @data = data
      @requestId = requestId
    end

    # The ID of the request that this result is for. This is the same
    # as the id returned when making the request.
    attr_accessor :requestId

    # The error message if the operation was not successful.
    attr_accessor :error

    # Any data returned in the result.
    attr_accessor :data

    # Returns true if the operation was successful.
    def successful?
      @success
    end
  end

  # Create a new PieceManager that will map to files inside 'baseDirectory'. Parameter 'torrinfo' should
  # be a Metainfo::Info object (the info part of the metainfo).
  # Parameter 'alertCallback' should be a Proc. It will be called when an operation is complete. The
  # alerted code can then retrieve the events from the completed queue.
  # This callback will be called from a different thread.
  def initialize(baseDirectory, torrinfo, alertCallback = nil)
    @alertCallback = alertCallback
    # Guards @results, which is shared with the worker thread.
    @mutex = Mutex.new
    @results = []
    @requests = []
    # The progress of requests as they are being serviced, keyed by request id.
    @requestProgress = {}
    @progressMutex = Mutex.new
    @requestsSemaphore = Semaphore.new
    @resultsSemaphore = Semaphore.new
    @baseDirectory = baseDirectory
    @torrinfo = torrinfo
    @pieceIO = PieceIO.new(baseDirectory, torrinfo)
    @requestId = 0
    @logger = LogManager.getLogger("piecemanager")
    @torrentDataLength = torrinfo.dataLength
    # Used to block callers until the worker thread has actually started.
    @startedCondition = ConditionVariable.new
    @startedMutex = Mutex.new
    @state = :before_start
    startThread
  end

  # The overall length of the torrent data.
  attr_reader :torrentDataLength

  # Read a block from the torrent asynchronously. When the operation
  # is complete the result is stored in the 'results' list.
  # This method returns an id that can be used to match the response
  # to the request.
  # The readBlock and writeBlock methods are not threadsafe with respect to callers;
  # they shouldn't be called by multiple threads concurrently.
  def readBlock(pieceIndex, offset, length)
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :read_block, pieceIndex, offset, length]
    @requestsSemaphore.signal
    id
  end

  # Write a block to the torrent asynchronously.
  def writeBlock(pieceIndex, offset, block)
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :write_block, pieceIndex, offset, block]
    @requestsSemaphore.signal
    id
  end

  # Read a piece of the torrent asynchronously.
  def readPiece(pieceIndex)
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :read_piece, pieceIndex]
    @requestsSemaphore.signal
    id
  end

  # This is meant to be called when the torrent is first loaded
  # to check what pieces we've already downloaded.
  # The data property of the result for this call is set to a Bitfield representing
  # the complete pieces.
  def findExistingPieces
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :find_existing]
    @requestsSemaphore.signal
    id
  end

  # Validate that the hash of the downloaded piece matches the hash from the metainfo.
  # The result is successful? if the hash matches, false otherwise. The data of the result is
  # set to the piece index.
  def checkPieceHash(pieceIndex)
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :hash_piece, pieceIndex]
    @requestsSemaphore.signal
    id
  end

  # Flush to disk. The result for this operation is always successful.
  def flush()
    id = returnAndIncrRequestId
    return id if @state == :after_stop
    @requests.push [id, :flush]
    @requestsSemaphore.signal
    id
  end

  # Result retrieval. Returns the next result, or nil if none are ready.
  # The results that are returned are PieceIOWorker::Result objects.
  # For readBlock operations the data property of the result object contains
  # the block.
  def nextResult
    result = nil
    @mutex.synchronize do
      result = @results.shift
      # A request's progress entry is no longer needed once its result is consumed.
      @progressMutex.synchronize{ @requestProgress.delete result.requestId } if result
    end
    result
  end

  # Get the progress of the specified request as an integer between 0 and 100.
  # Currently, only the findExistingPieces operation registers progress; other operations
  # just return nil for this.
  def progress(requestId)
    result = nil
    @progressMutex.synchronize{ result = @requestProgress[requestId] }
    result
  end

  # Wait until the next result is ready. If this method is used it must always
  # be called before nextResult. This is mostly useful for testing.
  def wait
    waitUntilStarted

    @resultsSemaphore.wait
  end

  # Check if there are results ready. This method will return immediately
  # without blocking.
  def hasResults?
    ! @results.empty?
  end

  # Stop the PieceManager.
  def stop
    waitUntilStarted
    @state = :after_stop
    id = returnAndIncrRequestId
    @requests.push [id, :stop]
    @requestsSemaphore.signal
  end

  private
  # Spawn the worker thread that services queued requests until stopped.
  def startThread
    @thread = Thread.new do
      # Signal any waiters that the worker is now running.
      @startedMutex.synchronize do
        @state = :running
        @startedCondition.broadcast
      end
      QuartzTorrent.initThread("piecemanager")
      while @state == :running
        begin
          @requestsSemaphore.wait

          if @requests.size > 1000
            @logger.warn "Request queue has grown past 1000 entries; we are io bound"
          end

          result = nil
          req = @requests.shift
          @progressMutex.synchronize{ @requestProgress[req[0]] = 0 }
          begin
            # Dispatch on the request type queued by the public methods.
            if req[1] == :read_block
              result = @pieceIO.readBlock req[2], req[3], req[4]
            elsif req[1] == :write_block
              @pieceIO.writeBlock req[2], req[3], req[4]
            elsif req[1] == :read_piece
              result = @pieceIO.readPiece req[2]
            elsif req[1] == :find_existing
              result = findExistingPiecesInternal(req[0])
            elsif req[1] == :hash_piece
              # For hash checks the Result's success flag is the hash-match
              # outcome, and its data is the piece index.
              result = hashPiece req[2]
              result = Result.new(req[0], result, req[2])
            elsif req[1] == :flush
              @pieceIO.flush
              result = true
            elsif req[1] == :stop
              result = true
            end
            # Wrap raw values in a successful Result.
            result = Result.new(req[0], true, result) if ! result.is_a?(Result)
          rescue
            @logger.error "Exception when processing request: #{$!}"
            @logger.error "#{$!.backtrace.join("\n")}"
            result = Result.new(req[0], false, nil, $!)
          end
          @progressMutex.synchronize{ @requestProgress[req[0]] = 100 }

          @mutex.synchronize do
            @results.push result
          end
          @resultsSemaphore.signal

          @alertCallback.call() if @alertCallback
        rescue
          @logger.error "Unexpected exception in PieceManager worker thread: #{$!}"
          @logger.error "#{$!.backtrace.join("\n")}"
        end
      end
    end
  end

  # Block the calling thread until the worker thread has started.
  def waitUntilStarted
    if @state == :before_start
      @startedMutex.synchronize{ @startedCondition.wait(@startedMutex) if @state == :before_start }
    end
  end

  # Return the current request id and advance it, wrapping at 32 bits.
  def returnAndIncrRequestId
    result = @requestId
    @requestId += 1
    # Wrap so the id stays within 32 bits.
    @requestId = 0 if @requestId > 0xffffffff
    result
  end

  # Hash-check every piece on disk, recording progress under 'requestId'.
  # Returns a Bitfield with a bit set for each complete, valid piece.
  def findExistingPiecesInternal(requestId)
    completePieceBitfield = Bitfield.new(@torrinfo.pieces.length)
    raise "Base directory #{@baseDirectory} doesn't exist" if ! File.directory?(@baseDirectory)
    raise "Base directory #{@baseDirectory} is not writable" if ! File.writable?(@baseDirectory)
    raise "Base directory #{@baseDirectory} is not readable" if ! File.readable?(@baseDirectory)
    piecesHashes = @torrinfo.pieces
    index = 0
    piecesHashes.each do |hash|
      @logger.debug "Checking piece #{index+1}/#{piecesHashes.length}"
      piece = @pieceIO.readPiece(index)
      if piece
        # Check hash
        calc = Digest::SHA1.digest(piece)
        if calc != hash
          @logger.debug "Piece #{index} calculated hash #{QuartzTorrent.bytesToHex(calc)} doesn't match tracker hash #{QuartzTorrent.bytesToHex(hash)}"
        else
          completePieceBitfield.set(index)
          @logger.debug "Piece #{index+1}/#{piecesHashes.length} is complete."
        end
      else
        @logger.debug "Piece #{index+1}/#{piecesHashes.length} doesn't exist"
      end
      index += 1
      @progressMutex.synchronize{ @requestProgress[requestId] = (index+1)*100/piecesHashes.length }
    end
    completePieceBitfield
  end

  # Returns true if the piece at 'pieceIndex' exists on disk and its SHA1
  # hash matches the metainfo hash; false otherwise.
  def hashPiece(pieceIndex)
    result = false
    # Fetch the hashes before branching: previously this was assigned only in
    # the if-branch, so the else-branch read a nil local and raised
    # NoMethodError whenever the piece didn't exist.
    piecesHashes = @torrinfo.pieces
    piece = @pieceIO.readPiece pieceIndex
    if piece
      # Check hash
      hash = piecesHashes[pieceIndex]
      calc = Digest::SHA1.digest(piece)
      if calc != hash
        @logger.info "Piece #{pieceIndex} calculated hash #{QuartzTorrent.bytesToHex(calc)} doesn't match tracker hash #{QuartzTorrent.bytesToHex(hash)}"
      else
        @logger.debug "Piece #{pieceIndex+1}/#{piecesHashes.length} hash is correct."
        result = true
      end
    else
      @logger.debug "Piece #{pieceIndex+1}/#{piecesHashes.length} doesn't exist"
    end
    result
  end
end
|
543
|
+
end
|