rubytorrent 0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/COPYING +340 -0
- data/README +21 -0
- data/ReleaseNotes.txt +25 -0
- data/doc/api.txt +289 -0
- data/doc/design.txt +59 -0
- data/dump-metainfo.rb +55 -0
- data/dump-peers.rb +45 -0
- data/lib/rubytorrent.rb +94 -0
- data/lib/rubytorrent/bencoding.rb +174 -0
- data/lib/rubytorrent/controller.rb +610 -0
- data/lib/rubytorrent/message.rb +128 -0
- data/lib/rubytorrent/metainfo.rb +214 -0
- data/lib/rubytorrent/package.rb +595 -0
- data/lib/rubytorrent/peer.rb +536 -0
- data/lib/rubytorrent/server.rb +166 -0
- data/lib/rubytorrent/tracker.rb +225 -0
- data/lib/rubytorrent/typedstruct.rb +132 -0
- data/lib/rubytorrent/util.rb +186 -0
- data/make-metainfo.rb +211 -0
- data/rtpeer-ncurses.rb +340 -0
- data/rtpeer.rb +125 -0
- metadata +78 -0
data/doc/design.txt
ADDED
@@ -0,0 +1,59 @@
|
|
1
|
+
RubyTorrent Design
|
2
|
+
------------------
|
3
|
+
|
4
|
+
This is pretty sketchy at the moment but it might help if you want to
|
5
|
+
do some hacking.
|
6
|
+
|
7
|
+
|
8
|
+
+---------+ +----------------+
|
9
|
+
disk <::::::::>| Package | /-| PeerConnection |<=== network ===> peer
|
10
|
+
+---------+ | +----------------+
|
11
|
+
| |
|
12
|
+
+---------+ +------------+ / +----------------+
|
13
|
+
| Tracker |--| Controller |----| PeerConnection |<=== network ===> peer
|
14
|
+
+---------+ +------------+ \ +----------------+
|
15
|
+
/ | .
|
16
|
+
+--------+ / | .
|
17
|
+
| Server |- | .
|
18
|
+
+--------+ \
|
19
|
+
\ +---------+
|
20
|
+
| | Package |<:::::::> disk
|
21
|
+
| +---------+
|
22
|
+
| |
|
23
|
+
+---------+ \+------------+ +----------------+
|
24
|
+
| Tracker |--| Controller |----| PeerConnection |<=== network ===> peer
|
25
|
+
+---------+ +------------+ \ +----------------+
|
26
|
+
|
|
27
|
+
| +----------------+
|
28
|
+
|--| PeerConnection |<=== network ===> peer
|
29
|
+
| +----------------+
|
30
|
+
.
|
31
|
+
.
|
32
|
+
.
|
33
|
+
|
34
|
+
|
35
|
+
Each .torrent download is associated with a Package. A Package is
|
36
|
+
composed of several Pieces, each corresponding to a BitTorrent
|
37
|
+
piece. A Package provides simple aggregate operations over all the
|
38
|
+
Pieces. Each Piece handles writing to and reading from disk (across
|
39
|
+
potentially multiple file pointers), as well as dividing its data into
|
40
|
+
one or more Blocks. Each Block is an in-memory section of a Piece and
|
41
|
+
corresponds to the BitTorrent block, transferable across the network.
|
42
|
+
|
43
|
+
One Server coordinates all BitTorrent downloads. It maintains several
|
44
|
+
Controllers, one per .torrent download. The server handles all
|
45
|
+
handshaking. It accepts incoming connections, shunting them to the
|
46
|
+
appropriate Controller, and creates outgoing ones at the Controllers'
|
47
|
+
behest. Each connection to a peer is maintained by a PeerConnection,
|
48
|
+
which keeps track of the peer's state and the connection state.
|
49
|
+
PeerConnections get empty Blocks from their Controller and send
|
50
|
+
requests for them across the wire, and, upon receiving requests from
|
51
|
+
the peer, get full Blocks from the Package and transmit them back.
|
52
|
+
|
53
|
+
The Controller also keeps a Tracker object, which it uses to
|
54
|
+
communicate with the tracker.
|
55
|
+
|
56
|
+
PeerConnections are completely reactive, and are tightly integrated
|
57
|
+
with their Controller. They rely on the Controller's heartbeat thread
|
58
|
+
to trigger any time-dependent events, and also for propagating any
|
59
|
+
messages to other peers.
|
data/dump-metainfo.rb
ADDED
@@ -0,0 +1,55 @@
|
|
1
|
+
## dump-metainfo.rb -- command-line .torrent dumper
|
2
|
+
## Copyright 2004 William Morgan.
|
3
|
+
##
|
4
|
+
## This file is part of RubyTorrent. RubyTorrent is free software;
|
5
|
+
## you can redistribute it and/or modify it under the terms of version
|
6
|
+
## 2 of the GNU General Public License as published by the Free
|
7
|
+
## Software Foundation.
|
8
|
+
##
|
9
|
+
## RubyTorrent is distributed in the hope that it will be useful, but
|
10
|
+
## WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
12
|
+
## General Public License (in the file COPYING) for more details.
|
13
|
+
|
14
|
+
require 'rubytorrent'
|
15
|
+
|
16
|
+
## Render the "info" section of a metainfo dictionary as human-readable
## text: the file name(s) and length(s), followed by the piece size and
## the piece count (the pieces string holds one 20-byte SHA1 per piece).
def dump_metainfoinfo(mii)
  files = if mii.single?
<<EOS
length: #{mii.length / 1024}kb
filename: #{mii.name}
EOS
  else
    mii.files.map do |f|
<<EOS
- filename: #{File.join(mii.name, f.path)}
length: #{f.length}
EOS
    end.join + "\n"
  end
  files + <<EOS
piece length: #{mii.piece_length / 1024}kb
pieces: #{mii.pieces.length / 20}
EOS
end
|
34
|
+
|
35
|
+
## Render a full MetaInfo object: the info-section dump plus the
## top-level tracker/creation metadata, one "key: value" line each.
def dump_metainfo(mi)
  none = "<not specified>"
  alist = mi.announce_list.nil? ? none : mi.announce_list.map { |x| x.join(', ') }.join('; ')
  <<EOS
#{dump_metainfoinfo(mi.info).chomp}
announce: #{mi.announce}
announce-list: #{alist}
creation date: #{mi.creation_date || none}
created by: #{mi.created_by || none}
comment: #{mi.comment || none}
EOS
end
|
45
|
+
|
46
|
+
## Command-line driver: expects exactly one argument, the .torrent
## path/URL; prints its metainfo or a usage line.
if ARGV.length != 1
  puts "Usage: dump-metainfo <filename>"
else
  fn = ARGV[0]
  begin
    puts dump_metainfo(RubyTorrent::MetaInfo.from_location(fn))
  rescue RubyTorrent::MetaInfoFormatError, RubyTorrent::BEncodingError => e
    puts "Can't parse #{fn}: maybe not a .torrent file?"
  end
end
|
data/dump-peers.rb
ADDED
@@ -0,0 +1,45 @@
|
|
1
|
+
## dump-peers.rb -- command-line peer lister
|
2
|
+
## Copyright 2004 William Morgan.
|
3
|
+
##
|
4
|
+
## This file is part of RubyTorrent. RubyTorrent is free software;
|
5
|
+
## you can redistribute it and/or modify it under the terms of version
|
6
|
+
## 2 of the GNU General Public License as published by the Free
|
7
|
+
## Software Foundation.
|
8
|
+
##
|
9
|
+
## RubyTorrent is distributed in the hope that it will be useful, but
|
10
|
+
## WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
12
|
+
## General Public License (in the file COPYING) for more details.
|
13
|
+
|
14
|
+
require "rubytorrent"
|
15
|
+
|
16
|
+
## Write x (plus newline) to stderr and abort the process with -1.
def die(x)
  $stderr << "#{x}\n"
  exit(-1)
end
|
17
|
+
## One-line description of a tracker-reported peer: its peer id
## (inspected, since it may contain binary) or a placeholder, then
## "on ip:port".
def dump_peer(p)
  id = p.peer_id.nil? ? '<not specified>' : p.peer_id.inspect
  "#{id} on #{p.ip}:#{p.port}"
end
|
20
|
+
|
21
|
+
fn = ARGV.shift or raise "first argument must be .torrent file"

mi = nil
begin
  mi = RubyTorrent::MetaInfo.from_location(fn)
rescue RubyTorrent::MetaInfoFormatError, RubyTorrent::BEncodingError => e
  die "error parsing metainfo file #{fn}---maybe not a .torrent?"
end

# complete abuse: query every tracker in the metainfo directly and
# print whatever peer list it reports
mi.trackers.each do |track|
  puts "#{track}:"

  tc = RubyTorrent::TrackerConnection.new(track, mi.info.sha1, mi.info.total_length, 9999, "rubytorrent.dumppeer") # complete abuse, i know
  begin
    tc.force_refresh
    puts "<no peers>" if tc.peers.empty?
    tc.peers.each { |p| puts dump_peer(p) }
  rescue RubyTorrent::TrackerError => e
    puts "error connecting to tracker: #{e.message}"
  end
end
|
45
|
+
|
data/lib/rubytorrent.rb
ADDED
@@ -0,0 +1,94 @@
|
|
1
|
+
## rubytorrent.rb -- top-level RubyTorrent file.
|
2
|
+
## Copyright 2004 William Morgan.
|
3
|
+
##
|
4
|
+
## This file is part of RubyTorrent. RubyTorrent is free software;
|
5
|
+
## you can redistribute it and/or modify it under the terms of version
|
6
|
+
## 2 of the GNU General Public License as published by the Free
|
7
|
+
## Software Foundation.
|
8
|
+
##
|
9
|
+
## RubyTorrent is distributed in the hope that it will be useful, but
|
10
|
+
## WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
12
|
+
## General Public License (in the file COPYING) for more details.
|
13
|
+
|
14
|
+
require 'rubytorrent/util'
|
15
|
+
require 'rubytorrent/bencoding'
|
16
|
+
require 'rubytorrent/metainfo'
|
17
|
+
require 'rubytorrent/tracker'
|
18
|
+
require 'rubytorrent/package'
|
19
|
+
require 'rubytorrent/server'
|
20
|
+
|
21
|
+
require "socket"
|
22
|
+
Socket.do_not_reverse_lookup = true
|
23
|
+
|
24
|
+
module RubyTorrent

VERSION = 0.3

## The top-level API object: one BitTorrent instance per .torrent
## download. All instances share one process-wide Server (lazily
## created on the first download), which owns the listening socket and
## hands each torrent to its own Controller.
class BitTorrent
  include EventSource
  event :trying_peer, :forgetting_peer, :added_peer, :removed_peer,
    :received_block, :sent_block, :have_piece, :discarded_piece, :complete,
    :tracker_connected, :tracker_lost, :requested_block

  @@server = nil # shared across every BitTorrent object in the process

  ## hash arguments: host, port, dlratelim, ulratelim
  def initialize(metainfo, *rest)
    args, rest = RubyTorrent::get_args(rest, :host, :port, :dlratelim, :ulratelim, :http_proxy)
    out = rest.shift
    raise ArgumentError, "wrong number of arguments (expected 0/1, got #{rest.length})" unless rest.empty?

    ## accept a MetaInfo directly, or build one from a location string
    ## or an open stream
    @metainfo =
      case metainfo
      when MetaInfo then metainfo
      when String then MetaInfo.from_location(metainfo)
      when IO then MetaInfo.from_stream(metainfo)
      else
        raise ArgumentError, "'metainfo' should be a String, IO or RubyTorrent::MetaInfo object"
      end

    ## 'out' is either a prebuilt Package or the destination for a new one
    @package =
      case out
      when Package then out
      else Package.new(@metainfo, out)
      end

    ## boot the process-wide server on first use
    unless @@server
      @@server = RubyTorrent::Server.new(args[:host], args[:port], args[:http_proxy])
      @@server.start
    end

    @cont = @@server.add_torrent(@metainfo, @package, args[:dlratelim], args[:ulratelim])

    ## forward controller and package events to listeners on this object
    @cont.relay_event self, :trying_peer, :forgetting_peer, :added_peer,
                      :removed_peer, :received_block, :sent_block,
                      :have_piece, :discarded_piece, :tracker_connected,
                      :tracker_lost, :requested_block
    @package.relay_event self, :complete
  end

  ## thin delegators to the shared server, the controller and the package
  def ip; @@server.ip; end
  def port; @@server.port; end
  def peer_info; @cont.peer_info; end
  def shutdown; @cont.shutdown; end
  def shutdown_all; @@server.shutdown; end
  def complete?; @package.complete?; end
  def bytes_completed; @package.bytes_completed; end
  def percent_completed; @package.percent_completed; end
  def pieces_completed; @package.pieces_completed; end
  def dlrate; @cont.dlrate; end
  def ulrate; @cont.ulrate; end
  def dlamt; @cont.dlamt; end
  def ulamt; @cont.ulamt; end
  def num_pieces; @package.num_pieces; end
  def tracker; @cont.tracker ? @cont.tracker.url : nil; end
  def num_possible_peers; @cont.tracker ? @cont.tracker.peers.length : 0; end
  def num_active_peers; @cont.num_peers; end
  def total_bytes; @package.size; end
end

end
|
@@ -0,0 +1,174 @@
|
|
1
|
+
## bencoding.rb -- parse and generate bencoded values.
|
2
|
+
## Copyright 2004 William Morgan.
|
3
|
+
##
|
4
|
+
## This file is part of RubyTorrent. RubyTorrent is free software;
|
5
|
+
## you can redistribute it and/or modify it under the terms of version
|
6
|
+
## 2 of the GNU General Public License as published by the Free
|
7
|
+
## Software Foundation.
|
8
|
+
##
|
9
|
+
## RubyTorrent is distributed in the hope that it will be useful, but
|
10
|
+
## WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
12
|
+
## General Public License (in the file COPYING) for more details.
|
13
|
+
|
14
|
+
require 'uri'
|
15
|
+
require 'digest/sha1'
|
16
|
+
|
17
|
+
module RubyTorrent

## we mess in the users' namespaces in this file. there's no good way
## around it. i don't think it's too egregious though.

class BEncodingError < StandardError; end

## Enumerates the bencoded objects found in an IO-like stream. Classes
## that know how to decode themselves register via
## register_bencoded_class and must implement .bencoded?(c) (does this
## first character introduce one of mine?) and .parse_bencoding(c, s).
class BStream
  include Enumerable

  @@classes = []

  def initialize(s)
    @s = s # underlying stream; must support getc/ungetc/read
  end

  def self.register_bencoded_class(c)
    @@classes.push c
  end

  ## Yield each decodable object in turn. Stops at end of stream, or
  ## as soon as no registered class claims the next character (which
  ## is pushed back so the caller can inspect it). Returns self.
  def each
    loop do
      c = @s.getc
      break if c.nil?
      handler = @@classes.find { |klass| klass.bencoded? c }
      if handler.nil?
        @s.ungetc c
        break
      end
      yield handler.parse_bencoding(c, @s)
    end
    self
  end
end

end
|
59
|
+
|
60
|
+
class String
  ## bencoded strings are "<decimal length>:<bytes>"
  def to_bencoding
    self.length.to_s + ":" + self.to_s
  end

  ## a bencoded string is introduced by a decimal digit
  def self.bencoded?(c)
    (?0 .. ?9).include? c
  end

  ## Read "<digits>:" (the first digit already consumed as c), then
  ## read that many bytes off the stream.
  def self.parse_bencoding(c, s)
    lens = c.chr
    until (x = s.getc) == ?:
      unless (?0 .. ?9).include? x
        s.ungetc x
        raise RubyTorrent::BEncodingError, "invalid bencoded string length #{lens} + #{x}"
      end
      lens += x.chr
    end
    ## sanity-cap the digit count so a hostile stream can't demand an
    ## absurd length
    raise RubyTorrent::BEncodingError, %{invalid length #{lens} in bencoded string} unless lens.length <= 20
    len = lens.to_i
    raise RubyTorrent::BEncodingError, %{invalid length #{lens} in bencoded string} unless len >= 0
    len > 0 ? s.read(len) : ""
  end

  RubyTorrent::BStream.register_bencoded_class self
end
|
86
|
+
|
87
|
+
class Integer
  ## bencoded integers are "i<digits>e"
  def to_bencoding
    "i" + self.to_s + "e"
  end

  def self.bencoded?(c)
    c == ?i
  end

  ## Read digits (and a possible leading '-') up to the 'e' terminator.
  def self.parse_bencoding(c, s)
    ints = ""
    until (x = s.getc.chr) == 'e'
      raise RubyTorrent::BEncodingError, "invalid bencoded integer #{x.inspect}" unless x =~ /\d|-/
      ints += x
    end
    raise RubyTorrent::BEncodingError, "invalid integer #{ints} (too long)" unless ints.length <= 20
    int = ints.to_i
    ## to_i returns 0 for garbage, so a 0 result must literally be "0"
    raise RubyTorrent::BEncodingError, %{can't parse bencoded integer "#{ints}"} if (int == 0) && (ints !~ /^0$/)
    int
  end

  RubyTorrent::BStream.register_bencoded_class self
end
|
110
|
+
|
111
|
+
class Time
  ## Times are bencoded as their epoch-seconds integer (delegates to
  ## Integer#to_bencoding, defined elsewhere in this file).
  def to_bencoding
    self.to_i.to_bencoding
  end
end
|
116
|
+
|
117
|
+
module URI
  ## URIs are bencoded as their string form (delegates to
  ## String#to_bencoding, defined elsewhere in this file).
  ## NOTE(review): defining this on module URI assumes URI instances
  ## mix the module in (URI::Generic includes URI) -- confirm.
  def to_bencoding
    self.to_s.to_bencoding
  end
end
|
122
|
+
|
123
|
+
class Array
  ## bencoded lists are "l<items>e"
  def to_bencoding
    "l" + self.map { |item| item.to_bencoding }.join + "e"
  end

  def self.bencoded?(c)
    c == ?l
  end

  ## Decode elements until the stream stops yielding bencoded values,
  ## then require the 'e' terminator.
  def self.parse_bencoding(c, s)
    ret = RubyTorrent::BStream.new(s).to_a
    raise RubyTorrent::BEncodingError, "missing list terminator" unless s.getc == ?e
    ret
  end

  RubyTorrent::BStream.register_bencoded_class self
end
|
140
|
+
|
141
|
+
class Hash
  ## bencoded dictionaries are "d<key><value>...e" with keys emitted in
  ## sorted order; entries whose value is nil are omitted entirely
  def to_bencoding
    body = keys.sort.map do |k|
      v = self[k]
      v.nil? ? nil : (k.to_bencoding + v.to_bencoding)
    end
    "d" + body.compact.join + "e"
  end

  def self.bencoded?(c)
    c == ?d
  end

  ## The stream alternates key, value, key, value ... until the
  ## terminator; pair them up into a Hash.
  def self.parse_bencoding(c, s)
    ret = {}
    key = nil
    RubyTorrent::BStream.new(s).each do |x|
      if key.nil?
        key = x
      else
        ret[key] = x
        key = nil
      end
    end

    raise RubyTorrent::BEncodingError, "no dictionary terminator" unless s.getc == ?e
    ret
  end

  RubyTorrent::BStream.register_bencoded_class self
end
|
@@ -0,0 +1,610 @@
|
|
1
|
+
## controller.rb -- cross-peer logic.
|
2
|
+
## Copyright 2004 William Morgan.
|
3
|
+
##
|
4
|
+
## This file is part of RubyTorrent. RubyTorrent is free software;
|
5
|
+
## you can redistribute it and/or modify it under the terms of version
|
6
|
+
## 2 of the GNU General Public License as published by the Free
|
7
|
+
## Software Foundation.
|
8
|
+
##
|
9
|
+
## RubyTorrent is distributed in the hope that it will be useful, but
|
10
|
+
## WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
+
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
12
|
+
## General Public License (in the file COPYING) for more details.
|
13
|
+
|
14
|
+
require 'socket'
|
15
|
+
require 'thread'
|
16
|
+
|
17
|
+
module RubyTorrent
|
18
|
+
|
19
|
+
## keeps pieces in order
|
20
|
+
## Maintains a desirability ordering over all pieces of a Package,
## based on how many peers hold each piece (rarest-first), with special
## handling for started pieces, completed pieces and "fuseki" (opening)
## mode.
class PieceOrder
  POP_RECALC_THRESH = 20 # popularity of all pieces is recalculated
                         # (expensive sort) when this number of pieces
                         # have arrived at any of the peers, OR:
  POP_RECALC_LIMIT = 30  # ... when this many seconds have passed when
                         # at least one piece has changed in
                         # popularity, or if we're in fuseki mode.

  def initialize(package)
    @package = package
    @order = nil                                          # cached sorted list of piece indices
    @num_changed = 0                                      # popularity updates since last sort
    @pop = Array.new(@package.pieces.length, 0)           # per-piece popularity counts
    @jitter = Array.new(@package.pieces.length) { rand }  # random tie-breaker per piece
    @m = Mutex.new
    @last_recalc = nil
  end

  ## increment the popularity of a piece
  def inc(i)
    @m.synchronize do
      @pop[i.to_i] += 1
      @num_changed += 1
    end
  end

  ## increment the popularity of multiple pieces
  def inc_all(bitfield, inc=1)
    @m.synchronize do
      bitfield.each_index do |i|
        next unless bitfield[i]
        @pop[i] += inc
        @num_changed += 1
      end
    end
  end

  def dec_all(bitfield)
    inc_all(bitfield, -1)
  end

  ## Yield piece indices in priority order, re-sorting first if enough
  ## changes have accumulated or enough time has passed.
  def each(in_fuseki, num_peers)
    stale = (@num_changed > POP_RECALC_THRESH) || @last_recalc.nil? ||
      (((@num_changed > 0) || in_fuseki) && ((Time.now - @last_recalc) > POP_RECALC_LIMIT))
    if stale
      rt_debug "* reordering pieces: (#@num_changed changed, last recalc #{(@last_recalc.nil? ? '(never)' : (Time.now - @last_recalc).round)}s ago)..."
      recalc_order(in_fuseki, num_peers)
    end

    @order.each { |i| yield i }
  end

  private

  ## Sort key per piece: started-but-incomplete pieces come first (the
  ## more claimed, the sooner), complete pieces go last, and otherwise
  ## popularity (or, in fuseki mode, distance from num_peers/2) ranks
  ## them. The per-piece jitter breaks ties randomly.
  def recalc_order(in_fuseki, num_peers)
    @m.synchronize do
      @num_changed = 0
      @order = (0 ... @pop.length).sort_by do |i|
        p = @package.pieces[i]
        @jitter[i] +
          if p.started? && !p.complete? # always try to complete a started piece
            pri = -1 + p.unclaimed_bytes.to_f / p.length
            rt_debug " piece #{i} is started but not completed => priority #{pri} (#{p.percent_claimed.round}% claimed, #{p.percent_done.round}% done)"
            pri
          elsif p.complete? # don't need these
            @pop.length # a big number
          elsif in_fuseki # distance from (# peers) / 2
            (@pop[i] - (num_peers / 2)).abs
          else
            @pop[i]
          end
      end
    end
    @last_recalc = Time.now
    rt_debug "* new piece priority: " + @order[0...15].map { |x| x.to_s }.join(', ') + " ..."
  end
end
|
98
|
+
|
99
|
+
## The Controller manages all PeerConnections for a single Package. It
|
100
|
+
## instructs them to request blocks, and tells them whether to choke
|
101
|
+
## their connections or not. It also reports progress to the tracker.
|
102
|
+
##
|
103
|
+
## Incoming, post-handshake peer connections are added by the Server
|
104
|
+
## via calling add_connection; deciding to accept these is the
|
105
|
+
## Controller's responsibility, as is connecting to any new peers.
|
106
|
+
class Controller
|
107
|
+
include EventSource
|
108
|
+
extend AttrReaderQ, MinIntervalMethods
|
109
|
+
|
110
|
+
## general behavior parameters
|
111
|
+
HEARTBEAT = 5 # seconds between iterations of the heartbeat
|
112
|
+
MAX_PEERS = 15 # hard limit on the number of peers
|
113
|
+
ENDGAME_PIECE_THRESH = 5 # (wild guess) number of pieces remaining
|
114
|
+
# before we trigger end-game mode
|
115
|
+
FUSEKI_PIECE_THRESH = 2 # number of pieces we must have before
|
116
|
+
# getting out of fuseki mode. in fuseki
|
117
|
+
# ("opening", if you're not a weiqi/go fan)
|
118
|
+
# mode, rather than ranking pieces by
|
119
|
+
# rarity, we rank them by how distant their
|
120
|
+
# popularity is from (# peers) / 2, and we're
|
121
|
+
# also stingly in handing out requests.
|
122
|
+
SPAWN_NEW_PEER_THRESH = 0.75 # portion of the download rate above
|
123
|
+
# which we'll stop making new peer
|
124
|
+
# connections
|
125
|
+
RATE_WINDOW = 20 # window size (in seconds) of the rate calculation.
|
126
|
+
# presumably this should be the same as the window
|
127
|
+
# used in the RateMeter class.
|
128
|
+
|
129
|
+
## tracker parameters. when we can't access a tracker, we retry at
|
130
|
+
## DEAD_TRACKER_INITIAL_DELAY seconds and double that after every
|
131
|
+
## failure, capping at DEAD_TRACKER_MAX_DELAY.
|
132
|
+
DEAD_TRACKER_INITIAL_INTERVAL = 5
|
133
|
+
DEAD_TRACKER_MAX_INTERVAL = 3600
|
134
|
+
|
135
|
+
## single peer parameters
|
136
|
+
KEEPALIVE_INTERVAL = 120 # seconds of silence before sending a keepalive
|
137
|
+
SILENT_DEATH_INTERVAL = 240 # seconds of silence before we drop a peer
|
138
|
+
BOREDOM_DEATH_INTERVAL = 120 # seconds of existence with no downloaded data
|
139
|
+
# at which we drop a peer in favor of
|
140
|
+
# an incoming peer (unless the package
|
141
|
+
# is complete)
|
142
|
+
|
143
|
+
BLOCK_SIZE = 2**15 # send this size blocks. need to find out more
|
144
|
+
# about this parameter: how does it affect
|
145
|
+
# transfer rates?
|
146
|
+
|
147
|
+
## antisnubbing
|
148
|
+
ANTISNUB_RATE_THRESH = 1024 # if the total bytes/second across all
|
149
|
+
# peers falls below this threshold, we
|
150
|
+
# trigger anti-snubbing mode
|
151
|
+
ANTISNUB_INTERVAL = 60 # seconds of no blocks from a peer before we
|
152
|
+
# add an optimistic unchoke slot when in
|
153
|
+
# anti-snubbing mode.
|
154
|
+
|
155
|
+
## choking and optimistic unchoking parameters
|
156
|
+
NUM_FRIENDS = 4 # number of peers unchoked due to high download rates
|
157
|
+
CALC_FRIENDS_INTERVAL = 10 # seconds between recalculating choked
|
158
|
+
# status for each peer
|
159
|
+
CALC_OPTUNCHOKES_INTERVAL = 30 # seconds between reassigning
|
160
|
+
# optimistic unchoked status
|
161
|
+
NUM_OPTUNCHOKES = 1 # number of optimistic unchoke slots
|
162
|
+
# (not including any temporary ones
|
163
|
+
# generated in anti-snubbing mode.
|
164
|
+
NEW_OPTUNCHOKE_PROB = 0.5 # peers are ranked by the age of
|
165
|
+
# their connection, and optimistic
|
166
|
+
# unchoking slots are given with
|
167
|
+
# probability p*(1-p)^r, where r
|
168
|
+
# is the rank and p is this number.
|
169
|
+
|
170
|
+
attr_accessor :package, :info_hash, :tracker, :ulratelim, :dlratelim,
|
171
|
+
:http_proxy
|
172
|
+
attr_reader_q :running
|
173
|
+
event :trying_peer, :forgetting_peer, :added_peer, :removed_peer,
|
174
|
+
:received_block, :sent_block, :have_piece, :discarded_piece,
|
175
|
+
:tracker_connected, :tracker_lost, :requested_block
|
176
|
+
|
177
|
+
## server: the process-wide Server; package: storage for this torrent;
## info_hash: SHA1 of the metainfo info dict; trackers: candidate
## tracker URLs; dlratelim/ulratelim: bytes/sec caps (nil = unlimited).
def initialize(server, package, info_hash, trackers, dlratelim=nil, ulratelim=nil, http_proxy=ENV["http_proxy"])
  @server = server
  @info_hash = info_hash
  @package = package
  @trackers = trackers
  @http_proxy = http_proxy

  @dlratelim = dlratelim
  @ulratelim = ulratelim

  @peers = [].extend(ArrayShuffle) # live PeerConnections
  @peers_m = Mutex.new             # guards @peers mutation
  @thread = nil                    # heartbeat thread, created in #start

  ## tracker state and dead-tracker backoff
  @tracker = nil
  @last_tracker_attempt = nil
  @tracker_delay = DEAD_TRACKER_INITIAL_INTERVAL

  ## friends (choke-algorithm bookkeeping)
  @num_friends = 0
  @num_optunchokes = 0
  @num_snubbed = 0

  ## keep track of the popularity of the pieces so as to assign
  ## blocks optimally to peers.
  @piece_order = PieceOrder.new @package

  @running = false
end
|
206
|
+
|
207
|
+
## total download rate (bytes/sec) summed over all peers
def dlrate; @peers.inject(0) { |acc, peer| acc + peer.dlrate }; end
## total upload rate (bytes/sec) summed over all peers
def ulrate; @peers.inject(0) { |acc, peer| acc + peer.ulrate }; end
## total bytes downloaded, summed over all peers
def dlamt; @peers.inject(0) { |acc, peer| acc + peer.dlamt }; end
## total bytes uploaded, summed over all peers
def ulamt; @peers.inject(0) { |acc, peer| acc + peer.ulamt }; end
## number of current peer connections
def num_peers; @peers.length; end
|
212
|
+
|
213
|
+
## Begin operation: look for a tracker, spawn the heartbeat thread and
## start any peers registered before we were running. Returns self;
## raises if already running.
def start
  raise "already" if @running

  find_tracker

  @in_endgame = false
  @in_antisnub = false
  @in_fuseki = false
  @running = true
  ## heartbeat: one scheduling/choking step every HEARTBEAT seconds
  @thread = Thread.new do
    while @running
      step
      sleep HEARTBEAT
    end
  end

  @peers.each { |p| p.start unless p.running? }

  self
end
|
233
|
+
|
234
|
+
## Stop the heartbeat, tell the tracker we're leaving (best-effort),
## then shut down every peer connection. Returns self.
def shutdown
  @running = false
  begin
    ## the original used a modifier rescue ("... rescue TrackerError"),
    ## in which TrackerError is the *result value* on error, not a
    ## filter -- it silently swallowed every StandardError. Rescue the
    ## intended class explicitly instead.
    @tracker.stopped unless @tracker.nil?
  rescue TrackerError
    ## tracker unreachable at shutdown -- nothing useful to do
  end
  @thread.join(0.2)
  @peers.each { |c| c.shutdown }
  self
end
|
241
|
+
|
242
|
+
def to_s
|
243
|
+
"<#{self.class}: package #{@package}>"
|
244
|
+
end
|
245
|
+
|
246
|
+
## this could be called at any point by the Server, if it receives
|
247
|
+
## incoming peer connections.
|
248
|
+
## Register a new post-handshake PeerConnection. May be called at any
## point by the Server for incoming connections. If we're at capacity
## (and still downloading), either evict a dead/boring peer to make
## room or refuse the newcomer.
def add_peer(p)
  accept = true

  if @peers.length >= MAX_PEERS && !@package.complete?
    ## candidate victim: a stopped connection, or one that has sent us
    ## nothing for BOREDOM_DEATH_INTERVAL seconds
    oldp = @peers.find { |x| !x.running? || ((x.dlamt == 0) && ((Time.now - x.start_time) > BOREDOM_DEATH_INTERVAL)) }

    if oldp
      rt_debug "killing peer for being boring: #{oldp}"
      oldp.shutdown
    else
      rt_debug "too many peers, ignoring #{p}"
      p.shutdown
      accept = false
    end
  end

  if accept
    ## wire the peer's events into this controller
    p.on_event(self, :received_block) { |peer, block| received_block(block, peer) }
    p.on_event(self, :peer_has_piece) { |peer, piece| peer_has_piece(piece, peer) }
    p.on_event(self, :peer_has_pieces) { |peer, bitfield| peer_has_pieces(bitfield, peer) }
    p.on_event(self, :sent_block) { |peer, block| send_event(:sent_block, block, peer.name) }
    p.on_event(self, :requested_block) { |peer, block| send_event(:requested_block, block, peer.name) }

    @peers_m.synchronize do
      @peers.push p
      ## it's important not to call p.start (which triggers the
      ## bitfield message) until it's been added to @peers, such that
      ## any :have messages that might happen from other peers in
      ## the mean time are propagated to it.
      ##
      ## of course that means we need to call p.start within the
      ## mutex context so that the reaper section of the heartbeat
      ## doesn't kill it between push and start.
      ##
      ## ah, the joys of threaded programming.
      p.start if @running
    end

    send_event(:added_peer, p.name)
  end
end
|
289
|
+
|
290
|
+
## Called when `peer` delivers a block. In endgame mode the same block
## is cancelled at every other peer. When the block completes a piece,
## verify the piece's SHA1 and either announce it to everyone or
## discard it.
def received_block(block, peer)
  if @in_endgame
    @peers_m.synchronize { @peers.each { |p| p.cancel block if p.running? && (p != peer) } }
  end
  send_event(:received_block, block, peer.name)

  piece = @package.pieces[block.pindex] # find corresponding piece
  return unless piece.complete?

  if piece.valid?
    @peers_m.synchronize { @peers.each { |other| other.have_piece piece } }
    send_event(:have_piece, piece)
  else
    rt_warning "#{self}: received data for #{piece} does not match SHA1 hash, discarding"
    send_event(:discarded_piece, piece)
    piece.discard
  end
end
|
308
|
+
|
309
|
+
## Event hook: a single peer announced it now has `piece`; bump its
## popularity for rarest-first ordering.
def peer_has_piece(piece, peer)
  @piece_order.inc piece.index
end
|
312
|
+
|
313
|
+
## Event hook: a peer's full bitfield arrived; bump popularity of every
## piece it holds.
def peer_has_pieces(bitfield, peer)
  @piece_order.inc_all bitfield
end
|
316
|
+
|
317
|
+
## yield all desired blocks, in order of desire. called by peers to
|
318
|
+
## refill their queues.
|
319
|
+
## Yield all desired blocks, in order of desire; called by peers to
## refill their request queues. In endgame mode every empty block of
## every incomplete piece is offered (duplicate requests are fine,
## they get cancelled on arrival); otherwise only unclaimed blocks,
## each claimed when the caller's block returns true. In fuseki mode
## at most one block is handed out per call.
def claim_blocks
  @piece_order.each(@in_fuseki, @peers.length) do |i|
    p = @package.pieces[i]
    next if p.complete?
    if @in_endgame
      p.each_empty_block(BLOCK_SIZE) { |b| yield b }
    else
      p.each_unclaimed_block(BLOCK_SIZE) do |b|
        if yield b
          p.claim_block b
          return if @in_fuseki # fuseki shortcut
        end
      end
    end
  end
end
|
336
|
+
|
337
|
+
## Return claimed-but-unfetched blocks to the unclaimed pool (e.g.
## when a peer dies with requests outstanding).
def forget_blocks(blocks)
  blocks.each { |b| @package.pieces[b.pindex].unclaim_block b }
end
|
341
|
+
|
342
|
+
## Snapshot of per-peer statistics for UI display: one hash per
## *running* peer (stopped peers are skipped).
def peer_info
  @peers.map do |p|
    next nil unless p.running?
    {:name => p.name, :seed => p.peer_complete?,
     :dlamt => p.dlamt, :ulamt => p.ulamt,
     :dlrate => p.dlrate, :ulrate => p.ulrate,
     :pending_send => p.pending_send, :pending_recv => p.pending_recv,
     :interested => p.interested?, :peer_interested => p.peer_interested?,
     :choking => p.choking?, :peer_choking => p.peer_choking?,
     :snubbing => p.snubbing?,
     ## pieces we still need that this peer could give us
     :we_desire => @package.pieces.inject(0) do |s, piece|
       s + (!piece.complete? && p.piece_available?(piece.index) ? 1 : 0)
     end,
     ## pieces we have that this peer lacks
     :they_desire => @package.pieces.inject(0) do |s, piece|
       s + (piece.complete? && !p.piece_available?(piece.index) ? 1 : 0)
     end,
     :start_time => p.start_time
    }
  end.compact
end
|
362
|
+
|
363
|
+
private
|
364
|
+
|
365
|
+
## Try to establish a tracker connection, respecting the dead-tracker
## backoff; the actual connection attempts run in a background thread,
## trying each candidate tracker in turn until one answers.
def find_tracker
  return if @tracker || (@last_tracker_attempt && (Time.now - @last_tracker_attempt) < @tracker_delay)

  @last_tracker_attempt = Time.now
  Thread.new do
    @trackers.each do |tracker|
      break if @tracker
      rt_debug "trying tracker #{tracker}"
      tc = TrackerConnection.new(tracker, @info_hash, @package.size, @server.port, @server.id, nil, 50, @http_proxy)
      begin
        @tracker = tc.started
        tc.already_completed if @package.complete?
        @tracker_delay = DEAD_TRACKER_INITIAL_INTERVAL # success resets backoff
        send_event(:tracker_connected, tc.url)
      rescue TrackerError => e
        rt_debug "couldn't connect: #{e.message}"
      end
    end
  end

  ## NOTE(review): this reads @tracker before the thread above has had
  ## a chance to connect, so the backoff doubles even when the attempt
  ## is about to succeed -- apparently tolerated, since success resets
  ## @tracker_delay. Confirm before changing.
  @tracker_delay = [@tracker_delay * 2, DEAD_TRACKER_MAX_INTERVAL].min if @tracker.nil?
  rt_warning "couldn't connect to tracker, next try in #@tracker_delay seconds" if @tracker.nil?
end
|
388
|
+
|
389
|
+
## Try to open one outgoing connection to a not-yet-tried peer from the
## tracker's peer list.
##
## Returns false (and does nothing) unless ALL preconditions hold: we
## have a tracker, fewer than MAX_PEERS connections, an incomplete
## package, fewer than NUM_FRIENDS friends, and -- when a download rate
## limit is set -- a download rate below SPAWN_NEW_PEER_THRESH of it.
##
## The TCP connect happens in a background thread after a short random
## delay; on success the socket is handed to @server.add_connection,
## which (per the inline comment) may ultimately call back into
## add_peer. Returns true whenever the preconditions pass, even when no
## untried peer was found.
def add_a_peer
  return false if @tracker.nil? || (@peers.length >= MAX_PEERS) || @package.complete? || (@num_friends >= NUM_FRIENDS) || (@dlratelim && (dlrate > (@dlratelim * SPAWN_NEW_PEER_THRESH)))

  @tracker.peers.shuffle.each do |peer|
    # rt_debug "]] comparing: #{peer.ip} vs #{@server.ip} and #{peer.port} vs #{@server.port} (tried? #{peer.tried?})"
    ## skip peers we've already tried, and ourselves; the modifier
    ## rescue also skips a peer when any of these calls raise
    next if peer.tried? || ((peer.ip == @server.ip) && (peer.port == @server.port)) rescue next

    peername = "#{peer.ip}:#{peer.port}"
    send_event(:trying_peer, peername)

    Thread.new do # this may ultimately result in a call to add_peer
      sleep rand(10) # stagger outgoing connections
      rt_debug "=> making outgoing connection to #{peername}"
      begin
        peer.tried = true
        socket = TCPSocket.new(peer.ip, peer.port)
        @server.add_connection(peername, self, socket)
      rescue SocketError, SystemCallError, Timeout::Error => e
        rt_debug "couldn't connect to #{peername}: #{e}"
        send_event(:forgetting_peer, peername)
      end
    end
    break # at most one connection attempt per call
  end
  true
end
|
416
|
+
## Push our current transfer statistics to the tracker and re-announce.
##
## No-op when we have no tracker. If the announce raises TrackerError,
## the tracker is dropped and a hunt for a replacement begins.
def refresh_tracker
  return unless @tracker

  ## update the stats we report before re-announcing
  @tracker.downloaded = dlamt
  @tracker.uploaded = ulamt
  @tracker.left = @package.size - @package.bytes_completed

  begin
    @tracker.refresh
  rescue TrackerError
    ## announce failed: forget this tracker and look for another
    send_event(:tracker_lost, @tracker.url)
    @tracker = nil
    find_tracker
  end
end
|
431
|
+
## Recompute the set of "friends": peers we leave unchoked based on
## their transfer rate. While we're still downloading, peers are ranked
## by how fast they send to us (dlrate); once the package is complete,
## by how fast they accept from us (ulrate). Snubbed and dead peers are
## ignored. Peers examined after NUM_FRIENDS interested ones are
## choked; @num_friends ends up as the count of interested live,
## unsnubbed peers.
##
## (The original had two copy-pasted branches differing only in the
## ranking metric; they are folded into one loop parameterized by the
## rate accessor.)
def calc_friends
  @num_friends = 0

  rate = @package.complete? ? :ulrate : :dlrate
  @peers.sort_by { |p| -p.send(rate) }.each do |p|
    next if p.snubbing? || !p.running?
    p.choke = (@num_friends >= NUM_FRIENDS)
    @num_friends += 1 if p.peer_interested?
  end
end
|
448
|
+
## NOTE(review): min_interval is presumably a class-level macro (defined
## elsewhere in this project) that throttles calc_friends to run at most
## once per CALC_FRIENDS_INTERVAL seconds -- confirm at its definition.
min_interval :calc_friends, CALC_FRIENDS_INTERVAL
## Award "optimistic unchokes": unchoke up to NUM_OPTUNCHOKES random
## interested peers regardless of their rate, favoring the most
## recently connected ones. In anti-snubbing mode, @num_optunchokes is
## pre-seeded with a NEGATIVE number (one per fair-weather friend,
## floored at -NUM_FRIENDS), which effectively grants that many extra
## unchoke slots in the loop below.
def calc_optunchokes
  rt_debug "* calculating optimistic unchokes..."
  @num_optunchokes = 0

  if @in_antisnub
    ## count up the number of our fair weather friends: peers who
    ## are interested and whom we're not choking, but who haven't
    ## sent us a block for ANTISNUB_INTERVAL seconds. for each of
    ## these, we add an extra optimistic unchoking slot to our usual
    ## NUM_OPTUNCHOKES slots. in actuality that's the number of
    ## friends PLUS the number of optimistic unchokes who are
    ## snubbing us, but that's not a big deal, as long as we cap the
    ## number of extra slots at NUM_FRIENDS.
    @num_optunchokes -= @peers.inject(0) { |s, p| s + (p.running? && p.peer_interested? && !p.choking? && (Time.now - (p.last_recv_block_time || p.start_time) > ANTISNUB_INTERVAL) ? 1 : 0) }
    @num_optunchokes = [-NUM_FRIENDS, @num_optunchokes].max
    rt_debug "* anti-snubbing mode, #{-@num_optunchokes} extra optimistic unchoke slots"
  end

  ## i love ruby
  ## (newest connections first: sorted by start_time, then reversed)
  @peers.find_all { |p| p.running? }.sort_by { |p| p.start_time }.reverse.each do |p|
    break if @num_optunchokes >= NUM_OPTUNCHOKES
    next if p.snubbing?
    # rt_debug "* considering #{p}: #{p.peer_interested?} and #{@num_optunchokes < NUM_OPTUNCHOKES} and #{rand(0.999) < NEW_OPTUNCHOKE_PROB}"
    ## each candidate wins a slot with probability NEW_OPTUNCHOKE_PROB
    if p.peer_interested? && (rand < NEW_OPTUNCHOKE_PROB)
      rt_debug " #{p}: awarded optimistic unchoke"
      p.choke = false
      @num_optunchokes += 1
    end
  end
end
|
480
|
+
## NOTE(review): presumably throttles calc_optunchokes to at most one
## run per CALC_OPTUNCHOKES_INTERVAL seconds via the min_interval macro
## defined elsewhere in this project -- confirm at its definition.
min_interval :calc_optunchokes, CALC_OPTUNCHOKES_INTERVAL
|
482
|
+
## the "heartbeat". all time-based actions are triggered here.
## Each tick: update the antisnub/fuseki/endgame mode flags, maintain
## the tracker connection and the peer set, iterate the choking policy,
## send keepalives, and apportion this tick's bandwidth among peers
## under the configured rate limits.
def step
  ## see if we should be in antisnubbing mode
  ## (download rate below ANTISNUB_RATE_THRESH while still incomplete)
  if !@package.complete? && (dlrate < ANTISNUB_RATE_THRESH)
    rt_debug "= dl rate #{dlrate} < #{ANTISNUB_RATE_THRESH}, in antisnub mode" if !@in_antisnub
    @in_antisnub = true
  else
    rt_debug "= dl rate #{dlrate} >= #{ANTISNUB_RATE_THRESH}, out of antisnub mode" if @in_antisnub
    @in_antisnub = false
  end

  ## see if we should be in fuseki mode
  ## (fewer than FUSEKI_PIECE_THRESH pieces completed so far)
  if !@package.complete? && (@package.pieces_completed < FUSEKI_PIECE_THRESH)
    rt_debug "= num pieces #{@package.pieces_completed} < #{FUSEKI_PIECE_THRESH}, in fuseki mode" if !@in_fuseki
    @in_fuseki = true
  else
    rt_debug "= num pieces #{@package.pieces_completed} >= #{FUSEKI_PIECE_THRESH}, out of fuseki mode" if @in_fuseki
    @in_fuseki = false
  end

  ## see if we should be in endgame mode
  ## (ENDGAME_PIECE_THRESH or fewer pieces left to go)
  if @package.complete?
    rt_debug "= left endgame mode" if @in_endgame
    @in_endgame = false
  elsif (@package.pieces.length - @package.pieces_completed) <= ENDGAME_PIECE_THRESH
    rt_debug "= have #{@package.pieces_completed} pieces, in endgame mode"
    @in_endgame = true
  end

  # puts " heartbeat: dlrate #{(dlrate / 1024.0).round}kb/s (lim #{(@dlratelim ? (@dlratelim / 1024.0).round : 'none')}) ulrate #{(ulrate / 1024.0).round}kb/s (lim #{(@ulratelim ? (@ulratelim / 1024.0).round : 'none')}) endgame? #@in_endgame antisnubbing? #@in_antisnub fuseki? #@in_fuseki"
  # @package.pieces.each do |p|
  #   next if p.complete? || !p.started?
  #   l1 = 0
  #   p.each_unclaimed_block(9999999) { |b| l1 += b.length }
  #   l2 = 0
  #   p.each_empty_block(9999999) { |b| l2 += b.length }
  #   puts " heartbeat: #{p.index}: #{l1} unclaimed bytes, #{l2} unfilled bytes"
  # end

  ## find a tracker if we aren't already connected to one
  find_tracker if @tracker.nil?

  if @package.complete? # if package is complete...
    ## kill all peers who are complete as well, as per bram's client
    @peers.each { |p| p.shutdown if p.peer_complete? }
    @tracker.completed unless @tracker.nil? || @tracker.sent_completed?
    ## reopen all files as readonly (dunno why, just seems like a
    ## good idea)
    @package.reopen_ro unless @package.ro?
  end

  ## kill any silent connections, and anyone who hasn't sent or
  ## received data in a long time.
  @peers_m.synchronize do
    @peers.each do |p|
      next unless p.running?
      if ((Time.now - (p.last_send_time || p.start_time)) > SILENT_DEATH_INTERVAL)
        rt_warning "shutting down peer #{p} for silence/boredom"
        p.shutdown
      end
    end
  end

  ## discard any dead connections
  ## (the `&& begin ... true end` runs the burial side effects and
  ## still yields true to delete_if for every non-running peer)
  @peers_m.synchronize do
    @peers.delete_if do |p|
      !p.running? && begin
        p.unregister_events self
        @piece_order.dec_all p.peer_pieces
        rt_debug "burying corpse of #{p}"
        send_event(:removed_peer, p)
        true
      end
    end
  end

  ## get more peers from the tracker, if all of the following are true:
  ## a) the package is incomplete (i.e. we're downloading, not uploading)
  ## b) we're connected to a tracker
  ## c) we've tried all the peers we've gotten so far
  ## d) the tracker hasn't already reported the maximum number of peers
  if !@package.complete? && @tracker && (@tracker.peers.inject(0) { |s, p| s + (p.tried? ? 0 : 1) } == 0) && (@tracker.numwant <= @tracker.peers.length)
    rt_debug "* getting more peers from the tracker"
    @tracker.numwant += 50
    unless @tracker.in_force_refresh
      Thread.new do
        begin
          @tracker.force_refresh
        rescue TrackerError
          ## best-effort: a failed refresh is retried on a later tick
        end
      end
    end
  end

  ## add peer if necessary
  3.times { add_a_peer } # there's no place like home


  ## iterate choking policy
  calc_friends
  calc_optunchokes

  ## this is needed. sigh.
  ## NOTE(review): a bare `break` is only legal inside a loop or block;
  ## this suggests the body runs textually inside a loop (or relies on
  ## old-Ruby behavior) -- confirm before refactoring this method.
  break unless @running

  ## send keepalives
  @peers_m.synchronize { @peers.each { |p| p.send_keepalive if p.running? && p.last_send_time && ((Time.now - p.last_send_time) > KEEPALIVE_INTERVAL) } }

  ## now we apportion our bandwidth amongst all the peers. we'll go
  ## through them at random, dump everything we can, and move on iff
  ## we don't expect to hit our bandwidth cap.
  ## (limits are byte budgets for this tick: cap * window minus what
  ## the current rate already accounts for; nil means unlimited)
  dllim = @dlratelim.nil? ? nil : (@dlratelim.to_f * (RATE_WINDOW.to_f + HEARTBEAT)) - (dlrate.to_f * RATE_WINDOW)
  ullim = @ulratelim.nil? ? nil : (@ulratelim.to_f * (RATE_WINDOW.to_f + HEARTBEAT)) - (ulrate.to_f * RATE_WINDOW)
  dl = ul = 0
  @peers.shuffle.each do |p|
    break if (dllim && (dl >= dllim)) || (ullim && (ul >= ullim))
    if p.running?
      pdl, pul = p.send_blocks_and_reqs(dllim && (dllim - dl), ullim && (ullim - ul))
      dl += pdl
      ul += pul
    end
  end

  ## refresh tracker stats
  refresh_tracker if @tracker
end
|
608
|
+
end
|
609
|
+
|
610
|
+
end
|