murder 0.0.0.pre
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +1 -0
- data/LICENSE +17 -0
- data/README +224 -0
- data/Rakefile +52 -0
- data/VERSION +1 -0
- data/dist/BitTornado/BT1/Choker.py +128 -0
- data/dist/BitTornado/BT1/Connecter.py +288 -0
- data/dist/BitTornado/BT1/Downloader.py +594 -0
- data/dist/BitTornado/BT1/DownloaderFeedback.py +155 -0
- data/dist/BitTornado/BT1/Encrypter.py +333 -0
- data/dist/BitTornado/BT1/FileSelector.py +245 -0
- data/dist/BitTornado/BT1/Filter.py +12 -0
- data/dist/BitTornado/BT1/HTTPDownloader.py +251 -0
- data/dist/BitTornado/BT1/NatCheck.py +95 -0
- data/dist/BitTornado/BT1/PiecePicker.py +320 -0
- data/dist/BitTornado/BT1/Rerequester.py +426 -0
- data/dist/BitTornado/BT1/Statistics.py +177 -0
- data/dist/BitTornado/BT1/Storage.py +584 -0
- data/dist/BitTornado/BT1/StorageWrapper.py +1045 -0
- data/dist/BitTornado/BT1/StreamCheck.py +135 -0
- data/dist/BitTornado/BT1/T2T.py +193 -0
- data/dist/BitTornado/BT1/Uploader.py +145 -0
- data/dist/BitTornado/BT1/__init__.py +1 -0
- data/dist/BitTornado/BT1/btformats.py +100 -0
- data/dist/BitTornado/BT1/fakeopen.py +89 -0
- data/dist/BitTornado/BT1/makemetafile.py +263 -0
- data/dist/BitTornado/BT1/track.py +1067 -0
- data/dist/BitTornado/ConfigDir.py +401 -0
- data/dist/BitTornado/ConfigReader.py +1068 -0
- data/dist/BitTornado/ConnChoice.py +31 -0
- data/dist/BitTornado/CreateIcons.py +105 -0
- data/dist/BitTornado/CurrentRateMeasure.py +37 -0
- data/dist/BitTornado/HTTPHandler.py +167 -0
- data/dist/BitTornado/PSYCO.py +5 -0
- data/dist/BitTornado/RateLimiter.py +153 -0
- data/dist/BitTornado/RateMeasure.py +75 -0
- data/dist/BitTornado/RawServer.py +195 -0
- data/dist/BitTornado/ServerPortHandler.py +188 -0
- data/dist/BitTornado/SocketHandler.py +375 -0
- data/dist/BitTornado/__init__.py +63 -0
- data/dist/BitTornado/bencode.py +319 -0
- data/dist/BitTornado/bitfield.py +162 -0
- data/dist/BitTornado/clock.py +27 -0
- data/dist/BitTornado/download_bt1.py +882 -0
- data/dist/BitTornado/inifile.py +169 -0
- data/dist/BitTornado/iprangeparse.py +194 -0
- data/dist/BitTornado/launchmanycore.py +381 -0
- data/dist/BitTornado/natpunch.py +254 -0
- data/dist/BitTornado/parseargs.py +137 -0
- data/dist/BitTornado/parsedir.py +150 -0
- data/dist/BitTornado/piecebuffer.py +86 -0
- data/dist/BitTornado/selectpoll.py +109 -0
- data/dist/BitTornado/subnetparse.py +218 -0
- data/dist/BitTornado/torrentlistparse.py +38 -0
- data/dist/BitTornado/zurllib.py +100 -0
- data/dist/murder_client.py +291 -0
- data/dist/murder_make_torrent.py +46 -0
- data/dist/murder_tracker.py +28 -0
- data/doc/examples/Capfile +28 -0
- data/lib/capistrano/recipes/deploy/strategy/murder.rb +52 -0
- data/lib/murder.rb +43 -0
- data/lib/murder/admin.rb +47 -0
- data/lib/murder/murder.rb +121 -0
- data/murder.gemspec +101 -0
- metadata +129 -0
data/.gitignore
ADDED
@@ -0,0 +1 @@
+pkg/*
data/LICENSE
ADDED
@@ -0,0 +1,17 @@
+This software is licensed under the Apache 2 license, quoted below.
+
+Copyright 2010 Twitter, Inc.
+Copyright 2010 Larry Gadea <lg@twitter.com>
+Copyright 2010 Matt Freels <freels@twitter.com>
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not
+use this file except in compliance with the License. You may obtain a copy of
+the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations under
+the License.
data/README
ADDED
@@ -0,0 +1,224 @@
+Murder by Larry Gadea and Matt Freels
+Copyright 2010 Twitter Inc.
+
+DESCRIPTION
+-----------
+
+Murder is a method of using Bittorrent to distribute files to a large number
+of servers within a production environment. This allows for scalable and fast
+deploys in environments of hundreds to tens of thousands of servers where
+centralized distribution systems wouldn't otherwise function. A "Murder" is
+normally used to refer to a flock of crows, which in this case applies to a
+bunch of servers doing something.
+
+In order to do a Murder transfer, there are several components required to be
+set up beforehand -- many the result of the BitTorrent nature of the system. Murder
+is based on BitTornado.
+
+- A torrent tracker. This tracker, started by running the 'murder_tracker.py'
+script, runs a self-contained server on one machine. Although technically this
+is still a centralized system (everyone relying on this tracker), the
+communication between this server and the rest is minimal and normally
+acceptable. To keep things simple, tracker-less distribution (DHT) is currently
+not supported. The tracker is actually just a mini-httpd that hosts a
+/announce path which the Bittorrent clients update their state onto.
+
+- A seeder. This is the server which has the files that you'd like to deploy
+onto all other servers. For Twitter, this is the server that did the git diff.
+The files are placed into a directory that a torrent gets created from. Murder
+will tgz up the directory and create a .torrent file (a very small file
+containing basic hash information about the tgz file). This .torrent file lets
+the peers know what they're downloading. The tracker keeps track of which
+.torrent files are currently being distributed. Once a Murder transfer is
+started, the seeder will be the first server many machines go to to get
+pieces. These pieces will then be distributed in a tree-fashion to the rest of
+the network, but without necessarily getting the parts from the seeder.
+
+- Peers. This is the group of servers (hundreds to tens of thousands) which
+will be receiving the files and distributing the pieces amongst themselves.
+Once a peer is done downloading the entire tgz file, it will continue seeding
+for a while to prevent a hotspot effect on the seeder.
+
+MURDER TRANSFER PROCESS
+-----------------------
+
+1. Configure the list of servers and general settings in config.rb. (one time)
+2. Distribute the Murder files to all your servers: (one time)
+     cap murder:distribute_files
+3. Start the tracker: (one time)
+     cap murder:start_tracker
+4. Create a torrent file from a remote directory of files (on seeder):
+     cap murder:create_torrent tag="Deploy20100101" files_path="~/files"
+5. Start seeding the files:
+     cap murder:start_seeding tag="Deploy20100101"
+6. Distribute the files to all servers:
+     cap murder:peer tag="Deploy20100101" destination_path="/tmp/out"
+
+Once completed, all files will be in /tmp/out/Deploy20100101/ on all servers.
+
+EXAMPLE DEPLOY
+--------------
+
+cap murder:distribute_files
+cap murder:start_tracker
+cap murder:create_torrent tag="Deploy20100101"
+    files_path="/usr/local/twitter/production/current"
+cap murder:start_seeding tag="Deploy20100101"
+time cap murder:peer tag="Deploy20100101"
+    destination_path="/usr/local/twitter/releases"
+
+All the files have been transferred to all servers at this point. To clean up,
+use:
+
+cap murder:stop_seeding
+cap murder:stop_tracker
+
+HOW TO LEARN MURDER INCREMENTALLY
+---------------------------------
+
+- Skim dist/*.py to see what Murder does behind the scenes
+- Experiment with the python scripts
+- Read Capfile
+- Read murder_config.rb
+- Read murder_admin.rb
+- Read murder.rb
+- Read the reference below
+- Experiment with the Capistrano deploy methods
+- Read and experiment with the Murder Capistrano strategy
+
+TASK REFERENCE
+--------------
+
+distribute_files:
+  SCPs a compressed version of all files from ./dist (the python Bittorrent
+library and custom scripts) to all servers. The entire directory is sent,
+regardless of the role of each individual server. The path on the server is
+specified by remote_murder_path and will be cleared prior to transferring
+files over.
+
+start_tracker:
+  Starts the Bittorrent tracker (essentially a mini-web-server) listening on
+port 8998.
+
+stop_tracker:
+  If the Bittorrent tracker is running, this will kill the process. Note that
+if it is not running you will receive an error.
+
+create_torrent:
+  Compresses the directory specified by the passed-in argument 'files_path'
+and creates a .torrent file identified by the 'tag' argument. Be sure to use
+the same 'tag' value with any following commands. Any .git directories will be
+skipped. Once completed, the .torrent will be downloaded to your local
+/tmp/TAG.tgz.torrent.
+
+download_torrent:
+  Although not necessary to run, if the file from create_torrent was lost, you
+can redownload it from the seeder using this task. You must specify a valid
+'tag' argument.
+
+start_seeding:
+  Will cause the seeder machine to connect to the tracker and start seeding.
+The ip address returned by the 'host' bash command will be announced to the
+tracker. The server will not stop seeding until the stop_seeding task is
+called. You must specify a valid 'tag' argument (which identifies the .torrent
+in /tmp to use).
+
+stop_seeding:
+  If the seeder is currently seeding, this will kill the process. Note that if
+it is not running, you will receive an error. If a peer was downloading from
+this seed, the peer will find another host to receive any remaining data. You
+must specify a valid 'tag' argument.
+
+stop_all_seeding:
+  Identical to stop_seeding, except this will kill all seeding processes. No
+'tag' argument is needed.
+
+peer:
+  Instructs all the peer servers to connect to the tracker and start downloading
+and spreading pieces and files amongst themselves. You must specify a valid
+'tag' argument. Once the download is complete on a server, that server will
+fork the download process and seed for 30 seconds while returning control to
+Capistrano. Cap will then extract the files to the passed-in
+'destination_path' argument, into destination_path/TAG/*. To not create this tag
+named directory, pass in the 'no_tag_directory=1' argument. If the directory
+is not empty, this command will fail. To clean it, pass in the
+'unsafe_please_delete=1' argument. The compressed tgz in /tmp is never
+removed. When this task completes, all files have been transferred and moved
+into the requested directory.
+
+stop_all_peering:
+  Sometimes peers can go on forever (usually because of an error). This
+command will forcibly kill all "murder_client.py peer" commands that are
+running.
+
+CONFIG REFERENCE:
+
+user:
+  Provided by Capistrano; set this to use a different username when sshing
+into machines.
+
+host_suffix:
+  For the tracker_host, seeder_host and peers servers, this suffix will always
+be added to the end when trying to connect to them.
+
+default_tag:
+  A tag name to use by default such that a tag parameter doesn't need to be
+manually entered on every task. Not recommended to be used since files will be
+overwritten.
+
+default_seeder_files_path:
+  A path on the seeder's file system where the files to be distributed are
+stored.
+
+default_destination_path:
+  A path on the peers' file system where the files that were distributed
+should be decompressed into.
+
+tracker_host:
+  The hostname of a single tracker server. If you're using a host_suffix, do
+not specify a suffix here.
+
+tracker_port:
+  The port that the mini-web-server tracker run by the Bittorrent libraries
+listens on. Must be 8998 for now.
+
+seeder_host:
+  The hostname of a single seeder server. This server is the one that has all
+the files that the peers want. If you're using a host_suffix, do not specify a
+suffix here.
+
+peers:
+  A list of peers which will be receiving the files and distributing them
+amongst themselves. If you're using a host_suffix, do not specify a suffix
+here.
+
+CAPISTRANO COPY STRATEGY
+------------------------
+
+In addition to being usable from both the commandline and Capistrano commands,
+an optional Capistrano copy strategy is included to help easily retrofit
+existing deploy environments you might already have.
+
+Requirements to use build.rb:
+- Add a require line for build.rb in your Capfile. This will override the
+  default build strategy. There are some other parameters you must now
+  specify though.
+- To enable it, add the following to your Capfile:
+    load 'deploy'
+    set :strategy, Capistrano::Deploy::Strategy::Build.new(self)
+- Add "set :murder, true" to your Capfile
+- Make sure your release_name is correct (should be already)
+- If your distribution is a file instead of a directory,
+  add "distribution_is_a_file" to your Capfile
+- Add the following to your Capfile:
+    set :build_task do
+      "true"
+    end
+    set :copy_compression, :gz
+    set :package_name do
+      "#{release_name}-#{real_revision[0, 8]}.tar.#{copy_compression}"
+    end
+    set :branch, variables[:branch] || 'deploy'
+
+The requirements are a bit hefty for now, though hopefully this will get easier
+in the future.
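
Editorial aside (not part of the package contents above): the README says murder:create_torrent leaves a copy of the torrent at /tmp/TAG.tgz.torrent on your local machine. A minimal Python 2 sketch for inspecting such a file with the bundled BitTornado bencode module might look like the following; the path and tag are hypothetical, and the keys read ('announce', 'info', 'name', 'piece length', 'pieces') are standard BitTorrent metainfo fields rather than anything Murder-specific.

    # inspect_torrent.py -- illustrative sketch only; assumes dist/ is on PYTHONPATH
    from hashlib import sha1
    from BitTornado.bencode import bdecode, bencode

    data = open('/tmp/Deploy20100101.tgz.torrent', 'rb').read()   # hypothetical tag
    meta = bdecode(data)
    info = meta['info']

    print 'announce URL:', meta['announce']          # e.g. http://<tracker_host>:8998/announce
    print 'payload name:', info['name']              # the tgz the peers will download
    print 'piece length:', info['piece length']
    print 'piece count: ', len(info['pieces']) / 20  # one 20-byte SHA1 per piece
    print 'info hash:   ', sha1(bencode(info)).hexdigest()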
data/Rakefile
ADDED
@@ -0,0 +1,52 @@
+require 'rubygems'
+require 'rake'
+
+begin
+  require 'jeweler'
+  Jeweler::Tasks.new do |gem|
+    gem.name = "murder"
+    gem.summary = %Q{Large scale server deploys using BitTorrent and the BitTornado library}
+    gem.description = %Q{Large scale server deploys using BitTorrent and the BitTornado library}
+    gem.email = "lg@twitter.com"
+    gem.homepage = "http://github.com/lg/murder"
+    gem.authors = ["Larry Gadea"]
+    # gem is a Gem::Specification... see http://www.rubygems.org/read/chapter/20 for additional settings
+  end
+  Jeweler::GemcutterTasks.new
+rescue LoadError
+  puts "Jeweler (or a dependency) not available. Install it with: gem install jeweler"
+end
+
+# require 'rake/testtask'
+# Rake::TestTask.new(:test) do |test|
+#   test.libs << 'lib' << 'test'
+#   test.pattern = 'test/**/test_*.rb'
+#   test.verbose = true
+# end
+
+# begin
+#   require 'rcov/rcovtask'
+#   Rcov::RcovTask.new do |test|
+#     test.libs << 'test'
+#     test.pattern = 'test/**/test_*.rb'
+#     test.verbose = true
+#   end
+# rescue LoadError
+#   task :rcov do
+#     abort "RCov is not available. In order to run rcov, you must: sudo gem install spicycode-rcov"
+#   end
+# end
+
+# task :test => :check_dependencies
+
+# task :default => :test
+
+# require 'rake/rdoctask'
+# Rake::RDocTask.new do |rdoc|
+#   version = File.exist?('VERSION') ? File.read('VERSION') : ""
+
+#   rdoc.rdoc_dir = 'rdoc'
+#   rdoc.title = "murder #{version}"
+#   rdoc.rdoc_files.include('README*')
+#   rdoc.rdoc_files.include('lib/**/*.rb')
+# end
data/VERSION
ADDED
@@ -0,0 +1 @@
+0.0.0.pre
data/dist/BitTornado/BT1/Choker.py
ADDED
@@ -0,0 +1,128 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BitTornado.clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Choker:
+    def __init__(self, config, schedule, picker, done = lambda: False):
+        self.config = config
+        self.round_robin_period = config['round_robin_period']
+        self.schedule = schedule
+        self.picker = picker
+        self.connections = []
+        self.last_preferred = 0
+        self.last_round_robin = clock()
+        self.done = done
+        self.super_seed = False
+        self.paused = False
+        schedule(self._round_robin, 5)
+
+    def set_round_robin_period(self, x):
+        self.round_robin_period = x
+
+    def _round_robin(self):
+        self.schedule(self._round_robin, 5)
+        if self.super_seed:
+            cons = range(len(self.connections))
+            to_close = []
+            count = self.config['min_uploads']-self.last_preferred
+            if count > 0: # optimization
+                shuffle(cons)
+            for c in cons:
+                i = self.picker.next_have(self.connections[c], count > 0)
+                if i is None:
+                    continue
+                if i < 0:
+                    to_close.append(self.connections[c])
+                    continue
+                self.connections[c].send_have(i)
+                count -= 1
+            for c in to_close:
+                c.close()
+        if self.last_round_robin + self.round_robin_period < clock():
+            self.last_round_robin = clock()
+            for i in xrange(1, len(self.connections)):
+                c = self.connections[i]
+                u = c.get_upload()
+                if u.is_choked() and u.is_interested():
+                    self.connections = self.connections[i:] + self.connections[:i]
+                    break
+        self._rechoke()
+
+    def _rechoke(self):
+        preferred = []
+        maxuploads = self.config['max_uploads']
+        if self.paused:
+            for c in self.connections:
+                c.get_upload().choke()
+            return
+        if maxuploads > 1:
+            for c in self.connections:
+                u = c.get_upload()
+                if not u.is_interested():
+                    continue
+                if self.done():
+                    r = u.get_rate()
+                else:
+                    d = c.get_download()
+                    r = d.get_rate()
+                    if r < 1000 or d.is_snubbed():
+                        continue
+                preferred.append((-r, c))
+            self.last_preferred = len(preferred)
+            preferred.sort()
+            del preferred[maxuploads-1:]
+            preferred = [x[1] for x in preferred]
+        count = len(preferred)
+        hit = False
+        to_unchoke = []
+        for c in self.connections:
+            u = c.get_upload()
+            if c in preferred:
+                to_unchoke.append(u)
+            else:
+                if count < maxuploads or not hit:
+                    to_unchoke.append(u)
+                    if u.is_interested():
+                        count += 1
+                        hit = True
+                else:
+                    u.choke()
+        for u in to_unchoke:
+            u.unchoke()
+
+    def connection_made(self, connection, p = None):
+        if p is None:
+            p = randrange(-2, len(self.connections) + 1)
+        self.connections.insert(max(p, 0), connection)
+        self._rechoke()
+
+    def connection_lost(self, connection):
+        self.connections.remove(connection)
+        self.picker.lost_peer(connection)
+        if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def interested(self, connection):
+        if not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def not_interested(self, connection):
+        if not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def set_super_seed(self):
+        while self.connections: # close all connections
+            self.connections[0].close()
+        self.picker.set_superseed()
+        self.super_seed = True
+
+    def pause(self, flag):
+        self.paused = flag
+        self._rechoke()
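
Editorial aside (not part of the package contents above): the selection in Choker._rechoke builds (-rate, connection) pairs so that a plain ascending sort puts the fastest interested peers first, then trims the list to max_uploads - 1 entries; the loop that follows hands the remaining slot to the first other interested connection, which acts roughly as the optimistic unchoke. A small illustrative Python 2 sketch of that idiom, on made-up peer names and rates:

    maxuploads = 4
    rates = {'peer-a': 1200, 'peer-b': 45000, 'peer-c': 800,
             'peer-d': 9000, 'peer-e': 3000}        # bytes/sec, hypothetical

    preferred = []
    for peer, rate in rates.items():
        if rate < 1000:                  # same cutoff _rechoke applies to download rates
            continue
        preferred.append((-rate, peer))  # negate so sort() yields fastest first
    preferred.sort()
    del preferred[maxuploads - 1:]       # keep max_uploads - 1 slots for the fastest peers
    preferred = [p for _, p in preferred]
    print preferred                      # ['peer-b', 'peer-d', 'peer-e']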
data/dist/BitTornado/BT1/Connecter.py
ADDED
@@ -0,0 +1,288 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.bitfield import Bitfield
+from BitTornado.clock import clock
+from binascii import b2a_hex
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+def toint(s):
+    return long(b2a_hex(s), 16)
+
+def tobinary(i):
+    return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
+        chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+CHOKE = chr(0)
+UNCHOKE = chr(1)
+INTERESTED = chr(2)
+NOT_INTERESTED = chr(3)
+# index
+HAVE = chr(4)
+# index, bitfield
+BITFIELD = chr(5)
+# index, begin, length
+REQUEST = chr(6)
+# index, begin, piece
+PIECE = chr(7)
+# index, begin, piece
+CANCEL = chr(8)
+
+class Connection:
+    def __init__(self, connection, connecter):
+        self.connection = connection
+        self.connecter = connecter
+        self.got_anything = False
+        self.next_upload = None
+        self.outqueue = []
+        self.partial_message = None
+        self.download = None
+        self.send_choke_queued = False
+        self.just_unchoked = None
+
+    def get_ip(self, real=False):
+        return self.connection.get_ip(real)
+
+    def get_id(self):
+        return self.connection.get_id()
+
+    def get_readable_id(self):
+        return self.connection.get_readable_id()
+
+    def close(self):
+        if DEBUG:
+            print 'connection closed'
+        self.connection.close()
+
+    def is_locally_initiated(self):
+        return self.connection.is_locally_initiated()
+
+    def send_interested(self):
+        self._send_message(INTERESTED)
+
+    def send_not_interested(self):
+        self._send_message(NOT_INTERESTED)
+
+    def send_choke(self):
+        if self.partial_message:
+            self.send_choke_queued = True
+        else:
+            self._send_message(CHOKE)
+            self.upload.choke_sent()
+            self.just_unchoked = 0
+
+    def send_unchoke(self):
+        if self.send_choke_queued:
+            self.send_choke_queued = False
+            if DEBUG:
+                print 'CHOKE SUPPRESSED'
+        else:
+            self._send_message(UNCHOKE)
+            if ( self.partial_message or self.just_unchoked is None
+                 or not self.upload.interested or self.download.active_requests ):
+                self.just_unchoked = 0
+            else:
+                self.just_unchoked = clock()
+
+    def send_request(self, index, begin, length):
+        self._send_message(REQUEST + tobinary(index) +
+            tobinary(begin) + tobinary(length))
+        if DEBUG:
+            print 'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length)
+
+    def send_cancel(self, index, begin, length):
+        self._send_message(CANCEL + tobinary(index) +
+            tobinary(begin) + tobinary(length))
+        if DEBUG:
+            print 'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length)
+
+    def send_bitfield(self, bitfield):
+        self._send_message(BITFIELD + bitfield)
+
+    def send_have(self, index):
+        self._send_message(HAVE + tobinary(index))
+
+    def send_keepalive(self):
+        self._send_message('')
+
+    def _send_message(self, s):
+        s = tobinary(len(s))+s
+        if self.partial_message:
+            self.outqueue.append(s)
+        else:
+            self.connection.send_message_raw(s)
+
+    def send_partial(self, bytes):
+        if self.connection.closed:
+            return 0
+        if self.partial_message is None:
+            s = self.upload.get_upload_chunk()
+            if s is None:
+                return 0
+            index, begin, piece = s
+            self.partial_message = ''.join((
+                tobinary(len(piece) + 9), PIECE,
+                tobinary(index), tobinary(begin), piece.tostring() ))
+            if DEBUG:
+                print 'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece))
+
+        if bytes < len(self.partial_message):
+            self.connection.send_message_raw(self.partial_message[:bytes])
+            self.partial_message = self.partial_message[bytes:]
+            return bytes
+
+        q = [self.partial_message]
+        self.partial_message = None
+        if self.send_choke_queued:
+            self.send_choke_queued = False
+            self.outqueue.append(tobinary(1)+CHOKE)
+            self.upload.choke_sent()
+            self.just_unchoked = 0
+        q.extend(self.outqueue)
+        self.outqueue = []
+        q = ''.join(q)
+        self.connection.send_message_raw(q)
+        return len(q)
+
+    def get_upload(self):
+        return self.upload
+
+    def get_download(self):
+        return self.download
+
+    def set_download(self, download):
+        self.download = download
+
+    def backlogged(self):
+        return not self.connection.is_flushed()
+
+    def got_request(self, i, p, l):
+        self.upload.got_request(i, p, l)
+        if self.just_unchoked:
+            self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
+            self.just_unchoked = 0
+
+
+
+
+class Connecter:
+    def __init__(self, make_upload, downloader, choker, numpieces,
+            totalup, config, ratelimiter, sched = None):
+        self.downloader = downloader
+        self.make_upload = make_upload
+        self.choker = choker
+        self.numpieces = numpieces
+        self.config = config
+        self.ratelimiter = ratelimiter
+        self.rate_capped = False
+        self.sched = sched
+        self.totalup = totalup
+        self.rate_capped = False
+        self.connections = {}
+        self.external_connection_made = 0
+
+    def how_many_connections(self):
+        return len(self.connections)
+
+    def connection_made(self, connection):
+        c = Connection(connection, self)
+        self.connections[connection] = c
+        c.upload = self.make_upload(c, self.ratelimiter, self.totalup)
+        c.download = self.downloader.make_download(c)
+        self.choker.connection_made(c)
+        return c
+
+    def connection_lost(self, connection):
+        c = self.connections[connection]
+        del self.connections[connection]
+        if c.download:
+            c.download.disconnected()
+        self.choker.connection_lost(c)
+
+    def connection_flushed(self, connection):
+        conn = self.connections[connection]
+        if conn.next_upload is None and (conn.partial_message is not None
+               or len(conn.upload.buffer) > 0):
+            self.ratelimiter.queue(conn)
+
+    def got_piece(self, i):
+        for co in self.connections.values():
+            co.send_have(i)
+
+    def got_message(self, connection, message):
+        c = self.connections[connection]
+        t = message[0]
+        if t == BITFIELD and c.got_anything:
+            connection.close()
+            return
+        c.got_anything = True
+        if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
+                len(message) != 1):
+            connection.close()
+            return
+        if t == CHOKE:
+            c.download.got_choke()
+        elif t == UNCHOKE:
+            c.download.got_unchoke()
+        elif t == INTERESTED:
+            if not c.download.have.complete():
+                c.upload.got_interested()
+        elif t == NOT_INTERESTED:
+            c.upload.got_not_interested()
+        elif t == HAVE:
+            if len(message) != 5:
+                connection.close()
+                return
+            i = toint(message[1:])
+            if i >= self.numpieces:
+                connection.close()
+                return
+            if c.download.got_have(i):
+                c.upload.got_not_interested()
+        elif t == BITFIELD:
+            try:
+                b = Bitfield(self.numpieces, message[1:])
+            except ValueError:
+                connection.close()
+                return
+            if c.download.got_have_bitfield(b):
+                c.upload.got_not_interested()
+        elif t == REQUEST:
+            if len(message) != 13:
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                connection.close()
+                return
+            c.got_request(i, toint(message[5:9]),
+                toint(message[9:]))
+        elif t == CANCEL:
+            if len(message) != 13:
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                connection.close()
+                return
+            c.upload.got_cancel(i, toint(message[5:9]),
+                toint(message[9:]))
+        elif t == PIECE:
+            if len(message) <= 9:
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                connection.close()
+                return
+            if c.download.got_piece(i, toint(message[5:9]), message[9:]):
+                self.got_piece(i)
+        else:
+            connection.close()
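
Editorial aside (not part of the package contents above): the constants and Connection._send_message in Connecter.py implement the standard BitTorrent peer wire format -- a 4-byte big-endian length prefix, a one-byte message type, then the payload. A self-contained Python 2 sketch, reusing the tobinary/toint helpers from the file above, showing how a REQUEST message is framed and why got_message checks for exactly 13 bytes:

    from binascii import b2a_hex

    def tobinary(i):                      # copied from Connecter.py above
        return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
                chr((i >> 8) & 0xFF) + chr(i & 0xFF))

    def toint(s):                         # copied from Connecter.py above
        return long(b2a_hex(s), 16)

    REQUEST = chr(6)

    # REQUEST carries index, begin, length: 1 type byte + 3 * 4 bytes = 13 bytes
    payload = REQUEST + tobinary(7) + tobinary(16384) + tobinary(32768)
    frame = tobinary(len(payload)) + payload          # what _send_message puts on the wire

    print len(payload)                                # 13
    print toint(frame[:4])                            # 13 -- the length prefix
    print toint(payload[1:5]), toint(payload[5:9]), toint(payload[9:])   # 7 16384 32768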