htttee 0.5.0
- data/.gitignore +6 -0
- data/Gemfile +2 -0
- data/README.md +105 -0
- data/Rakefile +14 -0
- data/bin/htttee +39 -0
- data/bin/htttee-exec +18 -0
- data/config.ru +6 -0
- data/deploy/after_restart.rb +1 -0
- data/deploy/before_restart.rb +4 -0
- data/deploy/before_symlink.rb +2 -0
- data/deploy/cookbooks/cloudkick-plugins/recipes/default.rb +8 -0
- data/deploy/cookbooks/cloudkick-plugins/recipes/resque.rb +18 -0
- data/deploy/cookbooks/cloudkick-plugins/templates/default/resque.sh.erb +4 -0
- data/deploy/cookbooks/god/recipes/default.rb +50 -0
- data/deploy/cookbooks/god/templates/default/config.erb +3 -0
- data/deploy/cookbooks/god/templates/default/god-inittab.erb +3 -0
- data/deploy/cookbooks/main/attributes/owner_name.rb +3 -0
- data/deploy/cookbooks/main/attributes/recipes.rb +1 -0
- data/deploy/cookbooks/main/libraries/dnapi.rb +7 -0
- data/deploy/cookbooks/main/recipes/default.rb +2 -0
- data/deploy/cookbooks/nginx/files/default/chunkin-nginx-module-v0.22rc1.zip +0 -0
- data/deploy/cookbooks/nginx/files/default/nginx-1.0.0.tar.gz +0 -0
- data/deploy/cookbooks/nginx/recipes/default.rb +2 -0
- data/deploy/cookbooks/nginx/recipes/install.rb +0 -0
- data/deploy/cookbooks/nginx/templates/default/nginx.conf.erb +41 -0
- data/deploy/cookbooks/resque/recipes/default.rb +47 -0
- data/deploy/cookbooks/resque/templates/default/resque.rb.erb +73 -0
- data/deploy/cookbooks/resque/templates/default/resque.yml.erb +3 -0
- data/deploy/cookbooks/resque/templates/default/resque_scheduler.rb.erb +73 -0
- data/deploy/solo.rb +7 -0
- data/htttee.gemspec +30 -0
- data/lib/htttee.rb +0 -0
- data/lib/htttee/client.rb +16 -0
- data/lib/htttee/client/consumer.rb +49 -0
- data/lib/htttee/client/ext/net/http.rb +27 -0
- data/lib/htttee/server.rb +74 -0
- data/lib/htttee/server/api.rb +201 -0
- data/lib/htttee/server/chunked_body.rb +22 -0
- data/lib/htttee/server/ext/em-redis.rb +49 -0
- data/lib/htttee/server/ext/thin.rb +4 -0
- data/lib/htttee/server/ext/thin/connection.rb +8 -0
- data/lib/htttee/server/ext/thin/deferrable_body.rb +24 -0
- data/lib/htttee/server/ext/thin/deferred_request.rb +31 -0
- data/lib/htttee/server/ext/thin/deferred_response.rb +4 -0
- data/lib/htttee/server/middleware/async_fixer.rb +22 -0
- data/lib/htttee/server/middleware/dechunker.rb +63 -0
- data/lib/htttee/server/mock.rb +69 -0
- data/lib/htttee/server/pubsub_redis.rb +78 -0
- data/lib/htttee/version.rb +5 -0
- data/spec/client_spec.rb +126 -0
- data/spec/helpers/rackup.rb +15 -0
- data/spec/spec_helper.rb +21 -0
- metadata +201 -0
data/lib/htttee/server/chunked_body.rb
ADDED
@@ -0,0 +1,22 @@
+module EY
+  module Tea
+    module Server
+      class ChunkedBody < Thin::DeferrableBody
+        def call(body)
+          body.each do |fragment|
+            @body_callback.call(chunk(fragment))
+          end
+        end
+
+        def succeed(*a)
+          @body_callback.call("0\r\n\r\n")
+          super
+        end
+
+        def chunk(fragment)
+          "#{fragment.size.to_s(16)}\r\n#{fragment}\r\n"
+        end
+      end
+    end
+  end
+end
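For reference, `chunk` above produces standard HTTP/1.1 chunked transfer framing: each fragment is prefixed with its byte size in hex plus CRLF, and `succeed` appends the zero-length terminating chunk. A minimal standalone sketch of the same framing (plain Ruby, no Thin or EventMachine involved):

    # Same framing as ChunkedBody#chunk, shown outside the class.
    def chunk(fragment)
      "#{fragment.size.to_s(16)}\r\n#{fragment}\r\n"
    end

    chunk("Hello, World!")               # => "d\r\nHello, World!\r\n" (13 bytes => "d")
    chunk("Hello, World!") + "0\r\n\r\n"  # a complete single-chunk body plus terminator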
data/lib/htttee/server/ext/em-redis.rb
ADDED
@@ -0,0 +1,49 @@
+module EventMachine
+  module Protocols
+    module Redis
+
+      def pipeline(*commands, &blk)
+        command = ''
+
+        commands.each do |argv|
+          command << "*#{argv.size}\r\n"
+          argv.each do |a|
+            a = a.to_s
+            command << "$#{get_size(a)}\r\n"
+            command << a
+            command << "\r\n"
+          end
+        end
+
+        maybe_lock do
+          commands.map {|c| c.first }.each do |command_name|
+            @redis_callbacks << [REPLY_PROCESSOR[command_name], blk]
+          end
+          send_data command
+        end
+      end
+
+      def multi(*command_groups, &blk)
+
+        command = "*1\r\n$5\r\nMULTI\r\n"
+
+        command_groups.each do |argv|
+          command << "*#{argv.size}\r\n"
+          argv.each do |a|
+            a = a.to_s
+            command << "$#{get_size(a)}\r\n"
+            command << a
+            command << "\r\n"
+          end
+        end
+
+        command << "*1\r\n$4\r\nEXEC\r\n"
+
+        maybe_lock do
+          @redis_callbacks << [REPLY_PROCESSOR['multi'], blk]
+          send_data command
+        end
+      end
+    end
+  end
+end
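Both helpers above hand-assemble the Redis wire protocol: `*<argc>` introduces each command and every argument is sent as `$<byte length>` followed by the value, CRLF-terminated. As a rough illustration (plain Ruby, outside EventMachine; assumes `get_size` returns the argument's byte length, as in em-redis), this is the frame that `pipeline(['set', 'k', 'v'], ['get', 'k'])` would hand to `send_data`:

    commands = [['set', 'k', 'v'], ['get', 'k']]
    frame = commands.map { |argv|
      "*#{argv.size}\r\n" + argv.map { |a| a = a.to_s; "$#{a.size}\r\n#{a}\r\n" }.join
    }.join
    frame # => "*3\r\n$3\r\nset\r\n$1\r\nk\r\n$1\r\nv\r\n*2\r\n$3\r\nget\r\n$1\r\nk\r\n"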
data/lib/htttee/server/ext/thin/deferrable_body.rb
ADDED
@@ -0,0 +1,24 @@
+module Thin
+  class DeferrableBody
+    include EventMachine::Deferrable
+
+    def initialize(initial_body = '')
+      @initial_body = initial_body.to_s
+    end
+
+    def call(body)
+      body.each do |chunk|
+        @body_callback.call(chunk)
+      end
+    end
+
+    def <<(*chunks)
+      call(chunks)
+    end
+
+    def each(&blk)
+      blk.call(@initial_body) unless @initial_body.empty?
+      @body_callback = blk
+    end
+  end
+end
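DeferrableBody follows Thin's async-body pattern: the server calls `each` once to register its chunk callback (flushing any initial body), the producer then pushes data through `<<`, and `succeed` fires the EventMachine deferrable callbacks when the stream ends. A minimal sketch of that interaction, with an array standing in for Thin's connection (assumes the extension above is loaded; `EM::Deferrable` itself needs no running reactor):

    body = Thin::DeferrableBody.new("initial ")

    sink = []
    body.each { |chunk| sink << chunk }  # registers the callback, flushes "initial "

    body << "more "                      # later writes go straight to the callback
    body << "data"
    body.succeed                         # fires callbacks registered via EM::Deferrable

    sink # => ["initial ", "more ", "data"]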
data/lib/htttee/server/ext/thin/deferred_request.rb
ADDED
@@ -0,0 +1,31 @@
+module Thin
+  class DeferredRequest < Request
+    def parse(data)
+      if @parser.finished?  # Header finished, can only be some more body
+        body << data
+      else                  # Parse more header using the super parser
+        @data << data
+        raise InvalidRequest, 'Header longer than allowed' if @data.size > MAX_HEADER
+
+        @nparsed = @parser.execute(@env, @data, @nparsed)
+
+        if @parser.finished?
+          return super(data) unless @env['HTTP_TRANSFER_ENCODING'] == 'chunked'
+
+          _, initial_body = @data.split("\r\n\r\n")
+          initial_body ||= ''
+
+          @body = DeferrableBody.new(initial_body)
+
+          return true # trigger the rack call chain
+        end
+      end
+
+      return false # only trigger the rack call chain once, just after the headers are parsed
+    end
+
+    def env
+      super.merge(RACK_INPUT => body)
+    end
+  end
+end
data/lib/htttee/server/middleware/async_fixer.rb
ADDED
@@ -0,0 +1,22 @@
+
+module EY
+  module Tea
+    module Server
+      class AsyncFixer
+        def initialize(app)
+          @app = app
+        end
+
+        def call(env)
+          tuple = @app.call(env)
+
+          if tuple.first == -1
+            Thin::Connection::AsyncResponse
+          else
+            tuple
+          end
+        end
+      end
+    end
+  end
+end
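AsyncFixer normalizes Thin's async convention: an app that intends to respond later returns a status of -1, and Thin's connection expects the frozen `Thin::Connection::AsyncResponse` tuple (`[-1, {}, []]`) in that case. A hedged sketch of the normalization (assumes the gem and Thin are loaded; the lambda app is hypothetical):

    deferred_app = lambda { |env| [-1, {}, []] }  # "I'll respond via async.callback later"
    fixer = EY::Tea::Server::AsyncFixer.new(deferred_app)

    fixer.call({})                                           # => Thin::Connection::AsyncResponse
    fixer.call({}).equal?(Thin::Connection::AsyncResponse)   # => true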
data/lib/htttee/server/middleware/dechunker.rb
ADDED
@@ -0,0 +1,63 @@
+module EY
+  module Tea
+    module Server
+      class Dechunker
+        def initialize(app)
+          @app = app
+        end
+
+        def call(env)
+          env['rack.input'] = ChunkedBody.new(env['rack.input']) if chunked?(env)
+
+          @app.call(env)
+        end
+
+        def chunked?(env)
+          env['HTTP_TRANSFER_ENCODING'] == 'chunked'
+        end
+
+        class ChunkedBody
+          extend Forwardable
+
+          CRLF = "\r\n"
+
+          attr_reader :input
+
+          def_delegators :input, :callback, :errback
+
+          def initialize(input)
+            @input, @buffer = input, ''
+          end
+
+          def each(&blk)
+            @input.each do |chunk|
+              dechunk(chunk, &blk)
+            end
+          end
+
+          def dechunk(chunk, &blk)
+            @buffer << chunk
+
+            loop do
+              return unless @buffer[CRLF]
+
+              string_length, remainder = @buffer.split(CRLF, 2)
+              length = string_length.to_i(16)
+
+              if length == 0
+                @buffer = ''
+                @input.succeed
+                return
+              elsif remainder.size >= length + 2 # length + CRLF
+                data, @buffer = remainder.split(CRLF, 2)
+                blk.call(data)
+              else
+                return
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
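Dechunker is the inverse of ChunkedBody above: it strips the hex-length framing from a chunked request body before the app reads it. A rough sketch of the parsing loop, with a hypothetical stand-in for the deferrable `rack.input` (only `each` and `succeed` are exercised here):

    # FakeInput is a stand-in for the DeferrableBody normally sitting in rack.input.
    class FakeInput
      def initialize(chunks)
        @chunks = chunks
      end

      def each(&blk)
        @chunks.each(&blk)
      end

      def succeed
        # the real input is an EM deferrable; nothing to do in the sketch
      end
    end

    input = FakeInput.new(["d\r\nHello, World!\r\n", "0\r\n\r\n"])
    body  = EY::Tea::Server::Dechunker::ChunkedBody.new(input)

    out = []
    body.each { |data| out << data }
    out # => ["Hello, World!"]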
data/lib/htttee/server/mock.rb
ADDED
@@ -0,0 +1,69 @@
+require 'rack/mux'
+
+module EY
+  module Tea
+    module Server
+      module Mock
+
+        def self.boot_forking_server
+          o, i = IO.pipe
+
+          if pid = fork
+            at_exit { Process.kill(:SIGTERM, pid) }
+
+            i.close
+            URI.parse(o.read)
+          else
+            o.close
+            process_child(i)
+          end
+        end
+
+        def self.process_child(i)
+          EM.run do
+            client = Rack::Client.new { run EY::Tea::Server.mock_app }
+
+            uri = client.get("/mux-uri").body
+            i << uri
+            i.close
+          end
+
+          exit
+        end
+
+        class ThinMuxer
+          def initialize(app)
+            @app = Rack::Mux.new(async_safe(app), thin_options)
+          end
+
+          def call(env)
+            @app.call(env)
+          end
+
+          def async_safe(app)
+            AsyncFixer.new(app)
+          end
+
+          def thin_options
+            { :server => Thin, :environment => 'none' }
+          end
+        end
+
+        class EchoUri
+
+          def initialize(app)
+            @app = app
+          end
+
+          def call(env)
+            if env['PATH_INFO'] == '/mux-uri'
+              [200, {'Content-Type' => 'text/plain'}, [env['X-Mux-Uri']]]
+            else
+              @app.call(env)
+            end
+          end
+        end
+      end
+    end
+  end
+end
data/lib/htttee/server/pubsub_redis.rb
ADDED
@@ -0,0 +1,78 @@
+module EventMachine
+  module Protocols
+    class PubSubRedis < EventMachine::Connection
+      include Redis
+
+      def subscribe(channel, &block)
+        @pubsub_callback = block
+
+        call_command(['subscribe', channel])
+      end
+
+      def unsubscribe(channel)
+        @pubsub_callback = lambda do |*args|
+          close_connection
+        end
+
+        call_command(['unsubscribe', channel])
+      end
+
+      def dispatch_response(value)
+        if @multibulk_n
+          @multibulk_values << value
+          @multibulk_n -= 1
+
+          if @multibulk_n == 0
+            value = @multibulk_values
+            @multibulk_n, @multibulk_values = @previous_multibulks.pop
+            if @multibulk_n
+              dispatch_response(value)
+              return
+            end
+          else
+            return
+          end
+        end
+
+        @pubsub_callback.call(value)
+      end
+
+      def self.connect(*args)
+        case args.length
+        when 0
+          options = {}
+        when 1
+          arg = args.shift
+          case arg
+          when Hash then options = arg
+          when String then options = {:host => arg}
+          else raise ArgumentError, 'first argument must be Hash or String'
+          end
+        when 2
+          options = {:host => args[0], :port => args[1]}
+        else
+          raise ArgumentError, "wrong number of arguments (#{args.length} for 1)"
+        end
+        options[:host] ||= '127.0.0.1'
+        options[:port] = (options[:port] || 6379).to_i
+        EM.connect options[:host], options[:port], self, options
+      end
+
+      def initialize(options = {})
+        @host = options[:host]
+        @port = options[:port]
+        @db = (options[:db] || 0).to_i
+        @password = options[:password]
+        @logger = options[:logger]
+        @error_callback = lambda do |code|
+          err = RedisError.new
+          err.code = code
+          raise err, "Redis server returned error code: #{code}"
+        end
+
+        # These commands should be first
+        auth_and_select_db
+      end
+    end
+  end
+end
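PubSubRedis is a subscribe-only variant of the em-redis connection: `connect` mirrors em-redis's host/port/option handling, and `dispatch_response` hands each assembled multibulk reply to the block given to `subscribe`. A hedged usage sketch (assumes EventMachine, the em-redis extension above, and a Redis server on localhost; the channel name is made up):

    require 'eventmachine'

    EM.run do
      redis = EventMachine::Protocols::PubSubRedis.connect(:host => '127.0.0.1', :port => 6379)

      # Redis pub/sub replies arrive as ["message", channel, payload] triples
      # (the first reply is the ["subscribe", channel, 1] acknowledgement).
      redis.subscribe('stream:example') do |type, channel, payload|
        puts "#{channel}: #{payload}" if type == 'message'
      end
    end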
data/spec/client_spec.rb
ADDED
@@ -0,0 +1,126 @@
+require 'spec_helper'
+
+describe EY::Tea::Client do
+  subject { @client }
+
+  def new_client
+    EY::Tea::Client.new(:endpoint => EY::Tea::Server.mock_uri.to_s)
+  end
+
+  def run(thread)
+    thread.join(0.1) if thread.alive?
+  end
+
+  it "can stream an IO" do
+    uuid = rand(10_000).to_s
+    o, i = IO.pipe
+    i << "Hello, World!"
+    i.close
+
+    new_client.up(o, uuid)
+
+    body = ''
+    new_client.down(uuid) do |chunk|
+      body << chunk
+    end
+    body.should == 'Hello, World!'
+  end
+
+  it "can stream out before the incoming stream has finished." do
+    scheduler = Thread.current
+    uuid = rand(10_000).to_s
+    o, i = IO.pipe
+    i << 'Hello, '
+
+    up_thread = Thread.new(new_client, o) do |client, reader|
+      run scheduler
+      client.up(reader, uuid)
+    end
+
+    down_thread = Thread.new(new_client, i, scheduler) do |client, writer|
+      run scheduler
+      client.down(uuid) do |chunk|
+        if chunk == 'Hello, '
+          writer << 'World!'
+          writer.close
+          run scheduler
+        end
+      end
+    end
+
+    run up_thread
+    run down_thread
+    run up_thread
+    run down_thread
+  end
+
+  it "streams already received data on an open stream to peers." do
+    scheduler = Thread.current
+    uuid = rand(10_000).to_s
+    o, i = IO.pipe
+
+    i << 'Existing Data'
+
+    up_thread = Thread.new(subject, o) do |client, reader|
+      run scheduler
+      client.up(reader, uuid)
+    end
+
+    down_thread = Thread.new(new_client) do |client|
+      run scheduler
+      Thread.current[:data] = ''
+
+      client.down(uuid) do |chunk|
+        Thread.current[:data] << chunk
+        run scheduler
+      end
+    end
+
+    run up_thread
+    run down_thread
+
+    down_thread[:data].should == 'Existing Data'
+  end
+
+  it "streams the same data to all peers." do
+    scheduler = Thread.current
+    uuid = rand(10_000).to_s
+    o, i = IO.pipe
+
+    up_thread = Thread.new(new_client, o) do |client, reader|
+      client.up(reader, uuid)
+    end
+
+    run up_thread
+
+    down_threads = Array.new(5, new_client).map do |c|
+      Thread.new(c) do |client|
+        Thread.current[:chunks] = []
+        run scheduler
+
+        client.down(uuid) do |chunk|
+          Thread.current[:chunks] << chunk
+          run scheduler
+        end
+      end
+    end
+
+    i << "First Part"
+
+    down_threads.each {|t| run t }
+    down_threads.each {|t| t[:chunks].should == ['First Part'] }
+
+    i << "Second Part"
+
+    down_threads.each {|t| run t }
+    down_threads.each {|t| t[:chunks].should == ['First Part', 'Second Part'] }
+
+    i << "Third Part"
+
+    down_threads.each {|t| run t }
+    down_threads.each {|t| t[:chunks].should == ['First Part', 'Second Part', 'Third Part'] }
+
+    i.close
+  end
+
+end
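Outside the specs, the same client API applies: `up` streams an IO to the server under a chosen UUID, while `down` yields chunks back as they arrive, so even a late consumer sees the whole stream. A hedged end-to-end sketch (the endpoint URL and UUID are made up; assumes a running htttee server):

    require 'htttee/client'

    endpoint = 'http://localhost:9292/'   # made-up URL
    uuid     = 'build-1234'               # made-up stream id

    reader, writer = IO.pipe

    # Producer: one client streams whatever is written to the pipe up to the server.
    producer = Thread.new do
      EY::Tea::Client.new(:endpoint => endpoint).up(reader, uuid)
    end

    writer << "line one\n"
    writer << "line two\n"
    writer.close

    # Consumer: a second client tails the same stream back down, chunk by chunk.
    EY::Tea::Client.new(:endpoint => endpoint).down(uuid) { |chunk| print chunk }

    producer.join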