firehose 0.1.1 → 0.2.alpha.2
- data/.env.sample +10 -0
- data/.gitignore +2 -0
- data/Procfile +1 -1
- data/README.md +117 -11
- data/config/rainbows.rb +20 -0
- data/firehose.gemspec +9 -6
- data/lib/assets/flash/firehose/WebSocketMain.swf +0 -0
- data/lib/assets/javascripts/firehose.js.coffee +4 -1
- data/lib/assets/javascripts/firehose/consumer.js.coffee +3 -11
- data/lib/assets/javascripts/firehose/lib/jquery.cors.headers.js.coffee +16 -0
- data/lib/assets/javascripts/firehose/lib/swfobject.js +4 -0
- data/lib/assets/javascripts/firehose/lib/web_socket.js +389 -0
- data/lib/assets/javascripts/firehose/long_poll.js.coffee +42 -39
- data/lib/assets/javascripts/firehose/transport.js.coffee +1 -1
- data/lib/assets/javascripts/firehose/web_socket.js.coffee +8 -14
- data/lib/firehose.rb +12 -17
- data/lib/firehose/channel.rb +84 -0
- data/lib/firehose/cli.rb +57 -13
- data/lib/firehose/client.rb +92 -0
- data/lib/firehose/default.rb +2 -2
- data/lib/firehose/logging.rb +35 -0
- data/lib/firehose/producer.rb +1 -0
- data/lib/firehose/publisher.rb +56 -4
- data/lib/firehose/rack.rb +37 -120
- data/lib/firehose/rack/consumer_app.rb +143 -0
- data/lib/firehose/rack/ping_app.rb +84 -0
- data/lib/firehose/rack/publisher_app.rb +40 -0
- data/lib/firehose/server.rb +48 -0
- data/lib/firehose/subscriber.rb +54 -0
- data/lib/firehose/swf_policy_request.rb +23 -0
- data/lib/firehose/version.rb +2 -2
- data/lib/rainbows_em_swf_policy.rb +33 -0
- data/lib/thin_em_swf_policy.rb +26 -0
- data/spec/integrations/integration_test_helper.rb +16 -0
- data/spec/integrations/rainbows_spec.rb +7 -0
- data/spec/integrations/shared_examples.rb +111 -0
- data/spec/integrations/thin_spec.rb +5 -79
- data/spec/lib/channel_spec.rb +164 -0
- data/spec/lib/client_spec.rb +9 -0
- data/spec/lib/default_spec.rb +2 -2
- data/spec/lib/publisher_spec.rb +82 -0
- data/spec/lib/rack/consumer_app_spec.rb +11 -0
- data/spec/lib/rack/ping_app_spec.rb +28 -0
- data/spec/lib/rack/publisher_app_spec.rb +26 -0
- data/spec/lib/subscriber_spec.rb +69 -0
- data/spec/spec_helper.rb +49 -8
- metadata +114 -45
- data/config.ru +0 -6
- data/lib/firehose/subscription.rb +0 -77
data/lib/firehose/default.rb
CHANGED
data/lib/firehose/logging.rb
ADDED
@@ -0,0 +1,35 @@
+# Sets up logging
+
+require 'logger'
+
+module Firehose
+  def self.logger
+    @logger ||= Logger.new($stdout)
+  end
+
+  def self.logger=(logger)
+    @logger = logger
+  end
+
+  self.logger.level = if ENV['LOG_LEVEL']
+    Logger.const_get(ENV['LOG_LEVEL'].upcase)
+  else
+    case ENV['RACK_ENV']
+    when 'test' then Logger::ERROR
+    when 'development' then Logger::DEBUG
+    else Logger::INFO
+    end
+  end
+
+  # TODO: Provide some way to allow this to be configured via an ENV variable.
+  self.logger.formatter = lambda do |severity, time, name, msg|
+    out_time = time.utc.strftime "%Y-%m-%d %H:%M:%S.%L"
+    "[#{out_time} ##$$] #{severity} : #{msg}\n"
+  end
+end
+
+EM::Hiredis.logger = Firehose.logger
+
+# stdout gets "lost" in Foreman if this isn't here
+# https://github.com/ddollar/foreman/wiki/Missing-Output
+$stdout.sync = true if ENV['RACK_ENV'] == 'development' || ENV['SYNC_LOGGING']
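For embedders, the logging hooks above boil down to a module-level reader and writer. A minimal sketch of overriding the default $stdout logger from an application (nothing here beyond what the diff defines, aside from the choice to log to $stderr):

    require 'logger'
    require 'firehose'

    # Route Firehose's internal logging somewhere other than the default $stdout logger.
    Firehose.logger = Logger.new($stderr)
    Firehose.logger.level = Logger::WARN

Alternatively, setting LOG_LEVEL (e.g. LOG_LEVEL=DEBUG) or RACK_ENV picks the level automatically at load time, as the diff shows.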
data/lib/firehose/producer.rb
CHANGED
data/lib/firehose/publisher.rb
CHANGED
@@ -1,13 +1,65 @@
 module Firehose
   class Publisher
-
-
-
+    MAX_MESSAGES = 100
+    TTL = 60*60*24 # 1 day of time, yay!
+    PAYLOAD_DELIMITER = "\n"
+
+    def publish(channel_key, message)
+      # TODO hi-redis isn't that awesome... we have to setup an errback per even for wrong
+      # commands because of the lack of a method_missing whitelist. Perhaps implement a whitelist in
+      # em-hiredis or us a diff lib?
+      deferrable = EM::DefaultDeferrable.new
+      deferrable.errback {|e| EM.next_tick { raise e } }
+
+      # DRY up keys a little bit for the epic publish command to come.
+      list_key = key(channel_key, :list)
+      sequence_key = key(channel_key, :sequence)
+
+      redis.eval(%(local current_sequence = redis.call('get', KEYS[1])
+          if (current_sequence == nil) or (current_sequence == false)
+          then
+            current_sequence = 0
+          end
+          local sequence = current_sequence + 1
+          redis.call('set', KEYS[1], sequence)
+          redis.call('expire', KEYS[1], #{TTL})
+          redis.call('lpush', KEYS[2], "#{lua_escape(message)}")
+          redis.call('ltrim', KEYS[2], 0, #{MAX_MESSAGES - 1})
+          redis.call('expire', KEYS[2], #{TTL})
+          redis.call('publish', KEYS[3], "#{lua_escape(channel_key + PAYLOAD_DELIMITER)}" .. sequence .. "#{lua_escape(PAYLOAD_DELIMITER + message)}")
+          return sequence
+        ), 3, sequence_key, list_key, key(:channel_updates)).
+        errback{|e| deferrable.fail e }.
+        callback do |sequence|
+          Firehose.logger.debug "Redis stored/published `#{message}` to list `#{list_key}` with sequence `#{sequence}`"
+          deferrable.succeed
+        end
+
+      deferrable
     end
 
     private
+    def key(*segments)
+      segments.unshift(:firehose).join(':')
+    end
+
     def redis
       @redis ||= EM::Hiredis.connect
     end
+
+    def self.to_payload(channel_key, sequence, message)
+      [channel_key, sequence, message].join(PAYLOAD_DELIMITER)
+    end
+
+    def self.from_payload(payload)
+      payload.split(PAYLOAD_DELIMITER, method(:to_payload).arity)
+    end
+
+    # TODO: Make this FAR more robust. Ideally we'd whitelist the permitted
+    # characters and then escape or remove everything else.
+    # See: http://en.wikibooks.org/wiki/Lua_Programming/How_to_Lua/escape_sequence
+    def lua_escape(str)
+      str.gsub(/\\/,'\\\\\\').gsub(/"/,'\"').gsub(/\n/,'\n').gsub(/\r/,'\r')
+    end
   end
-end
+end
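To make the new Publisher API concrete, here is a rough usage sketch. It assumes an EventMachine reactor and a local Redis server are running; the channel name and message are invented for illustration, and real clients would normally go through the HTTP publisher endpoint instead:

    require 'eventmachine'
    require 'em-hiredis'
    require 'firehose'

    EM.run do
      publisher = Firehose::Publisher.new
      # #publish returns an EM deferrable; it succeeds once the Lua script has
      # stored the message in the channel's Redis list and broadcast it on the
      # firehose:channel_updates pub/sub channel.
      publisher.publish('/my/channel', 'hello world').callback { EM.stop }
    end

With the key helper above, the message lands in the Redis list `firehose:/my/channel:list` and bumps the counter at `firehose:/my/channel:sequence`, both expiring after `TTL` seconds.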
data/lib/firehose/rack.rb
CHANGED
@@ -1,142 +1,59 @@
-require 'rack/websocket'
-
 module Firehose
   module Rack
-
-
-
+    autoload :ConsumerApp, 'firehose/rack/consumer_app'
+    autoload :PublisherApp, 'firehose/rack/publisher_app'
+    autoload :PingApp, 'firehose/rack/ping_app'
+
+    # Evented web servers recognize this as a response deferral.
+    ASYNC_RESPONSE = [-1, {}, []].freeze
+
+    # Normally we'd want to use a custom header to reduce the likelihood of some
+    # HTTP middleware clobbering the value. But Safari seems to ignore our CORS
+    # header instructions, so we are using 'pragma' because it is always allowed.
+    LAST_MESSAGE_SEQUENCE_HEADER = 'pragma'
+    RACK_LAST_MESSAGE_SEQUENCE_HEADER = "HTTP_#{LAST_MESSAGE_SEQUENCE_HEADER.upcase.gsub('-', '_')}"
+    # Don't cache in development mode
+    CORS_OPTIONS_MAX_AGE = ENV['RACK_ENV'] == 'development' ? '1' : '1728000'
+
+    # Allows the publisher and consumer to be mounted on the same port.
+    class App
       def call(env)
-
-
-
+        # Cache the parsed request so we don't need to re-parse it when we pass
+        # control onto another app.
+        req = env['parsed_request'] ||= ::Rack::Request.new(env)
         method = req.request_method
-        timeout = 30
-        queue_name = "#{cid}@#{path}"
-
-        # TODO seperate out CORS logic as an async middleware with a Goliath web server.
-        cors_origin = env['HTTP_ORIGIN']
-        cors_headers = {
-          'Access-Control-Allow-Origin' => cors_origin,
-          'Access-Control-Allow-Methods' => 'GET',
-          'Access-Control-Max-Age' => '1728000',
-          'Access-Control-Allow-Headers' => 'Content-Type, User-Agent, If-Modified-Since, Cache-Control'
-        }
 
         case method
-        # GET is how clients subscribe to the queue. When a messages comes in, we flush out a response,
-        # close down the requeust, and the client then reconnects.
-        when 'GET'
-          EM.next_tick do
-            # If the request is a CORS request, return those headers, otherwise don't worry 'bout it
-            response_headers = cors_origin ? cors_headers : {}
-
-            # Setup a subscription with a client id. We haven't subscribed yet here.
-            if queue = queues[queue_name]
-              queue.live
-            else
-              queue = queues[queue_name] = Firehose::Subscription::Queue.new(cid, path)
-            end
-
-            # Setup a timeout timer to tell clients that time out that everything is OK
-            # and they should come back for more
-            long_poll_timer = EM::Timer.new(timeout) do
-              # We send a 204 OK to tell the client to reconnect.
-              env['async.callback'].call [204, response_headers, []]
-              Firehose.logger.debug "HTTP wait `#{cid}@#{path}` timed out"
-            end
-
-            # Ok, now subscribe to the subscription.
-            queue.pop do |message, subscription|
-              long_poll_timer.cancel # Turn off the heart beat so we don't execute any of that business.
-              env['async.callback'].call [200, response_headers, [message]]
-              Firehose.logger.debug "HTTP sent `#{message}` to `#{cid}@#{path}`"
-            end
-            Firehose.logger.debug "HTTP subscribed to `#{cid}@#{path}`"
-
-            # Unsubscribe from the subscription if its still open and something bad happened
-            # or the heart beat triggered before we could finish.
-            env['async.close'].callback do
-              # Kill queue if we don't hear back in 30s
-              queue.kill timeout do
-                Firehose.logger.debug "Deleting queue to `#{queue_name}`"
-                queues.delete queue_name
-              end
-              Firehose.logger.debug "HTTP connection `#{cid}@#{path}` closing"
-            end
-          end
-
-          # Tell the web server that this will be an async response.
-          Firehose::Rack::AsyncResponse
-
-        # PUT is how we throw messages on to the fan-out queue.
         when 'PUT'
-
-
-
-
-          [202, {}, []]
+          publisher.call(env)
+        when 'HEAD'
+          ping.call(env)
         else
-
-          [501, {'Content-Type' => 'text/plain'}, ["#{method} not supported."]]
+          consumer.call(env)
         end
       end
 
-      private
-      def publisher
-        @publisher ||= Firehose::Publisher.new
-      end
 
-
-
-
-    end
-
-    class WebSocket < ::Rack::WebSocket::Application
-      attr_reader :cid, :path, :subscription
-
-      # Subscribe to a path and make some magic happen, mmkmay?
-      def on_open(env)
-        req = ::Rack::Request.new(env)
-        @cid = req.params['cid']
-        @path = req.path
-        @subscription = Firehose::Subscription.new(cid, path)
-
-        subscription.subscribe do |message, subscription|
-          Firehose.logger.debug "WS sent `#{message}` to `#{cid}@#{path}`"
-          send_data message
-        end
-        Firehose.logger.debug "WS subscribed to `#{cid}@#{path}`"
+      private
+      def publisher
+        @publisher ||= PublisherApp.new
       end
 
-
-
-        subscription.unsubscribe
-        Firehose.logger.debug "WS connection `#{cid}@#{path}` closing"
+      def consumer
+        @consumer ||= ConsumerApp.new
       end
 
-
-
-        Firehose.logger.error "WS connection `#{cid}@#{path}` error `#{error}`: #{error.backtrace}"
+      def ping
+        @ping ||= PingApp.new
       end
     end
 
-
-
-
-
-
-      private
-      def websocket
-        WebSocket.new
-      end
-
-      def http_long_poll
-        @http_long_poll ||= HttpLongPoll.new
-      end
-
-      def websocket_request?(env)
-        env['HTTP_UPGRADE'] =~ /websocket/i
+    module Helpers
+      # Calculates the content length for you
+      def response(status, body='', headers={})
+        headers = {'Content-Length' => body.size.to_s}.merge(headers)
+        [status, headers, [body]]
       end
     end
   end
-end
+end
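A plausible rackup sketch for mounting the combined endpoint (illustrative only; the gem's CLI and Procfile may wire the server up differently):

    # config.ru -- illustrative, not taken from the gem
    require 'firehose'
    require 'firehose/rack'

    # Firehose::Rack::App routes PUT requests to the publisher, HEAD requests
    # to the ping/health check, and everything else (long polls, WebSocket
    # upgrades) to the consumer, so one port serves all three roles.
    run Firehose::Rack::App.new

It needs an async-capable server such as Thin or Rainbows!, since the consumer and ping apps reply via `env['async.callback']` and return `ASYNC_RESPONSE`.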
data/lib/firehose/rack/consumer_app.rb
ADDED
@@ -0,0 +1,143 @@
+require 'faye/websocket'
+
+module Firehose
+  module Rack
+    class ConsumerApp
+      def call(env)
+        websocket_request?(env) ? websocket.call(env) : http_long_poll.call(env)
+      end
+
+      private
+      def websocket
+        WebSocket.new
+      end
+
+      def http_long_poll
+        @http_long_poll ||= HttpLongPoll.new
+      end
+
+      def websocket_request?(env)
+        Faye::WebSocket.websocket?(env)
+      end
+
+      class HttpLongPoll
+        include Firehose::Rack::Helpers
+
+        # How long should we wait before closing out the consuming clients web connection
+        # for long polling? Most browsers timeout after a connection has been idle for 30s.
+        TIMEOUT = 20
+
+        def call(env)
+          req = env['parsed_request'] ||= ::Rack::Request.new(env)
+          path = req.path
+          method = req.request_method
+          # Get the Last Message Sequence from the query string.
+          # Ideally we'd use an HTTP header, but android devices don't let us
+          # set any HTTP headers for CORS requests.
+          last_sequence = req.params['last_message_sequence'].to_i
+
+          case method
+          # GET is how clients subscribe to the queue. When a messages comes in, we flush out a response,
+          # close down the requeust, and the client then reconnects.
+          when 'GET'
+            Firehose.logger.debug "HTTP GET with last_sequence #{last_sequence} for path #{path} with query #{env["QUERY_STRING"].inspect} and params #{req.params.inspect}"
+            EM.next_tick do
+
+              if last_sequence < 0
+                env['async.callback'].call response(400, "Header '#{LAST_MESSAGE_SEQUENCE_HEADER}' may not be less than zero", response_headers(env))
+              else
+                Channel.new(path).next_message(last_sequence, :timeout => TIMEOUT).callback do |message, sequence|
+                  env['async.callback'].call response(200, message, response_headers(env).merge(LAST_MESSAGE_SEQUENCE_HEADER => sequence.to_s))
+                end.errback do |e|
+                  if e == :timeout
+                    env['async.callback'].call response(204, '', response_headers(env))
+                  else
+                    Firehose.logger.error "Unexpected error when trying to GET last_sequence #{last_sequence} for path #{path}: #{e.inspect}"
+                    env['async.callback'].call response(500, 'Unexpected error', response_headers(env))
+                  end
+                end
+              end
+
+            end
+
+            # Tell the web server that this will be an async response.
+            ASYNC_RESPONSE
+
+          else
+            Firehose.logger.debug "HTTP #{method} not supported"
+            response(501, "#{method} not supported.")
+          end
+        end
+
+
+        private
+
+        # If the request is a CORS request, return those headers, otherwise don't worry 'bout it
+        def response_headers(env)
+          cors_origin(env) ? cors_headers(env) : {}
+        end
+
+        def cors_origin(env)
+          env['HTTP_ORIGIN']
+        end
+
+        def cors_headers(env)
+          # TODO seperate out CORS logic as an async middleware with a Goliath web server.
+          {
+            'Access-Control-Allow-Origin' => cors_origin(env),
+            'Access-Control-Expose-Headers' => LAST_MESSAGE_SEQUENCE_HEADER
+          }
+        end
+      end
+
+
+      # It _may_ be more memory efficient if we used the same instance of this
+      # class (or even if we just used a proc/lambda) for every
+      # request/connection. However, we couldn't use instance variables, and
+      # so I'd need to confirm that local variables would be accessible from
+      # the callback blocks.
+      class WebSocket
+        def call(env)
+          req = ::Rack::Request.new(env)
+          @path = req.path
+          ws = Faye::WebSocket.new(env)
+
+          ws.onopen = lambda do |event|
+            Firehose.logger.debug "WS subscribed to `#{@path}`"
+
+            subscribe = Proc.new do |last_sequence|
+              @channel = Channel.new(@path)
+              @deferrable = @channel.next_message(last_sequence).callback do |message, sequence|
+                Firehose.logger.debug "WS sent `#{message}` to `#{@path}` with sequence `#{sequence}`"
+                ws.send message
+                subscribe.call(sequence)
+              end.errback { |e| EM.next_tick { raise e.inspect } unless e == :disconnect }
+            end
+
+            subscribe.call nil
+          end
+
+          #ws.onmessage = lambda do |event|
+          #  event.data
+          #end
+
+          ws.onclose = lambda do |event|
+            if @deferrable
+              @deferrable.fail :disconnect
+              @channel.unsubscribe(@deferrable) if @channel
+            end
+            Firehose.logger.debug "WS connection `#{@path}` closing. Code: #{event.code.inspect}; Reason #{event.reason.inspect}"
+          end
+
+          ws.onerror = lambda do |event|
+            Firehose.logger.error "WS connection `#{@path}` error `#{error}`: #{error.backtrace}"
+          end
+
+
+          # Return async Rack response
+          ws.rack_response
+        end
+      end
+    end
+  end
+end
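For reference, a small long-poll client sketch against the HttpLongPoll behaviour above. The host, port and channel path are assumptions; the `last_message_sequence` query parameter and the `pragma` response header come straight from the diff:

    require 'net/http'
    require 'uri'

    uri = URI('http://localhost:7474/my/channel')  # hypothetical endpoint
    last_sequence = 0

    loop do
      uri.query = URI.encode_www_form('last_message_sequence' => last_sequence)
      res = Net::HTTP.get_response(uri)
      case res.code.to_i
      when 200
        # The sequence of the delivered message comes back in the 'pragma' header.
        last_sequence = res['pragma'].to_i
        puts "got #{res.body.inspect} at sequence #{last_sequence}"
      when 204
        # Long poll timed out with nothing new; reconnect and wait again.
      else
        raise "unexpected response: #{res.code}"
      end
    end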
data/lib/firehose/rack/ping_app.rb
ADDED
@@ -0,0 +1,84 @@
+module Firehose
+  module Rack
+    class PingApp
+      attr_reader :redis
+
+      def initialize(redis=nil)
+        @redis = redis
+      end
+
+      def call(env)
+        PingCheck.new(env, redis).call
+        ASYNC_RESPONSE
+      end
+
+
+      # Encapsulate this in a class so we aren't passing a bunch of variables around
+      class PingCheck
+        include Firehose::Rack::Helpers
+
+        attr_reader :req, :env, :key, :redis
+
+        TEST_VALUE = 'Firehose Healthcheck Test Value'
+        SECONDS_TO_EXPIRE = 60
+
+        def self.redis
+          @redis ||= EM::Hiredis.connect
+        end
+
+        def initialize(env, redis=nil)
+          @redis = redis || self.class.redis
+          @env = env
+          @req = env['parsed_request'] ||= ::Rack::Request.new(env)
+          @key = "/firehose/ping/#{Time.now.to_i}/#{rand}"
+        end
+
+        def call
+          log req, 'started'
+          test_redis
+        end
+
+
+        private
+
+        def log(req, msg)
+          Firehose.logger.debug "HTTP PING request for path '#{req.path}': #{msg}"
+        end
+
+        def test_redis
+          redis.set(key, TEST_VALUE).
+            callback { expire_key }.
+            callback { read_and_respond }.
+            errback do |e|
+              log req, "failed with write value to redis: #{e.inspect}"
+              env['async.callback'].call response(500)
+            end
+        end
+
+        def expire_key
+          redis.expire(key, SECONDS_TO_EXPIRE).
+            errback do
+              log req, "failed to expire key #{key.inspect}. If this key is not manually deleted, it may cause a memory leak."
+            end
+        end
+
+        def read_and_respond
+          redis.get(key).
+            callback do |val|
+              if val == TEST_VALUE
+                log req, 'succeeded'
+                env['async.callback'].call response(200)
+              else
+                log req, "failed with unexpected value retrieved from redis: #{val.inspect}"
+                env['async.callback'].call response(500)
+              end
+            end.
+            errback do |e|
+              log req, "failed with read value from redis: #{e.inspect}"
+              env['async.callback'].call response(500)
+            end
+        end
+      end
+    end
+  end
+end
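Because Firehose::Rack::App sends HEAD requests to PingApp, a health check can be as simple as the sketch below (host and port are assumptions):

    require 'net/http'

    res = Net::HTTP.new('localhost', 7474).head('/')
    # 200 means the Redis set/get round trip succeeded; 500 means it failed.
    puts(res.code.to_i == 200 ? 'firehose is healthy' : 'firehose health check failed')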