firehose 1.2.20 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.codeclimate.yml +29 -0
- data/.dockerignore +2 -0
- data/.gitignore +3 -1
- data/.rubocop.yml +1156 -0
- data/.ruby-version +1 -0
- data/.travis.yml +3 -7
- data/CHANGELOG.md +15 -0
- data/Dockerfile +11 -0
- data/Gemfile +4 -2
- data/Procfile.dev +0 -1
- data/README.md +66 -8
- data/Rakefile +43 -32
- data/coffeelint.json +129 -0
- data/docker-compose.yml +17 -0
- data/firehose.gemspec +5 -9
- data/karma.config.coffee +89 -0
- data/lib/assets/javascripts/firehose.js.coffee +1 -2
- data/lib/assets/javascripts/firehose/consumer.js.coffee +18 -2
- data/lib/assets/javascripts/firehose/core.js.coffee +2 -1
- data/lib/assets/javascripts/firehose/long_poll.js.coffee +69 -8
- data/lib/assets/javascripts/firehose/multiplexed_consumer.js.coffee +74 -0
- data/lib/assets/javascripts/firehose/transport.js.coffee +4 -2
- data/lib/assets/javascripts/firehose/web_socket.js.coffee +51 -5
- data/lib/firehose/cli.rb +2 -1
- data/lib/firehose/client/producer.rb +10 -4
- data/lib/firehose/rack/consumer.rb +39 -0
- data/lib/firehose/rack/consumer/http_long_poll.rb +118 -45
- data/lib/firehose/rack/consumer/web_socket.rb +133 -28
- data/lib/firehose/rack/ping.rb +1 -1
- data/lib/firehose/rack/publisher.rb +10 -4
- data/lib/firehose/server.rb +9 -9
- data/lib/firehose/server/channel.rb +23 -31
- data/lib/firehose/server/message_buffer.rb +59 -0
- data/lib/firehose/server/publisher.rb +16 -17
- data/lib/firehose/server/redis.rb +32 -0
- data/lib/firehose/server/subscriber.rb +7 -7
- data/lib/firehose/version.rb +2 -2
- data/package.json +14 -2
- data/spec/integrations/shared_examples.rb +89 -7
- data/spec/javascripts/firehose/multiplexed_consumer_spec.coffee +72 -0
- data/spec/javascripts/firehose/transport_spec.coffee +0 -2
- data/spec/javascripts/firehose/websocket_spec.coffee +2 -0
- data/spec/javascripts/helpers/spec_helper.js +1 -0
- data/spec/javascripts/support/jquery-1.11.1.js +10308 -0
- data/{lib/assets/javascripts/vendor → spec/javascripts/support}/json2.js +0 -0
- data/spec/javascripts/support/spec_helper.coffee +3 -0
- data/spec/lib/assets_spec.rb +8 -8
- data/spec/lib/client/producer_spec.rb +14 -14
- data/spec/lib/firehose_spec.rb +2 -2
- data/spec/lib/rack/consumer/http_long_poll_spec.rb +21 -3
- data/spec/lib/rack/consumer_spec.rb +4 -4
- data/spec/lib/rack/ping_spec.rb +4 -4
- data/spec/lib/rack/publisher_spec.rb +5 -5
- data/spec/lib/server/app_spec.rb +2 -2
- data/spec/lib/server/channel_spec.rb +58 -44
- data/spec/lib/server/message_buffer_spec.rb +148 -0
- data/spec/lib/server/publisher_spec.rb +29 -22
- data/spec/lib/server/redis_spec.rb +13 -0
- data/spec/lib/server/subscriber_spec.rb +14 -13
- data/spec/spec_helper.rb +8 -1
- metadata +34 -95
- data/.rbenv-version +0 -1
- data/Guardfile +0 -31
- data/config/evergreen.rb +0 -9
data/lib/firehose/rack/ping.rb
CHANGED

data/lib/firehose/rack/publisher.rb
CHANGED

@@ -1,12 +1,14 @@
+require "rack/utils"
+
 module Firehose
   module Rack
     class Publisher
       include Firehose::Rack::Helpers

       def call(env)
-        req
-        path
-        method
+        req = env['parsed_request'] ||= ::Rack::Request.new(env)
+        path = req.path
+        method = req.request_method
         cache_control = {}

         # Parse out cache control directives from the Cache-Control header.
@@ -26,7 +28,11 @@ module Firehose
         EM.next_tick do
           body = env['rack.input'].read
           Firehose.logger.debug "HTTP published #{body.inspect} to #{path.inspect} with ttl #{ttl.inspect}"
-
+          opts = { :ttl => ttl }
+          if buffer_size = env["HTTP_X_FIREHOSE_BUFFER_SIZE"]
+            opts[:buffer_size] = buffer_size.to_i
+          end
+          publisher.publish(path, body, opts).callback do
             env['async.callback'].call [202, {'Content-Type' => 'text/plain', 'Content-Length' => '0'}, []]
             env['async.callback'].call response(202, '', 'Content-Type' => 'text/plain')
           end.errback do |e|
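
The publisher endpoint now honors a per-request buffer size: the `X-Firehose-Buffer-Size` request header (seen above as `env["HTTP_X_FIREHOSE_BUFFER_SIZE"]`) is forwarded to `Publisher#publish` as `opts[:buffer_size]`, overriding the `MessageBuffer::DEFAULT_SIZE` of 100. A minimal sketch of using it from the bundled Ruby producer, based on the `buffer_size:` option exercised in the integration spec further down; whether the producer maps the option onto that exact header is an assumption here:

    require "firehose"

    # Inside a running EventMachine reactor: publish a message and ask the
    # server to keep up to 50 messages in this channel's Redis buffer
    # instead of the default 100.
    EM.run do
      Firehose::Client::Producer::Http.new.
        publish("hello").to("/my/channel", buffer_size: 50) do
          EM.stop
        end
    end
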
data/lib/firehose/server.rb
CHANGED

@@ -8,15 +8,15 @@ module Firehose
   # Firehose components that sit between the Rack HTTP software and the Redis server.
   # This mostly handles message sequencing and different HTTP channel names.
   module Server
-    autoload :
-    autoload :
-    autoload :
-    autoload :
+    autoload :MessageBuffer, 'firehose/server/message_buffer'
+    autoload :Subscriber, 'firehose/server/subscriber'
+    autoload :Publisher, 'firehose/server/publisher'
+    autoload :Channel, 'firehose/server/channel'
+    autoload :App, 'firehose/server/app'
+    autoload :Redis, 'firehose/server/redis'

-
-
-    def self.key(*segments)
-      segments.unshift(:firehose).join(':')
+    def self.redis
+      @redis ||= Redis.new
     end
   end
-end
+end
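
With the module-level `Server.key` helper removed, key construction and connection handling both live on the new `Firehose::Server::Redis` class (added further down), and `Firehose::Server.redis` memoizes one shared instance of it. A short sketch of the call pattern `Channel` and `Publisher` use in this release, based only on the code in this diff:

    config = Firehose::Server.redis   # memoized Firehose::Server::Redis instance
    conn   = config.connection        # fresh EM::Hiredis connection (running EM reactor required)
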
data/lib/firehose/server/channel.rb
CHANGED

@@ -2,57 +2,49 @@ module Firehose
   module Server
     # Connects to a specific channel on Redis and listens for messages to notify subscribers.
     class Channel
-      attr_reader :channel_key, :
+      attr_reader :channel_key, :list_key, :sequence_key
+      attr_reader :redis, :subscriber

       def self.redis
-        @redis ||=
+        @redis ||= Firehose::Server.redis.connection
       end

       def self.subscriber
-        @subscriber ||= Server::Subscriber.new(
+        @subscriber ||= Server::Subscriber.new(Firehose::Server.redis.connection)
       end

       def initialize(channel_key, redis=self.class.redis, subscriber=self.class.subscriber)
-        @
-        @
+        @redis = redis
+        @subscriber = subscriber
+        @channel_key = channel_key
+        @list_key = Server::Redis.key(channel_key, :list)
+        @sequence_key = Server::Redis.key(channel_key, :sequence)
       end

-      def
-        last_sequence = last_sequence.to_i
-
+      def next_messages(consumer_sequence=nil, options={})
         deferrable = EM::DefaultDeferrable.new
-        # TODO - Think this through a little harder... maybe some tests ol buddy!
         deferrable.errback {|e| EM.next_tick { raise e } unless [:timeout, :disconnect].include?(e) }

-        # TODO: Use HSET so we don't have to pull 100 messages back every time.
         redis.multi
         redis.get(sequence_key).
           errback {|e| deferrable.fail e }
-
+        # Fetch entire list: http://stackoverflow.com/questions/10703019/redis-fetch-all-value-of-list-without-iteration-and-without-popping
+        redis.lrange(list_key, 0, -1).
           errback {|e| deferrable.fail e }
-        redis.exec.callback do |(
-
-
-
-
-
+        redis.exec.callback do |(channel_sequence, message_list)|
+          # Reverse the messages so they can be correctly procesed by the MessageBuffer class. There's
+          # a patch in the message-buffer-redis branch that moves this concern into the Publisher LUA
+          # script. We kept it out of this for now because it represents a deployment risk and `reverse!`
+          # is a cheap operation in Ruby.
+          message_list.reverse!
+          buffer = MessageBuffer.new(message_list, channel_sequence, consumer_sequence)
+          if buffer.remaining_messages.empty?
+            Firehose.logger.debug "No messages in buffer, subscribing. sequence: `#{channel_sequence}` consumer_sequence: #{consumer_sequence}"
             # Either this resource has never been seen before or we are all caught up.
             # Subscribe and hope something gets published to this end-point.
             subscribe(deferrable, options[:timeout])
-
-
-            # up. Catch them up FTW.
-            # But we won't "catch them up" if last_sequence was zero/nil because
-            # that implies the client is connecting for the 1st time.
-            message = message_list[diff-1]
-            Firehose.logger.debug "Sending old message `#{message}` and sequence `#{sequence}` to client directly. Client is `#{diff}` behind, at `#{last_sequence}`."
-            deferrable.succeed message, last_sequence + 1
-          else
-            # The client is hopelessly behind and underwater. Just reset
-            # their whole world with the lastest message.
-            message = message_list[0]
-            Firehose.logger.debug "Sending latest message `#{message}` and sequence `#{sequence}` to client directly."
-            deferrable.succeed message, sequence
+          else # Either the client is under water or caught up to head.
+            deferrable.succeed buffer.remaining_messages
           end
         end.errback {|e| deferrable.fail e }

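
The channel's deferrable now succeeds with an array of `MessageBuffer::Message` structs rather than a single message and sequence pair. A hypothetical consumer-side sketch using only names introduced in this diff (`deliver` is an illustrative callback, not part of the gem):

    # Inside the EventMachine reactor that the Firehose server runs in:
    channel = Firehose::Server::Channel.new("/my/channel")
    last_seen_sequence = 0   # a brand-new client

    channel.next_messages(last_seen_sequence, :timeout => 30).callback do |messages|
      messages.each { |m| deliver(m.payload, m.sequence) }
    end.errback do |e|
      # :timeout and :disconnect are expected here; any other error is
      # re-raised on the next reactor tick by the channel itself.
    end
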
data/lib/firehose/server/message_buffer.rb
ADDED

@@ -0,0 +1,59 @@
+module Firehose
+  module Server
+    # Encapsulates a sequence of messages from the server along with their
+    # consumer_sequences calculate by offset.
+    class MessageBuffer
+      # Number of messages that Redis buffers for the client if its
+      # connection drops, then reconnects.
+      DEFAULT_SIZE = 100
+
+      Message = Struct.new(:payload, :sequence)
+
+      def initialize(message_list, channel_sequence, consumer_sequence = nil)
+        @message_list = message_list
+        @channel_sequence = channel_sequence.to_i
+        @consumer_sequence = consumer_sequence.to_i
+      end
+
+      def remaining_messages
+        messages.last(remaining_message_count)
+      end
+
+      private
+
+      def remaining_message_count
+        # Special case to always get the latest message.
+        return 1 unless @consumer_sequence > 0
+
+        count = @channel_sequence - @consumer_sequence
+
+        if count < 0
+          # UNEXPECTED: Somehow the sequence is ahead of the channel.
+          # It is likely a bug in the consumer, but we'll assume
+          # the consumer has all the messages.
+          0
+        elsif count > @message_list.size
+          # Consumer is under water since the last request. Just send the most recent message.
+          1
+        else
+          count
+        end
+      end
+
+      # Calculates the last_message_sequence per message.
+      # [a b c e f]
+      def messages
+        @messages ||= @message_list.map.with_index do |payload, index|
+          Message.new(payload, starting_channel_sequence + index)
+        end
+      end
+
+      # Channel sequence is 10
+      # Buffer size of 5
+      # Start of sequence in buffer ... which would be 6
+      def starting_channel_sequence
+        @channel_sequence - @message_list.size + 1
+      end
+    end
+  end
+end
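
A worked example of the buffer arithmetic above, with hypothetical values: the channel sequence is at 10, Redis holds the last five payloads, and a consumer that last saw sequence 8 gets exactly the two messages it is missing, while a consumer with no sequence gets only the newest one.

    payloads = %w[msg-6 msg-7 msg-8 msg-9 msg-10]    # oldest first, as Channel reverses the Redis list
    buffer   = Firehose::Server::MessageBuffer.new(payloads, 10, 8)

    buffer.remaining_messages.map(&:sequence)   # => [9, 10]
    buffer.remaining_messages.map(&:payload)    # => ["msg-9", "msg-10"]

    # A brand-new consumer (no sequence) is only sent the latest message:
    Firehose::Server::MessageBuffer.new(payloads, 10).remaining_messages.map(&:payload)  # => ["msg-10"]
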
data/lib/firehose/server/publisher.rb
CHANGED

@@ -1,10 +1,6 @@
 module Firehose
   module Server
     class Publisher
-      # Number of messages that Redis buffers for the client if its
-      # connection drops, then reconnects.
-      MAX_MESSAGES = 100
-
       # Seconds that the message buffer should live before Redis expires it.
       TTL = 60*60*24

@@ -16,6 +12,7 @@ module Firehose
       def publish(channel_key, message, opts={})
         # How long should we hang on to the resource once is published?
         ttl = (opts[:ttl] || TTL).to_i
+        buffer_size = (opts[:buffer_size] || MessageBuffer::DEFAULT_SIZE).to_i

         # TODO hi-redis isn't that awesome... we have to setup an errback per even for wrong
         # commands because of the lack of a method_missing whitelist. Perhaps implement a whitelist in
@@ -44,10 +41,10 @@ module Firehose
         end.callback do |digest|
           @publish_script_digest = digest
           Firehose.logger.debug "Registered Lua publishing script with Redis => #{digest}"
-          eval_publish_script channel_key, message, ttl, deferrable
+          eval_publish_script channel_key, message, ttl, buffer_size, deferrable
         end
       else
-        eval_publish_script channel_key, message, ttl, deferrable
+        eval_publish_script channel_key, message, ttl, buffer_size, deferrable
       end

       deferrable
@@ -55,7 +52,7 @@ module Firehose

       private
       def redis
-        @redis ||=
+        @redis ||= Firehose::Server.redis.connection
       end

       # Serialize components of a message into something that can be dropped into Redis.
@@ -63,9 +60,10 @@ module Firehose
         [channel_key, sequence, message].join(PAYLOAD_DELIMITER)
       end

-      #
+      # Deserialize components of a message back into Ruby.
       def self.from_payload(payload)
-
+        @payload_size ||= method(:to_payload).arity
+        payload.split(PAYLOAD_DELIMITER, @payload_size)
       end

       # TODO: Make this FAR more robust. Ideally we'd whitelist the permitted
@@ -79,18 +77,19 @@ module Firehose
         redis.script 'LOAD', REDIS_PUBLISH_SCRIPT
       end

-      def eval_publish_script(channel_key, message, ttl, deferrable)
-        list_key = Server.key(channel_key, :list)
+      def eval_publish_script(channel_key, message, ttl, buffer_size, deferrable)
+        list_key = Server::Redis.key(channel_key, :list)
         script_args = [
-          Server.key(channel_key, :sequence),
+          Server::Redis.key(channel_key, :sequence),
           list_key,
-          Server.key(:channel_updates),
+          Server::Redis.key(:channel_updates),
           ttl,
           message,
-
+          buffer_size,
           PAYLOAD_DELIMITER,
           channel_key
         ]
+
         redis.evalsha(
           @publish_script_digest, script_args.length, *script_args
         ).errback do |e|
@@ -107,7 +106,7 @@ module Firehose
         local channel_key = KEYS[3]
         local ttl = KEYS[4]
         local message = KEYS[5]
-        local
+        local buffer_size = KEYS[6]
         local payload_delimiter = KEYS[7]
         local firehose_resource = KEYS[8]

@@ -122,7 +121,7 @@ module Firehose
         redis.call('set', sequence_key, sequence)
         redis.call('expire', sequence_key, ttl)
         redis.call('lpush', list_key, message)
-        redis.call('ltrim', list_key, 0,
+        redis.call('ltrim', list_key, 0, buffer_size - 1)
         redis.call('expire', list_key, ttl)
         redis.call('publish', channel_key, message_payload)

@@ -131,4 +130,4 @@ module Firehose

     end
   end
-end
+end
data/lib/firehose/server/redis.rb
ADDED

@@ -0,0 +1,32 @@
+require "uri"
+
+module Firehose
+  module Server
+    # Manages redis configuration and connections.
+    class Redis
+      DEFAULT_URL = "redis://127.0.0.1:6379/0".freeze
+      KEY_DELIMITER = ":".freeze
+      ROOT_KEY = "firehose".freeze
+
+      attr_reader :url
+
+      def initialize(url = self.class.url)
+        @url = URI(url)
+      end
+
+      def connection
+        EM::Hiredis.connect(@url)
+      end
+
+      # Generates keys for all firehose interactions with Redis. Ensures a root
+      # key of `firehose`
+      def self.key(*segments)
+        segments.flatten.unshift(ROOT_KEY).join(KEY_DELIMITER)
+      end
+
+      def self.url
+        ENV.fetch("REDIS_URL", DEFAULT_URL)
+      end
+    end
+  end
+end
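
Key generation moves from `Server.key` to `Server::Redis.key`, and the connection URL is now read from the `REDIS_URL` environment variable. A small sketch of both, derived from the class above (the host is hypothetical):

    Firehose::Server::Redis.key("/my/channel", :sequence)
    # => "firehose:/my/channel:sequence"

    ENV["REDIS_URL"] = "redis://cache.internal:6379/2"   # hypothetical; defaults to redis://127.0.0.1:6379/0
    Firehose::Server::Redis.new.url.to_s                 # => "redis://cache.internal:6379/2"
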
data/lib/firehose/server/subscriber.rb
CHANGED

@@ -16,18 +16,18 @@ module Firehose
         # with the same error.
         # The final goal is to allow the failed deferrable bubble back up
         # so we can send back a nice, clean 500 error to the client.
-        channel_updates_key = Server.key('channel_updates')
+        channel_updates_key = Server::Redis.key('channel_updates')
         pubsub.subscribe(channel_updates_key).
           errback{|e| EM.next_tick { raise e } }.
           callback { Firehose.logger.debug "Redis subscribed to `#{channel_updates_key}`" }
         pubsub.on(:message) do |_, payload|
-          channel_key,
-
+          channel_key, channel_sequence, message = Server::Publisher.from_payload(payload)
+          messages = [ MessageBuffer::Message.new(message, channel_sequence.to_i) ]
           if deferrables = subscriptions.delete(channel_key)
-            Firehose.logger.debug "Redis notifying #{deferrables.count} deferrable(s) at `#{channel_key}` with
+            Firehose.logger.debug "Redis notifying #{deferrables.count} deferrable(s) at `#{channel_key}` with channel_sequence `#{channel_sequence}` and message `#{message}`"
             deferrables.each do |deferrable|
-              Firehose.logger.debug "Sending message #{message} and
-              deferrable.succeed
+              Firehose.logger.debug "Sending message #{message} and channel_sequence #{channel_sequence} to client from subscriber"
+              deferrable.succeed messages
             end
           end
         end
@@ -48,4 +48,4 @@ module Firehose
       end
     end
   end
-end
+end
data/lib/firehose/version.rb
CHANGED
data/package.json
CHANGED
@@ -1,5 +1,17 @@
 {
   "name": "firehose",
-  "version": "1.
-  "main": "lib/assets/javascripts/firehose.js.coffee"
+  "version": "1.3.3",
+  "main": "lib/assets/javascripts/firehose.js.coffee",
+  "devDependencies": {
+    "coffee-script": "*",
+    "jasmine-jquery": "git://github.com/velesin/jasmine-jquery.git",
+    "karma": "~0.12.16",
+    "karma-chrome-launcher": "~0.1.4",
+    "karma-coffee-preprocessor": "~0.2.1",
+    "karma-jasmine": "~0.2.0",
+    "karma-junit-reporter": "^0.2.2",
+    "karma-phantomjs-launcher": "^0.1.4",
+    "karma-safari-launcher": "*",
+    "karma-sprockets-mincer": "0.1.2"
+  }
 }
data/spec/integrations/shared_examples.rb
CHANGED

@@ -23,9 +23,12 @@ shared_examples_for 'Firehose::Rack::App' do
   let(:messages) { (1..200).map{|n| "msg-#{n}" } }
   let(:channel) { "/firehose/integration/#{Time.now.to_i}" }
   let(:http_url) { "http://#{uri.host}:#{uri.port}#{channel}" }
+  let(:http_multi_url) { "http://#{uri.host}:#{uri.port}/channels@firehose" }
   let(:ws_url) { "ws://#{uri.host}:#{uri.port}#{channel}" }
+  let(:multiplex_channels) { ["/foo/bar", "/bar/baz", "/baz/quux"] }
+  let(:subscription_query) { multiplex_channels.map{|c| "#{c}!0"}.join(",") }

-  it "
+  it "supports pub-sub http and websockets" do
     # Setup variables that we'll use after we turn off EM to validate our
     # test assertions.
     outgoing, received = messages.dup, Hash.new{|h,k| h[k] = []}
@@ -41,7 +44,7 @@ shared_examples_for 'Firehose::Rack::App' do

     # Setup a publisher
     publish = Proc.new do
-      Firehose::Client::Producer::Http.new.publish(outgoing.shift).to(channel) do
+      Firehose::Client::Producer::Http.new.publish(outgoing.shift).to(channel, buffer_size: rand(100)) do
         # The random timer ensures that sometimes the clients will be behind
         # and sometimes they will be caught up.
         EM::add_timer(rand*0.005) { publish.call } unless outgoing.empty?
@@ -100,19 +103,98 @@ shared_examples_for 'Firehose::Rack::App' do
     end

     # When EM stops, these assertions will be made.
-    received.size.
-    received.
-      arr.
+    expect(received.size).to eql(4)
+    received.each_value do |arr|
+      expect(arr.size).to eql(messages.size)
+      expect(arr.sort).to eql(messages.sort)
+    end
+  end
+
+  it "supports channel multiplexing for http_long_poll and websockets" do
+    # Setup variables that we'll use after we turn off EM to validate our
+    # test assertions.
+    outgoing, received = messages.dup, Hash.new{|h,k| h[k] = []}
+
+    # Our WS and Http clients call this when they have received their messages to determine
+    # when to turn off EM and make the test assertion at the very bottom.
+    succeed = Proc.new do
+      # TODO: For some weird reason the `add_timer` call causes up to 20 seconds of delay after
+      # the test finishes running. However, without it the test will randomly fail with a
+      # "Redis disconnected" error.
+      em.add_timer(1) { em.stop } if received.values.all?{|arr| arr.size == messages.size }
+    end
+
+    # Lets have an HTTP Long poll client using channel multiplexing
+    multiplexed_http_long_poll = Proc.new do |cid, last_sequence|
+      http = EM::HttpRequest.new(http_multi_url).get(:query => {'subscribe' => subscription_query})
+
+      http.errback { em.stop }
+      http.callback do
+        frame = JSON.parse(http.response, :symbolize_names => true)
+        received[cid] << frame[:message]
+        if received[cid].size < messages.size
+          # Add some jitter so the clients aren't syncronized
+          EM::add_timer(rand*0.001) { multiplexed_http_long_poll.call cid, frame[:last_sequence] }
+        else
+          succeed.call cid
+        end
+      end
+    end
+
+    # Test multiplexed web socket client
+    outgoing = messages.dup
+    publish_multi = Proc.new do
+      msg = outgoing.shift
+      chan = multiplex_channels[rand(multiplex_channels.size)]
+      Firehose::Client::Producer::Http.new.publish(msg).to(chan) do
+        EM::add_timer(rand*0.005) { publish_multi.call } unless outgoing.empty?
+      end
+    end
+
+    multiplexed_websocket = Proc.new do |cid|
+      ws = Faye::WebSocket::Client.new("ws://#{uri.host}:#{uri.port}/channels@firehose?subscribe=#{subscription_query}")
+
+      ws.onmessage = lambda do |event|
+        frame = JSON.parse(event.data, :symbolize_names => true)
+        received[cid] << frame[:message]
+        succeed.call cid unless received[cid].size < messages.size
+      end
+
+      ws.onclose = lambda do |event|
+        ws = nil
+      end
+
+      ws.onerror = lambda do |event|
+        raise 'ws failed' + "\n" + event.inspect
+      end
+    end
+
+    em 180 do
+      # Start the clients.
+      multiplexed_http_long_poll.call(5)
+      multiplexed_http_long_poll.call(6)
+      multiplexed_websocket.call(7)
+      multiplexed_websocket.call(8)
+
+      # Wait a sec to let our clients set up.
+      em.add_timer(1){ publish_multi.call }
+    end
+
+    # When EM stops, these assertions will be made.
+    expect(received.size).to eql(4)
+    received.each_value do |arr|
+      expect(arr.size).to be <= messages.size
+      # expect(arr.sort).to eql(messages.sort)
     end
   end


-  it "
+  it "returns 400 error for long-polling when using http long polling and sequence header is < 0" do
     em 5 do
       http = EM::HttpRequest.new(http_url).get(:query => {'last_message_sequence' => -1})
       http.errback { |e| raise e.inspect }
       http.callback do
-        http.response_header.status.
+        expect(http.response_header.status).to eql(400)
         em.stop
       end
     end