message_bus 2.1.6 → 2.2.0.pre

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of message_bus might be problematic.

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +13 -92
  3. data/.rubocop_todo.yml +659 -0
  4. data/.travis.yml +1 -1
  5. data/CHANGELOG +61 -0
  6. data/Dockerfile +18 -0
  7. data/Gemfile +3 -1
  8. data/Guardfile +0 -1
  9. data/README.md +188 -101
  10. data/Rakefile +12 -1
  11. data/assets/message-bus.js +1 -1
  12. data/docker-compose.yml +46 -0
  13. data/examples/bench/config.ru +8 -9
  14. data/examples/bench/unicorn.conf.rb +1 -1
  15. data/examples/chat/chat.rb +150 -153
  16. data/examples/minimal/config.ru +2 -3
  17. data/lib/message_bus.rb +224 -36
  18. data/lib/message_bus/backends.rb +7 -0
  19. data/lib/message_bus/backends/base.rb +184 -0
  20. data/lib/message_bus/backends/memory.rb +304 -226
  21. data/lib/message_bus/backends/postgres.rb +359 -318
  22. data/lib/message_bus/backends/redis.rb +380 -337
  23. data/lib/message_bus/client.rb +99 -41
  24. data/lib/message_bus/connection_manager.rb +29 -21
  25. data/lib/message_bus/diagnostics.rb +50 -41
  26. data/lib/message_bus/distributed_cache.rb +5 -7
  27. data/lib/message_bus/message.rb +2 -2
  28. data/lib/message_bus/rack/diagnostics.rb +65 -55
  29. data/lib/message_bus/rack/middleware.rb +64 -44
  30. data/lib/message_bus/rack/thin_ext.rb +13 -9
  31. data/lib/message_bus/rails/railtie.rb +2 -0
  32. data/lib/message_bus/timer_thread.rb +2 -2
  33. data/lib/message_bus/version.rb +2 -1
  34. data/message_bus.gemspec +3 -2
  35. data/spec/assets/support/jasmine_helper.rb +1 -1
  36. data/spec/lib/fake_async_middleware.rb +1 -6
  37. data/spec/lib/message_bus/assets/asset_encoding_spec.rb +3 -3
  38. data/spec/lib/message_bus/backend_spec.rb +409 -0
  39. data/spec/lib/message_bus/client_spec.rb +8 -11
  40. data/spec/lib/message_bus/connection_manager_spec.rb +8 -14
  41. data/spec/lib/message_bus/distributed_cache_spec.rb +0 -4
  42. data/spec/lib/message_bus/multi_process_spec.rb +6 -7
  43. data/spec/lib/message_bus/rack/middleware_spec.rb +47 -43
  44. data/spec/lib/message_bus/timer_thread_spec.rb +0 -2
  45. data/spec/lib/message_bus_spec.rb +59 -43
  46. data/spec/spec_helper.rb +16 -4
  47. metadata +12 -9
  48. data/spec/lib/message_bus/backends/postgres_spec.rb +0 -221
  49. data/spec/lib/message_bus/backends/redis_spec.rb +0 -271
@@ -0,0 +1,7 @@
+ # frozen_string_literal: true
+
+ module MessageBus
+   # @see MessageBus::Backends::Base
+   module Backends
+   end
+ end
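
The new MessageBus::Backends namespace is the home for every concrete backend; backends register themselves by name in the MessageBus::BACKENDS registry (see the last lines of the memory backend further down) and are then selected through configuration. As a hypothetical usage sketch only (the configure call is based on the message_bus README, not on this diff):

  require "message_bus"

  # Resolves the :memory symbol to MessageBus::Backends::Memory via MessageBus::BACKENDS.
  MessageBus.configure(backend: :memory)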
@@ -0,0 +1,184 @@
+ # frozen_string_literal: true
+
+ require "message_bus/backends"
+
+ module MessageBus
+   module Backends
+     # Backends provide a consistent API over a variety of options for persisting
+     # published messages. The API they present is around the publication to and
+     # reading of messages from those backlogs in a manner consistent with
+     # message_bus' philosophy.
+     #
+     # The heart of the message bus, a backend acts as two things:
+     #
+     # 1. A channel multiplexer
+     # 2. Backlog storage per-multiplexed channel.
+     #
+     # Backends manage and expose multiple backlogs:
+     #
+     # * A backlog for each channel, in which messages that were published to
+     #   that channel are stored.
+     # * A global backlog, which conceptually stores all published messages,
+     #   regardless of the channel to which they were published.
+     #
+     # Backlog storage mechanisms and schemas are up to each individual backend
+     # implementation, and some backends store messages very differently than
+     # others. It is not necessary, in order to be considered a valid backend,
+     # to, for example, store each channel backlog as a separate collection.
+     # As long as the API is presented per this documentation, the backend is
+     # free to make its own storage and performance optimisations.
+     #
+     # The concept of a per-channel backlog permits lookups of messages in
+     # a manner that is optimised for the use case of a subscriber catching up
+     # from a message pointer, while a global backlog allows for optimising the
+     # case where another system subscribes to the firehose of messages, for
+     # example a message_bus server receiving all publications for delivery
+     # to subscribed clients.
+     #
+     # Backends are fully responsible for maintaining their storage, including
+     # any pruning or expiration of that storage that is necessary. message_bus
+     # allows for several options for limiting the required storage capacity
+     # by either backlog size or the TTL of messages in a backlog. Backends take
+     # these settings and effect them either forcibly or by delegating to their
+     # storage mechanism.
+     #
+     # Messages which are published to message_bus have two IDs: one by which they
+     # are known in the channel-specific backlog that they are published to,
+     # and another (the "global ID") which is unique across all channels and by
+     # which the message can be found in the global backlog. IDs are all
+     # sequential integers starting at 0.
+     #
+     # @abstract
+     class Base
+       # rubocop:disable Lint/UnusedMethodArgument
+
+       # Raised to indicate that the concrete backend implementation does not implement part of the API
+       ConcreteClassMustImplementError = Class.new(StandardError)
+
+       # @return [String] a special message published to trigger termination of backend subscriptions
+       UNSUB_MESSAGE = "$$UNSUBSCRIBE"
+
+       # @return [Boolean] the subscription state of the backend
+       attr_reader :subscribed
+       # @return [Integer] the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
+       attr_accessor :max_backlog_size
+       # @return [Integer] the largest permitted size (number of messages) for the global backlog; beyond this capacity, old messages will be dropped.
+       attr_accessor :max_global_backlog_size
+       # @return [Integer] the longest amount of time a message may live in a backlog before being removed, in seconds.
+       attr_accessor :max_backlog_age
+       # Typically, backlogs are trimmed whenever we publish to them. This setting allows some tolerance in order to improve performance.
+       # @return [Integer] the interval of publications between which the backlog will not be cleared.
+       attr_accessor :clear_every
+       # @return [Integer] the largest permitted size (number of messages) to be held in a memory buffer when publication fails, for later re-publication.
+       attr_accessor :max_in_memory_publish_backlog
+
+       # @param [Hash] config backend-specific configuration options; see the concrete class for details
+       # @param [Integer] max_backlog_size the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
+       def initialize(config = {}, max_backlog_size = 1000); end
+
+       # Performs routines specific to the backend that are necessary after a process fork, typically triggered by a forking webserver. Typically this re-opens sockets to the backend.
+       def after_fork
+         raise ConcreteClassMustImplementError
+       end
+
+       # Deletes all message_bus data from the backend. Use with extreme caution.
+       def reset!
+         raise ConcreteClassMustImplementError
+       end
+
+       # Deletes all backlogs and their data. Does not delete non-backlog data that message_bus may persist, depending on the concrete backend implementation. Use with extreme caution.
+       # @abstract
+       def expire_all_backlogs!
+         raise ConcreteClassMustImplementError
+       end
+
+       # Publishes a message to a channel
+       #
+       # @param [String] channel the name of the channel to which the message should be published
+       # @param [JSON] data some data to publish to the channel. Must be an object that can be encoded as JSON
+       # @param [Hash] opts
+       # @option opts [Boolean] :queue_in_memory (true) whether or not to hold the message in an in-memory buffer if publication fails, to be re-tried later
+       # @option opts [Integer] :max_backlog_age (`self.max_backlog_age`) the longest amount of time a message may live in a backlog before being removed, in seconds
+       # @option opts [Integer] :max_backlog_size (`self.max_backlog_size`) the largest permitted size (number of messages) for the channel backlog; beyond this capacity, old messages will be dropped
+       #
+       # @return [Integer] the channel-specific ID the message was given
+       def publish(channel, data, opts = nil)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Get the ID of the last message published on a channel
+       #
+       # @param [String] channel the name of the channel in question
+       #
+       # @return [Integer] the channel-specific ID of the last message published to the given channel
+       def last_id(channel)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Get messages from a channel backlog
+       #
+       # @param [String] channel the name of the channel in question
+       # @param [#to_i] last_id the channel-specific ID of the last message that the caller received on the specified channel
+       #
+       # @return [Array<MessageBus::Message>] all messages published to the specified channel since the specified last ID
+       def backlog(channel, last_id = 0)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Get messages from the global backlog
+       #
+       # @param [#to_i] last_id the global ID of the last message that the caller received
+       #
+       # @return [Array<MessageBus::Message>] all messages published on any channel since the specified last ID
+       def global_backlog(last_id = 0)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Get a specific message from a channel
+       #
+       # @param [String] channel the name of the channel in question
+       # @param [Integer] message_id the channel-specific ID of the message required
+       #
+       # @return [MessageBus::Message, nil] the requested message, or nil if it does not exist
+       def get_message(channel, message_id)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Subscribe to messages on a particular channel. Each message since the
+       # last ID specified will be delivered by yielding to the passed block as
+       # soon as it is available. This will block until subscription is terminated.
+       #
+       # @param [String] channel the name of the channel to which we should subscribe
+       # @param [#to_i] last_id the channel-specific ID of the last message that the caller received on the specified channel
+       #
+       # @yield [message] a message-handler block
+       # @yieldparam [MessageBus::Message] message each message as it is delivered
+       #
+       # @return [nil]
+       def subscribe(channel, last_id = nil)
+         raise ConcreteClassMustImplementError
+       end
+
+       # Causes all subscribers to the bus to unsubscribe, and terminates the local connection. Typically used to reset tests.
+       def global_unsubscribe
+         raise ConcreteClassMustImplementError
+       end
+
+       # Subscribe to messages on all channels. Each message since the last ID
+       # specified will be delivered by yielding to the passed block as soon as
+       # it is available. This will block until subscription is terminated.
+       #
+       # @param [#to_i] last_id the global ID of the last message that the caller received
+       #
+       # @yield [message] a message-handler block
+       # @yieldparam [MessageBus::Message] message each message as it is delivered
+       #
+       # @return [nil]
+       def global_subscribe(last_id = nil)
+         raise ConcreteClassMustImplementError
+       end
+
+       # rubocop:enable Lint/UnusedMethodArgument
+     end
+   end
+ end
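
To make the abstract Base API above concrete, here is a minimal, hypothetical sketch of a custom backend; the HashBackend name, its Hash-based storage, and the method bodies are illustrative assumptions and are not part of this release:

  # Illustrative only: a skeletal backend keeping each channel backlog in a Hash of arrays.
  require "message_bus/backends/base"

  module MessageBus
    module Backends
      class HashBackend < Base
        def initialize(config = {}, max_backlog_size = 1000)
          @config = config
          @max_backlog_size = max_backlog_size
          @channels = Hash.new { |h, k| h[k] = [] } # channel name => [[channel_id, global_id, data], ...]
          @global_id = 0
        end

        # Per the Base documentation, each message gets a channel-specific ID and a global ID.
        def publish(channel, data, opts = nil)
          @global_id += 1
          backlog = @channels[channel]
          channel_id = backlog.size + 1
          backlog << [channel_id, @global_id, data]
          channel_id
        end

        def last_id(channel)
          entry = @channels[channel].last
          entry ? entry[0] : 0
        end

        def backlog(channel, last_id = 0)
          @channels[channel]
            .select { |channel_id, _global_id, _data| channel_id > last_id.to_i }
            .map { |channel_id, global_id, data| MessageBus::Message.new(global_id, channel_id, channel, data) }
        end

        # The remaining Base methods (global_backlog, subscribe, after_fork, ...) would be
        # implemented here; anything left out raises ConcreteClassMustImplementError via Base.
      end
    end
  end

A real backend would also register itself by name, as the memory backend does at the end of the next hunk with MessageBus::BACKENDS[:memory] = self.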
@@ -1,287 +1,365 @@
  # frozen_string_literal: true
- module MessageBus::Memory; end

- class MessageBus::Memory::Client
- class Listener
- attr_reader :do_sub, :do_unsub, :do_message
+ require "message_bus/backends/base"
+
+ module MessageBus
+ module Backends
+ # The memory backend stores published messages in a simple array per
+ # channel, and does not store a separate global backlog.
+ #
+ # @note This backend diverges from the standard in Base in the following ways:
+ #
+ # * Does not support forking
+ # * Does not support in-memory buffering of messages on publication (redundant)
+ #
+ # @see Base general information about message_bus backends
+ class Memory < Base
+ class Client
+ attr_accessor :max_backlog_age
+
+ class Listener
+ attr_reader :do_sub, :do_unsub, :do_message
+
+ def subscribe(&block)
+ @do_sub = block
+ end

- def subscribe(&block)
- @do_sub = block
- end
+ def unsubscribe(&block)
+ @do_unsub = block
+ end

- def unsubscribe(&block)
- @do_unsub = block
- end
+ def message(&block)
+ @do_message = block
+ end
+ end

- def message(&block)
- @do_message = block
- end
- end
+ class Channel
+ attr_accessor :backlog, :ttl

- def initialize(config)
- @mutex = Mutex.new
- @listeners = []
- reset!
- end
+ def initialize(ttl:)
+ @backlog = []
+ @ttl = ttl
+ end

- def add(channel, value)
- listeners = nil
- id = nil
- sync do
- id = @global_id += 1
- chan(channel) << [id, value]
- listeners = @listeners.dup
- end
- msg = MessageBus::Message.new id, id, channel, value
- payload = msg.encode
- listeners.each { |l| l.push(payload) }
- id
- end
+ def expired?
+ last_publication_time = nil
+ backlog.each do |_id, _value, published_at|
+ if !last_publication_time || published_at > last_publication_time
+ last_publication_time = published_at
+ end
+ end
+ return true unless last_publication_time

- def clear_global_backlog(backlog_id, num_to_keep)
- if backlog_id > num_to_keep
- oldest = backlog_id - num_to_keep
- sync do
- @channels.each_value do |entries|
- entries.delete_if { |id, _| id <= oldest }
+ last_publication_time < Time.now - ttl
+ end
  end
- end
- nil
- end
- end

- def clear_channel_backlog(channel, backlog_id, num_to_keep)
- oldest = backlog_id - num_to_keep
- sync { chan(channel).delete_if { |id, _| id <= oldest } }
- nil
- end
+ def initialize(_config)
+ @mutex = Mutex.new
+ @listeners = []
+ @timer_thread = MessageBus::TimerThread.new
+ @timer_thread.on_error do |e|
+ logger.warn "Failed to process job: #{e} #{e.backtrace}"
+ end
+ @timer_thread.every(1) { expire }
+ reset!
+ end

- def backlog(channel, backlog_id)
- sync { chan(channel).select { |id, _| id > backlog_id } }
- end
+ def add(channel, value, max_backlog_age:)
+ listeners = nil
+ id = nil
+ sync do
+ id = @global_id += 1
+ channel_object = chan(channel)
+ channel_object.backlog << [id, value, Time.now]
+ if max_backlog_age
+ channel_object.ttl = max_backlog_age
+ end
+ listeners = @listeners.dup
+ end
+ msg = MessageBus::Message.new id, id, channel, value
+ payload = msg.encode
+ listeners.each { |l| l.push(payload) }
+ id
+ end

- def global_backlog(backlog_id)
- sync do
- @channels.dup.flat_map do |channel, messages|
- messages.select { |id, _| id > backlog_id }.map { |id, value| [id, channel, value] }
- end.sort
- end
- end
+ def expire
+ sync do
+ @channels.delete_if { |_name, channel| channel.expired? }
+ end
+ end

- def get_value(channel, id)
- sync { chan(channel).find { |i, _| i == id }.last }
- end
+ def clear_global_backlog(backlog_id, num_to_keep)
+ if backlog_id > num_to_keep
+ oldest = backlog_id - num_to_keep
+ sync do
+ @channels.each_value do |channel|
+ channel.backlog.delete_if { |id, _| id <= oldest }
+ end
+ end
+ nil
+ end
+ end

- # Dangerous, drops the message_bus table containing the backlog if it exists.
- def reset!
- sync do
- @global_id = 0
- @channels = {}
- end
- end
+ def clear_channel_backlog(channel, backlog_id, num_to_keep)
+ oldest = backlog_id - num_to_keep
+ sync { chan(channel).backlog.delete_if { |id, _| id <= oldest } }
+ nil
+ end

- def max_id(channel = nil)
- if channel
- sync do
- if entry = chan(channel).last
- entry.first
+ def backlog(channel, backlog_id)
+ sync { chan(channel).backlog.select { |id, _| id > backlog_id } }
  end
- end
- else
- sync { @global_id - 1 }
- end || 0
- end

- def subscribe
- listener = Listener.new
- yield listener
+ def global_backlog(backlog_id)
+ sync do
+ @channels.dup.flat_map do |channel_name, channel|
+ channel.backlog.select { |id, _| id > backlog_id }.map { |id, value| [id, channel_name, value] }
+ end.sort
+ end
+ end

- q = Queue.new
- sync do
- @listeners << q
- end
+ def get_value(channel, id)
+ sync { chan(channel).backlog.find { |i, _| i == id }[1] }
+ end

- listener.do_sub.call
- while msg = q.pop
- listener.do_message.call(nil, msg)
- end
- listener.do_unsub.call
- sync do
- @listeners.delete(q)
- end
+ # Dangerous, drops the message_bus table containing the backlog if it exists.
+ def reset!
+ sync do
+ @global_id = 0
+ @channels = {}
+ end
+ end

- nil
- end
+ # use with extreme care, will nuke all of the data
+ def expire_all_backlogs!
+ sync do
+ @channels = {}
+ end
+ end

- def unsubscribe
- sync { @listeners.each { |l| l.push(nil) } }
- end
+ def max_id(channel = nil)
+ if channel
+ sync do
+ if entry = chan(channel).backlog.last
+ entry.first
+ end
+ end
+ else
+ sync { @global_id - 1 }
+ end || 0
+ end

- private
+ def subscribe
+ listener = Listener.new
+ yield listener

- def chan(channel)
- @channels[channel] ||= []
- end
+ q = Queue.new
+ sync do
+ @listeners << q
+ end

- def sync
- @mutex.synchronize { yield }
- end
- end
+ listener.do_sub.call
+ while msg = q.pop
+ listener.do_message.call(nil, msg)
+ end
+ listener.do_unsub.call
+ sync do
+ @listeners.delete(q)
+ end

- class MessageBus::Memory::ReliablePubSub
- attr_reader :subscribed
- attr_accessor :max_backlog_size, :max_global_backlog_size, :clear_every
+ nil
+ end

- UNSUB_MESSAGE = "$$UNSUBSCRIBE"
+ def unsubscribe
+ sync { @listeners.each { |l| l.push(nil) } }
+ end

- # max_backlog_size is per multiplexed channel
- def initialize(config = {}, max_backlog_size = 1000)
- @config = config
- @max_backlog_size = max_backlog_size
- @max_global_backlog_size = 2000
- # after 7 days inactive backlogs will be removed
- @clear_every = config[:clear_every] || 1
- end
+ private

- def new_connection
- MessageBus::Memory::Client.new(@config)
- end
+ def chan(channel)
+ @channels[channel] ||= Channel.new(ttl: @max_backlog_age)
+ end

- def backend
- :memory
- end
+ def sync
+ @mutex.synchronize { yield }
+ end
+ end

- def after_fork
- nil
- end
+ # @param [Hash] config
+ # @option config [Logger] :logger a logger to which logs will be output
+ # @option config [Integer] :clear_every the interval of publications between which the backlog will not be cleared
+ # @param [Integer] max_backlog_size the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
+ def initialize(config = {}, max_backlog_size = 1000)
+ @config = config
+ @max_backlog_size = max_backlog_size
+ @max_global_backlog_size = 2000
+ # after 7 days inactive backlogs will be removed
+ self.max_backlog_age = 604800
+ @clear_every = config[:clear_every] || 1
+ end

- def client
- @client ||= new_connection
- end
+ def max_backlog_age=(value)
+ client.max_backlog_age = value
+ end

- # use with extreme care, will nuke all of the data
- def reset!
- client.reset!
- end
+ # No-op; this backend doesn't support forking.
+ # @see Base#after_fork
+ def after_fork
+ nil
+ end

- def publish(channel, data, opts = nil)
- client = self.client
- backlog_id = client.add(channel, data)
- if backlog_id % clear_every == 0
- client.clear_global_backlog(backlog_id, @max_global_backlog_size)
- client.clear_channel_backlog(channel, backlog_id, @max_backlog_size)
- end
+ # (see Base#reset!)
+ def reset!
+ client.reset!
+ end

- backlog_id
- end
+ # (see Base#expire_all_backlogs!)
+ def expire_all_backlogs!
+ client.expire_all_backlogs!
+ end

- def last_id(channel)
- client.max_id(channel)
- end
+ # (see Base#publish)
+ # @todo :queue_in_memory NOT SUPPORTED
+ def publish(channel, data, opts = nil)
+ c = client
+ max_backlog_age = opts && opts[:max_backlog_age]
+ backlog_id = c.add(channel, data, max_backlog_age: max_backlog_age)
+
+ if backlog_id % clear_every == 0
+ max_backlog_size = (opts && opts[:max_backlog_size]) || self.max_backlog_size
+ c.clear_global_backlog(backlog_id, @max_global_backlog_size)
+ c.clear_channel_backlog(channel, backlog_id, max_backlog_size)
+ end

- def backlog(channel, last_id = nil)
- items = client.backlog channel, last_id.to_i
+ backlog_id
+ end

- items.map! do |id, data|
- MessageBus::Message.new id, id, channel, data
- end
- end
+ # (see Base#last_id)
+ def last_id(channel)
+ client.max_id(channel)
+ end

- def global_backlog(last_id = nil)
- last_id = last_id.to_i
+ # (see Base#backlog)
+ def backlog(channel, last_id = 0)
+ items = client.backlog channel, last_id.to_i

- items = client.global_backlog last_id.to_i
+ items.map! do |id, data|
+ MessageBus::Message.new id, id, channel, data
+ end
+ end

- items.map! do |id, channel, data|
- MessageBus::Message.new id, id, channel, data
- end
- end
+ # (see Base#global_backlog)
+ def global_backlog(last_id = 0)
+ items = client.global_backlog last_id.to_i

- def get_message(channel, message_id)
- if data = client.get_value(channel, message_id)
- MessageBus::Message.new message_id, message_id, channel, data
- else
- nil
- end
- end
+ items.map! do |id, channel, data|
+ MessageBus::Message.new id, id, channel, data
+ end
+ end

- def subscribe(channel, last_id = nil)
- # trivial implementation for now,
- # can cut down on connections if we only have one global subscriber
- raise ArgumentError unless block_given?
+ # (see Base#get_message)
+ def get_message(channel, message_id)
+ if data = client.get_value(channel, message_id)
+ MessageBus::Message.new message_id, message_id, channel, data
+ else
+ nil
+ end
+ end

- global_subscribe(last_id) do |m|
- yield m if m.channel == channel
- end
- end
+ # (see Base#subscribe)
+ def subscribe(channel, last_id = nil)
+ # trivial implementation for now,
+ # can cut down on connections if we only have one global subscriber
+ raise ArgumentError unless block_given?

- def process_global_backlog(highest_id)
- if highest_id > client.max_id
- highest_id = 0
- end
+ global_subscribe(last_id) do |m|
+ yield m if m.channel == channel
+ end
+ end

- global_backlog(highest_id).each do |old|
- yield old
- highest_id = old.global_id
- end
+ # (see Base#global_unsubscribe)
+ def global_unsubscribe
+ client.unsubscribe
+ @subscribed = false
+ end

- highest_id
- end
+ # (see Base#global_subscribe)
+ def global_subscribe(last_id = nil)
+ raise ArgumentError unless block_given?

- def global_unsubscribe
- client.unsubscribe
- @subscribed = false
- end
+ highest_id = last_id

- def global_subscribe(last_id = nil, &blk)
- raise ArgumentError unless block_given?
- highest_id = last_id
+ begin
+ client.subscribe do |on|
+ h = {}

- begin
- client.subscribe do |on|
- h = {}
+ on.subscribe do
+ if highest_id
+ process_global_backlog(highest_id) do |m|
+ h[m.global_id] = true
+ yield m
+ end
+ end
+ @subscribed = true
+ end

- on.subscribe do
- if highest_id
- process_global_backlog(highest_id) do |m|
- h[m.global_id] = true
- yield m
+ on.unsubscribe do
+ @subscribed = false
+ end
+
+ on.message do |_c, m|
+ m = MessageBus::Message.decode m
+
+ # we have 3 options
+ #
+ # 1. message came in the correct order GREAT, just deal with it
+ # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
+ # 3. message came in the incorrect order and is lowest than current highest id, reset
+
+ if h
+ # If already yielded during the clear backlog when subscribing,
+ # don't yield a duplicate copy.
+ unless h.delete(m.global_id)
+ h = nil if h.empty?
+ yield m
+ end
+ else
+ yield m
+ end
  end
  end
- @subscribed = true
+ rescue => error
+ @config[:logger].warn "#{error} subscribe failed, reconnecting in 1 second. Call stack\n#{error.backtrace.join("\n")}"
+ sleep 1
+ retry
  end
+ end
+
+ private
+
+ def client
+ @client ||= new_connection
+ end
+
+ def new_connection
+ Client.new(@config)
+ end

- on.unsubscribe do
- @subscribed = false
+ def process_global_backlog(highest_id)
+ if highest_id > client.max_id
+ highest_id = 0
  end

- on.message do |c, m|
- m = MessageBus::Message.decode m
-
- # we have 3 options
- #
- # 1. message came in the correct order GREAT, just deal with it
- # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
- # 3. message came in the incorrect order and is lowest than current highest id, reset
-
- if h
- # If already yielded during the clear backlog when subscribing,
- # don't yield a duplicate copy.
- unless h.delete(m.global_id)
- h = nil if h.empty?
- yield m
- end
- else
- yield m
- end
+ global_backlog(highest_id).each do |old|
+ yield old
+ highest_id = old.global_id
  end
+
+ highest_id
  end
- rescue => error
- @config[:logger].warn "#{error} subscribe failed, reconnecting in 1 second. Call stack\n#{error.backtrace.join("\n")}"
- sleep 1
- retry
+
+ MessageBus::BACKENDS[:memory] = self
  end
  end
-
- MessageBus::BACKENDS[:memory] = self
  end
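
In 2.2.0.pre the memory backend therefore honours the per-publication options documented on Base#publish (with the exception of :queue_in_memory, per the @todo above). A small, hypothetical usage sketch; the channel name and payload are made up, and only API calls shown in this diff are used:

  # Illustrative only: exercising the rewritten memory backend directly.
  require "message_bus"
  require "message_bus/backends/memory"

  backend = MessageBus::Backends::Memory.new({}, 1000)

  # Third positional argument is the opts hash from Base#publish;
  # this channel's backlog will now expire after 60 seconds of inactivity.
  id = backend.publish("/chat", "hello world", { max_backlog_age: 60 })

  backend.last_id("/chat")        # => id
  backend.backlog("/chat", 0)     # => [MessageBus::Message with global_id == id]
  backend.global_backlog(0).size  # => 1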