message_bus 2.1.6 → 2.2.0.pre

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of message_bus might be problematic. Click here for more details.

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +13 -92
  3. data/.rubocop_todo.yml +659 -0
  4. data/.travis.yml +1 -1
  5. data/CHANGELOG +61 -0
  6. data/Dockerfile +18 -0
  7. data/Gemfile +3 -1
  8. data/Guardfile +0 -1
  9. data/README.md +188 -101
  10. data/Rakefile +12 -1
  11. data/assets/message-bus.js +1 -1
  12. data/docker-compose.yml +46 -0
  13. data/examples/bench/config.ru +8 -9
  14. data/examples/bench/unicorn.conf.rb +1 -1
  15. data/examples/chat/chat.rb +150 -153
  16. data/examples/minimal/config.ru +2 -3
  17. data/lib/message_bus.rb +224 -36
  18. data/lib/message_bus/backends.rb +7 -0
  19. data/lib/message_bus/backends/base.rb +184 -0
  20. data/lib/message_bus/backends/memory.rb +304 -226
  21. data/lib/message_bus/backends/postgres.rb +359 -318
  22. data/lib/message_bus/backends/redis.rb +380 -337
  23. data/lib/message_bus/client.rb +99 -41
  24. data/lib/message_bus/connection_manager.rb +29 -21
  25. data/lib/message_bus/diagnostics.rb +50 -41
  26. data/lib/message_bus/distributed_cache.rb +5 -7
  27. data/lib/message_bus/message.rb +2 -2
  28. data/lib/message_bus/rack/diagnostics.rb +65 -55
  29. data/lib/message_bus/rack/middleware.rb +64 -44
  30. data/lib/message_bus/rack/thin_ext.rb +13 -9
  31. data/lib/message_bus/rails/railtie.rb +2 -0
  32. data/lib/message_bus/timer_thread.rb +2 -2
  33. data/lib/message_bus/version.rb +2 -1
  34. data/message_bus.gemspec +3 -2
  35. data/spec/assets/support/jasmine_helper.rb +1 -1
  36. data/spec/lib/fake_async_middleware.rb +1 -6
  37. data/spec/lib/message_bus/assets/asset_encoding_spec.rb +3 -3
  38. data/spec/lib/message_bus/backend_spec.rb +409 -0
  39. data/spec/lib/message_bus/client_spec.rb +8 -11
  40. data/spec/lib/message_bus/connection_manager_spec.rb +8 -14
  41. data/spec/lib/message_bus/distributed_cache_spec.rb +0 -4
  42. data/spec/lib/message_bus/multi_process_spec.rb +6 -7
  43. data/spec/lib/message_bus/rack/middleware_spec.rb +47 -43
  44. data/spec/lib/message_bus/timer_thread_spec.rb +0 -2
  45. data/spec/lib/message_bus_spec.rb +59 -43
  46. data/spec/spec_helper.rb +16 -4
  47. metadata +12 -9
  48. data/spec/lib/message_bus/backends/postgres_spec.rb +0 -221
  49. data/spec/lib/message_bus/backends/redis_spec.rb +0 -271
@@ -1,406 +1,449 @@
1
1
  # frozen_string_literal: true
2
- #
2
+
3
3
  require 'redis'
4
4
  require 'digest'
5
5
 
6
- # the heart of the message bus, it acts as 2 things
7
- #
8
- # 1. A channel multiplexer
9
- # 2. Backlog storage per-multiplexed channel.
10
- #
11
- # ids are all sequencially increasing numbers starting at 0
12
- #
13
-
14
- module MessageBus::Redis; end
15
- class MessageBus::Redis::ReliablePubSub
16
- attr_reader :subscribed
17
- attr_accessor :max_backlog_size, :max_global_backlog_size, :max_in_memory_publish_backlog, :max_backlog_age
18
-
19
- UNSUB_MESSAGE = "$$UNSUBSCRIBE"
20
-
21
- class NoMoreRetries < StandardError; end
22
- class BackLogOutOfOrder < StandardError
23
- attr_accessor :highest_id
24
-
25
- def initialize(highest_id)
26
- @highest_id = highest_id
27
- end
28
- end
29
-
30
- # max_backlog_size is per multiplexed channel
31
- def initialize(redis_config = {}, max_backlog_size = 1000)
32
- @redis_config = redis_config.dup
33
- @logger = @redis_config[:logger]
34
- unless @redis_config[:enable_redis_logger]
35
- @redis_config[:logger] = nil
36
- end
37
- @max_backlog_size = max_backlog_size
38
- @max_global_backlog_size = 2000
39
- @max_in_memory_publish_backlog = 1000
40
- @in_memory_backlog = []
41
- @lock = Mutex.new
42
- @flush_backlog_thread = nil
43
- # after 7 days inactive backlogs will be removed
44
- @max_backlog_age = 604800
45
- end
46
-
47
- def new_redis_connection
48
- ::Redis.new(@redis_config)
49
- end
50
-
51
- def after_fork
52
- pub_redis.disconnect!
53
- end
54
-
55
- def redis_channel_name
56
- db = @redis_config[:db] || 0
57
- "_message_bus_#{db}"
58
- end
59
-
60
- # redis connection used for publishing messages
61
- def pub_redis
62
- @pub_redis ||= new_redis_connection
63
- end
6
+ require "message_bus/backends/base"
7
+
8
+ module MessageBus
9
+ module Backends
10
+ # The Redis backend stores published messages in Redis sorted sets (using
11
+ # ZADD, where the score is the message ID), one for each channel (where
12
+ # the full message is stored), and also in a global backlog as a simple
13
+ # pointer to the respective channel and channel-specific ID. In addition,
14
+ # publication publishes full messages to a Redis PubSub channel; this is
15
+ # used for actively subscribed message_bus servers to consume published
16
+ # messages in real-time while connected and forward them to subscribers,
17
+ # while catch-up is performed from the backlog sorted sets.
18
+ #
19
+ # Message lookup is performed using the Redis ZRANGEBYSCORE command, and
20
+ # backlog trimming uses ZREMRANGEBYSCORE. The last used channel-specific
21
+ # and global IDs are stored as integers in simple Redis keys and
22
+ # incremented on publication.
23
+ #
24
+ # Publication is implemented using a Lua script to ensure that it is
25
+ # atomic and messages are not corrupted by parallel publication.
26
+ #
27
+ # @note This backend diverges from the standard in Base in the following ways:
28
+ #
29
+ # * `max_backlog_age` options in this backend differ from the behaviour of
30
+ # other backends, in that either no messages are removed (when
31
+ # publications happen more regularly than this time-frame) or all
32
+ # messages are removed (when no publication happens during this
33
+ # time-frame).
34
+ #
35
+ # * `clear_every` is not a supported option for this backend.
36
+ #
37
+ # @see Base general information about message_bus backends
38
+ class Redis < Base
39
+ class BackLogOutOfOrder < StandardError
40
+ attr_accessor :highest_id
41
+
42
+ def initialize(highest_id)
43
+ @highest_id = highest_id
44
+ end
45
+ end
64
46
 
65
- def backlog_key(channel)
66
- "__mb_backlog_n_#{channel}"
67
- end
47
+ # @param [Hash] redis_config in addition to the options listed, see https://github.com/redis/redis-rb for other available options
48
+ # @option redis_config [Logger] :logger a logger to which logs will be output
49
+ # @option redis_config [Boolean] :enable_redis_logger (false) whether or not to enable logging by the underlying Redis library
50
+ # @param [Integer] max_backlog_size the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
51
+ def initialize(redis_config = {}, max_backlog_size = 1000)
52
+ @redis_config = redis_config.dup
53
+ @logger = @redis_config[:logger]
54
+ unless @redis_config[:enable_redis_logger]
55
+ @redis_config[:logger] = nil
56
+ end
57
+ @max_backlog_size = max_backlog_size
58
+ @max_global_backlog_size = 2000
59
+ @max_in_memory_publish_backlog = 1000
60
+ @in_memory_backlog = []
61
+ @lock = Mutex.new
62
+ @flush_backlog_thread = nil
63
+ # after 7 days inactive backlogs will be removed
64
+ @max_backlog_age = 604800
65
+ end
68
66
 
69
- def backlog_id_key(channel)
70
- "__mb_backlog_id_n_#{channel}"
71
- end
67
+ # Reconnects to Redis; used after a process fork, typically triggered by a forking webserver
68
+ # @see Base#after_fork
69
+ def after_fork
70
+ pub_redis.disconnect!
71
+ end
72
72
 
73
- def global_id_key
74
- "__mb_global_id_n"
75
- end
73
+ # (see Base#reset!)
74
+ def reset!
75
+ pub_redis.keys("__mb_*").each do |k|
76
+ pub_redis.del k
77
+ end
78
+ end
76
79
 
77
- def global_backlog_key
78
- "__mb_global_backlog_n"
79
- end
80
+ # Deletes all backlogs and their data. Does not delete ID pointers, so new publications will get IDs that continue from the last publication before the expiry. Use with extreme caution.
81
+ # @see Base#expire_all_backlogs!
82
+ def expire_all_backlogs!
83
+ pub_redis.keys("__mb_*backlog_n").each do |k|
84
+ pub_redis.del k
85
+ end
86
+ end
80
87
 
81
- # use with extreme care, will nuke all of the data
82
- def reset!
83
- pub_redis.keys("__mb_*").each do |k|
84
- pub_redis.del k
85
- end
86
- end
88
+ # Note, the script takes care of all expiry of keys, however
89
+ # we do not expire the global backlog key because we have no simple way to determine what it should be on publish
90
+ # we do not provide a mechanism to set a global max backlog age, only a per-channel which we can override on publish
91
+ LUA_PUBLISH = <<LUA
92
+
93
+ local start_payload = ARGV[1]
94
+ local max_backlog_age = ARGV[2]
95
+ local max_backlog_size = tonumber(ARGV[3])
96
+ local max_global_backlog_size = tonumber(ARGV[4])
97
+ local channel = ARGV[5]
98
+
99
+ local global_id_key = KEYS[1]
100
+ local backlog_id_key = KEYS[2]
101
+ local backlog_key = KEYS[3]
102
+ local global_backlog_key = KEYS[4]
103
+ local redis_channel_name = KEYS[5]
104
+
105
+ local global_id = redis.call("INCR", global_id_key)
106
+ local backlog_id = redis.call("INCR", backlog_id_key)
107
+ local payload = string.format("%i|%i|%s", global_id, backlog_id, start_payload)
108
+ local global_backlog_message = string.format("%i|%s", backlog_id, channel)
109
+
110
+ redis.call("ZADD", backlog_key, backlog_id, payload)
111
+ redis.call("EXPIRE", backlog_key, max_backlog_age)
112
+ redis.call("ZADD", global_backlog_key, global_id, global_backlog_message)
113
+ redis.call("EXPIRE", global_backlog_key, max_backlog_age)
114
+ redis.call("PUBLISH", redis_channel_name, payload)
115
+
116
+ redis.call("EXPIRE", backlog_id_key, max_backlog_age)
117
+
118
+ if backlog_id > max_backlog_size then
119
+ redis.call("ZREMRANGEBYSCORE", backlog_key, 1, backlog_id - max_backlog_size)
120
+ end
87
121
 
88
- LUA_PUBLISH = <<LUA
122
+ if global_id > max_global_backlog_size then
123
+ redis.call("ZREMRANGEBYSCORE", global_backlog_key, 1, global_id - max_global_backlog_size)
124
+ end
89
125
 
90
- local start_payload = ARGV[1]
91
- local max_backlog_age = ARGV[2]
92
- local max_backlog_size = tonumber(ARGV[3])
93
- local max_global_backlog_size = tonumber(ARGV[4])
94
- local channel = ARGV[5]
126
+ return backlog_id
127
+ LUA
95
128
 
96
- local global_id_key = KEYS[1]
97
- local backlog_id_key = KEYS[2]
98
- local backlog_key = KEYS[3]
99
- local global_backlog_key = KEYS[4]
100
- local redis_channel_name = KEYS[5]
129
+ LUA_PUBLISH_SHA1 = Digest::SHA1.hexdigest(LUA_PUBLISH)
130
+
131
+ # (see Base#publish)
132
+ def publish(channel, data, opts = nil)
133
+ queue_in_memory = (opts && opts[:queue_in_memory]) != false
134
+
135
+ max_backlog_age = (opts && opts[:max_backlog_age]) || self.max_backlog_age
136
+ max_backlog_size = (opts && opts[:max_backlog_size]) || self.max_backlog_size
137
+
138
+ redis = pub_redis
139
+ backlog_id_key = backlog_id_key(channel)
140
+ backlog_key = backlog_key(channel)
141
+
142
+ msg = MessageBus::Message.new nil, nil, channel, data
143
+
144
+ cached_eval(
145
+ redis,
146
+ LUA_PUBLISH,
147
+ LUA_PUBLISH_SHA1,
148
+ argv: [
149
+ msg.encode_without_ids,
150
+ max_backlog_age,
151
+ max_backlog_size,
152
+ max_global_backlog_size,
153
+ channel
154
+ ],
155
+ keys: [
156
+ global_id_key,
157
+ backlog_id_key,
158
+ backlog_key,
159
+ global_backlog_key,
160
+ redis_channel_name
161
+ ]
162
+ )
163
+ rescue ::Redis::CommandError => e
164
+ if queue_in_memory && e.message =~ /READONLY/
165
+ @lock.synchronize do
166
+ @in_memory_backlog << [channel, data]
167
+ if @in_memory_backlog.length > @max_in_memory_publish_backlog
168
+ @in_memory_backlog.delete_at(0)
169
+ @logger.warn("Dropping old message cause max_in_memory_publish_backlog is full: #{e.message}\n#{e.backtrace.join('\n')}")
170
+ end
171
+ end
101
172
 
102
- local global_id = redis.call("INCR", global_id_key)
103
- local backlog_id = redis.call("INCR", backlog_id_key)
104
- local payload = string.format("%i|%i|%s", global_id, backlog_id, start_payload)
105
- local global_backlog_message = string.format("%i|%s", backlog_id, channel)
173
+ if @flush_backlog_thread == nil
174
+ @lock.synchronize do
175
+ if @flush_backlog_thread == nil
176
+ @flush_backlog_thread = Thread.new { ensure_backlog_flushed }
177
+ end
178
+ end
179
+ end
180
+ nil
181
+ else
182
+ raise
183
+ end
184
+ end
106
185
 
107
- redis.call("ZADD", backlog_key, backlog_id, payload)
108
- redis.call("EXPIRE", backlog_key, max_backlog_age)
186
+ # (see Base#last_id)
187
+ def last_id(channel)
188
+ backlog_id_key = backlog_id_key(channel)
189
+ pub_redis.get(backlog_id_key).to_i
190
+ end
109
191
 
110
- redis.call("ZADD", global_backlog_key, global_id, global_backlog_message)
111
- redis.call("EXPIRE", global_backlog_key, max_backlog_age)
192
+ # (see Base#backlog)
193
+ def backlog(channel, last_id = 0)
194
+ redis = pub_redis
195
+ backlog_key = backlog_key(channel)
196
+ items = redis.zrangebyscore backlog_key, last_id.to_i + 1, "+inf"
112
197
 
113
- redis.call("PUBLISH", redis_channel_name, payload)
198
+ items.map do |i|
199
+ MessageBus::Message.decode(i)
200
+ end
201
+ end
114
202
 
115
- if backlog_id > max_backlog_size then
116
- redis.call("ZREMRANGEBYSCORE", backlog_key, 1, backlog_id - max_backlog_size)
117
- end
203
+ # (see Base#global_backlog)
204
+ def global_backlog(last_id = 0)
205
+ items = pub_redis.zrangebyscore global_backlog_key, last_id.to_i + 1, "+inf"
118
206
 
119
- if global_id > max_global_backlog_size then
120
- redis.call("ZREMRANGEBYSCORE", global_backlog_key, 1, global_id - max_global_backlog_size)
121
- end
207
+ items.map! do |i|
208
+ pipe = i.index "|"
209
+ message_id = i[0..pipe].to_i
210
+ channel = i[pipe + 1..-1]
211
+ m = get_message(channel, message_id)
212
+ m
213
+ end
122
214
 
123
- return backlog_id
215
+ items.compact!
216
+ items
217
+ end
124
218
 
125
- LUA
219
+ # (see Base#get_message)
220
+ def get_message(channel, message_id)
221
+ redis = pub_redis
222
+ backlog_key = backlog_key(channel)
126
223
 
127
- LUA_PUBLISH_SHA1 = Digest::SHA1.hexdigest(LUA_PUBLISH)
128
-
129
- def publish(channel, data, opts = nil)
130
- queue_in_memory = (opts && opts[:queue_in_memory]) != false
131
-
132
- max_backlog_age = (opts && opts[:max_backlog_age]) || self.max_backlog_age
133
- max_backlog_size = (opts && opts[:max_backlog_size]) || self.max_backlog_size
134
-
135
- redis = pub_redis
136
- backlog_id_key = backlog_id_key(channel)
137
- backlog_key = backlog_key(channel)
138
-
139
- msg = MessageBus::Message.new nil, nil, channel, data
140
-
141
- cached_eval(redis, LUA_PUBLISH, LUA_PUBLISH_SHA1,
142
- argv: [
143
- msg.encode_without_ids,
144
- max_backlog_age,
145
- max_backlog_size,
146
- max_global_backlog_size,
147
- channel
148
- ],
149
- keys: [
150
- global_id_key,
151
- backlog_id_key,
152
- backlog_key,
153
- global_backlog_key,
154
- redis_channel_name
155
- ]
156
- )
157
-
158
- rescue Redis::CommandError => e
159
- if queue_in_memory && e.message =~ /READONLY/
160
- @lock.synchronize do
161
- @in_memory_backlog << [channel, data]
162
- if @in_memory_backlog.length > @max_in_memory_publish_backlog
163
- @in_memory_backlog.delete_at(0)
164
- @logger.warn("Dropping old message cause max_in_memory_publish_backlog is full: #{e.message}\n#{e.backtrace.join('\n')}")
224
+ items = redis.zrangebyscore backlog_key, message_id, message_id
225
+ if items && items[0]
226
+ MessageBus::Message.decode(items[0])
227
+ else
228
+ nil
165
229
  end
166
230
  end
167
231
 
168
- if @flush_backlog_thread == nil
169
- @lock.synchronize do
170
- if @flush_backlog_thread == nil
171
- @flush_backlog_thread = Thread.new { ensure_backlog_flushed }
232
+ # (see Base#subscribe)
233
+ def subscribe(channel, last_id = nil)
234
+ # trivial implementation for now,
235
+ # can cut down on connections if we only have one global subscriber
236
+ raise ArgumentError unless block_given?
237
+
238
+ if last_id
239
+ # we need to translate this to a global id, at least give it a shot
240
+ # we are subscribing on global and global is always going to be bigger than local
241
+ # so worst case is a replay of a few messages
242
+ message = get_message(channel, last_id)
243
+ if message
244
+ last_id = message.global_id
172
245
  end
173
246
  end
247
+ global_subscribe(last_id) do |m|
248
+ yield m if m.channel == channel
249
+ end
174
250
  end
175
- nil
176
- else
177
- raise
178
- end
179
- end
180
-
181
- def ensure_backlog_flushed
182
- flushed = false
183
-
184
- while !flushed
185
- try_again = false
186
251
 
187
- if is_readonly?
188
- sleep 1
189
- next
252
+ # (see Base#global_unsubscribe)
253
+ def global_unsubscribe
254
+ if @redis_global
255
+ # new connection to avoid deadlock
256
+ new_redis_connection.publish(redis_channel_name, UNSUB_MESSAGE)
257
+ @redis_global.disconnect
258
+ @redis_global = nil
259
+ end
190
260
  end
191
261
 
192
- @lock.synchronize do
193
- if @in_memory_backlog.length == 0
194
- flushed = true
195
- break
262
+ # (see Base#global_subscribe)
263
+ def global_subscribe(last_id = nil, &blk)
264
+ raise ArgumentError unless block_given?
265
+
266
+ highest_id = last_id
267
+
268
+ clear_backlog = lambda do
269
+ retries = 4
270
+ begin
271
+ highest_id = process_global_backlog(highest_id, retries > 0, &blk)
272
+ rescue BackLogOutOfOrder => e
273
+ highest_id = e.highest_id
274
+ retries -= 1
275
+ sleep(rand(50) / 1000.0)
276
+ retry
277
+ end
196
278
  end
197
279
 
198
280
  begin
199
- # TODO recover special options
200
- publish(*@in_memory_backlog[0], queue_in_memory: false)
201
- rescue Redis::CommandError => e
202
- if e.message =~ /^READONLY/
203
- try_again = true
204
- else
205
- @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
281
+ @redis_global = new_redis_connection
282
+
283
+ if highest_id
284
+ clear_backlog.call(&blk)
206
285
  end
207
- rescue => e
208
- @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
209
- end
210
286
 
211
- @in_memory_backlog.delete_at(0) unless try_again
287
+ @redis_global.subscribe(redis_channel_name) do |on|
288
+ on.subscribe do
289
+ if highest_id
290
+ clear_backlog.call(&blk)
291
+ end
292
+ @subscribed = true
293
+ end
294
+
295
+ on.unsubscribe do
296
+ @subscribed = false
297
+ end
298
+
299
+ on.message do |_c, m|
300
+ if m == UNSUB_MESSAGE
301
+ @redis_global.unsubscribe
302
+ return
303
+ end
304
+ m = MessageBus::Message.decode m
305
+
306
+ # we have 3 options
307
+ #
308
+ # 1. message came in the correct order GREAT, just deal with it
309
+ # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
310
+ # 3. message came in the incorrect order and is lower than current highest id, reset
311
+
312
+ if highest_id.nil? || m.global_id == highest_id + 1
313
+ highest_id = m.global_id
314
+ yield m
315
+ else
316
+ clear_backlog.call(&blk)
317
+ end
318
+ end
319
+ end
320
+ rescue => error
321
+ @logger.warn "#{error} subscribe failed, reconnecting in 1 second. Call stack #{error.backtrace}"
322
+ sleep 1
323
+ retry
324
+ end
212
325
  end
213
- end
214
- ensure
215
- @lock.synchronize do
216
- @flush_backlog_thread = nil
217
- end
218
- end
219
-
220
- def last_id(channel)
221
- backlog_id_key = backlog_id_key(channel)
222
- pub_redis.get(backlog_id_key).to_i
223
- end
224
-
225
- def backlog(channel, last_id = nil)
226
- redis = pub_redis
227
- backlog_key = backlog_key(channel)
228
- items = redis.zrangebyscore backlog_key, last_id.to_i + 1, "+inf"
229
-
230
- items.map do |i|
231
- MessageBus::Message.decode(i)
232
- end
233
- end
234
326
 
235
- def global_backlog(last_id = nil)
236
- last_id = last_id.to_i
237
- redis = pub_redis
327
+ private
238
328
 
239
- items = redis.zrangebyscore global_backlog_key, last_id.to_i + 1, "+inf"
329
+ def new_redis_connection
330
+ ::Redis.new(@redis_config)
331
+ end
240
332
 
241
- items.map! do |i|
242
- pipe = i.index "|"
243
- message_id = i[0..pipe].to_i
244
- channel = i[pipe + 1..-1]
245
- m = get_message(channel, message_id)
246
- m
247
- end
333
+ # redis connection used for publishing messages
334
+ def pub_redis
335
+ @pub_redis ||= new_redis_connection
336
+ end
248
337
 
249
- items.compact!
250
- items
251
- end
338
+ def redis_channel_name
339
+ db = @redis_config[:db] || 0
340
+ "_message_bus_#{db}"
341
+ end
252
342
 
253
- def get_message(channel, message_id)
254
- redis = pub_redis
255
- backlog_key = backlog_key(channel)
343
+ def backlog_key(channel)
344
+ "__mb_backlog_n_#{channel}"
345
+ end
256
346
 
257
- items = redis.zrangebyscore backlog_key, message_id, message_id
258
- if items && items[0]
259
- MessageBus::Message.decode(items[0])
260
- else
261
- nil
262
- end
263
- end
347
+ def backlog_id_key(channel)
348
+ "__mb_backlog_id_n_#{channel}"
349
+ end
264
350
 
265
- def subscribe(channel, last_id = nil)
266
- # trivial implementation for now,
267
- # can cut down on connections if we only have one global subscriber
268
- raise ArgumentError unless block_given?
269
-
270
- if last_id
271
- # we need to translate this to a global id, at least give it a shot
272
- # we are subscribing on global and global is always going to be bigger than local
273
- # so worst case is a replay of a few messages
274
- message = get_message(channel, last_id)
275
- if message
276
- last_id = message.global_id
351
+ def global_id_key
352
+ "__mb_global_id_n"
277
353
  end
278
- end
279
- global_subscribe(last_id) do |m|
280
- yield m if m.channel == channel
281
- end
282
- end
283
354
 
284
- def process_global_backlog(highest_id, raise_error, &blk)
285
- if highest_id > pub_redis.get(global_id_key).to_i
286
- highest_id = 0
287
- end
355
+ def global_backlog_key
356
+ "__mb_global_backlog_n"
357
+ end
288
358
 
289
- global_backlog(highest_id).each do |old|
290
- if highest_id + 1 == old.global_id
291
- yield old
292
- highest_id = old.global_id
293
- else
294
- raise BackLogOutOfOrder.new(highest_id) if raise_error
295
- if old.global_id > highest_id
296
- yield old
297
- highest_id = old.global_id
359
+ def process_global_backlog(highest_id, raise_error)
360
+ if highest_id > pub_redis.get(global_id_key).to_i
361
+ highest_id = 0
298
362
  end
299
- end
300
- end
301
363
 
302
- highest_id
303
- end
364
+ global_backlog(highest_id).each do |old|
365
+ if highest_id + 1 == old.global_id
366
+ yield old
367
+ highest_id = old.global_id
368
+ else
369
+ raise BackLogOutOfOrder.new(highest_id) if raise_error
304
370
 
305
- def global_unsubscribe
306
- if @redis_global
307
- # new connection to avoid deadlock
308
- new_redis_connection.publish(redis_channel_name, UNSUB_MESSAGE)
309
- @redis_global.disconnect
310
- @redis_global = nil
311
- end
312
- end
371
+ if old.global_id > highest_id
372
+ yield old
373
+ highest_id = old.global_id
374
+ end
375
+ end
376
+ end
313
377
 
314
- def global_subscribe(last_id = nil, &blk)
315
- raise ArgumentError unless block_given?
316
- highest_id = last_id
317
-
318
- clear_backlog = lambda do
319
- retries = 4
320
- begin
321
- highest_id = process_global_backlog(highest_id, retries > 0, &blk)
322
- rescue BackLogOutOfOrder => e
323
- highest_id = e.highest_id
324
- retries -= 1
325
- sleep(rand(50) / 1000.0)
326
- retry
378
+ highest_id
327
379
  end
328
- end
329
380
 
330
- begin
331
- @redis_global = new_redis_connection
381
+ def ensure_backlog_flushed
382
+ flushed = false
332
383
 
333
- if highest_id
334
- clear_backlog.call(&blk)
335
- end
384
+ while !flushed
385
+ try_again = false
336
386
 
337
- @redis_global.subscribe(redis_channel_name) do |on|
338
- on.subscribe do
339
- if highest_id
340
- clear_backlog.call(&blk)
387
+ if is_readonly?
388
+ sleep 1
389
+ next
341
390
  end
342
- @subscribed = true
343
- end
344
-
345
- on.unsubscribe do
346
- @subscribed = false
347
- end
348
391
 
349
- on.message do |c, m|
350
- if m == UNSUB_MESSAGE
351
- @redis_global.unsubscribe
352
- return
392
+ @lock.synchronize do
393
+ if @in_memory_backlog.length == 0
394
+ flushed = true
395
+ break
396
+ end
397
+
398
+ begin
399
+ # TODO recover special options
400
+ publish(*@in_memory_backlog[0], queue_in_memory: false)
401
+ rescue ::Redis::CommandError => e
402
+ if e.message =~ /^READONLY/
403
+ try_again = true
404
+ else
405
+ @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
406
+ end
407
+ rescue => e
408
+ @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
409
+ end
410
+
411
+ @in_memory_backlog.delete_at(0) unless try_again
353
412
  end
354
- m = MessageBus::Message.decode m
355
-
356
- # we have 3 options
357
- #
358
- # 1. message came in the correct order GREAT, just deal with it
359
- # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
360
- # 3. message came in the incorrect order and is lowest than current highest id, reset
413
+ end
414
+ ensure
415
+ @lock.synchronize do
416
+ @flush_backlog_thread = nil
417
+ end
418
+ end
361
419
 
362
- if highest_id.nil? || m.global_id == highest_id + 1
363
- highest_id = m.global_id
364
- yield m
420
+ def cached_eval(redis, script, script_sha1, params)
421
+ begin
422
+ redis.evalsha script_sha1, params
423
+ rescue ::Redis::CommandError => e
424
+ if e.to_s =~ /^NOSCRIPT/
425
+ redis.eval script, params
365
426
  else
366
- clear_backlog.call(&blk)
427
+ raise
367
428
  end
368
429
  end
369
430
  end
370
- rescue => error
371
- @logger.warn "#{error} subscribe failed, reconnecting in 1 second. Call stack #{error.backtrace}"
372
- sleep 1
373
- retry
374
- end
375
- end
376
431
 
377
- private
432
+ def is_readonly?
433
+ key = "__mb_is_readonly"
378
434
 
379
- def cached_eval(redis, script, script_sha1, params)
380
- begin
381
- redis.evalsha script_sha1, params
382
- rescue Redis::CommandError => e
383
- if e.to_s =~ /^NOSCRIPT/
384
- redis.eval script, params
385
- else
386
- raise
435
+ begin
436
+ # in case we are not connected to the correct server
437
+ # which can happen when sharing ips
438
+ pub_redis.client.reconnect
439
+ pub_redis.client.call([:set, key, '1'])
440
+ false
441
+ rescue ::Redis::CommandError => e
442
+ return true if e.message =~ /^READONLY/
443
+ end
387
444
  end
388
- end
389
- end
390
445
 
391
- def is_readonly?
392
- key = "__mb_is_readonly"
393
-
394
- begin
395
- # in case we are not connected to the correct server
396
- # which can happen when sharing ips
397
- pub_redis.client.reconnect
398
- pub_redis.client.call([:set, key, '1'])
399
- false
400
- rescue Redis::CommandError => e
401
- return true if e.message =~ /^READONLY/
446
+ MessageBus::BACKENDS[:redis] = self
402
447
  end
403
448
  end
404
-
405
- MessageBus::BACKENDS[:redis] = self
406
449
  end