message_bus 2.1.6 → 2.2.0.pre
Potentially problematic release: this version of message_bus is a pre-release and might be problematic.
- checksums.yaml +4 -4
- data/.rubocop.yml +13 -92
- data/.rubocop_todo.yml +659 -0
- data/.travis.yml +1 -1
- data/CHANGELOG +61 -0
- data/Dockerfile +18 -0
- data/Gemfile +3 -1
- data/Guardfile +0 -1
- data/README.md +188 -101
- data/Rakefile +12 -1
- data/assets/message-bus.js +1 -1
- data/docker-compose.yml +46 -0
- data/examples/bench/config.ru +8 -9
- data/examples/bench/unicorn.conf.rb +1 -1
- data/examples/chat/chat.rb +150 -153
- data/examples/minimal/config.ru +2 -3
- data/lib/message_bus.rb +224 -36
- data/lib/message_bus/backends.rb +7 -0
- data/lib/message_bus/backends/base.rb +184 -0
- data/lib/message_bus/backends/memory.rb +304 -226
- data/lib/message_bus/backends/postgres.rb +359 -318
- data/lib/message_bus/backends/redis.rb +380 -337
- data/lib/message_bus/client.rb +99 -41
- data/lib/message_bus/connection_manager.rb +29 -21
- data/lib/message_bus/diagnostics.rb +50 -41
- data/lib/message_bus/distributed_cache.rb +5 -7
- data/lib/message_bus/message.rb +2 -2
- data/lib/message_bus/rack/diagnostics.rb +65 -55
- data/lib/message_bus/rack/middleware.rb +64 -44
- data/lib/message_bus/rack/thin_ext.rb +13 -9
- data/lib/message_bus/rails/railtie.rb +2 -0
- data/lib/message_bus/timer_thread.rb +2 -2
- data/lib/message_bus/version.rb +2 -1
- data/message_bus.gemspec +3 -2
- data/spec/assets/support/jasmine_helper.rb +1 -1
- data/spec/lib/fake_async_middleware.rb +1 -6
- data/spec/lib/message_bus/assets/asset_encoding_spec.rb +3 -3
- data/spec/lib/message_bus/backend_spec.rb +409 -0
- data/spec/lib/message_bus/client_spec.rb +8 -11
- data/spec/lib/message_bus/connection_manager_spec.rb +8 -14
- data/spec/lib/message_bus/distributed_cache_spec.rb +0 -4
- data/spec/lib/message_bus/multi_process_spec.rb +6 -7
- data/spec/lib/message_bus/rack/middleware_spec.rb +47 -43
- data/spec/lib/message_bus/timer_thread_spec.rb +0 -2
- data/spec/lib/message_bus_spec.rb +59 -43
- data/spec/spec_helper.rb +16 -4
- metadata +12 -9
- data/spec/lib/message_bus/backends/postgres_spec.rb +0 -221
- data/spec/lib/message_bus/backends/redis_spec.rb +0 -271
data/lib/message_bus/backends/redis.rb, @@ -1,406 +1,449 @@ (2.2.0.pre version shown below):

# frozen_string_literal: true

require 'redis'
require 'digest'

require "message_bus/backends/base"

module MessageBus
  module Backends
    # The Redis backend stores published messages in Redis sorted sets (using
    # ZADD, where the score is the message ID), one for each channel (where
    # the full message is stored), and also in a global backlog as a simple
    # pointer to the respective channel and channel-specific ID. In addition,
    # publication publishes full messages to a Redis PubSub channel; this is
    # used for actively subscribed message_bus servers to consume published
    # messages in real-time while connected and forward them to subscribers,
    # while catch-up is performed from the backlog sorted sets.
    #
    # Message lookup is performed using the Redis ZRANGEBYSCORE command, and
    # backlog trimming uses ZREMRANGEBYSCORE. The last used channel-specific
    # and global IDs are stored as integers in simple Redis keys and
    # incremented on publication.
    #
    # Publication is implemented using a Lua script to ensure that it is
    # atomic and messages are not corrupted by parallel publication.
    #
    # @note This backend diverges from the standard in Base in the following ways:
    #
    #   * `max_backlog_age` options in this backend differ from the behaviour of
    #     other backends, in that either no messages are removed (when
    #     publications happen more regularly than this time-frame) or all
    #     messages are removed (when no publication happens during this
    #     time-frame).
    #
    #   * `clear_every` is not a supported option for this backend.
    #
    # @see Base general information about message_bus backends
    class Redis < Base
      class BackLogOutOfOrder < StandardError
        attr_accessor :highest_id

        def initialize(highest_id)
          @highest_id = highest_id
        end
      end

      # @param [Hash] redis_config in addition to the options listed, see https://github.com/redis/redis-rb for other available options
      # @option redis_config [Logger] :logger a logger to which logs will be output
      # @option redis_config [Boolean] :enable_redis_logger (false) whether or not to enable logging by the underlying Redis library
      # @param [Integer] max_backlog_size the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
      def initialize(redis_config = {}, max_backlog_size = 1000)
        @redis_config = redis_config.dup
        @logger = @redis_config[:logger]
        unless @redis_config[:enable_redis_logger]
          @redis_config[:logger] = nil
        end
        @max_backlog_size = max_backlog_size
        @max_global_backlog_size = 2000
        @max_in_memory_publish_backlog = 1000
        @in_memory_backlog = []
        @lock = Mutex.new
        @flush_backlog_thread = nil
        # after 7 days inactive backlogs will be removed
        @max_backlog_age = 604800
      end

      # Reconnects to Redis; used after a process fork, typically triggerd by a forking webserver
      # @see Base#after_fork
      def after_fork
        pub_redis.disconnect!
      end

      # (see Base#reset!)
      def reset!
        pub_redis.keys("__mb_*").each do |k|
          pub_redis.del k
        end
      end

      # Deletes all backlogs and their data. Does not delete ID pointers, so new publications will get IDs that continue from the last publication before the expiry. Use with extreme caution.
      # @see Base#expire_all_backlogs!
      def expire_all_backlogs!
        pub_redis.keys("__mb_*backlog_n").each do |k|
          pub_redis.del k
        end
      end

      # Note, the script takes care of all expiry of keys, however
      # we do not expire the global backlog key cause we have no simple way to determine what it should be on publish
      # we do not provide a mechanism to set a global max backlog age, only a per-channel which we can override on publish
      LUA_PUBLISH = <<LUA

      local start_payload = ARGV[1]
      local max_backlog_age = ARGV[2]
      local max_backlog_size = tonumber(ARGV[3])
      local max_global_backlog_size = tonumber(ARGV[4])
      local channel = ARGV[5]

      local global_id_key = KEYS[1]
      local backlog_id_key = KEYS[2]
      local backlog_key = KEYS[3]
      local global_backlog_key = KEYS[4]
      local redis_channel_name = KEYS[5]

      local global_id = redis.call("INCR", global_id_key)
      local backlog_id = redis.call("INCR", backlog_id_key)
      local payload = string.format("%i|%i|%s", global_id, backlog_id, start_payload)
      local global_backlog_message = string.format("%i|%s", backlog_id, channel)

      redis.call("ZADD", backlog_key, backlog_id, payload)
      redis.call("EXPIRE", backlog_key, max_backlog_age)
      redis.call("ZADD", global_backlog_key, global_id, global_backlog_message)
      redis.call("EXPIRE", global_backlog_key, max_backlog_age)
      redis.call("PUBLISH", redis_channel_name, payload)

      redis.call("EXPIRE", backlog_id_key, max_backlog_age)

      if backlog_id > max_backlog_size then
        redis.call("ZREMRANGEBYSCORE", backlog_key, 1, backlog_id - max_backlog_size)
      end

      if global_id > max_global_backlog_size then
        redis.call("ZREMRANGEBYSCORE", global_backlog_key, 1, global_id - max_global_backlog_size)
      end

      return backlog_id
LUA

      LUA_PUBLISH_SHA1 = Digest::SHA1.hexdigest(LUA_PUBLISH)

      # (see Base#publish)
      def publish(channel, data, opts = nil)
        queue_in_memory = (opts && opts[:queue_in_memory]) != false

        max_backlog_age = (opts && opts[:max_backlog_age]) || self.max_backlog_age
        max_backlog_size = (opts && opts[:max_backlog_size]) || self.max_backlog_size

        redis = pub_redis
        backlog_id_key = backlog_id_key(channel)
        backlog_key = backlog_key(channel)

        msg = MessageBus::Message.new nil, nil, channel, data

        cached_eval(
          redis,
          LUA_PUBLISH,
          LUA_PUBLISH_SHA1,
          argv: [
            msg.encode_without_ids,
            max_backlog_age,
            max_backlog_size,
            max_global_backlog_size,
            channel
          ],
          keys: [
            global_id_key,
            backlog_id_key,
            backlog_key,
            global_backlog_key,
            redis_channel_name
          ]
        )
      rescue ::Redis::CommandError => e
        if queue_in_memory && e.message =~ /READONLY/
          @lock.synchronize do
            @in_memory_backlog << [channel, data]
            if @in_memory_backlog.length > @max_in_memory_publish_backlog
              @in_memory_backlog.delete_at(0)
              @logger.warn("Dropping old message cause max_in_memory_publish_backlog is full: #{e.message}\n#{e.backtrace.join('\n')}")
            end
          end

          if @flush_backlog_thread == nil
            @lock.synchronize do
              if @flush_backlog_thread == nil
                @flush_backlog_thread = Thread.new { ensure_backlog_flushed }
              end
            end
          end
          nil
        else
          raise
        end
      end

      # (see Base#last_id)
      def last_id(channel)
        backlog_id_key = backlog_id_key(channel)
        pub_redis.get(backlog_id_key).to_i
      end

      # (see Base#backlog)
      def backlog(channel, last_id = 0)
        redis = pub_redis
        backlog_key = backlog_key(channel)
        items = redis.zrangebyscore backlog_key, last_id.to_i + 1, "+inf"

        items.map do |i|
          MessageBus::Message.decode(i)
        end
      end

      # (see Base#global_backlog)
      def global_backlog(last_id = 0)
        items = pub_redis.zrangebyscore global_backlog_key, last_id.to_i + 1, "+inf"

        items.map! do |i|
          pipe = i.index "|"
          message_id = i[0..pipe].to_i
          channel = i[pipe + 1..-1]
          m = get_message(channel, message_id)
          m
        end

        items.compact!
        items
      end

      # (see Base#get_message)
      def get_message(channel, message_id)
        redis = pub_redis
        backlog_key = backlog_key(channel)

        items = redis.zrangebyscore backlog_key, message_id, message_id
        if items && items[0]
          MessageBus::Message.decode(items[0])
        else
          nil
        end
      end

      # (see Base#subscribe)
      def subscribe(channel, last_id = nil)
        # trivial implementation for now,
        # can cut down on connections if we only have one global subscriber
        raise ArgumentError unless block_given?

        if last_id
          # we need to translate this to a global id, at least give it a shot
          # we are subscribing on global and global is always going to be bigger than local
          # so worst case is a replay of a few messages
          message = get_message(channel, last_id)
          if message
            last_id = message.global_id
          end
        end
        global_subscribe(last_id) do |m|
          yield m if m.channel == channel
        end
      end

      # (see Base#global_unsubscribe)
      def global_unsubscribe
        if @redis_global
          # new connection to avoid deadlock
          new_redis_connection.publish(redis_channel_name, UNSUB_MESSAGE)
          @redis_global.disconnect
          @redis_global = nil
        end
      end

      # (see Base#global_subscribe)
      def global_subscribe(last_id = nil, &blk)
        raise ArgumentError unless block_given?

        highest_id = last_id

        clear_backlog = lambda do
          retries = 4
          begin
            highest_id = process_global_backlog(highest_id, retries > 0, &blk)
          rescue BackLogOutOfOrder => e
            highest_id = e.highest_id
            retries -= 1
            sleep(rand(50) / 1000.0)
            retry
          end
        end

        begin
          @redis_global = new_redis_connection

          if highest_id
            clear_backlog.call(&blk)
          end

          @redis_global.subscribe(redis_channel_name) do |on|
            on.subscribe do
              if highest_id
                clear_backlog.call(&blk)
              end
              @subscribed = true
            end

            on.unsubscribe do
              @subscribed = false
            end

            on.message do |_c, m|
              if m == UNSUB_MESSAGE
                @redis_global.unsubscribe
                return
              end
              m = MessageBus::Message.decode m

              # we have 3 options
              #
              # 1. message came in the correct order GREAT, just deal with it
              # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
              # 3. message came in the incorrect order and is lowest than current highest id, reset

              if highest_id.nil? || m.global_id == highest_id + 1
                highest_id = m.global_id
                yield m
              else
                clear_backlog.call(&blk)
              end
            end
          end
        rescue => error
          @logger.warn "#{error} subscribe failed, reconnecting in 1 second. Call stack #{error.backtrace}"
          sleep 1
          retry
        end
      end

      private

      def new_redis_connection
        ::Redis.new(@redis_config)
      end

      # redis connection used for publishing messages
      def pub_redis
        @pub_redis ||= new_redis_connection
      end

      def redis_channel_name
        db = @redis_config[:db] || 0
        "_message_bus_#{db}"
      end

      def backlog_key(channel)
        "__mb_backlog_n_#{channel}"
      end

      def backlog_id_key(channel)
        "__mb_backlog_id_n_#{channel}"
      end

      def global_id_key
        "__mb_global_id_n"
      end

      def global_backlog_key
        "__mb_global_backlog_n"
      end

      def process_global_backlog(highest_id, raise_error)
        if highest_id > pub_redis.get(global_id_key).to_i
          highest_id = 0
        end

        global_backlog(highest_id).each do |old|
          if highest_id + 1 == old.global_id
            yield old
            highest_id = old.global_id
          else
            raise BackLogOutOfOrder.new(highest_id) if raise_error

            if old.global_id > highest_id
              yield old
              highest_id = old.global_id
            end
          end
        end

        highest_id
      end

      def ensure_backlog_flushed
        flushed = false

        while !flushed
          try_again = false

          if is_readonly?
            sleep 1
            next
          end

          @lock.synchronize do
            if @in_memory_backlog.length == 0
              flushed = true
              break
            end

            begin
              # TODO recover special options
              publish(*@in_memory_backlog[0], queue_in_memory: false)
            rescue ::Redis::CommandError => e
              if e.message =~ /^READONLY/
                try_again = true
              else
                @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
              end
            rescue => e
              @logger.warn("Dropping undeliverable message: #{e.message}\n#{e.backtrace.join('\n')}")
            end

            @in_memory_backlog.delete_at(0) unless try_again
          end
        end
      ensure
        @lock.synchronize do
          @flush_backlog_thread = nil
        end
      end

      def cached_eval(redis, script, script_sha1, params)
        begin
          redis.evalsha script_sha1, params
        rescue ::Redis::CommandError => e
          if e.to_s =~ /^NOSCRIPT/
            redis.eval script, params
          else
            raise
          end
        end
      end

      def is_readonly?
        key = "__mb_is_readonly"

        begin
          # in case we are not connected to the correct server
          # which can happen when sharing ips
          pub_redis.client.reconnect
          pub_redis.client.call([:set, key, '1'])
          false
        rescue ::Redis::CommandError => e
          return true if e.message =~ /^READONLY/
        end
      end

      MessageBus::BACKENDS[:redis] = self
    end
  end
end