message_bus 2.1.6 → 2.2.0.pre


Potentially problematic release: this version of message_bus might be problematic.

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +13 -92
  3. data/.rubocop_todo.yml +659 -0
  4. data/.travis.yml +1 -1
  5. data/CHANGELOG +61 -0
  6. data/Dockerfile +18 -0
  7. data/Gemfile +3 -1
  8. data/Guardfile +0 -1
  9. data/README.md +188 -101
  10. data/Rakefile +12 -1
  11. data/assets/message-bus.js +1 -1
  12. data/docker-compose.yml +46 -0
  13. data/examples/bench/config.ru +8 -9
  14. data/examples/bench/unicorn.conf.rb +1 -1
  15. data/examples/chat/chat.rb +150 -153
  16. data/examples/minimal/config.ru +2 -3
  17. data/lib/message_bus.rb +224 -36
  18. data/lib/message_bus/backends.rb +7 -0
  19. data/lib/message_bus/backends/base.rb +184 -0
  20. data/lib/message_bus/backends/memory.rb +304 -226
  21. data/lib/message_bus/backends/postgres.rb +359 -318
  22. data/lib/message_bus/backends/redis.rb +380 -337
  23. data/lib/message_bus/client.rb +99 -41
  24. data/lib/message_bus/connection_manager.rb +29 -21
  25. data/lib/message_bus/diagnostics.rb +50 -41
  26. data/lib/message_bus/distributed_cache.rb +5 -7
  27. data/lib/message_bus/message.rb +2 -2
  28. data/lib/message_bus/rack/diagnostics.rb +65 -55
  29. data/lib/message_bus/rack/middleware.rb +64 -44
  30. data/lib/message_bus/rack/thin_ext.rb +13 -9
  31. data/lib/message_bus/rails/railtie.rb +2 -0
  32. data/lib/message_bus/timer_thread.rb +2 -2
  33. data/lib/message_bus/version.rb +2 -1
  34. data/message_bus.gemspec +3 -2
  35. data/spec/assets/support/jasmine_helper.rb +1 -1
  36. data/spec/lib/fake_async_middleware.rb +1 -6
  37. data/spec/lib/message_bus/assets/asset_encoding_spec.rb +3 -3
  38. data/spec/lib/message_bus/backend_spec.rb +409 -0
  39. data/spec/lib/message_bus/client_spec.rb +8 -11
  40. data/spec/lib/message_bus/connection_manager_spec.rb +8 -14
  41. data/spec/lib/message_bus/distributed_cache_spec.rb +0 -4
  42. data/spec/lib/message_bus/multi_process_spec.rb +6 -7
  43. data/spec/lib/message_bus/rack/middleware_spec.rb +47 -43
  44. data/spec/lib/message_bus/timer_thread_spec.rb +0 -2
  45. data/spec/lib/message_bus_spec.rb +59 -43
  46. data/spec/spec_helper.rb +16 -4
  47. metadata +12 -9
  48. data/spec/lib/message_bus/backends/postgres_spec.rb +0 -221
  49. data/spec/lib/message_bus/backends/redis_spec.rb +0 -271
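Note: because this is a prerelease version, Bundler will not pick it up by default; a minimal Gemfile sketch for trying it out (the Postgres backend shown in the diff below additionally requires the pg gem):

    # Gemfile sketch: pin the prerelease explicitly; Bundler skips .pre versions otherwise
    gem 'message_bus', '2.2.0.pre'
    gem 'pg' # only needed when using the Postgres backend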
data/lib/message_bus/backends/postgres.rb
@@ -1,389 +1,430 @@
 # frozen_string_literal: true
-require 'pg'
-
-module MessageBus::Postgres; end

-class MessageBus::Postgres::Client
-  INHERITED_CONNECTIONS = []
+require 'pg'

-  class Listener
-    attr_reader :do_sub, :do_unsub, :do_message
+require "message_bus/backends/base"
+
+module MessageBus
+  module Backends
+    # The Postgres backend stores published messages in a single Postgres table
+    # with only global IDs, and an index on channel name and ID for fast
+    # per-channel lookup. All queries are implemented as prepared statements
+    # to reduce the wire-chatter during use. In addition to storage in the
+    # table, messages are published using `pg_notify`; this is used for
+    # actively subscribed message_bus servers to consume published messages in
+    # real-time while connected and forward them to subscribers, while catch-up
+    # is performed from the backlog table.
+    #
+    # @note This backend diverges from the standard in Base in the following ways:
+    #
+    # * Does not support in-memory buffering of messages on publication
+    # * Does not expire backlogs until they are published to
+    #
+    # @see Base general information about message_bus backends
+    class Postgres < Base
+      class Client
+        INHERITED_CONNECTIONS = []
+
+        class Listener
+          attr_reader :do_sub, :do_unsub, :do_message
+
+          def subscribe(&block)
+            @do_sub = block
+          end

-    def subscribe(&block)
-      @do_sub = block
-    end
+          def unsubscribe(&block)
+            @do_unsub = block
+          end

-    def unsubscribe(&block)
-      @do_unsub = block
-    end
+          def message(&block)
+            @do_message = block
+          end
+        end

-    def message(&block)
-      @do_message = block
-    end
-  end
+        def initialize(config)
+          @config = config
+          @listening_on = {}
+          @available = []
+          @allocated = {}
+          @mutex = Mutex.new
+          @pid = Process.pid
+        end

-  def initialize(config)
-    @config = config
-    @listening_on = {}
-    @available = []
-    @allocated = {}
-    @mutex = Mutex.new
-    @pid = Process.pid
-  end
+        def add(channel, value)
+          hold { |conn| exec_prepared(conn, 'insert_message', [channel, value]) { |r| r.getvalue(0, 0).to_i } }
+        end

-  def add(channel, value)
-    hold { |conn| exec_prepared(conn, 'insert_message', [channel, value]) { |r| r.getvalue(0, 0).to_i } }
-  end
+        def clear_global_backlog(backlog_id, num_to_keep)
+          if backlog_id > num_to_keep
+            hold { |conn| exec_prepared(conn, 'clear_global_backlog', [backlog_id - num_to_keep]) }
+            nil
+          end
+        end

-  def clear_global_backlog(backlog_id, num_to_keep)
-    if backlog_id > num_to_keep
-      hold { |conn| exec_prepared(conn, 'clear_global_backlog', [backlog_id - num_to_keep]) }
-      nil
-    end
-  end
+        def clear_channel_backlog(channel, backlog_id, num_to_keep)
+          hold { |conn| exec_prepared(conn, 'clear_channel_backlog', [channel, backlog_id, num_to_keep]) }
+          nil
+        end

-  def clear_channel_backlog(channel, backlog_id, num_to_keep)
-    hold { |conn| exec_prepared(conn, 'clear_channel_backlog', [channel, backlog_id, num_to_keep]) }
-    nil
-  end
+        def expire(max_backlog_age)
+          hold { |conn| exec_prepared(conn, 'expire', [max_backlog_age]) }
+          nil
+        end

-  def expire(max_backlog_age)
-    hold { |conn| exec_prepared(conn, 'expire', [max_backlog_age]) }
-    nil
-  end
+        def backlog(channel, backlog_id)
+          hold do |conn|
+            exec_prepared(conn, 'channel_backlog', [channel, backlog_id]) { |r| r.values.each { |a| a[0] = a[0].to_i } }
+          end || []
+        end

-  def backlog(channel, backlog_id)
-    hold do |conn|
-      exec_prepared(conn, 'channel_backlog', [channel, backlog_id]) { |r| r.values.each { |a| a[0] = a[0].to_i } }
-    end || []
-  end
+        def global_backlog(backlog_id)
+          hold do |conn|
+            exec_prepared(conn, 'global_backlog', [backlog_id]) { |r| r.values.each { |a| a[0] = a[0].to_i } }
+          end || []
+        end

-  def global_backlog(backlog_id)
-    hold do |conn|
-      exec_prepared(conn, 'global_backlog', [backlog_id]) { |r| r.values.each { |a| a[0] = a[0].to_i } }
-    end || []
-  end
+        def get_value(channel, id)
+          hold { |conn| exec_prepared(conn, 'get_message', [channel, id]) { |r| r.getvalue(0, 0) } }
+        end

-  def get_value(channel, id)
-    hold { |conn| exec_prepared(conn, 'get_message', [channel, id]) { |r| r.getvalue(0, 0) } }
-  end
+        def reconnect
+          sync do
+            @listening_on.clear
+            @available.clear
+          end
+        end

-  def reconnect
-    sync do
-      @listening_on.clear
-      @available.clear
-    end
-  end
+        # Dangerous, drops the message_bus table containing the backlog if it exists.
+        def reset!
+          hold do |conn|
+            conn.exec 'DROP TABLE IF EXISTS message_bus'
+            create_table(conn)
+          end
+        end

-  # Dangerous, drops the message_bus table containing the backlog if it exists.
-  def reset!
-    hold do |conn|
-      conn.exec 'DROP TABLE IF EXISTS message_bus'
-      create_table(conn)
-    end
-  end
+        # use with extreme care, will nuke all of the data
+        def expire_all_backlogs!
+          reset!
+        end

-  def max_id(channel = nil)
-    block = proc do |r|
-      if r.ntuples > 0
-        r.getvalue(0, 0).to_i
-      else
-        0
-      end
-    end
+        def max_id(channel = nil)
+          block = proc do |r|
+            if r.ntuples > 0
+              r.getvalue(0, 0).to_i
+            else
+              0
+            end
+          end

-    if channel
-      hold { |conn| exec_prepared(conn, 'max_channel_id', [channel], &block) }
-    else
-      hold { |conn| exec_prepared(conn, 'max_id', &block) }
-    end
-  end
+          if channel
+            hold { |conn| exec_prepared(conn, 'max_channel_id', [channel], &block) }
+          else
+            hold { |conn| exec_prepared(conn, 'max_id', &block) }
+          end
+        end

-  def publish(channel, data)
-    hold { |conn| exec_prepared(conn, 'publish', [channel, data]) }
-  end
+        def publish(channel, data)
+          hold { |conn| exec_prepared(conn, 'publish', [channel, data]) }
+        end

-  def subscribe(channel)
-    obj = Object.new
-    sync { @listening_on[channel] = obj }
-    listener = Listener.new
-    yield listener
-
-    conn = raw_pg_connection
-    conn.exec "LISTEN #{channel}"
-    listener.do_sub.call
-    while listening_on?(channel, obj)
-      conn.wait_for_notify(10) do |_, _, payload|
-        break unless listening_on?(channel, obj)
-        listener.do_message.call(nil, payload)
-      end
-    end
-    listener.do_unsub.call
+        def subscribe(channel)
+          obj = Object.new
+          sync { @listening_on[channel] = obj }
+          listener = Listener.new
+          yield listener

-    conn.exec "UNLISTEN #{channel}"
-    nil
-  end
+          conn = raw_pg_connection
+          conn.exec "LISTEN #{channel}"
+          listener.do_sub.call
+          while listening_on?(channel, obj)
+            conn.wait_for_notify(10) do |_, _, payload|
+              break unless listening_on?(channel, obj)

-  def unsubscribe
-    sync { @listening_on.clear }
-  end
+              listener.do_message.call(nil, payload)
+            end
+          end
+          listener.do_unsub.call

-  private
+          conn.exec "UNLISTEN #{channel}"
+          nil
+        end

-  def exec_prepared(conn, *a)
-    r = conn.exec_prepared(*a)
-    yield r if block_given?
-  ensure
-    r.clear if r.respond_to?(:clear)
-  end
+        def unsubscribe
+          sync { @listening_on.clear }
+        end

-  def create_table(conn)
-    conn.exec 'CREATE TABLE message_bus (id bigserial PRIMARY KEY, channel text NOT NULL, value text NOT NULL CHECK (octet_length(value) >= 2), added_at timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL)'
-    conn.exec 'CREATE INDEX table_channel_id_index ON message_bus (channel, id)'
-    conn.exec 'CREATE INDEX table_added_at_index ON message_bus (added_at)'
-    nil
-  end
+        private

-  def hold
-    current_pid = Process.pid
-    if current_pid != @pid
-      @pid = current_pid
-      sync do
-        INHERITED_CONNECTIONS.concat(@available)
-        @available.clear
-      end
-    end
+        def exec_prepared(conn, *a)
+          r = conn.exec_prepared(*a)
+          yield r if block_given?
+        ensure
+          r.clear if r.respond_to?(:clear)
+        end

-    if conn = sync { @allocated[Thread.current] }
-      return yield(conn)
-    end
+        def create_table(conn)
+          conn.exec 'CREATE TABLE message_bus (id bigserial PRIMARY KEY, channel text NOT NULL, value text NOT NULL CHECK (octet_length(value) >= 2), added_at timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL)'
+          conn.exec 'CREATE INDEX table_channel_id_index ON message_bus (channel, id)'
+          conn.exec 'CREATE INDEX table_added_at_index ON message_bus (added_at)'
+          nil
+        end

-    begin
-      conn = sync { @available.shift } || new_pg_connection
-      sync { @allocated[Thread.current] = conn }
-      yield conn
-    rescue PG::ConnectionBad, PG::UnableToSend => e
-      # don't add this connection back to the pool
-    ensure
-      sync { @allocated.delete(Thread.current) }
-      if Process.pid != current_pid
-        sync { INHERITED_CONNECTIONS << conn }
-      elsif conn && !e
-        sync { @available << conn }
-      end
-    end
-  end
+        def hold
+          current_pid = Process.pid
+          if current_pid != @pid
+            @pid = current_pid
+            sync do
+              INHERITED_CONNECTIONS.concat(@available)
+              @available.clear
+            end
+          end

-  def raw_pg_connection
-    PG::Connection.connect(@config[:backend_options] || {})
-  end
+          if conn = sync { @allocated[Thread.current] }
+            return yield(conn)
+          end

-  def new_pg_connection
-    conn = raw_pg_connection
+          begin
+            conn = sync { @available.shift } || new_pg_connection
+            sync { @allocated[Thread.current] = conn }
+            yield conn
+          rescue PG::ConnectionBad, PG::UnableToSend => e
+            # don't add this connection back to the pool
+          ensure
+            sync { @allocated.delete(Thread.current) }
+            if Process.pid != current_pid
+              sync { INHERITED_CONNECTIONS << conn }
+            elsif conn && !e
+              sync { @available << conn }
+            end
+          end
+        end

-    begin
-      conn.exec("SELECT 'message_bus'::regclass")
-    rescue PG::UndefinedTable
-      create_table(conn)
-    end
+        def raw_pg_connection
+          PG::Connection.connect(@config[:backend_options] || {})
+        end

-    conn.exec 'PREPARE insert_message AS INSERT INTO message_bus (channel, value) VALUES ($1, $2) RETURNING id'
-    conn.exec 'PREPARE clear_global_backlog AS DELETE FROM message_bus WHERE (id <= $1)'
-    conn.exec 'PREPARE clear_channel_backlog AS DELETE FROM message_bus WHERE ((channel = $1) AND (id <= (SELECT id FROM message_bus WHERE ((channel = $1) AND (id <= $2)) ORDER BY id DESC LIMIT 1 OFFSET $3)))'
-    conn.exec 'PREPARE channel_backlog AS SELECT id, value FROM message_bus WHERE ((channel = $1) AND (id > $2)) ORDER BY id'
-    conn.exec 'PREPARE global_backlog AS SELECT id, channel, value FROM message_bus WHERE (id > $1) ORDER BY id'
-    conn.exec "PREPARE expire AS DELETE FROM message_bus WHERE added_at < CURRENT_TIMESTAMP - ($1::text || ' seconds')::interval"
-    conn.exec 'PREPARE get_message AS SELECT value FROM message_bus WHERE ((channel = $1) AND (id = $2))'
-    conn.exec 'PREPARE max_channel_id AS SELECT max(id) FROM message_bus WHERE (channel = $1)'
-    conn.exec 'PREPARE max_id AS SELECT max(id) FROM message_bus'
-    conn.exec 'PREPARE publish AS SELECT pg_notify($1, $2)'
-
-    conn
-  end
+        def new_pg_connection
+          conn = raw_pg_connection

-  def listening_on?(channel, obj)
-    sync { @listening_on[channel] } == obj
-  end
+          begin
+            conn.exec("SELECT 'message_bus'::regclass")
+          rescue PG::UndefinedTable
+            create_table(conn)
+          end

-  def sync
-    @mutex.synchronize { yield }
-  end
-end
+          conn.exec 'PREPARE insert_message AS INSERT INTO message_bus (channel, value) VALUES ($1, $2) RETURNING id'
+          conn.exec 'PREPARE clear_global_backlog AS DELETE FROM message_bus WHERE (id <= $1)'
+          conn.exec 'PREPARE clear_channel_backlog AS DELETE FROM message_bus WHERE ((channel = $1) AND (id <= (SELECT id FROM message_bus WHERE ((channel = $1) AND (id <= $2)) ORDER BY id DESC LIMIT 1 OFFSET $3)))'
+          conn.exec 'PREPARE channel_backlog AS SELECT id, value FROM message_bus WHERE ((channel = $1) AND (id > $2)) ORDER BY id'
+          conn.exec 'PREPARE global_backlog AS SELECT id, channel, value FROM message_bus WHERE (id > $1) ORDER BY id'
+          conn.exec "PREPARE expire AS DELETE FROM message_bus WHERE added_at < CURRENT_TIMESTAMP - ($1::text || ' seconds')::interval"
+          conn.exec 'PREPARE get_message AS SELECT value FROM message_bus WHERE ((channel = $1) AND (id = $2))'
+          conn.exec 'PREPARE max_channel_id AS SELECT max(id) FROM message_bus WHERE (channel = $1)'
+          conn.exec 'PREPARE max_id AS SELECT max(id) FROM message_bus'
+          conn.exec 'PREPARE publish AS SELECT pg_notify($1, $2)'
+
+          conn
+        end

-class MessageBus::Postgres::ReliablePubSub
-  attr_reader :subscribed
-  attr_accessor :max_backlog_size, :max_global_backlog_size, :max_backlog_age, :clear_every
+        def listening_on?(channel, obj)
+          sync { @listening_on[channel] } == obj
+        end

-  UNSUB_MESSAGE = "$$UNSUBSCRIBE"
+        def sync
+          @mutex.synchronize { yield }
+        end
+      end

-  def self.reset!(config)
-    MessageBus::Postgres::Client.new(config).reset!
-  end
+      def self.reset!(config)
+        MessageBus::Postgres::Client.new(config).reset!
+      end

-  # max_backlog_size is per multiplexed channel
-  def initialize(config = {}, max_backlog_size = 1000)
-    @config = config
-    @max_backlog_size = max_backlog_size
-    @max_global_backlog_size = 2000
-    # after 7 days inactive backlogs will be removed
-    @max_backlog_age = 604800
-    @clear_every = config[:clear_every] || 1
-  end
+      # @param [Hash] config
+      # @option config [Logger] :logger a logger to which logs will be output
+      # @option config [Integer] :clear_every the interval of publications between which the backlog will not be cleared
+      # @option config [Hash] :backend_options see PG::Connection.connect for details of which options may be provided
+      # @param [Integer] max_backlog_size the largest permitted size (number of messages) for per-channel backlogs; beyond this capacity, old messages will be dropped.
+      def initialize(config = {}, max_backlog_size = 1000)
+        @config = config
+        @max_backlog_size = max_backlog_size
+        @max_global_backlog_size = 2000
+        # after 7 days inactive backlogs will be removed
+        @max_backlog_age = 604800
+        @clear_every = config[:clear_every] || 1
+      end

-  def new_connection
-    MessageBus::Postgres::Client.new(@config)
-  end
+      # Reconnects to Postgres; used after a process fork, typically triggerd by a forking webserver
+      # @see Base#after_fork
+      def after_fork
+        client.reconnect
+      end

-  def backend
-    :postgres
-  end
+      # (see Base#reset!)
+      def reset!
+        client.reset!
+      end

-  def after_fork
-    client.reconnect
-  end
+      # (see Base#expire_all_backlogs!)
+      def expire_all_backlogs!
+        client.expire_all_backlogs!
+      end

-  def postgresql_channel_name
-    db = @config[:db] || 0
-    "_message_bus_#{db}"
-  end
+      # (see Base#publish)
+      # @todo :queue_in_memory NOT SUPPORTED
+      def publish(channel, data, opts = nil)
+        # TODO in memory queue?
+
+        c = client
+        backlog_id = c.add(channel, data)
+        msg = MessageBus::Message.new backlog_id, backlog_id, channel, data
+        payload = msg.encode
+        c.publish postgresql_channel_name, payload
+        if backlog_id % clear_every == 0
+          max_backlog_size = (opts && opts[:max_backlog_size]) || self.max_backlog_size
+          max_backlog_age = (opts && opts[:max_backlog_age]) || self.max_backlog_age
+          c.clear_global_backlog(backlog_id, @max_global_backlog_size)
+          c.expire(max_backlog_age)
+          c.clear_channel_backlog(channel, backlog_id, max_backlog_size)
+        end

-  def client
-    @client ||= new_connection
-  end
+        backlog_id
+      end

-  # use with extreme care, will nuke all of the data
-  def reset!
-    client.reset!
-  end
+      # (see Base#last_id)
+      def last_id(channel)
+        client.max_id(channel)
+      end

-  def publish(channel, data, opts = nil)
+      # (see Base#last_id)
+      def backlog(channel, last_id = 0)
+        items = client.backlog channel, last_id.to_i

-    # TODO in memory queue?
+        items.map! do |id, data|
+          MessageBus::Message.new id, id, channel, data
+        end
+      end

-    client = self.client
-    backlog_id = client.add(channel, data)
-    msg = MessageBus::Message.new backlog_id, backlog_id, channel, data
-    payload = msg.encode
-    client.publish postgresql_channel_name, payload
-    if backlog_id % clear_every == 0
-      client.clear_global_backlog(backlog_id, @max_global_backlog_size)
-      client.expire(@max_backlog_age)
-      client.clear_channel_backlog(channel, backlog_id, @max_backlog_size)
-    end
+      # (see Base#global_backlog)
+      def global_backlog(last_id = 0)
+        items = client.global_backlog last_id.to_i

-    backlog_id
-  end
+        items.map! do |id, channel, data|
+          MessageBus::Message.new id, id, channel, data
+        end
+      end

-  def last_id(channel)
-    client.max_id(channel)
-  end
+      # (see Base#get_message)
+      def get_message(channel, message_id)
+        if data = client.get_value(channel, message_id)
+          MessageBus::Message.new message_id, message_id, channel, data
+        else
+          nil
+        end
+      end

-  def backlog(channel, last_id = nil)
-    items = client.backlog channel, last_id.to_i
+      # (see Base#subscribe)
+      def subscribe(channel, last_id = nil)
+        # trivial implementation for now,
+        # can cut down on connections if we only have one global subscriber
+        raise ArgumentError unless block_given?

-    items.map! do |id, data|
-      MessageBus::Message.new id, id, channel, data
-    end
-  end
+        global_subscribe(last_id) do |m|
+          yield m if m.channel == channel
+        end
+      end

-  def global_backlog(last_id = nil)
-    items = client.global_backlog last_id.to_i
+      # (see Base#global_unsubscribe)
+      def global_unsubscribe
+        client.publish(postgresql_channel_name, UNSUB_MESSAGE)
+        @subscribed = false
+      end

-    items.map! do |id, channel, data|
-      MessageBus::Message.new id, id, channel, data
-    end
-  end
+      # (see Base#global_subscribe)
+      def global_subscribe(last_id = nil)
+        raise ArgumentError unless block_given?

-  def get_message(channel, message_id)
-    if data = client.get_value(channel, message_id)
-      MessageBus::Message.new message_id, message_id, channel, data
-    else
-      nil
-    end
-  end
+        highest_id = last_id

-  def subscribe(channel, last_id = nil)
-    # trivial implementation for now,
-    # can cut down on connections if we only have one global subscriber
-    raise ArgumentError unless block_given?
+        begin
+          client.subscribe(postgresql_channel_name) do |on|
+            h = {}

-    global_subscribe(last_id) do |m|
-      yield m if m.channel == channel
-    end
-  end
+            on.subscribe do
+              if highest_id
+                process_global_backlog(highest_id) do |m|
+                  h[m.global_id] = true
+                  yield m
+                end
+              end
+              h = nil if h.empty?
+              @subscribed = true
+            end

-  def process_global_backlog(highest_id)
-    if highest_id > client.max_id
-      highest_id = 0
-    end
+            on.unsubscribe do
+              @subscribed = false
+            end

-    global_backlog(highest_id).each do |old|
-      yield old
-      highest_id = old.global_id
-    end
+            on.message do |_c, m|
+              if m == UNSUB_MESSAGE
+                @subscribed = false
+                return
+              end
+              m = MessageBus::Message.decode m
+
+              # we have 3 options
+              #
+              # 1. message came in the correct order GREAT, just deal with it
+              # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
+              # 3. message came in the incorrect order and is lowest than current highest id, reset
+
+              if h
+                # If already yielded during the clear backlog when subscribing,
+                # don't yield a duplicate copy.
+                unless h.delete(m.global_id)
+                  h = nil if h.empty?
+                  yield m
+                end
+              else
+                yield m
+              end
+            end
+          end
+        rescue => error
+          @config[:logger].warn "#{error} subscribe failed, reconnecting in 1 second. Call stack\n#{error.backtrace.join("\n")}"
+          sleep 1
+          retry
+        end
+      end

-    highest_id
-  end
+      private

-  def global_unsubscribe
-    client.publish(postgresql_channel_name, UNSUB_MESSAGE)
-    @subscribed = false
-  end
+      def client
+        @client ||= new_connection
+      end

-  def global_subscribe(last_id = nil, &blk)
-    raise ArgumentError unless block_given?
-    highest_id = last_id
+      def new_connection
+        Client.new(@config)
+      end

-    begin
-      client.subscribe(postgresql_channel_name) do |on|
-        h = {}
+      def postgresql_channel_name
+        db = @config[:db] || 0
+        "_message_bus_#{db}"
+      end

-        on.subscribe do
-          if highest_id
-            process_global_backlog(highest_id) do |m|
-              h[m.global_id] = true
-              yield m
-            end
-          end
-          h = nil if h.empty?
-          @subscribed = true
+      def process_global_backlog(highest_id)
+        if highest_id > client.max_id
+          highest_id = 0
         end

-        on.unsubscribe do
-          @subscribed = false
+        global_backlog(highest_id).each do |old|
+          yield old
+          highest_id = old.global_id
         end

-        on.message do |c, m|
-          if m == UNSUB_MESSAGE
-            @subscribed = false
-            return
-          end
-          m = MessageBus::Message.decode m
-
-          # we have 3 options
-          #
-          # 1. message came in the correct order GREAT, just deal with it
-          # 2. message came in the incorrect order COMPLICATED, wait a tiny bit and clear backlog
-          # 3. message came in the incorrect order and is lowest than current highest id, reset
-
-          if h
-            # If already yielded during the clear backlog when subscribing,
-            # don't yield a duplicate copy.
-            unless h.delete(m.global_id)
-              h = nil if h.empty?
-              yield m
-            end
-          else
-            yield m
-          end
-        end
+        highest_id
       end
-    rescue => error
-      @config[:logger].warn "#{error} subscribe failed, reconnecting in 1 second. Call stack\n#{error.backtrace.join("\n")}"
-      sleep 1
-      retry
+
+      MessageBus::BACKENDS[:postgres] = self
     end
   end
-
-  MessageBus::BACKENDS[:postgres] = self
 end
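
The relocated backend still registers itself at the end of the file (MessageBus::BACKENDS[:postgres] = self), so it is selected through the regular top-level configuration rather than referenced directly. A minimal usage sketch, assuming a reachable Postgres database and placeholder connection options (the :backend_options hash is handed to PG::Connection.connect, and :clear_every throttles how often backlogs are pruned):

    require 'message_bus'

    # Select the Postgres backend from this diff; connection options below are placeholders.
    MessageBus.configure(
      backend: :postgres,
      backend_options: { host: 'localhost', dbname: 'message_bus' },
      clear_every: 10 # prune backlogs only on every 10th publication
    )

    MessageBus.subscribe('/demo') do |msg|
      puts "#{msg.channel}: #{msg.data}"
    end

    MessageBus.publish('/demo', 'hello')
    sleep 0.5 # standalone script only: give the background listener a moment to deliver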