nsq-ruby-fastly 2.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +42 -0
- data/LICENSE.txt +22 -0
- data/README.md +421 -0
- data/lib/nsq/client_base.rb +120 -0
- data/lib/nsq/connection.rb +466 -0
- data/lib/nsq/consumer.rb +100 -0
- data/lib/nsq/discovery.rb +98 -0
- data/lib/nsq/exceptions.rb +5 -0
- data/lib/nsq/frames/error.rb +6 -0
- data/lib/nsq/frames/frame.rb +16 -0
- data/lib/nsq/frames/message.rb +33 -0
- data/lib/nsq/frames/response.rb +6 -0
- data/lib/nsq/logger.rb +38 -0
- data/lib/nsq/producer.rb +94 -0
- data/lib/nsq.rb +13 -0
- data/lib/version.rb +9 -0
- metadata +117 -0
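
For orientation before the file-by-file diff, here is a minimal consuming sketch assembled only from the classes reproduced in this release (Consumer, Connection, Frame); the nsqlookupd address, topic, and channel are placeholder values. The gem's Message class (data/lib/nsq/frames/message.rb) is listed above but not reproduced in this diff, so the sketch acknowledges the message through the connection reference that every frame carries rather than through any Message helper.

```ruby
require 'nsq'

# Placeholder endpoint and names; point these at a real nsqlookupd HTTP address.
consumer = Nsq::Consumer.new(
  nsqlookupd: '127.0.0.1:4161',
  topic: 'events',
  channel: 'ruby-worker',
  max_in_flight: 10
)

message = consumer.pop             # blocks until a message arrives
# consumer.pop_without_blocking    # returns nil instead of blocking

# Each frame keeps a reference to the Connection it arrived on, so it can be
# acknowledged (FIN) or requeued (REQ) on the same socket.
message.connection.fin(message.id)
```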
data/lib/nsq/connection.rb
ADDED
@@ -0,0 +1,466 @@
require 'json'
require 'socket'
require 'openssl'
require 'timeout'

require_relative 'frames/error'
require_relative 'frames/message'
require_relative 'frames/response'
require_relative 'logger'

module Nsq
  class Connection
    include Nsq::AttributeLogger
    @@log_attributes = [:host, :port]

    attr_reader :host
    attr_reader :port
    attr_accessor :max_in_flight
    attr_reader :presumed_in_flight

    USER_AGENT = "nsq-ruby/#{Nsq::Version::STRING}"
    RESPONSE_HEARTBEAT = '_heartbeat_'
    RESPONSE_OK = 'OK'


    def initialize(opts = {})
      @host = opts[:host] || (raise ArgumentError, 'host is required')
      @port = opts[:port] || (raise ArgumentError, 'port is required')
      @queue = opts[:queue]
      @topic = opts[:topic]
      @channel = opts[:channel]
      @msg_timeout = opts[:msg_timeout] || 60_000 # 60s
      @max_in_flight = opts[:max_in_flight] || 1
      @tls_options = opts[:tls_options]
      @max_attempts = opts[:max_attempts]
      if opts[:ssl_context]
        if @tls_options
          warn 'ssl_context and tls_options both set. Using tls_options. Ignoring ssl_context.'
        else
          @tls_options = opts[:ssl_context]
          warn 'ssl_context will be deprecated in nsq-ruby version 3. Please use tls_options instead.'
        end
      end
      @tls_v1 = !!opts[:tls_v1]

      if @tls_options
        if @tls_v1
          validate_tls_options!
        else
          warn 'tls_options was provided, but tls_v1 is false. Skipping validation of tls_options.'
        end
      end

      if @msg_timeout < 1000
        raise ArgumentError, 'msg_timeout cannot be less than 1000. it\'s in milliseconds.'
      end

      # for outgoing communication
      @write_queue = SizedQueue.new(10000)

      # For indicating that the connection has died.
      # We use a Queue so we don't have to poll. Used to communicate across
      # threads (from write_loop and read_loop to connect_and_monitor).
      @death_queue = Queue.new

      @connected = false
      @presumed_in_flight = 0

      open_connection
      start_monitoring_connection
    end


    def connected?
      @connected
    end


    # close the connection and don't try to re-open it
    def close
      stop_monitoring_connection
      close_connection
    end


    def sub(topic, channel)
      write "SUB #{topic} #{channel}\n"
    end


    def rdy(count)
      write "RDY #{count}\n"
    end


    def fin(message_id)
      write "FIN #{message_id}\n"
      decrement_in_flight
    end


    def req(message_id, timeout)
      write "REQ #{message_id} #{timeout}\n"
      decrement_in_flight
    end


    def touch(message_id)
      write "TOUCH #{message_id}\n"
    end


    def pub(topic, message)
      write ["PUB #{topic}\n", message.bytesize, message].pack('a*l>a*')
    end

    def dpub(topic, delay_in_ms, message)
      write ["DPUB #{topic} #{delay_in_ms}\n", message.bytesize, message].pack('a*l>a*')
    end

    def mpub(topic, messages)
      body = messages.map do |message|
        [message.bytesize, message].pack('l>a*')
      end.join

      write ["MPUB #{topic}\n", body.bytesize, messages.size, body].pack('a*l>l>a*')
    end


    # Tell the server we are ready for more messages!
    def re_up_ready
      rdy(@max_in_flight)
      # assume these messages are coming our way. yes, this might not be the
      # case, but it's much easier to manage our RDY state with the server if
      # we treat things this way.
      @presumed_in_flight = @max_in_flight
    end


    private

    def cls
      write "CLS\n"
    end


    def nop
      write "NOP\n"
    end


    def write(raw)
      @write_queue.push(raw)
    end


    def write_to_socket(raw)
      debug ">>> #{raw.inspect}"
      @socket.write(raw)
    end


    def identify
      hostname = Socket.gethostname
      metadata = {
        client_id: hostname,
        hostname: hostname,
        feature_negotiation: true,
        heartbeat_interval: 30_000, # 30 seconds
        output_buffer: 16_000, # 16kb
        output_buffer_timeout: 250, # 250ms
        tls_v1: @tls_v1,
        snappy: false,
        deflate: false,
        sample_rate: 0, # disable sampling
        user_agent: USER_AGENT,
        msg_timeout: @msg_timeout
      }.to_json
      write_to_socket ["IDENTIFY\n", metadata.length, metadata].pack('a*l>a*')

      # Now wait for the response!
      frame = receive_frame
      server = JSON.parse(frame.data)

      if @max_in_flight > server['max_rdy_count']
        raise "max_in_flight is set to #{@max_in_flight}, server only supports #{server['max_rdy_count']}"
      end

      @server_version = server['version']
    end


    def handle_response(frame)
      if frame.data == RESPONSE_HEARTBEAT
        debug 'Received heartbeat'
        nop
      elsif frame.data == RESPONSE_OK
        debug 'Received OK'
      else
        die "Received response we don't know how to handle: #{frame.data}"
      end
    end


    def receive_frame
      if buffer = @socket.read(8)
        size, type = buffer.unpack('l>l>')
        size -= 4 # we want the size of the data part and type already took up 4 bytes
        data = @socket.read(size)
        frame_class = frame_class_for_type(type)
        return frame_class.new(data, self)
      end
    end


    FRAME_CLASSES = [Response, Error, Message]
    def frame_class_for_type(type)
      raise "Bad frame type specified: #{type}" if type > FRAME_CLASSES.length - 1
      [Response, Error, Message][type]
    end


    def decrement_in_flight
      @presumed_in_flight -= 1

      if server_needs_rdy_re_ups?
        # now that we're less than @max_in_flight we might need to re-up our RDY state
        threshold = (@max_in_flight * 0.2).ceil
        re_up_ready if @presumed_in_flight <= threshold
      end
    end


    def start_read_loop
      @read_loop_thread ||= Thread.new{read_loop}
    end


    def stop_read_loop
      @read_loop_thread.kill if @read_loop_thread
      @read_loop_thread = nil
    end


    def read_loop
      loop do
        frame = receive_frame
        if frame.is_a?(Response)
          handle_response(frame)
        elsif frame.is_a?(Error)
          error "Error received: #{frame.data}"
        elsif frame.is_a?(Message)
          debug "<<< #{frame.body}"
          if @max_attempts && frame.attempts > @max_attempts
            fin(frame.id)
          else
            @queue.push(frame) if @queue
          end
        else
          raise 'No data from socket'
        end
      end
    rescue Exception => ex
      die(ex)
    end


    def start_write_loop
      @write_loop_thread ||= Thread.new{write_loop}
    end


    def stop_write_loop
      if @write_loop_thread
        @write_queue.push(:stop_write_loop)
        @write_loop_thread.join
      end
      @write_loop_thread = nil
    end


    def write_loop
      data = nil
      loop do
        data = @write_queue.pop
        break if data == :stop_write_loop
        write_to_socket(data)
      end
    rescue Exception => ex
      # requeue PUB and MPUB commands
      if data =~ /^M?PUB/
        debug "Requeueing to write_queue: #{data.inspect}"
        @write_queue.push(data)
      end
      die(ex)
    end


    # Waits for death of connection
    def start_monitoring_connection
      @connection_monitor_thread ||= Thread.new{monitor_connection}
      @connection_monitor_thread.abort_on_exception = true
    end


    def stop_monitoring_connection
      @connection_monitor_thread.kill if @connection_monitor_thread
      @connection_monitor = nil
    end


    def monitor_connection
      loop do
        # wait for death, hopefully it never comes
        cause_of_death = @death_queue.pop
        warn "Died from: #{cause_of_death}"

        debug 'Reconnecting...'
        reconnect
        debug 'Reconnected!'

        # clear all death messages, since we're now reconnected.
        # we don't want to complete this loop and immediately reconnect again.
        @death_queue.clear
      end
    end


    # close the connection if it's not already closed and try to reconnect
    # over and over until we succeed!
    def reconnect
      close_connection
      with_retries do
        open_connection
      end
    end


    def open_connection
      @socket = TCPSocket.new(@host, @port)
      # write the version and IDENTIFY directly to the socket to make sure
      # it gets to nsqd ahead of anything in the `@write_queue`
      write_to_socket '  V2'
      identify
      upgrade_to_ssl_socket if @tls_v1

      start_read_loop
      start_write_loop
      @connected = true

      # we need to re-subscribe if there's a topic specified
      if @topic
        debug "Subscribing to #{@topic}"
        sub(@topic, @channel)
        re_up_ready
      end
    end


    # closes the connection and stops listening for messages
    def close_connection
      cls if connected?
      stop_read_loop
      stop_write_loop
      @socket.close if @socket
      @socket = nil
      @connected = false
    end


    # this is called when there's a connection error in the read or write loop
    # it triggers `connect_and_monitor` to try to reconnect
    def die(reason)
      @connected = false
      @death_queue.push(reason)
    end


    def upgrade_to_ssl_socket
      ssl_opts = [@socket, openssl_context].compact
      @socket = OpenSSL::SSL::SSLSocket.new(*ssl_opts)
      @socket.sync_close = true
      @socket.connect
    end


    def openssl_context
      return unless @tls_options

      context = OpenSSL::SSL::SSLContext.new
      context.cert = OpenSSL::X509::Certificate.new(File.read(@tls_options[:certificate]))
      context.key = OpenSSL::PKey::RSA.new(File.read(@tls_options[:key]))
      if @tls_options[:ca_certificate]
        context.ca_file = @tls_options[:ca_certificate]
      end
      context.verify_mode = @tls_options[:verify_mode] || OpenSSL::SSL::VERIFY_NONE
      context
    end


    # Retry the supplied block with exponential backoff.
    #
    # Borrowed liberally from:
    # https://github.com/ooyala/retries/blob/master/lib/retries.rb
    def with_retries(&block)
      base_sleep_seconds = 0.5
      max_sleep_seconds = 300 # 5 minutes

      # Let's do this thing
      attempts = 0

      begin
        attempts += 1
        return block.call(attempts)

      rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH,
             Errno::ENETDOWN, Errno::ENETUNREACH, Errno::ETIMEDOUT, Timeout::Error => ex

        raise ex if attempts >= 100

        # The sleep time is an exponentially-increasing function of base_sleep_seconds.
        # But, it never exceeds max_sleep_seconds.
        sleep_seconds = [base_sleep_seconds * (2 ** (attempts - 1)), max_sleep_seconds].min
        # Randomize to a random value in the range sleep_seconds/2 .. sleep_seconds
        sleep_seconds = sleep_seconds * (0.5 * (1 + rand()))
        # But never sleep less than base_sleep_seconds
        sleep_seconds = [base_sleep_seconds, sleep_seconds].max

        warn "Failed to connect: #{ex}. Retrying in #{sleep_seconds.round(1)} seconds."

        snooze(sleep_seconds)

        retry
      end
    end


    # So we can stub this for testing and reconnect in a tight loop
    def snooze(t)
      sleep(t)
    end


    def server_needs_rdy_re_ups?
      # versions less than 0.3.0 need RDY re-ups
      # see: https://github.com/bitly/nsq/blob/master/ChangeLog.md#030---2014-11-18
      major, minor = @server_version.split('.').map(&:to_i)
      major == 0 && minor <= 2
    end


    def validate_tls_options!
      [:key, :certificate].each do |key|
        unless @tls_options.has_key?(key)
          raise ArgumentError.new "@tls_options requires a :#{key}"
        end
      end

      [:key, :certificate, :ca_certificate].each do |key|
        if @tls_options[key] && !File.readable?(@tls_options[key])
          raise LoadError.new "@tls_options :#{key} is unreadable"
        end
      end
    end
  end
end
data/lib/nsq/consumer.rb
ADDED
@@ -0,0 +1,100 @@
require_relative 'client_base'

module Nsq
  class Consumer < ClientBase

    attr_reader :max_in_flight

    def initialize(opts = {})
      if opts[:nsqlookupd]
        @nsqlookupds = [opts[:nsqlookupd]].flatten
      else
        @nsqlookupds = []
      end

      @topic = opts[:topic] || raise(ArgumentError, 'topic is required')
      @channel = opts[:channel] || raise(ArgumentError, 'channel is required')
      @max_in_flight = opts[:max_in_flight] || 1
      @discovery_interval = opts[:discovery_interval] || 60
      @msg_timeout = opts[:msg_timeout]
      @max_attempts = opts[:max_attempts]
      @ssl_context = opts[:ssl_context]
      @tls_options = opts[:tls_options]
      @tls_v1 = opts[:tls_v1]

      # This is where we queue up the messages we receive from each connection
      @messages = opts[:queue] || Queue.new

      # This is where we keep a record of our active nsqd connections
      # The key is a string with the host and port of the instance (e.g.
      # '127.0.0.1:4150') and the value is the Connection instance.
      @connections = {}

      if !@nsqlookupds.empty?
        discover_repeatedly(
          nsqlookupds: @nsqlookupds,
          topic: @topic,
          interval: @discovery_interval
        )
      else
        # normally, we find nsqd instances to connect to via nsqlookupd(s)
        # in this case let's connect to an nsqd instance directly
        add_connection(opts[:nsqd] || '127.0.0.1:4150', max_in_flight: @max_in_flight)
      end
    end


    # pop the next message off the queue
    def pop
      @messages.pop
    end


    # By default, if the internal queue is empty, pop will block until
    # a new message comes in.
    #
    # Calling this method won't block. If there are no messages, it just
    # returns nil.
    def pop_without_blocking
      @messages.pop(true)
    rescue ThreadError
      # When the Queue is empty calling `Queue#pop(true)` will raise a ThreadError
      nil
    end


    # returns the number of messages we have locally in the queue
    def size
      @messages.size
    end


    private

    def add_connection(nsqd, options = {})
      super(nsqd, {
        topic: @topic,
        channel: @channel,
        queue: @messages,
        msg_timeout: @msg_timeout,
        max_in_flight: 1,
        max_attempts: @max_attempts
      }.merge(options))
    end

    # Be conservative, but don't set a connection's max_in_flight below 1
    def max_in_flight_per_connection(number_of_connections = @connections.length)
      [@max_in_flight / number_of_connections, 1].max
    end

    def connections_changed
      redistribute_ready
    end

    def redistribute_ready
      @connections.values.each do |connection|
        connection.max_in_flight = max_in_flight_per_connection
        connection.re_up_ready
      end
    end
  end
end
data/lib/nsq/discovery.rb
ADDED
@@ -0,0 +1,98 @@
require 'json'
require 'net/http'
require 'uri'

require_relative 'logger'

# Connects to nsqlookupd's to find the nsqd instances for a given topic
module Nsq
  class Discovery
    include Nsq::AttributeLogger

    # lookupd addresses must be formatted like so: '<host>:<http-port>'
    def initialize(lookupds)
      @lookupds = lookupds
    end

    # Returns an array of nsqd instances
    #
    # nsqd instances returned are strings in this format: '<host>:<tcp-port>'
    #
    #   discovery.nsqds
    #   #=> ['127.0.0.1:4150', '127.0.0.1:4152']
    #
    # If all nsqlookupd's are unreachable, raises Nsq::DiscoveryException
    #
    def nsqds
      gather_nsqds_from_all_lookupds do |lookupd|
        get_nsqds(lookupd)
      end
    end

    # Returns an array of nsqd instances that have messages for
    # that topic.
    #
    # nsqd instances returned are strings in this format: '<host>:<tcp-port>'
    #
    #   discovery.nsqds_for_topic('a-topic')
    #   #=> ['127.0.0.1:4150', '127.0.0.1:4152']
    #
    # If all nsqlookupd's are unreachable, raises Nsq::DiscoveryException
    #
    def nsqds_for_topic(topic)
      gather_nsqds_from_all_lookupds do |lookupd|
        get_nsqds(lookupd, topic)
      end
    end


    private

    def gather_nsqds_from_all_lookupds
      nsqd_list = @lookupds.map do |lookupd|
        yield(lookupd)
      end.flatten

      # All nsqlookupds were unreachable, raise an error!
      if nsqd_list.length > 0 && nsqd_list.all? { |nsqd| nsqd.nil? }
        raise DiscoveryException
      end

      nsqd_list.compact.uniq
    end

    # Returns an array of nsqd addresses
    # If there's an error, return nil
    def get_nsqds(lookupd, topic = nil)
      uri_scheme = 'http://' unless lookupd.match(%r(https?://))
      uri = URI.parse("#{uri_scheme}#{lookupd}")

      uri.query = "ts=#{Time.now.to_i}"
      if topic
        uri.path = '/lookup'
        uri.query += "&topic=#{URI.escape(topic)}"
      else
        uri.path = '/nodes'
      end

      begin
        body = Net::HTTP.get(uri)
        data = JSON.parse(body)
        producers = data['producers'] || # v1.0.0-compat
                    (data['data'] && data['data']['producers'])

        if producers
          producers.map do |producer|
            "#{producer['broadcast_address']}:#{producer['tcp_port']}"
          end
        else
          []
        end
      rescue Exception => e
        error "Error during discovery for #{lookupd}: #{e}"
        nil
      end
    end

  end
end
data/lib/nsq/frames/frame.rb
ADDED
@@ -0,0 +1,16 @@
require_relative '../logger'

module Nsq
  class Frame
    include Nsq::AttributeLogger
    @@log_attributes = [:connection]

    attr_reader :data
    attr_reader :connection

    def initialize(data, connection)
      @data = data
      @connection = connection
    end
  end
end