omq 0.9.0 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +67 -0
- data/lib/omq/channel.rb +3 -3
- data/lib/omq/client_server.rb +6 -6
- data/lib/omq/engine.rb +641 -0
- data/lib/omq/options.rb +46 -0
- data/lib/omq/pair.rb +2 -2
- data/lib/omq/peer.rb +3 -3
- data/lib/omq/pub_sub.rb +6 -6
- data/lib/omq/push_pull.rb +2 -2
- data/lib/omq/radio_dish.rb +2 -2
- data/lib/omq/reactor.rb +128 -0
- data/lib/omq/readable.rb +42 -0
- data/lib/omq/req_rep.rb +4 -4
- data/lib/omq/router_dealer.rb +4 -4
- data/lib/omq/routing/channel.rb +83 -0
- data/lib/omq/routing/client.rb +56 -0
- data/lib/omq/routing/dealer.rb +57 -0
- data/lib/omq/routing/dish.rb +78 -0
- data/lib/omq/routing/fan_out.rb +131 -0
- data/lib/omq/routing/gather.rb +46 -0
- data/lib/omq/routing/pair.rb +86 -0
- data/lib/omq/routing/peer.rb +101 -0
- data/lib/omq/routing/pub.rb +60 -0
- data/lib/omq/routing/pull.rb +46 -0
- data/lib/omq/routing/push.rb +81 -0
- data/lib/omq/routing/radio.rb +140 -0
- data/lib/omq/routing/rep.rb +101 -0
- data/lib/omq/routing/req.rb +65 -0
- data/lib/omq/routing/round_robin.rb +168 -0
- data/lib/omq/routing/router.rb +110 -0
- data/lib/omq/routing/scatter.rb +82 -0
- data/lib/omq/routing/server.rb +101 -0
- data/lib/omq/routing/sub.rb +78 -0
- data/lib/omq/routing/xpub.rb +72 -0
- data/lib/omq/routing/xsub.rb +83 -0
- data/lib/omq/routing.rb +66 -0
- data/lib/omq/scatter_gather.rb +4 -4
- data/lib/omq/single_frame.rb +18 -0
- data/lib/omq/socket.rb +24 -9
- data/lib/omq/transport/inproc.rb +355 -0
- data/lib/omq/transport/ipc.rb +117 -0
- data/lib/omq/transport/tcp.rb +111 -0
- data/lib/omq/version.rb +1 -1
- data/lib/omq/writable.rb +65 -0
- data/lib/omq.rb +60 -4
- metadata +32 -33
- data/lib/omq/zmtp/engine.rb +0 -551
- data/lib/omq/zmtp/options.rb +0 -48
- data/lib/omq/zmtp/reactor.rb +0 -131
- data/lib/omq/zmtp/readable.rb +0 -29
- data/lib/omq/zmtp/routing/channel.rb +0 -81
- data/lib/omq/zmtp/routing/client.rb +0 -56
- data/lib/omq/zmtp/routing/dealer.rb +0 -57
- data/lib/omq/zmtp/routing/dish.rb +0 -80
- data/lib/omq/zmtp/routing/fan_out.rb +0 -131
- data/lib/omq/zmtp/routing/gather.rb +0 -48
- data/lib/omq/zmtp/routing/pair.rb +0 -84
- data/lib/omq/zmtp/routing/peer.rb +0 -100
- data/lib/omq/zmtp/routing/pub.rb +0 -62
- data/lib/omq/zmtp/routing/pull.rb +0 -48
- data/lib/omq/zmtp/routing/push.rb +0 -80
- data/lib/omq/zmtp/routing/radio.rb +0 -139
- data/lib/omq/zmtp/routing/rep.rb +0 -101
- data/lib/omq/zmtp/routing/req.rb +0 -65
- data/lib/omq/zmtp/routing/round_robin.rb +0 -143
- data/lib/omq/zmtp/routing/router.rb +0 -109
- data/lib/omq/zmtp/routing/scatter.rb +0 -81
- data/lib/omq/zmtp/routing/server.rb +0 -100
- data/lib/omq/zmtp/routing/sub.rb +0 -80
- data/lib/omq/zmtp/routing/xpub.rb +0 -74
- data/lib/omq/zmtp/routing/xsub.rb +0 -86
- data/lib/omq/zmtp/routing.rb +0 -65
- data/lib/omq/zmtp/single_frame.rb +0 -20
- data/lib/omq/zmtp/transport/inproc.rb +0 -359
- data/lib/omq/zmtp/transport/ipc.rb +0 -118
- data/lib/omq/zmtp/transport/tcp.rb +0 -117
- data/lib/omq/zmtp/writable.rb +0 -61
- data/lib/omq/zmtp.rb +0 -81
data/lib/omq/engine.rb
ADDED
|
@@ -0,0 +1,641 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "async"

module OMQ
  # Per-socket orchestrator.
  #
  # Manages connections, transports, and the routing strategy for one
  # OMQ::Socket instance. Each socket type creates one Engine.
  #
  class Engine
    # @return [Symbol] socket type (e.g. :REQ, :PAIR)
    #
    attr_reader :socket_type

    # @return [Options] socket options
    #
    attr_reader :options

    # @return [Routing] routing strategy
    #
    attr_reader :routing

    # @return [String, nil] last bound endpoint
    #
    attr_reader :last_endpoint

    # @return [Integer, nil] last auto-selected TCP port
    #
    attr_reader :last_tcp_port

    # @param socket_type [Symbol] e.g. :REQ, :REP, :PAIR
    # @param options [Options]
    #
    def initialize(socket_type, options)
      @socket_type = socket_type
      @options = options
      @routing = Routing.for(socket_type).new(self)
      @connections = []
      @connection_endpoints = {} # connection => endpoint (for reconnection)
      @connected_endpoints = [] # endpoints we connected to (not bound)
      @listeners = []
      @tasks = []
      @closed = false
      @closing = false
      @last_endpoint = nil
      @last_tcp_port = nil
      @peer_connected = Async::Promise.new
      @all_peers_gone = Async::Promise.new
      @reconnect_enabled = true
      @parent_task = nil
      @on_io_thread = false
      @connection_promises = {} # connection => Async::Promise
      @fatal_error = nil
    end

    attr_reader :peer_connected, :all_peers_gone, :connections, :parent_task

    attr_writer :reconnect_enabled

    # Spawns an inproc reconnect retry task under @parent_task.
    #
    # @param endpoint [String]
    # @yield [interval] the retry loop body
    #
    def spawn_inproc_retry(endpoint)
      ri = @options.reconnect_interval
      # A Range reconnect_interval means backoff; inproc retries use the floor.
      ivl = ri.is_a?(Range) ? ri.begin : ri
      @tasks << @parent_task.async(transient: true, annotation: "inproc reconnect #{endpoint}") do
        yield ivl
      rescue Async::Stop
        # normal shutdown
      end
    end

    # Binds to an endpoint.
    #
    # @param endpoint [String] e.g. "tcp://127.0.0.1:5555", "inproc://foo"
    # @return [void]
    # @raise [ArgumentError] on unsupported transport
    #
    def bind(endpoint)
      transport = transport_for(endpoint)
      listener = transport.bind(endpoint, self)
      start_accept_loops(listener)
      @listeners << listener
      # listener.endpoint may differ from the request (e.g. tcp://host:0
      # resolved to an actual port), so record the listener's view.
      @last_endpoint = listener.endpoint
      @last_tcp_port = extract_tcp_port(listener.endpoint)
    end

    # Connects to an endpoint.
    #
    # @param endpoint [String]
    # @return [void]
    #
    def connect(endpoint)
      validate_endpoint!(endpoint)
      @connected_endpoints << endpoint
      if endpoint.start_with?("inproc://")
        # Inproc connect is synchronous and instant
        transport = transport_for(endpoint)
        transport.connect(endpoint, self)
      else
        # TCP/IPC connect in background — never blocks the caller
        schedule_reconnect(endpoint, delay: 0)
      end
    end

    # Disconnects from an endpoint. Closes connections to that endpoint
    # and stops auto-reconnection for it.
    #
    # @param endpoint [String]
    # @return [void]
    #
    def disconnect(endpoint)
      # Removing the endpoint first prevents connection_lost from
      # scheduling a reconnect for the connections we close below.
      @connected_endpoints.delete(endpoint)
      conns = @connection_endpoints.select { |_, ep| ep == endpoint }.keys
      conns.each do |conn|
        @connection_endpoints.delete(conn)
        @connections.delete(conn)
        @routing.connection_removed(conn)
        conn.close
      end
    end

    # Unbinds from an endpoint. Stops the listener and closes all
    # connections that were accepted on it.
    #
    # @param endpoint [String]
    # @return [void]
    #
    def unbind(endpoint)
      listener = @listeners.find { |l| l.endpoint == endpoint }
      return unless listener
      listener.stop
      @listeners.delete(listener)

      # Close connections accepted on this endpoint
      conns = @connection_endpoints.select { |_, ep| ep == endpoint }.keys
      conns.each do |conn|
        @connection_endpoints.delete(conn)
        @connections.delete(conn)
        @routing.connection_removed(conn)
        conn.close
      end
    end

    # Called by a transport when an incoming connection is accepted.
    #
    # @param io [#read, #write, #close]
    # @param endpoint [String, nil] the endpoint this was accepted on
    # @return [void]
    #
    def handle_accepted(io, endpoint: nil)
      spawn_connection(io, as_server: true, endpoint: endpoint)
    end

    # Called by a transport when an outgoing connection is established.
    #
    # @param io [#read, #write, #close]
    # @param endpoint [String, nil] endpoint used for reconnection tracking
    # @return [void]
    #
    def handle_connected(io, endpoint: nil)
      spawn_connection(io, as_server: false, endpoint: endpoint)
    end

    # Called by inproc transport with a pre-validated DirectPipe.
    # Skips ZMTP handshake — just registers with routing strategy.
    #
    # @param pipe [Transport::Inproc::DirectPipe]
    # @param endpoint [String, nil]
    # @return [void]
    #
    def connection_ready(pipe, endpoint: nil)
      @connections << pipe
      @connection_endpoints[pipe] = endpoint if endpoint
      @routing.connection_added(pipe)
      @peer_connected.resolve(pipe)
    end

    # Dequeues the next received message. Blocks until available.
    #
    # @return [Array<String>] message parts
    # @raise if a background pump task crashed
    #
    def dequeue_recv
      raise @fatal_error if @fatal_error
      msg = @routing.recv_queue.dequeue
      # A nil dequeue may be the fatal-error sentinel pushed by
      # signal_fatal_error — re-check after waking up.
      raise @fatal_error if msg.nil? && @fatal_error
      msg
    end

    # Dequeues up to +max+ messages. Blocks on the first, then
    # drains non-blocking.
    #
    # @param max [Integer]
    # @return [Array<Array<String>>]
    #
    def dequeue_recv_batch(max)
      raise @fatal_error if @fatal_error
      queue = @routing.recv_queue
      msg = queue.dequeue
      raise @fatal_error if msg.nil? && @fatal_error
      batch = [msg]
      while batch.size < max
        # timeout: 0 = non-blocking poll; nil means the queue is empty.
        msg = queue.dequeue(timeout: 0)
        break unless msg
        batch << msg
      end
      batch
    end

    # Pushes a nil sentinel into the recv queue, unblocking a
    # pending {#dequeue_recv} with a nil return value.
    #
    def dequeue_recv_sentinel
      @routing.recv_queue.push(nil)
    end

    # Enqueues a message for sending. Blocks at HWM.
    #
    # @param parts [Array<String>]
    # @return [void]
    # @raise if a background pump task crashed
    #
    def enqueue_send(parts)
      raise @fatal_error if @fatal_error
      @routing.enqueue(parts)
    end

    # Starts a recv pump that dequeues messages from a connection and
    # enqueues them into the routing strategy's recv queue, or wires the
    # inproc fast path when the connection is a DirectPipe (no pump task).
    #
    # When a block is given, each message is yielded for transformation
    # before enqueueing. The block is compiled at the call site, giving
    # YJIT a monomorphic call per routing strategy instead of a shared
    # megamorphic `transform.call` dispatch.
    #
    # @param conn [Connection, Transport::Inproc::DirectPipe]
    # @param recv_queue [Async::LimitedQueue] routing strategy's recv queue
    # @yield [msg] optional per-message transform
    # @return [#stop, nil] pump task handle, or nil for DirectPipe bypass
    #
    def start_recv_pump(conn, recv_queue, &transform)
      if conn.is_a?(Transport::Inproc::DirectPipe) && conn.peer
        conn.peer.direct_recv_queue = recv_queue
        conn.peer.direct_recv_transform = transform
        return nil
      end

      # The transform/no-transform loops are deliberately duplicated so the
      # hot no-transform path pays no block-dispatch cost per message.
      if transform
        @parent_task.async(transient: true, annotation: "recv pump") do
          loop do
            msg = conn.receive_message
            msg = transform.call(msg).freeze
            recv_queue.enqueue(msg)
          end
        rescue Async::Stop
        rescue Protocol::ZMTP::Error, *CONNECTION_LOST
          connection_lost(conn)
        rescue => error
          signal_fatal_error(error)
        end
      else
        @parent_task.async(transient: true, annotation: "recv pump") do
          loop do
            recv_queue.enqueue(conn.receive_message)
          end
        rescue Async::Stop
        rescue Protocol::ZMTP::Error, *CONNECTION_LOST
          connection_lost(conn)
        rescue => error
          signal_fatal_error(error)
        end
      end
    end

    # Called when a connection is lost.
    #
    # @param connection [Connection]
    # @return [void]
    #
    def connection_lost(connection)
      endpoint = @connection_endpoints.delete(connection)
      @connections.delete(connection)
      @routing.connection_removed(connection)
      connection.close

      # Signal the connection task to exit.
      done = @connection_promises.delete(connection)
      done&.resolve(true)

      # Resolve all_peers_gone once: had peers, now have none.
      if @peer_connected.resolved? && @connections.empty?
        @all_peers_gone.resolve(true)
      end

      # Auto-reconnect if this was a connected (not bound) endpoint
      if endpoint && @connected_endpoints.include?(endpoint) && !@closed && !@closing && @reconnect_enabled
        schedule_reconnect(endpoint)
      end
    end

    # Closes all connections and listeners.
    #
    # @return [void]
    #
    def close
      return if @closed || @closing
      @closing = true

      # Stop accepting new connections — but only if we already have
      # peers to drain to. With zero connections the listeners must
      # stay open so late-arriving peers can still receive queued
      # messages during the linger period.
      unless @connections.empty?
        @listeners.each(&:stop)
        @listeners.clear
      end

      # Linger: wait for send queues to drain before closing.
      # linger=0 → close immediately, linger=nil → wait forever.
      # @closed is set AFTER draining so reconnect tasks keep
      # running during the linger period.
      linger = @options.linger
      if linger.nil? || linger > 0
        drain_timeout = linger # nil = wait forever, >0 = seconds
        drain_send_queues(drain_timeout)
      end

      @closed = true
      Reactor.untrack_linger(@options.linger) if @on_io_thread

      # Stop any remaining listeners.
      @listeners.each(&:stop)
      @listeners.clear

      # Close connections — causes pump tasks to get EOFError/IOError
      @connections.each(&:close)
      @connections.clear
      # Stop any remaining pump tasks
      @routing.stop rescue nil
      @tasks.each { |t| t.stop rescue nil }
      @tasks.clear
    end

    # Spawns a transient pump task with error propagation.
    #
    # Unexpected exceptions are caught and forwarded to
    # {#signal_fatal_error} so blocked callers (send/recv)
    # see the real error instead of deadlocking.
    #
    # @param annotation [String] task annotation for debugging
    # @yield the pump loop body
    # @return [Async::Task]
    #
    def spawn_pump_task(annotation:, &block)
      @parent_task.async(transient: true, annotation: annotation) do
        yield
      rescue Async::Stop, Protocol::ZMTP::Error, *CONNECTION_LOST
        # normal shutdown / expected disconnect
      rescue => error
        signal_fatal_error(error)
      end
    end

    # Wraps an unexpected pump error as {OMQ::SocketDeadError} and
    # unblocks any callers waiting on the recv queue.
    #
    # Must be called from inside a rescue block so that +error+ is
    # +$!+ and Ruby sets it as +#cause+ on the new exception.
    #
    # @param error [Exception]
    #
    def signal_fatal_error(error)
      return if @closing || @closed
      # raise/rescue (rather than .new) so Ruby attaches the caller's
      # in-flight exception as #cause on the wrapper.
      @fatal_error = begin
        raise OMQ::SocketDeadError, "internal error killed #{@socket_type} socket"
      rescue => wrapped
        wrapped
      end
      # Wake any blocked dequeue_recv; best-effort if the queue is closed.
      @routing.recv_queue.enqueue(nil) rescue nil
      @peer_connected.resolve(nil) rescue nil
    end

    # Saves the current Async task so connection subtrees can be
    # spawned under the caller's task tree. Called by Socket before
    # the first bind/connect — outside Reactor.run so non-Async
    # callers get the IO thread's root task, not an ephemeral work task.
    #
    def capture_parent_task
      return if @parent_task
      if Async::Task.current?
        @parent_task = Async::Task.current
      else
        @parent_task = Reactor.root_task
        @on_io_thread = true
        Reactor.track_linger(@options.linger)
      end
    end

    private

    # Spawns an isolated connection task as a sibling of accept/reconnect
    # tasks. All per-connection children (heartbeat, recv pump, reaper)
    # live inside this task. When the connection dies, the entire subtree
    # is cleaned up by Async.
    #
    def spawn_connection(io, as_server:, endpoint: nil)
      task = @parent_task&.async(transient: true, annotation: "conn #{endpoint}") do
        done = Async::Promise.new
        conn = setup_connection(io, as_server: as_server, endpoint: endpoint, done: done)
        done.wait
      rescue Protocol::ZMTP::Error, *CONNECTION_LOST
        # handshake failed or connection lost — subtree cleaned up
      ensure
        conn&.close rescue nil
      end
      @tasks << task if task
    end

    # Waits for the send queue to drain.
    #
    # @param timeout [Numeric, nil] max seconds to wait (nil = forever)
    #
    def drain_send_queues(timeout)
      return unless @routing.respond_to?(:send_queue)
      deadline = timeout ? Async::Clock.now + timeout : nil

      until @routing.send_queue.empty? && @routing.send_pump_idle?
        if deadline
          remaining = deadline - Async::Clock.now
          break if remaining <= 0
        end
        sleep 0.001
      end
    end

    # Performs the ZMTP handshake, starts heartbeating, and registers
    # the new connection with the routing strategy.
    #
    # @param io [#read, #write, #close] underlying transport stream
    # @param as_server [Boolean] whether we are the ZMTP server side
    # @param endpoint [String, nil] endpoint for reconnection tracking
    # @param done [Async::Promise, nil] resolved when the connection is lost
    #
    def setup_connection(io, as_server:, endpoint: nil, done: nil)
      conn = Protocol::ZMTP::Connection.new(
        io,
        socket_type: @socket_type.to_s,
        identity: @options.identity,
        as_server: as_server,
        mechanism: @options.mechanism&.dup,
        max_message_size: @options.max_message_size,
      )
      conn.handshake!
      start_heartbeat(conn)
      @connections << conn
      @connection_endpoints[conn] = endpoint if endpoint
      @connection_promises[conn] = done if done
      @routing.connection_added(conn)
      @peer_connected.resolve(conn)
      conn
    rescue Protocol::ZMTP::Error, *CONNECTION_LOST
      conn&.close
      raise
    end

    # Spawns a heartbeat task for the connection.
    # The connection only tracks timestamps — the engine drives the loop.
    #
    # @param conn [Connection]
    # @return [void]
    #
    def start_heartbeat(conn)
      interval = @options.heartbeat_interval
      return unless interval

      ttl = @options.heartbeat_ttl || interval
      timeout = @options.heartbeat_timeout || interval
      conn.touch_heartbeat

      @tasks << @parent_task.async(transient: true, annotation: "heartbeat") do
        loop do
          sleep interval
          conn.send_command(Protocol::ZMTP::Codec::Command.ping(ttl: ttl, context: "".b))
          if conn.heartbeat_expired?(timeout)
            conn.close
            break
          end
        end
      rescue Async::Stop
      rescue *CONNECTION_LOST
        # connection closed
      end
    end

    # Spawns a background task that reconnects to the given endpoint
    # with exponential back-off based on the reconnect_interval option.
    #
    # @param endpoint [String] endpoint to reconnect to
    # @param delay [Numeric, nil] initial delay in seconds (defaults to reconnect_interval)
    #
    def schedule_reconnect(endpoint, delay: nil)
      ri = @options.reconnect_interval
      if ri.is_a?(Range)
        delay ||= ri.begin
        max_delay = ri.end
      else
        delay ||= ri
        max_delay = nil
      end

      @tasks << @parent_task.async(transient: true, annotation: "reconnect #{endpoint}") do
        loop do
          break if @closed
          sleep delay if delay > 0
          # Re-check: the socket may have closed while we slept.
          break if @closed
          begin
            transport = transport_for(endpoint)
            transport.connect(endpoint, self)
            break # connected successfully
          rescue *CONNECTION_LOST, *CONNECTION_FAILED, Protocol::ZMTP::Error
            delay = [delay * 2, max_delay].min if max_delay
            # After first attempt with delay: 0, use the configured interval
            delay = ri.is_a?(Range) ? ri.begin : ri if delay == 0
          end
        end
      rescue Async::Stop
        # normal shutdown
      rescue => error
        signal_fatal_error(error)
      end
    end

    # Eagerly validates TCP hostnames so resolution errors fail
    # on connect, not silently in the background reconnect loop.
    # Reconnects still re-resolve (DNS may change), and transient
    # resolution failures during reconnect are retried with backoff.
    #
    # NOTE(review): uses URI and Addrinfo but this file only requires
    # "async" — presumably "uri"/"socket" are required elsewhere; confirm.
    #
    def validate_endpoint!(endpoint)
      return unless endpoint.start_with?("tcp://")
      host = URI.parse(endpoint.sub("tcp://", "http://")).hostname
      Addrinfo.getaddrinfo(host, nil, nil, :STREAM) if host
    end

    # Maps an endpoint scheme to its transport module.
    #
    # @raise [ArgumentError] on unsupported transport
    #
    def transport_for(endpoint)
      case endpoint
      when /\Atcp:\/\// then Transport::TCP
      when /\Aipc:\/\// then Transport::IPC
      when /\Ainproc:\/\// then Transport::Inproc
      else raise ArgumentError, "unsupported transport: #{endpoint}"
      end
    end

    # Extracts the trailing port from a tcp:// endpoint, or nil.
    #
    def extract_tcp_port(endpoint)
      return nil unless endpoint&.start_with?("tcp://")
      port = endpoint.split(":").last.to_i
      port.positive? ? port : nil
    end

    # Spawns accept loops for a listener under @parent_task.
    #
    # TCP listeners have multiple server sockets (IPv4/IPv6);
    # IPC listeners have one. Inproc listeners have none.
    #
    def start_accept_loops(listener)
      case listener
      when Transport::TCP::Listener
        tasks = listener.servers.map do |server|
          @parent_task.async(transient: true, annotation: "tcp accept #{listener.endpoint}") do
            loop do
              client = server.accept
              # defer_stop: don't let a stop request interrupt handshake setup.
              Async::Task.current.defer_stop do
                handle_accepted(IO::Stream::Buffered.wrap(client), endpoint: listener.endpoint)
              end
            end
          rescue Async::Stop
          rescue IOError
            # server closed
          ensure
            server.close rescue nil
          end
        end
        listener.accept_tasks = tasks

      when Transport::IPC::Listener
        task = @parent_task.async(transient: true, annotation: "ipc accept #{listener.endpoint}") do
          loop do
            client = listener.server.accept
            Async::Task.current.defer_stop do
              handle_accepted(IO::Stream::Buffered.wrap(client), endpoint: listener.endpoint)
            end
          end
        rescue Async::Stop
        rescue IOError
          # server closed
        ensure
          listener.server.close rescue nil
        end
        listener.accept_task = task
      end
    end
  end
end
|
data/lib/omq/options.rb
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# frozen_string_literal: true

module OMQ
  # Pure Ruby socket options.
  #
  # All timeouts are expressed in seconds (Numeric) or nil for "no
  # timeout". High-water-mark values are integers.
  #
  class Options
    # Default high-water mark for both send and receive queues.
    DEFAULT_HWM = 1000

    # Queue limits.
    attr_accessor :send_hwm, :recv_hwm

    # Identity and shutdown behavior.
    attr_accessor :linger, :identity

    # Routing behavior flags.
    attr_accessor :router_mandatory, :conflate

    # I/O timeouts (seconds, nil = no timeout).
    attr_accessor :read_timeout, :write_timeout

    # Reconnection: a Numeric interval, or a Range for backoff.
    attr_accessor :reconnect_interval

    # Heartbeat tuning (seconds; nil falls back to heartbeat_interval,
    # or disables heartbeating entirely for heartbeat_interval itself).
    attr_accessor :heartbeat_interval, :heartbeat_ttl, :heartbeat_timeout

    # Wire limits and security.
    attr_accessor :max_message_size, :mechanism

    alias_method :router_mandatory?, :router_mandatory
    alias_method :recv_timeout, :read_timeout
    alias_method :recv_timeout=, :read_timeout=
    alias_method :send_timeout, :write_timeout
    alias_method :send_timeout=, :write_timeout=

    # @param linger [Integer] linger period in seconds (default 0)
    #
    def initialize(linger: 0)
      @linger = linger

      # Queues
      @send_hwm = DEFAULT_HWM
      @recv_hwm = DEFAULT_HWM

      # Identity / routing
      @identity = "".b
      @router_mandatory = false
      @conflate = false

      # Timeouts (seconds, nil = no timeout)
      @read_timeout = nil
      @write_timeout = nil

      # Reconnection: seconds, or a Range for backoff (e.g. 0.1..5.0)
      @reconnect_interval = 0.1

      # Heartbeats (seconds; nil = disabled / fall back to interval)
      @heartbeat_interval = nil
      @heartbeat_ttl = nil
      @heartbeat_timeout = nil

      # Wire limits / security
      @max_message_size = nil # bytes, nil = unlimited
      @mechanism = Protocol::ZMTP::Mechanism::Null.new
    end
  end
end
|
data/lib/omq/pair.rb
CHANGED
data/lib/omq/peer.rb
CHANGED
|
@@ -2,9 +2,9 @@
|
|
|
2
2
|
|
|
3
3
|
module OMQ
|
|
4
4
|
class PEER < Socket
|
|
5
|
-
include
|
|
6
|
-
include
|
|
7
|
-
include
|
|
5
|
+
include Readable
|
|
6
|
+
include Writable
|
|
7
|
+
include SingleFrame
|
|
8
8
|
|
|
9
9
|
def initialize(endpoints = nil, linger: 0)
|
|
10
10
|
_init_engine(:PEER, linger: linger)
|