omq 0.12.0 → 0.13.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +49 -1
- data/lib/omq/engine/connection_setup.rb +47 -0
- data/lib/omq/engine/heartbeat.rb +40 -0
- data/lib/omq/engine/reconnect.rb +56 -0
- data/lib/omq/engine/recv_pump.rb +76 -0
- data/lib/omq/engine.rb +104 -311
- data/lib/omq/routing/conn_send_pump.rb +36 -0
- data/lib/omq/routing/dealer.rb +8 -10
- data/lib/omq/routing/fair_queue.rb +144 -0
- data/lib/omq/routing/fair_recv.rb +27 -0
- data/lib/omq/routing/fan_out.rb +113 -72
- data/lib/omq/routing/pair.rb +39 -20
- data/lib/omq/routing/pub.rb +5 -7
- data/lib/omq/routing/pull.rb +5 -4
- data/lib/omq/routing/push.rb +3 -12
- data/lib/omq/routing/rep.rb +31 -51
- data/lib/omq/routing/req.rb +8 -10
- data/lib/omq/routing/round_robin.rb +82 -64
- data/lib/omq/routing/router.rb +23 -48
- data/lib/omq/routing/sub.rb +8 -5
- data/lib/omq/routing/xpub.rb +7 -3
- data/lib/omq/routing/xsub.rb +43 -27
- data/lib/omq/routing.rb +3 -0
- data/lib/omq/socket.rb +2 -2
- data/lib/omq/transport/inproc/direct_pipe.rb +162 -0
- data/lib/omq/transport/inproc.rb +37 -218
- data/lib/omq/version.rb +1 -1
- metadata +9 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 91e6db2b4fd881530030f63c0c47a534c3f6361497d0f55ad1d28c7dbb85a669
|
|
4
|
+
data.tar.gz: 1af1d5692333586b650f7d0f14118ae04f64a65ab3d127d8ea3a3781e0eaae34
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 4a724ddc29600b8d101046ca82869e4598f733564be4022fc8eb287084a80319a1586d5e0ced449f3eea3a26127add8f962fc3914177cc95f4f495e98f87e7d4
|
|
7
|
+
data.tar.gz: '08935cd0ad4548af6a526984d84514b242ab1d29bfd14fb8933f7b84b9f906f1197588f6bc0fec95332d2dbac6e016308cbdb5bf5efe9e2cba7b2bbf7d695d58'
|
data/CHANGELOG.md
CHANGED
|
@@ -1,9 +1,57 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
-
##
|
|
3
|
+
## 0.13.0
|
|
4
|
+
|
|
5
|
+
### Changed
|
|
6
|
+
|
|
7
|
+
- **`Engine` internals: `ConnectionRecord` + lifecycle state** — three parallel
|
|
8
|
+
per-connection ivars (`@connections` Array, `@connection_endpoints`,
|
|
9
|
+
`@connection_promises`) replaced by a single `@connections` Hash keyed by
|
|
10
|
+
connection, with values `ConnectionRecord = Data.define(:endpoint, :done)`.
|
|
11
|
+
`@connected_endpoints` renamed to `@dialed` (`Set`). `@closed`/`@closing`
|
|
12
|
+
booleans replaced by a `@state` symbol (`:open`/`:closing`/`:closed`).
|
|
13
|
+
Net: −4 instance variables.
|
|
14
|
+
- **`@connections` in `FanOut`, `Sub`, `XSub` routing strategies changed from
|
|
15
|
+
`Array` to `Set`** — O(1) `#delete` on peer disconnect; semantics already
|
|
16
|
+
required uniqueness.
|
|
17
|
+
|
|
18
|
+
### Fixed
|
|
19
|
+
|
|
20
|
+
- **FanOut send queues no longer drop messages** — per-connection send queues in
|
|
21
|
+
`FanOut` (PUB/XPUB/RADIO) used `DropQueue` (`Thread::SizedQueue`) which never
|
|
22
|
+
blocked the publisher fiber. When burst-sending beyond `send_hwm`, the sender
|
|
23
|
+
ran without yielding and messages were silently dropped. Switched to
|
|
24
|
+
`Async::LimitedQueue` (`:block`) so the publisher yields when a per-connection
|
|
25
|
+
queue is full, giving the send pump fiber a chance to drain it.
|
|
26
|
+
|
|
27
|
+
### Changed
|
|
28
|
+
|
|
29
|
+
- **Benchmark suite redesign** — replaced ASCII plots (unicode_plot) with JSONL
|
|
30
|
+
result storage and a colored terminal regression report. Results are appended
|
|
31
|
+
to `bench/results.jsonl` (gitignored, machine-local). New commands:
|
|
32
|
+
`ruby bench/run_all.rb` (run all patterns), `ruby bench/report.rb` (compare
|
|
33
|
+
last runs, highlight regressions/improvements).
|
|
4
34
|
|
|
5
35
|
### Added
|
|
6
36
|
|
|
37
|
+
- **Per-peer HWM** — send and receive high-water marks now apply per connected
|
|
38
|
+
peer (RFC 28/29/30). Each peer gets its own bounded send queue and its own
|
|
39
|
+
bounded recv queue. A slow or muted peer no longer steals capacity from
|
|
40
|
+
other peers. `FairQueue` + `SignalingQueue` aggregate per-connection recv
|
|
41
|
+
queues with fair round-robin delivery; `RoundRobin` and `FanOut` mixins
|
|
42
|
+
maintain per-connection send queues with dedicated send pump fibers.
|
|
43
|
+
`PUSH`/`DEALER`/`PAIR` buffer messages in a staging queue when no peers are
|
|
44
|
+
connected yet, draining into the first peer's queue on connect.
|
|
45
|
+
- **`FairQueue`** — new aggregator class (`lib/omq/routing/fair_queue.rb`)
|
|
46
|
+
that fair-queues across per-connection bounded queues. Pending messages from
|
|
47
|
+
a disconnected peer are drained before the queue is discarded.
|
|
48
|
+
- **`Socket.bind` / `Socket.connect` class-method fix** — now pass the
|
|
49
|
+
endpoint via `@`/`>` prefix into the constructor so any post-attach
|
|
50
|
+
initialization in subclasses (e.g. XSUB's `subscribe:` kwarg) runs after
|
|
51
|
+
the connection is established.
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
|
|
7
55
|
- **QoS infrastructure** — `Options#qos` attribute (default 0) and inproc
|
|
8
56
|
command queue support for QoS-enabled connections. The
|
|
9
57
|
[omq-qos](https://github.com/paddor/omq-qos) gem activates delivery
|
|
# frozen_string_literal: true

module OMQ
  class Engine
    # Performs the ZMTP handshake on a fresh transport stream and registers
    # the resulting connection with the owning engine.
    #
    class ConnectionSetup
      # Runs the full setup sequence: build, handshake, heartbeat, optional
      # wrapping, registration, and monitor notification.
      #
      # @param io [#read, #write, #close] underlying transport stream
      # @param engine [Engine]
      # @param as_server [Boolean]
      # @param endpoint [String, nil]
      # @param done [Async::Promise, nil] resolved when connection is lost
      # @return [Connection]
      # @raise [Protocol::ZMTP::Error] re-raised after emitting :handshake_failed
      #
      def self.run(io, engine, as_server:, endpoint: nil, done: nil)
        conn = build_connection(io, engine, as_server)
        conn.handshake!
        Heartbeat.start(engine.parent_task, conn, engine.options, engine.tasks)
        # The wrapper (if any) may replace the connection object entirely,
        # so it must run before registration.
        conn = engine.connection_wrapper.call(conn) if engine.connection_wrapper
        register(conn, engine, endpoint, done)
        engine.emit_monitor_event(:handshake_succeeded, endpoint: endpoint)
        conn
      rescue Protocol::ZMTP::Error, *CONNECTION_LOST => error
        engine.emit_monitor_event(:handshake_failed, endpoint: endpoint, detail: { error: error })
        # conn may be nil if build_connection itself raised.
        conn&.close
        raise
      end

      # Builds the raw ZMTP connection from the engine's socket options.
      #
      # @param io [#read, #write, #close]
      # @param engine [Engine]
      # @param as_server [Boolean]
      # @return [Protocol::ZMTP::Connection]
      #
      def self.build_connection(io, engine, as_server)
        Protocol::ZMTP::Connection.new(
          io,
          socket_type: engine.socket_type.to_s,
          identity: engine.options.identity,
          as_server: as_server,
          # dup: the mechanism object carries per-connection handshake state.
          mechanism: engine.options.mechanism&.dup,
          max_message_size: engine.options.max_message_size,
        )
      end

      # Records the connection in the engine's connection table, notifies the
      # routing strategy, and resolves the engine's peer_connected promise.
      #
      # @param conn [Connection]
      # @param engine [Engine]
      # @param endpoint [String, nil]
      # @param done [Async::Promise, nil]
      # @return [void]
      #
      def self.register(conn, engine, endpoint, done)
        engine.connections[conn] = Engine::ConnectionRecord.new(endpoint: endpoint, done: done)
        engine.routing.connection_added(conn)
        engine.peer_connected.resolve(conn)
      end
    end
  end
end
# frozen_string_literal: true

module OMQ
  class Engine
    # Spawns a heartbeat task for a connection.
    #
    # Sends PING frames at +interval+ seconds and closes the connection
    # if no traffic is seen within +timeout+ seconds.
    #
    class Heartbeat
      # Starts the heartbeat fiber. No-op when heartbeats are disabled
      # (heartbeat_interval is nil).
      #
      # @param parent_task [Async::Task]
      # @param conn [Connection]
      # @param options [Options]
      # @param tasks [Array] collector the spawned task is appended to
      # @return [void]
      #
      def self.start(parent_task, conn, options, tasks)
        interval = options.heartbeat_interval
        return unless interval

        # TTL and timeout fall back to the interval when not set explicitly.
        ttl = options.heartbeat_ttl || interval
        timeout = options.heartbeat_timeout || interval
        # Seed the liveness timestamp so the first expiry check has a baseline.
        conn.touch_heartbeat

        tasks << parent_task.async(transient: true, annotation: "heartbeat") do
          loop do
            sleep interval
            conn.send_command(Protocol::ZMTP::Codec::Command.ping(ttl: ttl, context: "".b))
            # Check expiry only after sending the PING: the peer gets a full
            # timeout window to respond with any traffic.
            if conn.heartbeat_expired?(timeout)
              conn.close
              break
            end
          end
        rescue Async::Stop
          # task cancelled on engine shutdown — nothing to clean up
        rescue *CONNECTION_LOST
          # connection closed
        end
      end
    end
  end
end
# frozen_string_literal: true

module OMQ
  class Engine
    # Schedules reconnect attempts with exponential back-off.
    #
    # Runs a background task that loops until a connection is established
    # or the engine is closed.
    #
    class Reconnect
      # @param endpoint [String]
      # @param options [Options]
      # @param parent_task [Async::Task]
      # @param engine [Engine] for transport_for / emit_monitor_event / signal_fatal_error / closed?
      # @param delay [Numeric, nil] initial delay (defaults to reconnect_interval)
      # @return [void]
      #
      def self.schedule(endpoint, options, parent_task, engine, delay: nil)
        # reconnect_interval may be a Numeric (fixed start) or a Range
        # (start..cap for the exponential back-off).
        ri = options.reconnect_interval
        delay, max_delay = init_delay(ri, delay)

        engine.tasks << parent_task.async(transient: true, annotation: "reconnect #{endpoint}") do
          loop do
            break if engine.closed?
            sleep delay if delay > 0
            # Re-check after sleeping: the engine may have closed meanwhile.
            break if engine.closed?
            begin
              engine.transport_for(endpoint).connect(endpoint, engine)
              break
            rescue *CONNECTION_LOST, *CONNECTION_FAILED, Protocol::ZMTP::Error
              delay = next_delay(delay, max_delay, ri)
              engine.emit_monitor_event(:connect_retried, endpoint: endpoint, detail: { interval: delay })
            end
          end
        rescue Async::Stop
          # task cancelled on engine shutdown
        rescue => error
          engine.signal_fatal_error(error)
        end
      end

      # Resolves the initial delay and the back-off cap from the configured
      # reconnect interval.
      #
      # @param ri [Numeric, Range]
      # @param delay [Numeric, nil] caller-supplied initial delay
      # @return [Array(Numeric, Numeric), Array(Numeric, nil)] [delay, max_delay]
      #
      def self.init_delay(ri, delay)
        if ri.is_a?(Range)
          [delay || ri.begin, ri.end]
        else
          [delay || ri, nil]
        end
      end

      # Doubles the delay, clamps it to the cap, and re-seeds from the base
      # interval if doubling a zero delay left it stuck at zero.
      #
      # @param delay [Numeric]
      # @param max_delay [Numeric, nil]
      # @param ri [Numeric, Range]
      # @return [Numeric]
      #
      def self.next_delay(delay, max_delay, ri)
        delay = delay * 2
        delay = [delay, max_delay].min if max_delay
        # 0 * 2 == 0 would disable back-off forever; restart from the base.
        delay = (ri.is_a?(Range) ? ri.begin : ri) if delay == 0
        delay
      end
    end
  end
end
# frozen_string_literal: true

module OMQ
  class Engine
    # Starts a recv pump for a connection.
    #
    # For inproc DirectPipe: wires the direct recv path (no fiber spawned).
    # For TCP/IPC: spawns a transient task that reads messages from the
    # connection and enqueues them into +recv_queue+.
    #
    # The two-branch structure (with/without transform) is intentional for
    # YJIT: it gives the JIT a monomorphic call per routing strategy instead
    # of a megamorphic `transform.call` dispatch inside a shared loop.
    # Do NOT merge the branches without re-benchmarking.
    #
    class RecvPump
      # Fairness budget: yield to the scheduler after this many messages...
      FAIRNESS_MESSAGES = 64
      # ...or after this many bytes, whichever comes first.
      FAIRNESS_BYTES = 1 << 20 # 1 MB

      # @param parent_task [Async::Task]
      # @param conn [Connection, Transport::Inproc::DirectPipe]
      # @param recv_queue [SignalingQueue]
      # @param engine [Engine] for connection_lost / signal_fatal_error callbacks
      # @param transform [Proc, nil]
      # @return [Async::Task, nil] nil for the wired DirectPipe fast path
      #
      def self.start(parent_task, conn, recv_queue, engine, transform)
        if conn.is_a?(Transport::Inproc::DirectPipe) && conn.peer
          # Inproc fast path: the peer writes straight into our queue.
          conn.peer.direct_recv_queue = recv_queue
          conn.peer.direct_recv_transform = transform
          return nil
        end

        if transform
          parent_task.async(transient: true, annotation: "recv pump") do |task|
            loop do
              count = 0
              bytes = 0
              # Drain up to the fairness budget, then yield so other fibers run.
              while count < FAIRNESS_MESSAGES && bytes < FAIRNESS_BYTES
                msg = conn.receive_message
                msg = transform.call(msg).freeze
                recv_queue.enqueue(msg)
                count += 1
                # Only frame arrays (arrays of strings) count toward the byte
                # budget; other message shapes contribute 0.
                bytes += msg.is_a?(Array) && msg.first.is_a?(String) ? msg.sum(&:bytesize) : 0
              end
              task.yield
            end
          rescue Async::Stop
            # task cancelled on engine shutdown
          rescue Protocol::ZMTP::Error, *CONNECTION_LOST
            engine.connection_lost(conn)
          rescue => error
            engine.signal_fatal_error(error)
          end
        else
          parent_task.async(transient: true, annotation: "recv pump") do |task|
            loop do
              count = 0
              bytes = 0
              while count < FAIRNESS_MESSAGES && bytes < FAIRNESS_BYTES
                msg = conn.receive_message
                recv_queue.enqueue(msg)
                count += 1
                bytes += msg.is_a?(Array) && msg.first.is_a?(String) ? msg.sum(&:bytesize) : 0
              end
              task.yield
            end
          rescue Async::Stop
            # task cancelled on engine shutdown
          rescue Protocol::ZMTP::Error, *CONNECTION_LOST
            engine.connection_lost(conn)
          rescue => error
            engine.signal_fatal_error(error)
          end
        end
      end
    end
  end
end