nnq 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +27 -0
- data/LICENSE +13 -0
- data/README.md +57 -0
- data/lib/nnq/connection.rb +90 -0
- data/lib/nnq/engine/connection_lifecycle.rb +132 -0
- data/lib/nnq/engine/socket_lifecycle.rb +88 -0
- data/lib/nnq/engine.rb +198 -0
- data/lib/nnq/error.rb +10 -0
- data/lib/nnq/options.rb +31 -0
- data/lib/nnq/pair.rb +33 -0
- data/lib/nnq/pub_sub.rb +72 -0
- data/lib/nnq/push_pull.rb +52 -0
- data/lib/nnq/reactor.rb +83 -0
- data/lib/nnq/req_rep.rb +60 -0
- data/lib/nnq/routing/pair.rb +72 -0
- data/lib/nnq/routing/pub.rb +89 -0
- data/lib/nnq/routing/pull.rb +38 -0
- data/lib/nnq/routing/push.rb +43 -0
- data/lib/nnq/routing/rep.rb +113 -0
- data/lib/nnq/routing/req.rb +107 -0
- data/lib/nnq/routing/send_pump.rb +129 -0
- data/lib/nnq/routing/sub.rb +58 -0
- data/lib/nnq/socket.rb +86 -0
- data/lib/nnq/transport/inproc.rb +94 -0
- data/lib/nnq/transport/ipc.rb +97 -0
- data/lib/nnq/transport/tcp.rb +90 -0
- data/lib/nnq/version.rb +5 -0
- data/lib/nnq.rb +17 -0
- metadata +112 -0
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async"
|
|
4
|
+
require "securerandom"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Routing
    # REQ: the client half of req0/rep0.
    #
    # Wire format: every message body is `[4-byte BE request_id][user
    # payload]`. The id always has its high bit set
    # (0x80000000..0xFFFFFFFF), which is nng's marker for the last
    # (deepest) frame on the backtrace stack; direct REQ→REP carries
    # exactly one id.
    #
    # Cooked-mode semantics implemented here:
    # - One in-flight request per socket. A new send_request while an
    #   earlier one is still waiting cancels it: the blocked caller is
    #   woken with {NNQ::RequestCancelled}, and any late reply for the
    #   abandoned id is dropped. This mirrors nng cooked req0, where a
    #   fresh nng_sendmsg abandons the prior request.
    # - Replies are matched by id only, never by pipe; unmatched replies
    #   are dropped silently.
    # - Peers are chosen round-robin. There is no resend timer (real nng
    #   retries on a timer; callers handle that via their own timeouts).
    # - Blocks waiting for a peer when no connection is currently up.
    #
    class Req
      def initialize(engine)
        @engine = engine
        @next_idx = 0
        @mutex = Mutex.new
        @outstanding = nil # [request_id, promise] while a request is in flight
      end

      # Sends +body+ as a request and blocks until the matching reply
      # arrives. Returns the reply payload with the id header stripped.
      #
      # Raises {NNQ::RequestCancelled} if another fiber issues a
      # send_request while this one is still waiting.
      #
      # @param body [String]
      # @return [String]
      def send_request(body)
        request_id = SecureRandom.random_number(0x80000000) | 0x80000000
        reply = Async::Promise.new

        @mutex.synchronize do
          # A fresh request supersedes any in-flight one: wake the old
          # caller with a cancellation error before taking the slot.
          previous = @outstanding
          previous&.last&.reject(RequestCancelled.new("cancelled by new send_request"))
          @outstanding = [request_id, reply]
        end

        peer = pick_peer
        peer.send_message([request_id].pack("N") + body)
        reply.wait
      ensure
        @mutex.synchronize do
          # Clear the slot only while it still belongs to this call; a
          # concurrent send_request may already have replaced it.
          @outstanding = nil if @outstanding&.first == request_id
        end
      end

      # Engine recv-loop hook: called with every received frame.
      def enqueue(body, _conn)
        return if body.bytesize < 4

        reply_id = body.unpack1("N")
        payload = body.byteslice(4..)

        @mutex.synchronize do
          pending = @outstanding
          # Anything that doesn't match the outstanding id is a late or
          # spurious reply and is silently dropped.
          pending[1].resolve(payload) if pending && pending[0] == reply_id
        end
      end

      # Fails any waiting caller and clears the in-flight slot.
      def close
        @mutex.synchronize do
          @outstanding&.last&.reject(NNQ::Error.new("REQ socket closed"))
          @outstanding = nil
        end
      end

      private

      # Round-robin over current connections; parks on the engine's
      # new-pipe signal until at least one connection exists.
      def pick_peer
        peers = @engine.connections.keys
        while peers.empty?
          @engine.new_pipe.wait
          peers = @engine.connections.keys
        end
        @next_idx = (@next_idx + 1) % peers.size
        peers[@next_idx]
      end
    end
  end
end
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async"
|
|
4
|
+
require "async/limited_queue"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Routing
    # Mixin for routing strategies that drain a shared bounded send queue
    # via per-connection work-stealing pumps. Used by PUSH (load-balance
    # across N peers) and PAIR (single peer, but the same pump shape).
    #
    # See DESIGN.md "Per-socket HWM" for the rationale.
    #
    # The per-pump batch caps (BATCH_MSG_CAP / BATCH_BYTE_CAP) enforce
    # fairness across the work-stealing pumps. Without them, the first
    # pump that wakes up would drain the entire queue in one non-blocking
    # burst before any other pump got a turn (TCP send buffers absorb
    # bursts without forcing a fiber yield).
    #
    # Including classes must call {#init_send_pump} from #initialize and
    # {#spawn_send_pump_for} from their #connection_added hook.
    #
    module SendPump
      BATCH_MSG_CAP = 256
      BATCH_BYTE_CAP = 256 * 1024

      # @return [Boolean] true once the shared queue is empty AND no
      #   batch is mid-write across any pump.
      def send_queue_drained?
        @send_queue.empty? && @in_flight.zero?
      end

      # Removes a pump and stops its task (unless called from inside
      # the pump itself, in which case the pump is already on its way
      # out via the rescue/ensure path).
      #
      # @param conn [Connection]
      def remove_send_pump_for(conn)
        task = @pumps.delete(conn)
        return if task.nil? || task == Async::Task.current
        task.stop
      rescue IOError, Errno::EPIPE
        # Pump was mid-flush when its conn was closed; cancel surfaced
        # the same IOError. Already handled — pump is gone.
      end

      # Stops all send pump tasks. Each pump's ensure block calls
      # engine.handle_connection_lost → routing.connection_removed
      # which removes its own entry, so iterate over a snapshot
      # (#values returns a new array).
      def close
        @pumps.values.each(&:stop)
        @pumps.clear
      end

      private

      # Sets up pump state shared by all connections of this socket.
      #
      # @param engine [Engine]
      def init_send_pump(engine)
        @engine = engine
        @send_queue = Async::LimitedQueue.new(engine.options.send_hwm)
        @pumps = {} # conn => pump task
        @in_flight = 0 # batches dequeued but not yet flushed
      end

      # Enqueues +body+ on the shared send queue. Blocks the caller when
      # the queue is full (HWM backpressure).
      #
      # @param body [String]
      def enqueue_for_send(body)
        @send_queue.enqueue(body)
      end

      # Spawns a send pump fiber for +conn+ that races to drain the
      # shared queue. A nil dequeued from the queue is the close
      # sentinel and ends the pump; any I/O error during a flush drops
      # the in-flight batch and ends the pump. Either way the ensure
      # block reports the connection as lost to the engine.
      #
      # @param conn [Connection]
      def spawn_send_pump_for(conn)
        task = @engine.spawn_task(annotation: "nnq send pump #{conn.endpoint}") do
          loop do
            first = @send_queue.dequeue
            break if first.nil? # queue closed
            @in_flight += 1
            begin
              batch = [first]
              drain_capped(batch)
              write_batch(conn, batch)
            ensure
              # Balanced even when write_batch raises, so
              # send_queue_drained? never wedges on a dead pump.
              @in_flight -= 1
            end
          rescue EOFError, IOError, Errno::EPIPE, Errno::ECONNRESET
            # Peer died mid-flush. In-flight batch dropped.
            break
          end
        ensure
          @engine.handle_connection_lost(conn)
        end
        @pumps[conn] = task
      end

      # Greedily tops up +batch+ (which already holds one message) with
      # whatever is immediately available, bounded by the fairness caps.
      #
      # NOTE(review): assumes @send_queue#dequeue accepts `timeout: 0`
      # as a non-blocking poll — confirm against the Async queue version
      # in use (stock Async::LimitedQueue#dequeue takes no arguments).
      #
      # @param batch [Array<String>] mutated in place
      def drain_capped(batch)
        bytes = batch[0].bytesize
        while batch.size < BATCH_MSG_CAP && bytes < BATCH_BYTE_CAP
          msg = @send_queue.dequeue(timeout: 0)
          break unless msg
          batch << msg
          bytes += msg.bytesize
        end
      end

      # Writes +batch+ to +conn+ and flushes once at the end.
      #
      # @param conn [Connection]
      # @param batch [Array<String>]
      def write_batch(conn, batch)
        if batch.size == 1
          conn.write_message(batch[0])
        else
          # Single mutex acquisition for the whole batch (batches run
          # up to BATCH_MSG_CAP messages). The per-message pump loop
          # would otherwise lock/unlock the SP mutex N times.
          conn.write_messages(batch)
        end
        conn.flush
      end
    end
  end
end
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async/queue"
|
|
4
|
+
|
|
5
|
+
module NNQ
  module Routing
    # SUB side of pub/sub (nng sub0).
    #
    # pub0 broadcasts blindly; all filtering happens here on the
    # receiving side. Subscriptions are byte-prefix matches, so a
    # subscription to the empty string matches every message, while an
    # empty subscription set matches nothing (nng semantics — not
    # pre-4.x ZeroMQ's "no subscription means receive everything").
    #
    class Sub
      def initialize
        @queue = Async::Queue.new
        @subscriptions = [] # de-duplicated byte-string prefixes
      end

      # Adds +prefix+ (as bytes) to the subscription set. Idempotent.
      def subscribe(prefix)
        bytes = prefix.b
        @subscriptions << bytes unless @subscriptions.include?(bytes)
      end

      # Removes +prefix+ from the subscription set.
      def unsubscribe(prefix)
        @subscriptions.delete(prefix.b)
      end

      # Engine recv-loop hook: queues +body+ only when a subscription
      # matches; everything else is dropped locally.
      def enqueue(body, _conn = nil)
        @queue.enqueue(body) if matches?(body)
      end

      # Blocks until the next matching message; nil once closed.
      #
      # @return [String, nil]
      def receive
        @queue.dequeue
      end

      # Wakes a blocked #receive with the nil sentinel.
      def close
        @queue.enqueue(nil)
      end

      private

      # True when at least one subscribed prefix is a byte prefix of +body+.
      def matches?(body)
        !@subscriptions.find { |bytes| body.start_with?(bytes) }.nil?
      end
    end
  end
end
|
data/lib/nnq/socket.rb
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "options"
|
|
4
|
+
require_relative "engine"
|
|
5
|
+
require_relative "reactor"
|
|
6
|
+
|
|
7
|
+
module NNQ
  # Socket base class. Subclasses (PUSH, PULL, ...) supply the SP
  # protocol id and a routing strategy through the #protocol and
  # #build_routing hooks.
  #
  class Socket
    # @return [Options]
    attr_reader :options

    # Builds a socket and binds it in one call.
    def self.bind(endpoint, **opts)
      new(**opts).tap { |sock| sock.bind(endpoint) }
    end

    # Builds a socket and connects it in one call.
    def self.connect(endpoint, **opts)
      new(**opts).tap { |sock| sock.connect(endpoint) }
    end

    def initialize(linger: nil, send_hwm: Options::DEFAULT_HWM)
      @options = Options.new(linger: linger, send_hwm: send_hwm)
      @engine = Engine.new(protocol: protocol, options: @options) { |engine| build_routing(engine) }
    end

    def bind(endpoint)
      ensure_parent_task
      Reactor.run { @engine.bind(endpoint) }
    end

    def connect(endpoint)
      ensure_parent_task
      Reactor.run { @engine.connect(endpoint) }
    end

    def close
      Reactor.run { @engine.close }
      nil
    end

    def last_endpoint = @engine.last_endpoint

    def connection_count = @engine.connections.size

    private

    # Captures the task that engine fibers should be parented under.
    # Must run OUTSIDE Reactor.run: a non-Async caller needs the IO
    # thread's root task, not the ephemeral work-item task Reactor
    # wraps each dispatched block in. Inside an Async reactor, the
    # current task is the right parent.
    def ensure_parent_task
      parent = Async::Task.current? ? Async::Task.current : Reactor.root_task
      @engine.capture_parent_task(parent)
    end

    # Subclass hooks.

    def protocol
      raise NotImplementedError
    end

    def build_routing(_engine)
      raise NotImplementedError
    end
  end
end
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "socket"
|
|
4
|
+
require "io/stream"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Transport
    # In-process transport: both peers live in the same process and
    # exchange frames over a Unix socketpair — no network, no address
    # resolution. The socketpair simply stands in for TCP; the normal
    # Protocol::SP handshake and framing still run over it, which keeps
    # this transport tiny instead of being a parallel Connection
    # implementation. Kernel buffering across the pair is ample for
    # typical in-process message sizes.
    #
    module Inproc
      @registry = {} # endpoint => bound engine (process-global)
      @mutex = Mutex.new

      class << self
        # Binds +engine+ to +endpoint+ in the process-global registry.
        #
        # @param endpoint [String] e.g. "inproc://my-endpoint"
        # @param engine [Engine]
        # @return [Listener]
        # @raise [Error] when the endpoint is already bound
        def bind(endpoint, engine)
          @mutex.synchronize do
            raise Error, "inproc endpoint already bound: #{endpoint}" if @registry.key?(endpoint)

            @registry[endpoint] = engine
          end
          Listener.new(endpoint)
        end

        # Connects +engine+ to a bound inproc endpoint over a fresh Unix
        # socketpair: one end goes to the bound engine (as an accepted
        # pipe), the other to the connecting engine. Both sides run the
        # normal SP handshake concurrently.
        #
        # @param endpoint [String]
        # @param engine [Engine]
        # @return [void]
        # @raise [Error] when nothing is bound at +endpoint+
        def connect(endpoint, engine)
          bound = @mutex.synchronize { @registry[endpoint] }
          raise Error, "inproc endpoint not bound: #{endpoint}" unless bound

          connecting_io, accepted_io = UNIXSocket.pair
          # The bound side's handshake must run concurrently with ours:
          # calling bound.handle_accepted inline would block reading our
          # greeting before we ever get a chance to write it.
          bound.spawn_task(annotation: "nnq inproc accept #{endpoint}") do
            bound.handle_accepted(IO::Stream::Buffered.wrap(accepted_io), endpoint: endpoint)
          end
          engine.handle_connected(IO::Stream::Buffered.wrap(connecting_io), endpoint: endpoint)
        end

        # Drops +endpoint+ from the registry (Listener#stop calls this).
        def unbind(endpoint)
          @mutex.synchronize { @registry.delete(endpoint) }
        end

        # Empties the registry. Test helper.
        def reset!
          @mutex.synchronize { @registry.clear }
        end
      end

      # A bound inproc endpoint — just a registry entry; owns no fibers.
      class Listener
        attr_reader :endpoint

        def initialize(endpoint)
          @endpoint = endpoint
        end

        # inproc connects synchronously, so there is no accept loop.
        def start_accept_loop(_parent_task, &_on_accepted); end

        def stop
          Inproc.unbind(@endpoint)
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "socket"
|
|
4
|
+
require "io/stream"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Transport
    # IPC transport over Unix domain sockets.
    #
    # Handles both filesystem paths and the Linux abstract namespace
    # (paths starting with "@"). The wire format matches TCP — SP/TCP
    # greeting followed by framed messages — so Protocol::SP handles it
    # verbatim.
    #
    module IPC
      class << self
        # Binds an IPC server.
        #
        # @param endpoint [String] e.g. "ipc:///tmp/nnq.sock" or "ipc://@abstract"
        # @param engine [Engine]
        # @return [Listener]
        def bind(endpoint, engine)
          path = parse_path(endpoint)
          sock_path = to_socket_path(path)
          # A stale socket file left by a previous run would make bind fail.
          File.delete(sock_path) if !abstract?(path) && File.exist?(sock_path)
          Listener.new(endpoint, UNIXServer.new(sock_path), path, engine)
        end

        # Connects to an IPC endpoint and hands the pipe to the engine.
        #
        # @param endpoint [String]
        # @param engine [Engine]
        # @return [void]
        def connect(endpoint, engine)
          sock_path = to_socket_path(parse_path(endpoint))
          stream = IO::Stream::Buffered.wrap(UNIXSocket.new(sock_path))
          engine.handle_connected(stream, endpoint: endpoint, framing: :ipc)
        end

        # Strips the "ipc://" scheme prefix.
        def parse_path(endpoint)
          endpoint.sub(%r{\Aipc://}, "")
        end

        # Converts a leading "@" into the leading NUL byte the kernel
        # expects for Linux abstract-namespace socket addresses.
        def to_socket_path(path)
          return path unless abstract?(path)

          "\0#{path[1..]}"
        end

        def abstract?(path)
          path.start_with?("@")
        end
      end

      # A bound IPC listener.
      class Listener
        attr_reader :endpoint

        def initialize(endpoint, server, path, engine)
          @endpoint = endpoint
          @server = server
          @path = path
          @engine = engine
          @task = nil
        end

        # Spawns an accept-loop fiber under +parent_task+, yielding an
        # IO::Stream::Buffered (plus the :ipc framing tag the engine's
        # per-listener block closes over) for each accepted connection.
        def start_accept_loop(parent_task, &on_accepted)
          @task = parent_task.async(annotation: "nnq ipc accept #{@endpoint}") do
            loop do
              on_accepted.call(IO::Stream::Buffered.wrap(@server.accept), :ipc)
            rescue Async::Stop, IOError
              break
            end
          ensure
            begin
              @server.close
            rescue StandardError
              # already closed
            end
          end
        end

        # Stops the accept loop, closes the server, and removes the
        # socket file (filesystem paths only — abstract names vanish
        # with the socket).
        def stop
          @task&.stop
          begin
            @server.close
          rescue StandardError
            # already closed
          end
          return if IPC.abstract?(@path)

          begin
            File.delete(@path)
          rescue StandardError
            # already gone
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "socket"
|
|
4
|
+
require "uri"
|
|
5
|
+
require "io/stream"
|
|
6
|
+
|
|
7
|
+
module NNQ
  module Transport
    # TCP transport. One TCPServer per bind, with a blocking accept
    # running inside an Async fiber. Deliberately minimal: no IPv6
    # dual-bind handling and no custom buffer-size sockopts (yet).
    #
    module TCP
      # Binds a TCP server to +endpoint+.
      #
      # @param endpoint [String] e.g. "tcp://127.0.0.1:5570" or "tcp://127.0.0.1:0"
      # @param engine [Engine]
      # @return [Listener] whose #endpoint reflects the actually bound port
      def self.bind(endpoint, engine)
        host, port = parse_endpoint(endpoint)
        host = "0.0.0.0" if host == "*"
        server = TCPServer.new(host, port)
        bound_port = server.local_address.ip_port
        # IPv6 literals need brackets in the canonical endpoint string.
        display_host = host.include?(":") ? "[#{host}]" : host
        Listener.new("tcp://#{display_host}:#{bound_port}", server, bound_port, engine)
      end

      # Connects to +endpoint+ and registers the resulting pipe with
      # the engine. Synchronous — errors propagate to the caller.
      #
      # @param endpoint [String]
      # @param engine [Engine]
      # @return [void]
      def self.connect(endpoint, engine)
        host, port = parse_endpoint(endpoint)
        engine.handle_connected(IO::Stream::Buffered.wrap(TCPSocket.new(host, port)), endpoint: endpoint)
      end

      # @return [Array(String, Integer)] host and port from +endpoint+
      def self.parse_endpoint(endpoint)
        uri = URI.parse(endpoint)
        [uri.hostname, uri.port]
      end

      # A bound TCP listener.
      #
      class Listener
        attr_reader :endpoint
        attr_reader :port

        def initialize(endpoint, server, port, engine)
          @endpoint = endpoint
          @server = server
          @port = port
          @engine = engine
          @task = nil
        end

        # Spawns an accept-loop fiber under +parent_task+, yielding an
        # IO::Stream::Buffered for each accepted connection.
        def start_accept_loop(parent_task, &on_accepted)
          @task = parent_task.async(annotation: "nnq tcp accept #{@endpoint}") do
            loop do
              on_accepted.call(IO::Stream::Buffered.wrap(@server.accept))
            rescue Async::Stop, IOError
              break
            end
          ensure
            begin
              @server.close
            rescue StandardError
              # already closed
            end
          end
        end

        def stop
          @task&.stop
          begin
            @server.close
          rescue StandardError
            # already closed
          end
        end
      end
    end
  end
end
|
data/lib/nnq/version.rb
ADDED
data/lib/nnq.rb
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "protocol/sp"
|
|
4
|
+
|
|
5
|
+
module NNQ
|
|
6
|
+
end
|
|
7
|
+
|
|
8
|
+
require_relative "nnq/version"
|
|
9
|
+
require_relative "nnq/error"
|
|
10
|
+
require_relative "nnq/options"
|
|
11
|
+
require_relative "nnq/connection"
|
|
12
|
+
require_relative "nnq/engine"
|
|
13
|
+
require_relative "nnq/socket"
|
|
14
|
+
require_relative "nnq/push_pull"
|
|
15
|
+
require_relative "nnq/pair"
|
|
16
|
+
require_relative "nnq/req_rep"
|
|
17
|
+
require_relative "nnq/pub_sub"
|