nnq 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +27 -0
- data/LICENSE +13 -0
- data/README.md +57 -0
- data/lib/nnq/connection.rb +90 -0
- data/lib/nnq/engine/connection_lifecycle.rb +132 -0
- data/lib/nnq/engine/socket_lifecycle.rb +88 -0
- data/lib/nnq/engine.rb +198 -0
- data/lib/nnq/error.rb +10 -0
- data/lib/nnq/options.rb +31 -0
- data/lib/nnq/pair.rb +33 -0
- data/lib/nnq/pub_sub.rb +72 -0
- data/lib/nnq/push_pull.rb +52 -0
- data/lib/nnq/reactor.rb +83 -0
- data/lib/nnq/req_rep.rb +60 -0
- data/lib/nnq/routing/pair.rb +72 -0
- data/lib/nnq/routing/pub.rb +89 -0
- data/lib/nnq/routing/pull.rb +38 -0
- data/lib/nnq/routing/push.rb +43 -0
- data/lib/nnq/routing/rep.rb +113 -0
- data/lib/nnq/routing/req.rb +107 -0
- data/lib/nnq/routing/send_pump.rb +129 -0
- data/lib/nnq/routing/sub.rb +58 -0
- data/lib/nnq/socket.rb +86 -0
- data/lib/nnq/transport/inproc.rb +94 -0
- data/lib/nnq/transport/ipc.rb +97 -0
- data/lib/nnq/transport/tcp.rb +90 -0
- data/lib/nnq/version.rb +5 -0
- data/lib/nnq.rb +17 -0
- metadata +112 -0
data/lib/nnq/pub_sub.rb
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "socket"
|
|
4
|
+
require_relative "routing/pub"
|
|
5
|
+
require_relative "routing/sub"
|
|
6
|
+
|
|
7
|
+
module NNQ
  # Publisher half of the pub/sub pattern (nng pub0).
  #
  # Every #send is fanned out to all currently-connected SUB peers.
  # Each peer has its own bounded send queue, so a slow subscriber
  # drops messages instead of stalling the fast ones. Listens by
  # default.
  #
  class PUB < Socket
    # Broadcasts +body+ to every connected subscriber.
    #
    # @param body [String]
    def send(body)
      Reactor.run { @engine.routing.send(body) }
    end

    private

    # SP protocol identifier for pub0.
    def protocol
      Protocol::SP::Protocols::PUB_V0
    end

    # Per-peer queue + pump fan-out routing.
    def build_routing(engine)
      Routing::Pub.new(engine)
    end
  end

  # Subscriber half of the pub/sub pattern (nng sub0).
  #
  # Filtering happens locally by byte prefix. An empty subscription set
  # delivers nothing at all — the nng behaviour (pre-4.x ZeroMQ
  # differs). Dials by default.
  #
  class SUB < Socket
    # Adds +prefix+ to the subscription set. Matching is on raw bytes;
    # the empty string matches every message.
    #
    # @param prefix [String]
    def subscribe(prefix)
      Reactor.run { @engine.routing.subscribe(prefix) }
    end

    # Drops +prefix+ from the subscription set; does nothing if it was
    # never added.
    #
    # @param prefix [String]
    def unsubscribe(prefix)
      Reactor.run { @engine.routing.unsubscribe(prefix) }
    end

    # Blocks for the next matching message.
    #
    # @return [String, nil]
    def receive
      Reactor.run { @engine.routing.receive }
    end

    private

    # SP protocol identifier for sub0.
    def protocol
      Protocol::SP::Protocols::SUB_V0
    end

    # Local prefix-filter routing; needs no engine handle.
    def build_routing(_engine)
      Routing::Sub.new
    end
  end
end
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "socket"
|
|
4
|
+
require_relative "routing/push"
|
|
5
|
+
require_relative "routing/pull"
|
|
6
|
+
|
|
7
|
+
module NNQ
  # PUSH half of the pipeline pattern (nng push0). Messages go onto a
  # single bounded send queue (`send_hwm`) from which per-peer send
  # pumps work-steal. Dials by default.
  #
  class PUSH < Socket
    # Queues +body+ for delivery to some connected puller.
    #
    # @param body [String]
    def send(body)
      Reactor.run { @engine.routing.send(body) }
    end

    private

    # SP protocol identifier for push0.
    def protocol
      Protocol::SP::Protocols::PUSH_V0
    end

    # Shared-queue, work-stealing routing.
    def build_routing(engine)
      Routing::Push.new(engine)
    end
  end

  # PULL half of the pipeline pattern (nng pull0). Messages from every
  # live pusher are fair-queued into one unbounded receive queue.
  # Listens by default.
  #
  class PULL < Socket
    # Blocks for the next message from any pusher.
    #
    # @return [String, nil]
    def receive
      Reactor.run { @engine.routing.receive }
    end

    private

    # SP protocol identifier for pull0.
    def protocol
      Protocol::SP::Protocols::PULL_V0
    end

    # Unbounded fair-queue routing; needs no engine handle.
    def build_routing(_engine)
      Routing::Pull.new
    end
  end
end
|
data/lib/nnq/reactor.rb
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async"
|
|
4
|
+
|
|
5
|
+
module NNQ
  # Per-process fallback IO thread for non-Async callers.
  #
  # When user code already runs inside an Async reactor, NNQ tasks attach
  # directly to the caller's task tree. When the caller is bare (e.g. a
  # plain `Thread.new` or the main thread of a script), NNQ::Reactor lazily
  # spawns one shared background thread that hosts an Async reactor and
  # processes work items dispatched via {.run}.
  #
  # This is *not* an Async scheduler — it is a fallback thread hosting an
  # Async reactor for bare-thread callers; it can coexist with the user's
  # own reactor with no sharing required.
  #
  module Reactor
    @mutex = Mutex.new           # guards lazy startup
    @thread = nil                # background reactor thread, when running
    @root_task = nil             # the reactor's root Async::Task
    @work_queue = nil            # queue of [block, result] work items
    @at_exit_installed = false   # register the at_exit hook only once

    class << self
      # Lazily starts the fallback reactor thread and returns its root
      # Async task. Double-checked locking: the unsynchronized fast path
      # is safe on MRI because ivar reads are atomic.
      #
      # @return [Async::Task]
      def root_task
        return @root_task if @root_task
        @mutex.synchronize do
          return @root_task if @root_task
          ready = Thread::Queue.new
          @work_queue = Async::Queue.new
          @thread = Thread.new { run_reactor(ready) }
          @thread.name = "nnq-io"
          # Block until the reactor is up and has handed back its root task.
          @root_task = ready.pop
          # Guarded so restarting after stop! does not stack up handlers.
          unless @at_exit_installed
            at_exit { stop! }
            @at_exit_installed = true
          end
        end
        @root_task
      end

      # Runs +block+ on an Async reactor and returns its value. Inside an
      # Async task the block runs inline; otherwise it is shipped to the
      # fallback thread and this call blocks until it completes.
      # Exceptions raised by the block are re-raised in the caller.
      def run(&block)
        if Async::Task.current?
          yield
        else
          result = Thread::Queue.new
          root_task # ensure started
          @work_queue.push([block, result])
          status, value = result.pop
          raise value if status == :error
          value
        end
      end

      # Shuts down the fallback thread (if any) and resets all state so a
      # later {.root_task} can start a fresh reactor.
      #
      # State is cleared even when the thread already died on its own:
      # the previous early-return left a stale @root_task/@work_queue
      # behind a dead thread, so every subsequent {.run} would push into
      # a queue nobody drains and block forever.
      def stop!
        thread = @thread
        if thread&.alive?
          @work_queue&.push(nil) # wake the dispatch loop so it can exit
          thread.join(2)         # bounded wait; never hang process exit
        end
        @thread = nil
        @root_task = nil
        @work_queue = nil
      end

      private

      # Body of the fallback thread: hosts the Async reactor and runs the
      # dispatch loop. Each work item gets its own child task so one slow
      # or failing block cannot stall the others; results or errors are
      # pushed back through the per-call Thread::Queue.
      def run_reactor(ready)
        Async do |task|
          ready.push(task)
          loop do
            item = @work_queue.dequeue
            break if item.nil? # nil is the shutdown sentinel from stop!
            block, result = item
            task.async do
              result.push([:ok, block.call])
            rescue => e
              result.push([:error, e])
            end
          end
        end
      end
    end
  end
end
|
data/lib/nnq/req_rep.rb
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "socket"
|
|
4
|
+
require_relative "routing/req"
|
|
5
|
+
require_relative "routing/rep"
|
|
6
|
+
|
|
7
|
+
module NNQ
  # REQ (nng req0): client half of request/reply. One request may be in
  # flight per socket; #send_request blocks until its reply arrives.
  #
  class REQ < Socket
    # Sends +body+ as a request and blocks for the matching reply,
    # returning the reply body with the id header stripped.
    def send_request(body)
      Reactor.run { @engine.routing.send_request(body) }
    end

    private

    # SP protocol identifier for req0.
    def protocol
      Protocol::SP::Protocols::REQ_V0
    end

    def build_routing(engine)
      Routing::Req.new(engine)
    end
  end

  # REP (nng rep0): server half of request/reply. Usage alternates
  # strictly per request: #receive, then #send_reply.
  #
  class REP < Socket
    # Blocks until the next request arrives; returns its body.
    def receive
      Reactor.run { @engine.routing.receive }
    end

    # Sends +body+ back to the pipe the most recent #receive came from.
    def send_reply(body)
      Reactor.run { @engine.routing.send_reply(body) }
    end

    private

    # SP protocol identifier for rep0.
    def protocol
      Protocol::SP::Protocols::REP_V0
    end

    def build_routing(engine)
      Routing::Rep.new(engine)
    end
  end
end
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async/queue"
|
|
4
|
+
require_relative "send_pump"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Routing
    # PAIR0: exclusive bidirectional channel with a single peer.
    #
    # Wire format: no SP header — the wire body is exactly the user
    # payload (as with push0/pull0). Matching nng's pair0, a second peer
    # that connects while one is already paired gets rejected: first
    # peer wins.
    #
    # Sending reuses {SendPump} (shared queue + one pump); PAIR simply
    # never runs more than one pump because it never has more than one
    # peer. Receiving goes through an unbounded Async::Queue fed by the
    # engine's recv loop — TCP backpressure throttles the peer.
    #
    class Pair
      include SendPump

      def initialize(engine)
        init_send_pump(engine)
        @recv_queue = Async::Queue.new
        @peer = nil
      end

      # Queues +body+ for delivery to the paired peer.
      #
      # @param body [String]
      def send(body)
        enqueue_for_send(body)
      end

      # Blocks for the next inbound message.
      #
      # @return [String, nil] message body, or nil once the socket is closed
      def receive
        @recv_queue.dequeue
      end

      # Entry point for the engine's recv loop: one frame off the wire.
      def enqueue(body, _conn = nil)
        @recv_queue.enqueue(body)
      end

      # Enforces first-pipe-wins. Raising {ConnectionRejected} makes the
      # ConnectionLifecycle tear the just-registered connection down
      # before any pump ever sees it.
      def connection_added(conn)
        raise ConnectionRejected, "PAIR socket already has a peer" if @peer
        @peer = conn
        spawn_send_pump_for(conn)
      end

      def connection_removed(conn)
        remove_send_pump_for(conn)
        @peer = nil if @peer == conn
      end

      def close
        super
        @recv_queue.enqueue(nil) # release a blocked #receive
      end
    end
  end
end
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async"
|
|
4
|
+
require "async/limited_queue"
|
|
5
|
+
|
|
6
|
+
module NNQ
  module Routing
    # PUB side of the pub/sub pattern (nng pub0).
    #
    # Fan-out: every message goes to every connected SUB. Each peer owns
    # a bounded send queue (`send_hwm`) drained by its own pump fiber,
    # so a slow subscriber cannot hold up fast ones — when its queue is
    # full, new messages are dropped for that peer (nng's non-blocking
    # fan-out semantics).
    #
    # Pub0 keeps no subscription state on the sender: SUBs filter
    # locally. The flow is strictly one-directional; nothing is ever
    # read from SUB peers.
    #
    class Pub
      def initialize(engine)
        @engine = engine
        @queues = {}     # conn => Async::LimitedQueue (bounded at send_hwm)
        @pump_tasks = {} # conn => Async::Task draining that queue
      end

      # Broadcasts +body+ to every peer without blocking: a peer whose
      # queue is at HWM simply misses this message.
      #
      # @param body [String]
      def send(body)
        @queues.each_value { |q| q.enqueue(body) unless q.limited? }
      end

      def connection_added(conn)
        queue = Async::LimitedQueue.new(@engine.options.send_hwm)
        # The queue must be registered before the pump exists: spawn_task
        # yields into the new task body, which parks on dequeue, and at
        # that park a publishing fiber may run and must already see this
        # peer's queue.
        @queues[conn] = queue
        @pump_tasks[conn] = spawn_pump(conn, queue)
      end

      def connection_removed(conn)
        @queues.delete(conn)
        task = @pump_tasks.delete(conn)
        return if task.nil? || task == Async::Task.current
        task.stop
      rescue IOError, Errno::EPIPE
        # the pump was mid-flush and is already unwinding
      end

      # True once every peer queue is empty; engine linger polls this.
      def send_queue_drained?
        @queues.values.all?(&:empty?)
      end

      def close
        @pump_tasks.each_value(&:stop)
        @pump_tasks.clear
        @queues.clear
      end

      private

      # One pump fiber per peer: drains the peer's queue onto the wire
      # until the connection dies, then notifies the engine.
      def spawn_pump(conn, queue)
        @engine.spawn_task(annotation: "nnq pub pump #{conn.endpoint}") do
          loop do
            frame = queue.dequeue
            conn.send_message(frame)
          rescue EOFError, IOError, Errno::EPIPE, Errno::ECONNRESET
            break
          end
        ensure
          @engine.handle_connection_lost(conn)
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async/queue"
|
|
4
|
+
|
|
5
|
+
module NNQ
  module Routing
    # PULL side: one unbounded queue of inbound messages. The engine's
    # per-connection recv fibers feed {#enqueue}; user code drains via
    # {#receive}. There is no HWM and no prefetch buffer — the kernel's
    # TCP buffer is what throttles senders.
    #
    class Pull
      def initialize
        @queue = Async::Queue.new
      end

      # Called by a recv fiber for each frame pulled off the wire.
      def enqueue(body, _conn = nil)
        @queue.enqueue(body)
      end

      # Blocks until a message is available.
      #
      # @return [String, nil] message body, or nil if the queue was closed
      def receive
        @queue.dequeue
      end

      # Pushes a nil sentinel so a blocked #receive returns from a
      # closed socket.
      def close
        @queue.enqueue(nil)
      end
    end
  end
end
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "send_pump"
|
|
4
|
+
|
|
5
|
+
module NNQ
  module Routing
    # PUSH side of the pipeline pattern (nng push0).
    #
    # One shared bounded send queue per socket; every peer connection
    # runs its own pump fiber that competes to dequeue from it
    # (work-stealing). A slow peer's pump simply stops pulling while it
    # is blocked flushing its own TCP stream, so load naturally shifts
    # toward whoever is keeping up — a better fit for PUSH semantics
    # than per-pipe round-robin.
    #
    class Push
      include SendPump

      def initialize(engine)
        init_send_pump(engine)
      end

      # Queues +body+ on the shared send queue.
      #
      # @param body [String]
      def send(body)
        enqueue_for_send(body)
      end

      def connection_added(conn)
        spawn_send_pump_for(conn)
      end

      def connection_removed(conn)
        remove_send_pump_for(conn)
      end
    end
  end
end
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "async/queue"
|
|
4
|
+
|
|
5
|
+
module NNQ
  module Routing
    # REP: server side of req0/rep0.
    #
    # Wire format: an incoming body is `[backtrace stack][user payload]`.
    # The backtrace is one or more 4-byte big-endian words; words are
    # consumed off the front until one whose top byte has its high bit
    # set — the originating REQ's request id, which terminates the
    # stack. The whole stack is stashed and echoed verbatim in front of
    # the reply body; REP never rewrites or reorders it.
    #
    # Cooked-mode semantics:
    # - At most one pending request at a time. Calling #receive while a
    #   request is pending silently discards it; a later #send_reply
    #   targets the *new* request (matches nng cooked rep0).
    # - #send_reply with no pending request raises.
    # - Replies are routed to the pipe the request arrived on; if that
    #   pipe died in the meantime, the reply is silently dropped
    #   (matches nng's pipe_terminated behavior).
    # - Backtrace depth is capped at 8 hops, nng's default TTL.
    #
    class Rep
      MAX_HOPS = 8 # nng's default ttl

      def initialize(engine)
        @engine = engine
        @recv_queue = Async::Queue.new # holds [conn, btrace, body]
        @pending = nil                 # [conn, btrace] or nil
        @mutex = Mutex.new
      end

      # Receives one request body, stashing its backtrace and source
      # connection so the next #send_reply can route the reply back.
      #
      # @return [String, nil] body, or nil if the socket was closed
      def receive
        # Dropping any prior pending request here is deliberate:
        # receive-without-reply is how users discard unwanted requests.
        @mutex.synchronize { @pending = nil }
        item = @recv_queue.dequeue
        return nil unless item
        conn, btrace, body = item
        @mutex.synchronize { @pending = [conn, btrace] }
        body
      end

      # Replies to the most recently received request.
      #
      # @param body [String]
      # @raise [Error] when no request is pending
      def send_reply(body)
        conn, btrace = @mutex.synchronize do
          raise Error, "REP socket has no pending request to reply to" unless @pending
          taken = @pending
          @pending = nil
          taken
        end

        return if conn.closed? # peer died: drop the reply silently
        conn.send_message(btrace + body)
      end

      # Entry point for the engine recv loop: one raw frame per call.
      def enqueue(body, conn)
        btrace, payload = parse_backtrace(body)
        return if btrace.nil? # malformed or over-TTL: drop
        @recv_queue.enqueue([conn, btrace, payload])
      end

      # Forgets the pending request if its source pipe just went away.
      def connection_removed(conn)
        @mutex.synchronize do
          @pending = nil if @pending && @pending[0] == conn
        end
      end

      # Wakes a blocked #receive with the nil close sentinel.
      def close
        @recv_queue.enqueue(nil)
      end

      private

      # Splits +body+ into [backtrace_bytes, remaining_payload] by
      # consuming 4-byte BE words until one with the high bit set in its
      # first byte. Returns nil when the input is truncated or no
      # terminator appears within MAX_HOPS words.
      def parse_backtrace(body)
        offset = 0
        MAX_HOPS.times do
          return nil if body.bytesize - offset < 4
          terminator = (body.getbyte(offset) & 0x80) != 0
          offset += 4
          return [body.byteslice(0, offset), body.byteslice(offset..)] if terminator
        end
        nil # exceeded TTL without finding the request id
      end
    end
  end
end
|