nnq-cli 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +78 -0
- data/LICENSE +15 -0
- data/README.md +391 -0
- data/exe/nnq +10 -0
- data/lib/nnq/cli/base_runner.rb +448 -0
- data/lib/nnq/cli/bus.rb +33 -0
- data/lib/nnq/cli/cli_parser.rb +485 -0
- data/lib/nnq/cli/config.rb +59 -0
- data/lib/nnq/cli/expression_evaluator.rb +142 -0
- data/lib/nnq/cli/formatter.rb +140 -0
- data/lib/nnq/cli/pair.rb +33 -0
- data/lib/nnq/cli/pipe.rb +206 -0
- data/lib/nnq/cli/pipe_worker.rb +138 -0
- data/lib/nnq/cli/pub_sub.rb +16 -0
- data/lib/nnq/cli/push_pull.rb +19 -0
- data/lib/nnq/cli/ractor_helpers.rb +81 -0
- data/lib/nnq/cli/req_rep.rb +105 -0
- data/lib/nnq/cli/socket_setup.rb +93 -0
- data/lib/nnq/cli/surveyor_respondent.rb +112 -0
- data/lib/nnq/cli/term.rb +86 -0
- data/lib/nnq/cli/transient_monitor.rb +41 -0
- data/lib/nnq/cli/version.rb +7 -0
- data/lib/nnq/cli.rb +190 -0
- metadata +110 -0
|
# frozen_string_literal: true

module NNQ
  module CLI
    # Raised when LZ4 decompression fails.
    class DecompressError < RuntimeError; end

    # Handles encoding/decoding a single-body message in the configured
    # format, plus optional LZ4 compression.
    #
    # Unlike omq-cli's Formatter, nnq messages are not multipart — one
    # `String` body per message. The API still accepts/returns a
    # 1-element array so that `$F`-based eval expressions work the same
    # way.
    class Formatter
      # @param format [Symbol] wire format (:ascii, :quoted, :raw, :jsonl, :msgpack, :marshal)
      # @param compress [Boolean] whether to apply LZ4 compression
      def initialize(format, compress: false)
        @format = format
        @compress = compress
      end


      # Encodes a message body into a printable string for output.
      #
      # @param msg [Array<String>] single-element array (the body)
      # @return [String] formatted output line
      def encode(msg)
        payload = msg.first.to_s
        case @format
        when :raw     then payload
        when :msgpack then MessagePack.pack([payload])
        when :ascii   then "#{payload.b.gsub(/[^[:print:]\t]/, ".")}\n"
        when :quoted  then "#{payload.b.dump[1..-2]}\n"
        when :jsonl   then "#{JSON.generate([payload])}\n"
        when :marshal then "#{payload.inspect}\n"
        end
      end


      # Decodes a formatted input line into a 1-element message array.
      #
      # @param line [String] input line (newline-terminated)
      # @return [Array<String>] 1-element array
      def decode(line)
        case @format
        when :raw
          [line]
        when :ascii, :marshal
          [line.chomp]
        when :quoted
          ["\"#{line.chomp}\"".undump]
        when :jsonl
          fields = JSON.parse(line.chomp)
          valid = fields.is_a?(Array) && fields.all? { |f| f.is_a?(String) }
          abort "JSON Lines input must be an array of strings" unless valid
          fields.first(1)
        end
      end


      # Decodes one Marshal object from the given IO stream.
      #
      # @param io [IO] input stream
      # @return [Object, nil] deserialized object, or nil on EOF
      def decode_marshal(io)
        Marshal.load(io)
      rescue EOFError, TypeError
        nil
      end


      # Decodes one MessagePack object from the given IO stream.
      # The unpacker is memoized so it can keep streaming state between calls.
      #
      # @param io [IO] input stream
      # @return [Object, nil] deserialized object, or nil on EOF
      def decode_msgpack(io)
        @msgpack_unpacker ||= MessagePack::Unpacker.new(io)
        @msgpack_unpacker.read
      rescue EOFError
        nil
      end


      # Compresses the body with LZ4 if compression is enabled.
      #
      # @param msg [Array<String>] single-element array
      # @return [Array<String>] optionally compressed
      def compress(msg)
        return msg unless @compress

        msg.map { |part| RLZ4.compress(part) if part }
      end


      # Decompresses the body with LZ4 if compression is enabled.
      # nil/empty bodies pass through.
      #
      # @param msg [Array<String>] possibly compressed single-element array
      # @return [Array<String>] decompressed
      # @raise [DecompressError] when the payload is not valid LZ4
      def decompress(msg)
        return msg unless @compress

        msg.map { |part| part.nil? || part.empty? ? part : RLZ4.decompress(part) }
      rescue RLZ4::DecompressError
        raise DecompressError, "decompression failed (did the sender use --compress?)"
      end


      # Formats a message body for human-readable preview (logging).
      #
      # @param msg [Array<String>] single-element array
      # @return [String] truncated preview prefixed with the byte size
      def self.preview(msg)
        payload = msg.first.to_s
        "(#{payload.bytesize}B) #{preview_body(payload)}"
      end


      # Renders at most 12 bytes of the body, replacing non-printable
      # characters; mostly-binary bodies collapse to a size marker.
      def self.preview_body(body)
        raw = body.b
        return "''" if raw.empty?

        head = raw[0, 12]
        mostly_binary = head.count("\x20-\x7e") < head.bytesize / 2
        return "[#{raw.bytesize}B]" if mostly_binary

        cleaned = head.gsub(/[^[:print:]]/, ".")
        raw.bytesize > 12 ? "#{cleaned}..." : cleaned
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Runner for PAIR sockets (bidirectional messaging).
    class PairRunner < BaseRunner
      private


      # Runs the receive loop and the send loop concurrently and waits
      # for both to finish.
      def run_loop(task)
        inbound = recv_async(task)
        outbound = task.async { run_send_logic }
        wait_for_loops(inbound, outbound)
      end


      # Spawns the receive side: pull each message, run it through the
      # recv expression, print the result, and stop on EOF (nil message)
      # or once config.count messages have been handled (when positive).
      def recv_async(task)
        task.async do
          limit = config.count
          seen = 0
          loop do
            msg = recv_msg
            break if msg.nil?

            output(eval_recv_expr(msg))
            seen += 1
            break if limit&.positive? && seen >= limit
          end
        end
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Runner for the virtual "pipe" socket type (PULL -> eval -> PUSH).
    # Supports sequential and parallel (Ractor-based) processing modes.
    class PipeRunner
      # @return [Config] frozen CLI configuration
      attr_reader :config


      # @param config [Config] frozen CLI configuration
      def initialize(config)
        @config = config
        @fmt_in = Formatter.new(config.format, compress: config.compress_in || config.compress)
        @fmt_out = Formatter.new(config.format, compress: config.compress_out || config.compress)
      end


      # Runs the pipe in sequential or parallel mode based on config.
      #
      # @param task [Async::Task] the parent async task
      # @return [void]
      def call(task)
        config.parallel ? run_parallel(task) : run_sequential(task)
      end


      private


      # Picks explicit --in/--out endpoints when present; otherwise the
      # first positional endpoint feeds PULL and the second feeds PUSH.
      def resolve_endpoints
        return [config.in_endpoints, config.out_endpoints] if config.in_endpoints.any?

        [[config.endpoints[0]], [config.endpoints[1]]]
      end

      # -- Sequential ---------------------------------------------------


      def run_sequential(task)
        set_pipe_process_title
        inputs, outputs = resolve_endpoints
        @pull, @push = build_pull_push(inputs, outputs)
        compile_expr
        @sock = @pull # for eval instance_exec
        start_event_monitors if config.verbose >= 2
        wait_for_peers_with_timeout if config.timeout
        setup_sequential_transient(task)
        @sock.instance_exec(&@recv_begin_proc) if @recv_begin_proc
        sequential_message_loop(fan_out: outputs.size > 1)
        @sock.instance_exec(&@recv_end_proc) if @recv_end_proc
      ensure
        @pull&.close
        @push&.close
      end


      # Builds and attaches the PULL/PUSH socket pair for the pipe.
      def build_pull_push(inputs, outputs)
        pull_sock = SocketSetup.build(NNQ::PULL0, config)
        push_sock = SocketSetup.build(NNQ::PUSH0, config)
        SocketSetup.attach_endpoints(pull_sock, inputs, verbose: config.verbose)
        SocketSetup.attach_endpoints(push_sock, outputs, verbose: config.verbose)
        [pull_sock, push_sock]
      end


      # With --timeout set, fail fast if peers never show up. Without
      # it, there's no point waiting: PULL#receive blocks naturally
      # and PUSH buffers up to send_hwm when no peer is present.
      def wait_for_peers_with_timeout
        Fiber.scheduler.with_timeout(config.timeout) do
          barrier = Async::Barrier.new
          barrier.async(annotation: "wait push peer") { @push.peer_connected.wait }
          barrier.async(annotation: "wait pull peer") { @pull.peer_connected.wait }
          barrier.wait
        end
      end


      # In --transient mode, stop reading (and reconnecting) once every
      # input peer has disconnected, so the pipe can drain and exit.
      def setup_sequential_transient(task)
        return unless config.transient

        task.async do
          @pull.all_peers_gone.wait
          @pull.reconnect_enabled = false
          @pull.close_read
        end
      end


      def sequential_message_loop(fan_out: false)
        limit = config.count
        handled = 0

        loop do
          raw = @pull.receive
          break if raw.nil?

          msg = eval_recv_expr(@fmt_in.decompress([raw]))
          @push.send(@fmt_out.compress(msg).first) if msg && !msg.empty?

          # Yield after send so send-pump fibers can drain the queue
          # before the next message is enqueued. Without this, one pump
          # monopolizes the shared queue when messages arrive in bursts.
          # Only needed for multi-output pipes; single-output has no
          # fairness concern.
          Async::Task.current.yield if fan_out

          handled += 1
          break if limit && limit > 0 && handled >= limit
        end
      end


      # -- Parallel -----------------------------------------------------


      def run_parallel(task)
        set_pipe_process_title
        NNQ.freeze_for_ractors! if NNQ.respond_to?(:freeze_for_ractors!)
        in_eps, out_eps = resolve_endpoints
        in_eps = RactorHelpers.preresolve_tcp(in_eps)
        out_eps = RactorHelpers.preresolve_tcp(out_eps)
        log_port, log_thread = RactorHelpers.start_log_consumer
        error_port = Ractor::Port.new
        # A single worker error aborts the whole process with its message.
        error_thread = Thread.new(error_port) do |port|
          first = port.receive
          abort "nnq: #{first}" unless first.equal?(RactorHelpers::SHUTDOWN)
        rescue Ractor::ClosedError
          # port closed, no error
        end
        workers = Array.new(config.parallel) do
          ::Ractor.new(config, in_eps, out_eps, log_port, error_port) do |cfg, ins, outs, lport, eport|
            PipeWorker.new(cfg, ins, outs, lport, eport).call
          end
        end
        workers.each do |worker|
          worker.join
        rescue ::Ractor::RemoteError => e
          $stderr.write("nnq: Ractor error: #{e.cause&.message || e.message}\n")
        end
      ensure
        RactorHelpers.stop_consumer(error_port, error_thread) if error_port
        RactorHelpers.stop_consumer(log_port, log_thread) if log_port
      end


      # -- Process title -------------------------------------------------


      # Sets a descriptive process title (visible in ps/top) summarizing
      # the pipe's flags and endpoints.
      def set_pipe_process_title
        inputs, outputs = resolve_endpoints
        parts = ["nnq pipe"]
        parts << "-z" if config.compress || config.compress_in || config.compress_out
        parts << "-P#{config.parallel}" if config.parallel
        parts.concat(inputs.map(&:url)) << "->"
        parts.concat(outputs.map(&:url))
        Process.setproctitle(parts.join(" "))
      end


      # -- Expression eval ----------------------------------------------


      def compile_expr
        @recv_evaluator = ExpressionEvaluator.new(config.recv_expr, format: config.format)
        @recv_begin_proc = @recv_evaluator.begin_proc
        @recv_eval_proc = @recv_evaluator.eval_proc
        @recv_end_proc = @recv_evaluator.end_proc
      end


      # Runs the recv expression; a SENT sentinel means the expression
      # already emitted the message itself, so nothing is forwarded.
      def eval_recv_expr(msg)
        result = @recv_evaluator.call(msg, @sock)
        return nil if result.equal?(ExpressionEvaluator::SENT)

        result
      end


      # -- Event monitoring ---------------------------------------------


      def start_event_monitors
        trace = config.verbose >= 3
        level = config.verbose
        [@pull, @push].each do |sock|
          sock.monitor(verbose: trace) { |event| CLI::Term.write_event(event, level) }
        end
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Worker that runs inside a Ractor for pipe -P parallel mode.
    # Each worker owns its own Async reactor, PULL socket, and PUSH socket.
    #
    class PipeWorker
      def initialize(config, in_eps, out_eps, log_port, error_port = nil)
        @config = config
        @in_eps = in_eps
        @out_eps = out_eps
        @log_port = log_port
        @error_port = error_port
      end


      # Entry point: sets up sockets inside a fresh Async reactor, runs
      # the message loop, and reports decompression failures through the
      # error port instead of crashing the Ractor.
      def call
        Async do
          setup_sockets
          log_endpoints if @config.verbose >= 1
          start_monitors if @config.verbose >= 2
          wait_for_peers
          compile_expr
          run_message_loop
          run_end_block
        rescue NNQ::CLI::DecompressError => e
          @error_port&.send(e.message)
        ensure
          @pull&.close
          @push&.close
        end
      end


      private


      def setup_sockets
        @pull = NNQ::CLI::SocketSetup.build(NNQ::PULL0, @config)
        @push = NNQ::CLI::SocketSetup.build(NNQ::PUSH0, @config)
        NNQ::CLI::SocketSetup.attach_endpoints(@pull, @in_eps, verbose: 0)
        NNQ::CLI::SocketSetup.attach_endpoints(@push, @out_eps, verbose: 0)
      end


      # Reports each endpoint (inputs first, then outputs) via the
      # shared log port so output is serialized across workers.
      def log_endpoints
        (@in_eps + @out_eps).each do |ep|
          @log_port.send(ep.bind? ? "Bound to #{ep.url}" : "Connecting to #{ep.url}")
        end
      end


      def start_monitors
        trace = @config.verbose >= 3
        [@pull, @push].each do |sock|
          sock.monitor(verbose: trace) { |event| @log_port.send(format_event(event)) }
        end
      end


      # Formats a socket event into a one-line log string.
      def format_event(event)
        case event.type
        when :message_sent
          "nnq: >> #{NNQ::CLI::Formatter.preview([event.detail[:body]])}"
        when :message_received
          "nnq: << #{NNQ::CLI::Formatter.preview([event.detail[:body]])}"
        else
          where = event.endpoint ? " #{event.endpoint}" : ""
          extra = event.detail ? " #{event.detail}" : ""
          "nnq: #{event.type}#{where}#{extra}"
        end
      end


      # Blocks until both sockets have at least one connected peer.
      def wait_for_peers
        barrier = Async::Barrier.new
        barrier.async { @pull.peer_connected.wait }
        barrier.async { @push.peer_connected.wait }
        barrier.wait
      end


      # Compiles the recv expression inside this Ractor and prepares the
      # formatters and the eval context; runs the BEGIN block once.
      def compile_expr
        @begin_proc, @end_proc, @eval_proc =
          NNQ::CLI::ExpressionEvaluator.compile_inside_ractor(@config.recv_expr)
        @fmt_in = NNQ::CLI::Formatter.new(@config.format, compress: @config.compress_in || @config.compress)
        @fmt_out = NNQ::CLI::Formatter.new(@config.format, compress: @config.compress_out || @config.compress)
        @ctx = Object.new
        @ctx.instance_exec(&@begin_proc) if @begin_proc
      end


      # Pulls messages until EOF, the configured count is reached, or a
      # receive timeout fires. With an eval proc, empty/nil results are
      # dropped; without one, every message is forwarded as-is.
      def run_message_loop
        remaining = @config.count
        loop do
          body = @pull.receive
          break if body.nil?

          msg = @fmt_in.decompress([body])
          if @eval_proc
            msg = NNQ::CLI::ExpressionEvaluator.normalize_result(
              @ctx.instance_exec(msg, &@eval_proc)
            )
            @push.send(@fmt_out.compress(msg).first) unless msg.nil? || msg.empty?
          else
            @push.send(@fmt_out.compress(msg).first)
          end

          remaining -= 1 if remaining && remaining > 0
          break if remaining == 0
        end
      rescue IO::TimeoutError, Async::TimeoutError
        # recv timed out -- fall through to END block
      end


      # Runs the END block (if any) and forwards its non-empty result.
      def run_end_block
        return unless @end_proc

        final = NNQ::CLI::ExpressionEvaluator.normalize_result(@ctx.instance_exec(&@end_proc))
        @push.send(@fmt_out.compress(final).first) if final && !final.empty?
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Runner for PUB sockets (publish messages to subscribers).
    class PubRunner < BaseRunner
      def run_loop(task)
        run_send_logic
      end
    end


    # Runner for SUB sockets (subscribe and receive published messages).
    class SubRunner < BaseRunner
      def run_loop(task)
        run_recv_logic
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Runner for PUSH sockets (send-only pipeline producer).
    class PushRunner < BaseRunner
      def run_loop(task)
        run_send_logic
      end
    end


    # Runner for PULL sockets (receive-only pipeline consumer).
    class PullRunner < BaseRunner
      private


      def run_loop(task)
        run_recv_logic
      end
    end
  end
end
# frozen_string_literal: true

module NNQ
  module CLI
    # Shared Ractor infrastructure for parallel worker modes.
    module RactorHelpers
      # Sentinel value sent through ports to signal consumer threads to exit.
      # Port#close does not unblock a waiting #receive, so we must send an
      # explicit shutdown marker.
      SHUTDOWN = :__nnq_shutdown__


      # Resolves TCP hostnames to IP addresses so Ractors don't touch
      # Resolv::DefaultResolver (which is not shareable).
      #
      # @param endpoints [Array<Endpoint>] endpoints to resolve
      # @return [Array<Endpoint>] tcp:// endpoints expanded to one entry per
      #   resolved address (IPv6 literals bracketed); others pass through
      def self.preresolve_tcp(endpoints)
        endpoints.flat_map do |ep|
          url = ep.url
          next ep unless url.start_with?("tcp://")

          host, port = NNQ::Transport::TCP.parse_endpoint(url)
          Addrinfo.getaddrinfo(host, port, nil, :STREAM).map do |addr|
            ip = addr.ip_address
            ip = "[#{ip}]" if ip.include?(":") # bracket IPv6 literals
            Endpoint.new("tcp://#{ip}:#{addr.ip_port}", ep.bind?)
          end
        end
      end


      # Starts a Ractor::Port and a consumer thread that drains log
      # messages to stderr sequentially. Returns [port, thread].
      # Send SHUTDOWN through the port to stop the consumer.
      #
      def self.start_log_consumer
        start_consumer { |msg| $stderr.write("#{msg}\n") }
      end


      # Starts a Ractor::Port and a consumer thread that drains
      # formatted output to stdout sequentially. Returns [port, thread].
      # Send SHUTDOWN through the port to stop the consumer.
      #
      def self.start_output_consumer
        start_consumer { |msg| $stdout.write(msg) }
      end


      # Shared plumbing for the consumers above: one Ractor::Port plus a
      # thread that invokes +writer+ for every received message until the
      # SHUTDOWN sentinel arrives or the port is closed.
      #
      # @yield [msg] called once per message drained from the port
      # @return [Array(Ractor::Port, Thread)]
      def self.start_consumer(&writer)
        port = Ractor::Port.new
        thread = Thread.new(port) do |p|
          loop do
            msg = p.receive
            break if msg.equal?(SHUTDOWN)
            writer.call(msg)
          rescue Ractor::ClosedError
            break
          end
        end
        [port, thread]
      end
      private_class_method :start_consumer


      # Sends the shutdown sentinel and joins the consumer thread.
      # Falls back to a bounded join if the port is already closed.
      #
      def self.stop_consumer(port, thread)
        port.send(SHUTDOWN)
        thread.join
      rescue Ractor::ClosedError
        thread.join(1)
      end
    end
  end
end