uringmachine 0.19.1 → 0.21.0
This diff shows the changes between publicly released versions of the package, as published to their public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +3 -4
- data/CHANGELOG.md +32 -1
- data/TODO.md +0 -39
- data/examples/bm_fileno.rb +33 -0
- data/examples/bm_mutex.rb +85 -0
- data/examples/bm_mutex_single.rb +33 -0
- data/examples/bm_queue.rb +29 -29
- data/examples/bm_send.rb +2 -5
- data/examples/bm_snooze.rb +20 -42
- data/examples/bm_write.rb +4 -1
- data/examples/fiber_scheduler_demo.rb +15 -51
- data/examples/fiber_scheduler_fork.rb +24 -0
- data/examples/nc_ssl.rb +71 -0
- data/ext/um/extconf.rb +5 -15
- data/ext/um/um.c +310 -74
- data/ext/um/um.h +66 -29
- data/ext/um/um_async_op.c +1 -1
- data/ext/um/um_async_op_class.c +2 -2
- data/ext/um/um_buffer.c +1 -1
- data/ext/um/um_class.c +178 -31
- data/ext/um/um_const.c +51 -3
- data/ext/um/um_mutex_class.c +1 -1
- data/ext/um/um_op.c +37 -0
- data/ext/um/um_queue_class.c +1 -1
- data/ext/um/um_stream.c +5 -5
- data/ext/um/um_stream_class.c +3 -0
- data/ext/um/um_sync.c +28 -39
- data/ext/um/um_utils.c +59 -19
- data/grant-2025/journal.md +353 -0
- data/grant-2025/tasks.md +135 -0
- data/lib/uringmachine/fiber_scheduler.rb +316 -57
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +6 -0
- data/test/test_fiber_scheduler.rb +640 -0
- data/test/test_stream.rb +2 -2
- data/test/test_um.rb +722 -54
- data/uringmachine.gemspec +5 -5
- data/vendor/liburing/.github/workflows/ci.yml +94 -1
- data/vendor/liburing/.github/workflows/test_build.c +9 -0
- data/vendor/liburing/configure +27 -0
- data/vendor/liburing/examples/Makefile +6 -0
- data/vendor/liburing/examples/helpers.c +8 -0
- data/vendor/liburing/examples/helpers.h +5 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/Makefile +9 -3
- data/vendor/liburing/src/include/liburing/barrier.h +11 -5
- data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
- data/vendor/liburing/src/include/liburing/io_uring.h +51 -0
- data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
- data/vendor/liburing/src/include/liburing.h +458 -121
- data/vendor/liburing/src/liburing-ffi.map +16 -0
- data/vendor/liburing/src/liburing.map +8 -0
- data/vendor/liburing/src/sanitize.c +4 -1
- data/vendor/liburing/src/setup.c +7 -4
- data/vendor/liburing/test/232c93d07b74.c +4 -16
- data/vendor/liburing/test/Makefile +15 -1
- data/vendor/liburing/test/accept.c +2 -13
- data/vendor/liburing/test/bind-listen.c +175 -13
- data/vendor/liburing/test/conn-unreach.c +132 -0
- data/vendor/liburing/test/fd-pass.c +32 -7
- data/vendor/liburing/test/fdinfo.c +39 -12
- data/vendor/liburing/test/fifo-futex-poll.c +114 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
- data/vendor/liburing/test/futex.c +1 -1
- data/vendor/liburing/test/helpers.c +99 -2
- data/vendor/liburing/test/helpers.h +9 -0
- data/vendor/liburing/test/io_uring_passthrough.c +6 -12
- data/vendor/liburing/test/mock_file.c +379 -0
- data/vendor/liburing/test/mock_file.h +47 -0
- data/vendor/liburing/test/nop.c +2 -2
- data/vendor/liburing/test/nop32-overflow.c +150 -0
- data/vendor/liburing/test/nop32.c +126 -0
- data/vendor/liburing/test/pipe.c +166 -0
- data/vendor/liburing/test/poll-race-mshot.c +13 -1
- data/vendor/liburing/test/read-write.c +4 -4
- data/vendor/liburing/test/recv-mshot-fair.c +81 -34
- data/vendor/liburing/test/recvsend_bundle.c +1 -1
- data/vendor/liburing/test/resize-rings.c +2 -0
- data/vendor/liburing/test/ring-query.c +322 -0
- data/vendor/liburing/test/ringbuf-loop.c +87 -0
- data/vendor/liburing/test/ringbuf-read.c +4 -4
- data/vendor/liburing/test/runtests.sh +2 -2
- data/vendor/liburing/test/send-zerocopy.c +43 -5
- data/vendor/liburing/test/send_recv.c +103 -32
- data/vendor/liburing/test/shutdown.c +2 -12
- data/vendor/liburing/test/socket-nb.c +3 -14
- data/vendor/liburing/test/socket-rw-eagain.c +2 -12
- data/vendor/liburing/test/socket-rw-offset.c +2 -12
- data/vendor/liburing/test/socket-rw.c +2 -12
- data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
- data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
- data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
- data/vendor/liburing/test/timestamp.c +56 -19
- data/vendor/liburing/test/vec-regbuf.c +2 -4
- data/vendor/liburing/test/wq-aff.c +7 -0
- metadata +37 -15
@@ -0,0 +1,640 @@
+# frozen_string_literal: true
+
+require_relative 'helper'
+require 'uringmachine/fiber_scheduler'
+require 'securerandom'
+require 'socket'
+
+class MethodCallAuditor
+  attr_reader :calls
+
+  def initialize(target)
+    @target = target
+    @calls = []
+  end
+
+  def respond_to?(sym, include_all = false) = @target.respond_to?(sym, include_all)
+
+  def method_missing(sym, *args, &block)
+    res = @target.send(sym, *args, &block)
+    @calls << ({ sym:, args:, res:})
+    res
+  rescue => e
+    @calls << ({ sym:, args:, res: e})
+    raise
+  end
+
+  def last_call
+    calls.last
+  end
+end
+
+class FiberSchedulerTest < UMBaseTest
+  def setup
+    super
+    @raw_scheduler = UM::FiberScheduler.new(@machine)
+    @scheduler = MethodCallAuditor.new(@raw_scheduler)
+    Fiber.set_scheduler(@scheduler)
+  end
+
+  def teardown
+    Fiber.set_scheduler(nil)
+    GC.start
+  end
+
+  def test_fiber_scheduler_initialize_without_machine
+    s = UM::FiberScheduler.new
+    assert_kind_of UringMachine, s.machine
+  end
+
+  def test_fiber_scheduler_spinning
+    f1 = Fiber.schedule do
+      sleep 0.001
+    end
+
+    f2 = Fiber.schedule do
+      sleep 0.001
+    end
+
+    assert_kind_of Fiber, f1
+    assert_kind_of Fiber, f2
+
+    assert_equal 2, @scheduler.calls.size
+    assert_equal [:fiber] * 2, @scheduler.calls.map { it[:sym] }
+    assert_equal 2, @scheduler.fiber_map.size
+
+    # close scheduler
+    Fiber.set_scheduler nil
+    assert_equal :scheduler_close, @scheduler.last_call[:sym]
+    GC.start
+    assert_equal 0, @scheduler.fiber_map.size
+  end
+
+  def test_fiber_scheduler_io_read_io_write
+    i, o = IO.pipe
+    buffer = []
+
+    f1 = Fiber.schedule do
+      sleep 0.01
+      o.write 'foo'
+      buffer << :f1
+    end
+
+    f2 = Fiber.schedule do
+      sleep 0.02
+      o.write 'bar'
+      buffer << :f2
+      o.close
+    end
+
+    f3 = Fiber.schedule do
+      str = i.read
+      buffer << str
+    end
+
+    @scheduler.join
+    assert_equal [true] * 3, [f1, f2, f3].map(&:done?)
+    assert_equal [:f1, :f2, 'foobar'], buffer
+
+    assert_equal({
+      fiber: 3,
+      kernel_sleep: 2,
+      io_write: 2,
+      io_read: 3,
+      blocking_operation_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    i.close rescue nil
+    o.close rescue nil
+  end
+
+  def test_io_read_with_timeout
+    i, o = IO.pipe
+    i.timeout = 0.01
+    buf = []
+
+    Fiber.schedule do
+      buf << i.read
+    rescue Timeout::Error
+      buf << :timeout
+    end
+    @scheduler.join
+    assert_equal [:timeout], buf
+
+    assert_equal({
+      fiber: 1,
+      io_read: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_io_write_with_timeout
+    i, o = IO.pipe
+    o << ('*' * (1 << 16))
+    o.timeout = 0.01
+
+    buf = []
+
+    Fiber.schedule do
+      buf << o.write('!')
+    rescue Timeout::Error
+      buf << :timeout
+    end
+    @scheduler.join
+    assert_equal [:timeout], buf
+
+    assert_equal({
+      fiber: 1,
+      io_write: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_io_pread
+    fn = "/tmp/#{SecureRandom.hex}"
+    IO.write(fn, 'foobar')
+
+    buf = nil
+    Fiber.schedule do
+      File.open(fn, 'r') do |f|
+        buf = f.pread(3, 2)
+      end
+    rescue => e
+      buf = e
+    end
+
+    @scheduler.join
+    assert_equal 'oba', buf
+    assert_equal({
+      fiber: 1,
+      blocking_operation_wait: 1,
+      io_pread: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_io_pwrite
+    fn = "/tmp/#{SecureRandom.hex}"
+    IO.write(fn, 'foobar')
+
+    res = nil
+    Fiber.schedule do
+      File.open(fn, 'r+') do |f|
+        res = f.pwrite('baz', 2)
+      end
+    end
+
+    @scheduler.join
+    assert_equal 3, res
+
+    assert_equal 'fobazr', IO.read(fn)
+    assert_equal({
+      fiber: 1,
+      blocking_operation_wait: 2,
+      io_pwrite: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_sleep
+    t0 = monotonic_clock
+    assert_equal 0, machine.pending_count
+    Fiber.schedule do
+      sleep(0.01)
+    end
+    Fiber.schedule do
+      sleep(0.02)
+    end
+    assert_equal 2, machine.pending_count
+    @scheduler.join
+    t1 = monotonic_clock
+    assert_in_range 0.02..0.025, t1 - t0
+
+    assert_equal({
+      fiber: 2,
+      kernel_sleep: 2,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_block
+    mutex = Mutex.new
+    buffer = []
+    t0 = monotonic_clock
+    Fiber.schedule do
+      10.times { sleep 0.001; buffer << it }
+    end
+    Fiber.schedule do
+      mutex.synchronize { sleep(0.005) }
+    end
+    Fiber.schedule do
+      mutex.synchronize { sleep(0.005) }
+    end
+    @scheduler.join
+    t1 = monotonic_clock
+    assert_in_range 0.01..0.020, t1 - t0
+    assert_equal({
+      fiber: 3,
+      kernel_sleep: 12,
+      block: 1,
+      unblock: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_process_wait
+    skip if !@scheduler.respond_to?(:process_wait)
+
+    child_pid = nil
+    status = nil
+    f1 = Fiber.schedule do
+      child_pid = fork {
+        Fiber.scheduler.process_fork
+        Fiber.set_scheduler nil
+        sleep(0.01);
+        exit! 42
+      }
+      status = Process::Status.wait(child_pid)
+    rescue => e
+      p e
+    end
+    @scheduler.join(f1)
+    assert_kind_of Process::Status, status
+    assert_equal child_pid, status.pid
+    assert_equal 42, status.exitstatus
+    assert_equal({
+      fiber: 1,
+      process_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    if child_pid
+      Process.wait(child_pid) rescue nil
+    end
+  end
+
+  # Currently the fiber scheduler doesn't have hooks for send/recv. The only
+  # hook that will be invoked is `io_wait`.
+  def test_fiber_scheduler_sockets
+    s1, s2 = UNIXSocket.pair(:STREAM)
+
+    buf = +''
+    sent = nil
+
+    assert_equal 0, machine.total_op_count
+    Fiber.schedule do
+      buf = s1.recv(12)
+    end
+    Fiber.schedule do
+      sent = s2.send('foobar', 0)
+    end
+
+    # In Ruby, sockets are by default non-blocking. The recv will cause io_wait
+    # to be invoked, the send should get through without needing to poll.
+    assert_equal 1, machine.total_op_count
+    @scheduler.join
+
+    assert_equal 6, sent
+    assert_equal 'foobar', buf
+    assert_equal({
+      fiber: 2,
+      io_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    s1.close rescue nil
+    s2.close rescue nil
+  end
+
+  def test_fiber_scheduler_io_write_io_read
+    fn = "/tmp/#{SecureRandom.hex}"
+    Fiber.schedule do
+      IO.write(fn, 'foobar')
+    end
+    assert_equal 1, machine.total_op_count
+
+    buf = nil
+    Fiber.schedule do
+      buf = IO.read(fn)
+    end
+    assert_equal 2, machine.total_op_count
+
+    @scheduler.join
+    assert_equal 'foobar', buf
+    assert_equal({
+      fiber: 2,
+      blocking_operation_wait: 3,
+      io_read: 2,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_file_io
+    fn = "/tmp/#{SecureRandom.hex}"
+    Fiber.schedule do
+      File.open(fn, 'w') { it.write 'foobar' }
+    end
+    assert_equal 1, machine.total_op_count
+
+    buf = nil
+    Fiber.schedule do
+      File.open(fn, 'r') { buf = it.read }
+    end
+    assert_equal 2, machine.total_op_count
+    @scheduler.join
+    assert_equal 'foobar', buf
+    assert_equal({
+      fiber: 2,
+      blocking_operation_wait: 3,
+      io_read: 2,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_mutex
+    mutex = Mutex.new
+
+    buf = []
+    Fiber.schedule do
+      buf << 11
+      mutex.synchronize {
+        buf << [12, machine.total_op_count]
+        sleep 0.01
+        buf << [13, machine.total_op_count]
+      }
+      buf << 14
+    end
+    assert_equal 1, machine.total_op_count
+
+    Fiber.schedule do
+      buf << 21
+      mutex.synchronize {
+        buf << [22, machine.total_op_count]
+        sleep 0.01
+        buf << [23, machine.total_op_count]
+      }
+      buf << 24
+    end
+    assert_equal 1, machine.total_op_count
+
+    @scheduler.join
+    assert_equal [11, [12, 0], 21, [13, 2], 14, [22, 2], [23, 4], 24], buf
+    assert_equal({
+      fiber: 2,
+      kernel_sleep: 2,
+      block: 1,
+      unblock: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_queue_shift
+    queue = Queue.new
+
+    buf = []
+    Fiber.schedule do
+      buf << [11, machine.total_op_count]
+      buf << queue.shift
+      buf << [12, machine.total_op_count]
+    end
+    Fiber.schedule do
+      buf << [21, machine.total_op_count]
+      queue << :foo
+      buf << [22, machine.total_op_count]
+    end
+    assert_equal 0, machine.total_op_count
+    @scheduler.join
+
+    assert_equal [[11, 0], [21, 0], [22, 0], :foo, [12, 1]], buf
+    assert_equal({
+      fiber: 2,
+      block: 1,
+      unblock: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_queue_shift_with_timeout
+    queue = Queue.new
+
+    buf = []
+    Fiber.schedule do
+      buf << [11, machine.total_op_count]
+      buf << queue.shift(timeout: 0.01)
+      buf << [12, machine.total_op_count]
+    end
+    Fiber.schedule do
+      buf << [21, machine.total_op_count]
+    end
+    assert_equal 1, machine.total_op_count
+    @scheduler.join
+
+    assert_equal [[11, 0], [21, 1], nil, [12, 2]], buf
+    assert_equal({
+      fiber: 2,
+      block: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_thread_join
+    thread = Thread.new do
+      sleep 0.1
+    end
+    Fiber.schedule do
+      thread.join
+    end
+
+    # No ops are issued, except for a NOP SQE used to wakeup the waiting thread.
+    assert_equal 0, machine.total_op_count
+
+    @scheduler.join
+    assert_equal 1, machine.total_op_count
+    assert_equal({
+      fiber: 1,
+      block: 1,
+      unblock: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_system
+    skip if !@scheduler.respond_to?(:process_wait)
+
+    buf = []
+    Fiber.schedule do
+      buf << system('sleep 0.01')
+    end
+    @scheduler.join
+    assert_equal [true], buf
+    assert_equal({
+      fiber: 1,
+      process_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    Process.wait(0, Process::WNOHANG) rescue nil
+  end
+
+  def test_fiber_scheduler_cmd
+    skip if !@scheduler.respond_to?(:process_wait)
+
+    buf = []
+    Fiber.schedule do
+      buf << `echo 'foo'`
+    end
+    assert_equal 1, machine.total_op_count
+    @scheduler.join
+    assert_equal ["foo\n"], buf
+    assert_equal({
+      fiber: 1,
+      io_read: 2,
+      process_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    Process.wait(0, Process::WNOHANG) rescue nil
+  end
+
+  def test_fiber_scheduler_popen
+    skip if !@scheduler.respond_to?(:process_wait)
+
+    buf = []
+    Fiber.schedule do
+      IO.popen('ruby', 'r+') do |pipe|
+        buf << [11, machine.total_op_count]
+        pipe.puts 'puts "bar"'
+        buf << [12, machine.total_op_count]
+        pipe.close_write
+        buf << [13, pipe.gets.chomp, machine.total_op_count]
+      end
+    end
+    assert_equal 1, machine.total_op_count
+    @scheduler.join
+    assert_equal [[11, 0], [12, 3], [13, "bar", 5]], buf
+    assert_equal({
+      fiber: 1,
+      io_write: 2,
+      io_read: 1,
+      blocking_operation_wait: 1,
+      process_wait: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    Process.wait(0, Process::WNOHANG) rescue nil
+  end
+
+  def test_fiber_scheduler_fiber_interrupt
+    r, w = IO.pipe
+    w << 'foo'
+
+    exception = nil
+    Fiber.schedule do
+      r.read
+    rescue Exception => e
+      exception = e
+    end
+    assert_equal 1, machine.total_op_count
+    machine.snooze
+    Thread.new {
+      r.close
+    }
+    @scheduler.join
+    assert_kind_of IOError, exception
+    assert_equal({
+      fiber: 1,
+      io_read: 2,
+      fiber_interrupt: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  ensure
+    r.close rescue nil
+    w.close rescue nil
+  end
+
+  def test_fiber_scheduler_address_resolve
+    addrs = nil
+    Fiber.schedule do
+      addrs = Addrinfo.getaddrinfo("localhost", 80, Socket::AF_INET, :STREAM)
+    end
+    assert_equal 1, machine.total_op_count
+    @scheduler.join
+    assert_kind_of Array, addrs
+    addr = addrs.first
+    assert_kind_of Addrinfo, addr
+    assert_includes ['127.0.0.1', '::1'], addr.ip_address
+    assert_equal({
+      fiber: 1,
+      io_read: 2,
+      blocking_operation_wait: 1,
+      address_resolve: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_timeout_after
+    res = nil
+    Fiber.schedule do
+      Timeout.timeout(0.05) do
+        sleep 1
+      end
+      res = true
+    rescue => e
+      res = e
+    end
+    @scheduler.join
+    assert_equal 3, machine.total_op_count
+    assert_kind_of Timeout::Error, res
+    assert_equal({
+      fiber: 1,
+      timeout_after: 1,
+      kernel_sleep: 1,
+      join: 1
+    }, @scheduler.calls.map { it[:sym] }.tally)
+  end
+
+  def test_fiber_scheduler_io_select
+    r, w = IO.pipe
+    buf = []
+
+    Fiber.schedule do
+      buf << IO.select([r], [], [])
+      buf << IO.select([], [w], [])
+    end
+    @machine.snooze
+    w << 'foo'
+    @machine.snooze
+    assert_equal [[[r], [], []]], buf
+    @machine.snooze
+    @scheduler.join
+    assert_equal [[[r], [], []], [[], [w], []]], buf
+  ensure
+    r.close rescue nil
+    w.close rescue nil
+  end
+
+  def test_fiber_scheduler_blocking_operation_wait_single_issuer
+    buf = []
+    (1..10).each { |i|
+      op = -> { i * 10}
+      buf << @scheduler.blocking_operation_wait(op)
+      sleep 0.01
+      @machine.snooze
+    }
+    assert_equal (1..10).map { it * 10 }, buf
+
+    buf = []
+    (1..20).each { |i|
+      op = -> { i * 10}
+      Fiber.schedule do
+        sleep 0.001
+        buf << @scheduler.blocking_operation_wait(op)
+        sleep 0.001
+      end
+    }
+    @scheduler.join
+
+    assert_equal (1..20).map { it * 10 }, buf.sort
+  end
+end
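The added test file above also works as a compact reference for how the scheduler is driven from application code. The following is a minimal usage sketch, inferred only from the calls exercised in the tests (UM::FiberScheduler.new, Fiber.set_scheduler, Fiber.schedule, #join); it is not taken from the gem's documentation, so treat the exact behavior as an assumption:

    require 'uringmachine'
    require 'uringmachine/fiber_scheduler'

    # Install an io_uring-backed fiber scheduler on the current thread.
    # The tests above also show passing an existing UringMachine instance
    # to the constructor.
    scheduler = UM::FiberScheduler.new
    Fiber.set_scheduler(scheduler)

    # Blocking calls made inside scheduled fibers (sleep, IO#read,
    # Queue#shift, ...) are routed through the scheduler hooks instead of
    # blocking the thread.
    Fiber.schedule do
      sleep 0.01
      puts 'timer fired'
    end

    # Wait for all scheduled fibers to finish, then uninstall the scheduler.
    scheduler.join
    Fiber.set_scheduler(nil)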
data/test/test_stream.rb
CHANGED
@@ -110,12 +110,12 @@ class StreamRespTest < StreamBaseTest
 
     machine.write(@wfd, "-foobar\r\n")
     o = @stream.resp_decode
-    assert_kind_of
+    assert_kind_of UM::Stream::RESPError, o
     assert_equal "foobar", o.message
 
     machine.write(@wfd, "!3\r\nbaz\r\n")
     o = @stream.resp_decode
-    assert_kind_of
+    assert_kind_of UM::Stream::RESPError, o
     assert_equal "baz", o.message
 
     machine.write(@wfd, ":123\r\n")