uringmachine 0.19.1 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +3 -4
- data/CHANGELOG.md +32 -1
- data/TODO.md +0 -39
- data/examples/bm_fileno.rb +33 -0
- data/examples/bm_mutex.rb +85 -0
- data/examples/bm_mutex_single.rb +33 -0
- data/examples/bm_queue.rb +29 -29
- data/examples/bm_send.rb +2 -5
- data/examples/bm_snooze.rb +20 -42
- data/examples/bm_write.rb +4 -1
- data/examples/fiber_scheduler_demo.rb +15 -51
- data/examples/fiber_scheduler_fork.rb +24 -0
- data/examples/nc_ssl.rb +71 -0
- data/ext/um/extconf.rb +5 -15
- data/ext/um/um.c +310 -74
- data/ext/um/um.h +66 -29
- data/ext/um/um_async_op.c +1 -1
- data/ext/um/um_async_op_class.c +2 -2
- data/ext/um/um_buffer.c +1 -1
- data/ext/um/um_class.c +178 -31
- data/ext/um/um_const.c +51 -3
- data/ext/um/um_mutex_class.c +1 -1
- data/ext/um/um_op.c +37 -0
- data/ext/um/um_queue_class.c +1 -1
- data/ext/um/um_stream.c +5 -5
- data/ext/um/um_stream_class.c +3 -0
- data/ext/um/um_sync.c +28 -39
- data/ext/um/um_utils.c +59 -19
- data/grant-2025/journal.md +353 -0
- data/grant-2025/tasks.md +135 -0
- data/lib/uringmachine/fiber_scheduler.rb +316 -57
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +6 -0
- data/test/test_fiber_scheduler.rb +640 -0
- data/test/test_stream.rb +2 -2
- data/test/test_um.rb +722 -54
- data/uringmachine.gemspec +5 -5
- data/vendor/liburing/.github/workflows/ci.yml +94 -1
- data/vendor/liburing/.github/workflows/test_build.c +9 -0
- data/vendor/liburing/configure +27 -0
- data/vendor/liburing/examples/Makefile +6 -0
- data/vendor/liburing/examples/helpers.c +8 -0
- data/vendor/liburing/examples/helpers.h +5 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/Makefile +9 -3
- data/vendor/liburing/src/include/liburing/barrier.h +11 -5
- data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
- data/vendor/liburing/src/include/liburing/io_uring.h +51 -0
- data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
- data/vendor/liburing/src/include/liburing.h +458 -121
- data/vendor/liburing/src/liburing-ffi.map +16 -0
- data/vendor/liburing/src/liburing.map +8 -0
- data/vendor/liburing/src/sanitize.c +4 -1
- data/vendor/liburing/src/setup.c +7 -4
- data/vendor/liburing/test/232c93d07b74.c +4 -16
- data/vendor/liburing/test/Makefile +15 -1
- data/vendor/liburing/test/accept.c +2 -13
- data/vendor/liburing/test/bind-listen.c +175 -13
- data/vendor/liburing/test/conn-unreach.c +132 -0
- data/vendor/liburing/test/fd-pass.c +32 -7
- data/vendor/liburing/test/fdinfo.c +39 -12
- data/vendor/liburing/test/fifo-futex-poll.c +114 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
- data/vendor/liburing/test/futex.c +1 -1
- data/vendor/liburing/test/helpers.c +99 -2
- data/vendor/liburing/test/helpers.h +9 -0
- data/vendor/liburing/test/io_uring_passthrough.c +6 -12
- data/vendor/liburing/test/mock_file.c +379 -0
- data/vendor/liburing/test/mock_file.h +47 -0
- data/vendor/liburing/test/nop.c +2 -2
- data/vendor/liburing/test/nop32-overflow.c +150 -0
- data/vendor/liburing/test/nop32.c +126 -0
- data/vendor/liburing/test/pipe.c +166 -0
- data/vendor/liburing/test/poll-race-mshot.c +13 -1
- data/vendor/liburing/test/read-write.c +4 -4
- data/vendor/liburing/test/recv-mshot-fair.c +81 -34
- data/vendor/liburing/test/recvsend_bundle.c +1 -1
- data/vendor/liburing/test/resize-rings.c +2 -0
- data/vendor/liburing/test/ring-query.c +322 -0
- data/vendor/liburing/test/ringbuf-loop.c +87 -0
- data/vendor/liburing/test/ringbuf-read.c +4 -4
- data/vendor/liburing/test/runtests.sh +2 -2
- data/vendor/liburing/test/send-zerocopy.c +43 -5
- data/vendor/liburing/test/send_recv.c +103 -32
- data/vendor/liburing/test/shutdown.c +2 -12
- data/vendor/liburing/test/socket-nb.c +3 -14
- data/vendor/liburing/test/socket-rw-eagain.c +2 -12
- data/vendor/liburing/test/socket-rw-offset.c +2 -12
- data/vendor/liburing/test/socket-rw.c +2 -12
- data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
- data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
- data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
- data/vendor/liburing/test/timestamp.c +56 -19
- data/vendor/liburing/test/vec-regbuf.c +2 -4
- data/vendor/liburing/test/wq-aff.c +7 -0
- metadata +37 -15
data/ext/um/um.c
CHANGED
@@ -1,25 +1,34 @@
-#include <float.h>
 #include "um.h"
-#include
+#include <float.h>
+#include <ruby/thread.h>
+#include <assert.h>
+#include <poll.h>
+
+#define DEFAULT_ENTRIES 4096
+
+inline void prepare_io_uring_params(struct io_uring_params *params, uint sqpoll_timeout_msec) {
+  memset(params, 0, sizeof(struct io_uring_params));
+  params->flags = IORING_SETUP_SUBMIT_ALL;
+  if (sqpoll_timeout_msec) {
+    params->flags |= IORING_SETUP_SQPOLL;
+    params->sq_thread_idle = sqpoll_timeout_msec;
+  }
+  else
+    params->flags |= IORING_SETUP_COOP_TASKRUN;
+}
 
-void um_setup(VALUE self, struct um *machine) {
+void um_setup(VALUE self, struct um *machine, uint entries, uint sqpoll_timeout_msec) {
   memset(machine, 0, sizeof(struct um));
 
   RB_OBJ_WRITE(self, &machine->self, self);
 
-
-
-
-  while (1) {
-    int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
-    if (likely(!ret)) break;
+  machine->entries = (entries > 0) ? entries : DEFAULT_ENTRIES;
+  machine->sqpoll_mode = !!sqpoll_timeout_msec;
 
-
-
-
-
-    rb_syserr_fail(-ret, strerror(-ret));
-  }
+  struct io_uring_params params;
+  prepare_io_uring_params(&params, sqpoll_timeout_msec);
+  int ret = io_uring_queue_init_params(machine->entries, &machine->ring, &params);
+  if (ret) rb_syserr_fail(-ret, strerror(-ret));
   machine->ring_initialized = 1;
 }
 
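The rewritten setup path above replaces the old retry loop around io_uring_queue_init with a single io_uring_queue_init_params call, selecting IORING_SETUP_SQPOLL (a kernel submission-polling thread with the given idle timeout) when a sqpoll timeout is passed, and IORING_SETUP_COOP_TASKRUN otherwise. A minimal standalone sketch of the same liburing pattern; the entry count and flag combination here are illustrative, not the gem's values:

// sketch: ring setup via io_uring_queue_init_params, mirroring prepare_io_uring_params above
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct io_uring ring;
  struct io_uring_params params;
  memset(&params, 0, sizeof(params));
  // submit all queued SQEs even if one errors; run deferred task work cooperatively
  params.flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;

  int ret = io_uring_queue_init_params(256, &ring, &params);
  if (ret) {
    fprintf(stderr, "init failed: %s\n", strerror(-ret));
    return 1;
  }

  // exercise the ring with a single NOP
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_nop(sqe);
  io_uring_submit(&ring);

  struct io_uring_cqe *cqe;
  io_uring_wait_cqe(&ring, &cqe);
  printf("nop cqe res: %d\n", cqe->res);
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  return 0;
}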
@@ -39,11 +48,16 @@ inline void um_teardown(struct um *machine) {
 }
 
 inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
+  if (DEBUG) fprintf(stderr, "-> %p um_get_sqe: op->kind=%s unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, op ? um_op_kind_name(op->kind) : "NULL", machine->unsubmitted_count,
+    machine->pending_count, machine->total_op_count
+  );
+
   struct io_uring_sqe *sqe;
   sqe = io_uring_get_sqe(&machine->ring);
   if (likely(sqe)) goto done;
 
-
+  um_raise_internal_error("Failed to get SQE");
 
   // TODO: retry getting SQE?
 
@@ -57,22 +71,78 @@ done:
   sqe->user_data = (long long)op;
   sqe->flags = 0;
   machine->unsubmitted_count++;
-  if (op)
+  if (op) {
+    machine->pending_count++;
+    machine->total_op_count++;
+  }
   return sqe;
 }
 
+struct um_submit_ctx {
+  struct um *machine;
+  int result;
+};
+
+// adapted from liburing/src/queue.c
+static inline bool sq_ring_needs_enter(struct um *machine) {
+  if (machine->sqpoll_mode) {
+    io_uring_smp_mb();
+    if (unlikely(IO_URING_READ_ONCE(*machine->ring.sq.kflags) & IORING_SQ_NEED_WAKEUP))
+      return true;
+  }
+  return true;
+}
+
+void *um_submit_without_gvl(void *ptr) {
+  struct um_submit_ctx *ctx = ptr;
+  ctx->result = io_uring_submit(&ctx->machine->ring);
+  return NULL;
+}
+
+inline uint um_submit(struct um *machine) {
+  if (DEBUG) fprintf(stderr, "-> %p um_submit: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->unsubmitted_count, machine->pending_count, machine->total_op_count
+  );
+  if (!machine->unsubmitted_count) {
+    if (DEBUG) fprintf(stderr, "<- %p um_submit: no unsubmitted SQEs, early return\n",
+      &machine->ring
+    );
+    return 0;
+  }
+
+  struct um_submit_ctx ctx = { .machine = machine };
+  if (sq_ring_needs_enter(machine))
+    rb_thread_call_without_gvl(um_submit_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+  else
+    ctx.result = io_uring_submit(&machine->ring);
+
+  if (DEBUG) fprintf(stderr, "<- %p um_submit: result=%d\n",
+    &machine->ring, ctx.result
+  );
+
+  if (ctx.result < 0)
+    rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+
+  machine->unsubmitted_count = 0;
+  return ctx.result;
+}
+
 static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
   struct um_op *op = (struct um_op *)cqe->user_data;
+  if (DEBUG) {
+    if (op) fprintf(stderr, "<- %p um_process_cqe: op %p kind %s flags %d cqe_res %d cqe_flags %d pending %d\n",
+      &machine->ring, op, um_op_kind_name(op->kind), op->flags, cqe->res, cqe->flags, machine->pending_count
+    );
+    else fprintf(stderr, "<- %p um_process_cqe: op NULL cqe_res %d cqe_flags %d pending %d\n",
+      &machine->ring, cqe->res, cqe->flags, machine->pending_count
+    );
+  }
   if (unlikely(!op)) return;
 
+
   if (!(cqe->flags & IORING_CQE_F_MORE))
     machine->pending_count--;
 
-  // printf(
-  //   ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
-  //   op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
-  // );
-
   if (op->flags & OP_F_FREE_ON_COMPLETE) {
     if (op->flags & OP_F_TRANSIENT)
       um_op_transient_remove(machine, op);
|
|
|
81
151
|
return;
|
|
82
152
|
}
|
|
83
153
|
|
|
154
|
+
op->flags |= OP_F_COMPLETED;
|
|
84
155
|
if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
|
|
156
|
+
if (unlikely(op->flags & OP_F_CANCELED)) return;
|
|
85
157
|
|
|
86
|
-
op->flags |= OP_F_COMPLETED;
|
|
87
158
|
if (op->flags & OP_F_TRANSIENT)
|
|
88
159
|
um_op_transient_remove(machine, op);
|
|
89
160
|
|
|
@@ -108,6 +179,10 @@ static inline int cq_ring_needs_flush(struct io_uring *ring) {
 }
 
 static inline int um_process_ready_cqes(struct um *machine) {
+  if (DEBUG) fprintf(stderr, "-> %p um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->unsubmitted_count, machine->pending_count, machine->total_op_count
+  );
+
   unsigned total_count = 0;
 iterate:
   bool overflow_checked = false;
@@ -124,12 +199,21 @@ iterate:
   if (overflow_checked) goto done;
 
   if (cq_ring_needs_flush(&machine->ring)) {
-
+    if (DEBUG) fprintf(stderr, "-> %p io_uring_enter\n", &machine->ring);
+    int ret = io_uring_enter(machine->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    if (DEBUG) fprintf(stderr, "<- %p io_uring_enter: result=%d\n", &machine->ring, ret);
+    if (ret < 0)
+      rb_syserr_fail(-ret, strerror(-ret));
+
     overflow_checked = true;
     goto iterate;
  }
 
 done:
+  if (DEBUG) fprintf(stderr, "<- %p um_process_ready_cqes: total_processed=%u\n",
+    &machine->ring, total_count
+  );
+
   return total_count;
 }
 
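The flush branch above handles completion-queue overflow: when the kernel reports backlogged completions (or pending task work), an io_uring_enter with IORING_ENTER_GETEVENTS forces them into the CQ ring so the iteration can pick them up. A sketch of the check and flush in isolation, using liburing's shared-ring accessors:

// sketch: detecting and flushing a CQ overflow backlog
#include <liburing.h>

static int cq_overflowed(struct io_uring *ring) {
  // the kernel sets this flag in the shared SQ ring flags word
  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
}

static void flush_cq_overflow(struct io_uring *ring) {
  if (cq_overflowed(ring))
    // GETEVENTS makes the kernel move backlogged CQEs into the ring
    io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
}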
@@ -143,18 +227,33 @@ struct wait_for_cqe_ctx {
 void *um_wait_for_cqe_without_gvl(void *ptr) {
   struct wait_for_cqe_ctx *ctx = ptr;
   if (ctx->machine->unsubmitted_count) {
-
+    if (DEBUG) fprintf(stderr, "-> %p io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
+      &ctx->machine->ring, ctx->machine->unsubmitted_count, ctx->machine->pending_count,
+      ctx->machine->total_op_count
+    );
 
     // Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
     // interrupted with a signal. We can detect this by testing ctx->cqe for
     // NULL.
     //
     // https://github.com/axboe/liburing/issues/1280
-    int
-    ctx->
+    int ret = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
+    ctx->machine->unsubmitted_count = 0;
+    if (DEBUG) fprintf(stderr, "<- %p io_uring_submit_and_wait_timeout: result=%d\n",
+      &ctx->machine->ring, ret
+    );
+    ctx->result = (ret > 0 && !ctx->cqe) ? -EINTR : ret;
   }
-  else
+  else {
+    if (DEBUG) fprintf(stderr, "-> %p io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
+      &ctx->machine->ring, ctx->machine->unsubmitted_count, ctx->machine->pending_count,
+      ctx->machine->total_op_count
+    );
     ctx->result = io_uring_wait_cqes(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
+    if (DEBUG) fprintf(stderr, "<- %p io_uring_wait_cqes: result=%d\n",
+      &ctx->machine->ring, ctx->result
+    );
+  }
   return NULL;
 }
 
@@ -201,9 +300,14 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
 }
 
 inline VALUE um_fiber_switch(struct um *machine) {
+  if (DEBUG) fprintf(stderr, "-> %p um_fiber_switch: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->unsubmitted_count, machine->pending_count, machine->total_op_count
+  );
   while (true) {
     struct um_op *op = um_runqueue_shift(machine);
     if (op) {
+      if (unlikely(op->flags & OP_F_RUNQUEUE_SKIP)) continue;
+
       // in case of a snooze, we need to prevent a situation where completions
       // are not processed because the runqueue is never empty. Theoretically,
       // we can still have a situation where multiple fibers are all doing a
@@ -231,16 +335,15 @@ inline VALUE um_fiber_switch(struct um *machine) {
   }
 }
 
-void
+void um_cancel_op(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
   io_uring_prep_cancel64(sqe, (long long)op, 0);
 }
 
 inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
-
-  while (
+  um_cancel_op(machine, op);
+  while (!um_op_completed_p(op)) {
     um_fiber_switch(machine);
-    if (um_op_completed_p(op)) break;
   }
 }
 
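um_cancel_op above targets an in-flight operation through the user_data pointer that was stashed in its SQE. The general liburing idiom, as a sketch: submit an operation with a known user_data key, then submit an async cancel keyed on the same value; the canceled operation completes with -ECANCELED.

// sketch: canceling an in-flight operation by its user_data key
#include <liburing.h>

static void cancel_by_user_data(struct io_uring *ring, unsigned long long key) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  // match on user_data; flags = 0 cancels the first match only
  io_uring_prep_cancel64(sqe, key, 0);
  sqe->user_data = 0;  // we don't care about the cancel op's own CQE
  io_uring_submit(ring);
}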
@@ -261,7 +364,14 @@ inline VALUE um_await(struct um *machine) {
   return ret;
 }
 
-
+VALUE um_wakeup(struct um *machine) {
+  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
+  io_uring_prep_nop(sqe);
+  io_uring_submit(&machine->ring);
+  return Qnil;
+}
+
+inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, unsigned flags) {
   memset(op, 0, sizeof(struct um_op));
   op->kind = kind;
   op->flags = flags;
@@ -299,7 +409,7 @@ VALUE um_timeout_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
 
   if (!um_op_completed_p(ctx->op)) {
-
+    um_cancel_op(ctx->machine, ctx->op);
     ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
     um_op_transient_add(ctx->machine, ctx->op);
   }
@@ -353,12 +463,12 @@ VALUE um_sleep(struct um *machine, double duration) {
   return ret;
 }
 
-
+VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset, __u64 file_offset) {
   struct um_op op;
   um_prep_op(machine, &op, OP_READ, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
-  io_uring_prep_read(sqe, fd, ptr, maxlen,
+  io_uring_prep_read(sqe, fd, ptr, maxlen, file_offset);
 
   VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op)) {
@@ -366,14 +476,13 @@ inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int b
     ret = INT2NUM(op.result.res);
 
   }
-  RB_GC_GUARD(buffer);
 
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
   return ret;
 }
 
-
+size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
   struct um_op op;
   um_prep_op(machine, &op, OP_READ, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -390,38 +499,47 @@ inline size_t um_read_raw(struct um *machine, int fd, char *buffer, int maxlen)
   return 0;
 }
 
-VALUE um_write(struct um *machine, int fd, VALUE
+VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
+  const void *base;
+  size_t size;
+  um_get_buffer_bytes_for_writing(buffer, &base, &size);
+  if ((len == (size_t)-1) || (len > size)) len = size;
+  if (unlikely(!len)) return INT2NUM(0);
+
   struct um_op op;
   um_prep_op(machine, &op, OP_WRITE, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
-  const int str_len = RSTRING_LEN(str);
-  if (len > str_len) len = str_len;
 
-  io_uring_prep_write(sqe, fd,
+  io_uring_prep_write(sqe, fd, base, len, file_offset);
 
   VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
-  RB_GC_GUARD(str);
-
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
   return ret;
 }
 
-VALUE um_write_async(struct um *machine, int fd, VALUE
+VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
+  const void *base;
+  size_t size;
+  um_get_buffer_bytes_for_writing(buffer, &base, &size);
+  if ((len == (size_t)-1) || (len > size)) len = size;
+  if (unlikely(!len)) return INT2NUM(0);
+
   struct um_op *op = um_op_alloc(machine);
   um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
   RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->value,
+  RB_OBJ_WRITE(machine->self, &op->value, buffer);
   RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
 
+
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  io_uring_prep_write(sqe, fd,
+  io_uring_prep_write(sqe, fd, base, len, file_offset);
   um_op_transient_add(machine, op);
 
-  return
+  return buffer;
 }
 
 VALUE um_close(struct um *machine, int fd) {
@@ -497,18 +615,22 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, sockle
   return ret;
 }
 
-VALUE um_send(struct um *machine, int fd, VALUE buffer,
+VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
   struct um_op op;
   um_prep_op(machine, &op, OP_SEND, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
-
+
+  const void *base;
+  size_t size;
+  um_get_buffer_bytes_for_writing(buffer, &base, &size);
+  if ((len == (size_t)-1) || (len > size)) len = size;
+
+  io_uring_prep_send(sqe, fd, base, len, flags);
 
   VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
-  RB_GC_GUARD(buffer);
-
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
   return ret;
@@ -534,11 +656,12 @@ VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
   return ret;
 }
 
-VALUE um_recv(struct um *machine, int fd, VALUE buffer,
+VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
   struct um_op op;
   um_prep_op(machine, &op, OP_RECV, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
+
   io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
 
   VALUE ret = um_fiber_switch(machine);
@@ -547,8 +670,6 @@ VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
     ret = INT2NUM(op.result.res);
   }
 
-  RB_GC_GUARD(buffer);
-
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
   return ret;
@@ -588,7 +709,6 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
   VALUE ret = Qnil;
   int value;
 
-#ifdef HAVE_IO_URING_PREP_CMD_SOCK
   struct um_op op;
   um_prep_op(machine, &op, OP_GETSOCKOPT, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -597,13 +717,6 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
   ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(value);
-#else
-  socklen_t nvalue = sizeof(value);
-  int res = getsockopt(fd, level, opt, &value, &nvalue);
-  if (res)
-    rb_syserr_fail(errno, strerror(errno));
-  ret = INT2NUM(value);
-#endif
 
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
@@ -613,7 +726,6 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
 VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
   VALUE ret = Qnil;
 
-#ifdef HAVE_IO_URING_PREP_CMD_SOCK
   struct um_op op;
   um_prep_op(machine, &op, OP_SETSOCKOPT, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -622,12 +734,6 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
   ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
-#else
-  int res = setsockopt(fd, level, opt, &value, sizeof(value));
-  if (res)
-    rb_syserr_fail(errno, strerror(errno));
-  ret = INT2NUM(0);
-#endif
 
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
@@ -694,13 +800,121 @@ VALUE um_poll(struct um *machine, int fd, unsigned mask) {
   return ret;
 }
 
-
+static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op *ops, VALUE fds, uint len, uint flags, uint event) {
+  for (uint i = 0; i < len; i++) {
+    struct um_op *op = ops + ((*idx)++);
+    um_prep_op(machine, op, OP_POLL, flags | OP_F_IGNORE_CANCELED);
+    struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+    VALUE fd = rb_ary_entry(fds, i);
+    RB_OBJ_WRITE(machine->self, &op->value, fd);
+    io_uring_prep_poll_add(sqe, NUM2INT(fd), event);
+  }
+}
+
+VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, uint rfds_len, uint wfds_len, uint efds_len) {
   struct um_op op;
-
+  uint idx = 0;
+  if (rfds_len)
+    prepare_select_poll_ops(machine, &idx, &op, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
+  else if (wfds_len)
+    prepare_select_poll_ops(machine, &idx, &op, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
+  else if (efds_len)
+    prepare_select_poll_ops(machine, &idx, &op, efds, efds_len, OP_F_SELECT_POLLPRI, POLLPRI);
+  assert(idx == 1);
+
+  VALUE ret = um_fiber_switch(machine);
+  um_check_completion(machine, &op);
+  RAISE_IF_EXCEPTION(ret);
+
+  if (op.flags & OP_F_SELECT_POLLIN)
+    return rb_ary_new3(3, rb_ary_new3(1, ret), rb_ary_new(), rb_ary_new());
+  else if (op.flags & OP_F_SELECT_POLLOUT)
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new3(1, ret), rb_ary_new());
+  else
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new3(1, ret));
+
+  RB_GC_GUARD(ret);
+}
+
+VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
+  uint rfds_len = RARRAY_LEN(rfds);
+  uint wfds_len = RARRAY_LEN(wfds);
+  uint efds_len = RARRAY_LEN(efds);
+  uint total_len = rfds_len + wfds_len + efds_len;
+  if (total_len == 1)
+    return um_select_single(machine, rfds, wfds, efds, rfds_len, wfds_len, efds_len);
+
+  if (unlikely(!total_len))
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new());
+
+  struct um_op *ops = malloc(sizeof(struct um_op) * total_len);
+  uint idx = 0;
+  prepare_select_poll_ops(machine, &idx, ops, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
+  prepare_select_poll_ops(machine, &idx, ops, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
+  prepare_select_poll_ops(machine, &idx, ops, efds, efds_len, OP_F_SELECT_POLLPRI, POLLPRI);
+  assert(idx == total_len);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (unlikely(um_value_is_exception_p(ret))) {
+    free(ops);
+    um_raise_exception(ret);
+  }
+
+  VALUE rfds_out = rb_ary_new();
+  VALUE wfds_out = rb_ary_new();
+  VALUE efds_out = rb_ary_new();
+
+  int error_code = 0;
+  uint pending = total_len;
+  for (uint i = 0; i < total_len; i++) {
+    if (um_op_completed_p(&ops[i])) {
+      ops[i].flags |= OP_F_RUNQUEUE_SKIP;
+      pending--;
+
+      if (unlikely((ops[i].result.res < 0) && !error_code)) {
+        error_code = ops[i].result.res;
+      }
+      else {
+        if (ops[i].flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i].value);
+        if (ops[i].flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i].value);
+        if (ops[i].flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i].value);
+      }
+    }
+    else {
+      ops[i].flags |= OP_F_CANCELED;
+      um_cancel_op(machine, &ops[i]);
+    }
+  }
+
+  while (pending) {
+    um_wait_for_and_process_ready_cqes(machine, 0);
+
+    for (uint i = 0; i < total_len; i++) {
+      struct um_op *op = ops + i;
+      if (op->flags & OP_F_CANCELED && um_op_completed_p(op)) {
+        pending--;
+      }
+    }
+  }
+  free(ops);
+
+  if (error_code)
+    um_raise_on_error_result(error_code);
+
+  return rb_ary_new3(3, rfds_out, wfds_out, efds_out);
+
+  RB_GC_GUARD(rfds_out);
+  RB_GC_GUARD(wfds_out);
+  RB_GC_GUARD(efds_out);
+}
+
+VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_WAITID, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
 
   siginfo_t infop;
-  io_uring_prep_waitid(sqe,
+  io_uring_prep_waitid(sqe, idtype, id, &infop, options, 0);
 
   VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
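The new um_select above implements a select-like readiness wait by issuing one poll-add operation per fd and canceling whatever is still in flight once the first batch of completions is collected. In isolation, the underlying poll operation looks like this sketch; the CQE res is the returned revents mask, or a negative errno:

// sketch: one-shot readiness poll on an fd via io_uring
#include <liburing.h>
#include <poll.h>

static int wait_readable(struct io_uring *ring, int fd) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  io_uring_prep_poll_add(sqe, fd, POLLIN);
  io_uring_submit(ring);

  struct io_uring_cqe *cqe;
  io_uring_wait_cqe(ring, &cqe);
  int res = cqe->res;  // revents mask, or -errno on failure
  io_uring_cqe_seen(ring, cqe);
  return res;
}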
@@ -709,9 +923,31 @@ VALUE um_waitpid(struct um *machine, int pid, int options) {
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
 
-  return rb_ary_new_from_args(
+  return rb_ary_new_from_args(
+    3, INT2NUM(infop.si_pid), INT2NUM(infop.si_status), INT2NUM(infop.si_code)
+  );
 }
 
+#ifdef HAVE_RB_PROCESS_STATUS_NEW
+VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_WAITID, 0);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+
+  siginfo_t infop;
+  io_uring_prep_waitid(sqe, idtype, id, &infop, options | WNOWAIT, 0);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+
+  RAISE_IF_EXCEPTION(ret);
+  RB_GC_GUARD(ret);
+
+  return rb_process_status_new(infop.si_pid, (infop.si_status & 0xff) << 8, 0);
+}
+#endif
+
 #define hash_set(h, sym, v) rb_hash_aset(h, ID2SYM(rb_intern(sym)), v)
 
 VALUE statx_to_hash(struct statx *stat) {
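um_waitid_status above passes WNOWAIT so the io_uring waitid operation only peeks at the child's status without reaping it, leaving the status available for the subsequent Process::Status construction. The plain POSIX semantics of that flag, as a sketch:

// sketch: WNOWAIT peeks at a child's exit status without reaping it
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  pid_t pid = fork();
  if (pid == 0) _exit(42);

  siginfo_t info;
  // peek: the child stays a zombie and remains waitable
  waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
  printf("peeked: pid=%d status=%d\n", info.si_pid, info.si_status);

  // reap for real
  waitid(P_PID, pid, &info, WEXITED);
  return 0;
}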
|