uringmachine 0.20.0 → 0.22.0
This diff compares the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +3 -4
- data/.rubocop.yml +2 -0
- data/CHANGELOG.md +34 -0
- data/TODO.md +132 -26
- data/benchmark/README.md +173 -0
- data/benchmark/bm_io_pipe.rb +70 -0
- data/benchmark/bm_io_socketpair.rb +71 -0
- data/benchmark/bm_mutex_cpu.rb +57 -0
- data/benchmark/bm_mutex_io.rb +64 -0
- data/benchmark/bm_pg_client.rb +109 -0
- data/benchmark/bm_queue.rb +76 -0
- data/benchmark/chart.png +0 -0
- data/benchmark/common.rb +135 -0
- data/benchmark/dns_client.rb +47 -0
- data/{examples/bm_http_parse.rb → benchmark/http_parse.rb} +1 -1
- data/benchmark/run_bm.rb +8 -0
- data/benchmark/sqlite.rb +108 -0
- data/{examples/bm_write.rb → benchmark/write.rb} +6 -3
- data/ext/um/extconf.rb +1 -1
- data/ext/um/um.c +404 -95
- data/ext/um/um.h +77 -24
- data/ext/um/um_async_op.c +2 -2
- data/ext/um/um_class.c +168 -18
- data/ext/um/um_op.c +43 -0
- data/ext/um/um_sync.c +10 -16
- data/ext/um/um_utils.c +16 -0
- data/grant-2025/journal.md +242 -1
- data/grant-2025/tasks.md +136 -41
- data/lib/uringmachine/actor.rb +8 -0
- data/lib/uringmachine/dns_resolver.rb +1 -2
- data/lib/uringmachine/fiber_scheduler.rb +283 -110
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +32 -3
- data/test/helper.rb +7 -18
- data/test/test_actor.rb +12 -3
- data/test/test_async_op.rb +10 -10
- data/test/test_fiber.rb +84 -1
- data/test/test_fiber_scheduler.rb +1425 -20
- data/test/test_um.rb +565 -113
- data/uringmachine.gemspec +6 -5
- data/vendor/liburing/src/include/liburing/io_uring.h +1 -0
- data/vendor/liburing/src/include/liburing.h +13 -0
- data/vendor/liburing/src/liburing-ffi.map +1 -0
- data/vendor/liburing/test/bind-listen.c +175 -13
- data/vendor/liburing/test/read-write.c +4 -4
- data/vendor/liburing/test/ringbuf-read.c +4 -4
- data/vendor/liburing/test/send_recv.c +8 -7
- metadata +50 -28
- data/examples/bm_fileno.rb +0 -33
- data/examples/bm_queue.rb +0 -110
- data/examples/bm_side_running.rb +0 -83
- data/examples/bm_sqlite.rb +0 -89
- data/examples/dns_client.rb +0 -12
- /data/{examples/bm_mutex.rb → benchmark/mutex.rb} +0 -0
- /data/{examples/bm_mutex_single.rb → benchmark/mutex_single.rb} +0 -0
- /data/{examples/bm_send.rb → benchmark/send.rb} +0 -0
- /data/{examples/bm_snooze.rb → benchmark/snooze.rb} +0 -0
data/ext/um/um.c
CHANGED
@@ -1,25 +1,35 @@
-#include <float.h>
 #include "um.h"
+#include <float.h>
 #include <ruby/thread.h>
+#include <assert.h>
+#include <poll.h>
+
+#define DEFAULT_SIZE 4096
+
+inline void prepare_io_uring_params(struct io_uring_params *params, uint sqpoll_timeout_msec) {
+  memset(params, 0, sizeof(struct io_uring_params));
+  params->flags = IORING_SETUP_SUBMIT_ALL;
+  if (sqpoll_timeout_msec) {
+    params->flags |= IORING_SETUP_SQPOLL;
+    params->sq_thread_idle = sqpoll_timeout_msec;
+  }
+  else
+    params->flags |= IORING_SETUP_COOP_TASKRUN;
+}
 
-void um_setup(VALUE self, struct um *machine) {
+void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_msec) {
   memset(machine, 0, sizeof(struct um));
 
   RB_OBJ_WRITE(self, &machine->self, self);
+  RB_OBJ_WRITE(self, &machine->pending_fibers, rb_hash_new());
 
-  unsigned prepared_limit = 4096;
-  unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;
+  machine->size = (size > 0) ? size : DEFAULT_SIZE;
+  machine->sqpoll_mode = !!sqpoll_timeout_msec;
 
-  while (1) {
-    int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
-    if (likely(!ret)) break;
-
-    // if ENOMEM is returned, try with half as much entries
-    if (unlikely(ret == -ENOMEM && prepared_limit > 64))
-      prepared_limit = prepared_limit / 2;
-    else
-      rb_syserr_fail(-ret, strerror(-ret));
-  }
+  struct io_uring_params params;
+  prepare_io_uring_params(&params, sqpoll_timeout_msec);
+  int ret = io_uring_queue_init_params(machine->size, &machine->ring, &params);
+  if (ret) rb_syserr_fail(-ret, strerror(-ret));
   machine->ring_initialized = 1;
 }
 
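The new prepare_io_uring_params helper concentrates the ring setup policy in one place. A minimal sketch, based only on the code above, of the two configurations it can produce:

    struct io_uring_params params;

    prepare_io_uring_params(&params, 0);
    // params.flags: IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN
    // (no kernel SQ thread; task work runs cooperatively)

    prepare_io_uring_params(&params, 100);
    // params.flags: IORING_SETUP_SUBMIT_ALL | IORING_SETUP_SQPOLL
    // params.sq_thread_idle: 100 (SQ polling thread idles out after 100 ms)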
@@ -39,10 +49,16 @@ inline void um_teardown(struct um *machine) {
 }
 
 inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
+  DEBUG_PRINTF("-> %p um_get_sqe: op %p kind=%s unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, op, um_op_kind_name(op ? op->kind : OP_UNDEFINED),
+    machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
+  );
+
   struct io_uring_sqe *sqe;
   sqe = io_uring_get_sqe(&machine->ring);
   if (likely(sqe)) goto done;
 
+  fprintf(stderr, "!!!Failed to get SQE\n");
   um_raise_internal_error("Failed to get SQE");
 
   // TODO: retry getting SQE?
@@ -56,22 +72,78 @@ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
 done:
   sqe->user_data = (long long)op;
   sqe->flags = 0;
-  machine->unsubmitted_count++;
-  if (op) machine->pending_count++;
+  machine->metrics.ops_unsubmitted++;
+  if (op) {
+    machine->metrics.ops_pending++;
+    machine->metrics.total_ops++;
+  }
   return sqe;
 }
 
+struct um_submit_ctx {
+  struct um *machine;
+  int result;
+};
+
+// adapted from liburing/src/queue.c
+static inline bool sq_ring_needs_enter(struct um *machine) {
+  if (machine->sqpoll_mode) {
+    io_uring_smp_mb();
+    if (unlikely(IO_URING_READ_ONCE(*machine->ring.sq.kflags) & IORING_SQ_NEED_WAKEUP))
+      return true;
+  }
+  return true;
+}
+
+void *um_submit_without_gvl(void *ptr) {
+  struct um_submit_ctx *ctx = ptr;
+  ctx->result = io_uring_submit(&ctx->machine->ring);
+  return NULL;
+}
+
+inline uint um_submit(struct um *machine) {
+  DEBUG_PRINTF("-> %p um_submit: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
+    machine->metrics.total_ops
+  );
+  if (!machine->metrics.ops_unsubmitted) {
+    DEBUG_PRINTF("<- %p um_submit: no unsubmitted SQEs, early return\n", &machine->ring);
+    return 0;
+  }
+
+  struct um_submit_ctx ctx = { .machine = machine };
+  if (sq_ring_needs_enter(machine))
+    rb_thread_call_without_gvl(um_submit_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+  else
+    ctx.result = io_uring_submit(&machine->ring);
+
+  DEBUG_PRINTF("<- %p um_submit: result=%d\n", &machine->ring, ctx.result);
+
+  if (ctx.result < 0)
+    rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+
+  machine->metrics.ops_unsubmitted = 0;
+  return ctx.result;
+}
+
 static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
   struct um_op *op = (struct um_op *)cqe->user_data;
+  if (DEBUG) {
+    if (op) {
+      DEBUG_PRINTF("<- %p um_process_cqe: op %p kind %s flags %d cqe_res %d cqe_flags %d pending %d\n",
+        &machine->ring, op, um_op_kind_name(op->kind), op->flags, cqe->res, cqe->flags, machine->metrics.ops_pending
+      );
+    }
+    else {
+      DEBUG_PRINTF("<- %p um_process_cqe: op NULL cqe_res %d cqe_flags %d pending %d\n",
+        &machine->ring, cqe->res, cqe->flags, machine->metrics.ops_pending
+      );
+    }
+  }
   if (unlikely(!op)) return;
 
   if (!(cqe->flags & IORING_CQE_F_MORE))
-    machine->pending_count--;
-
-    // printf(
-    //   ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
-    //   op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
-    // );
+    machine->metrics.ops_pending--;
 
   if (op->flags & OP_F_FREE_ON_COMPLETE) {
     if (op->flags & OP_F_TRANSIENT)
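Read together, the hunks above define a simple lifecycle for the new metrics counters; in summary (not part of the source):

    // ops_unsubmitted: incremented in um_get_sqe for every SQE handed out;
    //                  reset to 0 once the SQEs reach the kernel (um_submit,
    //                  or the submit-and-wait path further down)
    // ops_pending:     incremented in um_get_sqe when the SQE carries an op;
    //                  decremented in um_process_cqe on a CQE without
    //                  IORING_CQE_F_MORE (no further CQEs expected for the op)
    // total_ops:       incremented in um_get_sqe per op-carrying SQE; monotonic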
@@ -81,9 +153,10 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
     return;
   }
 
+  op->flags |= OP_F_COMPLETED;
   if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
+  if (unlikely(op->flags & OP_F_CANCELED)) return;
 
-  op->flags |= OP_F_COMPLETED;
   if (op->flags & OP_F_TRANSIENT)
     um_op_transient_remove(machine, op);
 
@@ -108,6 +181,10 @@ static inline int cq_ring_needs_flush(struct io_uring *ring) {
 }
 
 static inline int um_process_ready_cqes(struct um *machine) {
+  DEBUG_PRINTF("-> %p um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
+  );
+
   unsigned total_count = 0;
 iterate:
   bool overflow_checked = false;
@@ -124,12 +201,19 @@ iterate:
   if (overflow_checked) goto done;
 
   if (cq_ring_needs_flush(&machine->ring)) {
-    io_uring_enter(machine->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    DEBUG_PRINTF("-> %p io_uring_enter\n", &machine->ring);
+    int ret = io_uring_enter(machine->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    DEBUG_PRINTF("<- %p io_uring_enter: result=%d\n", &machine->ring, ret);
+    if (ret < 0)
+      rb_syserr_fail(-ret, strerror(-ret));
+
     overflow_checked = true;
     goto iterate;
   }
 
 done:
+  DEBUG_PRINTF("<- %p um_process_ready_cqes: total_processed=%u\n", &machine->ring, total_count);
+
   return total_count;
 }
 
@@ -142,28 +226,61 @@ struct wait_for_cqe_ctx {
 
 void *um_wait_for_cqe_without_gvl(void *ptr) {
   struct wait_for_cqe_ctx *ctx = ptr;
-  if (ctx->machine->unsubmitted_count) {
-    ctx->machine->unsubmitted_count = 0;
+  if (ctx->machine->metrics.ops_unsubmitted) {
+    DEBUG_PRINTF("-> %p io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
+      &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
+      ctx->machine->metrics.total_ops
+    );
 
     // Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
     // interrupted with a signal. We can detect this by testing ctx->cqe for
     // NULL.
    //
    // https://github.com/axboe/liburing/issues/1280
-    int ret = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
-    ctx->result = (ret > 0 && !ctx->cqe) ? -EINTR : ret;
+    int ret = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
+    ctx->machine->metrics.ops_unsubmitted = 0;
+    DEBUG_PRINTF("<- %p io_uring_submit_and_wait_timeout: result=%d\n", &ctx->machine->ring, ret);
+    ctx->result = (ret > 0 && !ctx->cqe) ? -EINTR : ret;
   }
-  else
+  else {
+    DEBUG_PRINTF("-> %p io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
+      &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
+      ctx->machine->metrics.total_ops
+    );
     ctx->result = io_uring_wait_cqes(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
+    DEBUG_PRINTF("<- %p io_uring_wait_cqes: result=%d\n", &ctx->machine->ring, ctx->result);
+  }
   return NULL;
 }
 
+inline void um_profile_wait_cqe_pre(struct um *machine, double *time_monotonic0, VALUE *fiber) {
+  // *fiber = rb_fiber_current();
+  *time_monotonic0 = um_get_time_monotonic();
+  // double time_cpu = um_get_time_cpu();
+  // double elapsed = time_cpu - machine->metrics.time_last_cpu;
+  // um_update_fiber_time_run(fiber, time_monotonic0, elapsed);
+  // machine->metrics.time_last_cpu = time_cpu;
+}
+
+inline void um_profile_wait_cqe_post(struct um *machine, double time_monotonic0, VALUE fiber) {
+  // double time_cpu = um_get_time_cpu();
+  double elapsed = um_get_time_monotonic() - time_monotonic0;
+  // um_update_fiber_last_time(fiber, cpu_time1);
+  machine->metrics.time_total_wait += elapsed;
+  // machine->metrics.time_last_cpu = time_cpu;
+}
+
 // Waits for the given minimum number of completion entries. The wait_nr is
 // either 1 - where we wait for at least one CQE to be ready, or 0, where we
 // don't wait, and just process any CQEs that already ready.
 static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wait_nr) {
   struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL, .wait_nr = wait_nr };
+  machine->metrics.total_waits++;
+  double time_monotonic0 = 0.0;
+  VALUE fiber;
+  if (machine->profile_mode) um_profile_wait_cqe_pre(machine, &time_monotonic0, &fiber);
   rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+  if (machine->profile_mode) um_profile_wait_cqe_post(machine, time_monotonic0, fiber);
 
   if (unlikely(ctx.result < 0)) {
     // the internal calls to (maybe submit) and wait for cqes may fail with:
@@ -187,23 +304,42 @@ static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wait_nr) {
   }
 }
 
+inline void um_profile_switch(struct um *machine, VALUE next_fiber) {
+  // *current_fiber = rb_fiber_current();
+  // double time_cpu = um_get_time_cpu();
+  // double elapsed = time_cpu - machine->metrics.time_last_cpu;
+  // um_update_fiber_time_run(cur_fiber, time_cpu, elapsed);
+  // um_update_fiber_time_wait(next_fiber, time_cpu);
+  // machine->metrics.time_last_cpu = time_cpu;
+}
+
 inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
+  DEBUG_PRINTF("-> %p process_runqueue_op: op %p\n", &machine->ring, op);
+
+  machine->metrics.total_switches++;
   VALUE fiber = op->fiber;
   VALUE value = op->value;
 
   if (unlikely(op->flags & OP_F_TRANSIENT))
     um_op_free(machine, op);
 
+  if (machine->profile_mode) um_profile_switch(machine, fiber);
   VALUE ret = rb_fiber_transfer(fiber, 1, &value);
   RB_GC_GUARD(value);
   RB_GC_GUARD(ret);
   return ret;
 }
 
-inline VALUE um_fiber_switch(struct um *machine) {
+inline VALUE um_switch(struct um *machine) {
+  DEBUG_PRINTF("-> %p um_switch: unsubmitted=%d pending=%d total=%lu\n",
+    &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
+    machine->metrics.total_ops
+  );
   while (true) {
     struct um_op *op = um_runqueue_shift(machine);
     if (op) {
+      if (unlikely(op->flags & OP_F_RUNQUEUE_SKIP)) continue;
+
       // in case of a snooze, we need to prevent a situation where completions
       // are not processed because the runqueue is never empty. Theoretically,
       // we can still have a situation where multiple fibers are all doing a
@@ -212,7 +348,7 @@ inline VALUE um_fiber_switch(struct um *machine) {
       // is the op a snooze op and is this the same fiber as the current one?
       if (unlikely(op->kind == OP_SCHEDULE && op->fiber == rb_fiber_current())) {
         // are there any pending ops (i.e. waiting for completion)?
-        if (machine->pending_count > 0) {
+        if (machine->metrics.ops_pending > 0) {
           // if yes, process completions, get runqueue head, put original op
           // back on runqueue.
           // um_process_ready_cqes(machine);
@@ -231,17 +367,28 @@ inline VALUE um_fiber_switch(struct um *machine) {
     }
   }
 
-static void um_submit_cancel_op(struct um *machine, struct um_op *op) {
+inline VALUE um_yield(struct um *machine) {
+  VALUE fiber = rb_fiber_current();
+  rb_hash_aset(machine->pending_fibers, fiber, Qtrue);
+  VALUE ret = um_switch(machine);
+  rb_hash_delete(machine->pending_fibers, fiber);
+  return ret;
+}
+
+void um_cancel_op(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
   io_uring_prep_cancel64(sqe, (long long)op, 0);
 }
 
 inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
-  um_submit_cancel_op(machine, op);
-  while (true) {
-    if (um_op_completed_p(op)) break;
-    um_fiber_switch(machine);
+  um_cancel_op(machine, op);
+
+  VALUE fiber = rb_fiber_current();
+  rb_hash_aset(machine->pending_fibers, fiber, Qtrue);
+  while (!um_op_completed_p(op)) {
+    um_switch(machine);
   }
+  rb_hash_delete(machine->pending_fibers, fiber);
 }
 
 inline int um_check_completion(struct um *machine, struct um_op *op) {
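um_yield, added above, is the wrapper the remaining hunks switch all blocking ops to: it registers the current fiber in pending_fibers for the duration of the switch, so fibers parked on in-flight I/O can be told apart from merely scheduled ones. A sketch of the call-site pattern the later hunks repeat, assembled from them rather than quoted verbatim:

    struct um_op op;
    um_prep_op(machine, &op, OP_READ, 0);
    struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
    // io_uring_prep_*(sqe, ...) for the specific operation
    VALUE ret = um_yield(machine);          // parks the fiber in pending_fibers
    if (um_check_completion(machine, &op))  // cancels/raises as appropriate
      ret = INT2NUM(op.result.res);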
@@ -254,14 +401,14 @@ inline int um_check_completion(struct um *machine, struct um_op *op) {
   return 1;
 }
 
-inline VALUE um_await(struct um *machine) {
-  VALUE ret = um_fiber_switch(machine);
-  RAISE_IF_EXCEPTION(ret);
-  RB_GC_GUARD(ret);
-  return ret;
+VALUE um_wakeup(struct um *machine) {
+  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
+  io_uring_prep_nop(sqe);
+  io_uring_submit(&machine->ring);
+  return Qnil;
 }
 
-inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind, unsigned flags) {
+inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, unsigned flags) {
   memset(op, 0, sizeof(struct um_op));
   op->kind = kind;
   op->flags = flags;
@@ -299,7 +446,7 @@ VALUE um_timeout_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
 
   if (!um_op_completed_p(ctx->op)) {
-    um_submit_cancel_op(ctx->machine, ctx->op);
+    um_cancel_op(ctx->machine, ctx->op);
     ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
     um_op_transient_add(ctx->machine, ctx->op);
   }
@@ -339,7 +486,8 @@ VALUE um_sleep(struct um *machine, double duration) {
   op.ts = um_double_to_timespec(duration);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_timeout(sqe, &op.ts, 0, 0);
-  VALUE ret = um_fiber_switch(machine);
+
+  VALUE ret = um_yield(machine);
 
   if (!um_op_completed_p(&op))
     um_cancel_and_wait(machine, &op);
@@ -353,14 +501,15 @@ VALUE um_sleep(struct um *machine, double duration) {
   return ret;
 }
 
-VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset) {
+VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset, __u64 file_offset) {
   struct um_op op;
   um_prep_op(machine, &op, OP_READ, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
-  io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
+  io_uring_prep_read(sqe, fd, ptr, maxlen, file_offset);
+
+  VALUE ret = um_yield(machine);
 
-  VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op)) {
     um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
     ret = INT2NUM(op.result.res);
@@ -378,10 +527,10 @@ size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_read(sqe, fd, buffer, maxlen, -1);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op)) {
     return op.result.res;
-
   }
 
   RAISE_IF_EXCEPTION(ret);
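The new file_offset parameter follows the usual io_uring offset semantics: (__u64)-1 means read at the file's current position, as with read(2), while any other value is an absolute offset, as with pread(2); um_read_raw above keeps the old behavior by passing -1. Hypothetical calls for illustration:

    um_read(machine, fd, buffer, maxlen, 0, (__u64)-1);  // current position
    um_read(machine, fd, buffer, maxlen, 0, 4096);       // pread-style, offset 4096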
@@ -389,19 +538,21 @@ size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
   return 0;
 }
 
-VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len) {
-  struct um_op op;
-  um_prep_op(machine, &op, OP_WRITE, 0);
-  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
-
+VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
   const void *base;
   size_t size;
   um_get_buffer_bytes_for_writing(buffer, &base, &size);
   if ((len == (size_t)-1) || (len > size)) len = size;
+  if (unlikely(!len)) return INT2NUM(0);
+
+  struct um_op op;
+  um_prep_op(machine, &op, OP_WRITE, 0);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
 
-  io_uring_prep_write(sqe, fd, base, len, -1);
+  io_uring_prep_write(sqe, fd, base, len, file_offset);
+
+  VALUE ret = um_yield(machine);
 
-  VALUE ret = um_fiber_switch(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -410,19 +561,19 @@ VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len) {
   return ret;
 }
 
-VALUE um_write_async(struct um *machine, int fd, VALUE buffer) {
-  struct um_op *op = um_op_alloc(machine);
-  um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
-  RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->value, buffer);
-  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
-
+VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
   const void *base;
   size_t size;
   um_get_buffer_bytes_for_writing(buffer, &base, &size);
+  if ((len == (size_t)-1) || (len > size)) len = size;
+  if (unlikely(!len)) return INT2NUM(0);
+
+  struct um_op *op = um_op_alloc(machine);
+  um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
+  RB_OBJ_WRITE(machine->self, &op->value, buffer);
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  io_uring_prep_write(sqe, fd, base, size, -1);
+  io_uring_prep_write(sqe, fd, base, len, file_offset);
   um_op_transient_add(machine, op);
 
   return buffer;
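Both write paths now clamp len to the actual buffer size, with (size_t)-1 conventionally meaning the whole buffer, and short-circuit zero-length writes to 0 without issuing an SQE. An illustrative call (arguments hypothetical):

    // write the entire buffer at the current file position
    um_write(machine, fd, buffer, (size_t)-1, (__u64)-1);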
@@ -434,7 +585,8 @@ VALUE um_close(struct um *machine, int fd) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_close(sqe, fd);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(fd);
 
@@ -446,9 +598,6 @@ VALUE um_close(struct um *machine, int fd) {
 VALUE um_close_async(struct um *machine, int fd) {
   struct um_op *op = um_op_alloc(machine);
   um_prep_op(machine, op, OP_CLOSE_ASYNC, OP_F_FREE_ON_COMPLETE);
-  RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
   io_uring_prep_close(sqe, fd);
@@ -462,7 +611,8 @@ VALUE um_accept(struct um *machine, int fd) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -477,7 +627,8 @@ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_socket(sqe, domain, type, protocol, flags);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -492,7 +643,8 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_connect(sqe, fd, addr, addrlen);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -513,7 +665,8 @@ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
 
   io_uring_prep_send(sqe, fd, base, len, flags);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -533,7 +686,8 @@ VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
   sqe->flags |= IOSQE_BUFFER_SELECT;
   sqe->buf_group = bgid;
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -550,7 +704,8 @@ VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
 
   io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op)) {
     um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
     ret = INT2NUM(op.result.res);
@@ -567,7 +722,8 @@ VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_bind(sqe, fd, addr, addrlen);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -582,7 +738,8 @@ VALUE um_listen(struct um *machine, int fd, int backlog) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_listen(sqe, fd, backlog);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -600,7 +757,8 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
 
-  ret = um_fiber_switch(machine);
+  ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(value);
 
@@ -617,7 +775,8 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
 
-  ret = um_fiber_switch(machine);
+  ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -634,7 +793,8 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_shutdown(sqe, fd, how);
 
-  ret = um_fiber_switch(machine);
+  ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -646,9 +806,6 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
 VALUE um_shutdown_async(struct um *machine, int fd, int how) {
   struct um_op *op = um_op_alloc(machine);
   um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, OP_F_FREE_ON_COMPLETE);
-  RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
-  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
   io_uring_prep_shutdown(sqe, fd, how);
@@ -662,7 +819,8 @@ VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -677,15 +835,127 @@ VALUE um_poll(struct um *machine, int fd, unsigned mask) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   io_uring_prep_poll_add(sqe, fd, mask);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
   RAISE_IF_EXCEPTION(ret);
   RB_GC_GUARD(ret);
+  RB_GC_GUARD(op.fiber);
+  RB_GC_GUARD(op.value);
   return ret;
 }
 
+static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op *ops, VALUE fds, uint len, uint flags, uint event) {
+  for (uint i = 0; i < len; i++) {
+    struct um_op *op = ops + ((*idx)++);
+    um_prep_op(machine, op, OP_POLL, flags | OP_F_IGNORE_CANCELED);
+    struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+    VALUE fd = rb_ary_entry(fds, i);
+    RB_OBJ_WRITE(machine->self, &op->value, fd);
+    io_uring_prep_poll_add(sqe, NUM2INT(fd), event);
+  }
+}
+
+VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, uint rfds_len, uint wfds_len, uint efds_len) {
+  struct um_op op;
+  uint idx = 0;
+  if (rfds_len)
+    prepare_select_poll_ops(machine, &idx, &op, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
+  else if (wfds_len)
+    prepare_select_poll_ops(machine, &idx, &op, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
+  else if (efds_len)
+    prepare_select_poll_ops(machine, &idx, &op, efds, efds_len, OP_F_SELECT_POLLPRI, POLLPRI);
+  assert(idx == 1);
+
+  VALUE ret = um_yield(machine);
+
+  um_check_completion(machine, &op);
+  RAISE_IF_EXCEPTION(ret);
+
+  if (op.flags & OP_F_SELECT_POLLIN)
+    return rb_ary_new3(3, rb_ary_new3(1, ret), rb_ary_new(), rb_ary_new());
+  else if (op.flags & OP_F_SELECT_POLLOUT)
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new3(1, ret), rb_ary_new());
+  else
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new3(1, ret));
+
+  RB_GC_GUARD(ret);
+}
+
+VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
+  uint rfds_len = RARRAY_LEN(rfds);
+  uint wfds_len = RARRAY_LEN(wfds);
+  uint efds_len = RARRAY_LEN(efds);
+  uint total_len = rfds_len + wfds_len + efds_len;
+  if (total_len == 1)
+    return um_select_single(machine, rfds, wfds, efds, rfds_len, wfds_len, efds_len);
+
+  if (unlikely(!total_len))
+    return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new());
+
+  struct um_op *ops = malloc(sizeof(struct um_op) * total_len);
+  uint idx = 0;
+  prepare_select_poll_ops(machine, &idx, ops, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
+  prepare_select_poll_ops(machine, &idx, ops, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
+  prepare_select_poll_ops(machine, &idx, ops, efds, efds_len, OP_F_SELECT_POLLPRI, POLLPRI);
+  assert(idx == total_len);
+
+  VALUE ret = um_yield(machine);
+  if (unlikely(um_value_is_exception_p(ret))) {
+    free(ops);
+    um_raise_exception(ret);
+  }
+
+  VALUE rfds_out = rb_ary_new();
+  VALUE wfds_out = rb_ary_new();
+  VALUE efds_out = rb_ary_new();
+
+  int error_code = 0;
+  uint pending = total_len;
+  for (uint i = 0; i < total_len; i++) {
+    if (um_op_completed_p(&ops[i])) {
+      ops[i].flags |= OP_F_RUNQUEUE_SKIP;
+      pending--;
+
+      if (unlikely((ops[i].result.res < 0) && !error_code)) {
+        error_code = ops[i].result.res;
+      }
+      else {
+        if (ops[i].flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i].value);
+        if (ops[i].flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i].value);
+        if (ops[i].flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i].value);
+      }
+    }
+    else {
+      ops[i].flags |= OP_F_CANCELED;
+      um_cancel_op(machine, &ops[i]);
+    }
+  }
+
+  while (pending) {
+    um_wait_for_and_process_ready_cqes(machine, 0);
+
+    for (uint i = 0; i < total_len; i++) {
+      struct um_op *op = ops + i;
+      if (op->flags & OP_F_CANCELED && um_op_completed_p(op)) {
+        pending--;
+      }
+    }
+  }
+  free(ops);
+
+  if (error_code)
+    um_raise_on_error_result(error_code);
+
+  return rb_ary_new3(3, rfds_out, wfds_out, efds_out);
+
+  RB_GC_GUARD(rfds_out);
+  RB_GC_GUARD(wfds_out);
+  RB_GC_GUARD(efds_out);
+}
+
 VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
   struct um_op op;
   um_prep_op(machine, &op, OP_WAITID, 0);
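um_select builds a select(2)-style readiness wait out of single-shot POLL ops: one op per watched fd, a single um_yield, then a harvest-and-cancel pass, and finally a drain loop that must run before free(ops) because the kernel still holds pointers into that array. In outline (a summary of the code above, not part of the source):

    // 1. prepare_select_poll_ops: one OP_POLL per fd; op->value holds the fd object
    // 2. um_yield: park until at least one poll completes
    // 3. completed ops: push fd into rfds_out / wfds_out / efds_out
    //    still-pending ops: mark OP_F_CANCELED and issue um_cancel_op
    // 4. loop um_wait_for_and_process_ready_cqes until every canceled op
    //    has completed, then free(ops) and raise any recorded error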
@@ -694,7 +964,8 @@ VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
   siginfo_t infop;
   io_uring_prep_waitid(sqe, idtype, id, &infop, options, 0);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -706,8 +977,8 @@ VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
   );
 }
 
-VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
 #ifdef HAVE_RB_PROCESS_STATUS_NEW
+VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
   struct um_op op;
   um_prep_op(machine, &op, OP_WAITID, 0);
   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -715,7 +986,7 @@ VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
   siginfo_t infop;
   io_uring_prep_waitid(sqe, idtype, id, &infop, options | WNOWAIT, 0);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -723,10 +994,8 @@ VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
   RB_GC_GUARD(ret);
 
   return rb_process_status_new(infop.si_pid, (infop.si_status & 0xff) << 8, 0);
-#else
-  rb_raise(rb_eNotImpError, "Missing rb_process_status_new");
-#endif
 }
+#endif
 
 #define hash_set(h, sym, v) rb_hash_aset(h, ID2SYM(rb_intern(sym)), v)
 
@@ -763,7 +1032,8 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned int mask) {
   memset(&stat, 0, sizeof(stat));
   io_uring_prep_statx(sqe, dirfd, path_ptr, flags, mask, &stat);
 
-  VALUE ret = um_fiber_switch(machine);
+  VALUE ret = um_yield(machine);
+
   if (um_check_completion(machine, &op))
     ret = INT2NUM(op.result.res);
 
@@ -783,7 +1053,7 @@ VALUE accept_each_start(VALUE arg) {
   io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
 
   while (true) {
-    VALUE ret = um_fiber_switch(ctx->machine);
+    VALUE ret = um_yield(ctx->machine);
     if (!um_op_completed_p(ctx->op)) {
       RAISE_IF_EXCEPTION(ret);
       return ret;
@@ -825,6 +1095,8 @@ VALUE multishot_complete(VALUE arg) {
   if (ctx->read_buf)
     free(ctx->read_buf);
 
+  rb_hash_delete(ctx->machine->pending_fibers, ctx->op->fiber);
+
   return Qnil;
 }
 
@@ -847,7 +1119,7 @@ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
     struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
     io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
 
-    VALUE ret = um_fiber_switch(ctx->machine);
+    VALUE ret = um_yield(ctx->machine);
     if (um_op_completed_p(ctx->op)) {
       um_raise_on_error_result(ctx->op->result.res);
       if (!ctx->op->result.res) return total;
@@ -908,7 +1180,7 @@ VALUE read_recv_each_start(VALUE arg) {
   int total = 0;
 
   while (true) {
-    VALUE ret = um_fiber_switch(ctx->machine);
+    VALUE ret = um_yield(ctx->machine);
     if (!um_op_completed_p(ctx->op)) {
       RAISE_IF_EXCEPTION(ret);
       return ret;
@@ -959,7 +1231,7 @@ VALUE periodically_start(VALUE arg) {
   io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
 
   while (true) {
-    VALUE ret = um_fiber_switch(ctx->machine);
+    VALUE ret = um_switch(ctx->machine);
     if (!um_op_completed_p(ctx->op)) {
       RAISE_IF_EXCEPTION(ret);
       return ret;
@@ -995,3 +1267,40 @@ VALUE um_periodically(struct um *machine, double interval) {
   struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
   return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
+
+extern VALUE SYM_size;
+extern VALUE SYM_total_ops;
+extern VALUE SYM_total_switches;
+extern VALUE SYM_total_waits;
+extern VALUE SYM_ops_pending;
+extern VALUE SYM_ops_unsubmitted;
+extern VALUE SYM_ops_runqueue;
+extern VALUE SYM_ops_free;
+extern VALUE SYM_ops_transient;
+extern VALUE SYM_time_total_cpu;
+extern VALUE SYM_time_total_wait;
+
+VALUE um_metrics(struct um *machine, struct um_metrics *metrics) {
+  VALUE hash = rb_hash_new();
+
+  rb_hash_aset(hash, SYM_size, UINT2NUM(machine->size));
+
+  rb_hash_aset(hash, SYM_total_ops, ULONG2NUM(metrics->total_ops));
+  rb_hash_aset(hash, SYM_total_switches, ULONG2NUM(metrics->total_switches));
+  rb_hash_aset(hash, SYM_total_waits, ULONG2NUM(metrics->total_waits));
+
+  rb_hash_aset(hash, SYM_ops_pending, UINT2NUM(metrics->ops_pending));
+  rb_hash_aset(hash, SYM_ops_unsubmitted, UINT2NUM(metrics->ops_unsubmitted));
+  rb_hash_aset(hash, SYM_ops_runqueue, UINT2NUM(metrics->ops_runqueue));
+  rb_hash_aset(hash, SYM_ops_free, UINT2NUM(metrics->ops_free));
+  rb_hash_aset(hash, SYM_ops_transient, UINT2NUM(metrics->ops_transient));
+
+  if (machine->profile_mode) {
+    double total_cpu = um_get_time_cpu() - metrics->time_first_cpu;
+    rb_hash_aset(hash, SYM_time_total_cpu, DBL2NUM(total_cpu));
+    rb_hash_aset(hash, SYM_time_total_wait, DBL2NUM(metrics->time_total_wait));
+  }
+
+  return hash;
+  RB_GC_GUARD(hash);
+}