uringmachine 0.4 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -1
- data/CHANGELOG.md +16 -0
- data/README.md +44 -1
- data/TODO.md +12 -3
- data/examples/bm_snooze.rb +89 -0
- data/examples/bm_sqlite.rb +89 -0
- data/examples/bm_write.rb +56 -0
- data/examples/dns_client.rb +12 -0
- data/examples/http_server.rb +42 -43
- data/examples/pg.rb +85 -0
- data/examples/server_client.rb +64 -0
- data/examples/snooze.rb +44 -0
- data/examples/stream.rb +85 -0
- data/examples/write_dev_null.rb +16 -0
- data/ext/um/extconf.rb +81 -14
- data/ext/um/um.c +468 -414
- data/ext/um/um.h +149 -40
- data/ext/um/um_async_op.c +40 -0
- data/ext/um/um_async_op_class.c +136 -0
- data/ext/um/um_buffer.c +49 -0
- data/ext/um/um_class.c +176 -44
- data/ext/um/um_const.c +174 -9
- data/ext/um/um_ext.c +8 -0
- data/ext/um/um_mutex_class.c +47 -0
- data/ext/um/um_op.c +89 -111
- data/ext/um/um_queue_class.c +58 -0
- data/ext/um/um_ssl.c +850 -0
- data/ext/um/um_ssl.h +22 -0
- data/ext/um/um_ssl_class.c +138 -0
- data/ext/um/um_sync.c +273 -0
- data/ext/um/um_utils.c +1 -1
- data/lib/uringmachine/dns_resolver.rb +84 -0
- data/lib/uringmachine/ssl/context_builder.rb +96 -0
- data/lib/uringmachine/ssl.rb +394 -0
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +27 -3
- data/supressions/ruby.supp +71 -0
- data/test/helper.rb +6 -0
- data/test/test_async_op.rb +119 -0
- data/test/test_ssl.rb +155 -0
- data/test/test_um.rb +464 -47
- data/uringmachine.gemspec +3 -2
- data/vendor/liburing/.gitignore +5 -0
- data/vendor/liburing/CHANGELOG +1 -0
- data/vendor/liburing/configure +32 -0
- data/vendor/liburing/examples/Makefile +1 -0
- data/vendor/liburing/examples/reg-wait.c +159 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
- data/vendor/liburing/src/include/liburing.h +28 -2
- data/vendor/liburing/src/int_flags.h +10 -3
- data/vendor/liburing/src/liburing-ffi.map +13 -2
- data/vendor/liburing/src/liburing.map +9 -0
- data/vendor/liburing/src/queue.c +25 -16
- data/vendor/liburing/src/register.c +73 -4
- data/vendor/liburing/src/setup.c +46 -18
- data/vendor/liburing/src/setup.h +6 -0
- data/vendor/liburing/test/Makefile +7 -0
- data/vendor/liburing/test/cmd-discard.c +427 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
- data/vendor/liburing/test/file-exit-unreg.c +48 -0
- data/vendor/liburing/test/io_uring_passthrough.c +2 -0
- data/vendor/liburing/test/io_uring_register.c +13 -2
- data/vendor/liburing/test/napi-test.c +1 -1
- data/vendor/liburing/test/no-mmap-inval.c +1 -1
- data/vendor/liburing/test/read-mshot-empty.c +2 -0
- data/vendor/liburing/test/read-mshot-stdin.c +121 -0
- data/vendor/liburing/test/read-mshot.c +6 -0
- data/vendor/liburing/test/recvsend_bundle.c +2 -2
- data/vendor/liburing/test/reg-fd-only.c +1 -1
- data/vendor/liburing/test/reg-wait.c +251 -0
- data/vendor/liburing/test/regbuf-clone.c +458 -0
- data/vendor/liburing/test/resize-rings.c +643 -0
- data/vendor/liburing/test/rsrc_tags.c +1 -1
- data/vendor/liburing/test/sqpoll-sleep.c +39 -8
- data/vendor/liburing/test/sqwait.c +136 -0
- data/vendor/liburing/test/sync-cancel.c +8 -1
- data/vendor/liburing/test/timeout.c +13 -8
- metadata +52 -8
- data/examples/http_server_multishot.rb +0 -57
- data/examples/http_server_simpler.rb +0 -34
data/ext/um/um.c
CHANGED
```diff
@@ -1,24 +1,13 @@
 #include "um.h"
 #include "ruby/thread.h"
 
-void um_setup(struct um *machine) {
-  machine
-
-  machine->
-  machine->pending_count = 0;
-  machine->runqueue_head = NULL;
-  machine->runqueue_tail = NULL;
-  machine->op_freelist = NULL;
-  machine->result_freelist = NULL;
+void um_setup(VALUE self, struct um *machine) {
+  memset(machine, 0, sizeof(struct um));
+
+  RB_OBJ_WRITE(self, &machine->self, self);
 
   unsigned prepared_limit = 4096;
-  unsigned flags =
-#ifdef HAVE_IORING_SETUP_SUBMIT_ALL
-  flags |= IORING_SETUP_SUBMIT_ALL;
-#endif
-#ifdef HAVE_IORING_SETUP_COOP_TASKRUN
-  flags |= IORING_SETUP_COOP_TASKRUN;
-#endif
+  unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;
 
   while (1) {
     int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
```
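The new `um_setup` zeroes the whole struct in one `memset` and registers the machine's back-reference with `RB_OBJ_WRITE` rather than assigning fields one by one. A minimal sketch of the write-barrier pattern this relies on; the struct and function names below are illustrative, not part of the gem:

```c
// Hedged sketch: when C memory owned by a Ruby object stores VALUE
// references, a plain assignment would bypass the generational GC's write
// barrier. RB_OBJ_WRITE performs the store *and* records the owner->child
// edge for the GC. `struct holder` / `holder_store` are assumed names.
#include <ruby.h>

struct holder {
  VALUE self;   // back-reference to the wrapping Ruby object
  VALUE fiber;  // a VALUE slot kept alive via the barrier
};

static void holder_store(VALUE owner, struct holder *h, VALUE fiber) {
  RB_OBJ_WRITE(owner, &h->fiber, fiber);  // barrier-aware store
}
```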
```diff
@@ -45,11 +34,10 @@ inline void um_teardown(struct um *machine) {
   io_uring_queue_exit(&machine->ring);
   machine->ring_initialized = 0;
 
-
-  um_free_op_linked_list(machine, machine->runqueue_head);
+  um_free_buffer_linked_list(machine);
 }
 
-
+inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe;
   sqe = io_uring_get_sqe(&machine->ring);
   if (likely(sqe)) goto done;
```
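The surviving context lines (`if (likely(sqe)) goto done;`) show that `um_get_sqe` has a fallback path for when the submission ring is full. The fallback itself is elided from this extraction, so the following is only a plausible sketch of that idea in plain liburing terms, not a copy of the gem's code; `get_sqe_retrying` is an assumed name:

```c
// Assumed sketch: io_uring_get_sqe() returns NULL when the SQ ring is
// full; submitting the queued SQEs frees slots so the call can be retried.
#include <liburing.h>

static struct io_uring_sqe *get_sqe_retrying(struct io_uring *ring) {
  struct io_uring_sqe *sqe;
  while (!(sqe = io_uring_get_sqe(ring)))
    io_uring_submit(ring);  // flush pending SQEs to the kernel, then retry
  return sqe;
}
```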
```diff
@@ -68,92 +56,44 @@ done:
   sqe->user_data = (long long)op;
   sqe->flags = 0;
   machine->unsubmitted_count++;
+  if (op) machine->pending_count++;
   return sqe;
 }
 
-struct
-  struct um *machine;
-  struct io_uring_cqe *cqe;
-  int result;
-};
-
-void *um_wait_for_cqe_without_gvl(void *ptr) {
-  struct wait_for_cqe_ctx *ctx = ptr;
-  if (ctx->machine->unsubmitted_count) {
-    ctx->machine->unsubmitted_count = 0;
-    ctx->result = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
-  }
-  else
-    ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
-  return NULL;
-}
-
-inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
-  op->cqe_result = cqe->res;
-  op->cqe_flags = cqe->flags;
-  op->state = OP_completed;
-  um_runqueue_push(machine, op);
-}
-
-inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
-  if (!op->results_head) {
-    // if no results are ready yet, schedule the corresponding fiber
-    struct um_op *op2 = um_op_checkout(machine);
-    op2->state = OP_schedule;
-    op2->fiber = op->fiber;
-    op2->resume_value = Qnil;
-    um_runqueue_push(machine, op2);
-  }
-  um_op_result_push(machine, op, cqe->res, cqe->flags);
-
-  if (!(cqe->flags & IORING_CQE_F_MORE))
-    op->state = OP_completed;
-}
-
-inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
+static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
   struct um_op *op = (struct um_op *)cqe->user_data;
   if (unlikely(!op)) return;
 
-
-
+  if (!(cqe->flags & IORING_CQE_F_MORE))
+    machine->pending_count--;
 
-
-
-
-
-      break;
-    }
-    if (op->is_multishot)
-      um_handle_submitted_op_cqe_multi(machine, op, cqe);
-    else
-      um_handle_submitted_op_cqe_single(machine, op, cqe);
-    break;
-  case OP_abandonned:
-    // op has been abandonned by the I/O method, so we need to cleanup (check
-    // the op in to the free list).
-    um_op_checkin(machine, op);
-    break;
-  default:
-    // TODO: invalid state, should raise!
-  }
-}
+  // printf(
+  //   ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
+  //   op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
+  // );
 
-
-  struct wait_for_cqe_ctx ctx = {
-    .machine = machine,
-    .cqe = NULL
-  };
+  if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
 
-
-  if (
-
+  op->flags |= OP_F_COMPLETED;
+  if (op->flags & OP_F_TRANSIENT)
+    um_op_transient_remove(machine, op);
+
+  if (op->flags & OP_F_MULTISHOT) {
+    um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
+    if (op->multishot_result_count > 1)
+      return;
   }
-
-
+  else {
+    op->result.res = cqe->res;
+    op->result.flags = cqe->flags;
+  }
+
+  if (!(op->flags & OP_F_ASYNC))
+    um_runqueue_push(machine, op);
 }
 
 // copied from liburing/queue.c
-static inline
+static inline int cq_ring_needs_flush(struct io_uring *ring) {
   return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
```
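The rewritten `um_process_cqe` keys everything off CQE flags: `IORING_CQE_F_MORE` marks a multishot completion that leaves the op in flight, and only the final CQE decrements `pending_count`. The same rule in a standalone liburing sketch (function and counter names are illustrative):

```c
// Sketch of the completion rule above, in plain liburing terms: a
// multishot op emits several CQEs, and only a CQE *without*
// IORING_CQE_F_MORE terminates it, so that is the only point where a
// pending-operation counter may be decremented.
#include <liburing.h>

static void drain_cqes(struct io_uring *ring, unsigned *pending) {
  struct io_uring_cqe *cqe;
  unsigned head, seen = 0;

  io_uring_for_each_cqe(ring, head, cqe) {
    if (!(cqe->flags & IORING_CQE_F_MORE))
      (*pending)--;  // final CQE for this submission
    seen++;
  }
  io_uring_cq_advance(ring, seen);  // consume everything visited
}
```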
```diff
@@ -183,101 +123,117 @@ done:
   return total_count;
 }
 
-
-
-
+struct wait_for_cqe_ctx {
+  struct um *machine;
+  struct io_uring_cqe *cqe;
+  int result;
+};
+
+void *um_wait_for_cqe_without_gvl(void *ptr) {
+  struct wait_for_cqe_ctx *ctx = ptr;
+  if (ctx->machine->unsubmitted_count) {
+    ctx->machine->unsubmitted_count = 0;
+
+    // Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
+    // interrupted with a signal. We can detect this by testing ctx->cqe for
+    // NULL.
+    //
+    // https://github.com/axboe/liburing/issues/1280
+    int res = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
+    ctx->result = (res > 0 && !ctx->cqe) ? -EINTR : res;
+  }
+  else
+    ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
+  return NULL;
 }
 
-inline
-struct
-
-
-
-
-
-
-
-
-    unlikely(
-      first_iteration && machine->unsubmitted_count &&
-      machine->runqueue_head &&
-      machine->runqueue_head->fiber == rb_fiber_current()
-    )
-  ) {
-    io_uring_submit(&machine->ring);
+static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
+  struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL };
+  rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+
+  if (unlikely(ctx.result < 0 && ctx.result != -EINTR))
+    rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+
+  if (ctx.cqe) {
+    um_process_cqe(machine, ctx.cqe);
+    io_uring_cq_advance(&machine->ring, 1);
     um_process_ready_cqes(machine);
   }
-
+}
 
-
-
-
-  if (op->state == OP_schedule)
-    um_op_checkin(machine, op);
+inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
+  VALUE fiber = op->fiber;
+  VALUE value = op->value;
 
-
-
-  return v;
-}
+  if (unlikely(op->flags & OP_F_TRANSIENT))
+    um_op_free(machine, op);
 
-
-  goto loop;
+  return rb_fiber_transfer(fiber, 1, &value);
 }
 
-
+inline VALUE um_fiber_switch(struct um *machine) {
+  while (true) {
+    struct um_op *op = um_runqueue_shift(machine);
+    if (op)
+      return process_runqueue_op(machine, op);
+
+    um_wait_for_and_process_ready_cqes(machine);
+  }
+}
+
+void um_submit_cancel_op(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
   io_uring_prep_cancel64(sqe, (long long)op, 0);
 }
 
-
-  op
-
-
-
-  if (unlikely(is_exception && op->state == OP_submitted)) {
-    um_cancel_op(machine, op);
-    op->state = OP_abandonned;
+inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
+  um_submit_cancel_op(machine, op);
+  while (true) {
+    um_fiber_switch(machine);
+    if (um_op_completed_p(op)) break;
   }
-
-
-
-
-
+}
+
+inline int um_check_completion(struct um *machine, struct um_op *op) {
+  if (!um_op_completed_p(op)) {
+    um_cancel_and_wait(machine, op);
+    return 0;
   }
 
-
-  return
+  um_raise_on_error_result(op->result.res);
+  return 1;
 }
 
 inline VALUE um_await(struct um *machine) {
   VALUE v = um_fiber_switch(machine);
-  return
+  return raise_if_exception(v);
 }
 
-inline void
-
-  op->
-
-
-
+inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind) {
+  memset(op, 0, sizeof(struct um_op));
+  op->kind = kind;
+  switch (kind) {
+    case OP_ACCEPT_MULTISHOT:
+    case OP_READ_MULTISHOT:
+    case OP_RECV_MULTISHOT:
+      op->flags |= OP_F_MULTISHOT;
+    default:
+  }
+  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+  op->value = Qnil;
 }
 
-inline void
-  struct um_op *op =
-
-
-
-
-
-
-  op->state = OP_schedule;
-  op->fiber = fiber;
-  op->resume_value = value;
-  um_runqueue_unshift(machine, op);
-}
+inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
+  struct um_op *op = um_op_alloc(machine);
+  memset(op, 0, sizeof(struct um_op));
+  op->kind = OP_SCHEDULE;
+  op->flags = OP_F_TRANSIENT;
+  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
+  RB_OBJ_WRITE(machine->self, &op->value, value);
+  um_runqueue_push(machine, op);
 }
 
-struct
+struct op_ctx {
   struct um *machine;
   struct um_op *op;
   int fd;
```
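The new comment in `um_wait_for_cqe_without_gvl` documents a real liburing pitfall (issue #1280): `io_uring_submit_and_wait_timeout` can return a positive submit count with no CQE delivered when a signal interrupts the wait. A hedged sketch of the same NULL-CQE-to-`-EINTR` translation in isolation; `submit_and_wait_one` is an assumed wrapper name:

```c
// Sketch of the signal-interruption workaround referenced above:
// a positive return with no CQE delivered means the wait was interrupted,
// so a NULL CQE pointer is mapped to -EINTR by hand.
#include <errno.h>
#include <liburing.h>

static int submit_and_wait_one(struct io_uring *ring, struct io_uring_cqe **cqe) {
  *cqe = NULL;
  int res = io_uring_submit_and_wait_timeout(ring, cqe, 1, NULL, NULL);
  return (res > 0 && !*cqe) ? -EINTR : res;
}
```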
```diff
@@ -285,20 +241,18 @@ struct op_ensure_ctx {
 
   void *read_buf;
   int read_maxlen;
+  struct __kernel_timespec ts;
+  int flags;
 };
 
 VALUE um_timeout_ensure(VALUE arg) {
-  struct
+  struct op_ctx *ctx = (struct op_ctx *)arg;
 
-  if (ctx->op
-
-
-
-    ctx->op->state == OP_abandonned;
+  if (!um_op_completed_p(ctx->op)) {
+    um_submit_cancel_op(ctx->machine, ctx->op);
+    ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
+    um_op_transient_add(ctx->machine, ctx->op);
   }
-  else
-    // completed, so can be checked in
-    um_op_checkin(ctx->machine, ctx->op);
 
   return Qnil;
 }
```
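`um_timeout_ensure` no longer checks ops back into a freelist: a still-pending op is cancelled and parked as transient, so the kernel's eventual `-ECANCELED` CQE can be ignored safely instead of landing on freed memory. A sketch of that lifetime rule with plain liburing calls; everything except the liburing functions is an illustrative name:

```c
// Sketch: an op still in flight must not be freed (the kernel may still
// write a CQE for it). Submit a cancel keyed by user_data and flag the op
// so the eventual -ECANCELED completion is dropped.
#include <stdint.h>
#include <liburing.h>

struct pending_op { int completed; int ignore_canceled; };

static void cancel_if_pending(struct io_uring *ring, struct pending_op *op) {
  if (op->completed) return;                             // already done
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  io_uring_prep_cancel64(sqe, (__u64)(uintptr_t)op, 0);  // match by user_data
  op->ignore_canceled = 1;                               // drop -ECANCELED later
  io_uring_submit(ring);
}
```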
```diff
@@ -307,343 +261,443 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   static ID ID_new = 0;
   if (!ID_new) ID_new = rb_intern("new");
 
-  struct um_op *op =
+  struct um_op *op = um_op_alloc(machine);
+  um_prep_op(machine, op, OP_TIMEOUT);
   op->ts = um_double_to_timespec(NUM2DBL(interval));
+  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+  RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
   io_uring_prep_timeout(sqe, &op->ts, 0, 0);
-  op->state = OP_submitted;
-  op->fiber = rb_fiber_current();
-  op->resume_value = rb_funcall(class, ID_new, 0);
 
-  struct
+  struct op_ctx ctx = { .machine = machine, .op = op };
   return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
 }
 
-
-
-
+/*******************************************************************************
+                           blocking singleshot ops
+*******************************************************************************/
 
-
-  struct um_op
-  op
-
-
-
-
-  op->state = OP_submitted;
+VALUE um_sleep(struct um *machine, double duration) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_SLEEP);
+  op.ts = um_double_to_timespec(duration);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_timeout(sqe, &op.ts, 0, 0);
+  VALUE ret = um_fiber_switch(machine);
 
-
+  if (!um_op_completed_p(&op))
+    um_cancel_and_wait(machine, &op);
+  else {
+    if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
+    ret = DBL2NUM(duration);
+  }
 
-
-
-  return Qnil;
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
 inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
-  struct um_op
-
-
-  __u32 flags = 0;
-
+  struct um_op op;
+  um_prep_op(machine, &op, OP_READ);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
   void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
   io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
-
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op)) {
+    um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
+    ret = INT2NUM(op.result.res);
+
+  }
 
-
+  RB_GC_GUARD(buffer);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
+}
 
-
-
-
-
+VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_WRITE);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  const int str_len = RSTRING_LEN(str);
+  if (len > str_len) len = str_len;
+
+  io_uring_prep_write(sqe, fd, RSTRING_PTR(str), len, -1);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+
+  RB_GC_GUARD(str);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-  struct
+VALUE um_close(struct um *machine, int fd) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_CLOSE);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_close(sqe, fd);
 
-
-
-
-      break;
-    case OP_completed:
-      um_op_checkin(ctx->machine, ctx->op);
-      break;
-    default:
-  }
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(fd);
 
-
-  return
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-
-  struct um_op
-
+VALUE um_accept(struct um *machine, int fd) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_ACCEPT);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
 
-
-
-
-    io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
-    op->is_multishot = 1;
-  }
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
 
-
-
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-
-struct
-
-
-
+VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_SOCKET);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_socket(sqe, domain, type, protocol, flags);
 
-
-
-
-
-
-
-    total += result;
-    VALUE buf = rb_str_new(ctx->read_buf, result);
-    rb_yield(buf);
-    um_op_checkin(ctx->machine, ctx->op);
-  }
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
+VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_CONNECT);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_connect(sqe, fd, addr, addrlen);
 
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
 
-
-
-
-  __s32 bad_result = 0;
-  int eof = 0;
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
+}
 
-
-
-
-
-
-    if (result == 0) {
-      eof = 1;
-      break;
-    }
+VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_SEND);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
 
-
-
-
-  }
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
 
-
-
+  RB_GC_GUARD(buffer);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
+}
 
-
-
-
-
-
-
-
-
-
-
+VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_RECV);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
+  io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op)) {
+    um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
+    ret = INT2NUM(op.result.res);
   }
-  if (bad_result)
-    um_raise_on_system_error(bad_result);
 
-
+  RB_GC_GUARD(buffer);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-  struct
-
-
-
+VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_BIND);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_bind(sqe, fd, addr, addrlen);
 
-
-
-
-    rb_raise(rb_eRuntimeError, "no result found!\n");
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
 
-
-
-}
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-struct
-
+VALUE um_listen(struct um *machine, int fd, int backlog) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_BIND);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_listen(sqe, fd, backlog);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-
-
-  __s32 result = 0;
-  __u32 flags = 0;
+VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
+  VALUE ret = Qnil;
+  int value;
 
-
-
+#ifdef HAVE_IO_URING_PREP_CMD_SOCK
+  struct um_op op;
+  um_prep_op(machine, &op, OP_GETSOCKOPT);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
 
-
+  ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(value);
+#else
+  socklen_t nvalue = sizeof(value);
+  int res = getsockopt(fd, level, opt, &value, &nvalue);
+  if (res)
+    rb_syserr_fail(errno, strerror(errno));
+  ret = INT2NUM(value);
+#endif
 
-
-
-  return INT2FIX(result);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-
-  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  __s32 result = 0;
-  __u32 flags = 0;
+VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
+  VALUE ret = Qnil;
 
-
-
+#ifdef HAVE_IO_URING_PREP_CMD_SOCK
+  struct um_op op;
+  um_prep_op(machine, &op, OP_GETSOCKOPT);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
 
-
+  ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+#else
+  int res = setsockopt(fd, level, opt, &value, sizeof(value));
+  if (res)
+    rb_syserr_fail(errno, strerror(errno));
+  ret = INT2NUM(0);
+#endif
 
-
-
-  return INT2FIX(fd);
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
 }
 
-VALUE
-  struct um_op
-
-  struct
-
-
-
+VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_BIND);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+  io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
+
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
+
+  RB_GC_GUARD(ret);
+  return raise_if_exception(ret);
+}
+
+VALUE um_waitpid(struct um *machine, int pid, int options) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_BIND);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+
+  siginfo_t infop;
+  io_uring_prep_waitid(sqe, P_PID, pid, &infop, options, 0);
 
-
-
+  VALUE ret = um_fiber_switch(machine);
+  if (um_check_completion(machine, &op))
+    ret = INT2NUM(op.result.res);
 
-
+  RB_GC_GUARD(ret);
+  raise_if_exception(ret);
 
-
-  um_raise_on_system_error(result);
-  return INT2FIX(result);
+  return rb_ary_new_from_args(2, INT2NUM(infop.si_pid), INT2NUM(infop.si_status));
 }
 
-
-
-
-  __u32 flags = 0;
+/*******************************************************************************
+                              multishot ops
+*******************************************************************************/
 
-
-
-
-
-
-
+VALUE accept_each_begin(VALUE arg) {
+  struct op_ctx *ctx = (struct op_ctx *)arg;
+  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+  io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
+
+  while (true) {
+    VALUE ret = um_fiber_switch(ctx->machine);
+    if (!um_op_completed_p(ctx->op))
+      return raise_if_exception(ret);
 
-
-
-
-
-
+    int more = false;
+    struct um_op_result *result = &ctx->op->result;
+    while (result) {
+      more = (result->flags & IORING_CQE_F_MORE);
+      if (result->res < 0) {
+        um_op_multishot_results_clear(ctx->machine, ctx->op);
         return Qnil;
+      }
+      rb_yield(INT2NUM(result->res));
+      result = result->next;
     }
+    um_op_multishot_results_clear(ctx->machine, ctx->op);
+    if (more)
+      ctx->op->flags &= ~OP_F_COMPLETED;
+    else
+      break;
   }
-}
-
-VALUE um_accept_each(struct um *machine, int fd) {
-  struct um_op *op = um_op_checkout(machine);
-  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);
-  op->state = OP_submitted;
-  op->is_multishot = 1;
 
-
-  return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+  return Qnil;
 }
 
-VALUE
-struct
-
-
+VALUE multishot_ensure(VALUE arg) {
+  struct op_ctx *ctx = (struct op_ctx *)arg;
+  if (ctx->op->multishot_result_count) {
+    int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
+    if (more)
+      ctx->op->flags &= ~OP_F_COMPLETED;
+    um_op_multishot_results_clear(ctx->machine, ctx->op);
+  }
+  if (!um_op_completed_p(ctx->op))
+    um_cancel_and_wait(ctx->machine, ctx->op);
 
-
-
+  if (ctx->read_buf)
+    free(ctx->read_buf);
 
-
+  return Qnil;
+}
 
-
-
-
+VALUE um_accept_each(struct um *machine, int fd) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
+
+  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
+  return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
 }
 
-
-struct
-
-
+int um_read_each_singleshot_loop(struct op_ctx *ctx) {
+  struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
+  ctx->read_maxlen = desc->buf_size;
+  ctx->read_buf = malloc(desc->buf_size);
+  int total = 0;
 
-
-
+  while (1) {
+    um_prep_op(ctx->machine, ctx->op, OP_READ);
+    struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+    io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
 
-
+    VALUE ret = um_fiber_switch(ctx->machine);
+    if (um_op_completed_p(ctx->op)) {
+      um_raise_on_error_result(ctx->op->result.res);
+      if (!ctx->op->result.res) return total;
 
-
-
-
+      VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
+      total += ctx->op->result.res;
+      rb_yield(buf);
+      RB_GC_GUARD(buf);
+    }
+    else
+      return raise_if_exception(ret);
+  }
 }
 
-
-
-
-
+// // returns true if more results are expected
+int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
+  if (result->res == 0)
+    return false;
 
-
-
+  *total += result->res;
+  VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result->res, result->flags);
+  rb_yield(buf);
+  RB_GC_GUARD(buf);
 
-
+  // TTY devices might not support multishot reads:
+  // https://github.com/axboe/liburing/issues/1185. We detect this by checking
+  // if the F_MORE flag is absent, then switch to single shot mode.
+  if (unlikely(!(result->flags & IORING_CQE_F_MORE))) {
+    *total += um_read_each_singleshot_loop(ctx);
+    return false;
+  }
 
-
-  um_raise_on_system_error(result);
-  return INT2FIX(result);
+  return true;
 }
 
-
-
-
-
-
-
-
-
+void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
+  switch (ctx->op->kind) {
+    case OP_READ_MULTISHOT:
+      io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
+      return;
+    case OP_RECV_MULTISHOT:
+      io_uring_prep_recv_multishot(sqe, ctx->fd, NULL, 0, 0);
+      sqe->buf_group = ctx->bgid;
+      sqe->flags |= IOSQE_BUFFER_SELECT;
+      return;
+    default:
+      return;
+  }
+}
 
-
+VALUE read_recv_each_begin(VALUE arg) {
+  struct op_ctx *ctx = (struct op_ctx *)arg;
+  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+  read_recv_each_prep(sqe, ctx);
+  int total = 0;
 
-
-
-
-
-  }
+  while (true) {
+    VALUE ret = um_fiber_switch(ctx->machine);
+    if (!um_op_completed_p(ctx->op))
+      return raise_if_exception(ret);
 
-
-
-
-
+    int more = false;
+    struct um_op_result *result = &ctx->op->result;
+    while (result) {
+      um_raise_on_error_result(result->res);
 
-
-
+      more = (result->flags & IORING_CQE_F_MORE);
+      if (!read_recv_each_multishot_process_result(ctx, result, &total))
+        return Qnil;
 
-
+      // rb_yield(INT2NUM(result->res));
+      result = result->next;
+    }
+    um_op_multishot_results_clear(ctx->machine, ctx->op);
+    if (more)
+      ctx->op->flags &= ~OP_F_COMPLETED;
+    else
+      break;
+  }
 
-
-  um_raise_on_system_error(result);
-  return INT2FIX(result);
+  return Qnil;
 }
 
-VALUE
-  struct um_op
-
-  int result = 0;
+VALUE um_read_each(struct um *machine, int fd, int bgid) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_READ_MULTISHOT);
 
-
-
+  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
+  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+}
 
-
+VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_RECV_MULTISHOT);
 
-
-
-
-}
+  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
+  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+}
```
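The multishot section converges on one loop shape: yield each accumulated result, then re-arm the operation when the kernel drops `IORING_CQE_F_MORE`. A standalone liburing sketch of the accept variant that `um_accept_each` builds on (no Ruby; `accept_loop` is an assumed wrapper name):

```c
// Sketch: one multishot-accept SQE yields a CQE per incoming connection
// until the kernel clears IORING_CQE_F_MORE, at which point the accept
// must be re-armed. This mirrors the `more` handling in the diff above.
#include <liburing.h>
#include <stdbool.h>
#include <unistd.h>

static void accept_loop(struct io_uring *ring, int listen_fd) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
  io_uring_submit(ring);

  while (true) {
    struct io_uring_cqe *cqe;
    if (io_uring_wait_cqe(ring, &cqe) < 0) break;
    int conn_fd = cqe->res;
    bool more = cqe->flags & IORING_CQE_F_MORE;
    io_uring_cqe_seen(ring, cqe);

    if (conn_fd < 0) break;  // error result: stop accepting
    close(conn_fd);          // a real server would handle the connection here

    if (!more) {             // kernel ended the multishot: re-arm it
      sqe = io_uring_get_sqe(ring);
      io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
      io_uring_submit(ring);
    }
  }
}
```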