uringmachine 0.3 → 0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -1
- data/CHANGELOG.md +23 -0
- data/README.md +128 -0
- data/TODO.md +14 -0
- data/examples/bm_snooze.rb +89 -0
- data/examples/bm_write.rb +56 -0
- data/examples/dns_client.rb +12 -0
- data/examples/echo_server.rb +18 -40
- data/examples/http_server.rb +42 -43
- data/examples/inout.rb +19 -0
- data/examples/nc.rb +36 -0
- data/examples/server_client.rb +64 -0
- data/examples/snooze.rb +44 -0
- data/examples/write_dev_null.rb +16 -0
- data/ext/um/extconf.rb +24 -23
- data/ext/um/um.c +524 -278
- data/ext/um/um.h +146 -44
- data/ext/um/um_buffer.c +49 -0
- data/ext/um/um_class.c +217 -106
- data/ext/um/um_const.c +213 -0
- data/ext/um/um_ext.c +4 -0
- data/ext/um/um_mutex_class.c +47 -0
- data/ext/um/um_op.c +86 -114
- data/ext/um/um_queue_class.c +58 -0
- data/ext/um/um_sync.c +273 -0
- data/ext/um/um_utils.c +49 -4
- data/lib/uringmachine/dns_resolver.rb +84 -0
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +28 -0
- data/supressions/ruby.supp +71 -0
- data/test/helper.rb +8 -0
- data/test/test_um.rb +685 -46
- data/vendor/liburing/.github/workflows/build.yml +29 -1
- data/vendor/liburing/.gitignore +6 -0
- data/vendor/liburing/CHANGELOG +16 -0
- data/vendor/liburing/CONTRIBUTING.md +165 -0
- data/vendor/liburing/configure +64 -0
- data/vendor/liburing/examples/Makefile +9 -1
- data/vendor/liburing/examples/kdigest.c +405 -0
- data/vendor/liburing/examples/proxy.c +75 -8
- data/vendor/liburing/examples/reg-wait.c +159 -0
- data/vendor/liburing/liburing.pc.in +1 -1
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/Makefile +16 -2
- data/vendor/liburing/src/include/liburing/io_uring.h +77 -0
- data/vendor/liburing/src/include/liburing/sanitize.h +39 -0
- data/vendor/liburing/src/include/liburing.h +59 -6
- data/vendor/liburing/src/int_flags.h +10 -3
- data/vendor/liburing/src/liburing-ffi.map +16 -0
- data/vendor/liburing/src/liburing.map +10 -0
- data/vendor/liburing/src/queue.c +28 -16
- data/vendor/liburing/src/register.c +106 -1
- data/vendor/liburing/src/sanitize.c +176 -0
- data/vendor/liburing/src/setup.c +47 -19
- data/vendor/liburing/src/setup.h +6 -0
- data/vendor/liburing/test/35fa71a030ca.c +7 -0
- data/vendor/liburing/test/500f9fbadef8.c +2 -0
- data/vendor/liburing/test/7ad0e4b2f83c.c +0 -25
- data/vendor/liburing/test/917257daa0fe.c +7 -0
- data/vendor/liburing/test/Makefile +38 -4
- data/vendor/liburing/test/a0908ae19763.c +7 -0
- data/vendor/liburing/test/a4c0b3decb33.c +7 -0
- data/vendor/liburing/test/accept.c +14 -4
- data/vendor/liburing/test/b19062a56726.c +7 -0
- data/vendor/liburing/test/bind-listen.c +2 -2
- data/vendor/liburing/test/buf-ring-nommap.c +10 -3
- data/vendor/liburing/test/buf-ring.c +2 -0
- data/vendor/liburing/test/cmd-discard.c +427 -0
- data/vendor/liburing/test/coredump.c +7 -0
- data/vendor/liburing/test/cq-overflow.c +13 -1
- data/vendor/liburing/test/d4ae271dfaae.c +11 -3
- data/vendor/liburing/test/defer-taskrun.c +2 -2
- data/vendor/liburing/test/defer-tw-timeout.c +4 -1
- data/vendor/liburing/test/defer.c +2 -2
- data/vendor/liburing/test/double-poll-crash.c +1 -1
- data/vendor/liburing/test/eeed8b54e0df.c +2 -0
- data/vendor/liburing/test/eventfd.c +0 -1
- data/vendor/liburing/test/exit-no-cleanup.c +11 -0
- data/vendor/liburing/test/fadvise.c +9 -26
- data/vendor/liburing/test/fdinfo.c +9 -1
- data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
- data/vendor/liburing/test/file-exit-unreg.c +48 -0
- data/vendor/liburing/test/file-register.c +14 -2
- data/vendor/liburing/test/file-update.c +1 -1
- data/vendor/liburing/test/file-verify.c +27 -16
- data/vendor/liburing/test/files-exit-hang-timeout.c +1 -2
- data/vendor/liburing/test/fixed-buf-iter.c +3 -1
- data/vendor/liburing/test/fixed-hugepage.c +12 -1
- data/vendor/liburing/test/fsnotify.c +1 -0
- data/vendor/liburing/test/futex.c +16 -4
- data/vendor/liburing/test/helpers.c +47 -0
- data/vendor/liburing/test/helpers.h +6 -0
- data/vendor/liburing/test/init-mem.c +5 -3
- data/vendor/liburing/test/io-cancel.c +0 -24
- data/vendor/liburing/test/io_uring_passthrough.c +4 -0
- data/vendor/liburing/test/io_uring_register.c +38 -8
- data/vendor/liburing/test/iopoll-leak.c +4 -0
- data/vendor/liburing/test/iopoll-overflow.c +1 -1
- data/vendor/liburing/test/iopoll.c +3 -3
- data/vendor/liburing/test/kallsyms.c +203 -0
- data/vendor/liburing/test/link-timeout.c +159 -0
- data/vendor/liburing/test/linked-defer-close.c +224 -0
- data/vendor/liburing/test/madvise.c +12 -25
- data/vendor/liburing/test/min-timeout-wait.c +0 -25
- data/vendor/liburing/test/min-timeout.c +0 -25
- data/vendor/liburing/test/mkdir.c +6 -0
- data/vendor/liburing/test/msg-ring.c +8 -2
- data/vendor/liburing/test/napi-test.c +16 -3
- data/vendor/liburing/test/no-mmap-inval.c +3 -1
- data/vendor/liburing/test/nop.c +44 -0
- data/vendor/liburing/test/ooo-file-unreg.c +1 -1
- data/vendor/liburing/test/open-close.c +40 -0
- data/vendor/liburing/test/openat2.c +37 -14
- data/vendor/liburing/test/poll-many.c +13 -7
- data/vendor/liburing/test/poll-mshot-update.c +17 -10
- data/vendor/liburing/test/poll-v-poll.c +6 -3
- data/vendor/liburing/test/pollfree.c +148 -0
- data/vendor/liburing/test/read-mshot-empty.c +158 -153
- data/vendor/liburing/test/read-mshot-stdin.c +121 -0
- data/vendor/liburing/test/read-mshot.c +282 -27
- data/vendor/liburing/test/read-write.c +78 -13
- data/vendor/liburing/test/recv-msgall-stream.c +3 -0
- data/vendor/liburing/test/recv-msgall.c +5 -0
- data/vendor/liburing/test/recvsend_bundle-inc.c +680 -0
- data/vendor/liburing/test/recvsend_bundle.c +94 -31
- data/vendor/liburing/test/reg-fd-only.c +15 -5
- data/vendor/liburing/test/reg-wait.c +251 -0
- data/vendor/liburing/test/regbuf-clone.c +645 -0
- data/vendor/liburing/test/regbuf-merge.c +7 -0
- data/vendor/liburing/test/register-restrictions.c +86 -85
- data/vendor/liburing/test/rename.c +59 -1
- data/vendor/liburing/test/resize-rings.c +643 -0
- data/vendor/liburing/test/ringbuf-read.c +5 -0
- data/vendor/liburing/test/ringbuf-status.c +5 -1
- data/vendor/liburing/test/rsrc_tags.c +1 -1
- data/vendor/liburing/test/runtests.sh +16 -1
- data/vendor/liburing/test/send-zerocopy.c +59 -0
- data/vendor/liburing/test/short-read.c +1 -0
- data/vendor/liburing/test/socket.c +43 -0
- data/vendor/liburing/test/splice.c +3 -1
- data/vendor/liburing/test/sq-poll-dup.c +1 -1
- data/vendor/liburing/test/sq-poll-share.c +2 -0
- data/vendor/liburing/test/sqpoll-disable-exit.c +8 -0
- data/vendor/liburing/test/sqpoll-exit-hang.c +1 -25
- data/vendor/liburing/test/sqpoll-sleep.c +40 -33
- data/vendor/liburing/test/sqwait.c +136 -0
- data/vendor/liburing/test/statx.c +89 -0
- data/vendor/liburing/test/stdout.c +2 -0
- data/vendor/liburing/test/submit-and-wait.c +1 -25
- data/vendor/liburing/test/submit-reuse.c +4 -26
- data/vendor/liburing/test/symlink.c +12 -1
- data/vendor/liburing/test/sync-cancel.c +56 -22
- data/vendor/liburing/test/thread-exit.c +5 -0
- data/vendor/liburing/test/timeout-new.c +1 -26
- data/vendor/liburing/test/timeout.c +25 -34
- data/vendor/liburing/test/unlink.c +94 -1
- data/vendor/liburing/test/uring_cmd_ublk.c +1252 -0
- data/vendor/liburing/test/waitid.c +62 -8
- data/vendor/liburing/test/wq-aff.c +35 -0
- data/vendor/liburing/test/xfail_prep_link_timeout_out_of_scope.c +46 -0
- data/vendor/liburing/test/xfail_register_buffers_out_of_scope.c +51 -0
- metadata +37 -6
- data/examples/event_loop.rb +0 -69
- data/examples/fibers.rb +0 -105
- data/examples/http_server_multishot.rb +0 -57
- data/examples/http_server_simpler.rb +0 -34
data/ext/um/um.c
CHANGED
@@ -1,25 +1,14 @@
|
|
1
1
|
#include "um.h"
|
2
2
|
#include "ruby/thread.h"
|
3
|
-
#include <sys/mman.h>
|
4
3
|
|
5
|
-
void um_setup(struct um *machine) {
|
6
|
-
machine
|
7
|
-
|
8
|
-
machine->
|
9
|
-
machine->
|
10
|
-
machine->runqueue_head = NULL;
|
11
|
-
machine->runqueue_tail = NULL;
|
12
|
-
machine->op_freelist = NULL;
|
13
|
-
machine->result_freelist = NULL;
|
4
|
+
void um_setup(VALUE self, struct um *machine) {
|
5
|
+
memset(machine, 0, sizeof(struct um));
|
6
|
+
|
7
|
+
RB_OBJ_WRITE(self, &machine->self, self);
|
8
|
+
RB_OBJ_WRITE(self, &machine->poll_fiber, Qnil);
|
14
9
|
|
15
10
|
unsigned prepared_limit = 4096;
|
16
|
-
|
17
|
-
#ifdef HAVE_IORING_SETUP_SUBMIT_ALL
|
18
|
-
flags |= IORING_SETUP_SUBMIT_ALL;
|
19
|
-
#endif
|
20
|
-
#ifdef HAVE_IORING_SETUP_COOP_TASKRUN
|
21
|
-
flags |= IORING_SETUP_COOP_TASKRUN;
|
22
|
-
#endif
|
11
|
+
unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;
|
23
12
|
|
24
13
|
while (1) {
|
25
14
|
int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
|
@@ -46,11 +35,10 @@ inline void um_teardown(struct um *machine) {
|
|
46
35
|
io_uring_queue_exit(&machine->ring);
|
47
36
|
machine->ring_initialized = 0;
|
48
37
|
|
49
|
-
|
50
|
-
um_free_op_linked_list(machine, machine->runqueue_head);
|
38
|
+
um_free_buffer_linked_list(machine);
|
51
39
|
}
|
52
40
|
|
53
|
-
|
41
|
+
inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
|
54
42
|
struct io_uring_sqe *sqe;
|
55
43
|
sqe = io_uring_get_sqe(&machine->ring);
|
56
44
|
if (likely(sqe)) goto done;
|
@@ -69,88 +57,43 @@ done:
|
|
69
57
|
sqe->user_data = (long long)op;
|
70
58
|
sqe->flags = 0;
|
71
59
|
machine->unsubmitted_count++;
|
60
|
+
if (op) machine->pending_count++;
|
72
61
|
return sqe;
|
73
62
|
}
|
74
63
|
|
75
|
-
struct
|
76
|
-
struct
|
77
|
-
|
78
|
-
int result;
|
79
|
-
};
|
80
|
-
|
81
|
-
void *um_wait_for_cqe_without_gvl(void *ptr) {
|
82
|
-
struct wait_for_cqe_ctx *ctx = ptr;
|
83
|
-
if (ctx->machine->unsubmitted_count) {
|
84
|
-
ctx->machine->unsubmitted_count = 0;
|
85
|
-
ctx->result = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
|
86
|
-
}
|
87
|
-
else
|
88
|
-
ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
|
89
|
-
return NULL;
|
90
|
-
}
|
91
|
-
|
92
|
-
inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
|
93
|
-
op->cqe_result = cqe->res;
|
94
|
-
op->cqe_flags = cqe->flags;
|
95
|
-
op->state = OP_completed;
|
96
|
-
um_runqueue_push(machine, op);
|
97
|
-
}
|
98
|
-
|
99
|
-
inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
|
100
|
-
if (!op->results_head) {
|
101
|
-
struct um_op *op2 = um_op_checkout(machine);
|
102
|
-
op2->state = OP_schedule;
|
103
|
-
op2->fiber = op->fiber;
|
104
|
-
op2->resume_value = Qnil;
|
105
|
-
um_runqueue_push(machine, op2);
|
106
|
-
}
|
107
|
-
um_op_result_push(machine, op, cqe->res, cqe->flags);
|
64
|
+
static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
|
65
|
+
struct um_op *op = (struct um_op *)cqe->user_data;
|
66
|
+
if (unlikely(!op)) return;
|
108
67
|
|
109
68
|
if (!(cqe->flags & IORING_CQE_F_MORE))
|
110
|
-
|
111
|
-
}
|
69
|
+
machine->pending_count--;
|
112
70
|
|
113
|
-
|
114
|
-
|
115
|
-
|
71
|
+
// printf(
|
72
|
+
// ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
|
73
|
+
// op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
|
74
|
+
// );
|
116
75
|
|
117
|
-
|
118
|
-
case OP_submitted:
|
119
|
-
if (unlikely(cqe->res == -ECANCELED)) {
|
120
|
-
um_op_checkin(machine, op);
|
121
|
-
break;
|
122
|
-
}
|
123
|
-
if (!op->is_multishot)
|
124
|
-
um_handle_submitted_op_cqe_single(machine, op, cqe);
|
125
|
-
else
|
126
|
-
um_handle_submitted_op_cqe_multi(machine, op, cqe);
|
127
|
-
break;
|
128
|
-
case OP_abandonned:
|
129
|
-
// op has been abandonned by the I/O method, so we need to cleanup (check
|
130
|
-
// the op in to the free list).
|
131
|
-
um_op_checkin(machine, op);
|
132
|
-
break;
|
133
|
-
default:
|
134
|
-
// TODO: invalid state, should raise!
|
135
|
-
}
|
136
|
-
}
|
76
|
+
if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
|
137
77
|
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
.cqe = NULL
|
142
|
-
};
|
78
|
+
op->flags |= OP_F_COMPLETED;
|
79
|
+
if (unlikely(op->flags & OP_F_TRANSIENT))
|
80
|
+
um_op_transient_remove(machine, op);
|
143
81
|
|
144
|
-
|
145
|
-
|
146
|
-
|
82
|
+
if (op->flags & OP_F_MULTISHOT) {
|
83
|
+
um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
|
84
|
+
if (op->multishot_result_count > 1)
|
85
|
+
return;
|
147
86
|
}
|
148
|
-
|
149
|
-
|
87
|
+
else {
|
88
|
+
op->result.res = cqe->res;
|
89
|
+
op->result.flags = cqe->flags;
|
90
|
+
}
|
91
|
+
|
92
|
+
um_runqueue_push(machine, op);
|
150
93
|
}
|
151
94
|
|
152
95
|
// copied from liburing/queue.c
|
153
|
-
static inline
|
96
|
+
static inline int cq_ring_needs_flush(struct io_uring *ring) {
|
154
97
|
return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
|
155
98
|
}
|
156
99
|
|
@@ -180,121 +123,137 @@ done:
|
|
180
123
|
return total_count;
|
181
124
|
}
|
182
125
|
|
183
|
-
|
184
|
-
|
185
|
-
|
126
|
+
struct wait_for_cqe_ctx {
|
127
|
+
struct um *machine;
|
128
|
+
struct io_uring_cqe *cqe;
|
129
|
+
int result;
|
130
|
+
};
|
131
|
+
|
132
|
+
void *um_wait_for_cqe_without_gvl(void *ptr) {
|
133
|
+
struct wait_for_cqe_ctx *ctx = ptr;
|
134
|
+
if (ctx->machine->unsubmitted_count) {
|
135
|
+
ctx->machine->unsubmitted_count = 0;
|
136
|
+
|
137
|
+
// Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
|
138
|
+
// interrupted with a signal. We can detect this by testing ctx->cqe for
|
139
|
+
// NULL.
|
140
|
+
//
|
141
|
+
// https://github.com/axboe/liburing/issues/1280
|
142
|
+
int res = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
|
143
|
+
ctx->result = (res > 0 && !ctx->cqe) ? -EINTR : res;
|
144
|
+
}
|
145
|
+
else
|
146
|
+
ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
|
147
|
+
return NULL;
|
186
148
|
}
|
187
149
|
|
188
|
-
inline
|
189
|
-
struct
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
unlikely(
|
199
|
-
first_iteration && machine->unsubmitted_count &&
|
200
|
-
machine->runqueue_head &&
|
201
|
-
machine->runqueue_head->fiber == rb_fiber_current()
|
202
|
-
)
|
203
|
-
) {
|
204
|
-
io_uring_submit(&machine->ring);
|
150
|
+
static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
|
151
|
+
struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL };
|
152
|
+
rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
|
153
|
+
|
154
|
+
if (unlikely(ctx.result < 0 && ctx.result != -EINTR))
|
155
|
+
rb_syserr_fail(-ctx.result, strerror(-ctx.result));
|
156
|
+
|
157
|
+
if (ctx.cqe) {
|
158
|
+
um_process_cqe(machine, ctx.cqe);
|
159
|
+
io_uring_cq_advance(&machine->ring, 1);
|
205
160
|
um_process_ready_cqes(machine);
|
206
161
|
}
|
207
|
-
|
162
|
+
}
|
208
163
|
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
// the resume value is disregarded, we pass the fiber itself
|
216
|
-
VALUE v = rb_fiber_transfer(op->fiber, 1, &resume_value);
|
217
|
-
return v;
|
218
|
-
}
|
164
|
+
inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
|
165
|
+
VALUE fiber = op->fiber;
|
166
|
+
VALUE value = op->value;
|
167
|
+
|
168
|
+
if (unlikely(op->flags & OP_F_TRANSIENT))
|
169
|
+
um_op_free(machine, op);
|
219
170
|
|
220
|
-
|
221
|
-
goto loop;
|
171
|
+
return rb_fiber_transfer(fiber, 1, &value);
|
222
172
|
}
|
223
173
|
|
224
|
-
|
174
|
+
inline VALUE um_fiber_switch(struct um *machine) {
|
175
|
+
while (true) {
|
176
|
+
struct um_op *op = um_runqueue_shift(machine);
|
177
|
+
if (op)
|
178
|
+
return process_runqueue_op(machine, op);
|
179
|
+
|
180
|
+
um_wait_for_and_process_ready_cqes(machine);
|
181
|
+
}
|
182
|
+
}
|
183
|
+
|
184
|
+
static inline void um_submit_cancel_op(struct um *machine, struct um_op *op) {
|
225
185
|
struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
|
226
186
|
io_uring_prep_cancel64(sqe, (long long)op, 0);
|
227
187
|
}
|
228
188
|
|
229
|
-
|
230
|
-
op
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
if (unlikely(is_exception && op->state == OP_submitted)) {
|
235
|
-
um_cancel_op(machine, op);
|
236
|
-
op->state = OP_abandonned;
|
189
|
+
inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
|
190
|
+
um_submit_cancel_op(machine, op);
|
191
|
+
while (true) {
|
192
|
+
um_fiber_switch(machine);
|
193
|
+
if (um_op_completed_p(op)) break;
|
237
194
|
}
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
um_op_checkin(machine, op);
|
195
|
+
}
|
196
|
+
|
197
|
+
inline int um_check_completion(struct um *machine, struct um_op *op) {
|
198
|
+
if (!um_op_completed_p(op)) {
|
199
|
+
um_cancel_and_wait(machine, op);
|
200
|
+
return 0;
|
245
201
|
}
|
246
202
|
|
247
|
-
|
248
|
-
return
|
203
|
+
um_raise_on_error_result(op->result.res);
|
204
|
+
return 1;
|
249
205
|
}
|
250
206
|
|
251
207
|
inline VALUE um_await(struct um *machine) {
|
252
208
|
VALUE v = um_fiber_switch(machine);
|
253
|
-
return
|
209
|
+
return raise_if_exception(v);
|
254
210
|
}
|
255
211
|
|
256
|
-
inline void
|
257
|
-
|
258
|
-
op->
|
259
|
-
|
260
|
-
|
261
|
-
|
212
|
+
inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind) {
|
213
|
+
memset(op, 0, sizeof(struct um_op));
|
214
|
+
op->kind = kind;
|
215
|
+
switch (kind) {
|
216
|
+
case OP_ACCEPT_MULTISHOT:
|
217
|
+
case OP_READ_MULTISHOT:
|
218
|
+
case OP_RECV_MULTISHOT:
|
219
|
+
op->flags |= OP_F_MULTISHOT;
|
220
|
+
default:
|
221
|
+
}
|
222
|
+
RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
|
223
|
+
op->value = Qnil;
|
262
224
|
}
|
263
225
|
|
264
|
-
inline void
|
265
|
-
struct um_op *op =
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
op->state = OP_schedule;
|
273
|
-
op->fiber = fiber;
|
274
|
-
op->resume_value = value;
|
275
|
-
um_runqueue_unshift(machine, op);
|
276
|
-
}
|
226
|
+
inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
|
227
|
+
struct um_op *op = um_op_alloc(machine);
|
228
|
+
memset(op, 0, sizeof(struct um_op));
|
229
|
+
op->kind = OP_SCHEDULE;
|
230
|
+
op->flags = OP_F_TRANSIENT;
|
231
|
+
RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
|
232
|
+
RB_OBJ_WRITE(machine->self, &op->value, value);
|
233
|
+
um_runqueue_push(machine, op);
|
277
234
|
}
|
278
235
|
|
279
|
-
struct
|
236
|
+
struct op_ctx {
|
280
237
|
struct um *machine;
|
281
238
|
struct um_op *op;
|
239
|
+
int fd;
|
282
240
|
int bgid;
|
241
|
+
|
242
|
+
void *read_buf;
|
243
|
+
int read_maxlen;
|
244
|
+
struct __kernel_timespec ts;
|
245
|
+
int flags;
|
283
246
|
};
|
284
247
|
|
285
248
|
VALUE um_timeout_ensure(VALUE arg) {
|
286
|
-
struct
|
249
|
+
struct op_ctx *ctx = (struct op_ctx *)arg;
|
287
250
|
|
288
|
-
if (ctx->op
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
ctx->op->state == OP_abandonned;
|
293
|
-
}
|
294
|
-
else {
|
295
|
-
// completed, so can be checked in
|
296
|
-
um_op_checkin(ctx->machine, ctx->op);
|
251
|
+
if (!um_op_completed_p(ctx->op)) {
|
252
|
+
um_submit_cancel_op(ctx->machine, ctx->op);
|
253
|
+
ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
|
254
|
+
um_op_transient_add(ctx->machine, ctx->op);
|
297
255
|
}
|
256
|
+
|
298
257
|
return Qnil;
|
299
258
|
}
|
300
259
|
|
@@ -302,156 +261,443 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
|
|
302
261
|
static ID ID_new = 0;
|
303
262
|
if (!ID_new) ID_new = rb_intern("new");
|
304
263
|
|
305
|
-
struct um_op *op =
|
264
|
+
struct um_op *op = malloc(sizeof(struct um_op));
|
265
|
+
um_prep_op(machine, op, OP_TIMEOUT);
|
306
266
|
op->ts = um_double_to_timespec(NUM2DBL(interval));
|
267
|
+
RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
|
268
|
+
RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
|
307
269
|
|
308
270
|
struct io_uring_sqe *sqe = um_get_sqe(machine, op);
|
309
271
|
io_uring_prep_timeout(sqe, &op->ts, 0, 0);
|
310
|
-
op->state = OP_submitted;
|
311
|
-
op->fiber = rb_fiber_current();
|
312
|
-
op->resume_value = rb_funcall(class, ID_new, 0);
|
313
272
|
|
314
|
-
struct
|
273
|
+
struct op_ctx ctx = { .machine = machine, .op = op };
|
315
274
|
return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
|
316
275
|
}
|
317
276
|
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
struct io_uring_sqe *sqe = um_get_sqe(machine, op);
|
322
|
-
int result = 0;
|
277
|
+
/*******************************************************************************
|
278
|
+
blocking singleshot ops
|
279
|
+
*******************************************************************************/
|
323
280
|
|
324
|
-
|
325
|
-
|
281
|
+
VALUE um_sleep(struct um *machine, double duration) {
|
282
|
+
struct um_op op;
|
283
|
+
um_prep_op(machine, &op, OP_SLEEP);
|
284
|
+
op.ts = um_double_to_timespec(duration);
|
285
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
286
|
+
io_uring_prep_timeout(sqe, &op.ts, 0, 0);
|
287
|
+
VALUE ret = um_fiber_switch(machine);
|
288
|
+
|
289
|
+
if (!um_op_completed_p(&op))
|
290
|
+
um_cancel_and_wait(machine, &op);
|
291
|
+
else {
|
292
|
+
if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
|
293
|
+
ret = DBL2NUM(duration);
|
294
|
+
}
|
326
295
|
|
327
|
-
|
296
|
+
RB_GC_GUARD(ret);
|
297
|
+
return raise_if_exception(ret);
|
328
298
|
}
|
329
299
|
|
330
300
|
inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
|
331
|
-
struct um_op
|
332
|
-
|
333
|
-
|
334
|
-
int flags = 0;
|
335
|
-
|
301
|
+
struct um_op op;
|
302
|
+
um_prep_op(machine, &op, OP_READ);
|
303
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
336
304
|
void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
|
337
305
|
io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
|
338
|
-
|
306
|
+
|
307
|
+
VALUE ret = um_fiber_switch(machine);
|
308
|
+
if (um_check_completion(machine, &op)) {
|
309
|
+
um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
|
310
|
+
ret = INT2NUM(op.result.res);
|
311
|
+
|
312
|
+
}
|
313
|
+
|
314
|
+
RB_GC_GUARD(buffer);
|
315
|
+
RB_GC_GUARD(ret);
|
316
|
+
return raise_if_exception(ret);
|
317
|
+
}
|
339
318
|
|
340
|
-
|
319
|
+
VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
|
320
|
+
struct um_op op;
|
321
|
+
um_prep_op(machine, &op, OP_WRITE);
|
322
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
323
|
+
const int str_len = RSTRING_LEN(str);
|
324
|
+
if (len > str_len) len = str_len;
|
341
325
|
|
342
|
-
|
343
|
-
|
344
|
-
|
326
|
+
io_uring_prep_write(sqe, fd, RSTRING_PTR(str), len, -1);
|
327
|
+
|
328
|
+
VALUE ret = um_fiber_switch(machine);
|
329
|
+
if (um_check_completion(machine, &op))
|
330
|
+
ret = INT2NUM(op.result.res);
|
331
|
+
|
332
|
+
RB_GC_GUARD(str);
|
333
|
+
RB_GC_GUARD(ret);
|
334
|
+
return raise_if_exception(ret);
|
345
335
|
}
|
346
336
|
|
347
|
-
VALUE
|
348
|
-
struct
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
337
|
+
VALUE um_close(struct um *machine, int fd) {
|
338
|
+
struct um_op op;
|
339
|
+
um_prep_op(machine, &op, OP_CLOSE);
|
340
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
341
|
+
io_uring_prep_close(sqe, fd);
|
342
|
+
|
343
|
+
VALUE ret = um_fiber_switch(machine);
|
344
|
+
if (um_check_completion(machine, &op))
|
345
|
+
ret = INT2NUM(fd);
|
346
|
+
|
347
|
+
RB_GC_GUARD(ret);
|
348
|
+
return raise_if_exception(ret);
|
349
|
+
}
|
350
|
+
|
351
|
+
VALUE um_accept(struct um *machine, int fd) {
|
352
|
+
struct um_op op;
|
353
|
+
um_prep_op(machine, &op, OP_ACCEPT);
|
354
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
355
|
+
io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
|
356
|
+
|
357
|
+
VALUE ret = um_fiber_switch(machine);
|
358
|
+
if (um_check_completion(machine, &op))
|
359
|
+
ret = INT2NUM(op.result.res);
|
360
|
+
|
361
|
+
RB_GC_GUARD(ret);
|
362
|
+
return raise_if_exception(ret);
|
363
|
+
}
|
364
|
+
|
365
|
+
VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
|
366
|
+
struct um_op op;
|
367
|
+
um_prep_op(machine, &op, OP_SOCKET);
|
368
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
369
|
+
io_uring_prep_socket(sqe, domain, type, protocol, flags);
|
370
|
+
|
371
|
+
VALUE ret = um_fiber_switch(machine);
|
372
|
+
if (um_check_completion(machine, &op))
|
373
|
+
ret = INT2NUM(op.result.res);
|
374
|
+
|
375
|
+
RB_GC_GUARD(ret);
|
376
|
+
return raise_if_exception(ret);
|
377
|
+
}
|
378
|
+
|
379
|
+
VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
|
380
|
+
struct um_op op;
|
381
|
+
um_prep_op(machine, &op, OP_CONNECT);
|
382
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
383
|
+
io_uring_prep_connect(sqe, fd, addr, addrlen);
|
384
|
+
|
385
|
+
VALUE ret = um_fiber_switch(machine);
|
386
|
+
if (um_check_completion(machine, &op))
|
387
|
+
ret = INT2NUM(op.result.res);
|
388
|
+
|
389
|
+
RB_GC_GUARD(ret);
|
390
|
+
return raise_if_exception(ret);
|
391
|
+
}
|
392
|
+
|
393
|
+
VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
|
394
|
+
struct um_op op;
|
395
|
+
um_prep_op(machine, &op, OP_SEND);
|
396
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
397
|
+
io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
|
398
|
+
|
399
|
+
VALUE ret = um_fiber_switch(machine);
|
400
|
+
if (um_check_completion(machine, &op))
|
401
|
+
ret = INT2NUM(op.result.res);
|
402
|
+
|
403
|
+
RB_GC_GUARD(buffer);
|
404
|
+
RB_GC_GUARD(ret);
|
405
|
+
return raise_if_exception(ret);
|
406
|
+
}
|
407
|
+
|
408
|
+
VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
|
409
|
+
struct um_op op;
|
410
|
+
um_prep_op(machine, &op, OP_RECV);
|
411
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
412
|
+
void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
|
413
|
+
io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
|
414
|
+
|
415
|
+
VALUE ret = um_fiber_switch(machine);
|
416
|
+
if (um_check_completion(machine, &op)) {
|
417
|
+
um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
|
418
|
+
ret = INT2NUM(op.result.res);
|
357
419
|
}
|
358
|
-
|
420
|
+
|
421
|
+
RB_GC_GUARD(buffer);
|
422
|
+
RB_GC_GUARD(ret);
|
423
|
+
return raise_if_exception(ret);
|
359
424
|
}
|
360
425
|
|
361
|
-
VALUE
|
362
|
-
struct
|
363
|
-
|
364
|
-
|
365
|
-
|
426
|
+
VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
|
427
|
+
struct um_op op;
|
428
|
+
um_prep_op(machine, &op, OP_BIND);
|
429
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
430
|
+
io_uring_prep_bind(sqe, fd, addr, addrlen);
|
366
431
|
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
|
373
|
-
|
374
|
-
|
375
|
-
|
376
|
-
|
377
|
-
|
432
|
+
VALUE ret = um_fiber_switch(machine);
|
433
|
+
if (um_check_completion(machine, &op))
|
434
|
+
ret = INT2NUM(op.result.res);
|
435
|
+
|
436
|
+
RB_GC_GUARD(ret);
|
437
|
+
return raise_if_exception(ret);
|
438
|
+
}
|
439
|
+
|
440
|
+
VALUE um_listen(struct um *machine, int fd, int backlog) {
|
441
|
+
struct um_op op;
|
442
|
+
um_prep_op(machine, &op, OP_BIND);
|
443
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
444
|
+
io_uring_prep_listen(sqe, fd, backlog);
|
445
|
+
|
446
|
+
VALUE ret = um_fiber_switch(machine);
|
447
|
+
if (um_check_completion(machine, &op))
|
448
|
+
ret = INT2NUM(op.result.res);
|
449
|
+
|
450
|
+
RB_GC_GUARD(ret);
|
451
|
+
return raise_if_exception(ret);
|
452
|
+
}
|
453
|
+
|
454
|
+
VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
|
455
|
+
VALUE ret = Qnil;
|
456
|
+
int value;
|
457
|
+
|
458
|
+
#ifdef HAVE_IO_URING_PREP_CMD_SOCK
|
459
|
+
struct um_op op;
|
460
|
+
um_prep_op(machine, &op, OP_GETSOCKOPT);
|
461
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
462
|
+
io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
|
463
|
+
|
464
|
+
ret = um_fiber_switch(machine);
|
465
|
+
if (um_check_completion(machine, &op))
|
466
|
+
ret = INT2NUM(value);
|
467
|
+
#else
|
468
|
+
socklen_t nvalue = sizeof(value);
|
469
|
+
int res = getsockopt(fd, level, opt, &value, &nvalue);
|
470
|
+
if (res)
|
471
|
+
rb_syserr_fail(errno, strerror(errno));
|
472
|
+
ret = INT2NUM(value);
|
473
|
+
#endif
|
474
|
+
|
475
|
+
RB_GC_GUARD(ret);
|
476
|
+
return raise_if_exception(ret);
|
477
|
+
}
|
478
|
+
|
479
|
+
VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
|
480
|
+
VALUE ret = Qnil;
|
481
|
+
|
482
|
+
#ifdef HAVE_IO_URING_PREP_CMD_SOCK
|
483
|
+
struct um_op op;
|
484
|
+
um_prep_op(machine, &op, OP_GETSOCKOPT);
|
485
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
486
|
+
io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
|
487
|
+
|
488
|
+
ret = um_fiber_switch(machine);
|
489
|
+
if (um_check_completion(machine, &op))
|
490
|
+
ret = INT2NUM(op.result.res);
|
491
|
+
#else
|
492
|
+
int res = setsockopt(fd, level, opt, &value, sizeof(value));
|
493
|
+
if (res)
|
494
|
+
rb_syserr_fail(errno, strerror(errno));
|
495
|
+
ret = INT2NUM(0);
|
496
|
+
#endif
|
497
|
+
|
498
|
+
RB_GC_GUARD(ret);
|
499
|
+
return raise_if_exception(ret);
|
500
|
+
}
|
501
|
+
|
502
|
+
VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
|
503
|
+
struct um_op op;
|
504
|
+
um_prep_op(machine, &op, OP_BIND);
|
505
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
506
|
+
io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
|
507
|
+
|
508
|
+
VALUE ret = um_fiber_switch(machine);
|
509
|
+
if (um_check_completion(machine, &op))
|
510
|
+
ret = INT2NUM(op.result.res);
|
511
|
+
|
512
|
+
RB_GC_GUARD(ret);
|
513
|
+
return raise_if_exception(ret);
|
514
|
+
}
|
515
|
+
|
516
|
+
VALUE um_waitpid(struct um *machine, int pid, int options) {
|
517
|
+
struct um_op op;
|
518
|
+
um_prep_op(machine, &op, OP_BIND);
|
519
|
+
struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
|
520
|
+
|
521
|
+
siginfo_t infop;
|
522
|
+
io_uring_prep_waitid(sqe, P_PID, pid, &infop, options, 0);
|
523
|
+
|
524
|
+
VALUE ret = um_fiber_switch(machine);
|
525
|
+
if (um_check_completion(machine, &op))
|
526
|
+
ret = INT2NUM(op.result.res);
|
527
|
+
|
528
|
+
RB_GC_GUARD(ret);
|
529
|
+
raise_if_exception(ret);
|
530
|
+
|
531
|
+
return rb_ary_new_from_args(2, INT2NUM(infop.si_pid), INT2NUM(infop.si_status));
|
532
|
+
}
|
533
|
+
|
534
|
+
/*******************************************************************************
|
535
|
+
multishot ops
|
536
|
+
*******************************************************************************/
|
537
|
+
|
538
|
+
// Body (rb_ensure "begin" part) of um_accept_each: submits a single multishot
// accept SQE on ctx->fd, then repeatedly suspends the fiber and drains the
// op's queued completion results, yielding each accepted fd (as a Ruby
// Integer) to the block. Returns Qnil on normal termination (error result or
// the kernel stopping the multishot); raises if the fiber was resumed with an
// exception before the op completed.
VALUE accept_each_begin(VALUE arg) {
  struct op_ctx *ctx = (struct op_ctx *)arg;
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
  io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);

  while (true) {
    VALUE ret = um_fiber_switch(ctx->machine);
    // Resumed without a completion: the fiber was scheduled with a value
    // (possibly an exception) — propagate it.
    if (!um_op_completed_p(ctx->op))
      return raise_if_exception(ret);

    int more = false;
    // Completions accumulate as a linked list of results, one per CQE.
    struct um_op_result *result = &ctx->op->result;
    while (result) {
      more = (result->flags & IORING_CQE_F_MORE);
      if (result->res < 0) {
        // Error result ends the loop; release any queued results first.
        um_op_multishot_results_clear(ctx->machine, ctx->op);
        return Qnil;
      }
      rb_yield(INT2NUM(result->res));  // res is the accepted fd
      result = result->next;
    }
    um_op_multishot_results_clear(ctx->machine, ctx->op);
    if (more)
      // Kernel will post more CQEs for this SQE: mark the op pending again.
      ctx->op->flags &= ~OP_F_COMPLETED;
    else
      break;
  }

  return Qnil;
}
|
384
568
|
|
385
|
-
VALUE
|
386
|
-
struct
|
387
|
-
|
569
|
+
// Cleanup (rb_ensure "ensure" part) shared by the multishot loops
// (um_accept_each / um_read_each / um_recv_each), run on both normal and
// exceptional exit. Releases any queued multishot results, cancels the op if
// it is still in flight, and frees the singleshot read buffer if one was
// allocated. Always returns Qnil.
VALUE multishot_ensure(VALUE arg) {
  struct op_ctx *ctx = (struct op_ctx *)arg;
  if (ctx->op->multishot_result_count) {
    // If the last queued CQE had F_MORE set, the op is still active in the
    // kernel; clear the completed flag so it gets canceled below.
    int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
    if (more)
      ctx->op->flags &= ~OP_F_COMPLETED;
    um_op_multishot_results_clear(ctx->machine, ctx->op);
  }
  if (!um_op_completed_p(ctx->op))
    um_cancel_and_wait(ctx->machine, ctx->op);

  // free(NULL) is a no-op, so no guard is needed (was: if (ctx->read_buf)).
  free(ctx->read_buf);

  return Qnil;
}
|
396
585
|
|
397
|
-
VALUE
|
398
|
-
struct um_op
|
399
|
-
|
400
|
-
|
401
|
-
|
586
|
+
VALUE um_accept_each(struct um *machine, int fd) {
|
587
|
+
struct um_op op;
|
588
|
+
um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
|
589
|
+
|
590
|
+
struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
|
591
|
+
return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
|
592
|
+
}
|
402
593
|
|
403
|
-
|
404
|
-
|
594
|
+
int um_read_each_singleshot_loop(struct op_ctx *ctx) {
|
595
|
+
struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
|
596
|
+
ctx->read_maxlen = desc->buf_size;
|
597
|
+
ctx->read_buf = malloc(desc->buf_size);
|
598
|
+
int total = 0;
|
405
599
|
|
406
|
-
|
407
|
-
|
408
|
-
|
600
|
+
while (1) {
|
601
|
+
um_prep_op(ctx->machine, ctx->op, OP_READ);
|
602
|
+
struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
|
603
|
+
io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
|
604
|
+
|
605
|
+
VALUE ret = um_fiber_switch(ctx->machine);
|
606
|
+
if (um_op_completed_p(ctx->op)) {
|
607
|
+
um_raise_on_error_result(ctx->op->result.res);
|
608
|
+
if (!ctx->op->result.res) return total;
|
609
|
+
|
610
|
+
VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
|
611
|
+
total += ctx->op->result.res;
|
612
|
+
rb_yield(buf);
|
613
|
+
RB_GC_GUARD(buf);
|
614
|
+
}
|
615
|
+
else
|
616
|
+
return raise_if_exception(ret);
|
617
|
+
}
|
409
618
|
}
|
410
619
|
|
411
|
-
|
412
|
-
|
413
|
-
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
|
418
|
-
|
419
|
-
|
620
|
+
// Processes a single multishot read/recv completion result: yields the
// buffer-ring chunk to the block and adds its byte count to *total.
// Returns true if more results are expected from the multishot op, false when
// the loop should stop (EOF, or after falling back to single-shot mode).
int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
  if (result->res == 0)
    return false;  // zero-length result: EOF

  *total += result->res;
  VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result->res, result->flags);
  rb_yield(buf);
  RB_GC_GUARD(buf);

  // TTY devices might not support multishot reads:
  // https://github.com/axboe/liburing/issues/1185. We detect this by checking
  // if the F_MORE flag is absent, then switch to single shot mode.
  if (unlikely(!(result->flags & IORING_CQE_F_MORE))) {
    *total += um_read_each_singleshot_loop(ctx);
    return false;
  }

  return true;
}
|
425
640
|
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
|
641
|
+
void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
|
642
|
+
switch (ctx->op->kind) {
|
643
|
+
case OP_READ_MULTISHOT:
|
644
|
+
io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
|
645
|
+
return;
|
646
|
+
case OP_RECV_MULTISHOT:
|
647
|
+
io_uring_prep_recv_multishot(sqe, ctx->fd, NULL, 0, 0);
|
648
|
+
sqe->buf_group = ctx->bgid;
|
649
|
+
sqe->flags |= IOSQE_BUFFER_SELECT;
|
650
|
+
return;
|
651
|
+
default:
|
652
|
+
return;
|
653
|
+
}
|
654
|
+
}
|
430
655
|
|
431
|
-
|
432
|
-
|
433
|
-
|
434
|
-
|
435
|
-
|
436
|
-
|
437
|
-
|
438
|
-
|
439
|
-
|
440
|
-
|
656
|
+
// Body (rb_ensure "begin" part) shared by um_read_each / um_recv_each:
// submits the appropriate multishot SQE for ctx->op->kind, then repeatedly
// suspends the fiber and drains queued completion results, delegating each to
// read_recv_each_multishot_process_result. Returns Qnil on normal
// termination; raises on an error result or when the fiber is resumed with an
// exception before the op completed.
VALUE read_recv_each_begin(VALUE arg) {
  struct op_ctx *ctx = (struct op_ctx *)arg;
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
  read_recv_each_prep(sqe, ctx);
  // NOTE(review): total is accumulated via process_result but never used
  // after the loop — confirm whether it was meant to be returned or dropped.
  int total = 0;

  while (true) {
    VALUE ret = um_fiber_switch(ctx->machine);
    // Resumed without a completion: propagate the scheduled value/exception.
    if (!um_op_completed_p(ctx->op))
      return raise_if_exception(ret);

    int more = false;
    // Completions accumulate as a linked list of results, one per CQE.
    struct um_op_result *result = &ctx->op->result;
    while (result) {
      um_raise_on_error_result(result->res);

      more = (result->flags & IORING_CQE_F_MORE);
      // process_result returns false on EOF or after a single-shot fallback.
      if (!read_recv_each_multishot_process_result(ctx, result, &total))
        return Qnil;

      result = result->next;
    }
    um_op_multishot_results_clear(ctx->machine, ctx->op);
    if (more)
      // Kernel will post more CQEs for this SQE: mark the op pending again.
      ctx->op->flags &= ~OP_F_COMPLETED;
    else
      break;
  }

  return Qnil;
}
|
445
688
|
|
446
|
-
VALUE
|
447
|
-
struct um_op
|
448
|
-
|
449
|
-
|
450
|
-
|
451
|
-
|
452
|
-
|
453
|
-
|
454
|
-
|
455
|
-
struct
|
456
|
-
|
689
|
+
VALUE um_read_each(struct um *machine, int fd, int bgid) {
|
690
|
+
struct um_op op;
|
691
|
+
um_prep_op(machine, &op, OP_READ_MULTISHOT);
|
692
|
+
|
693
|
+
struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
|
694
|
+
return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
|
695
|
+
}
|
696
|
+
|
697
|
+
VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
|
698
|
+
struct um_op op;
|
699
|
+
um_prep_op(machine, &op, OP_RECV_MULTISHOT);
|
700
|
+
|
701
|
+
struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
|
702
|
+
return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
|
457
703
|
}
|