uringmachine 0.3 → 0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +2 -1
  3. data/CHANGELOG.md +23 -0
  4. data/README.md +128 -0
  5. data/TODO.md +14 -0
  6. data/examples/bm_snooze.rb +89 -0
  7. data/examples/bm_write.rb +56 -0
  8. data/examples/dns_client.rb +12 -0
  9. data/examples/echo_server.rb +18 -40
  10. data/examples/http_server.rb +42 -43
  11. data/examples/inout.rb +19 -0
  12. data/examples/nc.rb +36 -0
  13. data/examples/server_client.rb +64 -0
  14. data/examples/snooze.rb +44 -0
  15. data/examples/write_dev_null.rb +16 -0
  16. data/ext/um/extconf.rb +24 -23
  17. data/ext/um/um.c +524 -278
  18. data/ext/um/um.h +146 -44
  19. data/ext/um/um_buffer.c +49 -0
  20. data/ext/um/um_class.c +217 -106
  21. data/ext/um/um_const.c +213 -0
  22. data/ext/um/um_ext.c +4 -0
  23. data/ext/um/um_mutex_class.c +47 -0
  24. data/ext/um/um_op.c +86 -114
  25. data/ext/um/um_queue_class.c +58 -0
  26. data/ext/um/um_sync.c +273 -0
  27. data/ext/um/um_utils.c +49 -4
  28. data/lib/uringmachine/dns_resolver.rb +84 -0
  29. data/lib/uringmachine/version.rb +1 -1
  30. data/lib/uringmachine.rb +28 -0
  31. data/supressions/ruby.supp +71 -0
  32. data/test/helper.rb +8 -0
  33. data/test/test_um.rb +685 -46
  34. data/vendor/liburing/.github/workflows/build.yml +29 -1
  35. data/vendor/liburing/.gitignore +6 -0
  36. data/vendor/liburing/CHANGELOG +16 -0
  37. data/vendor/liburing/CONTRIBUTING.md +165 -0
  38. data/vendor/liburing/configure +64 -0
  39. data/vendor/liburing/examples/Makefile +9 -1
  40. data/vendor/liburing/examples/kdigest.c +405 -0
  41. data/vendor/liburing/examples/proxy.c +75 -8
  42. data/vendor/liburing/examples/reg-wait.c +159 -0
  43. data/vendor/liburing/liburing.pc.in +1 -1
  44. data/vendor/liburing/liburing.spec +1 -1
  45. data/vendor/liburing/src/Makefile +16 -2
  46. data/vendor/liburing/src/include/liburing/io_uring.h +77 -0
  47. data/vendor/liburing/src/include/liburing/sanitize.h +39 -0
  48. data/vendor/liburing/src/include/liburing.h +59 -6
  49. data/vendor/liburing/src/int_flags.h +10 -3
  50. data/vendor/liburing/src/liburing-ffi.map +16 -0
  51. data/vendor/liburing/src/liburing.map +10 -0
  52. data/vendor/liburing/src/queue.c +28 -16
  53. data/vendor/liburing/src/register.c +106 -1
  54. data/vendor/liburing/src/sanitize.c +176 -0
  55. data/vendor/liburing/src/setup.c +47 -19
  56. data/vendor/liburing/src/setup.h +6 -0
  57. data/vendor/liburing/test/35fa71a030ca.c +7 -0
  58. data/vendor/liburing/test/500f9fbadef8.c +2 -0
  59. data/vendor/liburing/test/7ad0e4b2f83c.c +0 -25
  60. data/vendor/liburing/test/917257daa0fe.c +7 -0
  61. data/vendor/liburing/test/Makefile +38 -4
  62. data/vendor/liburing/test/a0908ae19763.c +7 -0
  63. data/vendor/liburing/test/a4c0b3decb33.c +7 -0
  64. data/vendor/liburing/test/accept.c +14 -4
  65. data/vendor/liburing/test/b19062a56726.c +7 -0
  66. data/vendor/liburing/test/bind-listen.c +2 -2
  67. data/vendor/liburing/test/buf-ring-nommap.c +10 -3
  68. data/vendor/liburing/test/buf-ring.c +2 -0
  69. data/vendor/liburing/test/cmd-discard.c +427 -0
  70. data/vendor/liburing/test/coredump.c +7 -0
  71. data/vendor/liburing/test/cq-overflow.c +13 -1
  72. data/vendor/liburing/test/d4ae271dfaae.c +11 -3
  73. data/vendor/liburing/test/defer-taskrun.c +2 -2
  74. data/vendor/liburing/test/defer-tw-timeout.c +4 -1
  75. data/vendor/liburing/test/defer.c +2 -2
  76. data/vendor/liburing/test/double-poll-crash.c +1 -1
  77. data/vendor/liburing/test/eeed8b54e0df.c +2 -0
  78. data/vendor/liburing/test/eventfd.c +0 -1
  79. data/vendor/liburing/test/exit-no-cleanup.c +11 -0
  80. data/vendor/liburing/test/fadvise.c +9 -26
  81. data/vendor/liburing/test/fdinfo.c +9 -1
  82. data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
  83. data/vendor/liburing/test/file-exit-unreg.c +48 -0
  84. data/vendor/liburing/test/file-register.c +14 -2
  85. data/vendor/liburing/test/file-update.c +1 -1
  86. data/vendor/liburing/test/file-verify.c +27 -16
  87. data/vendor/liburing/test/files-exit-hang-timeout.c +1 -2
  88. data/vendor/liburing/test/fixed-buf-iter.c +3 -1
  89. data/vendor/liburing/test/fixed-hugepage.c +12 -1
  90. data/vendor/liburing/test/fsnotify.c +1 -0
  91. data/vendor/liburing/test/futex.c +16 -4
  92. data/vendor/liburing/test/helpers.c +47 -0
  93. data/vendor/liburing/test/helpers.h +6 -0
  94. data/vendor/liburing/test/init-mem.c +5 -3
  95. data/vendor/liburing/test/io-cancel.c +0 -24
  96. data/vendor/liburing/test/io_uring_passthrough.c +4 -0
  97. data/vendor/liburing/test/io_uring_register.c +38 -8
  98. data/vendor/liburing/test/iopoll-leak.c +4 -0
  99. data/vendor/liburing/test/iopoll-overflow.c +1 -1
  100. data/vendor/liburing/test/iopoll.c +3 -3
  101. data/vendor/liburing/test/kallsyms.c +203 -0
  102. data/vendor/liburing/test/link-timeout.c +159 -0
  103. data/vendor/liburing/test/linked-defer-close.c +224 -0
  104. data/vendor/liburing/test/madvise.c +12 -25
  105. data/vendor/liburing/test/min-timeout-wait.c +0 -25
  106. data/vendor/liburing/test/min-timeout.c +0 -25
  107. data/vendor/liburing/test/mkdir.c +6 -0
  108. data/vendor/liburing/test/msg-ring.c +8 -2
  109. data/vendor/liburing/test/napi-test.c +16 -3
  110. data/vendor/liburing/test/no-mmap-inval.c +3 -1
  111. data/vendor/liburing/test/nop.c +44 -0
  112. data/vendor/liburing/test/ooo-file-unreg.c +1 -1
  113. data/vendor/liburing/test/open-close.c +40 -0
  114. data/vendor/liburing/test/openat2.c +37 -14
  115. data/vendor/liburing/test/poll-many.c +13 -7
  116. data/vendor/liburing/test/poll-mshot-update.c +17 -10
  117. data/vendor/liburing/test/poll-v-poll.c +6 -3
  118. data/vendor/liburing/test/pollfree.c +148 -0
  119. data/vendor/liburing/test/read-mshot-empty.c +158 -153
  120. data/vendor/liburing/test/read-mshot-stdin.c +121 -0
  121. data/vendor/liburing/test/read-mshot.c +282 -27
  122. data/vendor/liburing/test/read-write.c +78 -13
  123. data/vendor/liburing/test/recv-msgall-stream.c +3 -0
  124. data/vendor/liburing/test/recv-msgall.c +5 -0
  125. data/vendor/liburing/test/recvsend_bundle-inc.c +680 -0
  126. data/vendor/liburing/test/recvsend_bundle.c +94 -31
  127. data/vendor/liburing/test/reg-fd-only.c +15 -5
  128. data/vendor/liburing/test/reg-wait.c +251 -0
  129. data/vendor/liburing/test/regbuf-clone.c +645 -0
  130. data/vendor/liburing/test/regbuf-merge.c +7 -0
  131. data/vendor/liburing/test/register-restrictions.c +86 -85
  132. data/vendor/liburing/test/rename.c +59 -1
  133. data/vendor/liburing/test/resize-rings.c +643 -0
  134. data/vendor/liburing/test/ringbuf-read.c +5 -0
  135. data/vendor/liburing/test/ringbuf-status.c +5 -1
  136. data/vendor/liburing/test/rsrc_tags.c +1 -1
  137. data/vendor/liburing/test/runtests.sh +16 -1
  138. data/vendor/liburing/test/send-zerocopy.c +59 -0
  139. data/vendor/liburing/test/short-read.c +1 -0
  140. data/vendor/liburing/test/socket.c +43 -0
  141. data/vendor/liburing/test/splice.c +3 -1
  142. data/vendor/liburing/test/sq-poll-dup.c +1 -1
  143. data/vendor/liburing/test/sq-poll-share.c +2 -0
  144. data/vendor/liburing/test/sqpoll-disable-exit.c +8 -0
  145. data/vendor/liburing/test/sqpoll-exit-hang.c +1 -25
  146. data/vendor/liburing/test/sqpoll-sleep.c +40 -33
  147. data/vendor/liburing/test/sqwait.c +136 -0
  148. data/vendor/liburing/test/statx.c +89 -0
  149. data/vendor/liburing/test/stdout.c +2 -0
  150. data/vendor/liburing/test/submit-and-wait.c +1 -25
  151. data/vendor/liburing/test/submit-reuse.c +4 -26
  152. data/vendor/liburing/test/symlink.c +12 -1
  153. data/vendor/liburing/test/sync-cancel.c +56 -22
  154. data/vendor/liburing/test/thread-exit.c +5 -0
  155. data/vendor/liburing/test/timeout-new.c +1 -26
  156. data/vendor/liburing/test/timeout.c +25 -34
  157. data/vendor/liburing/test/unlink.c +94 -1
  158. data/vendor/liburing/test/uring_cmd_ublk.c +1252 -0
  159. data/vendor/liburing/test/waitid.c +62 -8
  160. data/vendor/liburing/test/wq-aff.c +35 -0
  161. data/vendor/liburing/test/xfail_prep_link_timeout_out_of_scope.c +46 -0
  162. data/vendor/liburing/test/xfail_register_buffers_out_of_scope.c +51 -0
  163. metadata +37 -6
  164. data/examples/event_loop.rb +0 -69
  165. data/examples/fibers.rb +0 -105
  166. data/examples/http_server_multishot.rb +0 -57
  167. data/examples/http_server_simpler.rb +0 -34
data/ext/um/um.c CHANGED
@@ -1,25 +1,14 @@
  #include "um.h"
  #include "ruby/thread.h"
- #include <sys/mman.h>
 
- void um_setup(struct um *machine) {
- machine->ring_initialized = 0;
- machine->unsubmitted_count = 0;
- machine->buffer_ring_count = 0;
- machine->pending_count = 0;
- machine->runqueue_head = NULL;
- machine->runqueue_tail = NULL;
- machine->op_freelist = NULL;
- machine->result_freelist = NULL;
+ void um_setup(VALUE self, struct um *machine) {
+ memset(machine, 0, sizeof(struct um));
+
+ RB_OBJ_WRITE(self, &machine->self, self);
+ RB_OBJ_WRITE(self, &machine->poll_fiber, Qnil);
 
  unsigned prepared_limit = 4096;
- int flags = 0;
- #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
- flags |= IORING_SETUP_SUBMIT_ALL;
- #endif
- #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
- flags |= IORING_SETUP_COOP_TASKRUN;
- #endif
+ unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;
 
  while (1) {
  int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
@@ -46,11 +35,10 @@ inline void um_teardown(struct um *machine) {
  io_uring_queue_exit(&machine->ring);
  machine->ring_initialized = 0;
 
- um_free_op_linked_list(machine, machine->op_freelist);
- um_free_op_linked_list(machine, machine->runqueue_head);
+ um_free_buffer_linked_list(machine);
  }
 
- static inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
+ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
  struct io_uring_sqe *sqe;
  sqe = io_uring_get_sqe(&machine->ring);
  if (likely(sqe)) goto done;
@@ -69,88 +57,43 @@ done:
  sqe->user_data = (long long)op;
  sqe->flags = 0;
  machine->unsubmitted_count++;
+ if (op) machine->pending_count++;
  return sqe;
  }
 
- struct wait_for_cqe_ctx {
- struct um *machine;
- struct io_uring_cqe *cqe;
- int result;
- };
-
- void *um_wait_for_cqe_without_gvl(void *ptr) {
- struct wait_for_cqe_ctx *ctx = ptr;
- if (ctx->machine->unsubmitted_count) {
- ctx->machine->unsubmitted_count = 0;
- ctx->result = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
- }
- else
- ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
- return NULL;
- }
-
- inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
- op->cqe_result = cqe->res;
- op->cqe_flags = cqe->flags;
- op->state = OP_completed;
- um_runqueue_push(machine, op);
- }
-
- inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
- if (!op->results_head) {
- struct um_op *op2 = um_op_checkout(machine);
- op2->state = OP_schedule;
- op2->fiber = op->fiber;
- op2->resume_value = Qnil;
- um_runqueue_push(machine, op2);
- }
- um_op_result_push(machine, op, cqe->res, cqe->flags);
+ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
+ struct um_op *op = (struct um_op *)cqe->user_data;
+ if (unlikely(!op)) return;
 
  if (!(cqe->flags & IORING_CQE_F_MORE))
- op->state = OP_completed;
- }
+ machine->pending_count--;
 
- inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
- struct um_op *op = (struct um_op *)cqe->user_data;
- if (unlikely(!op)) return;
+ // printf(
+ // ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
+ // op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
+ // );
 
- switch (op->state) {
- case OP_submitted:
- if (unlikely(cqe->res == -ECANCELED)) {
- um_op_checkin(machine, op);
- break;
- }
- if (!op->is_multishot)
- um_handle_submitted_op_cqe_single(machine, op, cqe);
- else
- um_handle_submitted_op_cqe_multi(machine, op, cqe);
- break;
- case OP_abandonned:
- // op has been abandonned by the I/O method, so we need to cleanup (check
- // the op in to the free list).
- um_op_checkin(machine, op);
- break;
- default:
- // TODO: invalid state, should raise!
- }
- }
+ if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
 
- static inline void um_wait_for_and_process_cqe(struct um *machine) {
- struct wait_for_cqe_ctx ctx = {
- .machine = machine,
- .cqe = NULL
- };
+ op->flags |= OP_F_COMPLETED;
+ if (unlikely(op->flags & OP_F_TRANSIENT))
+ um_op_transient_remove(machine, op);
 
- rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
- if (unlikely(ctx.result < 0)) {
- rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+ if (op->flags & OP_F_MULTISHOT) {
+ um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
+ if (op->multishot_result_count > 1)
+ return;
  }
- io_uring_cqe_seen(&machine->ring, ctx.cqe);
- um_process_cqe(machine, ctx.cqe);
+ else {
+ op->result.res = cqe->res;
+ op->result.flags = cqe->flags;
+ }
+
+ um_runqueue_push(machine, op);
  }
 
  // copied from liburing/queue.c
- static inline bool cq_ring_needs_flush(struct io_uring *ring) {
+ static inline int cq_ring_needs_flush(struct io_uring *ring) {
  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
  }
 
@@ -180,121 +123,137 @@ done:
  return total_count;
  }
 
- static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
- um_wait_for_and_process_cqe(machine);
- um_process_ready_cqes(machine);
+ struct wait_for_cqe_ctx {
+ struct um *machine;
+ struct io_uring_cqe *cqe;
+ int result;
+ };
+
+ void *um_wait_for_cqe_without_gvl(void *ptr) {
+ struct wait_for_cqe_ctx *ctx = ptr;
+ if (ctx->machine->unsubmitted_count) {
+ ctx->machine->unsubmitted_count = 0;
+
+ // Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
+ // interrupted with a signal. We can detect this by testing ctx->cqe for
+ // NULL.
+ //
+ // https://github.com/axboe/liburing/issues/1280
+ int res = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
+ ctx->result = (res > 0 && !ctx->cqe) ? -EINTR : res;
+ }
+ else
+ ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
+ return NULL;
  }
 
- inline VALUE um_fiber_switch(struct um *machine) {
- struct um_op *op = 0;
- unsigned int first_iteration = 1;
- loop:
- // in the case where:
- // - first time through loop
- // - there are SQEs waiting to be submitted
- // - the runqueue head references the current fiber
- // we need to submit events and check completions without blocking
- if (
- unlikely(
- first_iteration && machine->unsubmitted_count &&
- machine->runqueue_head &&
- machine->runqueue_head->fiber == rb_fiber_current()
- )
- ) {
- io_uring_submit(&machine->ring);
+ static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
+ struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL };
+ rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+
+ if (unlikely(ctx.result < 0 && ctx.result != -EINTR))
+ rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+
+ if (ctx.cqe) {
+ um_process_cqe(machine, ctx.cqe);
+ io_uring_cq_advance(&machine->ring, 1);
  um_process_ready_cqes(machine);
  }
- first_iteration = 0;
+ }
 
- op = um_runqueue_shift(machine);
- if (op) {
- VALUE resume_value = op->resume_value;
- if (op->state == OP_schedule) {
- um_op_checkin(machine, op);
- }
- // the resume value is disregarded, we pass the fiber itself
- VALUE v = rb_fiber_transfer(op->fiber, 1, &resume_value);
- return v;
- }
+ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
+ VALUE fiber = op->fiber;
+ VALUE value = op->value;
+
+ if (unlikely(op->flags & OP_F_TRANSIENT))
+ um_op_free(machine, op);
 
- um_wait_for_and_process_ready_cqes(machine);
- goto loop;
+ return rb_fiber_transfer(fiber, 1, &value);
  }
 
- static inline void um_cancel_op(struct um *machine, struct um_op *op) {
+ inline VALUE um_fiber_switch(struct um *machine) {
+ while (true) {
+ struct um_op *op = um_runqueue_shift(machine);
+ if (op)
+ return process_runqueue_op(machine, op);
+
+ um_wait_for_and_process_ready_cqes(machine);
+ }
+ }
+
+ static inline void um_submit_cancel_op(struct um *machine, struct um_op *op) {
  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
  io_uring_prep_cancel64(sqe, (long long)op, 0);
  }
 
- static inline VALUE um_await_op(struct um *machine, struct um_op *op, int *result, int *flags) {
- op->fiber = rb_fiber_current();
- VALUE v = um_fiber_switch(machine);
- int is_exception = um_value_is_exception_p(v);
-
- if (unlikely(is_exception && op->state == OP_submitted)) {
- um_cancel_op(machine, op);
- op->state = OP_abandonned;
+ inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
+ um_submit_cancel_op(machine, op);
+ while (true) {
+ um_fiber_switch(machine);
+ if (um_op_completed_p(op)) break;
  }
- else {
- // We copy over the CQE result and flags, since the op is immediately
- // checked in.
- if (result) *result = op->cqe_result;
- if (flags) *flags = op->cqe_flags;
- if (!op->is_multishot)
- um_op_checkin(machine, op);
+ }
+
+ inline int um_check_completion(struct um *machine, struct um_op *op) {
+ if (!um_op_completed_p(op)) {
+ um_cancel_and_wait(machine, op);
+ return 0;
  }
 
- if (unlikely(is_exception)) um_raise_exception(v);
- return v;
+ um_raise_on_error_result(op->result.res);
+ return 1;
  }
 
  inline VALUE um_await(struct um *machine) {
  VALUE v = um_fiber_switch(machine);
- return um_value_is_exception_p(v) ? um_raise_exception(v) : v;
+ return raise_if_exception(v);
  }
 
- inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
- struct um_op *op = um_op_checkout(machine);
- op->state = OP_schedule;
- op->fiber = fiber;
- op->resume_value = value;
- um_runqueue_push(machine, op);
+ inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind) {
+ memset(op, 0, sizeof(struct um_op));
+ op->kind = kind;
+ switch (kind) {
+ case OP_ACCEPT_MULTISHOT:
+ case OP_READ_MULTISHOT:
+ case OP_RECV_MULTISHOT:
+ op->flags |= OP_F_MULTISHOT;
+ default:
+ }
+ RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+ op->value = Qnil;
  }
 
- inline void um_interrupt(struct um *machine, VALUE fiber, VALUE value) {
- struct um_op *op = um_runqueue_find_by_fiber(machine, fiber);
- if (op) {
- op->state = OP_cancelled;
- op->resume_value = value;
- }
- else {
- op = um_op_checkout(machine);
- op->state = OP_schedule;
- op->fiber = fiber;
- op->resume_value = value;
- um_runqueue_unshift(machine, op);
- }
+ inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
+ struct um_op *op = um_op_alloc(machine);
+ memset(op, 0, sizeof(struct um_op));
+ op->kind = OP_SCHEDULE;
+ op->flags = OP_F_TRANSIENT;
+ RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
+ RB_OBJ_WRITE(machine->self, &op->value, value);
+ um_runqueue_push(machine, op);
  }
 
- struct op_ensure_ctx {
+ struct op_ctx {
  struct um *machine;
  struct um_op *op;
+ int fd;
  int bgid;
+
+ void *read_buf;
+ int read_maxlen;
+ struct __kernel_timespec ts;
+ int flags;
  };
 
  VALUE um_timeout_ensure(VALUE arg) {
- struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+ struct op_ctx *ctx = (struct op_ctx *)arg;
 
- if (ctx->op->state == OP_submitted) {
- // A CQE has not yet been received, we cancel the timeout and abandon the op
- // (it will be checked in upon receiving the -ECANCELED CQE)
- um_cancel_op(ctx->machine, ctx->op);
- ctx->op->state == OP_abandonned;
- }
- else {
- // completed, so can be checked in
- um_op_checkin(ctx->machine, ctx->op);
+ if (!um_op_completed_p(ctx->op)) {
+ um_submit_cancel_op(ctx->machine, ctx->op);
+ ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
+ um_op_transient_add(ctx->machine, ctx->op);
  }
+
  return Qnil;
  }
 
@@ -302,156 +261,443 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
  static ID ID_new = 0;
  if (!ID_new) ID_new = rb_intern("new");
 
- struct um_op *op = um_op_checkout(machine);
+ struct um_op *op = malloc(sizeof(struct um_op));
+ um_prep_op(machine, op, OP_TIMEOUT);
  op->ts = um_double_to_timespec(NUM2DBL(interval));
+ RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+ RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
 
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
- op->state = OP_submitted;
- op->fiber = rb_fiber_current();
- op->resume_value = rb_funcall(class, ID_new, 0);
 
- struct op_ensure_ctx ctx = { .machine = machine, .op = op };
+ struct op_ctx ctx = { .machine = machine, .op = op };
  return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
  }
 
- inline VALUE um_sleep(struct um *machine, double duration) {
- struct um_op *op = um_op_checkout(machine);
- op->ts = um_double_to_timespec(duration);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
- int result = 0;
+ /*******************************************************************************
+ blocking singleshot ops
+ *******************************************************************************/
 
- io_uring_prep_timeout(sqe, &op->ts, 0, 0);
- op->state = OP_submitted;
+ VALUE um_sleep(struct um *machine, double duration) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_SLEEP);
+ op.ts = um_double_to_timespec(duration);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_timeout(sqe, &op.ts, 0, 0);
+ VALUE ret = um_fiber_switch(machine);
+
+ if (!um_op_completed_p(&op))
+ um_cancel_and_wait(machine, &op);
+ else {
+ if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
+ ret = DBL2NUM(duration);
+ }
 
- return um_await_op(machine, op, &result, NULL);
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
  }
 
  inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
- struct um_op *op = um_op_checkout(machine);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
- int result = 0;
- int flags = 0;
-
+ struct um_op op;
+ um_prep_op(machine, &op, OP_READ);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
  void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
  io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
- op->state = OP_submitted;
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op)) {
+ um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
+ ret = INT2NUM(op.result.res);
+
+ }
+
+ RB_GC_GUARD(buffer);
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
 
- um_await_op(machine, op, &result, &flags);
+ VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_WRITE);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ const int str_len = RSTRING_LEN(str);
+ if (len > str_len) len = str_len;
 
- um_raise_on_system_error(result);
- um_update_read_buffer(machine, buffer, buffer_offset, result, flags);
- return INT2FIX(result);
+ io_uring_prep_write(sqe, fd, RSTRING_PTR(str), len, -1);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(str);
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
  }
 
- VALUE um_multishot_ensure(VALUE arg) {
- struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
- switch (ctx->op->state) {
- case OP_submitted:
- um_cancel_op(ctx->machine, ctx->op);
- break;
- case OP_completed:
- um_op_checkin(ctx->machine, ctx->op);
- break;
- default:
+ VALUE um_close(struct um *machine, int fd) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_CLOSE);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_close(sqe, fd);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(fd);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_accept(struct um *machine, int fd) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_ACCEPT);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_SOCKET);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_socket(sqe, domain, type, protocol, flags);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_CONNECT);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_connect(sqe, fd, addr, addrlen);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_SEND);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(buffer);
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_RECV);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
+ io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op)) {
+ um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
+ ret = INT2NUM(op.result.res);
  }
- return Qnil;
+
+ RB_GC_GUARD(buffer);
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
  }
 
- VALUE um_read_each_safe_loop(VALUE arg) {
- struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
- int result = 0;
- int flags = 0;
- int total = 0;
+ VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_BIND);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_bind(sqe, fd, addr, addrlen);
 
- while (1) {
- um_await_op(ctx->machine, ctx->op, NULL, NULL);
- if (!ctx->op->results_head) {
- // TODO: raise, this shouldn't happen
- printf("no result found!\n");
- }
- while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
- if (likely(result > 0)) {
- total += result;
- VALUE buf = get_string_from_buffer_ring(ctx->machine, ctx->bgid, result, flags);
- rb_yield(buf);
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_listen(struct um *machine, int fd, int backlog) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_BIND);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_listen(sqe, fd, backlog);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
+ VALUE ret = Qnil;
+ int value;
+
+ #ifdef HAVE_IO_URING_PREP_CMD_SOCK
+ struct um_op op;
+ um_prep_op(machine, &op, OP_GETSOCKOPT);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
+
+ ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(value);
+ #else
+ socklen_t nvalue = sizeof(value);
+ int res = getsockopt(fd, level, opt, &value, &nvalue);
+ if (res)
+ rb_syserr_fail(errno, strerror(errno));
+ ret = INT2NUM(value);
+ #endif
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
+ VALUE ret = Qnil;
+
+ #ifdef HAVE_IO_URING_PREP_CMD_SOCK
+ struct um_op op;
+ um_prep_op(machine, &op, OP_GETSOCKOPT);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
+
+ ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+ #else
+ int res = setsockopt(fd, level, opt, &value, sizeof(value));
+ if (res)
+ rb_syserr_fail(errno, strerror(errno));
+ ret = INT2NUM(0);
+ #endif
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_BIND);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ return raise_if_exception(ret);
+ }
+
+ VALUE um_waitpid(struct um *machine, int pid, int options) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_BIND);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+
+ siginfo_t infop;
+ io_uring_prep_waitid(sqe, P_PID, pid, &infop, options, 0);
+
+ VALUE ret = um_fiber_switch(machine);
+ if (um_check_completion(machine, &op))
+ ret = INT2NUM(op.result.res);
+
+ RB_GC_GUARD(ret);
+ raise_if_exception(ret);
+
+ return rb_ary_new_from_args(2, INT2NUM(infop.si_pid), INT2NUM(infop.si_status));
+ }
+
+ /*******************************************************************************
+ multishot ops
+ *******************************************************************************/
+
+ VALUE accept_each_begin(VALUE arg) {
+ struct op_ctx *ctx = (struct op_ctx *)arg;
+ struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+ io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
+
+ while (true) {
+ VALUE ret = um_fiber_switch(ctx->machine);
+ if (!um_op_completed_p(ctx->op))
+ return raise_if_exception(ret);
+
+ int more = false;
+ struct um_op_result *result = &ctx->op->result;
+ while (result) {
+ more = (result->flags & IORING_CQE_F_MORE);
+ if (result->res < 0) {
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ return Qnil;
  }
- else
- return INT2FIX(total);
+ rb_yield(INT2NUM(result->res));
+ result = result->next;
  }
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ if (more)
+ ctx->op->flags &= ~OP_F_COMPLETED;
+ else
+ break;
  }
+
+ return Qnil;
  }
 
- VALUE um_read_each(struct um *machine, int fd, int bgid) {
- struct um_op *op = um_op_checkout(machine);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+ VALUE multishot_ensure(VALUE arg) {
+ struct op_ctx *ctx = (struct op_ctx *)arg;
+ if (ctx->op->multishot_result_count) {
+ int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
+ if (more)
+ ctx->op->flags &= ~OP_F_COMPLETED;
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ }
+ if (!um_op_completed_p(ctx->op))
+ um_cancel_and_wait(ctx->machine, ctx->op);
 
- op->is_multishot = 1;
- io_uring_prep_read_multishot(sqe, fd, 0, -1, bgid);
- op->state = OP_submitted;
+ if (ctx->read_buf)
+ free(ctx->read_buf);
 
- struct op_ensure_ctx ctx = { .machine = machine, .op = op, .bgid = bgid };
- return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+ return Qnil;
  }
 
- VALUE um_write(struct um *machine, int fd, VALUE buffer, int len) {
- struct um_op *op = um_op_checkout(machine);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
- int result = 0;
- int flags = 0;
+ VALUE um_accept_each(struct um *machine, int fd) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
+
+ struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
+ return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+ }
 
- io_uring_prep_write(sqe, fd, RSTRING_PTR(buffer), len, -1);
- op->state = OP_submitted;
+ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
+ struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
+ ctx->read_maxlen = desc->buf_size;
+ ctx->read_buf = malloc(desc->buf_size);
+ int total = 0;
 
- um_await_op(machine, op, &result, &flags);
- um_raise_on_system_error(result);
- return INT2FIX(result);
+ while (1) {
+ um_prep_op(ctx->machine, ctx->op, OP_READ);
+ struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+ io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
+
+ VALUE ret = um_fiber_switch(ctx->machine);
+ if (um_op_completed_p(ctx->op)) {
+ um_raise_on_error_result(ctx->op->result.res);
+ if (!ctx->op->result.res) return total;
+
+ VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
+ total += ctx->op->result.res;
+ rb_yield(buf);
+ RB_GC_GUARD(buf);
+ }
+ else
+ return raise_if_exception(ret);
+ }
  }
 
- VALUE um_accept(struct um *machine, int fd) {
- struct um_op *op = um_op_checkout(machine);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
- struct sockaddr addr;
- socklen_t len;
- int result = 0;
- int flags = 0;
- io_uring_prep_accept(sqe, fd, &addr, &len, 0);
- op->state = OP_submitted;
+ // // returns true if more results are expected
+ int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
+ if (result->res == 0)
+ return false;
+
+ *total += result->res;
+ VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result->res, result->flags);
+ rb_yield(buf);
+ RB_GC_GUARD(buf);
+
+ // TTY devices might not support multishot reads:
+ // https://github.com/axboe/liburing/issues/1185. We detect this by checking
+ // if the F_MORE flag is absent, then switch to single shot mode.
+ if (unlikely(!(result->flags & IORING_CQE_F_MORE))) {
+ *total += um_read_each_singleshot_loop(ctx);
+ return false;
+ }
 
- um_await_op(machine, op, &result, &flags);
- um_raise_on_system_error(result);
- return INT2FIX(result);
+ return true;
  }
 
- VALUE um_accept_each_safe_loop(VALUE arg) {
- struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
- int result = 0;
- int flags = 0;
+ void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
+ switch (ctx->op->kind) {
+ case OP_READ_MULTISHOT:
+ io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
+ return;
+ case OP_RECV_MULTISHOT:
+ io_uring_prep_recv_multishot(sqe, ctx->fd, NULL, 0, 0);
+ sqe->buf_group = ctx->bgid;
+ sqe->flags |= IOSQE_BUFFER_SELECT;
+ return;
+ default:
+ return;
+ }
+ }
 
- while (1) {
- um_await_op(ctx->machine, ctx->op, NULL, NULL);
- if (!ctx->op->results_head) {
- // TODO: raise, this shouldn't happen
- printf("no result found!\n");
- }
- while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
- if (likely(result > 0))
- rb_yield(INT2FIX(result));
- else
+ VALUE read_recv_each_begin(VALUE arg) {
+ struct op_ctx *ctx = (struct op_ctx *)arg;
+ struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+ read_recv_each_prep(sqe, ctx);
+ int total = 0;
+
+ while (true) {
+ VALUE ret = um_fiber_switch(ctx->machine);
+ if (!um_op_completed_p(ctx->op))
+ return raise_if_exception(ret);
+
+ int more = false;
+ struct um_op_result *result = &ctx->op->result;
+ while (result) {
+ um_raise_on_error_result(result->res);
+
+ more = (result->flags & IORING_CQE_F_MORE);
+ if (!read_recv_each_multishot_process_result(ctx, result, &total))
  return Qnil;
+
+ // rb_yield(INT2NUM(result->res));
+ result = result->next;
  }
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ if (more)
+ ctx->op->flags &= ~OP_F_COMPLETED;
+ else
+ break;
  }
+
+ return Qnil;
  }
 
- VALUE um_accept_each(struct um *machine, int fd) {
- struct um_op *op = um_op_checkout(machine);
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
- struct sockaddr addr;
- socklen_t len;
- io_uring_prep_multishot_accept(sqe, fd, &addr, &len, 0);
- op->state = OP_submitted;
- op->is_multishot = 1;
-
- struct op_ensure_ctx ctx = { .machine = machine, .op = op };
- return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+ VALUE um_read_each(struct um *machine, int fd, int bgid) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_READ_MULTISHOT);
+
+ struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
+ return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+ }
+
+ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_RECV_MULTISHOT);
+
+ struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
+ return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
  }
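
Note on the refactor above: every blocking singleshot op added in 0.5 follows the same lifecycle, visible in um_close, um_accept, um_connect, etc.: prepare a stack-allocated um_op, obtain an SQE, switch fibers until a CQE resumes the op, then check completion (which cancels and waits if the fiber was resumed early) and raise on error. Below is a minimal sketch of that lifecycle for a hypothetical new op, using only helpers that appear in this diff; OP_FSYNC and um_fsync_example are illustrative names, not part of um.c.

// Sketch only, assuming the pattern of um_close/um_accept above.
// io_uring_prep_fsync is a standard liburing prep call.
VALUE um_fsync_example(struct um *machine, int fd) {
  struct um_op op;
  um_prep_op(machine, &op, OP_FSYNC);    // hypothetical op kind: zero the op, record fiber

  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
  io_uring_prep_fsync(sqe, fd, 0);       // queue the fsync SQE

  VALUE ret = um_fiber_switch(machine);  // run other fibers until a CQE (or interrupt) resumes us
  if (um_check_completion(machine, &op)) // if interrupted, cancels the op and waits for -ECANCELED
    ret = INT2NUM(op.result.res);

  RB_GC_GUARD(ret);
  return raise_if_exception(ret);        // re-raise if we were resumed with an exception
}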