uringmachine 0.4 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +2 -1
  3. data/CHANGELOG.md +16 -0
  4. data/README.md +44 -1
  5. data/TODO.md +12 -3
  6. data/examples/bm_snooze.rb +89 -0
  7. data/examples/bm_sqlite.rb +89 -0
  8. data/examples/bm_write.rb +56 -0
  9. data/examples/dns_client.rb +12 -0
  10. data/examples/http_server.rb +42 -43
  11. data/examples/pg.rb +85 -0
  12. data/examples/server_client.rb +64 -0
  13. data/examples/snooze.rb +44 -0
  14. data/examples/stream.rb +85 -0
  15. data/examples/write_dev_null.rb +16 -0
  16. data/ext/um/extconf.rb +81 -14
  17. data/ext/um/um.c +468 -414
  18. data/ext/um/um.h +149 -40
  19. data/ext/um/um_async_op.c +40 -0
  20. data/ext/um/um_async_op_class.c +136 -0
  21. data/ext/um/um_buffer.c +49 -0
  22. data/ext/um/um_class.c +176 -44
  23. data/ext/um/um_const.c +174 -9
  24. data/ext/um/um_ext.c +8 -0
  25. data/ext/um/um_mutex_class.c +47 -0
  26. data/ext/um/um_op.c +89 -111
  27. data/ext/um/um_queue_class.c +58 -0
  28. data/ext/um/um_ssl.c +850 -0
  29. data/ext/um/um_ssl.h +22 -0
  30. data/ext/um/um_ssl_class.c +138 -0
  31. data/ext/um/um_sync.c +273 -0
  32. data/ext/um/um_utils.c +1 -1
  33. data/lib/uringmachine/dns_resolver.rb +84 -0
  34. data/lib/uringmachine/ssl/context_builder.rb +96 -0
  35. data/lib/uringmachine/ssl.rb +394 -0
  36. data/lib/uringmachine/version.rb +1 -1
  37. data/lib/uringmachine.rb +27 -3
  38. data/supressions/ruby.supp +71 -0
  39. data/test/helper.rb +6 -0
  40. data/test/test_async_op.rb +119 -0
  41. data/test/test_ssl.rb +155 -0
  42. data/test/test_um.rb +464 -47
  43. data/uringmachine.gemspec +3 -2
  44. data/vendor/liburing/.gitignore +5 -0
  45. data/vendor/liburing/CHANGELOG +1 -0
  46. data/vendor/liburing/configure +32 -0
  47. data/vendor/liburing/examples/Makefile +1 -0
  48. data/vendor/liburing/examples/reg-wait.c +159 -0
  49. data/vendor/liburing/liburing.spec +1 -1
  50. data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
  51. data/vendor/liburing/src/include/liburing.h +28 -2
  52. data/vendor/liburing/src/int_flags.h +10 -3
  53. data/vendor/liburing/src/liburing-ffi.map +13 -2
  54. data/vendor/liburing/src/liburing.map +9 -0
  55. data/vendor/liburing/src/queue.c +25 -16
  56. data/vendor/liburing/src/register.c +73 -4
  57. data/vendor/liburing/src/setup.c +46 -18
  58. data/vendor/liburing/src/setup.h +6 -0
  59. data/vendor/liburing/test/Makefile +7 -0
  60. data/vendor/liburing/test/cmd-discard.c +427 -0
  61. data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
  62. data/vendor/liburing/test/file-exit-unreg.c +48 -0
  63. data/vendor/liburing/test/io_uring_passthrough.c +2 -0
  64. data/vendor/liburing/test/io_uring_register.c +13 -2
  65. data/vendor/liburing/test/napi-test.c +1 -1
  66. data/vendor/liburing/test/no-mmap-inval.c +1 -1
  67. data/vendor/liburing/test/read-mshot-empty.c +2 -0
  68. data/vendor/liburing/test/read-mshot-stdin.c +121 -0
  69. data/vendor/liburing/test/read-mshot.c +6 -0
  70. data/vendor/liburing/test/recvsend_bundle.c +2 -2
  71. data/vendor/liburing/test/reg-fd-only.c +1 -1
  72. data/vendor/liburing/test/reg-wait.c +251 -0
  73. data/vendor/liburing/test/regbuf-clone.c +458 -0
  74. data/vendor/liburing/test/resize-rings.c +643 -0
  75. data/vendor/liburing/test/rsrc_tags.c +1 -1
  76. data/vendor/liburing/test/sqpoll-sleep.c +39 -8
  77. data/vendor/liburing/test/sqwait.c +136 -0
  78. data/vendor/liburing/test/sync-cancel.c +8 -1
  79. data/vendor/liburing/test/timeout.c +13 -8
  80. metadata +52 -8
  81. data/examples/http_server_multishot.rb +0 -57
  82. data/examples/http_server_simpler.rb +0 -34
data/ext/um/um.c CHANGED
@@ -1,24 +1,13 @@
  #include "um.h"
  #include "ruby/thread.h"

- void um_setup(struct um *machine) {
-   machine->ring_initialized = 0;
-   machine->unsubmitted_count = 0;
-   machine->buffer_ring_count = 0;
-   machine->pending_count = 0;
-   machine->runqueue_head = NULL;
-   machine->runqueue_tail = NULL;
-   machine->op_freelist = NULL;
-   machine->result_freelist = NULL;
+ void um_setup(VALUE self, struct um *machine) {
+   memset(machine, 0, sizeof(struct um));
+
+   RB_OBJ_WRITE(self, &machine->self, self);

    unsigned prepared_limit = 4096;
-   unsigned flags = 0;
- #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
-   flags |= IORING_SETUP_SUBMIT_ALL;
- #endif
- #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
-   flags |= IORING_SETUP_COOP_TASKRUN;
- #endif
+   unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;

    while (1) {
      int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
@@ -45,11 +34,10 @@ inline void um_teardown(struct um *machine) {
    io_uring_queue_exit(&machine->ring);
    machine->ring_initialized = 0;

-   um_free_op_linked_list(machine, machine->op_freelist);
-   um_free_op_linked_list(machine, machine->runqueue_head);
+   um_free_buffer_linked_list(machine);
  }

- static inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
+ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
    struct io_uring_sqe *sqe;
    sqe = io_uring_get_sqe(&machine->ring);
    if (likely(sqe)) goto done;
@@ -68,92 +56,44 @@ done:
    sqe->user_data = (long long)op;
    sqe->flags = 0;
    machine->unsubmitted_count++;
+   if (op) machine->pending_count++;
    return sqe;
  }

- struct wait_for_cqe_ctx {
-   struct um *machine;
-   struct io_uring_cqe *cqe;
-   int result;
- };
-
- void *um_wait_for_cqe_without_gvl(void *ptr) {
-   struct wait_for_cqe_ctx *ctx = ptr;
-   if (ctx->machine->unsubmitted_count) {
-     ctx->machine->unsubmitted_count = 0;
-     ctx->result = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
-   }
-   else
-     ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
-   return NULL;
- }
-
- inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
-   op->cqe_result = cqe->res;
-   op->cqe_flags = cqe->flags;
-   op->state = OP_completed;
-   um_runqueue_push(machine, op);
- }
-
- inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
-   if (!op->results_head) {
-     // if no results are ready yet, schedule the corresponding fiber
-     struct um_op *op2 = um_op_checkout(machine);
-     op2->state = OP_schedule;
-     op2->fiber = op->fiber;
-     op2->resume_value = Qnil;
-     um_runqueue_push(machine, op2);
-   }
-   um_op_result_push(machine, op, cqe->res, cqe->flags);
-
-   if (!(cqe->flags & IORING_CQE_F_MORE))
-     op->state = OP_completed;
- }
-
- inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
+ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
    struct um_op *op = (struct um_op *)cqe->user_data;
    if (unlikely(!op)) return;

-   // if (op->is_multishot)
-   //   printf("process_cqe %p state: %d result: %d flags: %d (%d)\n", op, op->state, cqe->res, cqe->flags, (cqe->flags & IORING_CQE_F_MORE));
+   if (!(cqe->flags & IORING_CQE_F_MORE))
+     machine->pending_count--;

-   switch (op->state) {
-     case OP_submitted:
-       if (unlikely(cqe->res == -ECANCELED)) {
-         um_op_checkin(machine, op);
-         break;
-       }
-       if (op->is_multishot)
-         um_handle_submitted_op_cqe_multi(machine, op, cqe);
-       else
-         um_handle_submitted_op_cqe_single(machine, op, cqe);
-       break;
-     case OP_abandonned:
-       // op has been abandonned by the I/O method, so we need to cleanup (check
-       // the op in to the free list).
-       um_op_checkin(machine, op);
-       break;
-     default:
-       // TODO: invalid state, should raise!
-   }
- }
+   // printf(
+   //   ":process_cqe op %p kind %d flags %d cqe_res %d cqe_flags %d pending %d\n",
+   //   op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
+   // );

- static inline void um_wait_for_and_process_cqe(struct um *machine) {
-   struct wait_for_cqe_ctx ctx = {
-     .machine = machine,
-     .cqe = NULL
-   };
+   if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;

-   rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
-   if (unlikely(ctx.result < 0)) {
-     rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+   op->flags |= OP_F_COMPLETED;
+   if (op->flags & OP_F_TRANSIENT)
+     um_op_transient_remove(machine, op);
+
+   if (op->flags & OP_F_MULTISHOT) {
+     um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
+     if (op->multishot_result_count > 1)
+       return;
    }
-   io_uring_cqe_seen(&machine->ring, ctx.cqe);
-   um_process_cqe(machine, ctx.cqe);
+   else {
+     op->result.res = cqe->res;
+     op->result.flags = cqe->flags;
+   }
+
+   if (!(op->flags & OP_F_ASYNC))
+     um_runqueue_push(machine, op);
  }

  // copied from liburing/queue.c
- static inline bool cq_ring_needs_flush(struct io_uring *ring) {
+ static inline int cq_ring_needs_flush(struct io_uring *ring) {
    return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
  }

@@ -183,101 +123,117 @@ done:
    return total_count;
  }

- static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
-   um_wait_for_and_process_cqe(machine);
-   um_process_ready_cqes(machine);
+ struct wait_for_cqe_ctx {
+   struct um *machine;
+   struct io_uring_cqe *cqe;
+   int result;
+ };
+
+ void *um_wait_for_cqe_without_gvl(void *ptr) {
+   struct wait_for_cqe_ctx *ctx = ptr;
+   if (ctx->machine->unsubmitted_count) {
+     ctx->machine->unsubmitted_count = 0;
+
+     // Attn: The io_uring_submit_and_wait_timeout will not return -EINTR if
+     // interrupted with a signal. We can detect this by testing ctx->cqe for
+     // NULL.
+     //
+     // https://github.com/axboe/liburing/issues/1280
+     int res = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, 1, NULL, NULL);
+     ctx->result = (res > 0 && !ctx->cqe) ? -EINTR : res;
+   }
+   else
+     ctx->result = io_uring_wait_cqe(&ctx->machine->ring, &ctx->cqe);
+   return NULL;
  }

- inline VALUE um_fiber_switch(struct um *machine) {
-   struct um_op *op = 0;
-   unsigned int first_iteration = 1;
- loop:
-   // in the case where:
-   // - first time through loop
-   // - there are SQEs waiting to be submitted
-   // - the runqueue head references the current fiber
-   // we need to submit events and check completions without blocking
-   if (
-     unlikely(
-       first_iteration && machine->unsubmitted_count &&
-       machine->runqueue_head &&
-       machine->runqueue_head->fiber == rb_fiber_current()
-     )
-   ) {
-     io_uring_submit(&machine->ring);
+ static inline void um_wait_for_and_process_ready_cqes(struct um *machine) {
+   struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL };
+   rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+
+   if (unlikely(ctx.result < 0 && ctx.result != -EINTR))
+     rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+
+   if (ctx.cqe) {
+     um_process_cqe(machine, ctx.cqe);
+     io_uring_cq_advance(&machine->ring, 1);
      um_process_ready_cqes(machine);
    }
-   first_iteration = 0;
+ }

-   op = um_runqueue_shift(machine);
-   if (op) {
-     VALUE resume_value = op->resume_value;
-     if (op->state == OP_schedule)
-       um_op_checkin(machine, op);
+ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
+   VALUE fiber = op->fiber;
+   VALUE value = op->value;

-     // the resume value is disregarded, we pass the fiber itself
-     VALUE v = rb_fiber_transfer(op->fiber, 1, &resume_value);
-     return v;
-   }
+   if (unlikely(op->flags & OP_F_TRANSIENT))
+     um_op_free(machine, op);

-   um_wait_for_and_process_ready_cqes(machine);
-   goto loop;
+   return rb_fiber_transfer(fiber, 1, &value);
  }

- static inline void um_cancel_op(struct um *machine, struct um_op *op) {
+ inline VALUE um_fiber_switch(struct um *machine) {
+   while (true) {
+     struct um_op *op = um_runqueue_shift(machine);
+     if (op)
+       return process_runqueue_op(machine, op);
+
+     um_wait_for_and_process_ready_cqes(machine);
+   }
+ }
+
+ void um_submit_cancel_op(struct um *machine, struct um_op *op) {
    struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
    io_uring_prep_cancel64(sqe, (long long)op, 0);
  }

- static inline VALUE um_await_op(struct um *machine, struct um_op *op, __s32 *result, __u32 *flags) {
-   op->fiber = rb_fiber_current();
-   VALUE v = um_fiber_switch(machine);
-   int is_exception = um_value_is_exception_p(v);
-
-   if (unlikely(is_exception && op->state == OP_submitted)) {
-     um_cancel_op(machine, op);
-     op->state = OP_abandonned;
+ inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
+   um_submit_cancel_op(machine, op);
+   while (true) {
+     um_fiber_switch(machine);
+     if (um_op_completed_p(op)) break;
    }
-   else {
-     // We copy over the CQE result and flags, since the op is immediately
-     // checked in.
-     if (result) *result = op->cqe_result;
-     if (flags) *flags = op->cqe_flags;
+ }
+
+ inline int um_check_completion(struct um *machine, struct um_op *op) {
+   if (!um_op_completed_p(op)) {
+     um_cancel_and_wait(machine, op);
+     return 0;
    }

-   if (unlikely(is_exception)) um_raise_exception(v);
-   return v;
+   um_raise_on_error_result(op->result.res);
+   return 1;
  }

  inline VALUE um_await(struct um *machine) {
    VALUE v = um_fiber_switch(machine);
-   return um_value_is_exception_p(v) ? um_raise_exception(v) : v;
+   return raise_if_exception(v);
  }

- inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
-   struct um_op *op = um_op_checkout(machine);
-   op->state = OP_schedule;
-   op->fiber = fiber;
-   op->resume_value = value;
-   um_runqueue_push(machine, op);
+ inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind) {
+   memset(op, 0, sizeof(struct um_op));
+   op->kind = kind;
+   switch (kind) {
+     case OP_ACCEPT_MULTISHOT:
+     case OP_READ_MULTISHOT:
+     case OP_RECV_MULTISHOT:
+       op->flags |= OP_F_MULTISHOT;
+     default:
+   }
+   RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+   op->value = Qnil;
  }

- inline void um_interrupt(struct um *machine, VALUE fiber, VALUE value) {
-   struct um_op *op = um_runqueue_find_by_fiber(machine, fiber);
-   if (op) {
-     op->state = OP_cancelled;
-     op->resume_value = value;
-   }
-   else {
-     op = um_op_checkout(machine);
-     op->state = OP_schedule;
-     op->fiber = fiber;
-     op->resume_value = value;
-     um_runqueue_unshift(machine, op);
-   }
+ inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
+   struct um_op *op = um_op_alloc(machine);
+   memset(op, 0, sizeof(struct um_op));
+   op->kind = OP_SCHEDULE;
+   op->flags = OP_F_TRANSIENT;
+   RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
+   RB_OBJ_WRITE(machine->self, &op->value, value);
+   um_runqueue_push(machine, op);
  }

- struct op_ensure_ctx {
+ struct op_ctx {
    struct um *machine;
    struct um_op *op;
    int fd;
@@ -285,20 +241,18 @@ struct op_ensure_ctx {

    void *read_buf;
    int read_maxlen;
+   struct __kernel_timespec ts;
+   int flags;
  };

  VALUE um_timeout_ensure(VALUE arg) {
-   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+   struct op_ctx *ctx = (struct op_ctx *)arg;

-   if (ctx->op->state == OP_submitted) {
-     // A CQE has not yet been received, we cancel the timeout and abandon the op
-     // (it will be checked in upon receiving the -ECANCELED CQE)
-     um_cancel_op(ctx->machine, ctx->op);
-     ctx->op->state == OP_abandonned;
+   if (!um_op_completed_p(ctx->op)) {
+     um_submit_cancel_op(ctx->machine, ctx->op);
+     ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
+     um_op_transient_add(ctx->machine, ctx->op);
    }
-   else
-     // completed, so can be checked in
-     um_op_checkin(ctx->machine, ctx->op);

    return Qnil;
  }
@@ -307,343 +261,443 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
    static ID ID_new = 0;
    if (!ID_new) ID_new = rb_intern("new");

-   struct um_op *op = um_op_checkout(machine);
+   struct um_op *op = um_op_alloc(machine);
+   um_prep_op(machine, op, OP_TIMEOUT);
    op->ts = um_double_to_timespec(NUM2DBL(interval));
+   RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
+   RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));

    struct io_uring_sqe *sqe = um_get_sqe(machine, op);
    io_uring_prep_timeout(sqe, &op->ts, 0, 0);
-   op->state = OP_submitted;
-   op->fiber = rb_fiber_current();
-   op->resume_value = rb_funcall(class, ID_new, 0);

-   struct op_ensure_ctx ctx = { .machine = machine, .op = op };
+   struct op_ctx ctx = { .machine = machine, .op = op };
    return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
  }

- inline void discard_op_if_completed(struct um *machine, struct um_op *op) {
-   if (op->state == OP_completed) um_op_checkin(machine, op);
- }
+ /*******************************************************************************
+ blocking singleshot ops
+ *******************************************************************************/

- inline VALUE um_sleep(struct um *machine, double duration) {
-   struct um_op *op = um_op_checkout(machine);
-   op->ts = um_double_to_timespec(duration);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   __s32 result = 0;
-
-   io_uring_prep_timeout(sqe, &op->ts, 0, 0);
-   op->state = OP_submitted;
+ VALUE um_sleep(struct um *machine, double duration) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_SLEEP);
+   op.ts = um_double_to_timespec(duration);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_timeout(sqe, &op.ts, 0, 0);
+   VALUE ret = um_fiber_switch(machine);

-   um_await_op(machine, op, &result, NULL);
+   if (!um_op_completed_p(&op))
+     um_cancel_and_wait(machine, &op);
+   else {
+     if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
+     ret = DBL2NUM(duration);
+   }

-   discard_op_if_completed(machine, op);
-   if (result != -ETIME) um_raise_on_system_error(result);
-   return Qnil;
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

  inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   __s32 result = 0;
-   __u32 flags = 0;
-
+   struct um_op op;
+   um_prep_op(machine, &op, OP_READ);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
    void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
    io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
-   op->state = OP_submitted;
+
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op)) {
+     um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
+     ret = INT2NUM(op.result.res);
+
+   }

-   um_await_op(machine, op, &result, &flags);
+   RB_GC_GUARD(buffer);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
+ }

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   um_update_read_buffer(machine, buffer, buffer_offset, result, flags);
-   return INT2FIX(result);
+ VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_WRITE);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   const int str_len = RSTRING_LEN(str);
+   if (len > str_len) len = str_len;
+
+   io_uring_prep_write(sqe, fd, RSTRING_PTR(str), len, -1);
+
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);
+
+   RB_GC_GUARD(str);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_multishot_ensure(VALUE arg) {
-   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+ VALUE um_close(struct um *machine, int fd) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_CLOSE);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_close(sqe, fd);

-   switch (ctx->op->state) {
-     case OP_submitted:
-       um_cancel_op(ctx->machine, ctx->op);
-       break;
-     case OP_completed:
-       um_op_checkin(ctx->machine, ctx->op);
-       break;
-     default:
-   }
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(fd);

-   if (ctx->read_buf) free(ctx->read_buf);
-   return Qnil;
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- static inline void um_read_each_prepare_op(struct op_ensure_ctx *ctx, int singleshot_mode) {
-   struct um_op *op = um_op_checkout(ctx->machine);
-   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, op);
+ VALUE um_accept(struct um *machine, int fd) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_ACCEPT);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_accept(sqe, fd, NULL, NULL, 0);

-   if (singleshot_mode)
-     io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
-   else {
-     io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
-     op->is_multishot = 1;
-   }
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);

-   op->state = OP_submitted;
-   ctx->op = op;
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- int um_read_each_safe_loop_singleshot(struct op_ensure_ctx *ctx, int total) {
-   struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
-   __s32 result = 0;
-   ctx->read_maxlen = desc->buf_size;
-   ctx->read_buf = malloc(desc->buf_size);
+ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_SOCKET);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_socket(sqe, domain, type, protocol, flags);

-   while (1) {
-     um_read_each_prepare_op(ctx, 1);
-     um_await_op(ctx->machine, ctx->op, &result, NULL);
-     um_raise_on_system_error(result);
-     if (!result) return total;
-
-     total += result;
-     VALUE buf = rb_str_new(ctx->read_buf, result);
-     rb_yield(buf);
-     um_op_checkin(ctx->machine, ctx->op);
-   }
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);
+
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

+ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_CONNECT);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_connect(sqe, fd, addr, addrlen);

+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);

- int um_read_each_multishot_process_results(struct op_ensure_ctx *ctx, int *total) {
-   __s32 result = 0;
-   __u32 flags = 0;
-   __s32 bad_result = 0;
-   int eof = 0;
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
+ }

-   while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
-     if (result < 0) {
-       bad_result = result;
-       break;
-     }
-     if (result == 0) {
-       eof = 1;
-       break;
-     }
+ VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_SEND);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);

-     *total += result;
-     VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result, flags);
-     rb_yield(buf);
-   }
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);

-   if (ctx->op->state == OP_completed) {
-     um_op_checkin(ctx->machine, ctx->op);
+   RB_GC_GUARD(buffer);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
+ }

-     // TTY devices might not support multishot reads:
-     // https://github.com/axboe/liburing/issues/1185. A workaround is to
-     // fallback to singleshot mode, using the first buffer in the buffer
-     // group.
-     if (!(flags & IORING_CQE_F_BUFFER)) {
-       *total = um_read_each_safe_loop_singleshot(ctx, *total);
-       return 0;
-     }
-     else
-       um_read_each_prepare_op(ctx, 0);
+ VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_RECV);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
+   io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
+
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op)) {
+     um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
+     ret = INT2NUM(op.result.res);
    }
-   if (bad_result)
-     um_raise_on_system_error(bad_result);

-   return eof ? 0 : 1;
+   RB_GC_GUARD(buffer);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_read_each_safe_loop(VALUE arg) {
-   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
-   int total = 0;
-
-   um_read_each_prepare_op(ctx, 0);
+ VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_BIND);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_bind(sqe, fd, addr, addrlen);

-   while (1) {
-     um_await_op(ctx->machine, ctx->op, NULL, NULL);
-     if (!ctx->op->results_head)
-       rb_raise(rb_eRuntimeError, "no result found!\n");
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);

-     if (!um_read_each_multishot_process_results(ctx, &total))
-       return INT2NUM(total);
-   }
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_read_each(struct um *machine, int fd, int bgid) {
-   struct op_ensure_ctx ctx = { .machine = machine, .fd = fd, .bgid = bgid, .read_buf = NULL };
-   return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+ VALUE um_listen(struct um *machine, int fd, int backlog) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_BIND);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_listen(sqe, fd, backlog);
+
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);
+
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_write(struct um *machine, int fd, VALUE buffer, int len) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   __s32 result = 0;
-   __u32 flags = 0;
+ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
+   VALUE ret = Qnil;
+   int value;

-   io_uring_prep_write(sqe, fd, RSTRING_PTR(buffer), len, -1);
-   op->state = OP_submitted;
+ #ifdef HAVE_IO_URING_PREP_CMD_SOCK
+   struct um_op op;
+   um_prep_op(machine, &op, OP_GETSOCKOPT);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));

-   um_await_op(machine, op, &result, &flags);
+   ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(value);
+ #else
+   socklen_t nvalue = sizeof(value);
+   int res = getsockopt(fd, level, opt, &value, &nvalue);
+   if (res)
+     rb_syserr_fail(errno, strerror(errno));
+   ret = INT2NUM(value);
+ #endif

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_close(struct um *machine, int fd) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   __s32 result = 0;
-   __u32 flags = 0;
+ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
+   VALUE ret = Qnil;

-   io_uring_prep_close(sqe, fd);
-   op->state = OP_submitted;
+ #ifdef HAVE_IO_URING_PREP_CMD_SOCK
+   struct um_op op;
+   um_prep_op(machine, &op, OP_GETSOCKOPT);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));

-   um_await_op(machine, op, &result, &flags);
+   ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);
+ #else
+   int res = setsockopt(fd, level, opt, &value, sizeof(value));
+   if (res)
+     rb_syserr_fail(errno, strerror(errno));
+   ret = INT2NUM(0);
+ #endif

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(fd);
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
  }

- VALUE um_accept(struct um *machine, int fd) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   struct sockaddr addr;
-   socklen_t len;
-   __s32 result = 0;
-   __u32 flags = 0;
+ VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_BIND);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
+
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);
+
+   RB_GC_GUARD(ret);
+   return raise_if_exception(ret);
+ }
+
+ VALUE um_waitpid(struct um *machine, int pid, int options) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_BIND);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+
+   siginfo_t infop;
+   io_uring_prep_waitid(sqe, P_PID, pid, &infop, options, 0);

-   io_uring_prep_accept(sqe, fd, &addr, &len, 0);
-   op->state = OP_submitted;
+   VALUE ret = um_fiber_switch(machine);
+   if (um_check_completion(machine, &op))
+     ret = INT2NUM(op.result.res);

-   um_await_op(machine, op, &result, &flags);
+   RB_GC_GUARD(ret);
+   raise_if_exception(ret);

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+   return rb_ary_new_from_args(2, INT2NUM(infop.si_pid), INT2NUM(infop.si_status));
  }

- VALUE um_accept_each_safe_loop(VALUE arg) {
-   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
-   __s32 result = 0;
-   __u32 flags = 0;
+ /*******************************************************************************
+ multishot ops
+ *******************************************************************************/

-   while (1) {
-     um_await_op(ctx->machine, ctx->op, &result, &flags);
-     if (!ctx->op->results_head) {
-       // this shouldn't happen!
-       rb_raise(rb_eRuntimeError, "no result found for accept_each loop");
-     }
+ VALUE accept_each_begin(VALUE arg) {
+   struct op_ctx *ctx = (struct op_ctx *)arg;
+   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+   io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
+
+   while (true) {
+     VALUE ret = um_fiber_switch(ctx->machine);
+     if (!um_op_completed_p(ctx->op))
+       return raise_if_exception(ret);

-     while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
-       um_raise_on_system_error(result);
-       if (likely(result > 0))
-         rb_yield(INT2FIX(result));
-       else
+     int more = false;
+     struct um_op_result *result = &ctx->op->result;
+     while (result) {
+       more = (result->flags & IORING_CQE_F_MORE);
+       if (result->res < 0) {
+         um_op_multishot_results_clear(ctx->machine, ctx->op);
          return Qnil;
+       }
+       rb_yield(INT2NUM(result->res));
+       result = result->next;
      }
+     um_op_multishot_results_clear(ctx->machine, ctx->op);
+     if (more)
+       ctx->op->flags &= ~OP_F_COMPLETED;
+     else
+       break;
    }
- }
-
- VALUE um_accept_each(struct um *machine, int fd) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);
-   op->state = OP_submitted;
-   op->is_multishot = 1;

-   struct op_ensure_ctx ctx = { .machine = machine, .op = op, .read_buf = NULL };
-   return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+   return Qnil;
  }

- VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
+ VALUE multishot_ensure(VALUE arg) {
+   struct op_ctx *ctx = (struct op_ctx *)arg;
+   if (ctx->op->multishot_result_count) {
+     int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
+     if (more)
+       ctx->op->flags &= ~OP_F_COMPLETED;
+     um_op_multishot_results_clear(ctx->machine, ctx->op);
+   }
+   if (!um_op_completed_p(ctx->op))
+     um_cancel_and_wait(ctx->machine, ctx->op);

-   io_uring_prep_socket(sqe, domain, type, protocol, flags);
-   op->state = OP_submitted;
+   if (ctx->read_buf)
+     free(ctx->read_buf);

-   um_await_op(machine, op, &result, NULL);
+   return Qnil;
+ }

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+ VALUE um_accept_each(struct um *machine, int fd) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
+
+   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
+   return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
  }

- VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
+ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
+   struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
+   ctx->read_maxlen = desc->buf_size;
+   ctx->read_buf = malloc(desc->buf_size);
+   int total = 0;

-   io_uring_prep_connect(sqe, fd, addr, addrlen);
-   op->state = OP_submitted;
+   while (1) {
+     um_prep_op(ctx->machine, ctx->op, OP_READ);
+     struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+     io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);

-   um_await_op(machine, op, &result, NULL);
+     VALUE ret = um_fiber_switch(ctx->machine);
+     if (um_op_completed_p(ctx->op)) {
+       um_raise_on_error_result(ctx->op->result.res);
+       if (!ctx->op->result.res) return total;

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+       VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
+       total += ctx->op->result.res;
+       rb_yield(buf);
+       RB_GC_GUARD(buf);
+     }
+     else
+       return raise_if_exception(ret);
+   }
  }

- VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
+ // // returns true if more results are expected
+ int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
+   if (result->res == 0)
+     return false;

-   io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
-   op->state = OP_submitted;
+   *total += result->res;
+   VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result->res, result->flags);
+   rb_yield(buf);
+   RB_GC_GUARD(buf);

-   um_await_op(machine, op, &result, NULL);
+   // TTY devices might not support multishot reads:
+   // https://github.com/axboe/liburing/issues/1185. We detect this by checking
+   // if the F_MORE flag is absent, then switch to single shot mode.
+   if (unlikely(!(result->flags & IORING_CQE_F_MORE))) {
+     *total += um_read_each_singleshot_loop(ctx);
+     return false;
+   }

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+   return true;
  }

- VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
-
-   void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
-   io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
-   op->state = OP_submitted;
+ void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
+   switch (ctx->op->kind) {
+     case OP_READ_MULTISHOT:
+       io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
+       return;
+     case OP_RECV_MULTISHOT:
+       io_uring_prep_recv_multishot(sqe, ctx->fd, NULL, 0, 0);
+       sqe->buf_group = ctx->bgid;
+       sqe->flags |= IOSQE_BUFFER_SELECT;
+       return;
+     default:
+       return;
+   }
+ }

-   um_await_op(machine, op, &result, NULL);
+ VALUE read_recv_each_begin(VALUE arg) {
+   struct op_ctx *ctx = (struct op_ctx *)arg;
+   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+   read_recv_each_prep(sqe, ctx);
+   int total = 0;

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   um_update_read_buffer(machine, buffer, 0, result, flags);
-   return INT2FIX(result);
- }
+   while (true) {
+     VALUE ret = um_fiber_switch(ctx->machine);
+     if (!um_op_completed_p(ctx->op))
+       return raise_if_exception(ret);

- VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
+     int more = false;
+     struct um_op_result *result = &ctx->op->result;
+     while (result) {
+       um_raise_on_error_result(result->res);

-   io_uring_prep_bind(sqe, fd, addr, addrlen);
-   op->state = OP_submitted;
+       more = (result->flags & IORING_CQE_F_MORE);
+       if (!read_recv_each_multishot_process_result(ctx, result, &total))
+         return Qnil;

-   um_await_op(machine, op, &result, NULL);
+       // rb_yield(INT2NUM(result->res));
+       result = result->next;
+     }
+     um_op_multishot_results_clear(ctx->machine, ctx->op);
+     if (more)
+       ctx->op->flags &= ~OP_F_COMPLETED;
+     else
+       break;
+   }

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
+   return Qnil;
  }

- VALUE um_listen(struct um *machine, int fd, int backlog) {
-   struct um_op *op = um_op_checkout(machine);
-   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-   int result = 0;
+ VALUE um_read_each(struct um *machine, int fd, int bgid) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_READ_MULTISHOT);

-   io_uring_prep_listen(sqe, fd, backlog);
-   op->state = OP_submitted;
+   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
+   return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+ }

-   um_await_op(machine, op, &result, NULL);
+ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_RECV_MULTISHOT);

-   discard_op_if_completed(machine, op);
-   um_raise_on_system_error(result);
-   return INT2FIX(result);
- }
+   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
+   return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+ }
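
Editor's note: every blocking singleshot op added in 0.5.x follows the same pattern visible in the diff above: the op lives on the calling fiber's C stack, is registered via um_prep_op and um_get_sqe, and the fiber yields to the event loop until um_check_completion either validates the CQE result or cancels the op and waits for its -ECANCELED completion. The sketch below illustrates that pattern applied to a new op; it is hypothetical, not part of the released code. OP_FSYNC is an assumed enum value, while io_uring_prep_fsync is a standard liburing call; all other functions appear in the diff above with these signatures.

```c
// Hypothetical sketch of the 0.5.x blocking singleshot pattern from um.c.
// OP_FSYNC is an assumed op kind, not present in the released code.
VALUE um_fsync(struct um *machine, int fd) {
  struct um_op op;
  um_prep_op(machine, &op, OP_FSYNC);      // zero the op, record current fiber
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
  io_uring_prep_fsync(sqe, fd, 0);         // standard liburing prep call

  VALUE ret = um_fiber_switch(machine);    // yield to the loop until a CQE arrives
  if (um_check_completion(machine, &op))   // cancels and waits if interrupted
    ret = INT2NUM(op.result.res);

  RB_GC_GUARD(ret);
  return raise_if_exception(ret);          // re-raise if resumed with an exception
}
```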