uringmachine 0.25.0 → 0.26.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/ext/um/um.c CHANGED
@@ -60,8 +60,8 @@ inline void um_teardown(struct um *machine) {
60
60
  }
61
61
 
62
62
  inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
63
- DEBUG_PRINTF("-> %p um_get_sqe: op %p kind=%s unsubmitted=%d pending=%d total=%lu\n",
64
- &machine->ring, op, um_op_kind_name(op ? op->kind : OP_UNDEFINED),
63
+ DEBUG_PRINTF("* um_get_sqe: op %p kind=%s ref_count=%d flags=%x unsubmitted=%d pending=%d total=%lu\n",
64
+ op, um_op_kind_name(op ? op->kind : OP_UNDEFINED), op ? op->ref_count : 0, op ? op->flags : 0,
65
65
  machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
66
66
  );
67
67
 
@@ -112,12 +112,11 @@ void *um_submit_without_gvl(void *ptr) {
112
112
  }
113
113
 
114
114
  inline uint um_submit(struct um *machine) {
115
- DEBUG_PRINTF("-> %p um_submit: unsubmitted=%d pending=%d total=%lu\n",
116
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
117
- machine->metrics.total_ops
115
+ DEBUG_PRINTF("> um_submit: unsubmitted=%d pending=%d total=%lu\n",
116
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
118
117
  );
119
118
  if (!machine->metrics.ops_unsubmitted) {
120
- DEBUG_PRINTF("<- %p um_submit: no unsubmitted SQEs, early return\n", &machine->ring);
119
+ DEBUG_PRINTF("< %p um_submit: no unsubmitted SQEs, early return\n", &machine->ring);
121
120
  return 0;
122
121
  }
123
122
 
@@ -127,7 +126,7 @@ inline uint um_submit(struct um *machine) {
127
126
  else
128
127
  ctx.result = io_uring_submit(&machine->ring);
129
128
 
130
- DEBUG_PRINTF("<- %p um_submit: result=%d\n", &machine->ring, ctx.result);
129
+ DEBUG_PRINTF("< um_submit: result=%d\n", ctx.result);
131
130
 
132
131
  if (ctx.result < 0)
133
132
  rb_syserr_fail(-ctx.result, strerror(-ctx.result));
@@ -136,53 +135,76 @@ inline uint um_submit(struct um *machine) {
136
135
  return ctx.result;
137
136
  }
138
137
 
138
+ static inline void um_schedule_op(struct um *machine, struct um_op *op) {
139
+ op->flags |= OP_F_SCHEDULED;
140
+ um_runqueue_push(machine, op);
141
+ op->ref_count++;
142
+ }
143
+
139
144
  static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
140
145
  struct um_op *op = (struct um_op *)cqe->user_data;
141
146
  if (DEBUG) {
142
147
  if (op) {
143
- DEBUG_PRINTF("<- %p um_process_cqe: op %p kind %s flags %d cqe_res %d cqe_flags %d pending %d\n",
144
- &machine->ring, op, um_op_kind_name(op->kind), op->flags, cqe->res, cqe->flags, machine->metrics.ops_pending
148
+ DEBUG_PRINTF("* um_process_cqe: op=%p kind=%s ref_count=%d flags=%x cqe_res=%d cqe_flags=%x pending=%d\n",
149
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags,
150
+ cqe->res, cqe->flags, machine->metrics.ops_pending
145
151
  );
146
152
  }
147
153
  else {
148
- DEBUG_PRINTF("<- %p um_process_cqe: op NULL cqe_res %d cqe_flags %d pending %d\n",
149
- &machine->ring, cqe->res, cqe->flags, machine->metrics.ops_pending
154
+ DEBUG_PRINTF("* um_process_cqe: op=NULL cqe_res=%d cqe_flags=%x pending=%d\n",
155
+ cqe->res, cqe->flags, machine->metrics.ops_pending
150
156
  );
151
157
  }
152
158
  }
153
159
  if (unlikely(!op)) return;
154
160
 
155
- if (!(cqe->flags & IORING_CQE_F_MORE))
156
- machine->metrics.ops_pending--;
157
-
158
- if (op->flags & OP_F_FREE_ON_COMPLETE) {
159
- if (op->flags & OP_F_TRANSIENT)
160
- um_op_transient_remove(machine, op);
161
+ // A multishot operation is still in progress if CQE has the F_MORE flag set
162
+ int done = OP_MULTISHOT_P(op) ? !(cqe->flags & IORING_CQE_F_MORE) : true;
161
163
 
162
- um_op_free(machine, op);
164
+ // F_TRANSIENT means the operation was put on the transient list. Transient
165
+ // ops are usually async ops where the app doesn't care when they are done or
166
+ // how. We hold on to those ops on the transient list, where we can mark the
167
+ // corresponding buffer during a GC.
168
+ if (OP_TRANSIENT_P(op)) {
169
+ machine->metrics.ops_pending--;
170
+ um_op_transient_remove(machine, op);
171
+ if (op->ref_count > 1) {
172
+ op->result.res = cqe->res;
173
+ op->result.flags = cqe->flags;
174
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
175
+ }
176
+ um_op_release(machine, op);
163
177
  return;
164
178
  }
165
179
 
166
- op->flags |= OP_F_COMPLETED;
167
- if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
168
- if (unlikely(op->flags & OP_F_CANCELED)) return;
180
+ if (done) {
181
+ machine->metrics.ops_pending--;
182
+ um_op_release(machine, op);
183
+ }
169
184
 
170
- if (op->flags & OP_F_TRANSIENT)
171
- um_op_transient_remove(machine, op);
185
+ if (unlikely(OP_CANCELED_P(op))) {
186
+ // multishot ops may generate multiple CQEs, we release only on the last
187
+ // one for the op.
188
+ if (done) {
189
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
190
+ um_op_release(machine, op);
191
+ }
192
+ return;
193
+ }
172
194
 
173
- if (op->flags & OP_F_MULTISHOT) {
195
+ if (OP_MULTISHOT_P(op)) {
174
196
  um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
175
- if (op->multishot_result_count > 1)
176
- return;
197
+
198
+ op->flags |= done ? (OP_F_CQE_SEEN | OP_F_CQE_DONE) : OP_F_CQE_SEEN;
199
+ if (!OP_SCHEDULED_P(op)) um_schedule_op(machine, op);
177
200
  }
178
201
  else {
202
+ // single shot
179
203
  op->result.res = cqe->res;
180
204
  op->result.flags = cqe->flags;
205
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
206
+ if (!OP_ASYNC_P(op)) um_schedule_op(machine, op);
181
207
  }
182
-
183
- if (op->flags & OP_F_ASYNC) return;
184
-
185
- um_runqueue_push(machine, op);
186
208
  }
187
209
 
188
210
  // copied from liburing/queue.c
@@ -191,8 +213,8 @@ static inline int cq_ring_needs_flush(struct io_uring *ring) {
191
213
  }
192
214
 
193
215
  static inline int um_process_ready_cqes(struct um *machine) {
194
- DEBUG_PRINTF("-> %p um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
195
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
216
+ DEBUG_PRINTF("> um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
217
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
196
218
  );
197
219
 
198
220
  unsigned total_count = 0;
@@ -211,9 +233,9 @@ iterate:
211
233
  if (overflow_checked) goto done;
212
234
 
213
235
  if (cq_ring_needs_flush(&machine->ring)) {
214
- DEBUG_PRINTF("-> %p io_uring_enter\n", &machine->ring);
236
+ DEBUG_PRINTF("> io_uring_enter\n");
215
237
  int ret = io_uring_enter(machine->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
216
- DEBUG_PRINTF("<- %p io_uring_enter: result=%d\n", &machine->ring, ret);
238
+ DEBUG_PRINTF("< io_uring_enter: result=%d\n", ret);
217
239
  if (ret < 0)
218
240
  rb_syserr_fail(-ret, strerror(-ret));
219
241
 
@@ -222,7 +244,7 @@ iterate:
222
244
  }
223
245
 
224
246
  done:
225
- DEBUG_PRINTF("<- %p um_process_ready_cqes: total_processed=%u\n", &machine->ring, total_count);
247
+ DEBUG_PRINTF("< um_process_ready_cqes: total_processed=%u\n", total_count);
226
248
 
227
249
  return total_count;
228
250
  }
@@ -237,8 +259,8 @@ struct wait_for_cqe_ctx {
237
259
  void *um_wait_for_cqe_without_gvl(void *ptr) {
238
260
  struct wait_for_cqe_ctx *ctx = ptr;
239
261
  if (ctx->machine->metrics.ops_unsubmitted) {
240
- DEBUG_PRINTF("-> %p io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
241
- &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
262
+ DEBUG_PRINTF("> io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
263
+ ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
242
264
  ctx->machine->metrics.total_ops
243
265
  );
244
266
 
@@ -249,16 +271,16 @@ void *um_wait_for_cqe_without_gvl(void *ptr) {
249
271
  // https://github.com/axboe/liburing/issues/1280
250
272
  int ret = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
251
273
  ctx->machine->metrics.ops_unsubmitted = 0;
252
- DEBUG_PRINTF("<- %p io_uring_submit_and_wait_timeout: result=%d\n", &ctx->machine->ring, ret);
274
+ DEBUG_PRINTF("< io_uring_submit_and_wait_timeout: result=%d\n", ret);
253
275
  ctx->result = (ret > 0 && !ctx->cqe) ? -EINTR : ret;
254
276
  }
255
277
  else {
256
- DEBUG_PRINTF("-> %p io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
257
- &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
278
+ DEBUG_PRINTF("> io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
279
+ ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
258
280
  ctx->machine->metrics.total_ops
259
281
  );
260
282
  ctx->result = io_uring_wait_cqes(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
261
- DEBUG_PRINTF("<- %p io_uring_wait_cqes: result=%d\n", &ctx->machine->ring, ctx->result);
283
+ DEBUG_PRINTF("< io_uring_wait_cqes: result=%d\n", ctx->result);
262
284
  }
263
285
  return NULL;
264
286
  }
@@ -290,6 +312,7 @@ inline void *um_wait_for_sidecar_signal(void *ptr) {
290
312
  // either 1 - where we wait for at least one CQE to be ready, or 0, where we
291
313
  // don't wait, and just process any CQEs that already ready.
292
314
  static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wait_nr) {
315
+ DEBUG_PRINTF("* um_wait_for_and_process_ready_cqes wait_nr=%d\n", wait_nr);
293
316
  struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL, .wait_nr = wait_nr };
294
317
  machine->metrics.total_waits++;
295
318
 
@@ -347,14 +370,16 @@ inline void um_profile_switch(struct um *machine, VALUE next_fiber) {
347
370
  }
348
371
 
349
372
  inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
350
- DEBUG_PRINTF("-> %p process_runqueue_op: op %p\n", &machine->ring, op);
373
+ DEBUG_PRINTF("* process_runqueue_op: op=%p kind=%s ref_count=%d flags=%x\n",
374
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
375
+ );
351
376
 
352
377
  machine->metrics.total_switches++;
353
378
  VALUE fiber = op->fiber;
354
379
  VALUE value = op->value;
355
380
 
356
- if (unlikely(op->flags & OP_F_TRANSIENT))
357
- um_op_free(machine, op);
381
+ op->flags &= ~OP_F_SCHEDULED;
382
+ um_op_release(machine, op);
358
383
 
359
384
  if (machine->profile_mode) um_profile_switch(machine, fiber);
360
385
  VALUE ret = rb_fiber_transfer(fiber, 1, &value);
@@ -364,15 +389,19 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
364
389
  }
365
390
 
366
391
  inline VALUE um_switch(struct um *machine) {
367
- DEBUG_PRINTF("-> %p um_switch: unsubmitted=%d pending=%d total=%lu\n",
368
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
392
+ DEBUG_PRINTF("* um_switch: unsubmitted=%d pending=%d total=%lu\n",
393
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
369
394
  machine->metrics.total_ops
370
395
  );
371
396
 
372
397
  while (true) {
373
398
  struct um_op *op = um_runqueue_shift(machine);
374
399
  if (op) {
375
- if (unlikely(op->flags & OP_F_RUNQUEUE_SKIP)) continue;
400
+ if (unlikely(OP_SKIP_P(op))) {
401
+ op->flags &= ~OP_F_SCHEDULED;
402
+ um_op_release(machine, op);
403
+ continue;
404
+ }
376
405
 
377
406
  // in test mode we want to process I/O on each snooze
378
407
  if (unlikely(machine->test_mode && (op->kind == OP_SCHEDULE))) {
@@ -393,29 +422,62 @@ inline VALUE um_yield(struct um *machine) {
393
422
  return ret;
394
423
  }
395
424
 
396
- void um_cancel_op(struct um *machine, struct um_op *op) {
425
+ inline void um_cancel_op(struct um *machine, struct um_op *op) {
397
426
  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
398
- io_uring_prep_cancel64(sqe, (long long)op, 0);
427
+ io_uring_prep_cancel(sqe, op, IORING_ASYNC_CANCEL_USERDATA);
428
+ sqe->flags = IOSQE_CQE_SKIP_SUCCESS;
399
429
  }
400
430
 
401
- inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
431
+ inline void um_cancel_op_and_discard_cqe(struct um *machine, struct um_op *op) {
432
+ DEBUG_PRINTF("* um_cancel_op_and_discard_cqe op=%p kind=%s ref_count=%d flags=%x\n",
433
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
434
+ );
435
+ um_cancel_op(machine, op);
436
+ op->flags |= OP_F_CANCELED;
437
+ }
438
+
439
+ inline void um_cancel_op_and_await_cqe(struct um *machine, struct um_op *op) {
440
+ DEBUG_PRINTF("* um_cancel_op_and_await_cqe op=%p kind=%s ref_count=%d flags=%x\n",
441
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
442
+ );
402
443
  um_cancel_op(machine, op);
403
444
 
404
445
  VALUE fiber = rb_fiber_current();
405
446
  rb_set_add(machine->pending_fibers, fiber);
406
- while (!um_op_completed_p(op)) {
447
+ int multishot_wait_count = 0;
448
+ while (!OP_CQE_DONE_P(op)) {
449
+ if (OP_MULTISHOT_P(op)) {
450
+ // I noticed that with multishot timeout ops, there seems to be once in a
451
+ // while a race condition where the cancel would not register, causing
452
+ // this function to block forever, waiting for the operation to be done.
453
+ // The following mechanism reissues a cancel every 4 iterations, which
454
+ // seems to fix the problem. Not clear if this is a bug in io_uring.
455
+ multishot_wait_count++;
456
+ if (!(multishot_wait_count % 4)) um_cancel_op(machine, op);
457
+ um_op_multishot_results_clear(machine, op);
458
+ op->flags &= ~OP_F_CQE_SEEN;
459
+ }
407
460
  um_switch(machine);
408
461
  }
409
462
  rb_set_delete(machine->pending_fibers, fiber);
410
463
  }
411
464
 
412
- inline int um_check_completion(struct um *machine, struct um_op *op) {
413
- if (!um_op_completed_p(op)) {
414
- um_cancel_and_wait(machine, op);
465
+ int um_verify_op_completion(struct um *machine, struct um_op *op, int await_cancelled) {
466
+ if (unlikely(!OP_CQE_DONE_P(op))) {
467
+ if (await_cancelled)
468
+ um_cancel_op_and_await_cqe(machine, op);
469
+ else
470
+ um_cancel_op_and_discard_cqe(machine, op);
415
471
  return 0;
416
472
  }
417
473
 
418
- um_raise_on_error_result(op->result.res);
474
+ int res = op->result.res;
475
+
476
+ // on error we release the op and immediately raise an exception
477
+ if (unlikely(res < 0 && res != -ETIME)) {
478
+ um_op_release(machine, op);
479
+ um_raise_on_error_result(res);
480
+ }
419
481
  return 1;
420
482
  }
421
483
 
@@ -426,25 +488,31 @@ VALUE um_wakeup(struct um *machine) {
426
488
  return Qnil;
427
489
  }
428
490
 
429
- inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, unsigned flags) {
491
+ inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, uint ref_count, unsigned flags) {
430
492
  memset(op, 0, sizeof(struct um_op));
431
493
  op->kind = kind;
494
+ op->ref_count = ref_count;
432
495
  op->flags = flags;
433
496
 
434
- VALUE fiber = (flags & OP_F_FREE_ON_COMPLETE) ? Qnil : rb_fiber_current();
497
+ VALUE fiber = OP_ASYNC_P(op) ? Qnil : rb_fiber_current();
498
+ if (OP_TRANSIENT_P(op)) um_op_transient_add(machine, op);
499
+
435
500
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
436
501
  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
437
502
  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
438
503
  }
439
504
 
440
505
  inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
441
- struct um_op *op = um_op_alloc(machine);
506
+ struct um_op *op = um_op_acquire(machine);
442
507
  memset(op, 0, sizeof(struct um_op));
443
508
  op->kind = OP_SCHEDULE;
444
- op->flags = OP_F_TRANSIENT;
509
+ op->ref_count = 1;
510
+ op->flags = OP_F_SCHEDULED;
511
+
445
512
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
446
513
  RB_OBJ_WRITE(machine->self, &op->value, value);
447
514
  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
515
+
448
516
  um_runqueue_push(machine, op);
449
517
  }
450
518
 
@@ -457,18 +525,14 @@ struct op_ctx {
457
525
  struct um_queue *queue;
458
526
  void *read_buf;
459
527
  int read_maxlen;
460
- struct __kernel_timespec ts;
461
528
  int flags;
462
529
  };
463
530
 
464
531
  VALUE um_timeout_complete(VALUE arg) {
465
532
  struct op_ctx *ctx = (struct op_ctx *)arg;
466
533
 
467
- if (!um_op_completed_p(ctx->op)) {
468
- um_cancel_op(ctx->machine, ctx->op);
469
- ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
470
- um_op_transient_add(ctx->machine, ctx->op);
471
- }
534
+ if (!OP_CQE_DONE_P(ctx->op)) um_cancel_op_and_discard_cqe(ctx->machine, ctx->op);
535
+ um_op_release(ctx->machine, ctx->op);
472
536
 
473
537
  return Qnil;
474
538
  }
@@ -477,8 +541,8 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
477
541
  static ID ID_new = 0;
478
542
  if (!ID_new) ID_new = rb_intern("new");
479
543
 
480
- struct um_op *op = um_op_alloc(machine);
481
- um_prep_op(machine, op, OP_TIMEOUT, 0);
544
+ struct um_op *op = um_op_acquire(machine);
545
+ um_prep_op(machine, op, OP_TIMEOUT, 2, 0);
482
546
  op->ts = um_double_to_timespec(NUM2DBL(interval));
483
547
  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
484
548
  RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
@@ -500,20 +564,20 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
500
564
  VALUE um_sleep(struct um *machine, double duration) {
501
565
  if (duration <= 0) duration = SLEEP_FOREVER_DURATION;
502
566
 
503
- struct um_op op;
504
- um_prep_op(machine, &op, OP_SLEEP, 0);
505
- op.ts = um_double_to_timespec(duration);
506
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
507
- io_uring_prep_timeout(sqe, &op.ts, 0, 0);
567
+ struct um_op *op = um_op_acquire(machine);
568
+ um_prep_op(machine, op, OP_SLEEP, 2, 0);
569
+ op->ts = um_double_to_timespec(duration);
570
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
571
+ io_uring_prep_timeout(sqe, &op->ts, 0, 0);
508
572
 
509
573
  VALUE ret = um_yield(machine);
510
574
 
511
- if (!um_op_completed_p(&op))
512
- um_cancel_and_wait(machine, &op);
513
- else {
514
- if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
515
- ret = DBL2NUM(duration);
516
- }
575
+ DEBUG_PRINTF("sleep resume op %p ref_count %d flags: %x\n",
576
+ op, op->ref_count, op->flags
577
+ );
578
+
579
+ if (likely(um_verify_op_completion(machine, op, false))) ret = DBL2NUM(duration);
580
+ um_op_release(machine, op);
517
581
 
518
582
  RAISE_IF_EXCEPTION(ret);
519
583
  RB_GC_GUARD(ret);
@@ -521,19 +585,20 @@ VALUE um_sleep(struct um *machine, double duration) {
521
585
  }
522
586
 
523
587
  VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset, __u64 file_offset) {
524
- struct um_op op;
525
- um_prep_op(machine, &op, OP_READ, 0);
526
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
527
588
  void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
589
+
590
+ struct um_op *op = um_op_acquire(machine);
591
+ um_prep_op(machine, op, OP_READ, 2, 0);
592
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
528
593
  io_uring_prep_read(sqe, fd, ptr, maxlen, file_offset);
529
594
 
530
595
  VALUE ret = um_yield(machine);
531
596
 
532
- if (um_check_completion(machine, &op)) {
533
- um_update_read_buffer(buffer, buffer_offset, op.result.res);
534
- ret = INT2NUM(op.result.res);
535
-
597
+ if (likely(um_verify_op_completion(machine, op, true))) {
598
+ um_update_read_buffer(buffer, buffer_offset, op->result.res);
599
+ ret = INT2NUM(op->result.res);
536
600
  }
601
+ um_op_release(machine, op);
537
602
 
538
603
  RAISE_IF_EXCEPTION(ret);
539
604
  RB_GC_GUARD(ret);
@@ -541,39 +606,38 @@ VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t b
541
606
  }
542
607
 
543
608
  size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
544
- struct um_op op;
545
- um_prep_op(machine, &op, OP_READ, 0);
546
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
609
+ struct um_op *op = um_op_acquire(machine);
610
+ um_prep_op(machine, op, OP_READ, 2, 0);
611
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
547
612
  io_uring_prep_read(sqe, fd, buffer, maxlen, -1);
548
613
 
614
+ int res = 0;
549
615
  VALUE ret = um_yield(machine);
550
616
 
551
- if (um_check_completion(machine, &op)) {
552
- return op.result.res;
553
- }
617
+ if (likely(um_verify_op_completion(machine, op, true))) res = op->result.res;
618
+ um_op_release(machine, op);
554
619
 
555
620
  RAISE_IF_EXCEPTION(ret);
556
621
  RB_GC_GUARD(ret);
557
- return 0;
622
+ return res;
558
623
  }
559
624
 
560
625
  VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
561
626
  const void *base;
562
627
  size_t size;
563
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
628
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
564
629
  if ((len == (size_t)-1) || (len > size)) len = size;
565
630
  if (unlikely(!len)) return INT2NUM(0);
566
631
 
567
- struct um_op op;
568
- um_prep_op(machine, &op, OP_WRITE, 0);
569
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
570
-
632
+ struct um_op *op = um_op_acquire(machine);
633
+ um_prep_op(machine, op, OP_WRITE, 2, 0);
634
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
571
635
  io_uring_prep_write(sqe, fd, base, len, file_offset);
572
636
 
573
637
  VALUE ret = um_yield(machine);
574
638
 
575
- if (um_check_completion(machine, &op))
576
- ret = INT2NUM(op.result.res);
639
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
640
+ um_op_release(machine, op);
577
641
 
578
642
  RAISE_IF_EXCEPTION(ret);
579
643
  RB_GC_GUARD(ret);
@@ -581,19 +645,20 @@ VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_
581
645
  }
582
646
 
583
647
  size_t um_write_raw(struct um *machine, int fd, const char *buffer, size_t maxlen) {
584
- struct um_op op;
585
- um_prep_op(machine, &op, OP_WRITE, 0);
586
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
648
+ struct um_op *op = um_op_acquire(machine);
649
+ um_prep_op(machine, op, OP_WRITE, 2, 0);
650
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
587
651
  io_uring_prep_write(sqe, fd, buffer, maxlen, 0);
588
652
 
653
+ int res = 0;
589
654
  VALUE ret = um_yield(machine);
590
655
 
591
- if (um_check_completion(machine, &op))
592
- return op.result.res;
656
+ if (likely(um_verify_op_completion(machine, op, true))) res = op->result.res;
657
+ um_op_release(machine, op);
593
658
 
594
659
  RAISE_IF_EXCEPTION(ret);
595
660
  RB_GC_GUARD(ret);
596
- return 0;
661
+ return res;
597
662
  }
598
663
 
599
664
  VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
@@ -607,7 +672,6 @@ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
607
672
  struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, &total_len);
608
673
  struct iovec *iovecs_ptr = iovecs;
609
674
  int iovecs_len = argc;
610
- struct um_op op;
611
675
  VALUE ret = Qnil;
612
676
  int writev_res = 0;
613
677
 
@@ -615,31 +679,34 @@ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
615
679
  free(iovecs);
616
680
  return INT2NUM(0);
617
681
  }
682
+
683
+ struct um_op *op = um_op_acquire(machine);
618
684
  len = total_len;
619
- while (len) {
620
- um_prep_op(machine, &op, OP_WRITEV, 0);
621
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
685
+ while (true) {
686
+ op->iovecs = iovecs;
687
+ um_prep_op(machine, op, OP_WRITEV, 2, OP_F_FREE_IOVECS);
688
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
622
689
  io_uring_prep_writev(sqe, fd, iovecs_ptr, iovecs_len, file_offset);
623
690
 
624
691
  ret = um_yield(machine);
625
692
 
626
- int completed = um_op_completed_p(&op);
627
- if (unlikely(!completed)) goto cancelled;
693
+ if (unlikely(!OP_CQE_DONE_P(op))) goto cancelled;
628
694
 
629
- writev_res = op.result.res;
695
+ writev_res = op->result.res;
630
696
  if (unlikely(writev_res < 0)) goto done;
631
697
 
632
698
  len -= writev_res;
633
- if (len) {
634
- um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
635
- if (file_offset != (__u64)-1) file_offset += writev_res;
636
- }
699
+ if (!len) goto done;
700
+
701
+ um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
702
+ if (file_offset != (__u64)-1) file_offset += writev_res;
637
703
  }
638
704
 
639
705
  cancelled:
640
- um_cancel_and_wait(machine, &op);
706
+ um_cancel_op_and_await_cqe(machine, op);
641
707
  done:
642
- free(iovecs);
708
+ um_op_release(machine, op);
709
+
643
710
  RAISE_IF_EXCEPTION(ret);
644
711
  RB_GC_GUARD(ret);
645
712
  um_raise_on_error_result(writev_res);
@@ -649,31 +716,29 @@ done:
649
716
  VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
650
717
  const void *base;
651
718
  size_t size;
652
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
719
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
653
720
  if ((len == (size_t)-1) || (len > size)) len = size;
654
721
  if (unlikely(!len)) return INT2NUM(0);
655
722
 
656
- struct um_op *op = um_op_alloc(machine);
657
- um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
723
+ struct um_op *op = um_op_acquire(machine);
724
+ um_prep_op(machine, op, OP_WRITE_ASYNC, 1, OP_F_ASYNC | OP_F_TRANSIENT);
658
725
  RB_OBJ_WRITE(machine->self, &op->value, buffer);
659
-
660
726
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
661
727
  io_uring_prep_write(sqe, fd, base, len, file_offset);
662
- um_op_transient_add(machine, op);
663
728
 
664
729
  return buffer;
665
730
  }
666
731
 
667
732
  VALUE um_close(struct um *machine, int fd) {
668
- struct um_op op;
669
- um_prep_op(machine, &op, OP_CLOSE, 0);
670
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
733
+ struct um_op *op = um_op_acquire(machine);
734
+ um_prep_op(machine, op, OP_CLOSE, 2, 0);
735
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
671
736
  io_uring_prep_close(sqe, fd);
672
737
 
673
738
  VALUE ret = um_yield(machine);
674
739
 
675
- if (um_check_completion(machine, &op))
676
- ret = INT2NUM(fd);
740
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(fd);
741
+ um_op_release(machine, op);
677
742
 
678
743
  RAISE_IF_EXCEPTION(ret);
679
744
  RB_GC_GUARD(ret);
@@ -681,9 +746,8 @@ VALUE um_close(struct um *machine, int fd) {
681
746
  }
682
747
 
683
748
  VALUE um_close_async(struct um *machine, int fd) {
684
- struct um_op *op = um_op_alloc(machine);
685
- um_prep_op(machine, op, OP_CLOSE_ASYNC, OP_F_FREE_ON_COMPLETE);
686
-
749
+ struct um_op *op = um_op_acquire(machine);
750
+ um_prep_op(machine, op, OP_CLOSE_ASYNC, 1, OP_F_ASYNC);
687
751
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
688
752
  io_uring_prep_close(sqe, fd);
689
753
 
@@ -691,15 +755,15 @@ VALUE um_close_async(struct um *machine, int fd) {
691
755
  }
692
756
 
693
757
  VALUE um_accept(struct um *machine, int fd) {
694
- struct um_op op;
695
- um_prep_op(machine, &op, OP_ACCEPT, 0);
696
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
758
+ struct um_op *op = um_op_acquire(machine);
759
+ um_prep_op(machine, op, OP_ACCEPT, 2, 0);
760
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
697
761
  io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
698
762
 
699
763
  VALUE ret = um_yield(machine);
700
764
 
701
- if (um_check_completion(machine, &op))
702
- ret = INT2NUM(op.result.res);
765
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
766
+ um_op_release(machine, op);
703
767
 
704
768
  RAISE_IF_EXCEPTION(ret);
705
769
  RB_GC_GUARD(ret);
@@ -707,15 +771,15 @@ VALUE um_accept(struct um *machine, int fd) {
707
771
  }
708
772
 
709
773
  VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
710
- struct um_op op;
711
- um_prep_op(machine, &op, OP_SOCKET, 0);
712
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
774
+ struct um_op *op = um_op_acquire(machine);
775
+ um_prep_op(machine, op, OP_SOCKET, 2, 0);
776
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
713
777
  io_uring_prep_socket(sqe, domain, type, protocol, flags);
714
778
 
715
779
  VALUE ret = um_yield(machine);
716
780
 
717
- if (um_check_completion(machine, &op))
718
- ret = INT2NUM(op.result.res);
781
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
782
+ um_op_release(machine, op);
719
783
 
720
784
  RAISE_IF_EXCEPTION(ret);
721
785
  RB_GC_GUARD(ret);
@@ -723,15 +787,15 @@ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint fla
723
787
  }
724
788
 
725
789
  VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
726
- struct um_op op;
727
- um_prep_op(machine, &op, OP_CONNECT, 0);
728
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
790
+ struct um_op *op = um_op_acquire(machine);
791
+ um_prep_op(machine, op, OP_CONNECT, 2, 0);
792
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
729
793
  io_uring_prep_connect(sqe, fd, addr, addrlen);
730
794
 
731
795
  VALUE ret = um_yield(machine);
732
796
 
733
- if (um_check_completion(machine, &op))
734
- ret = INT2NUM(op.result.res);
797
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
798
+ um_op_release(machine, op);
735
799
 
736
800
  RAISE_IF_EXCEPTION(ret);
737
801
  RB_GC_GUARD(ret);
@@ -739,27 +803,24 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, sockle
739
803
  }
740
804
 
741
805
  VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
742
- struct um_op op;
743
- um_prep_op(machine, &op, OP_SEND, 0);
744
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
745
-
746
806
  const void *base;
747
807
  size_t size;
748
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
808
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
749
809
  if ((len == (size_t)-1) || (len > size)) len = size;
750
810
 
811
+ struct um_op *op = um_op_acquire(machine);
812
+ um_prep_op(machine, op, OP_SEND, 2, 0);
813
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
751
814
  io_uring_prep_send(sqe, fd, base, len, flags);
752
815
 
753
816
  VALUE ret = um_yield(machine);
754
817
 
755
- if (um_check_completion(machine, &op))
756
- ret = INT2NUM(op.result.res);
818
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
819
+ um_op_release(machine, op);
757
820
 
758
821
  RAISE_IF_EXCEPTION(ret);
759
822
  RB_GC_GUARD(ret);
760
823
  return ret;
761
- // int ret = write(fd, base, len);
762
- // return UINT2NUM(ret);
763
824
  }
764
825
 
765
826
  // for some reason we don't get this define from liburing/io_uring.h
@@ -767,22 +828,17 @@ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
767
828
 
768
829
  VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
769
830
  struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, NULL);
770
- struct um_op op;
771
- um_prep_op(machine, &op, OP_SEND, 0);
772
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
773
-
831
+ struct um_op *op = um_op_acquire(machine);
832
+ op->iovecs = iovecs;
833
+ um_prep_op(machine, op, OP_SEND, 2, OP_F_FREE_IOVECS);
834
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
774
835
  io_uring_prep_send(sqe, fd, iovecs, argc, MSG_NOSIGNAL | MSG_WAITALL);
775
836
  sqe->ioprio |= IORING_SEND_VECTORIZED;
776
837
 
777
838
  VALUE ret = um_yield(machine);
778
839
 
779
- int completed = um_op_completed_p(&op);
780
- if (unlikely(!completed)) um_cancel_and_wait(machine, &op);
781
- free(iovecs);
782
- if (likely(completed)) {
783
- um_raise_on_error_result(op.result.res);
784
- ret = INT2NUM(op.result.res);
785
- }
840
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
841
+ um_op_release(machine, op);
786
842
 
787
843
  RAISE_IF_EXCEPTION(ret);
788
844
  RB_GC_GUARD(ret);
@@ -791,19 +847,17 @@ VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
791
847
 
792
848
// Sends all given strings on fd as a single send bundle, using the
// buffer ring identified by bgid (IOSQE_BUFFER_SELECT).
// Returns the number of bytes sent.
VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
  um_add_strings_to_buffer_ring(machine, bgid, strings);
  struct um_op *op = um_op_acquire(machine);
  um_prep_op(machine, op, OP_SEND_BUNDLE, 2, 0);
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
  io_uring_prep_send_bundle(sqe, fd, 0, MSG_NOSIGNAL | MSG_WAITALL);
  sqe->flags |= IOSQE_BUFFER_SELECT;
  sqe->buf_group = bgid;

  VALUE ret = um_yield(machine);

  if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
  um_op_release(machine, op);

  RAISE_IF_EXCEPTION(ret);
  RB_GC_GUARD(ret);
  return ret;
}
812
866
 
813
867
// Receives up to maxlen bytes from fd into the given Ruby buffer string.
// On completion the buffer length is updated to the number of bytes
// actually received, and that count is returned.
VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
  struct um_op *op = um_op_acquire(machine);
  um_prep_op(machine, op, OP_RECV, 2, 0);
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
  io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);

  VALUE ret = um_yield(machine);

  if (likely(um_verify_op_completion(machine, op, true))) {
    um_update_read_buffer(buffer, 0, op->result.res);
    ret = INT2NUM(op->result.res);
  }
  um_op_release(machine, op);

  RAISE_IF_EXCEPTION(ret);
  RB_GC_GUARD(ret);
  return ret;
}
832
886
 
833
887
  VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
834
- struct um_op op;
835
- um_prep_op(machine, &op, OP_BIND, 0);
836
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
888
+ struct um_op *op = um_op_acquire(machine);
889
+ um_prep_op(machine, op, OP_BIND, 2, 0);
890
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
837
891
  io_uring_prep_bind(sqe, fd, addr, addrlen);
838
892
 
839
893
  VALUE ret = um_yield(machine);
840
894
 
841
- if (um_check_completion(machine, &op))
842
- ret = INT2NUM(op.result.res);
895
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
896
+ um_op_release(machine, op);
843
897
 
844
898
  RAISE_IF_EXCEPTION(ret);
845
899
  RB_GC_GUARD(ret);
@@ -847,15 +901,15 @@ VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrl
847
901
  }
848
902
 
849
903
  VALUE um_listen(struct um *machine, int fd, int backlog) {
850
- struct um_op op;
851
- um_prep_op(machine, &op, OP_BIND, 0);
852
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
904
+ struct um_op *op = um_op_acquire(machine);
905
+ um_prep_op(machine, op, OP_BIND, 2, 0);
906
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
853
907
  io_uring_prep_listen(sqe, fd, backlog);
854
908
 
855
909
  VALUE ret = um_yield(machine);
856
910
 
857
- if (um_check_completion(machine, &op))
858
- ret = INT2NUM(op.result.res);
911
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
912
+ um_op_release(machine, op);
859
913
 
860
914
  RAISE_IF_EXCEPTION(ret);
861
915
  RB_GC_GUARD(ret);
@@ -864,17 +918,15 @@ VALUE um_listen(struct um *machine, int fd, int backlog) {
864
918
 
865
919
  VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
866
920
  VALUE ret = Qnil;
867
- int value;
868
-
869
- struct um_op op;
870
- um_prep_op(machine, &op, OP_GETSOCKOPT, 0);
871
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
872
- io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
921
+ struct um_op *op = um_op_acquire(machine);
922
+ um_prep_op(machine, op, OP_GETSOCKOPT, 2, 0);
923
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
924
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &op->int_value, sizeof(op->int_value));
873
925
 
874
926
  ret = um_yield(machine);
875
927
 
876
- if (um_check_completion(machine, &op))
877
- ret = INT2NUM(value);
928
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->int_value);
929
+ um_op_release(machine, op);
878
930
 
879
931
  RAISE_IF_EXCEPTION(ret);
880
932
  RB_GC_GUARD(ret);
@@ -883,16 +935,16 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
883
935
 
884
936
  VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
885
937
  VALUE ret = Qnil;
886
-
887
- struct um_op op;
888
- um_prep_op(machine, &op, OP_SETSOCKOPT, 0);
889
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
890
- io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
938
+ struct um_op *op = um_op_acquire(machine);
939
+ um_prep_op(machine, op, OP_SETSOCKOPT, 2, 0);
940
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
941
+ op->int_value = value;
942
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &op->int_value, sizeof(op->int_value));
891
943
 
892
944
  ret = um_yield(machine);
893
945
 
894
- if (um_check_completion(machine, &op))
895
- ret = INT2NUM(op.result.res);
946
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
947
+ um_op_release(machine, op);
896
948
 
897
949
  RAISE_IF_EXCEPTION(ret);
898
950
  RB_GC_GUARD(ret);
@@ -901,16 +953,15 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
901
953
 
902
954
  VALUE um_shutdown(struct um *machine, int fd, int how) {
903
955
  VALUE ret = Qnil;
904
-
905
- struct um_op op;
906
- um_prep_op(machine, &op, OP_SHUTDOWN, 0);
907
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
956
+ struct um_op *op = um_op_acquire(machine);
957
+ um_prep_op(machine, op, OP_SHUTDOWN, 2, 0);
958
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
908
959
  io_uring_prep_shutdown(sqe, fd, how);
909
960
 
910
961
  ret = um_yield(machine);
911
962
 
912
- if (um_check_completion(machine, &op))
913
- ret = INT2NUM(op.result.res);
963
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
964
+ um_op_release(machine, op);
914
965
 
915
966
  RAISE_IF_EXCEPTION(ret);
916
967
  RB_GC_GUARD(ret);
@@ -918,9 +969,8 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
918
969
  }
919
970
 
920
971
  VALUE um_shutdown_async(struct um *machine, int fd, int how) {
921
- struct um_op *op = um_op_alloc(machine);
922
- um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, OP_F_FREE_ON_COMPLETE);
923
-
972
+ struct um_op *op = um_op_acquire(machine);
973
+ um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, 1, OP_F_ASYNC);
924
974
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
925
975
  io_uring_prep_shutdown(sqe, fd, how);
926
976
 
@@ -928,15 +978,15 @@ VALUE um_shutdown_async(struct um *machine, int fd, int how) {
928
978
  }
929
979
 
930
980
// Opens the file at pathname with the given flags and mode (io_uring
// open op). Returns the new file descriptor; raises on error.
VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
  struct um_op *op = um_op_acquire(machine);
  um_prep_op(machine, op, OP_OPEN, 2, 0);
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
  io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);

  VALUE ret = um_yield(machine);

  if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
  um_op_release(machine, op);

  RAISE_IF_EXCEPTION(ret);
  RB_GC_GUARD(ret);
  return ret;
}
945
995
 
946
996
  VALUE um_poll(struct um *machine, int fd, unsigned mask) {
947
- struct um_op op;
948
- um_prep_op(machine, &op, OP_POLL, 0);
949
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
997
+ struct um_op *op = um_op_acquire(machine);
998
+ um_prep_op(machine, op, OP_POLL, 2, 0);
999
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
950
1000
  io_uring_prep_poll_add(sqe, fd, mask);
951
1001
 
952
1002
  VALUE ret = um_yield(machine);
953
1003
 
954
- if (um_check_completion(machine, &op))
955
- ret = INT2NUM(op.result.res);
1004
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1005
+ um_op_release(machine, op);
956
1006
 
957
1007
  RAISE_IF_EXCEPTION(ret);
958
1008
  RB_GC_GUARD(ret);
959
- RB_GC_GUARD(op.fiber);
960
- RB_GC_GUARD(op.value);
961
1009
  return ret;
962
1010
  }
963
1011
 
964
- static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op *ops, VALUE fds, uint len, uint flags, uint event) {
1012
+ static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op **ops, VALUE fds, uint len, uint flags, uint event) {
965
1013
  for (uint i = 0; i < len; i++) {
966
- struct um_op *op = ops + ((*idx)++);
967
- um_prep_op(machine, op, OP_POLL, flags | OP_F_IGNORE_CANCELED);
1014
+ struct um_op *op = ops[(*idx)++] = um_op_acquire(machine);
1015
+ um_prep_op(machine, op, OP_POLL, 2, flags);
968
1016
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
969
1017
  VALUE fd = rb_ary_entry(fds, i);
970
1018
  RB_OBJ_WRITE(machine->self, &op->value, fd);
@@ -973,8 +1021,9 @@ static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct
973
1021
  }
974
1022
 
975
1023
  VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, uint rfds_len, uint wfds_len, uint efds_len) {
976
- struct um_op op;
1024
+ struct um_op *op;
977
1025
  uint idx = 0;
1026
+
978
1027
  if (rfds_len)
979
1028
  prepare_select_poll_ops(machine, &idx, &op, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
980
1029
  else if (wfds_len)
@@ -985,12 +1034,14 @@ VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, u
985
1034
 
986
1035
  VALUE ret = um_yield(machine);
987
1036
 
988
- um_check_completion(machine, &op);
1037
+ um_verify_op_completion(machine, op, false);
1038
+ uint flags = op->flags;
1039
+ um_op_release(machine, op);
989
1040
  RAISE_IF_EXCEPTION(ret);
990
1041
 
991
- if (op.flags & OP_F_SELECT_POLLIN)
1042
+ if (flags & OP_F_SELECT_POLLIN)
992
1043
  return rb_ary_new3(3, rb_ary_new3(1, ret), rb_ary_new(), rb_ary_new());
993
- else if (op.flags & OP_F_SELECT_POLLOUT)
1044
+ else if (flags & OP_F_SELECT_POLLOUT)
994
1045
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new3(1, ret), rb_ary_new());
995
1046
  else
996
1047
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new3(1, ret));
@@ -1009,7 +1060,7 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
1009
1060
  if (unlikely(!total_len))
1010
1061
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new());
1011
1062
 
1012
- struct um_op *ops = malloc(sizeof(struct um_op) * total_len);
1063
+ struct um_op **ops = malloc(sizeof(struct um_op *) * total_len);
1013
1064
  uint idx = 0;
1014
1065
  prepare_select_poll_ops(machine, &idx, ops, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
1015
1066
  prepare_select_poll_ops(machine, &idx, ops, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
@@ -1017,50 +1068,35 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
1017
1068
  assert(idx == total_len);
1018
1069
 
1019
1070
  VALUE ret = um_yield(machine);
1020
- if (unlikely(um_value_is_exception_p(ret))) {
1021
- free(ops);
1022
- um_raise_exception(ret);
1023
- }
1024
1071
 
1025
1072
  VALUE rfds_out = rb_ary_new();
1026
1073
  VALUE wfds_out = rb_ary_new();
1027
1074
  VALUE efds_out = rb_ary_new();
1028
1075
 
1029
1076
  int error_code = 0;
1030
- uint pending = total_len;
1031
1077
  for (uint i = 0; i < total_len; i++) {
1032
- if (um_op_completed_p(&ops[i])) {
1033
- ops[i].flags |= OP_F_RUNQUEUE_SKIP;
1034
- pending--;
1078
+ if (OP_CQE_DONE_P(ops[i])) {
1079
+ if (OP_SCHEDULED_P(ops[i])) ops[i]->flags |= OP_F_SKIP;
1035
1080
 
1036
- if (unlikely((ops[i].result.res < 0) && !error_code)) {
1037
- error_code = ops[i].result.res;
1081
+ if (unlikely((ops[i]->result.res < 0) && !error_code)) {
1082
+ error_code = ops[i]->result.res;
1038
1083
  }
1039
1084
  else {
1040
- if (ops[i].flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i].value);
1041
- if (ops[i].flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i].value);
1042
- if (ops[i].flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i].value);
1085
+ if (ops[i]->flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i]->value);
1086
+ if (ops[i]->flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i]->value);
1087
+ if (ops[i]->flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i]->value);
1043
1088
  }
1044
1089
  }
1045
1090
  else {
1046
- ops[i].flags |= OP_F_CANCELED;
1047
- um_cancel_op(machine, &ops[i]);
1091
+ um_cancel_op_and_discard_cqe(machine, ops[i]);
1048
1092
  }
1049
1093
  }
1050
1094
 
1051
- while (pending) {
1052
- um_wait_for_and_process_ready_cqes(machine, 1);
1053
-
1054
- for (uint i = 0; i < total_len; i++) {
1055
- struct um_op *op = ops + i;
1056
- if (op->flags & OP_F_CANCELED && um_op_completed_p(op)) {
1057
- pending--;
1058
- }
1059
- }
1060
- }
1095
+ for (uint i = 0; i < total_len; i++) um_op_release(machine, ops[i]);
1061
1096
  free(ops);
1062
1097
 
1063
- if (error_code)
1098
+ RAISE_IF_EXCEPTION(ret);
1099
+ if (unlikely(error_code))
1064
1100
  um_raise_on_error_result(error_code);
1065
1101
 
1066
1102
  return rb_ary_new3(3, rfds_out, wfds_out, efds_out);
@@ -1070,44 +1106,49 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
1070
1106
  RB_GC_GUARD(efds_out);
1071
1107
  }
1072
1108
 
1073
- VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
1074
- struct um_op op;
1075
- um_prep_op(machine, &op, OP_WAITID, 0);
1076
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1109
+ static inline VALUE siginfo_to_array(siginfo_t *info) {
1110
+ return rb_ary_new_from_args(
1111
+ 3,
1112
+ INT2NUM(info->si_pid),
1113
+ INT2NUM(info->si_status),
1114
+ INT2NUM(info->si_code)
1115
+ );
1116
+ }
1077
1117
 
1078
- siginfo_t infop;
1079
- io_uring_prep_waitid(sqe, idtype, id, &infop, options, 0);
1118
+ VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
1119
+ struct um_op *op = um_op_acquire(machine);
1120
+ um_prep_op(machine, op, OP_WAITID, 2, 0);
1121
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1122
+ io_uring_prep_waitid(sqe, idtype, id, &op->siginfo, options, 0);
1080
1123
 
1081
1124
  VALUE ret = um_yield(machine);
1082
1125
 
1083
- if (um_check_completion(machine, &op))
1084
- ret = INT2NUM(op.result.res);
1126
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1127
+ um_op_release(machine, op);
1085
1128
 
1086
1129
  RAISE_IF_EXCEPTION(ret);
1087
1130
  RB_GC_GUARD(ret);
1088
1131
 
1089
- return rb_ary_new_from_args(
1090
- 3, INT2NUM(infop.si_pid), INT2NUM(infop.si_status), INT2NUM(infop.si_code)
1091
- );
1132
+ return siginfo_to_array(&op->siginfo);
1092
1133
  }
1093
1134
 
1094
1135
  #ifdef HAVE_RB_PROCESS_STATUS_NEW
1095
1136
  VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
1096
- struct um_op op;
1097
- um_prep_op(machine, &op, OP_WAITID, 0);
1098
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1099
-
1100
- siginfo_t infop;
1101
- io_uring_prep_waitid(sqe, idtype, id, &infop, options | WNOWAIT, 0);
1137
+ struct um_op *op = um_op_acquire(machine);
1138
+ um_prep_op(machine, op, OP_WAITID, 2, 0);
1139
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1140
+ io_uring_prep_waitid(sqe, idtype, id, &op->siginfo, options | WNOWAIT, 0);
1102
1141
 
1103
1142
  VALUE ret = um_yield(machine);
1104
- if (um_check_completion(machine, &op))
1105
- ret = INT2NUM(op.result.res);
1143
+
1144
+ if (likely(um_verify_op_completion(machine, op))) ret = INT2NUM(op->result.res);
1145
+ siginfo_t siginfo = op->siginfo;
1146
+ um_op_release(machine, op);
1106
1147
 
1107
1148
  RAISE_IF_EXCEPTION(ret);
1108
1149
  RB_GC_GUARD(ret);
1109
1150
 
1110
- return rb_process_status_new(infop.si_pid, (infop.si_status & 0xff) << 8, 0);
1151
+ return rb_process_status_new(siginfo.si_pid, (siginfo.si_status & 0xff) << 8, 0);
1111
1152
  }
1112
1153
  #endif
1113
1154
 
@@ -1137,10 +1178,9 @@ VALUE statx_to_hash(struct statx *stat) {
1137
1178
  VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned int mask) {
1138
1179
  static char empty_path[] = "";
1139
1180
 
1140
- struct um_op op;
1141
- um_prep_op(machine, &op, OP_STATX, 0);
1142
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1143
-
1181
+ struct um_op *op = um_op_acquire(machine);
1182
+ um_prep_op(machine, op, OP_STATX, 2, 0);
1183
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1144
1184
  char *path_ptr = NIL_P(path) ? empty_path : StringValueCStr(path);
1145
1185
  struct statx stat;
1146
1186
  memset(&stat, 0, sizeof(stat));
@@ -1148,8 +1188,8 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
1148
1188
 
1149
1189
  VALUE ret = um_yield(machine);
1150
1190
 
1151
- if (um_check_completion(machine, &op))
1152
- ret = INT2NUM(op.result.res);
1191
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
1192
+ um_op_release(machine, op);
1153
1193
 
1154
1194
  RAISE_IF_EXCEPTION(ret);
1155
1195
  RB_GC_GUARD(ret);
@@ -1168,28 +1208,24 @@ VALUE accept_each_start(VALUE arg) {
1168
1208
 
1169
1209
  while (true) {
1170
1210
  VALUE ret = um_yield(ctx->machine);
1171
- if (!um_op_completed_p(ctx->op)) {
1172
- RAISE_IF_EXCEPTION(ret);
1173
- return ret;
1174
- }
1211
+
1212
+ RAISE_IF_EXCEPTION(ret);
1213
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1175
1214
  RB_GC_GUARD(ret);
1176
1215
 
1177
- int more = false;
1178
1216
  struct um_op_result *result = &ctx->op->result;
1179
1217
  while (result) {
1180
- more = (result->flags & IORING_CQE_F_MORE);
1181
- if (result->res < 0) {
1182
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1218
+ if (unlikely(result->res < 0)) {
1183
1219
  rb_syserr_fail(-result->res, strerror(-result->res));
1184
1220
  }
1185
1221
  rb_yield(INT2NUM(result->res));
1186
1222
  result = result->next;
1187
1223
  }
1224
+
1225
+ if (OP_CQE_DONE_P(ctx->op)) break;
1226
+
1188
1227
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1189
- if (more)
1190
- ctx->op->flags &= ~OP_F_COMPLETED;
1191
- else
1192
- break;
1228
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1193
1229
  }
1194
1230
 
1195
1231
  return Qnil;
@@ -1202,28 +1238,24 @@ VALUE accept_into_queue_start(VALUE arg) {
1202
1238
 
1203
1239
  while (true) {
1204
1240
  VALUE ret = um_yield(ctx->machine);
1205
- if (!um_op_completed_p(ctx->op)) {
1206
- RAISE_IF_EXCEPTION(ret);
1207
- return ret;
1208
- }
1241
+
1242
+ RAISE_IF_EXCEPTION(ret);
1243
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1209
1244
  RB_GC_GUARD(ret);
1210
1245
 
1211
- int more = false;
1212
1246
  struct um_op_result *result = &ctx->op->result;
1213
1247
  while (result) {
1214
- more = (result->flags & IORING_CQE_F_MORE);
1215
- if (result->res < 0) {
1216
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1248
+ if (unlikely(result->res < 0)) {
1217
1249
  rb_syserr_fail(-result->res, strerror(-result->res));
1218
1250
  }
1219
1251
  um_queue_push(ctx->machine, ctx->queue, INT2NUM(result->res));
1220
1252
  result = result->next;
1221
1253
  }
1254
+
1255
+ if (OP_CQE_DONE_P(ctx->op)) break;
1256
+
1222
1257
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1223
- if (more)
1224
- ctx->op->flags &= ~OP_F_COMPLETED;
1225
- else
1226
- break;
1258
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1227
1259
  }
1228
1260
 
1229
1261
  return Qnil;
@@ -1231,14 +1263,8 @@ VALUE accept_into_queue_start(VALUE arg) {
1231
1263
 
1232
1264
  VALUE multishot_complete(VALUE arg) {
1233
1265
  struct op_ctx *ctx = (struct op_ctx *)arg;
1234
- if (ctx->op->multishot_result_count) {
1235
- int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
1236
- if (more)
1237
- ctx->op->flags &= ~OP_F_COMPLETED;
1238
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1239
- }
1240
- if (!um_op_completed_p(ctx->op))
1241
- um_cancel_and_wait(ctx->machine, ctx->op);
1266
+ um_verify_op_completion(ctx->machine, ctx->op, true);
1267
+ um_op_release(ctx->machine, ctx->op);
1242
1268
 
1243
1269
  if (ctx->read_buf)
1244
1270
  free(ctx->read_buf);
@@ -1247,19 +1273,20 @@ VALUE multishot_complete(VALUE arg) {
1247
1273
  }
1248
1274
 
1249
1275
  VALUE um_accept_each(struct um *machine, int fd) {
1250
- struct um_op op;
1251
- um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT, OP_F_MULTISHOT);
1276
+ struct um_op *op = um_op_acquire(machine);
1277
+ um_prep_op(machine, op, OP_ACCEPT_MULTISHOT, 2, OP_F_MULTISHOT);
1252
1278
 
1253
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
1279
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .read_buf = NULL };
1254
1280
  return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1255
1281
  }
1256
1282
 
1257
1283
// Accepts connections on fd in a loop (multishot accept), pushing each
// accepted fd into the given queue instead of yielding it to a block.
VALUE um_accept_into_queue(struct um *machine, int fd, VALUE queue) {
  // resolve the queue's native struct up front, before any fiber switch
  struct um_queue *queue_data = Queue_data(queue);
  struct um_op *op = um_op_acquire(machine);
  um_prep_op(machine, op, OP_ACCEPT_MULTISHOT, 2, OP_F_MULTISHOT);

  struct op_ctx ctx = {
    .machine = machine, .op = op, .fd = fd, .queue = queue_data, .read_buf = NULL
  };
  return rb_ensure(accept_into_queue_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
}
@@ -1271,15 +1298,13 @@ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
1271
1298
  int total = 0;
1272
1299
 
1273
1300
  while (1) {
1274
- um_prep_op(ctx->machine, ctx->op, OP_READ, 0);
1301
+ um_prep_op(ctx->machine, ctx->op, OP_READ, 2, 0);
1275
1302
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
1276
1303
  io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
1277
1304
 
1278
1305
  VALUE ret = um_yield(ctx->machine);
1279
- if (um_op_completed_p(ctx->op)) {
1280
- um_raise_on_error_result(ctx->op->result.res);
1281
- if (!ctx->op->result.res) return total;
1282
1306
 
1307
+ if (likely(um_verify_op_completion(ctx->machine, ctx->op, true))) {
1283
1308
  VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
1284
1309
  total += ctx->op->result.res;
1285
1310
  rb_yield(buf);
@@ -1287,10 +1312,11 @@ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
1287
1312
  }
1288
1313
  else {
1289
1314
  RAISE_IF_EXCEPTION(ret);
1290
- return ret;
1315
+ return 0;
1291
1316
  }
1292
1317
  RB_GC_GUARD(ret);
1293
1318
  }
1319
+ return 0;
1294
1320
  }
1295
1321
 
1296
1322
  // // returns true if more results are expected
@@ -1337,91 +1363,83 @@ VALUE read_recv_each_start(VALUE arg) {
1337
1363
 
1338
1364
  while (true) {
1339
1365
  VALUE ret = um_yield(ctx->machine);
1340
- if (!um_op_completed_p(ctx->op)) {
1341
- RAISE_IF_EXCEPTION(ret);
1342
- return ret;
1343
- }
1366
+
1367
+ RAISE_IF_EXCEPTION(ret);
1368
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1344
1369
  RB_GC_GUARD(ret);
1345
1370
 
1346
- int more = false;
1347
1371
  struct um_op_result *result = &ctx->op->result;
1348
1372
  while (result) {
1349
1373
  um_raise_on_error_result(result->res);
1350
1374
 
1351
- more = (result->flags & IORING_CQE_F_MORE);
1352
1375
  if (!read_recv_each_multishot_process_result(ctx, result, &total))
1353
1376
  return Qnil;
1354
1377
 
1355
1378
  // rb_yield(INT2NUM(result->res));
1356
1379
  result = result->next;
1357
1380
  }
1381
+
1382
+ if (OP_CQE_DONE_P(ctx->op)) break;
1383
+
1358
1384
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1359
- if (more)
1360
- ctx->op->flags &= ~OP_F_COMPLETED;
1361
- else
1362
- break;
1385
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1363
1386
  }
1364
1387
 
1365
1388
  return Qnil;
1366
1389
  }
1367
1390
 
1368
1391
  VALUE um_read_each(struct um *machine, int fd, int bgid) {
1369
- struct um_op op;
1370
- um_prep_op(machine, &op, OP_READ_MULTISHOT, OP_F_MULTISHOT);
1392
+ struct um_op *op = um_op_acquire(machine);
1393
+ um_prep_op(machine, op, OP_READ_MULTISHOT, 2, OP_F_MULTISHOT);
1371
1394
 
1372
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
1395
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL };
1373
1396
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1374
1397
  }
1375
1398
 
1376
1399
  VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
1377
- struct um_op op;
1378
- um_prep_op(machine, &op, OP_RECV_MULTISHOT, OP_F_MULTISHOT);
1400
+ struct um_op *op = um_op_acquire(machine);
1401
+ um_prep_op(machine, op, OP_RECV_MULTISHOT, 2, OP_F_MULTISHOT);
1379
1402
 
1380
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
1403
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
1381
1404
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1382
1405
  }
1383
1406
 
1384
1407
// Loop body for um_periodically, run under rb_ensure. Arms a multishot
// timeout on ctx->op, then yields Qnil to the block once per timer tick
// until the op stops producing CQEs or an exception is raised.
VALUE periodically_start(VALUE arg) {
  struct op_ctx *ctx = (struct op_ctx *)arg;
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
  // the timespec is stored in the op itself, so it remains valid for
  // the whole lifetime of the multishot timeout
  io_uring_prep_timeout(sqe, &ctx->op->ts, 0, IORING_TIMEOUT_MULTISHOT);

  while (true) {
    VALUE ret = um_switch(ctx->machine);

    RAISE_IF_EXCEPTION(ret);
    if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
    RB_GC_GUARD(ret);

    // walk all buffered multishot results for this wakeup
    struct um_op_result *result = &ctx->op->result;
    while (result) {
      // -ETIME is the normal completion code for a timeout CQE; any
      // other negative result is a real error
      if (unlikely(result->res < 0 && result->res != -ETIME)) um_raise_on_error_result(result->res);

      rb_yield(Qnil);
      result = result->next;
    }
    if (OP_CQE_DONE_P(ctx->op)) break;

    // more CQEs expected: clear buffered results and rearm for the next
    // wakeup
    um_op_multishot_results_clear(ctx->machine, ctx->op);
    ctx->op->flags &= ~OP_F_CQE_SEEN;
  }

  return Qnil;
}
1417
1434
 
1418
1435
  VALUE um_periodically(struct um *machine, double interval) {
1419
- struct um_op op;
1420
- um_prep_op(machine, &op, OP_SLEEP_MULTISHOT, OP_F_MULTISHOT);
1421
- op.ts = um_double_to_timespec(interval);
1436
+ struct um_op *op = um_op_acquire(machine);
1437
+ um_prep_op(machine, op, OP_TIMEOUT_MULTISHOT, 2, OP_F_MULTISHOT);
1438
+ op->ts = um_double_to_timespec(interval);
1422
1439
 
1423
- struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
1440
+ struct op_ctx ctx = { .machine = machine, .op = op, .read_buf = NULL };
1424
1441
  return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1442
+ return Qnil;
1425
1443
  }
1426
1444
 
1427
1445
  extern VALUE SYM_size;