polyphony 1.4 → 1.6

Files changed (106)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +3 -0
  3. data/CHANGELOG.md +22 -0
  4. data/TODO.md +5 -14
  5. data/examples/pipes/http_server.rb +42 -12
  6. data/examples/pipes/http_server2.rb +45 -0
  7. data/ext/polyphony/backend_common.h +5 -0
  8. data/ext/polyphony/backend_io_uring.c +174 -121
  9. data/ext/polyphony/backend_io_uring_context.c +24 -18
  10. data/ext/polyphony/backend_io_uring_context.h +4 -2
  11. data/ext/polyphony/backend_libev.c +46 -22
  12. data/ext/polyphony/event.c +21 -0
  13. data/ext/polyphony/extconf.rb +25 -19
  14. data/ext/polyphony/fiber.c +0 -2
  15. data/ext/polyphony/pipe.c +1 -1
  16. data/ext/polyphony/polyphony.c +2 -20
  17. data/ext/polyphony/polyphony.h +5 -5
  18. data/ext/polyphony/ring_buffer.c +1 -0
  19. data/ext/polyphony/runqueue_ring_buffer.c +1 -0
  20. data/ext/polyphony/thread.c +63 -0
  21. data/ext/polyphony/win_uio.h +18 -0
  22. data/lib/polyphony/adapters/open3.rb +190 -0
  23. data/lib/polyphony/core/sync.rb +83 -13
  24. data/lib/polyphony/core/timer.rb +7 -25
  25. data/lib/polyphony/extensions/exception.rb +15 -0
  26. data/lib/polyphony/extensions/fiber.rb +14 -13
  27. data/lib/polyphony/extensions/io.rb +56 -14
  28. data/lib/polyphony/extensions/kernel.rb +1 -1
  29. data/lib/polyphony/extensions/object.rb +1 -13
  30. data/lib/polyphony/extensions/process.rb +76 -1
  31. data/lib/polyphony/extensions/socket.rb +0 -14
  32. data/lib/polyphony/extensions/thread.rb +19 -27
  33. data/lib/polyphony/extensions/timeout.rb +5 -1
  34. data/lib/polyphony/version.rb +1 -1
  35. data/lib/polyphony.rb +11 -5
  36. data/test/helper.rb +46 -4
  37. data/test/open3/envutil.rb +380 -0
  38. data/test/open3/find_executable.rb +24 -0
  39. data/test/stress.rb +11 -7
  40. data/test/test_backend.rb +11 -4
  41. data/test/test_event.rb +10 -3
  42. data/test/test_ext.rb +16 -1
  43. data/test/test_fiber.rb +16 -4
  44. data/test/test_global_api.rb +17 -16
  45. data/test/test_io.rb +39 -0
  46. data/test/test_kernel.rb +2 -2
  47. data/test/test_monitor.rb +356 -0
  48. data/test/test_open3.rb +338 -0
  49. data/test/test_signal.rb +5 -1
  50. data/test/test_socket.rb +6 -98
  51. data/test/test_sync.rb +46 -0
  52. data/test/test_thread.rb +10 -1
  53. data/test/test_thread_pool.rb +5 -0
  54. data/test/test_throttler.rb +1 -1
  55. data/test/test_timer.rb +8 -2
  56. data/test/test_trace.rb +2 -0
  57. data/vendor/liburing/.github/workflows/build.yml +8 -0
  58. data/vendor/liburing/.gitignore +1 -0
  59. data/vendor/liburing/CHANGELOG +8 -0
  60. data/vendor/liburing/configure +17 -25
  61. data/vendor/liburing/debian/liburing-dev.manpages +2 -0
  62. data/vendor/liburing/debian/rules +2 -1
  63. data/vendor/liburing/examples/Makefile +2 -1
  64. data/vendor/liburing/examples/io_uring-udp.c +11 -3
  65. data/vendor/liburing/examples/rsrc-update-bench.c +100 -0
  66. data/vendor/liburing/liburing.spec +1 -1
  67. data/vendor/liburing/make-debs.sh +4 -2
  68. data/vendor/liburing/src/Makefile +5 -5
  69. data/vendor/liburing/src/arch/aarch64/lib.h +1 -1
  70. data/vendor/liburing/src/include/liburing/io_uring.h +41 -16
  71. data/vendor/liburing/src/include/liburing.h +86 -11
  72. data/vendor/liburing/src/int_flags.h +1 -0
  73. data/vendor/liburing/src/liburing-ffi.map +12 -0
  74. data/vendor/liburing/src/liburing.map +8 -0
  75. data/vendor/liburing/src/register.c +7 -2
  76. data/vendor/liburing/src/setup.c +373 -81
  77. data/vendor/liburing/test/232c93d07b74.c +3 -3
  78. data/vendor/liburing/test/Makefile +10 -3
  79. data/vendor/liburing/test/accept.c +2 -1
  80. data/vendor/liburing/test/buf-ring.c +35 -75
  81. data/vendor/liburing/test/connect-rep.c +204 -0
  82. data/vendor/liburing/test/coredump.c +59 -0
  83. data/vendor/liburing/test/fallocate.c +9 -0
  84. data/vendor/liburing/test/fd-pass.c +34 -3
  85. data/vendor/liburing/test/file-verify.c +27 -6
  86. data/vendor/liburing/test/helpers.c +3 -1
  87. data/vendor/liburing/test/io_uring_register.c +25 -28
  88. data/vendor/liburing/test/io_uring_setup.c +1 -1
  89. data/vendor/liburing/test/poll-cancel-all.c +29 -5
  90. data/vendor/liburing/test/poll-race-mshot.c +6 -22
  91. data/vendor/liburing/test/read-write.c +53 -0
  92. data/vendor/liburing/test/recv-msgall.c +21 -23
  93. data/vendor/liburing/test/reg-fd-only.c +55 -0
  94. data/vendor/liburing/test/reg-hint.c +56 -0
  95. data/vendor/liburing/test/regbuf-merge.c +91 -0
  96. data/vendor/liburing/test/ringbuf-read.c +2 -10
  97. data/vendor/liburing/test/send_recvmsg.c +5 -16
  98. data/vendor/liburing/test/shutdown.c +2 -1
  99. data/vendor/liburing/test/socket-io-cmd.c +215 -0
  100. data/vendor/liburing/test/socket-rw-eagain.c +2 -1
  101. data/vendor/liburing/test/socket-rw-offset.c +2 -1
  102. data/vendor/liburing/test/socket-rw.c +2 -1
  103. data/vendor/liburing/test/timeout.c +276 -0
  104. data/vendor/liburing/test/xattr.c +38 -25
  105. metadata +20 -7
  106. data/vendor/liburing/test/timeout-overflow.c +0 -204
data/ext/polyphony/backend_io_uring.c
@@ -138,12 +138,28 @@ VALUE Backend_post_fork(VALUE self) {
 typedef struct poll_context {
   struct io_uring *ring;
   struct io_uring_cqe *cqe;
+  int pending_sqes;
   int result;
 } poll_context_t;
 
+// This function combines the functionality of io_uring_wait_cqe() and io_uring_submit_and_wait()
+static inline int io_uring_submit_and_wait_cqe(struct io_uring *ring,
+                                               struct io_uring_cqe **cqe_ptr)
+{
+  if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr) {
+    io_uring_submit(ring);
+    return 0;
+  }
+
+  *cqe_ptr = NULL;
+  return io_uring_submit_and_wait(ring, 1);
+}
+
 void *io_uring_backend_poll_without_gvl(void *ptr) {
   poll_context_t *ctx = (poll_context_t *)ptr;
-  ctx->result = io_uring_wait_cqe(ctx->ring, &ctx->cqe);
+  ctx->result = ctx->pending_sqes ?
+    io_uring_submit_and_wait_cqe(ctx->ring, &ctx->cqe) :
+    io_uring_wait_cqe(ctx->ring, &ctx->cqe);
   return NULL;
 }
 
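A note on the new poll path (the sketch below is a standalone illustration, not part of the diff): when there are pending SQEs, io_uring_submit_and_wait_cqe() folds submission and waiting into a single io_uring_enter() syscall, where the old code paid one syscall to submit and another to wait. The liburing primitive it builds on can be exercised in isolation:

// standalone sketch: submit pending SQEs and wait for a CQE in one syscall
#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_nop(sqe);                 // no-op request, completes immediately

  io_uring_submit_and_wait(&ring, 1);     // single io_uring_enter() call

  struct io_uring_cqe *cqe;
  if (io_uring_peek_cqe(&ring, &cqe) == 0) {
    printf("nop completed, res=%d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);
  }
  io_uring_queue_exit(&ring);
  return 0;
}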
@@ -152,6 +168,8 @@ static inline bool cq_ring_needs_flush(struct io_uring *ring) {
   return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
+#define MULTISHOT_ACCEPT_QUEUE(socket) (rb_ivar_get(socket, ID_ivar_multishot_accept_queue))
+
 static void handle_multishot_accept_completion(op_context_t *ctx, struct io_uring_cqe *cqe, Backend_t *backend) {
   // printf("handle_multishot_accept_completion result: %d\n", ctx->result);
   if (ctx->result == -ECANCELED) {
@@ -162,9 +180,27 @@ static void handle_multishot_accept_completion(op_context_t *ctx, struct io_urin
     if (!(cqe->flags & IORING_CQE_F_MORE)) {
       context_store_release(&backend->store, ctx);
     }
-    VALUE queue = rb_ivar_get(ctx->resume_value, ID_ivar_multishot_accept_queue);
+    VALUE queue = MULTISHOT_ACCEPT_QUEUE(ctx->resume_value);
     if (queue != Qnil)
-      Queue_push(queue, INT2NUM(ctx->result));
+      Queue_push(queue, INT2FIX(ctx->result));
+  }
+}
+
+static void handle_multishot_timeout_completion(
+  op_context_t *ctx, struct io_uring_cqe *cqe, Backend_t *backend
+)
+{
+  if (ctx->result == -ECANCELED) {
+    context_store_release(&backend->store, ctx);
+  }
+  else {
+    int has_more = cqe->flags & IORING_CQE_F_MORE;
+    if (!has_more) {
+      context_store_release(&backend->store, ctx);
+    }
+    if (ctx->fiber) {
+      Fiber_make_runnable(ctx->fiber, has_more ? Qtrue : Qnil);
+    }
   }
 }
 
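The timeout handler above hinges on IORING_CQE_F_MORE: while the flag is set, the armed multishot timeout will post further CQEs and its op context must stay alive; only the final CQE releases it. A standalone illustration (not part of the diff), assuming a kernel and liburing new enough to support IORING_TIMEOUT_MULTISHOT (Linux 6.4+):

// standalone sketch: one multishot timeout SQE, a CQE per tick
#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

  struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000 }; // 100ms
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_MULTISHOT);
  io_uring_submit(&ring);

  for (int i = 0; i < 3; i++) {
    struct io_uring_cqe *cqe;
    if (io_uring_wait_cqe(&ring, &cqe) < 0) break;
    // res is -ETIME for a timeout tick; F_MORE means more ticks will follow
    printf("tick %d, res=%d, more=%d\n", i, cqe->res,
           !!(cqe->flags & IORING_CQE_F_MORE));
    io_uring_cqe_seen(&ring, cqe);
  }
  io_uring_queue_exit(&ring);
  return 0;
}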
@@ -172,6 +208,8 @@ static void handle_multishot_completion(op_context_t *ctx, struct io_uring_cqe *
   switch (ctx->type) {
     case OP_MULTISHOT_ACCEPT:
       return handle_multishot_accept_completion(ctx, cqe, backend);
+    case OP_MULTISHOT_TIMEOUT:
+      return handle_multishot_timeout_completion(ctx, cqe, backend);
     default:
       printf("Unexpected multishot completion for op type %d\n", ctx->type);
   }
@@ -181,6 +219,12 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   op_context_t *ctx = io_uring_cqe_get_data(cqe);
   if (!ctx) return;
 
+  // if (ctx->type == OP_TIMEOUT) {
+  //   double now = current_time_ns() / 1e9;
+  //   double elapsed = now - ctx->ts;
+  //   printf("%13.6f CQE timeout %p:%d (elapsed: %9.6f)\n", now, ctx, ctx->id, elapsed);
+  // }
+
   // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
   ctx->result = cqe->res;
   if (ctx->ref_count == MULTISHOT_REFCOUNT) {
@@ -236,7 +280,7 @@ inline void io_uring_backend_defer_submit(Backend_t *backend) {
 void io_uring_backend_poll(Backend_t *backend) {
   poll_context_t poll_ctx;
   poll_ctx.ring = &backend->ring;
-  if (backend->pending_sqes) io_uring_backend_immediate_submit(backend);
+  poll_ctx.pending_sqes = backend->pending_sqes;
 
 wait_cqe:
   backend->base.currently_polling = 1;
@@ -247,8 +291,10 @@ wait_cqe:
     return;
   }
 
-  io_uring_backend_handle_completion(poll_ctx.cqe, backend);
-  io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
+  if (poll_ctx.cqe) {
+    io_uring_backend_handle_completion(poll_ctx.cqe, backend);
+    io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
+  }
 }
 
 inline VALUE Backend_poll(VALUE self, VALUE blocking) {
@@ -284,6 +330,7 @@ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
   runqueue_delete(&backend->base.runqueue, fiber);
 }
 
+// This function is deprecated
 inline VALUE Backend_switch_fiber(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
@@ -331,7 +378,22 @@ VALUE Backend_wakeup(VALUE self) {
   return Qnil;
 }
 
+static inline VALUE io_uring_backend_await(VALUE self, struct Backend_t *backend) {
+  backend->base.pending_count++;
+
+  VALUE ret = backend_base_switch_fiber(self, &backend->base);
+
+  // run next fiber
+  COND_TRACE(&backend->base, 4, SYM_unblock, rb_fiber_current(), ret, CALLER());
+
+  backend->base.pending_count--;
+  RB_GC_GUARD(ret);
+  return ret;
+
+}
+
 int io_uring_backend_defer_submit_and_await(
+  VALUE self,
   Backend_t *backend,
   struct io_uring_sqe *sqe,
   op_context_t *ctx,
@@ -344,7 +406,7 @@ int io_uring_backend_defer_submit_and_await(
   if (sqe) io_uring_sqe_set_data(sqe, ctx);
   io_uring_backend_defer_submit(backend);
 
-  switchpoint_result = backend_await((struct Backend_base *)backend);
+  switchpoint_result = io_uring_backend_await(self, backend);
 
   if (ctx->ref_count > 1) {
     struct io_uring_sqe *sqe;
@@ -363,14 +425,14 @@ int io_uring_backend_defer_submit_and_await(
   return ctx->result;
 }
 
-VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
+VALUE io_uring_backend_wait_fd(VALUE self, Backend_t *backend, int fd, int write) {
   op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
   VALUE resumed_value = Qnil;
 
   struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);
 
-  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
+  io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resumed_value);
   context_store_release(&backend->store, ctx);
 
   RB_GC_GUARD(resumed_value);
@@ -378,20 +440,24 @@ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
 }
 
 static inline int fd_from_io(VALUE io, rb_io_t **fptr, int write_mode, int rectify_file_pos) {
+  if (TYPE(io) == T_FIXNUM) {
+    *fptr = NULL;
+    return FIX2INT(io);
+  }
+
   if (rb_obj_class(io) == cPipe) {
     *fptr = NULL;
     return Pipe_get_fd(io, write_mode);
   }
-  else {
-    VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
-    if (underlying_io != Qnil) io = underlying_io;
-
-    GetOpenFile(io, *fptr);
-    int fd = rb_io_descriptor(io);
-    io_unset_nonblock(io, fd);
-    if (rectify_file_pos) rectify_io_file_pos(*fptr);
-    return fd;
-  }
+
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+
+  GetOpenFile(io, *fptr);
+  int fd = rb_io_descriptor(io);
+  io_unset_nonblock(io, fd);
+  if (rectify_file_pos) rectify_io_file_pos(*fptr);
+  return fd;
 }
 
 VALUE Backend_read(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE to_eof, VALUE pos) {
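With the T_FIXNUM branch above, fd_from_io() lets backend methods operate on bare file descriptors (e.g. pipe fds from spawned processes) as well as IO objects and Pipes. The underlying Ruby C API idiom, shown in isolation (a sketch assuming Ruby 3.1+ for rb_io_descriptor(); value_to_fd is a hypothetical name):

// standalone sketch: accept either a raw Integer fd or an IO-like object
#include <ruby.h>
#include <ruby/io.h>

static int value_to_fd(VALUE io) {
  if (TYPE(io) == T_FIXNUM)      // a raw fd passed directly as an Integer
    return FIX2INT(io);
  return rb_io_descriptor(io);   // otherwise pull the fd out of the IO object
}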
@@ -415,7 +481,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE to_eo
 
   io_uring_prep_read(sqe, fd, buffer_spec.ptr, buffer_spec.len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -476,7 +542,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
 
   io_uring_prep_read(sqe, fd, ptr, len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -525,7 +591,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
 
   io_uring_prep_read(sqe, fd, ptr, len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -569,7 +635,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE buffer) {
 
   io_uring_prep_write(sqe, fd, buffer_spec.ptr, left, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -620,9 +686,10 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
 
   io_uring_prep_writev(sqe, fd, iov_ptr, iov_count, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
+    TRACE_FREE(iov);
     free(iov);
     context_attach_buffers(ctx, argc, argv);
     RAISE_IF_EXCEPTION(resume_value);
@@ -631,6 +698,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
   RB_GC_GUARD(resume_value);
 
   if (result < 0) {
+    TRACE_FREE(iov);
     free(iov);
     rb_syserr_fail(-result, strerror(-result));
   }
@@ -653,6 +721,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
     }
   }
 
+  TRACE_FREE(iov);
   free(iov);
   return INT2FIX(total_written);
 }
@@ -686,7 +755,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE pos)
 
   io_uring_prep_recv(sqe, fd, buffer_spec.ptr, buffer_spec.len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -744,7 +813,7 @@ VALUE Backend_recvmsg(VALUE self, VALUE io, VALUE buffer, VALUE maxlen, VALUE po
 
   io_uring_prep_recvmsg(sqe, fd, &msg, NUM2INT(flags));
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -794,7 +863,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
 
   io_uring_prep_recv(sqe, fd, ptr, len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -842,7 +911,7 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
 
   io_uring_prep_recv(sqe, fd, ptr, len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -886,7 +955,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE buffer, VALUE flags) {
 
   io_uring_prep_send(sqe, fd, buffer_spec.ptr, left, flags_int);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -947,7 +1016,7 @@ VALUE Backend_sendmsg(VALUE self, VALUE io, VALUE buffer, VALUE flags, VALUE des
 
   io_uring_prep_sendmsg(sqe, fd, &msg, flags_int);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -968,7 +1037,20 @@ VALUE Backend_sendmsg(VALUE self, VALUE io, VALUE buffer, VALUE flags, VALUE des
   return INT2FIX(buffer_spec.len);
 }
 
-VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE socket_class, int loop) {
+inline VALUE create_socket_from_fd(int fd, VALUE socket_class) {
+  rb_io_t *fp;
+
+  VALUE socket = rb_obj_alloc(socket_class);
+  MakeOpenFile(socket, fp);
+  rb_update_max_fd(fd);
+  fp->fd = fd;
+  fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
+  rb_io_ascii8bit_binmode(socket);
+  rb_io_synchronized(fp);
+  return socket;
+}
+
+VALUE io_uring_backend_accept(VALUE self, Backend_t *backend, VALUE server_socket, VALUE socket_class, int loop) {
   int server_fd;
   rb_io_t *server_fptr;
   struct sockaddr addr;
@@ -986,7 +1068,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
   io_uring_prep_accept(sqe, server_fd, &addr, &len, 0);
 
-  fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  fd = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -995,19 +1077,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
   if (fd < 0)
     rb_syserr_fail(-fd, strerror(-fd));
   else {
-    rb_io_t *fp;
-
-    socket = rb_obj_alloc(socket_class);
-    MakeOpenFile(socket, fp);
-    rb_update_max_fd(fd);
-    fp->fd = fd;
-    fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-    rb_io_ascii8bit_binmode(socket);
-    rb_io_synchronized(fp);
-
-    // if (rsock_do_not_reverse_lookup) {
-    //   fp->mode |= FMODE_NOREVLOOKUP;
-    // }
+    socket = create_socket_from_fd(fd, socket_class);
     if (loop) {
       rb_yield(socket);
       socket = Qnil;
@@ -1022,30 +1092,20 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
 VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
 #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
+  VALUE accept_queue = MULTISHOT_ACCEPT_QUEUE(server_socket);
   if (accept_queue != Qnil) {
     VALUE next = Queue_shift(0, 0, accept_queue);
    int fd = NUM2INT(next);
     if (fd < 0)
       rb_syserr_fail(-fd, strerror(-fd));
-    else {
-      rb_io_t *fp;
-
-      VALUE socket = rb_obj_alloc(socket_class);
-      MakeOpenFile(socket, fp);
-      rb_update_max_fd(fd);
-      fp->fd = fd;
-      fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-      rb_io_ascii8bit_binmode(socket);
-      rb_io_synchronized(fp);
-      return socket;
-    }
+    else
+      return create_socket_from_fd(fd, socket_class);
   }
 #endif
 
   Backend_t *backend;
   GetBackend(self, backend);
-  return io_uring_backend_accept(backend, server_socket, socket_class, 0);
+  return io_uring_backend_accept(self, backend, server_socket, socket_class, 0);
 }
 
 #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
@@ -1053,9 +1113,25 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
 struct multishot_accept_ctx {
   Backend_t *backend;
   VALUE server_socket;
+  VALUE socket_class;
   op_context_t *op_ctx;
 };
 
+static inline VALUE accept_loop_from_queue(VALUE server_socket, VALUE socket_class) {
+  VALUE accept_queue = MULTISHOT_ACCEPT_QUEUE(server_socket);
+  if (accept_queue == Qnil) return Qnil;
+
+  while (true) {
+    VALUE next = Queue_shift(0, 0, accept_queue);
+    int fd = NUM2INT(next);
+    if (fd < 0)
+      rb_syserr_fail(-fd, strerror(-fd));
+    else
+      rb_yield(create_socket_from_fd(fd, socket_class));
+  }
+  return Qtrue;
+}
+
 VALUE multishot_accept_start(struct multishot_accept_ctx *ctx) {
   int server_fd;
   rb_io_t *server_fptr;
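accept_loop_from_queue() drains completions produced by a single multishot accept SQE: the kernel posts one CQE per incoming connection, each carrying the accepted fd in cqe->res. A standalone illustration of that primitive (not part of the diff), assuming kernel 5.19+ and a liburing that provides io_uring_prep_multishot_accept():

// standalone sketch: one armed SQE, one CQE per accepted connection
#include <liburing.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
  struct sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = htons(4242);
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr));
  listen(listen_fd, 128);

  struct io_uring ring;
  io_uring_queue_init(64, &ring, 0);

  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0); // armed once
  io_uring_submit(&ring);

  for (;;) {
    struct io_uring_cqe *cqe;
    if (io_uring_wait_cqe(&ring, &cqe) < 0) break;
    if (cqe->res >= 0) {                   // res holds the accepted fd
      printf("accepted fd %d, more=%d\n", cqe->res,
             !!(cqe->flags & IORING_CQE_F_MORE));
      close(cqe->res);
    }
    io_uring_cqe_seen(&ring, cqe);
  }
  io_uring_queue_exit(&ring);
  return 0;
}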
@@ -1071,7 +1147,7 @@ VALUE multishot_accept_start(struct multishot_accept_ctx *ctx) {
   io_uring_sqe_set_data(sqe, ctx->op_ctx);
   io_uring_backend_defer_submit(ctx->backend);
 
-  rb_yield(ctx->server_socket);
+  accept_loop_from_queue(ctx->server_socket, ctx->socket_class);
 
   return Qnil;
 }
@@ -1087,55 +1163,30 @@ VALUE multishot_accept_cleanup(struct multishot_accept_ctx *ctx) {
   return Qnil;
 }
 
-VALUE Backend_multishot_accept(VALUE self, VALUE server_socket) {
-  Backend_t *backend;
-  GetBackend(self, backend);
-
-  struct multishot_accept_ctx ctx;
-  ctx.backend = backend;
-  ctx.server_socket = server_socket;
+VALUE multishot_accept_loop(Backend_t *backend, VALUE server_socket, VALUE socket_class) {
+  struct multishot_accept_ctx ctx = { backend, server_socket, socket_class };
 
   return rb_ensure(
     SAFE(multishot_accept_start), (VALUE)&ctx,
     SAFE(multishot_accept_cleanup), (VALUE)&ctx
   );
 }
-
 #endif
 
 VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
-#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
-  if (accept_queue != Qnil) {
-    while (true) {
-      VALUE next = Queue_shift(0, 0, accept_queue);
-      int fd = NUM2INT(next);
-      if (fd < 0)
-        rb_syserr_fail(-fd, strerror(-fd));
-      else {
-        rb_io_t *fp;
-
-        VALUE socket = rb_obj_alloc(socket_class);
-        MakeOpenFile(socket, fp);
-        rb_update_max_fd(fd);
-        fp->fd = fd;
-        fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-        rb_io_ascii8bit_binmode(socket);
-        rb_io_synchronized(fp);
-        rb_yield(socket);
-      }
-    }
-    return self;
-  }
-#endif
-
   Backend_t *backend;
   GetBackend(self, backend);
-  io_uring_backend_accept(backend, server_socket, socket_class, 1);
+
+#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+  multishot_accept_loop(backend, server_socket, socket_class);
+#else
+  io_uring_backend_accept(self, backend, server_socket, socket_class, 1);
+#endif
+
   return self;
 }
 
-VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int maxlen) {
+VALUE io_uring_backend_splice(VALUE self, Backend_t *backend, VALUE src, VALUE dest, int maxlen) {
   int src_fd;
   int dest_fd;
   rb_io_t *src_fptr;
@@ -1156,7 +1207,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int max
 
   io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, maxlen, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1174,10 +1225,11 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int max
 VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   Backend_t *backend;
   GetBackend(self, backend);
-  return io_uring_backend_splice(backend, src, dest, FIX2INT(maxlen));
+  return io_uring_backend_splice(self, backend, src, dest, FIX2INT(maxlen));
 }
 
 struct double_splice_ctx {
+  VALUE self;
   Backend_t *backend;
   VALUE src;
   VALUE dest;
@@ -1222,7 +1274,7 @@ VALUE double_splice_safe(struct double_splice_ctx *ctx) {
   io_uring_backend_immediate_submit(ctx->backend);
 
   while (1) {
-    resume_value = backend_await((struct Backend_base *)ctx->backend);
+    resume_value = io_uring_backend_await(ctx->self, ctx->backend);
 
     if ((ctx_src && ctx_src->ref_count == 2 && ctx_dest && ctx_dest->ref_count == 2) || TEST_EXCEPTION(resume_value)) {
       if (ctx_src) {
@@ -1273,7 +1325,7 @@ VALUE double_splice_cleanup(struct double_splice_ctx *ctx) {
 }
 
 VALUE Backend_double_splice(VALUE self, VALUE src, VALUE dest) {
-  struct double_splice_ctx ctx = { NULL, src, dest, {0, 0} };
+  struct double_splice_ctx ctx = { self, NULL, src, dest, {0, 0} };
   GetBackend(self, ctx.backend);
   if (pipe(ctx.pipefd) == -1) rb_syserr_fail(errno, strerror(errno));
 
@@ -1304,7 +1356,7 @@ VALUE Backend_tee(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   io_uring_prep_tee(sqe, src_fd, dest_fd, FIX2INT(maxlen), 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1337,7 +1389,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   ctx = context_store_acquire(&backend->store, OP_CONNECT);
   sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_connect(sqe, fd, ai_addr, ai_addrlen);
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1356,7 +1408,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
 
   GetBackend(self, backend);
   fd = fd_from_io(io, &fptr, write_mode, 0);
-  resume_value = io_uring_backend_wait_fd(backend, fd, write_mode);
+  resume_value = io_uring_backend_wait_fd(self, backend, fd, write_mode);
 
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -1378,7 +1430,7 @@ VALUE Backend_close(VALUE self, VALUE io) {
   ctx = context_store_acquire(&backend->store, OP_CLOSE);
   sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_close(sqe, fd);
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1386,7 +1438,7 @@ VALUE Backend_close(VALUE self, VALUE io) {
 
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
 
-  fptr_finalize(fptr);
+  if (fptr) fptr_finalize(fptr);
   // fd = -1;
   return io;
 }
@@ -1405,13 +1457,17 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 }
 
 // returns true if completed, 0 otherwise
-int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+int io_uring_backend_submit_timeout_and_await(VALUE self, Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
 
+  // double now = current_time_ns() / 1e9;
+  // ctx->ts = now;
+  // printf("%13.6f SQE timeout %p:%d (%g)\n", now, ctx, ctx->id, duration);
+
   io_uring_prep_timeout(sqe, &ts, 0, 0);
-  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
+  io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
 
@@ -1420,7 +1476,7 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
+  io_uring_backend_submit_timeout_and_await(self, backend, NUM2DBL(duration), &resume_value);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return resume_value;
@@ -1439,7 +1495,7 @@ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
     if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
     if (next_time_ns > now_ns) {
      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
-      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      int completed = io_uring_backend_submit_timeout_and_await(self, backend, sleep_duration, &resume_value);
       RAISE_IF_EXCEPTION(resume_value);
       if (!completed) return resume_value;
     }
@@ -1531,7 +1587,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  resume_value = io_uring_backend_wait_fd(backend, fd, 0);
+  resume_value = io_uring_backend_wait_fd(self, backend, fd, 0);
   close(fd);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -1549,7 +1605,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
     else
       rb_syserr_fail(e, strerror(e));
   }
-  return rb_ary_new_from_args(2, INT2FIX(ret), INT2FIX(WEXITSTATUS(status)));
+  return rb_ary_new_from_args(2, INT2FIX(ret), INT2FIX(status));
 }
 
 /*
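The waitpid change above returns the raw wait status instead of WEXITSTATUS(status), so callers can still distinguish a normal exit from death by signal. A standalone illustration (not part of the diff) of what the raw status preserves:

// standalone sketch: the raw status carries more than the exit code
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  pid_t pid = fork();
  if (pid == 0) _exit(42);       // child terminates with exit code 42

  int status;
  waitpid(pid, &status, 0);      // raw status, as now returned by Backend_waitpid
  if (WIFEXITED(status))
    printf("exited with %d\n", WEXITSTATUS(status));
  else if (WIFSIGNALED(status))
    printf("killed by signal %d\n", WTERMSIG(status));
  return 0;
}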
@@ -1586,7 +1642,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
   else
     backend->event_fd_ctx->ref_count += 1;
 
-  resume_value = backend_await((struct Backend_base *)backend);
+  resume_value = io_uring_backend_await(self, backend);
   context_store_release(&backend->store, backend->event_fd_ctx);
 
   if (backend->event_fd_ctx->ref_count == 1) {
@@ -1719,7 +1775,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   backend->base.op_count += sqe_count;
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
-  resume_value = backend_await((struct Backend_base *)backend);
+  resume_value = io_uring_backend_await(self, backend);
   result = ctx->result;
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
@@ -1803,6 +1859,7 @@ static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
 }
 
 static inline int splice_chunks_await_ops(
+  VALUE self,
   Backend_t *backend,
   op_context_t **ctx,
   int *result,
@@ -1810,7 +1867,7 @@ static inline int splice_chunks_await_ops(
 )
 {
   int completed;
-  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+  int res = io_uring_backend_defer_submit_and_await(self, backend, 0, *ctx, switchpoint_result);
 
   if (result) (*result) = res;
   completed = context_store_release(&backend->store, *ctx);
@@ -1822,8 +1879,8 @@ static inline int splice_chunks_await_ops(
   return 0;
 }
 
-#define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
-  if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+#define SPLICE_CHUNKS_AWAIT_OPS(self, backend, ctx, result, switchpoint_result) \
+  if (splice_chunks_await_ops(self, backend, ctx, result, switchpoint_result)) goto error;
 
 VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   Backend_t *backend;
@@ -1866,7 +1923,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     splice_chunks_prep_splice(ctx, sqe, src_fd, pipefd[1], maxlen);
     backend->base.op_count++;
 
-    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+    SPLICE_CHUNKS_AWAIT_OPS(self, backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
 
     total += chunk_len;
@@ -1901,7 +1958,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     backend->base.op_count++;
   }
   if (ctx) {
-    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+    SPLICE_CHUNKS_AWAIT_OPS(self, backend, &ctx, 0, &switchpoint_result);
   }
 
   RB_GC_GUARD(chunk_len_value);
@@ -1989,10 +2046,6 @@ void Init_Backend(void) {
   rb_define_method(cBackend, "connect", Backend_connect, 3);
   rb_define_method(cBackend, "feed_loop", Backend_feed_loop, 3);
 
-#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  rb_define_method(cBackend, "multishot_accept", Backend_multishot_accept, 1);
-#endif
-
   rb_define_method(cBackend, "read", Backend_read, 5);
   rb_define_method(cBackend, "read_loop", Backend_read_loop, 2);
   rb_define_method(cBackend, "recv", Backend_recv, 4);