polyphony 0.71 → 0.74

Files changed (80)
  1. checksums.yaml +4 -4
  2. data/.github/FUNDING.yml +1 -0
  3. data/.github/workflows/test.yml +15 -11
  4. data/.github/workflows/test_io_uring.yml +32 -0
  5. data/.gitignore +3 -1
  6. data/CHANGELOG.md +33 -4
  7. data/Gemfile.lock +16 -13
  8. data/TODO.md +1 -1
  9. data/bin/pdbg +1 -1
  10. data/docs/_user-guide/all-about-timers.md +1 -1
  11. data/docs/api-reference/exception.md +5 -1
  12. data/docs/api-reference/fiber.md +2 -2
  13. data/docs/faq.md +1 -1
  14. data/docs/getting-started/overview.md +8 -8
  15. data/docs/getting-started/tutorial.md +3 -3
  16. data/docs/main-concepts/concurrency.md +1 -1
  17. data/docs/main-concepts/extending.md +3 -3
  18. data/docs/main-concepts/fiber-scheduling.md +1 -1
  19. data/examples/core/calc.rb +37 -0
  20. data/examples/core/calc_with_restart.rb +40 -0
  21. data/examples/core/calc_with_supervise.rb +37 -0
  22. data/examples/core/message_based_supervision.rb +1 -1
  23. data/examples/core/ring.rb +29 -0
  24. data/examples/io/rack_server.rb +1 -1
  25. data/examples/io/tunnel.rb +1 -1
  26. data/examples/performance/fiber_transfer.rb +1 -1
  27. data/examples/performance/line_splitting.rb +1 -1
  28. data/examples/performance/thread-vs-fiber/compare.rb +1 -1
  29. data/ext/polyphony/backend_common.c +88 -18
  30. data/ext/polyphony/backend_common.h +8 -1
  31. data/ext/polyphony/backend_io_uring.c +280 -164
  32. data/ext/polyphony/backend_io_uring_context.c +2 -1
  33. data/ext/polyphony/backend_io_uring_context.h +3 -2
  34. data/ext/polyphony/backend_libev.c +42 -38
  35. data/ext/polyphony/event.c +5 -2
  36. data/ext/polyphony/extconf.rb +25 -13
  37. data/ext/polyphony/polyphony.c +10 -1
  38. data/ext/polyphony/polyphony.h +7 -1
  39. data/ext/polyphony/queue.c +12 -7
  40. data/ext/polyphony/runqueue_ring_buffer.c +6 -3
  41. data/ext/polyphony/socket_extensions.c +5 -2
  42. data/ext/polyphony/thread.c +1 -1
  43. data/lib/polyphony/adapters/irb.rb +11 -1
  44. data/lib/polyphony/{extensions → core}/debug.rb +0 -0
  45. data/lib/polyphony/core/global_api.rb +3 -6
  46. data/lib/polyphony/core/timer.rb +2 -2
  47. data/lib/polyphony/debugger.rb +3 -3
  48. data/lib/polyphony/extensions/exception.rb +45 -0
  49. data/lib/polyphony/extensions/fiber.rb +87 -11
  50. data/lib/polyphony/extensions/io.rb +2 -2
  51. data/lib/polyphony/extensions/{core.rb → kernel.rb} +0 -73
  52. data/lib/polyphony/extensions/openssl.rb +20 -5
  53. data/lib/polyphony/extensions/process.rb +19 -0
  54. data/lib/polyphony/extensions/socket.rb +20 -9
  55. data/lib/polyphony/extensions/thread.rb +9 -3
  56. data/lib/polyphony/extensions/timeout.rb +10 -0
  57. data/lib/polyphony/extensions.rb +9 -0
  58. data/lib/polyphony/version.rb +1 -1
  59. data/lib/polyphony.rb +2 -4
  60. data/polyphony.gemspec +1 -1
  61. data/test/coverage.rb +2 -2
  62. data/test/test_backend.rb +15 -17
  63. data/test/test_event.rb +1 -1
  64. data/test/test_ext.rb +1 -1
  65. data/test/test_fiber.rb +31 -7
  66. data/test/test_global_api.rb +23 -14
  67. data/test/test_io.rb +5 -5
  68. data/test/test_kernel.rb +2 -2
  69. data/test/test_process_supervision.rb +1 -1
  70. data/test/test_queue.rb +6 -6
  71. data/test/test_signal.rb +20 -1
  72. data/test/test_socket.rb +45 -10
  73. data/test/test_supervise.rb +85 -0
  74. data/test/test_sync.rb +2 -2
  75. data/test/test_thread.rb +22 -2
  76. data/test/test_thread_pool.rb +2 -2
  77. data/test/test_throttler.rb +3 -3
  78. data/test/test_timer.rb +3 -3
  79. data/test/test_trace.rb +1 -1
  80. metadata +19 -9
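
The hunks below are the full diff for `data/ext/polyphony/backend_io_uring.c` (entry 31 above, +280 −164); the other files' diffs are not reproduced here.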
@@ -42,6 +42,7 @@ typedef struct Backend_t {
   unsigned int pending_sqes;
   unsigned int prepared_limit;
   int event_fd;
+  int ring_initialized;
 } Backend_t;
 
 static void Backend_mark(void *ptr) {
@@ -80,20 +81,32 @@ static VALUE Backend_initialize(VALUE self) {
 
   backend_base_initialize(&backend->base);
   backend->pending_sqes = 0;
-  backend->prepared_limit = 2048;
+  backend->ring_initialized = 0;
+  backend->event_fd = -1;
 
   context_store_initialize(&backend->store);
-  io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
-  backend->event_fd = -1;
 
-  return Qnil;
+  backend->prepared_limit = 1024;
+  while (1) {
+    int ret = io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
+    if (!ret) break;
+
+    // if ENOMEM is returned, use a smaller limit
+    if (ret == -ENOMEM && backend->prepared_limit > 64)
+      backend->prepared_limit = backend->prepared_limit / 2;
+    else
+      rb_syserr_fail(-ret, strerror(-ret));
+  }
+  backend->ring_initialized = 1;
+
+  return self;
 }
 
 VALUE Backend_finalize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  io_uring_queue_exit(&backend->ring);
+  if (backend->ring_initialized) io_uring_queue_exit(&backend->ring);
   if (backend->event_fd != -1) close(backend->event_fd);
   context_store_free(&backend->store);
   return self;
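
The rewritten initializer no longer assumes `io_uring_queue_init` succeeds: the call fails with `-ENOMEM` when the ring exceeds the process's locked-memory budget (`RLIMIT_MEMLOCK`), so the backend now starts at 1024 entries and halves the requested depth down to a floor of 64 before giving up, and the new `ring_initialized` flag guards the matching `io_uring_queue_exit` in the finalizer. A minimal standalone sketch of the same fallback, assuming only liburing (the `main` harness is illustrative; the 1024/64 bounds mirror the hunk):

```c
// Halve the requested queue depth until io_uring_queue_init() succeeds,
// mirroring the ENOMEM fallback in Backend_initialize above.
#include <errno.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct io_uring ring;
  unsigned int entries = 1024;

  while (1) {
    int ret = io_uring_queue_init(entries, &ring, 0);
    if (!ret) break;                       // ring is ready
    if (ret == -ENOMEM && entries > 64) {  // locked-memory limit hit: retry smaller
      entries /= 2;
      continue;
    }
    fprintf(stderr, "io_uring_queue_init: %s\n", strerror(-ret));
    return 1;
  }
  printf("initialized ring with %u entries\n", entries);
  io_uring_queue_exit(&ring);
  return 0;
}
```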
@@ -127,7 +140,7 @@ void *io_uring_backend_poll_without_gvl(void *ptr) {
 
 // copied from queue.c
 static inline bool cq_ring_needs_flush(struct io_uring *ring) {
-  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
+  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
 static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
@@ -141,13 +154,13 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   context_store_release(&backend->store, ctx);
 }
 
-// adapted from io_uring_peek_batch_cqe in queue.c
+// adapted from io_uring_peek_batch_cqe in queue.c
 // this peeks at cqes and handles each available cqe
 void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
   struct io_uring *ring = &backend->ring;
-  bool overflow_checked = false;
+  bool overflow_checked = false;
   struct io_uring_cqe *cqe;
-  unsigned head;
+  unsigned head;
   unsigned cqe_count;
 
 again:
@@ -158,16 +171,16 @@ again:
   }
   io_uring_cq_advance(ring, cqe_count);
 
-  if (overflow_checked) goto done;
+  if (overflow_checked) goto done;
 
-  if (cq_ring_needs_flush(ring)) {
-    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
-    overflow_checked = true;
-    goto again;
-  }
+  if (cq_ring_needs_flush(ring)) {
+    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    overflow_checked = true;
+    goto again;
+  }
 
 done:
-  return;
+  return;
 }
 
 void io_uring_backend_poll(Backend_t *backend) {
@@ -200,19 +213,12 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
   }
 
   COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
-  // if (SHOULD_TRACE(&backend->base))
-  //   printf(
-  //     "io_uring_poll(blocking_mode: %d, pending: %d, taken: %d, available: %d, runqueue: %d\n",
-  //     is_blocking,
-  //     backend->base.pending_count,
-  //     backend->store.taken_count,
-  //     backend->store.available_count,
-  //     backend->base.runqueue.entries.count
-  //   );
+
   if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
 
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
+
   return self;
 }
 
@@ -255,7 +261,7 @@ VALUE Backend_wakeup(VALUE self) {
   io_uring_prep_nop(sqe);
   backend->pending_sqes = 0;
   io_uring_submit(&backend->ring);
-
+
   return Qtrue;
 }
 
@@ -289,9 +295,11 @@ int io_uring_backend_defer_submit_and_await(
   switchpoint_result = backend_await((struct Backend_base *)backend);
 
   if (ctx->ref_count > 1) {
+    struct io_uring_sqe *sqe;
+
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
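
Beyond hoisting the `sqe` declaration to the top of the block (this release moves many declarations ahead of statements, presumably for stricter or older C compilers; `extconf.rb` also changes in this release), this hunk is the core of the cancellation protocol: a `ref_count` above 1 after the await means the fiber was resumed by an exception rather than by the CQE, so the op's result is forced to `-ECANCELED` and an async-cancel SQE targeting it is submitted. A self-contained sketch of cancel-by-`user_data`, assuming the `void *` signature of `io_uring_prep_cancel` that this file uses (the pipe fds and tag variable are illustrative):

```c
// Submit a read that can't complete (empty pipe), then cancel it by the
// user_data pointer it was tagged with; the read's CQE arrives with -ECANCELED.
#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {
  struct io_uring ring;
  struct io_uring_cqe *cqe;
  char buf[16];
  int fds[2];
  int tag = 0; // any stable address works as a user_data tag

  pipe(fds); // the read end never receives data
  io_uring_queue_init(8, &ring, 0);

  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), -1);
  io_uring_sqe_set_data(sqe, &tag);
  io_uring_submit(&ring); // the read goes async and parks on the empty pipe

  sqe = io_uring_get_sqe(&ring);
  io_uring_prep_cancel(sqe, &tag, 0); // cancel whatever op carries this tag
  io_uring_sqe_set_data(sqe, NULL);
  io_uring_submit(&ring);

  for (int i = 0; i < 2; i++) { // one CQE per SQE
    io_uring_wait_cqe(&ring, &cqe);
    if (io_uring_cqe_get_data(cqe) == &tag)
      printf("read result: %s\n", strerror(-cqe->res)); // expect "Operation canceled"
    io_uring_cqe_seen(&ring, cqe);
  }
  io_uring_queue_exit(&ring);
  return 0;
}
```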
@@ -323,16 +331,20 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   long dynamic_len = length == Qnil;
   long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
   long buf_pos = NUM2INT(pos);
+  int shrinkable;
+  char *buf;
+  long total = 0;
+  int read_to_eof = RTEST(to_eof);
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+
   if (str != Qnil) {
     int current_len = RSTRING_LEN(str);
     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
   }
   else buf_pos = 0;
-  int shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
-  char *buf = RSTRING_PTR(str) + buf_pos;
-  long total = 0;
-  int read_to_eof = RTEST(to_eof);
-  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
+  buf = RSTRING_PTR(str) + buf_pos;
 
   GetBackend(self, backend);
   if (underlying_io != Qnil) io = underlying_io;
@@ -340,16 +352,18 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -410,10 +424,13 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    ssize_t result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
-    ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -460,10 +477,13 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    ssize_t result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
-    ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -490,6 +510,9 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io;
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+  long left = len;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -498,25 +521,24 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  long left = len;
-
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
       return resume_value;
     }
     RB_GC_GUARD(resume_value);
-
+
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
     else {
@@ -558,10 +580,13 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       free(iov);
       context_attach_buffers(ctx, argc, argv);
@@ -612,15 +637,18 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   long dynamic_len = length == Qnil;
   long len = dynamic_len ? 4096 : NUM2INT(length);
   long buf_pos = NUM2INT(pos);
+  int shrinkable;
+  char *buf;
+  long total = 0;
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);;
+
   if (str != Qnil) {
     int current_len = RSTRING_LEN(str);
     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
   }
   else buf_pos = 0;
-  int shrinkable = io_setstrbuf(&str, buf_pos + len);
-  char *buf = RSTRING_PTR(str) + buf_pos;
-  long total = 0;
-  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  shrinkable = io_setstrbuf(&str, buf_pos + len);
+  buf = RSTRING_PTR(str) + buf_pos;
 
   GetBackend(self, backend);
   if (underlying_io != Qnil) io = underlying_io;
@@ -628,16 +656,18 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -684,10 +714,13 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -733,10 +766,13 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -762,6 +798,10 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io;
+  char *buf;
+  long len;
+  long left;
+  int flags_int;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -770,19 +810,22 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  long left = len;
-  int flags_int = NUM2INT(flags);
+  buf = StringValuePtr(str);
+  len = RSTRING_LEN(str);
+  left = len;
+  flags_int = NUM2INT(flags);
 
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -816,10 +859,13 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int fd;
+    int completed;
+
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
-
-    int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
     if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
@@ -838,7 +884,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
       rb_io_synchronized(fp);
 
       // if (rsock_do_not_reverse_lookup) {
-      //   fp->mode |= FMODE_NOREVLOOKUP;
+      //   fp->mode |= FMODE_NOREVLOOKUP;
       // }
       if (loop) {
         rb_yield(socket);
@@ -870,6 +916,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   rb_io_t *dest_fptr;
   VALUE underlying_io;
   int total = 0;
+  VALUE resume_value = Qnil;
 
   underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
@@ -882,15 +929,16 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   GetOpenFile(dest, dest_fptr);
   io_unset_nonblock(dest_fptr, dest);
 
-  VALUE resume_value = Qnil;
-
   while (1) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
-
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
     if (!completed) return resume_value;
 
@@ -918,33 +966,35 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE chunksize)
   return io_uring_backend_splice(backend, src, dest, chunksize, 1);
 }
 
-
 VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   Backend_t *backend;
   rb_io_t *fptr;
-  struct sockaddr_in addr;
-  char *host_buf = StringValueCStr(host);
+  struct sockaddr *ai_addr;
+  int ai_addrlen;
   VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
+  VALUE resume_value = Qnil;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
+  int result;
+  int completed;
+
+  ai_addrlen = backend_getaddrinfo(host, port, &ai_addr);
+
   if (underlying_sock != Qnil) sock = underlying_sock;
 
   GetBackend(self, backend);
   GetOpenFile(sock, fptr);
   io_unset_nonblock(fptr, sock);
 
-  addr.sin_family = AF_INET;
-  addr.sin_addr.s_addr = inet_addr(host_buf);
-  addr.sin_port = htons(NUM2INT(port));
-
-  VALUE resume_value = Qnil;
-  op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
-  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-  int completed = context_store_release(&backend->store, ctx);
+  ctx = context_store_acquire(&backend->store, OP_CONNECT);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_connect(sqe, fptr->fd, ai_addr, ai_addrlen);
+  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
-
+
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
   return sock;
 }
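
`Backend_connect` previously filled in a `sockaddr_in` by hand with `inet_addr`, which accepts only IPv4 dotted-quad literals; it now obtains the address from the new `backend_getaddrinfo` helper (added to `backend_common.c` in this release) and hands the result directly to `io_uring_prep_connect`. A sketch of that resolve-then-connect flow using plain `getaddrinfo(3)` in place of the helper (the host, port, and `main` harness are illustrative):

```c
// Resolve a host/port pair, then connect through the ring. ai_addr/ai_addrlen
// come straight from the resolver, so hostnames and IPv6 work where the old
// inet_addr() path handled IPv4 literals only.
#include <liburing.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void) {
  struct addrinfo hints = { .ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM };
  struct addrinfo *ai;
  struct io_uring ring;
  struct io_uring_cqe *cqe;

  if (getaddrinfo("example.com", "80", &hints, &ai)) return 1;
  int fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);

  io_uring_queue_init(8, &ring, 0);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_connect(sqe, fd, ai->ai_addr, ai->ai_addrlen);
  io_uring_submit(&ring);

  io_uring_wait_cqe(&ring, &cqe);
  printf("connect: %s\n", cqe->res ? strerror(-cqe->res) : "ok");

  io_uring_cqe_seen(&ring, cqe);
  io_uring_queue_exit(&ring);
  freeaddrinfo(ai);
  return 0;
}
```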
@@ -953,23 +1003,60 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  VALUE resume_value;
+
   if (underlying_io != Qnil) io = underlying_io;
   GetBackend(self, backend);
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  VALUE resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
+  resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
+
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return self;
 }
 
+VALUE Backend_close(VALUE self, VALUE io) {
+  Backend_t *backend;
+  rb_io_t *fptr;
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  VALUE resume_value = Qnil;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
+  int result;
+  int completed;
+
+  if (underlying_io != Qnil) io = underlying_io;
+  GetBackend(self, backend);
+  GetOpenFile(io, fptr);
+
+  if (fptr->fd < 0) return Qnil;
+
+  io_unset_nonblock(fptr, io);
+
+  ctx = context_store_acquire(&backend->store, OP_CLOSE);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_close(sqe, fptr->fd);
+  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  completed = context_store_release(&backend->store, ctx);
+  RAISE_IF_EXCEPTION(resume_value);
+  if (!completed) return resume_value;
+  RB_GC_GUARD(resume_value);
+
+  if (result < 0) rb_syserr_fail(-result, strerror(-result));
+
+  fptr_finalize(fptr);
+  // fptr->fd = -1;
+  return io;
+}
+
 inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_integral;
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
   ts.tv_sec = duration_integral;
-  ts.tv_nsec = floor(duration_fraction * 1000000000);
+  ts.tv_nsec = floor(duration_fraction * 1000000000);
   return ts;
 }
 
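The new `Backend_close` (exported as `Backend#close` in `Init_Backend` at the bottom of this diff) routes closing through the ring as an `IORING_OP_CLOSE`, so it suspends the calling fiber like any other op instead of blocking the thread; once the CQE arrives, `fptr_finalize` tears down the Ruby-side `rb_io_t`. A minimal sketch of closing a descriptor through io_uring, assuming liburing and a kernel >= 5.6 (where the opcode was added):

```c
// Submit an IORING_OP_CLOSE instead of calling close(2) directly,
// then reap the completion.
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct io_uring ring;
  struct io_uring_cqe *cqe;
  int fd = open("/dev/null", O_RDONLY);

  io_uring_queue_init(8, &ring, 0);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_close(sqe, fd); // the kernel performs the close
  io_uring_submit(&ring);

  io_uring_wait_cqe(&ring, &cqe);
  printf("close: %s\n", cqe->res ? strerror(-cqe->res) : "ok");

  io_uring_cqe_seen(&ring, cqe);
  io_uring_queue_exit(&ring);
  return 0;
}
```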
@@ -981,18 +1068,18 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
+
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
 
 VALUE Backend_sleep(VALUE self, VALUE duration) {
+  VALUE resume_value = Qnil;
   Backend_t *backend;
   GetBackend(self, backend);
 
-  VALUE resume_value = Qnil;
   io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -1001,29 +1088,34 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
 
 VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
-  double interval_d = NUM2DBL(interval);
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;
+
   GetBackend(self, backend);
-  double next_time = 0.;
 
   while (1) {
-    double now = current_time();
-    if (next_time == 0.) next_time = current_time() + interval_d;
-    double sleep_duration = next_time - now;
-    if (sleep_duration < 0) sleep_duration = 0;
-
-    VALUE resume_value = Qnil;
-    int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
-    RAISE_IF_EXCEPTION(resume_value);
-    if (!completed) return resume_value;
-    RB_GC_GUARD(resume_value);
+    double now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      RAISE_IF_EXCEPTION(resume_value);
+      if (!completed) return resume_value;
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }
 
     rb_yield(Qnil);
 
     while (1) {
-      next_time += interval_d;
-      if (next_time > now) break;
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
    }
  }
+  RB_GC_GUARD(resume_value);
 }
 
 struct Backend_timeout_ctx {
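
`Backend_timer_loop` now tracks its schedule in integer nanoseconds and aims every tick at an absolute deadline: if the deadline is still ahead it sleeps via a timeout op, and if the loop has fallen behind it merely snoozes (yielding to other fibers) and fires immediately; the inner loop then advances the deadline by whole intervals past the pre-sleep timestamp, so a late tick skips missed slots rather than compressing the ones that follow. The drift-free arithmetic in isolation, with `nanosleep` standing in for the io_uring timeout op (the interval and tick count are illustrative):

```c
// Fixed-grid timer: deadlines advance in whole intervals from the first tick,
// so scheduling error never accumulates across ticks.
#include <stdint.h>
#include <stdio.h>
#include <time.h>

// monotonic clock in nanoseconds, standing in for the backend's current_time_ns()
static uint64_t now_ns(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void) {
  uint64_t interval_ns = 100000000ull; // 100ms grid
  uint64_t next_time_ns = 0;

  for (int tick = 0; tick < 5; tick++) {
    uint64_t now = now_ns();
    if (next_time_ns == 0) next_time_ns = now + interval_ns;
    if (next_time_ns > now) {
      // sleep until the absolute deadline (a timeout op in the backend)
      uint64_t delta = next_time_ns - now;
      struct timespec ts = { delta / 1000000000ull, delta % 1000000000ull };
      nanosleep(&ts, NULL);
    } // else: behind schedule -- the backend just snoozes and fires at once

    printf("tick %d\n", tick);

    // advance by whole intervals past the pre-sleep timestamp, skipping
    // any slots that were missed while running late
    do next_time_ns += interval_ns; while (next_time_ns <= now);
  }
  return 0;
}
```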
@@ -1032,12 +1124,13 @@ struct Backend_timeout_ctx {
 };
 
 VALUE Backend_timeout_ensure(VALUE arg) {
-  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
-  if (timeout_ctx->ctx->ref_count) {
-    timeout_ctx->ctx->result = -ECANCELED;
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  if (timeout_ctx->ctx->ref_count) {
+    struct io_uring_sqe *sqe;
 
+    timeout_ctx->ctx->result = -ECANCELED;
     // op was not completed, so we need to cancel it
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+    sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
     io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
     timeout_ctx->backend->pending_sqes = 0;
     io_uring_submit(&timeout_ctx->backend->ring);
@@ -1050,24 +1143,30 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE duration;
   VALUE exception;
   VALUE move_on_value = Qnil;
-  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
-
-  struct __kernel_timespec ts = duration_to_timespec(duration);
+  struct Backend_timeout_ctx timeout_ctx;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
   Backend_t *backend;
-  GetBackend(self, backend);
+  struct __kernel_timespec ts;
   VALUE result = Qnil;
-  VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+  VALUE timeout;
 
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
-  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+  ts = duration_to_timespec(duration);
+  GetBackend(self, backend);
+  timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  sqe = io_uring_get_sqe(&backend->ring);
+  ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_sqe_set_data(sqe, ctx);
   io_uring_backend_defer_submit(backend);
   backend->base.op_count++;
 
-  struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
+  timeout_ctx.backend = backend;
+  timeout_ctx.ctx = ctx;
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
 
   if (result == timeout) {
@@ -1084,19 +1183,21 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
 VALUE Backend_waitpid(VALUE self, VALUE pid) {
   int pid_int = NUM2INT(pid);
   int fd = pidfd_open(pid_int, 0);
+  int status;
+  pid_t ret;
 
   if (fd >= 0) {
+    VALUE resume_value;
     Backend_t *backend;
     GetBackend(self, backend);
 
-    VALUE resume_value = io_uring_backend_wait_fd(backend, fd, 0);
+    resume_value = io_uring_backend_wait_fd(backend, fd, 0);
     close(fd);
     RAISE_IF_EXCEPTION(resume_value);
     RB_GC_GUARD(resume_value);
   }
-
-  int status;
-  pid_t ret = waitpid(pid_int, &status, WNOHANG);
+
+  ret = waitpid(pid_int, &status, WNOHANG);
   if (ret < 0) {
     int e = errno;
     if (e == ECHILD)
@@ -1109,6 +1210,8 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
 
 VALUE Backend_wait_event(VALUE self, VALUE raise) {
   Backend_t *backend;
+  VALUE resume_value;
+
   GetBackend(self, backend);
 
   if (backend->event_fd == -1) {
@@ -1119,7 +1222,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
     }
   }
 
-  VALUE resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
+  resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
   if (RTEST(raise)) RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return resume_value;
@@ -1132,6 +1235,7 @@ VALUE Backend_kind(VALUE self) {
 struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
   rb_io_t *fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -1139,17 +1243,15 @@ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, V
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_write(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), 0);
   return sqe;
 }
 
 struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
   rb_io_t *fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -1157,12 +1259,8 @@ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VA
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  int flags_int = NUM2INT(flags);
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_send(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), NUM2INT(flags));
   return sqe;
 }
 
@@ -1170,6 +1268,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
   rb_io_t *src_fptr;
   rb_io_t *dest_fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
@@ -1182,7 +1281,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
   GetOpenFile(dest, dest_fptr);
   io_unset_nonblock(dest_fptr, dest);
 
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
   return sqe;
 }
@@ -1210,14 +1309,19 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   unsigned int sqe_count = 0;
   struct io_uring_sqe *last_sqe = 0;
   Backend_t *backend;
+  int result;
+  int completed;
+  op_context_t *ctx;
+
   GetBackend(self, backend);
   if (argc == 0) return resume_value;
 
-  op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+  ctx = context_store_acquire(&backend->store, OP_CHAIN);
   for (int i = 0; i < argc; i++) {
     VALUE op = argv[i];
     VALUE op_type = RARRAY_AREF(op, 0);
     VALUE op_len = RARRAY_LEN(op);
+    unsigned int flags;
 
     if (op_type == SYM_write && op_len == 3) {
       last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
@@ -1227,13 +1331,16 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     else if (op_type == SYM_splice && op_len == 4)
       last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
     else {
+
       if (sqe_count) {
+        struct io_uring_sqe *sqe;
+
         io_uring_sqe_set_data(last_sqe, ctx);
         io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
 
         ctx->ref_count = sqe_count;
         ctx->result = -ECANCELED;
-        struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+        sqe = io_uring_get_sqe(&backend->ring);
         io_uring_prep_cancel(sqe, ctx, 0);
         backend->pending_sqes = 0;
         io_uring_submit(&backend->ring);
@@ -1255,21 +1362,23 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
   resume_value = backend_await((struct Backend_base *)backend);
-  int result = ctx->result;
-  int completed = context_store_release(&backend->store, ctx);
+  result = ctx->result;
+  completed = context_store_release(&backend->store, ctx);
   if (!completed) {
-    Backend_chain_ctx_attach_buffers(ctx, argc, argv);
+    struct io_uring_sqe *sqe;
 
+    Backend_chain_ctx_attach_buffers(ctx, argc, argv);
+
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
     RAISE_IF_EXCEPTION(resume_value);
     return resume_value;
   }
-
+
   RB_GC_GUARD(resume_value);
   return INT2NUM(result);
 }
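
The chain hunks above rely on SQE linking: every op in the chain is flagged `IOSQE_ASYNC | IOSQE_IO_LINK` except the last, which drops the link flag, so the kernel executes the ops strictly in order and aborts the remainder if one fails; on a Ruby-side exception the shared context is cancelled exactly as in the single-op path. A sketch of two linked writes, assuming liburing (the file path and payloads are illustrative):

```c
// Two writes linked with IOSQE_IO_LINK: the second is not started until the
// first completes, and is aborted if the first fails.
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct io_uring ring;
  struct io_uring_cqe *cqe;
  int fd = open("/tmp/chain_demo.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

  io_uring_queue_init(8, &ring, 0);

  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_write(sqe, fd, "first ", 6, 0);
  io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK); // next SQE waits for this one

  sqe = io_uring_get_sqe(&ring);
  io_uring_prep_write(sqe, fd, "second\n", 7, 6); // no link flag: end of chain

  io_uring_submit(&ring);
  for (int i = 0; i < 2; i++) { // one CQE per linked op
    io_uring_wait_cqe(&ring, &cqe);
    if (cqe->res < 0) fprintf(stderr, "op %d: %s\n", i, strerror(-cqe->res));
    io_uring_cqe_seen(&ring, cqe);
  }
  io_uring_queue_exit(&ring);
  return 0;
}
```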
@@ -1326,8 +1435,10 @@ static inline void splice_chunks_get_sqe(
 }
 
 static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+  struct io_uring_sqe *sqe;
+
   ctx->result = -ECANCELED;
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_cancel(sqe, ctx, 0);
   backend->pending_sqes = 0;
   io_uring_submit(&backend->ring);
@@ -1340,9 +1451,11 @@ static inline int splice_chunks_await_ops(
   VALUE *switchpoint_result
 )
 {
+  int completed;
   int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+
   if (result) (*result) = res;
-  int completed = context_store_release(&backend->store, *ctx);
+  completed = context_store_release(&backend->store, *ctx);
   if (!completed) {
     splice_chunks_cancel(backend, *ctx);
     if (TEST_EXCEPTION(*switchpoint_result)) return 1;
@@ -1356,17 +1469,22 @@ static inline int splice_chunks_await_ops(
 
 VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   Backend_t *backend;
-  GetBackend(self, backend);
   int total = 0;
   int err = 0;
   VALUE switchpoint_result = Qnil;
   op_context_t *ctx = 0;
   struct io_uring_sqe *sqe = 0;
-
+  int maxlen;
+  VALUE underlying_io;
+  VALUE str = Qnil;
+  VALUE chunk_len_value = Qnil;
   rb_io_t *src_fptr;
   rb_io_t *dest_fptr;
+  int pipefd[2] = { -1, -1 };
 
-  VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+  GetBackend(self, backend);
+
+  underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
   GetOpenFile(src, src_fptr);
   io_verify_blocking_mode(src_fptr, src, Qtrue);
@@ -1377,11 +1495,8 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   GetOpenFile(dest, dest_fptr);
   io_verify_blocking_mode(dest_fptr, dest, Qtrue);
 
-  int maxlen = NUM2INT(chunk_size);
-  VALUE str = Qnil;
-  VALUE chunk_len_value = Qnil;
+  maxlen = NUM2INT(chunk_size);
 
-  int pipefd[2] = { -1, -1 };
   if (pipe(pipefd) == -1) {
     err = errno;
     goto syscallerror;
@@ -1404,7 +1519,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
 
     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
-
+
     total += chunk_len;
     chunk_len_value = INT2NUM(chunk_len);
 
@@ -1525,6 +1640,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "write", Backend_write_m, -1);
+  rb_define_method(cBackend, "close", Backend_close, 1);
 
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
   SYM_send = ID2SYM(rb_intern("send"));