polyphony 0.87 → 0.91 — changes to the io_uring backend

@@ -262,6 +262,21 @@ inline struct backend_stats backend_get_stats(VALUE self) {
   return backend_base_stats(&backend->base);
 }
 
+static inline struct io_uring_sqe *io_uring_backend_get_sqe(Backend_t *backend) {
+  struct io_uring_sqe *sqe;
+  sqe = io_uring_get_sqe(&backend->ring);
+  if (sqe) goto done;
+
+  if (backend->pending_sqes)
+    io_uring_backend_immediate_submit(backend);
+  else {
+    VALUE resume_value = backend_snooze(&backend->base);
+    RAISE_IF_EXCEPTION(resume_value);
+  }
+done:
+  return sqe;
+}
+
 VALUE Backend_wakeup(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
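
Note: the new io_uring_backend_get_sqe() helper exists because liburing's io_uring_get_sqe() returns NULL once the submission queue ring is full, and the pre-0.91 call sites used the returned pointer unchecked. The helper frees up slots by submitting pending SQEs (or snoozing the current fiber). A minimal standalone sketch (assuming liburing is installed; not part of this patch) demonstrating the failure mode being guarded against:

#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  int count = 0;

  if (io_uring_queue_init(4, &ring, 0)) return 1;

  // Acquire SQEs without ever submitting: io_uring_get_sqe() eventually
  // returns NULL -- the case the new helper handles by flushing pending
  // SQEs or yielding until slots become available again.
  while (io_uring_get_sqe(&ring)) count++;
  printf("SQ ring full after %d unsubmitted SQEs\n", count);

  io_uring_queue_exit(&ring);
  return 0;
}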
@@ -269,7 +284,7 @@ VALUE Backend_wakeup(VALUE self) {
   if (backend->base.currently_polling) {
     // Since we're currently blocking while waiting for a completion, we add a
     // NOP which would cause the io_uring_enter syscall to return
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     io_uring_prep_nop(sqe);
     io_uring_backend_immediate_submit(backend);
 
@@ -302,7 +317,7 @@ int io_uring_backend_defer_submit_and_await(
 
   // op was not completed (an exception was raised), so we need to cancel it
   ctx->result = -ECANCELED;
-  sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_cancel(sqe, (__u64)ctx, 0);
   io_uring_backend_immediate_submit(backend);
 }
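
Note: this cancel-on-exception pattern recurs throughout the file: the op's result is pre-set to -ECANCELED, then an IORING_OP_ASYNC_CANCEL SQE targeting the original op's user_data is submitted. A self-contained sketch of the mechanism (assuming liburing; the (__u64) user_data form matches the liburing revision this code builds against -- other releases declare io_uring_prep_cancel() with a void * argument):

#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  struct io_uring_sqe *sqe;
  struct io_uring_cqe *cqe;
  struct __kernel_timespec ts = { .tv_sec = 60, .tv_nsec = 0 };

  if (io_uring_queue_init(8, &ring, 0)) return 1;

  sqe = io_uring_get_sqe(&ring);
  io_uring_prep_timeout(sqe, &ts, 0, 0);      // a long-lived op...
  io_uring_sqe_set_data(sqe, (void *)1);

  sqe = io_uring_get_sqe(&ring);
  io_uring_prep_cancel(sqe, (__u64)1, 0);     // ...cancelled via its user_data
  io_uring_submit(&ring);

  // The timeout op now completes immediately with res == -ECANCELED.
  while (!io_uring_wait_cqe(&ring, &cqe)) {
    unsigned long long data = cqe->user_data;
    int res = cqe->res;
    io_uring_cqe_seen(&ring, cqe);
    printf("user_data=%llu res=%d\n", data, res);
    if (data == 1) break;
  }

  io_uring_queue_exit(&ring);
  return 0;
}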
@@ -317,7 +332,7 @@ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
   op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
   VALUE resumed_value = Qnil;
 
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);
 
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
@@ -349,7 +364,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   int fd;
   rb_io_t *fptr;
   struct io_buffer buffer = get_io_buffer(str);
-  long buf_pos = NUM2INT(pos);
+  long buf_pos = FIX2INT(pos);
   int shrinkable_string = 0;
   int expandable_buffer = 0;
   long total = 0;
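
Note on the NUM2INT → FIX2INT swaps in this release: NUM2INT accepts any Numeric and range-checks it, while FIX2INT assumes the VALUE already is a Fixnum and merely strips the tag bits, skipping those checks. That is safe here because these arguments come from polyphony's own Ruby layer, which always passes small integers. A hedged illustration of the difference (fast_len/safe_len are hypothetical names, not patch code):

#include <ruby.h>

// FIX2INT: no type check -- just shifts off the Fixnum tag. Undefined
// results if the caller passes a Bignum, Float or non-numeric VALUE.
static long fast_len(VALUE maxlen) {
  return FIX2INT(maxlen);
}

// NUM2INT: coerces any Numeric and raises a RangeError on overflow --
// safer, but slower on hot paths like the read/write loops below.
static long safe_len(VALUE maxlen) {
  return NUM2INT(maxlen);
}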
@@ -368,7 +383,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
 
   if (string_cap < expected_read_length + buf_pos) {
     shrinkable_string = io_setstrbuf(&str, expected_read_length + buf_pos);
-    buffer.ptr = RSTRING_PTR(str) + buf_pos;
+    buffer.ptr = (unsigned char *)RSTRING_PTR(str) + buf_pos;
     buffer.len = expected_read_length;
   }
   else {
@@ -385,7 +400,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -415,7 +430,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
       rb_str_resize(str, total + buf_pos);
       rb_str_modify_expand(str, rb_str_capacity(str));
       shrinkable_string = 0;
-      buffer.ptr = RSTRING_PTR(str) + total + buf_pos;
+      buffer.ptr = (unsigned char *)RSTRING_PTR(str) + total + buf_pos;
       buffer.len = rb_str_capacity(str) - total - buf_pos;
     }
     else {
@@ -441,7 +456,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
   rb_io_t *fptr;
   VALUE str;
   long total;
-  long len = NUM2INT(maxlen);
+  long len = FIX2INT(maxlen);
   int shrinkable;
   char *buf;
 
@@ -453,7 +468,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     ssize_t result;
     int completed;
 
@@ -502,7 +517,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     ssize_t result;
     int completed;
 
@@ -546,7 +561,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -569,7 +584,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
     }
   }
 
-  return INT2NUM(buffer.len);
+  return INT2FIX(buffer.len);
 }
 
 VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
@@ -597,7 +612,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -637,7 +652,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
   }
 
   free(iov);
-  return INT2NUM(total_written);
+  return INT2FIX(total_written);
 }
 
 VALUE Backend_write_m(int argc, VALUE *argv, VALUE self) {
@@ -654,7 +669,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   int fd;
   rb_io_t *fptr;
   struct io_buffer buffer = get_io_buffer(str);
-  long buf_pos = NUM2INT(pos);
+  long buf_pos = FIX2INT(pos);
   int shrinkable_string = 0;
   int expandable_buffer = 0;
   long total = 0;
@@ -672,7 +687,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
 
   if (string_cap < expected_read_length + buf_pos) {
     shrinkable_string = io_setstrbuf(&str, expected_read_length + buf_pos);
-    buffer.ptr = RSTRING_PTR(str) + buf_pos;
+    buffer.ptr = (unsigned char *)RSTRING_PTR(str) + buf_pos;
     buffer.len = expected_read_length;
   }
   else {
@@ -689,7 +704,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -727,7 +742,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
   rb_io_t *fptr;
   VALUE str;
   long total;
-  long len = NUM2INT(maxlen);
+  long len = FIX2INT(maxlen);
   int shrinkable;
   char *buf;
 
@@ -739,7 +754,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -787,7 +802,7 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -823,7 +838,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
 
   struct io_buffer buffer = get_io_buffer(str);
   long left = buffer.len;
-  int flags_int = NUM2INT(flags);
+  int flags_int = FIX2INT(flags);
 
   GetBackend(self, backend);
   fd = fd_from_io(io, &fptr, 1, 0);
@@ -831,7 +846,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
@@ -854,7 +869,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
     }
   }
 
-  return INT2NUM(buffer.len);
+  return INT2FIX(buffer.len);
 }
 
 VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE socket_class, int loop) {
@@ -869,7 +884,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int fd;
     int completed;
 
@@ -935,11 +950,11 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
 
   while (1) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
-    io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, NUM2INT(maxlen), 0);
+    io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, FIX2INT(maxlen), 0);
 
     result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     completed = context_store_release(&backend->store, ctx);
@@ -950,7 +965,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
       rb_syserr_fail(-result, strerror(-result));
 
     total += result;
-    if (result == 0 || !loop) return INT2NUM(total);
+    if (result == 0 || !loop) return INT2FIX(total);
   }
 
   RB_GC_GUARD(resume_value);
@@ -969,6 +984,112 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE chunksize)
   return io_uring_backend_splice(backend, src, dest, chunksize, 1);
 }
 
+struct double_splice_ctx {
+  Backend_t *backend;
+  VALUE src;
+  VALUE dest;
+  int pipefd[2];
+};
+
+#define DOUBLE_SPLICE_MAXLEN (1 << 16)
+
+static inline op_context_t *prepare_double_splice_ctx(Backend_t *backend, int src_fd, int dest_fd) {
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
+  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
+  io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, DOUBLE_SPLICE_MAXLEN, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  backend->base.op_count += 1;
+  backend->pending_sqes += 1;
+
+  return ctx;
+}
+
+static inline void io_uring_backend_cancel(Backend_t *backend, op_context_t *ctx) {
+  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
+  ctx->result = -ECANCELED;
+  io_uring_prep_cancel(sqe, (__u64)ctx, 0);
+}
+
+VALUE double_splice_to_eof_safe(struct double_splice_ctx *ctx) {
+  int src_fd;
+  int dest_fd;
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+  int total = 0;
+  VALUE resume_value = Qnil;
+
+  src_fd = fd_from_io(ctx->src, &src_fptr, 0, 0);
+  dest_fd = fd_from_io(ctx->dest, &dest_fptr, 1, 0);
+
+  op_context_t *ctx_src = prepare_double_splice_ctx(ctx->backend, src_fd, ctx->pipefd[1]);
+  op_context_t *ctx_dest = prepare_double_splice_ctx(ctx->backend, ctx->pipefd[0], dest_fd);
+
+  if (ctx->backend->pending_sqes >= ctx->backend->prepared_limit)
+    io_uring_backend_immediate_submit(ctx->backend);
+
+  while (1) {
+    resume_value = backend_await((struct Backend_base *)ctx->backend);
+
+    if ((ctx_src && ctx_src->ref_count == 2 && ctx_dest && ctx_dest->ref_count == 2) || TEST_EXCEPTION(resume_value)) {
+      if (ctx_src) {
+        context_store_release(&ctx->backend->store, ctx_src);
+        io_uring_backend_cancel(ctx->backend, ctx_src);
+      }
+      if (ctx_dest) {
+        context_store_release(&ctx->backend->store, ctx_dest);
+        io_uring_backend_cancel(ctx->backend, ctx_dest);
+      }
+      io_uring_backend_immediate_submit(ctx->backend);
+      RAISE_IF_EXCEPTION(resume_value);
+      return resume_value;
+    }
+
+    if (ctx_src && ctx_src->ref_count == 1) {
+      context_store_release(&ctx->backend->store, ctx_src);
+      if (ctx_src->result == 0) {
+        // close write end of pipe
+        close(ctx->pipefd[1]);
+        ctx_src = NULL;
+      }
+      else {
+        ctx_src = prepare_double_splice_ctx(ctx->backend, src_fd, ctx->pipefd[1]);
+      }
+    }
+    if (ctx_dest && ctx_dest->ref_count == 1) {
+      context_store_release(&ctx->backend->store, ctx_dest);
+      if (ctx_dest->result == 0)
+        break;
+      else {
+        total += ctx_dest->result;
+        ctx_dest = prepare_double_splice_ctx(ctx->backend, ctx->pipefd[0], dest_fd);
+      }
+    }
+
+    if (ctx->backend->pending_sqes >= ctx->backend->prepared_limit)
+      io_uring_backend_immediate_submit(ctx->backend);
+  }
+  RB_GC_GUARD(resume_value);
+  return INT2FIX(total);
+}
+
+VALUE double_splice_to_eof_cleanup(struct double_splice_ctx *ctx) {
+  if (ctx->pipefd[0]) close(ctx->pipefd[0]);
+  if (ctx->pipefd[1]) close(ctx->pipefd[1]);
+  return Qnil;
+}
+
+VALUE Backend_double_splice_to_eof(VALUE self, VALUE src, VALUE dest) {
+  struct double_splice_ctx ctx = { NULL, src, dest, 0, 0 };
+  GetBackend(self, ctx.backend);
+  if (pipe(ctx.pipefd) == -1) rb_syserr_fail(errno, strerror(errno));
+
+  return rb_ensure(
+    SAFE(double_splice_to_eof_safe), (VALUE)&ctx,
+    SAFE(double_splice_to_eof_cleanup), (VALUE)&ctx
+  );
+}
+
 VALUE Backend_tee(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   Backend_t *backend;
   GetBackend(self, backend);
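
Note: the new Backend_double_splice_to_eof() moves data between two arbitrary fds by way of a kernel pipe, keeping both splice halves in flight as concurrent io_uring ops (IOSQE_ASYNC tells the kernel to run each splice on a worker rather than attempt inline completion). For background, the equivalent blocking pattern in plain syscalls looks like this (a sketch, not the patch's code path; splice(2) requires at least one pipe end, hence the intermediate pipe):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

// Blocking equivalent of double_splice_to_eof: copy src_fd to dest_fd
// through a pipe until EOF; returns the byte count, or -1 on error.
static long double_splice_blocking(int src_fd, int dest_fd) {
  int pfd[2];
  long total = 0;

  if (pipe(pfd) == -1) return -1;
  for (;;) {
    ssize_t n = splice(src_fd, NULL, pfd[1], NULL, 1 << 16, 0);
    if (n == 0) break;                          // EOF on src
    if (n < 0) { total = -1; break; }
    while (n > 0) {                             // drain the pipe to dest
      ssize_t m = splice(pfd[0], NULL, dest_fd, NULL, n, 0);
      if (m <= 0) { total = -1; break; }
      total += m;
      n -= m;
    }
    if (total == -1) break;
  }
  close(pfd[0]);
  close(pfd[1]);
  return total;
}

The io_uring version above avoids two blocking syscalls per chunk and lets the src-to-pipe and pipe-to-dest directions make progress independently.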
@@ -984,11 +1105,11 @@ VALUE Backend_tee(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   while (1) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
     int result;
     int completed;
 
-    io_uring_prep_tee(sqe, src_fd, dest_fd, NUM2INT(maxlen), 0);
+    io_uring_prep_tee(sqe, src_fd, dest_fd, FIX2INT(maxlen), 0);
 
     result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     completed = context_store_release(&backend->store, ctx);
@@ -998,7 +1119,7 @@ VALUE Backend_tee(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
 
-    return INT2NUM(result);
+    return INT2FIX(result);
   }
 
   RB_GC_GUARD(resume_value);
@@ -1021,7 +1142,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   GetBackend(self, backend);
   fd = fd_from_io(sock, &fptr, 1, 0);
   ctx = context_store_acquire(&backend->store, OP_CONNECT);
-  sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_connect(sqe, fd, ai_addr, ai_addrlen);
   result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
@@ -1063,7 +1184,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   // io_unset_nonblock(fptr, io);
 
   // ctx = context_store_acquire(&backend->store, OP_CLOSE);
-  // sqe = io_uring_get_sqe(&backend->ring);
+  // sqe = io_uring_backend_get_sqe(backend);
   // io_uring_prep_close(sqe, fd);
   // result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
   // completed = context_store_release(&backend->store, ctx);
@@ -1094,7 +1215,7 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 // returns true if completed, 0 otherwise
 int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
 
   io_uring_prep_timeout(sqe, &ts, 0, 0);
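
Note: double_to_timespec()/duration_to_timespec() turn a Ruby duration (a float of seconds) into the struct __kernel_timespec that io_uring_prep_timeout() consumes. A sketch of what such a conversion looks like (hypothetical helper name; assumes a non-negative duration):

#include <linux/time_types.h>

// Split a floating-point duration in seconds into the whole-second and
// nanosecond fields expected by io_uring_prep_timeout().
static struct __kernel_timespec double_to_kts(double duration) {
  struct __kernel_timespec ts;
  ts.tv_sec = (long long)duration;
  ts.tv_nsec = (long long)((duration - (double)ts.tv_sec) * 1e9);
  return ts;
}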
@@ -1183,7 +1304,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   GetBackend(self, backend);
   timeout = rb_funcall(cTimeoutException, ID_new, 0);
 
-  sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_backend_get_sqe(backend);
   ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
@@ -1207,7 +1328,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
 }
 
 VALUE Backend_waitpid(VALUE self, VALUE pid) {
-  int pid_int = NUM2INT(pid);
+  int pid_int = FIX2INT(pid);
   int fd = pidfd_open(pid_int, 0);
   int status;
   pid_t ret;
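
Note: Backend_waitpid() relies on pidfd_open(2) (Linux 5.3+): the returned fd polls readable once the target process terminates, letting the backend await a child through the same io_uring poll machinery instead of blocking in waitpid(2). glibc only gained a wrapper in 2.36, so a raw-syscall shim like the following is commonly used (a sketch; the pidfd_open this code calls is defined elsewhere in the tree):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

// Minimal pidfd_open(2) wrapper: returns a pollable fd for the given
// pid, or -1 with errno set (ENOSYS on kernels older than 5.3).
static int pidfd_open(pid_t pid, unsigned int flags) {
  return (int)syscall(SYS_pidfd_open, pid, flags);
}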
@@ -1231,7 +1352,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
     else
       rb_syserr_fail(e, strerror(e));
   }
-  return rb_ary_new_from_args(2, INT2NUM(ret), INT2NUM(WEXITSTATUS(status)));
+  return rb_ary_new_from_args(2, INT2FIX(ret), INT2FIX(WEXITSTATUS(status)));
 }
 
 /*
@@ -1259,7 +1380,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
     struct io_uring_sqe *sqe;
 
     backend->event_fd_ctx = context_store_acquire(&backend->store, OP_POLL);
-    sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_backend_get_sqe(backend);
     io_uring_prep_poll_add(sqe, backend->event_fd, POLLIN);
     backend->base.op_count++;
     io_uring_sqe_set_data(sqe, backend->event_fd_ctx);
@@ -1275,7 +1396,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
 
     // last fiber to use the eventfd, so we cancel the ongoing poll
    struct io_uring_sqe *sqe;
-    sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_backend_get_sqe(backend);
    io_uring_prep_cancel(sqe, (__u64)backend->event_fd_ctx, 0);
    io_uring_backend_immediate_submit(backend);
    backend->event_fd_ctx = NULL;
@@ -1296,7 +1417,7 @@ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, V
   struct io_uring_sqe *sqe;
 
   fd = fd_from_io(io, &fptr, 1, 0);
-  sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_write(sqe, fd, StringValuePtr(str), RSTRING_LEN(str), 0);
   return sqe;
 }
@@ -1308,8 +1429,8 @@ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VA
 
   fd = fd_from_io(io, &fptr, 1, 0);
 
-  sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_send(sqe, fd, StringValuePtr(str), RSTRING_LEN(str), NUM2INT(flags));
+  sqe = io_uring_backend_get_sqe(backend);
+  io_uring_prep_send(sqe, fd, StringValuePtr(str), RSTRING_LEN(str), FIX2INT(flags));
   return sqe;
 }
 
@@ -1322,8 +1443,8 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
 
   src_fd = fd_from_io(src, &src_fptr, 0, 0);
   dest_fd = fd_from_io(dest, &dest_fptr, 1, 0);
-  sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, NUM2INT(maxlen), 0);
+  sqe = io_uring_backend_get_sqe(backend);
+  io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, FIX2INT(maxlen), 0);
   return sqe;
 }
 
@@ -1381,7 +1502,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
 
     ctx->ref_count = sqe_count;
    ctx->result = -ECANCELED;
-    sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_backend_get_sqe(backend);
    io_uring_prep_cancel(sqe, (__u64)ctx, 0);
    io_uring_backend_immediate_submit(backend);
   }
@@ -1411,7 +1532,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
 
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-    sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_backend_get_sqe(backend);
     io_uring_prep_cancel(sqe, (__u64)ctx, 0);
     io_uring_backend_immediate_submit(backend);
     RAISE_IF_EXCEPTION(resume_value);
@@ -1419,7 +1540,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   }
 
   RB_GC_GUARD(resume_value);
-  return INT2NUM(result);
+  return INT2FIX(result);
 }
 
 VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
@@ -1470,14 +1591,14 @@ static inline void splice_chunks_get_sqe(
   }
   else
     *ctx = context_store_acquire(&backend->store, type);
-  (*sqe) = io_uring_get_sqe(&backend->ring);
+  (*sqe) = io_uring_backend_get_sqe(backend);
 }
 
 static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
   struct io_uring_sqe *sqe;
 
   ctx->result = -ECANCELED;
-  sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_cancel(sqe, (__u64)ctx, 0);
   io_uring_backend_immediate_submit(backend);
 }
@@ -1525,7 +1646,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   src_fd = fd_from_io(src, &src_fptr, 0, 0);
   dest_fd = fd_from_io(dest, &dest_fptr, 1, 0);
 
-  maxlen = NUM2INT(chunk_size);
+  maxlen = FIX2INT(chunk_size);
 
   if (pipe(pipefd) == -1) {
     err = errno;
@@ -1551,7 +1672,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     if (chunk_len == 0) break;
 
     total += chunk_len;
-    chunk_len_value = INT2NUM(chunk_len);
+    chunk_len_value = INT2FIX(chunk_len);
 
 
     if (chunk_prefix != Qnil) {
@@ -1590,7 +1711,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   RB_GC_GUARD(switchpoint_result);
   if (pipefd[0] != -1) close(pipefd[0]);
   if (pipefd[1] != -1) close(pipefd[1]);
-  return INT2NUM(total);
+  return INT2FIX(total);
 syscallerror:
   if (pipefd[0] != -1) close(pipefd[0]);
   if (pipefd[1] != -1) close(pipefd[1]);
@@ -1678,9 +1799,12 @@ void Init_Backend() {
   rb_define_method(cBackend, "send", Backend_send, 3);
   rb_define_method(cBackend, "sendv", Backend_sendv, 3);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
+
   rb_define_method(cBackend, "splice", Backend_splice, 3);
   rb_define_method(cBackend, "splice_to_eof", Backend_splice_to_eof, 3);
+  rb_define_method(cBackend, "double_splice_to_eof", Backend_double_splice_to_eof, 2);
   rb_define_method(cBackend, "tee", Backend_tee, 3);
+
   rb_define_method(cBackend, "timeout", Backend_timeout, -1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);