polyphony 0.73.1 → 0.74

Sign up to get free protection for your applications and to get access to all the features.
@@ -42,6 +42,7 @@ typedef struct Backend_t {
42
42
  unsigned int pending_sqes;
43
43
  unsigned int prepared_limit;
44
44
  int event_fd;
45
+ int ring_initialized;
45
46
  } Backend_t;
46
47
 
47
48
  static void Backend_mark(void *ptr) {
@@ -80,20 +81,32 @@ static VALUE Backend_initialize(VALUE self) {
80
81
 
81
82
  backend_base_initialize(&backend->base);
82
83
  backend->pending_sqes = 0;
83
- backend->prepared_limit = 2048;
84
+ backend->ring_initialized = 0;
85
+ backend->event_fd = -1;
84
86
 
85
87
  context_store_initialize(&backend->store);
86
- io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
87
- backend->event_fd = -1;
88
88
 
89
- return Qnil;
89
+ backend->prepared_limit = 1024;
90
+ while (1) {
91
+ int ret = io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
92
+ if (!ret) break;
93
+
94
+ // if ENOMEM is returned, use a smaller limit
95
+ if (ret == -ENOMEM && backend->prepared_limit > 64)
96
+ backend->prepared_limit = backend->prepared_limit / 2;
97
+ else
98
+ rb_syserr_fail(-ret, strerror(-ret));
99
+ }
100
+ backend->ring_initialized = 1;
101
+
102
+ return self;
90
103
  }
91
104
 
92
105
  VALUE Backend_finalize(VALUE self) {
93
106
  Backend_t *backend;
94
107
  GetBackend(self, backend);
95
108
 
96
- io_uring_queue_exit(&backend->ring);
109
+ if (backend->ring_initialized) io_uring_queue_exit(&backend->ring);
97
110
  if (backend->event_fd != -1) close(backend->event_fd);
98
111
  context_store_free(&backend->store);
99
112
  return self;
@@ -282,9 +295,11 @@ int io_uring_backend_defer_submit_and_await(
282
295
  switchpoint_result = backend_await((struct Backend_base *)backend);
283
296
 
284
297
  if (ctx->ref_count > 1) {
298
+ struct io_uring_sqe *sqe;
299
+
285
300
  // op was not completed (an exception was raised), so we need to cancel it
286
301
  ctx->result = -ECANCELED;
287
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
302
+ sqe = io_uring_get_sqe(&backend->ring);
288
303
  io_uring_prep_cancel(sqe, ctx, 0);
289
304
  backend->pending_sqes = 0;
290
305
  io_uring_submit(&backend->ring);
@@ -316,16 +331,20 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
316
331
  long dynamic_len = length == Qnil;
317
332
  long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
318
333
  long buf_pos = NUM2INT(pos);
334
+ int shrinkable;
335
+ char *buf;
336
+ long total = 0;
337
+ int read_to_eof = RTEST(to_eof);
338
+ VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
339
+
340
+
319
341
  if (str != Qnil) {
320
342
  int current_len = RSTRING_LEN(str);
321
343
  if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
322
344
  }
323
345
  else buf_pos = 0;
324
- int shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
325
- char *buf = RSTRING_PTR(str) + buf_pos;
326
- long total = 0;
327
- int read_to_eof = RTEST(to_eof);
328
- VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
346
+ shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
347
+ buf = RSTRING_PTR(str) + buf_pos;
329
348
 
330
349
  GetBackend(self, backend);
331
350
  if (underlying_io != Qnil) io = underlying_io;
@@ -338,10 +357,13 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
338
357
  VALUE resume_value = Qnil;
339
358
  op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
340
359
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
360
+ int result;
361
+ int completed;
362
+
341
363
  io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);
342
364
 
343
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
344
- int completed = context_store_release(&backend->store, ctx);
365
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
366
+ completed = context_store_release(&backend->store, ctx);
345
367
  if (!completed) {
346
368
  context_attach_buffers(ctx, 1, &str);
347
369
  RAISE_IF_EXCEPTION(resume_value);
@@ -402,10 +424,13 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
402
424
  VALUE resume_value = Qnil;
403
425
  op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
404
426
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
427
+ ssize_t result;
428
+ int completed;
429
+
405
430
  io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
406
431
 
407
- ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
408
- int completed = context_store_release(&backend->store, ctx);
432
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
433
+ completed = context_store_release(&backend->store, ctx);
409
434
  if (!completed) {
410
435
  context_attach_buffers(ctx, 1, &str);
411
436
  RAISE_IF_EXCEPTION(resume_value);
@@ -452,10 +477,13 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
452
477
  VALUE resume_value = Qnil;
453
478
  op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
454
479
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
480
+ ssize_t result;
481
+ int completed;
482
+
455
483
  io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
456
484
 
457
- ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
458
- int completed = context_store_release(&backend->store, ctx);
485
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
486
+ completed = context_store_release(&backend->store, ctx);
459
487
  if (!completed) {
460
488
  context_attach_buffers(ctx, 1, &str);
461
489
  RAISE_IF_EXCEPTION(resume_value);
@@ -482,6 +510,9 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
482
510
  Backend_t *backend;
483
511
  rb_io_t *fptr;
484
512
  VALUE underlying_io;
513
+ char *buf = StringValuePtr(str);
514
+ long len = RSTRING_LEN(str);
515
+ long left = len;
485
516
 
486
517
  underlying_io = rb_ivar_get(io, ID_ivar_io);
487
518
  if (underlying_io != Qnil) io = underlying_io;
@@ -490,18 +521,17 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
490
521
  GetOpenFile(io, fptr);
491
522
  io_unset_nonblock(fptr, io);
492
523
 
493
- char *buf = StringValuePtr(str);
494
- long len = RSTRING_LEN(str);
495
- long left = len;
496
-
497
524
  while (left > 0) {
498
525
  VALUE resume_value = Qnil;
499
526
  op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
500
527
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
528
+ int result;
529
+ int completed;
530
+
501
531
  io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
502
532
 
503
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
504
- int completed = context_store_release(&backend->store, ctx);
533
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
534
+ completed = context_store_release(&backend->store, ctx);
505
535
  if (!completed) {
506
536
  context_attach_buffers(ctx, 1, &str);
507
537
  RAISE_IF_EXCEPTION(resume_value);
@@ -550,10 +580,13 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
550
580
  VALUE resume_value = Qnil;
551
581
  op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
552
582
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
583
+ int result;
584
+ int completed;
585
+
553
586
  io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
554
587
 
555
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
556
- int completed = context_store_release(&backend->store, ctx);
588
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
589
+ completed = context_store_release(&backend->store, ctx);
557
590
  if (!completed) {
558
591
  free(iov);
559
592
  context_attach_buffers(ctx, argc, argv);
@@ -604,15 +637,18 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
604
637
  long dynamic_len = length == Qnil;
605
638
  long len = dynamic_len ? 4096 : NUM2INT(length);
606
639
  long buf_pos = NUM2INT(pos);
640
+ int shrinkable;
641
+ char *buf;
642
+ long total = 0;
643
+ VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
644
+
607
645
  if (str != Qnil) {
608
646
  int current_len = RSTRING_LEN(str);
609
647
  if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
610
648
  }
611
649
  else buf_pos = 0;
612
- int shrinkable = io_setstrbuf(&str, buf_pos + len);
613
- char *buf = RSTRING_PTR(str) + buf_pos;
614
- long total = 0;
615
- VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
650
+ shrinkable = io_setstrbuf(&str, buf_pos + len);
651
+ buf = RSTRING_PTR(str) + buf_pos;
616
652
 
617
653
  GetBackend(self, backend);
618
654
  if (underlying_io != Qnil) io = underlying_io;
@@ -625,10 +661,13 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
625
661
  VALUE resume_value = Qnil;
626
662
  op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
627
663
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
664
+ int result;
665
+ int completed;
666
+
628
667
  io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
629
668
 
630
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
631
- int completed = context_store_release(&backend->store, ctx);
669
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
670
+ completed = context_store_release(&backend->store, ctx);
632
671
  if (!completed) {
633
672
  context_attach_buffers(ctx, 1, &str);
634
673
  RAISE_IF_EXCEPTION(resume_value);
@@ -675,10 +714,13 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
675
714
  VALUE resume_value = Qnil;
676
715
  op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
677
716
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
717
+ int result;
718
+ int completed;
719
+
678
720
  io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
679
721
 
680
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
681
- int completed = context_store_release(&backend->store, ctx);
722
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
723
+ completed = context_store_release(&backend->store, ctx);
682
724
  if (!completed) {
683
725
  context_attach_buffers(ctx, 1, &str);
684
726
  RAISE_IF_EXCEPTION(resume_value);
@@ -724,10 +766,13 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
724
766
  VALUE resume_value = Qnil;
725
767
  op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
726
768
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
769
+ int result;
770
+ int completed;
771
+
727
772
  io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
728
773
 
729
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
730
- int completed = context_store_release(&backend->store, ctx);
774
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
775
+ completed = context_store_release(&backend->store, ctx);
731
776
  if (!completed) {
732
777
  context_attach_buffers(ctx, 1, &str);
733
778
  RAISE_IF_EXCEPTION(resume_value);
@@ -753,6 +798,10 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
753
798
  Backend_t *backend;
754
799
  rb_io_t *fptr;
755
800
  VALUE underlying_io;
801
+ char *buf;
802
+ long len;
803
+ long left;
804
+ int flags_int;
756
805
 
757
806
  underlying_io = rb_ivar_get(io, ID_ivar_io);
758
807
  if (underlying_io != Qnil) io = underlying_io;
@@ -761,19 +810,22 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
761
810
  GetOpenFile(io, fptr);
762
811
  io_unset_nonblock(fptr, io);
763
812
 
764
- char *buf = StringValuePtr(str);
765
- long len = RSTRING_LEN(str);
766
- long left = len;
767
- int flags_int = NUM2INT(flags);
813
+ buf = StringValuePtr(str);
814
+ len = RSTRING_LEN(str);
815
+ left = len;
816
+ flags_int = NUM2INT(flags);
768
817
 
769
818
  while (left > 0) {
770
819
  VALUE resume_value = Qnil;
771
820
  op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
772
821
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
822
+ int result;
823
+ int completed;
824
+
773
825
  io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
774
826
 
775
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
776
- int completed = context_store_release(&backend->store, ctx);
827
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
828
+ completed = context_store_release(&backend->store, ctx);
777
829
  if (!completed) {
778
830
  context_attach_buffers(ctx, 1, &str);
779
831
  RAISE_IF_EXCEPTION(resume_value);
@@ -807,10 +859,13 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
807
859
  VALUE resume_value = Qnil;
808
860
  op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
809
861
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
862
+ int fd;
863
+ int completed;
864
+
810
865
  io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
811
866
 
812
- int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
813
- int completed = context_store_release(&backend->store, ctx);
867
+ fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
868
+ completed = context_store_release(&backend->store, ctx);
814
869
  RAISE_IF_EXCEPTION(resume_value);
815
870
  if (!completed) return resume_value;
816
871
  RB_GC_GUARD(resume_value);
@@ -861,6 +916,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
861
916
  rb_io_t *dest_fptr;
862
917
  VALUE underlying_io;
863
918
  int total = 0;
919
+ VALUE resume_value = Qnil;
864
920
 
865
921
  underlying_io = rb_ivar_get(src, ID_ivar_io);
866
922
  if (underlying_io != Qnil) src = underlying_io;
@@ -873,15 +929,16 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
873
929
  GetOpenFile(dest, dest_fptr);
874
930
  io_unset_nonblock(dest_fptr, dest);
875
931
 
876
- VALUE resume_value = Qnil;
877
-
878
932
  while (1) {
879
933
  op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
880
934
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
935
+ int result;
936
+ int completed;
937
+
881
938
  io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
882
939
 
883
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
884
- int completed = context_store_release(&backend->store, ctx);
940
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
941
+ completed = context_store_release(&backend->store, ctx);
885
942
  RAISE_IF_EXCEPTION(resume_value);
886
943
  if (!completed) return resume_value;
887
944
 
@@ -909,29 +966,31 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE chunksize)
909
966
  return io_uring_backend_splice(backend, src, dest, chunksize, 1);
910
967
  }
911
968
 
912
-
913
969
  VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
914
970
  Backend_t *backend;
915
971
  rb_io_t *fptr;
916
- struct sockaddr_in addr;
917
- char *host_buf = StringValueCStr(host);
972
+ struct sockaddr *ai_addr;
973
+ int ai_addrlen;
918
974
  VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
975
+ VALUE resume_value = Qnil;
976
+ op_context_t *ctx;
977
+ struct io_uring_sqe *sqe;
978
+ int result;
979
+ int completed;
980
+
981
+ ai_addrlen = backend_getaddrinfo(host, port, &ai_addr);
982
+
919
983
  if (underlying_sock != Qnil) sock = underlying_sock;
920
984
 
921
985
  GetBackend(self, backend);
922
986
  GetOpenFile(sock, fptr);
923
987
  io_unset_nonblock(fptr, sock);
924
988
 
925
- addr.sin_family = AF_INET;
926
- addr.sin_addr.s_addr = inet_addr(host_buf);
927
- addr.sin_port = htons(NUM2INT(port));
928
-
929
- VALUE resume_value = Qnil;
930
- op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
931
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
932
- io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
933
- int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
934
- int completed = context_store_release(&backend->store, ctx);
989
+ ctx = context_store_acquire(&backend->store, OP_CONNECT);
990
+ sqe = io_uring_get_sqe(&backend->ring);
991
+ io_uring_prep_connect(sqe, fptr->fd, ai_addr, ai_addrlen);
992
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
993
+ completed = context_store_release(&backend->store, ctx);
935
994
  RAISE_IF_EXCEPTION(resume_value);
936
995
  if (!completed) return resume_value;
937
996
  RB_GC_GUARD(resume_value);
@@ -944,17 +1003,54 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
944
1003
  Backend_t *backend;
945
1004
  rb_io_t *fptr;
946
1005
  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
1006
+ VALUE resume_value;
1007
+
947
1008
  if (underlying_io != Qnil) io = underlying_io;
948
1009
  GetBackend(self, backend);
949
1010
  GetOpenFile(io, fptr);
950
1011
  io_unset_nonblock(fptr, io);
951
1012
 
952
- VALUE resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
1013
+ resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
1014
+
953
1015
  RAISE_IF_EXCEPTION(resume_value);
954
1016
  RB_GC_GUARD(resume_value);
955
1017
  return self;
956
1018
  }
957
1019
 
1020
+ VALUE Backend_close(VALUE self, VALUE io) {
1021
+ Backend_t *backend;
1022
+ rb_io_t *fptr;
1023
+ VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
1024
+ VALUE resume_value = Qnil;
1025
+ op_context_t *ctx;
1026
+ struct io_uring_sqe *sqe;
1027
+ int result;
1028
+ int completed;
1029
+
1030
+ if (underlying_io != Qnil) io = underlying_io;
1031
+ GetBackend(self, backend);
1032
+ GetOpenFile(io, fptr);
1033
+
1034
+ if (fptr->fd < 0) return Qnil;
1035
+
1036
+ io_unset_nonblock(fptr, io);
1037
+
1038
+ ctx = context_store_acquire(&backend->store, OP_CLOSE);
1039
+ sqe = io_uring_get_sqe(&backend->ring);
1040
+ io_uring_prep_close(sqe, fptr->fd);
1041
+ result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
1042
+ completed = context_store_release(&backend->store, ctx);
1043
+ RAISE_IF_EXCEPTION(resume_value);
1044
+ if (!completed) return resume_value;
1045
+ RB_GC_GUARD(resume_value);
1046
+
1047
+ if (result < 0) rb_syserr_fail(-result, strerror(-result));
1048
+
1049
+ fptr_finalize(fptr);
1050
+ // fptr->fd = -1;
1051
+ return io;
1052
+ }
1053
+
958
1054
  inline struct __kernel_timespec double_to_timespec(double duration) {
959
1055
  double duration_integral;
960
1056
  double duration_fraction = modf(duration, &duration_integral);
@@ -972,18 +1068,18 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
972
1068
  int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
973
1069
  struct __kernel_timespec ts = double_to_timespec(duration);
974
1070
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
975
-
976
1071
  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
1072
+
977
1073
  io_uring_prep_timeout(sqe, &ts, 0, 0);
978
1074
  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
979
1075
  return context_store_release(&backend->store, ctx);
980
1076
  }
981
1077
 
982
1078
  VALUE Backend_sleep(VALUE self, VALUE duration) {
1079
+ VALUE resume_value = Qnil;
983
1080
  Backend_t *backend;
984
1081
  GetBackend(self, backend);
985
1082
 
986
- VALUE resume_value = Qnil;
987
1083
  io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
988
1084
  RAISE_IF_EXCEPTION(resume_value);
989
1085
  RB_GC_GUARD(resume_value);
@@ -1028,12 +1124,13 @@ struct Backend_timeout_ctx {
1028
1124
  };
1029
1125
 
1030
1126
  VALUE Backend_timeout_ensure(VALUE arg) {
1031
- struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
1032
- if (timeout_ctx->ctx->ref_count) {
1033
- timeout_ctx->ctx->result = -ECANCELED;
1127
+ struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
1128
+ if (timeout_ctx->ctx->ref_count) {
1129
+ struct io_uring_sqe *sqe;
1034
1130
 
1131
+ timeout_ctx->ctx->result = -ECANCELED;
1035
1132
  // op was not completed, so we need to cancel it
1036
- struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
1133
+ sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
1037
1134
  io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
1038
1135
  timeout_ctx->backend->pending_sqes = 0;
1039
1136
  io_uring_submit(&timeout_ctx->backend->ring);
@@ -1046,24 +1143,30 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
1046
1143
  VALUE duration;
1047
1144
  VALUE exception;
1048
1145
  VALUE move_on_value = Qnil;
1049
- rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
1050
-
1051
- struct __kernel_timespec ts = duration_to_timespec(duration);
1146
+ struct Backend_timeout_ctx timeout_ctx;
1147
+ op_context_t *ctx;
1148
+ struct io_uring_sqe *sqe;
1052
1149
  Backend_t *backend;
1053
- GetBackend(self, backend);
1150
+ struct __kernel_timespec ts;
1054
1151
  VALUE result = Qnil;
1055
- VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
1152
+ VALUE timeout;
1056
1153
 
1057
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1154
+ rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
1058
1155
 
1059
- op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
1156
+ ts = duration_to_timespec(duration);
1157
+ GetBackend(self, backend);
1158
+ timeout = rb_funcall(cTimeoutException, ID_new, 0);
1159
+
1160
+ sqe = io_uring_get_sqe(&backend->ring);
1161
+ ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
1060
1162
  ctx->resume_value = timeout;
1061
1163
  io_uring_prep_timeout(sqe, &ts, 0, 0);
1062
1164
  io_uring_sqe_set_data(sqe, ctx);
1063
1165
  io_uring_backend_defer_submit(backend);
1064
1166
  backend->base.op_count++;
1065
1167
 
1066
- struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
1168
+ timeout_ctx.backend = backend;
1169
+ timeout_ctx.ctx = ctx;
1067
1170
  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
1068
1171
 
1069
1172
  if (result == timeout) {
@@ -1080,19 +1183,21 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
1080
1183
  VALUE Backend_waitpid(VALUE self, VALUE pid) {
1081
1184
  int pid_int = NUM2INT(pid);
1082
1185
  int fd = pidfd_open(pid_int, 0);
1186
+ int status;
1187
+ pid_t ret;
1083
1188
 
1084
1189
  if (fd >= 0) {
1190
+ VALUE resume_value;
1085
1191
  Backend_t *backend;
1086
1192
  GetBackend(self, backend);
1087
1193
 
1088
- VALUE resume_value = io_uring_backend_wait_fd(backend, fd, 0);
1194
+ resume_value = io_uring_backend_wait_fd(backend, fd, 0);
1089
1195
  close(fd);
1090
1196
  RAISE_IF_EXCEPTION(resume_value);
1091
1197
  RB_GC_GUARD(resume_value);
1092
1198
  }
1093
1199
 
1094
- int status;
1095
- pid_t ret = waitpid(pid_int, &status, WNOHANG);
1200
+ ret = waitpid(pid_int, &status, WNOHANG);
1096
1201
  if (ret < 0) {
1097
1202
  int e = errno;
1098
1203
  if (e == ECHILD)
@@ -1105,6 +1210,8 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
1105
1210
 
1106
1211
  VALUE Backend_wait_event(VALUE self, VALUE raise) {
1107
1212
  Backend_t *backend;
1213
+ VALUE resume_value;
1214
+
1108
1215
  GetBackend(self, backend);
1109
1216
 
1110
1217
  if (backend->event_fd == -1) {
@@ -1115,7 +1222,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
1115
1222
  }
1116
1223
  }
1117
1224
 
1118
- VALUE resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
1225
+ resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
1119
1226
  if (RTEST(raise)) RAISE_IF_EXCEPTION(resume_value);
1120
1227
  RB_GC_GUARD(resume_value);
1121
1228
  return resume_value;
@@ -1128,6 +1235,7 @@ VALUE Backend_kind(VALUE self) {
1128
1235
  struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
1129
1236
  rb_io_t *fptr;
1130
1237
  VALUE underlying_io;
1238
+ struct io_uring_sqe *sqe;
1131
1239
 
1132
1240
  underlying_io = rb_ivar_get(io, ID_ivar_io);
1133
1241
  if (underlying_io != Qnil) io = underlying_io;
@@ -1135,17 +1243,15 @@ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, V
1135
1243
  GetOpenFile(io, fptr);
1136
1244
  io_unset_nonblock(fptr, io);
1137
1245
 
1138
- char *buf = StringValuePtr(str);
1139
- long len = RSTRING_LEN(str);
1140
-
1141
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1142
- io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
1246
+ sqe = io_uring_get_sqe(&backend->ring);
1247
+ io_uring_prep_write(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), 0);
1143
1248
  return sqe;
1144
1249
  }
1145
1250
 
1146
1251
  struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
1147
1252
  rb_io_t *fptr;
1148
1253
  VALUE underlying_io;
1254
+ struct io_uring_sqe *sqe;
1149
1255
 
1150
1256
  underlying_io = rb_ivar_get(io, ID_ivar_io);
1151
1257
  if (underlying_io != Qnil) io = underlying_io;
@@ -1153,12 +1259,8 @@ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VA
1153
1259
  GetOpenFile(io, fptr);
1154
1260
  io_unset_nonblock(fptr, io);
1155
1261
 
1156
- char *buf = StringValuePtr(str);
1157
- long len = RSTRING_LEN(str);
1158
- int flags_int = NUM2INT(flags);
1159
-
1160
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1161
- io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
1262
+ sqe = io_uring_get_sqe(&backend->ring);
1263
+ io_uring_prep_send(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), NUM2INT(flags));
1162
1264
  return sqe;
1163
1265
  }
1164
1266
 
@@ -1166,6 +1268,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
1166
1268
  rb_io_t *src_fptr;
1167
1269
  rb_io_t *dest_fptr;
1168
1270
  VALUE underlying_io;
1271
+ struct io_uring_sqe *sqe;
1169
1272
 
1170
1273
  underlying_io = rb_ivar_get(src, ID_ivar_io);
1171
1274
  if (underlying_io != Qnil) src = underlying_io;
@@ -1178,7 +1281,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
1178
1281
  GetOpenFile(dest, dest_fptr);
1179
1282
  io_unset_nonblock(dest_fptr, dest);
1180
1283
 
1181
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1284
+ sqe = io_uring_get_sqe(&backend->ring);
1182
1285
  io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
1183
1286
  return sqe;
1184
1287
  }
@@ -1206,14 +1309,19 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
1206
1309
  unsigned int sqe_count = 0;
1207
1310
  struct io_uring_sqe *last_sqe = 0;
1208
1311
  Backend_t *backend;
1312
+ int result;
1313
+ int completed;
1314
+ op_context_t *ctx;
1315
+
1209
1316
  GetBackend(self, backend);
1210
1317
  if (argc == 0) return resume_value;
1211
1318
 
1212
- op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
1319
+ ctx = context_store_acquire(&backend->store, OP_CHAIN);
1213
1320
  for (int i = 0; i < argc; i++) {
1214
1321
  VALUE op = argv[i];
1215
1322
  VALUE op_type = RARRAY_AREF(op, 0);
1216
1323
  VALUE op_len = RARRAY_LEN(op);
1324
+ unsigned int flags;
1217
1325
 
1218
1326
  if (op_type == SYM_write && op_len == 3) {
1219
1327
  last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
@@ -1223,13 +1331,16 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
1223
1331
  else if (op_type == SYM_splice && op_len == 4)
1224
1332
  last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
1225
1333
  else {
1334
+
1226
1335
  if (sqe_count) {
1336
+ struct io_uring_sqe *sqe;
1337
+
1227
1338
  io_uring_sqe_set_data(last_sqe, ctx);
1228
1339
  io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
1229
1340
 
1230
1341
  ctx->ref_count = sqe_count;
1231
1342
  ctx->result = -ECANCELED;
1232
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1343
+ sqe = io_uring_get_sqe(&backend->ring);
1233
1344
  io_uring_prep_cancel(sqe, ctx, 0);
1234
1345
  backend->pending_sqes = 0;
1235
1346
  io_uring_submit(&backend->ring);
@@ -1242,7 +1353,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
1242
1353
  }
1243
1354
 
1244
1355
  io_uring_sqe_set_data(last_sqe, ctx);
1245
- unsigned int flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
1356
+ flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
1246
1357
  io_uring_sqe_set_flags(last_sqe, flags);
1247
1358
  sqe_count++;
1248
1359
  }
@@ -1251,14 +1362,16 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
1251
1362
  ctx->ref_count = sqe_count + 1;
1252
1363
  io_uring_backend_defer_submit(backend);
1253
1364
  resume_value = backend_await((struct Backend_base *)backend);
1254
- int result = ctx->result;
1255
- int completed = context_store_release(&backend->store, ctx);
1365
+ result = ctx->result;
1366
+ completed = context_store_release(&backend->store, ctx);
1256
1367
  if (!completed) {
1368
+ struct io_uring_sqe *sqe;
1369
+
1257
1370
  Backend_chain_ctx_attach_buffers(ctx, argc, argv);
1258
1371
 
1259
1372
  // op was not completed (an exception was raised), so we need to cancel it
1260
1373
  ctx->result = -ECANCELED;
1261
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1374
+ sqe = io_uring_get_sqe(&backend->ring);
1262
1375
  io_uring_prep_cancel(sqe, ctx, 0);
1263
1376
  backend->pending_sqes = 0;
1264
1377
  io_uring_submit(&backend->ring);
@@ -1322,8 +1435,10 @@ static inline void splice_chunks_get_sqe(
1322
1435
  }
1323
1436
 
1324
1437
  static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
1438
+ struct io_uring_sqe *sqe;
1439
+
1325
1440
  ctx->result = -ECANCELED;
1326
- struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
1441
+ sqe = io_uring_get_sqe(&backend->ring);
1327
1442
  io_uring_prep_cancel(sqe, ctx, 0);
1328
1443
  backend->pending_sqes = 0;
1329
1444
  io_uring_submit(&backend->ring);
@@ -1336,9 +1451,11 @@ static inline int splice_chunks_await_ops(
1336
1451
  VALUE *switchpoint_result
1337
1452
  )
1338
1453
  {
1454
+ int completed;
1339
1455
  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
1456
+
1340
1457
  if (result) (*result) = res;
1341
- int completed = context_store_release(&backend->store, *ctx);
1458
+ completed = context_store_release(&backend->store, *ctx);
1342
1459
  if (!completed) {
1343
1460
  splice_chunks_cancel(backend, *ctx);
1344
1461
  if (TEST_EXCEPTION(*switchpoint_result)) return 1;
@@ -1352,17 +1469,22 @@ static inline int splice_chunks_await_ops(
1352
1469
 
1353
1470
  VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
1354
1471
  Backend_t *backend;
1355
- GetBackend(self, backend);
1356
1472
  int total = 0;
1357
1473
  int err = 0;
1358
1474
  VALUE switchpoint_result = Qnil;
1359
1475
  op_context_t *ctx = 0;
1360
1476
  struct io_uring_sqe *sqe = 0;
1361
-
1477
+ int maxlen;
1478
+ VALUE underlying_io;
1479
+ VALUE str = Qnil;
1480
+ VALUE chunk_len_value = Qnil;
1362
1481
  rb_io_t *src_fptr;
1363
1482
  rb_io_t *dest_fptr;
1483
+ int pipefd[2] = { -1, -1 };
1484
+
1485
+ GetBackend(self, backend);
1364
1486
 
1365
- VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
1487
+ underlying_io = rb_ivar_get(src, ID_ivar_io);
1366
1488
  if (underlying_io != Qnil) src = underlying_io;
1367
1489
  GetOpenFile(src, src_fptr);
1368
1490
  io_verify_blocking_mode(src_fptr, src, Qtrue);
@@ -1373,11 +1495,8 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
1373
1495
  GetOpenFile(dest, dest_fptr);
1374
1496
  io_verify_blocking_mode(dest_fptr, dest, Qtrue);
1375
1497
 
1376
- int maxlen = NUM2INT(chunk_size);
1377
- VALUE str = Qnil;
1378
- VALUE chunk_len_value = Qnil;
1498
+ maxlen = NUM2INT(chunk_size);
1379
1499
 
1380
- int pipefd[2] = { -1, -1 };
1381
1500
  if (pipe(pipefd) == -1) {
1382
1501
  err = errno;
1383
1502
  goto syscallerror;
@@ -1521,6 +1640,7 @@ void Init_Backend() {
1521
1640
  rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
1522
1641
  rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
1523
1642
  rb_define_method(cBackend, "write", Backend_write_m, -1);
1643
+ rb_define_method(cBackend, "close", Backend_close, 1);
1524
1644
 
1525
1645
  SYM_io_uring = ID2SYM(rb_intern("io_uring"));
1526
1646
  SYM_send = ID2SYM(rb_intern("send"));