io-event 1.7.1 → 1.7.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b7b49a9181d8917dd2dba7620f964ad0dced4d33ab9a5d725f5493debd927ea6
4
- data.tar.gz: ce62d1d7c3488ccb39620a66b3c25a12537a586fc0e6b9b9f3263ad662abbcb0
3
+ metadata.gz: 00ee9c58a1f3c352028955bd7fa687b868a38d658f076ed128999e434a50e67f
4
+ data.tar.gz: 8bb4a29b1f992e2fdf5b155e8efb1361e70cf1a635bbcac75723d6e255b27d92
5
5
  SHA512:
6
- metadata.gz: 6c0d875c4ec99d9670d16980ea6b6f92073547b6da089cccab1dc96af6b40f0034a3f3ce6151123acddedb48b808b27782bce638ec93884823a07951fb381f57
7
- data.tar.gz: fc818ad30b6087149020f7b9fc718c9219b54d19dbfef69c468605c4154d58e0a75d802d1b646829d2a48c8a24d4b4bcdae15a5cea21aa2826d051eb1975f585
6
+ metadata.gz: 6882b4c368d9b5e1fd32d3545409d311db6d5624ae9353f1112c0f3960a85e62685aaa05ff9a20612c2e17bb8d286e1ce88be5024ed0d343713c8ddf378f9d05
7
+ data.tar.gz: be96ca6107b200375af43eefad3b549eff15a7e4ad90798fb695c20dcd711f557b405a9c8f36d40324531ab92f03e1dbbaadb07419d7091ae7f6247567573877
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -30,6 +30,9 @@ have_func("rb_ext_ractor_safe")
30
30
  have_func("&rb_fiber_transfer")
31
31
 
32
32
  if have_library("uring") and have_header("liburing.h")
33
+ # We might want to consider using this in the future:
34
+ # have_func("io_uring_submit_and_wait_timeout", "liburing.h")
35
+
33
36
  $srcs << "io/event/selector/uring.c"
34
37
  end
35
38
 
@@ -8,6 +8,11 @@
8
8
  #include <errno.h>
9
9
  #include <assert.h>
10
10
 
11
+ enum {
12
+ IO_EVENT_ARRAY_MAXIMUM_COUNT = SIZE_MAX / sizeof(void*),
13
+ IO_EVENT_ARRAY_DEFAULT_COUNT = 128
14
+ };
15
+
11
16
  struct IO_Event_Array {
12
17
  // The array of pointers to elements:
13
18
  void **base;
@@ -25,20 +30,27 @@ struct IO_Event_Array {
25
30
  void (*element_free)(void*);
26
31
  };
27
32
 
28
- inline static void IO_Event_Array_allocate(struct IO_Event_Array *array, size_t count, size_t element_size)
33
+ inline static int IO_Event_Array_allocate(struct IO_Event_Array *array, size_t count, size_t element_size)
29
34
  {
35
+ array->limit = 0;
36
+ array->element_size = element_size;
37
+
30
38
  if (count) {
31
39
  array->base = (void**)calloc(count, sizeof(void*));
32
- assert(array->base);
40
+
41
+ if (array->base == NULL) {
42
+ return -1;
43
+ }
33
44
 
34
45
  array->count = count;
46
+
47
+ return 1;
35
48
  } else {
36
49
  array->base = NULL;
37
50
  array->count = 0;
51
+
52
+ return 0;
38
53
  }
39
-
40
- array->limit = 0;
41
- array->element_size = element_size;
42
54
  }
43
55
 
44
56
  inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *array)
@@ -49,32 +61,51 @@ inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *arr
49
61
 
50
62
  inline static void IO_Event_Array_free(struct IO_Event_Array *array)
51
63
  {
52
- for (size_t i = 0; i < array->limit; i += 1) {
53
- void *element = array->base[i];
54
- if (element) {
55
- array->element_free(element);
56
-
57
- free(element);
64
+ if (array->base) {
65
+ void **base = array->base;
66
+ size_t limit = array->limit;
67
+
68
+ array->base = NULL;
69
+ array->count = 0;
70
+ array->limit = 0;
71
+
72
+ for (size_t i = 0; i < limit; i += 1) {
73
+ void *element = base[i];
74
+ if (element) {
75
+ array->element_free(element);
76
+
77
+ free(element);
78
+ }
58
79
  }
80
+
81
+ free(base);
59
82
  }
60
-
61
- if (array->base)
62
- free(array->base);
63
-
64
- array->base = NULL;
65
- array->count = 0;
66
- array->limit = 0;
67
83
  }
68
84
 
69
85
  inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t count)
70
86
  {
71
87
  if (count <= array->count) {
88
+ // Already big enough:
72
89
  return 0;
73
90
  }
74
91
 
75
- // Compute the next multiple (ideally a power of 2):
92
+ if (count > IO_EVENT_ARRAY_MAXIMUM_COUNT) {
93
+ errno = ENOMEM;
94
+ return -1;
95
+ }
96
+
76
97
  size_t new_count = array->count;
77
- while (new_count < count) {
98
+
99
+ // If the array is empty, we need to set the initial size:
100
+ if (new_count == 0) new_count = IO_EVENT_ARRAY_DEFAULT_COUNT;
101
+ else while (new_count < count) {
102
+ // Ensure we don't overflow:
103
+ if (new_count > (IO_EVENT_ARRAY_MAXIMUM_COUNT / 2)) {
104
+ new_count = IO_EVENT_ARRAY_MAXIMUM_COUNT;
105
+ break;
106
+ }
107
+
108
+ // Compute the next multiple (ideally a power of 2):
78
109
  new_count *= 2;
79
110
  }
80
111
 
@@ -90,6 +121,7 @@ inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t cou
90
121
  array->base = (void**)new_base;
91
122
  array->count = new_count;
92
123
 
124
+ // Resizing successful:
93
125
  return 1;
94
126
  }
95
127
 
@@ -337,7 +337,10 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
337
337
 
338
338
  selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
339
339
  selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
340
- IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
340
+ int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
341
+ if (result < 0) {
342
+ rb_sys_fail("IO_Event_Selector_EPoll_allocate:IO_Event_Array_allocate");
343
+ }
341
344
 
342
345
  return instance;
343
346
  }
@@ -507,6 +510,8 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
507
510
  .events = IO_EVENT_READABLE,
508
511
  };
509
512
 
513
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
514
+
510
515
  int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
511
516
 
512
517
  if (result == -1) {
@@ -566,6 +571,8 @@ VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
566
571
  .events = RB_NUM2INT(events),
567
572
  };
568
573
 
574
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
575
+
569
576
  int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
570
577
 
571
578
  if (result == -1) {
@@ -664,6 +671,8 @@ VALUE IO_Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
664
671
  .offset = offset,
665
672
  };
666
673
 
674
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
675
+
667
676
  return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
668
677
  }
669
678
 
@@ -760,6 +769,8 @@ VALUE IO_Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
760
769
  .offset = offset,
761
770
  };
762
771
 
772
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
773
+
763
774
  return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
764
775
  }
765
776
 
@@ -311,7 +311,11 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
311
311
 
312
312
  selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
313
313
  selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
314
- IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
314
+
315
+ int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
316
+ if (result < 0) {
317
+ rb_sys_fail("IO_Event_Selector_KQueue_allocate:IO_Event_Array_allocate");
318
+ }
315
319
 
316
320
  return instance;
317
321
  }
@@ -501,6 +505,8 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
501
505
  .events = IO_EVENT_EXIT,
502
506
  };
503
507
 
508
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
509
+
504
510
  struct process_wait_arguments process_wait_arguments = {
505
511
  .selector = selector,
506
512
  .waiting = &waiting,
@@ -564,6 +570,8 @@ VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE
564
570
  .events = RB_NUM2INT(events),
565
571
  };
566
572
 
573
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
574
+
567
575
  int result = IO_Event_Selector_KQueue_Waiting_register(selector, descriptor, &waiting);
568
576
  if (result == -1) {
569
577
  rb_sys_fail("IO_Event_Selector_KQueue_io_wait:IO_Event_Selector_KQueue_Waiting_register");
@@ -667,6 +675,8 @@ VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE
667
675
  .offset = offset,
668
676
  };
669
677
 
678
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
679
+
670
680
  return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
671
681
  }
672
682
 
@@ -773,6 +783,8 @@ VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
773
783
  .offset = offset,
774
784
  };
775
785
 
786
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
787
+
776
788
  return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
777
789
  }
778
790
 
@@ -979,7 +991,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
979
991
  // Non-comprehensive testing shows this gives a 1.5x speedup.
980
992
 
981
993
  // First do the syscall with no timeout to get any immediately available events:
982
- if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout=" PRINTF_TIMESPEC "\r\n", PRINTF_TIMESPEC_ARGS(arguments.storage));
994
+ if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout=" IO_EVENT_PRINTF_TIMESPEC "\r\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
983
995
  select_internal_with_gvl(&arguments);
984
996
  if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl done\r\n");
985
997
 
@@ -997,7 +1009,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
997
1009
  struct timespec start_time;
998
1010
  IO_Event_Selector_current_time(&start_time);
999
1011
 
1000
- if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout=" PRINTF_TIMESPEC "\n", PRINTF_TIMESPEC_ARGS(arguments.storage));
1012
+ if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout=" IO_EVENT_PRINTF_TIMESPEC "\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
1001
1013
  select_internal_without_gvl(&arguments);
1002
1014
 
1003
1015
  struct timespec end_time;
@@ -152,5 +152,5 @@ int IO_Event_Selector_queue_flush(struct IO_Event_Selector *backend);
152
152
  void IO_Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
153
153
  void IO_Event_Selector_current_time(struct timespec *time);
154
154
 
155
- #define PRINTF_TIMESPEC "%lld.%.9ld"
156
- #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
155
+ #define IO_EVENT_PRINTF_TIMESPEC "%lld.%.9ld"
156
+ #define IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
@@ -35,6 +35,7 @@
35
35
  enum {
36
36
  DEBUG = 0,
37
37
  DEBUG_COMPLETION = 0,
38
+ DEBUG_IO_READ = 1,
38
39
  };
39
40
 
40
41
  enum {URING_ENTRIES = 64};
@@ -237,7 +238,10 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
237
238
 
238
239
  selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
239
240
  selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
240
- IO_Event_Array_allocate(&selector->completions, 1024, sizeof(struct IO_Event_Selector_URing_Completion));
241
+ int result = IO_Event_Array_allocate(&selector->completions, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_URing_Completion));
242
+ if (result < 0) {
243
+ rb_sys_fail("IO_Event_Selector_URing_allocate:IO_Event_Array_allocate");
244
+ }
241
245
 
242
246
  return instance;
243
247
  }
@@ -483,6 +487,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
483
487
  .fiber = fiber,
484
488
  };
485
489
 
490
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
491
+
486
492
  struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
487
493
 
488
494
  struct process_wait_arguments process_wait_arguments = {
@@ -586,6 +592,8 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
586
592
  .fiber = fiber,
587
593
  };
588
594
 
595
+ RB_OBJ_WRITTEN(self, Qundef, fiber);
596
+
589
597
  struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
590
598
 
591
599
  struct io_uring_sqe *sqe = io_get_sqe(selector);
@@ -629,6 +637,7 @@ struct io_read_arguments {
629
637
  struct IO_Event_Selector_URing *selector;
630
638
  struct IO_Event_Selector_URing_Waiting *waiting;
631
639
  int descriptor;
640
+ off_t offset;
632
641
  char *buffer;
633
642
  size_t length;
634
643
  };
@@ -642,7 +651,7 @@ io_read_submit(VALUE _arguments)
642
651
  if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
643
652
 
644
653
  struct io_uring_sqe *sqe = io_get_sqe(selector);
645
- io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
654
+ io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
646
655
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
647
656
  io_uring_submit_now(selector);
648
657
 
@@ -672,18 +681,21 @@ io_read_ensure(VALUE _arguments)
672
681
  }
673
682
 
674
683
  static int
675
- io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
684
+ io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
676
685
  {
677
686
  struct IO_Event_Selector_URing_Waiting waiting = {
678
687
  .fiber = fiber,
679
688
  };
680
689
 
690
+ RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
691
+
681
692
  IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
682
693
 
683
694
  struct io_read_arguments io_read_arguments = {
684
695
  .selector = selector,
685
696
  .waiting = &waiting,
686
697
  .descriptor = descriptor,
698
+ .offset = offset,
687
699
  .buffer = buffer,
688
700
  .length = length
689
701
  };
@@ -706,10 +718,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
706
718
  size_t length = NUM2SIZET(_length);
707
719
  size_t offset = NUM2SIZET(_offset);
708
720
  size_t total = 0;
721
+ off_t from = io_seekable(descriptor);
709
722
 
710
723
  size_t maximum_size = size - offset;
711
724
  while (maximum_size) {
712
- int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);
725
+ int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
713
726
 
714
727
  if (result > 0) {
715
728
  total += result;
@@ -743,12 +756,52 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
743
756
  return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
744
757
  }
745
758
 
759
+ VALUE IO_Event_Selector_URing_io_pread(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
760
+ struct IO_Event_Selector_URing *selector = NULL;
761
+ TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
762
+
763
+ int descriptor = IO_Event_Selector_io_descriptor(io);
764
+
765
+ void *base;
766
+ size_t size;
767
+ rb_io_buffer_get_bytes_for_writing(buffer, &base, &size);
768
+
769
+ size_t length = NUM2SIZET(_length);
770
+ size_t offset = NUM2SIZET(_offset);
771
+ size_t total = 0;
772
+ off_t from = NUM2OFFT(_from);
773
+
774
+ size_t maximum_size = size - offset;
775
+ while (maximum_size) {
776
+ int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
777
+
778
+ if (result > 0) {
779
+ total += result;
780
+ offset += result;
781
+ from += result;
782
+ if ((size_t)result >= length) break;
783
+ length -= result;
784
+ } else if (result == 0) {
785
+ break;
786
+ } else if (length > 0 && IO_Event_try_again(-result)) {
787
+ IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_READABLE));
788
+ } else {
789
+ return rb_fiber_scheduler_io_result(-1, -result);
790
+ }
791
+
792
+ maximum_size = size - offset;
793
+ }
794
+
795
+ return rb_fiber_scheduler_io_result(total, 0);
796
+ }
797
+
746
798
  #pragma mark - IO#write
747
799
 
748
800
  struct io_write_arguments {
749
801
  struct IO_Event_Selector_URing *selector;
750
802
  struct IO_Event_Selector_URing_Waiting *waiting;
751
803
  int descriptor;
804
+ off_t offset;
752
805
  char *buffer;
753
806
  size_t length;
754
807
  };
@@ -762,7 +815,7 @@ io_write_submit(VALUE _argument)
762
815
  if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
763
816
 
764
817
  struct io_uring_sqe *sqe = io_get_sqe(selector);
765
- io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
818
+ io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
766
819
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
767
820
  io_uring_submit_pending(selector);
768
821
 
@@ -792,18 +845,21 @@ io_write_ensure(VALUE _argument)
792
845
  }
793
846
 
794
847
  static int
795
- io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
848
+ io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
796
849
  {
797
850
  struct IO_Event_Selector_URing_Waiting waiting = {
798
851
  .fiber = fiber,
799
852
  };
800
853
 
854
+ RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
855
+
801
856
  IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
802
857
 
803
858
  struct io_write_arguments arguments = {
804
859
  .selector = selector,
805
860
  .waiting = &waiting,
806
861
  .descriptor = descriptor,
862
+ .offset = offset,
807
863
  .buffer = buffer,
808
864
  .length = length,
809
865
  };
@@ -826,6 +882,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
826
882
  size_t length = NUM2SIZET(_length);
827
883
  size_t offset = NUM2SIZET(_offset);
828
884
  size_t total = 0;
885
+ off_t from = io_seekable(descriptor);
829
886
 
830
887
  if (length > size) {
831
888
  rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
@@ -833,7 +890,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
833
890
 
834
891
  size_t maximum_size = size - offset;
835
892
  while (maximum_size) {
836
- int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);
893
+ int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
837
894
 
838
895
  if (result > 0) {
839
896
  total += result;
@@ -867,6 +924,49 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
867
924
  return IO_Event_Selector_URing_io_write(self, argv[0], argv[1], argv[2], argv[3], _offset);
868
925
  }
869
926
 
927
+ VALUE IO_Event_Selector_URing_io_pwrite(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
928
+ struct IO_Event_Selector_URing *selector = NULL;
929
+ TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
930
+
931
+ int descriptor = IO_Event_Selector_io_descriptor(io);
932
+
933
+ const void *base;
934
+ size_t size;
935
+ rb_io_buffer_get_bytes_for_reading(buffer, &base, &size);
936
+
937
+ size_t length = NUM2SIZET(_length);
938
+ size_t offset = NUM2SIZET(_offset);
939
+ size_t total = 0;
940
+ off_t from = NUM2OFFT(_from);
941
+
942
+ if (length > size) {
943
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
944
+ }
945
+
946
+ size_t maximum_size = size - offset;
947
+ while (maximum_size) {
948
+ int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
949
+
950
+ if (result > 0) {
951
+ total += result;
952
+ offset += result;
953
+ from += result;
954
+ if ((size_t)result >= length) break;
955
+ length -= result;
956
+ } else if (result == 0) {
957
+ break;
958
+ } else if (length > 0 && IO_Event_try_again(-result)) {
959
+ IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_WRITABLE));
960
+ } else {
961
+ return rb_fiber_scheduler_io_result(-1, -result);
962
+ }
963
+
964
+ maximum_size = size - offset;
965
+ }
966
+
967
+ return rb_fiber_scheduler_io_result(total, 0);
968
+ }
969
+
870
970
  #endif
871
971
 
872
972
  #pragma mark - IO#close
@@ -1118,6 +1218,8 @@ void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
1118
1218
  #ifdef HAVE_RUBY_IO_BUFFER_H
1119
1219
  rb_define_method(IO_Event_Selector_URing, "io_read", IO_Event_Selector_URing_io_read_compatible, -1);
1120
1220
  rb_define_method(IO_Event_Selector_URing, "io_write", IO_Event_Selector_URing_io_write_compatible, -1);
1221
+ rb_define_method(IO_Event_Selector_URing, "io_pread", IO_Event_Selector_URing_io_pread, 6);
1222
+ rb_define_method(IO_Event_Selector_URing, "io_pwrite", IO_Event_Selector_URing_io_pwrite, 6);
1121
1223
  #endif
1122
1224
 
1123
1225
  rb_define_method(IO_Event_Selector_URing, "io_close", IO_Event_Selector_URing_io_close, 1);
@@ -5,6 +5,6 @@
5
5
 
6
6
  class IO
7
7
  module Event
8
- VERSION = "1.7.1"
8
+ VERSION = "1.7.2"
9
9
  end
10
10
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: io-event
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.7.1
4
+ version: 1.7.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Samuel Williams
@@ -45,7 +45,7 @@ cert_chain:
45
45
  Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
46
46
  voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
47
47
  -----END CERTIFICATE-----
48
- date: 2024-10-04 00:00:00.000000000 Z
48
+ date: 2024-10-16 00:00:00.000000000 Z
49
49
  dependencies: []
50
50
  description:
51
51
  email:
metadata.gz.sig CHANGED
Binary file