io-event 1.7.1 → 1.7.3
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +3 -2
- data/ext/extconf.rb +3 -0
- data/ext/io/event/selector/array.h +50 -20
- data/ext/io/event/selector/epoll.c +12 -1
- data/ext/io/event/selector/kqueue.c +15 -3
- data/ext/io/event/selector/selector.c +4 -0
- data/ext/io/event/selector/selector.h +2 -2
- data/ext/io/event/selector/uring.c +109 -7
- data/lib/io/event/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +2 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4835f250a8c525c97d3315d67508bff55d6a21e3b208085e970c10635a3e9296
+  data.tar.gz: 2187f6b6aeacd2ef3fa2b521623dd151390aefe0963bb69a00e4d0e0f94b67f5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e9e07bbc607cb39694c96c40a8bef133590bf352de6a3713a0d1ce094df22b6cd611c0723694852fa9ca46d8bbb7d367a0ba8601cd4ee883ed2a1bdc2eb29680
+  data.tar.gz: 267a7d8e83f1f48b89261f1b55a07f96687da1f0e5740441c9499308fac5f10fd4cc63441677e7bd0131a0fab3dc9af6d85b4f51be81504c6e39b047efa97e2d
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/extconf.rb
CHANGED
@@ -30,6 +30,9 @@ have_func("rb_ext_ractor_safe")
 have_func("&rb_fiber_transfer")
 
 if have_library("uring") and have_header("liburing.h")
+	# We might want to consider using this in the future:
+	# have_func("io_uring_submit_and_wait_timeout", "liburing.h")
+	
 	$srcs << "io/event/selector/uring.c"
 end
 
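The commented-out have_func check above hints at a possible future optimization. As a rough sketch of how it would be consumed (io_uring_submit_and_wait_timeout is real liburing API, but this guard is not part of the gem): if the check were enabled, mkmf would define HAVE_IO_URING_SUBMIT_AND_WAIT_TIMEOUT, and the C source could gate the newer call behind it.

#include <liburing.h>

// Hypothetical sketch: gating the newer liburing call on the mkmf feature check.
static int submit_and_wait(struct io_uring *ring, struct __kernel_timespec *timeout)
{
#ifdef HAVE_IO_URING_SUBMIT_AND_WAIT_TIMEOUT
	struct io_uring_cqe *cqe = NULL;
	// Submit pending SQEs and wait for at least one completion or the timeout:
	return io_uring_submit_and_wait_timeout(ring, &cqe, 1, timeout, NULL);
#else
	(void)timeout;
	// Fallback: plain submit; completions are reaped separately.
	return io_uring_submit(ring);
#endif
}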
data/ext/io/event/selector/array.h
CHANGED
@@ -8,6 +8,9 @@
 #include <errno.h>
 #include <assert.h>
 
+static const size_t IO_EVENT_ARRAY_MAXIMUM_COUNT = SIZE_MAX / sizeof(void*);
+static const size_t IO_EVENT_ARRAY_DEFAULT_COUNT = 128;
+
 struct IO_Event_Array {
 	// The array of pointers to elements:
 	void **base;
@@ -25,20 +28,27 @@ struct IO_Event_Array {
 	void (*element_free)(void*);
 };
 
-inline static
+inline static int IO_Event_Array_allocate(struct IO_Event_Array *array, size_t count, size_t element_size)
 {
+	array->limit = 0;
+	array->element_size = element_size;
+	
 	if (count) {
 		array->base = (void**)calloc(count, sizeof(void*));
-		
+		
+		if (array->base == NULL) {
+			return -1;
+		}
 		
 		array->count = count;
+		
+		return 1;
 	} else {
 		array->base = NULL;
 		array->count = 0;
+		
+		return 0;
 	}
-	
-	array->limit = 0;
-	array->element_size = element_size;
 }
 
 inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *array)
@@ -49,32 +59,51 @@ inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *arr
 
 inline static void IO_Event_Array_free(struct IO_Event_Array *array)
 {
-	
-	void
-	
-	
-	
-	
+	if (array->base) {
+		void **base = array->base;
+		size_t limit = array->limit;
+		
+		array->base = NULL;
+		array->count = 0;
+		array->limit = 0;
+		
+		for (size_t i = 0; i < limit; i += 1) {
+			void *element = base[i];
+			if (element) {
+				array->element_free(element);
+				
+				free(element);
+			}
 		}
+		
+		free(base);
 	}
-	
-	if (array->base)
-		free(array->base);
-	
-	array->base = NULL;
-	array->count = 0;
-	array->limit = 0;
 }
 
 inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t count)
 {
 	if (count <= array->count) {
+		// Already big enough:
 		return 0;
 	}
 	
-	
+	if (count > IO_EVENT_ARRAY_MAXIMUM_COUNT) {
+		errno = ENOMEM;
+		return -1;
+	}
+	
 	size_t new_count = array->count;
-	
+	
+	// If the array is empty, we need to set the initial size:
+	if (new_count == 0) new_count = IO_EVENT_ARRAY_DEFAULT_COUNT;
+	else while (new_count < count) {
+		// Ensure we don't overflow:
+		if (new_count > (IO_EVENT_ARRAY_MAXIMUM_COUNT / 2)) {
+			new_count = IO_EVENT_ARRAY_MAXIMUM_COUNT;
+			break;
+		}
+		
+		// Compute the next multiple (ideally a power of 2):
 		new_count *= 2;
 	}
 
@@ -90,6 +119,7 @@ inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t cou
 	array->base = (void**)new_base;
 	array->count = new_count;
 	
+	// Resizing successful:
 	return 1;
 }
 
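The rewritten resize logic grows the array geometrically while guarding against size_t overflow: an empty array starts at IO_EVENT_ARRAY_DEFAULT_COUNT, and doubling is clamped at IO_EVENT_ARRAY_MAXIMUM_COUNT so new_count *= 2 can never wrap. A standalone sketch of that growth policy (names mirror the header; this is illustrative, not the gem's code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MAXIMUM_COUNT (SIZE_MAX / sizeof(void*))
#define DEFAULT_COUNT 128

// Mirrors the diff above: start at the default count when empty, otherwise
// double until `required` is covered, clamping before the doubling overflows.
static size_t next_count(size_t current, size_t required)
{
	size_t new_count = current;
	if (new_count == 0) new_count = DEFAULT_COUNT;
	else while (new_count < required) {
		if (new_count > MAXIMUM_COUNT / 2) return MAXIMUM_COUNT; // clamp, don't overflow
		new_count *= 2;
	}
	return new_count;
}

int main(void)
{
	printf("%zu\n", next_count(0, 1));     // 128 (default initial size)
	printf("%zu\n", next_count(128, 300)); // 512 (doubled twice)
	return 0;
}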
data/ext/io/event/selector/epoll.c
CHANGED
@@ -337,7 +337,10 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
 	
 	selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
-	IO_Event_Array_allocate(&selector->descriptors,
+	int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_EPoll_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -507,6 +510,8 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.events = IO_EVENT_READABLE,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
 	
 	if (result == -1) {
@@ -566,6 +571,8 @@ VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 		.events = RB_NUM2INT(events),
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
 	
 	if (result == -1) {
@@ -664,6 +671,8 @@ VALUE IO_Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
 }
 
@@ -760,6 +769,8 @@ VALUE IO_Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
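The recurring RB_OBJ_WRITTEN(self, Qundef, fiber) additions across the selectors inform Ruby's generational GC that the selector now references the fiber: the reference is stored into a stack-allocated waiting struct that the selector will later mark, so without the barrier a minor GC could miss the edge when self is already old. A minimal sketch of the pattern (simplified; not the gem's actual registration logic):

#include <ruby.h>

struct waiting {
	VALUE fiber;
};

// Hypothetical wait function: stores a fiber reference that `self` will reach
// through its mark function, so the new self -> fiber edge must be recorded
// before any allocation can trigger a minor GC.
static VALUE example_io_wait(VALUE self, VALUE fiber)
{
	struct waiting waiting = {
		.fiber = fiber,
	};
	
	// Record the reference so the generational GC keeps `fiber` alive:
	RB_OBJ_WRITTEN(self, Qundef, waiting.fiber);
	
	// ... register `waiting` with the selector and suspend the fiber ...
	return Qnil;
}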
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -311,7 +311,11 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
 	
 	selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
-	
+	
+	int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_KQueue_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -501,6 +505,8 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.events = IO_EVENT_EXIT,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct process_wait_arguments process_wait_arguments = {
 		.selector = selector,
 		.waiting = &waiting,
@@ -564,6 +570,8 @@ VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE
 		.events = RB_NUM2INT(events),
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_KQueue_Waiting_register(selector, descriptor, &waiting);
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Selector_KQueue_io_wait:IO_Event_Selector_KQueue_Waiting_register");
@@ -667,6 +675,8 @@ VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
 }
 
@@ -773,6 +783,8 @@ VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
@@ -979,7 +991,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	// Non-comprehensive testing shows this gives a 1.5x speedup.
 	
 	// First do the syscall with no timeout to get any immediately available events:
-	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout="
+	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout=" IO_EVENT_PRINTF_TIMESPEC "\r\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
 	select_internal_with_gvl(&arguments);
 	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl done\r\n");
 	
@@ -997,7 +1009,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	struct timespec start_time;
 	IO_Event_Selector_current_time(&start_time);
 	
-	if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout="
+	if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout=" IO_EVENT_PRINTF_TIMESPEC "\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
 	select_internal_without_gvl(&arguments);
 	
 	struct timespec end_time;
data/ext/io/event/selector/selector.c
CHANGED
@@ -233,6 +233,8 @@ VALUE IO_Event_Selector_resume(struct IO_Event_Selector *backend, int argc, VALU
 		.fiber = rb_fiber_current()
 	};
 	
+	RB_OBJ_WRITTEN(backend->self, Qundef, waiting.fiber);
+	
 	queue_push(backend, &waiting);
 	
 	struct wait_and_transfer_arguments arguments = {
@@ -266,6 +268,8 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE
 		.fiber = rb_fiber_current()
 	};
 	
+	RB_OBJ_WRITTEN(backend->self, Qundef, waiting.fiber);
+	
 	queue_push(backend, &waiting);
 	
 	struct wait_and_transfer_arguments arguments = {
data/ext/io/event/selector/selector.h
CHANGED
@@ -152,5 +152,5 @@ int IO_Event_Selector_queue_flush(struct IO_Event_Selector *backend);
 void IO_Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
 void IO_Event_Selector_current_time(struct timespec *time);
 
-#define
-#define
+#define IO_EVENT_PRINTF_TIMESPEC "%lld.%.9ld"
+#define IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
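These two macros pair a format string with its matching argument list, so callers can splice a struct timespec into any printf-style call, as the kqueue debug statements above now do. A small, self-contained usage sketch:

#include <stdio.h>
#include <time.h>

#define IO_EVENT_PRINTF_TIMESPEC "%lld.%.9ld"
#define IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(ts) (long long)((ts).tv_sec), (ts).tv_nsec

int main(void)
{
	struct timespec timeout = {.tv_sec = 1, .tv_nsec = 500000000};
	
	// String literal concatenation splices the format in; prints "timeout=1.500000000":
	printf("timeout=" IO_EVENT_PRINTF_TIMESPEC "\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(timeout));
	
	return 0;
}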
data/ext/io/event/selector/uring.c
CHANGED
@@ -35,6 +35,7 @@
 enum {
 	DEBUG = 0,
 	DEBUG_COMPLETION = 0,
+	DEBUG_IO_READ = 1,
 };
 
 enum {URING_ENTRIES = 64};
@@ -237,7 +238,10 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 	
 	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
 	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
-	IO_Event_Array_allocate(&selector->completions,
+	int result = IO_Event_Array_allocate(&selector->completions, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_URing_Completion));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_URing_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -483,6 +487,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct process_wait_arguments process_wait_arguments = {
@@ -586,6 +592,8 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
@@ -629,6 +637,7 @@ struct io_read_arguments {
 	struct IO_Event_Selector_URing *selector;
 	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
+	off_t offset;
 	char *buffer;
 	size_t length;
 };
@@ -642,7 +651,7 @@ io_read_submit(VALUE _arguments)
 	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length,
+	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
 	io_uring_submit_now(selector);
 	
@@ -672,18 +681,21 @@ io_read_ensure(VALUE _arguments)
 }
 
 static int
-io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
 {
 	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
+	
 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_read_arguments io_read_arguments = {
 		.selector = selector,
 		.waiting = &waiting,
 		.descriptor = descriptor,
+		.offset = offset,
 		.buffer = buffer,
 		.length = length
 	};
@@ -706,10 +718,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
 	size_t total = 0;
+	off_t from = io_seekable(descriptor);
 	
 	size_t maximum_size = size - offset;
 	while (maximum_size) {
-		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
 		
 		if (result > 0) {
 			total += result;
@@ -743,12 +756,52 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
 	return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
 }
 
+VALUE IO_Event_Selector_URing_io_pread(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+	
+	int descriptor = IO_Event_Selector_io_descriptor(io);
+	
+	void *base;
+	size_t size;
+	rb_io_buffer_get_bytes_for_writing(buffer, &base, &size);
+	
+	size_t length = NUM2SIZET(_length);
+	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
+	off_t from = NUM2OFFT(_from);
+	
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
+		
+		if (result > 0) {
+			total += result;
+			offset += result;
+			from += result;
+			if ((size_t)result >= length) break;
+			length -= result;
+		} else if (result == 0) {
+			break;
+		} else if (length > 0 && IO_Event_try_again(-result)) {
+			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_READABLE));
+		} else {
+			return rb_fiber_scheduler_io_result(-1, -result);
+		}
+		
+		maximum_size = size - offset;
+	}
+	
+	return rb_fiber_scheduler_io_result(total, 0);
+}
+
 #pragma mark - IO#write
 
 struct io_write_arguments {
 	struct IO_Event_Selector_URing *selector;
 	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
+	off_t offset;
 	char *buffer;
 	size_t length;
 };
@@ -762,7 +815,7 @@ io_write_submit(VALUE _argument)
 	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length,
+	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
 	io_uring_submit_pending(selector);
 	
@@ -792,18 +845,21 @@ io_write_ensure(VALUE _argument)
 }
 
 static int
-io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
 {
 	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
+	
 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_write_arguments arguments = {
 		.selector = selector,
 		.waiting = &waiting,
 		.descriptor = descriptor,
+		.offset = offset,
 		.buffer = buffer,
 		.length = length,
 	};
@@ -826,6 +882,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
 	size_t total = 0;
+	off_t from = io_seekable(descriptor);
 	
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
@@ -833,7 +890,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 	
 	size_t maximum_size = size - offset;
 	while (maximum_size) {
-		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
 		
 		if (result > 0) {
 			total += result;
@@ -867,6 +924,49 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
 	return IO_Event_Selector_URing_io_write(self, argv[0], argv[1], argv[2], argv[3], _offset);
 }
 
+VALUE IO_Event_Selector_URing_io_pwrite(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+	
+	int descriptor = IO_Event_Selector_io_descriptor(io);
+	
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_bytes_for_reading(buffer, &base, &size);
+	
+	size_t length = NUM2SIZET(_length);
+	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
+	off_t from = NUM2OFFT(_from);
+	
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
+	
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
+		
+		if (result > 0) {
+			total += result;
+			offset += result;
+			from += result;
+			if ((size_t)result >= length) break;
+			length -= result;
+		} else if (result == 0) {
+			break;
+		} else if (length > 0 && IO_Event_try_again(-result)) {
+			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_WRITABLE));
+		} else {
+			return rb_fiber_scheduler_io_result(-1, -result);
+		}
+		
+		maximum_size = size - offset;
+	}
+	
+	return rb_fiber_scheduler_io_result(total, 0);
+}
+
 #endif
 
 #pragma mark - IO#close
@@ -1118,6 +1218,8 @@ void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
 	#ifdef HAVE_RUBY_IO_BUFFER_H
 	rb_define_method(IO_Event_Selector_URing, "io_read", IO_Event_Selector_URing_io_read_compatible, -1);
 	rb_define_method(IO_Event_Selector_URing, "io_write", IO_Event_Selector_URing_io_write_compatible, -1);
+	rb_define_method(IO_Event_Selector_URing, "io_pread", IO_Event_Selector_URing_io_pread, 6);
+	rb_define_method(IO_Event_Selector_URing, "io_pwrite", IO_Event_Selector_URing_io_pwrite, 6);
 	#endif
 	
 	rb_define_method(IO_Event_Selector_URing, "io_close", IO_Event_Selector_URing_io_close, 1);
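The new io_pread and io_pwrite methods thread an explicit file position (NUM2OFFT(_from)) into io_uring_prep_read/io_uring_prep_write, whereas io_read and io_write derive it from io_seekable(descriptor). A self-contained liburing sketch (not the gem's code) of the positional-read primitive this builds on:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	if (io_uring_queue_init(8, &ring, 0) < 0) return 1;
	
	int fd = open("/etc/hosts", O_RDONLY);
	if (fd < 0) return 1;
	
	char buffer[64];
	
	// Read up to 64 bytes starting at absolute offset 16, without moving
	// the descriptor's file position (analogous to pread(2)):
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buffer, sizeof(buffer), 16);
	io_uring_submit(&ring);
	
	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	printf("read %d bytes at offset 16\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	
	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}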
data/lib/io/event/version.rb
CHANGED
-		VERSION = "1.7.1"
+		VERSION = "1.7.3"
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.7.1
+  version: 1.7.3
 platform: ruby
 authors:
 - Samuel Williams
@@ -45,7 +45,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2024-10-
+date: 2024-10-21 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
metadata.gz.sig
CHANGED
Binary file