io-event 1.7.0 → 1.7.2
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +3 -0
- data/ext/io/event/selector/array.h +52 -20
- data/ext/io/event/selector/epoll.c +14 -3
- data/ext/io/event/selector/kqueue.c +15 -3
- data/ext/io/event/selector/list.h +19 -1
- data/ext/io/event/selector/selector.c +1 -2
- data/ext/io/event/selector/selector.h +2 -2
- data/ext/io/event/selector/uring.c +110 -7
- data/lib/io/event/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +3 -3
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 00ee9c58a1f3c352028955bd7fa687b868a38d658f076ed128999e434a50e67f
+  data.tar.gz: 8bb4a29b1f992e2fdf5b155e8efb1361e70cf1a635bbcac75723d6e255b27d92
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6882b4c368d9b5e1fd32d3545409d311db6d5624ae9353f1112c0f3960a85e62685aaa05ff9a20612c2e17bb8d286e1ce88be5024ed0d343713c8ddf378f9d05
+  data.tar.gz: be96ca6107b200375af43eefad3b549eff15a7e4ad90798fb695c20dcd711f557b405a9c8f36d40324531ab92f03e1dbbaadb07419d7091ae7f6247567573877
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/extconf.rb
CHANGED
@@ -30,6 +30,9 @@ have_func("rb_ext_ractor_safe")
 have_func("&rb_fiber_transfer")
 
 if have_library("uring") and have_header("liburing.h")
+	# We might want to consider using this in the future:
+	# have_func("io_uring_submit_and_wait_timeout", "liburing.h")
+	
 	$srcs << "io/event/selector/uring.c"
 end
 
data/ext/io/event/selector/array.h
CHANGED
@@ -8,6 +8,11 @@
 #include <errno.h>
 #include <assert.h>
 
+enum {
+	IO_EVENT_ARRAY_MAXIMUM_COUNT = SIZE_MAX / sizeof(void*),
+	IO_EVENT_ARRAY_DEFAULT_COUNT = 128
+};
+
 struct IO_Event_Array {
 	// The array of pointers to elements:
 	void **base;
@@ -25,20 +30,27 @@ struct IO_Event_Array {
 	void (*element_free)(void*);
 };
 
-inline static
+inline static int IO_Event_Array_allocate(struct IO_Event_Array *array, size_t count, size_t element_size)
 {
+	array->limit = 0;
+	array->element_size = element_size;
+	
 	if (count) {
 		array->base = (void**)calloc(count, sizeof(void*));
-		
+		
+		if (array->base == NULL) {
+			return -1;
+		}
 		
 		array->count = count;
+		
+		return 1;
 	} else {
 		array->base = NULL;
 		array->count = 0;
+		
+		return 0;
 	}
-	
-	array->limit = 0;
-	array->element_size = element_size;
 }
 
 inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *array)
@@ -49,32 +61,51 @@ inline static size_t IO_Event_Array_memory_size(const struct IO_Event_Array *arr
 
 inline static void IO_Event_Array_free(struct IO_Event_Array *array)
 {
-	
-	void
-	
-	
-	
-	
+	if (array->base) {
+		void **base = array->base;
+		size_t limit = array->limit;
+		
+		array->base = NULL;
+		array->count = 0;
+		array->limit = 0;
+		
+		for (size_t i = 0; i < limit; i += 1) {
+			void *element = base[i];
+			if (element) {
+				array->element_free(element);
+				
+				free(element);
+			}
 		}
+		
+		free(base);
 	}
-	
-	if (array->base)
-		free(array->base);
-	
-	array->base = NULL;
-	array->count = 0;
-	array->limit = 0;
 }
 
 inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t count)
 {
 	if (count <= array->count) {
+		// Already big enough:
 		return 0;
 	}
 	
-	
+	if (count > IO_EVENT_ARRAY_MAXIMUM_COUNT) {
+		errno = ENOMEM;
+		return -1;
+	}
+	
 	size_t new_count = array->count;
-	
+	
+	// If the array is empty, we need to set the initial size:
+	if (new_count == 0) new_count = IO_EVENT_ARRAY_DEFAULT_COUNT;
+	else while (new_count < count) {
+		// Ensure we don't overflow:
+		if (new_count > (IO_EVENT_ARRAY_MAXIMUM_COUNT / 2)) {
+			new_count = IO_EVENT_ARRAY_MAXIMUM_COUNT;
+			break;
+		}
+		
+		// Compute the next multiple (ideally a power of 2):
 		new_count *= 2;
 	}
 
@@ -90,6 +121,7 @@ inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t cou
 	array->base = (void**)new_base;
 	array->count = new_count;
 	
+	// Resizing sucessful:
 	return 1;
 }
 
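
The array changes above make two guarantees explicit: allocation failure is now reported through an `int` return value, and the geometric growth in `IO_Event_Array_resize` is clamped against `IO_EVENT_ARRAY_MAXIMUM_COUNT` (derived from `SIZE_MAX`) so `new_count *= 2` can never overflow. A minimal standalone sketch of that growth policy (illustrative names, not the gem's code):

#include <stdio.h>
#include <stdint.h>

#define MAXIMUM_COUNT (SIZE_MAX / sizeof(void*))
#define DEFAULT_COUNT 128

static size_t next_capacity(size_t current, size_t required)
{
	if (current == 0) current = DEFAULT_COUNT;
	
	while (current < required) {
		// Doubling past MAXIMUM_COUNT/2 would overflow size_t; clamp instead:
		if (current > MAXIMUM_COUNT / 2) return MAXIMUM_COUNT;
		current *= 2;
	}
	
	return current;
}

int main(void)
{
	printf("%zu\n", next_capacity(0, 1));     // 128 (initial size)
	printf("%zu\n", next_capacity(128, 300)); // 512 (doubled twice)
	return 0;
}
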
data/ext/io/event/selector/epoll.c
CHANGED
@@ -241,7 +241,7 @@ int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *se
 	} else {
 		// The IO has changed, we need to reset the state:
 		epoll_descriptor->registered_events = 0;
-		epoll_descriptor->io
+		RB_OBJ_WRITE(selector->backend.self, &epoll_descriptor->io, io);
 	}
 	
 	if (epoll_descriptor->waiting_events == 0) {
@@ -251,7 +251,7 @@ int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *se
 		epoll_descriptor->registered_events = 0;
 	}
 	
-	epoll_descriptor->io
+	RB_OBJ_WRITE(selector->backend.self, &epoll_descriptor->io, 0);
 	
 	return 0;
 }
@@ -337,7 +337,10 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
 	
 	selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
-	IO_Event_Array_allocate(&selector->descriptors,
+	int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_EPoll_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -507,6 +510,8 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.events = IO_EVENT_READABLE,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
 	
 	if (result == -1) {
@@ -566,6 +571,8 @@ VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 		.events = RB_NUM2INT(events),
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
 	
 	if (result == -1) {
@@ -664,6 +671,8 @@ VALUE IO_Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
 }
 
@@ -760,6 +769,8 @@ VALUE IO_Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
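
The recurring change in this file (and in kqueue.c and uring.c below) is GC correctness: bare C stores of Ruby `VALUE`s are replaced with `RB_OBJ_WRITE`, and `RB_OBJ_WRITTEN(self, Qundef, fiber)` is added where a fiber reference is captured indirectly. Both are Ruby's generational-GC write barriers; without them, a reference from an old object's C memory to a young object can be invisible to a minor GC. A hedged sketch of the pattern (`struct Thing` and both function names are illustrative, not io-event code):

#include <ruby.h>

struct Thing {
	VALUE io; // A Ruby object stored in C-managed memory.
};

static void thing_set_io(VALUE self, struct Thing *thing, VALUE io)
{
	// A bare `thing->io = io;` hides this reference from RGenGC: if `self`
	// is already old and `io` is young, a minor GC may collect `io`.
	// RB_OBJ_WRITE performs the store AND records the parent->child edge:
	RB_OBJ_WRITE(self, &thing->io, io);
}

static void thing_reference_fiber(VALUE self, VALUE fiber)
{
	// When the store happens elsewhere (e.g. into a stack-allocated waiting
	// struct that the selector retains), RB_OBJ_WRITTEN declares "self now
	// references fiber" after the fact; Qundef marks the old value as unknown.
	RB_OBJ_WRITTEN(self, Qundef, fiber);
}
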
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -311,7 +311,11 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
 
 	selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
-	
+	
+	int result = IO_Event_Array_allocate(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_KQueue_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -501,6 +505,8 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.events = IO_EVENT_EXIT,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct process_wait_arguments process_wait_arguments = {
 		.selector = selector,
 		.waiting = &waiting,
@@ -564,6 +570,8 @@ VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE
 		.events = RB_NUM2INT(events),
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	int result = IO_Event_Selector_KQueue_Waiting_register(selector, descriptor, &waiting);
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Selector_KQueue_io_wait:IO_Event_Selector_KQueue_Waiting_register");
@@ -667,6 +675,8 @@ VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
 }
 
@@ -773,6 +783,8 @@ VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		.offset = offset,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
@@ -979,7 +991,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	// Non-comprehensive testing shows this gives a 1.5x speedup.
 	
 	// First do the syscall with no timeout to get any immediately available events:
-	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout="
+	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout=" IO_EVENT_PRINTF_TIMESPEC "\r\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
 	select_internal_with_gvl(&arguments);
 	if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl done\r\n");
 	
@@ -997,7 +1009,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	struct timespec start_time;
 	IO_Event_Selector_current_time(&start_time);
 	
-	if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout="
+	if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout=" IO_EVENT_PRINTF_TIMESPEC "\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(arguments.storage));
 	select_internal_without_gvl(&arguments);
 	
 	struct timespec end_time;
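
The two debug lines above instrument the selector's two-phase wait: it first polls with a zero timeout while still holding the GVL, and only blocks (releasing the GVL) when nothing is immediately ready; the comment in the hunk credits this with roughly a 1.5x speedup in the already-ready case. A standalone sketch of the same pattern, using poll(2) rather than kevent(2) for brevity:

#include <poll.h>

// Returns the number of ready events; fds/nfds/timeout as for poll(2).
static int two_phase_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	// Phase 1: zero timeout; returns immediately if events are pending,
	// avoiding the cost of a GVL release/reacquire in that common case.
	int result = poll(fds, nfds, 0);
	if (result != 0) return result;
	
	// Phase 2: nothing ready; now pay for the blocking wait. In the gem,
	// this is the step performed without the GVL so other threads can run.
	return poll(fds, nfds, timeout);
}
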
data/ext/io/event/selector/list.h
CHANGED
@@ -38,6 +38,7 @@ inline static void IO_Event_List_append(struct IO_Event_List *list, struct IO_Ev
 	head->tail = node;
 }
 
+// Prepend an item to the beginning of the list.
 inline static void IO_Event_List_prepend(struct IO_Event_List *list, struct IO_Event_List *node)
 {
 	assert(node->head == NULL);
@@ -64,6 +65,7 @@ inline static void IO_Event_List_pop(struct IO_Event_List *node)
 	node->head = node->tail = NULL;
 }
 
+// Remove an item from the list, if it is in a list.
 inline static void IO_Event_List_free(struct IO_Event_List *node)
 {
 	if (node->head && node->tail) {
@@ -71,11 +73,27 @@ inline static void IO_Event_List_free(struct IO_Event_List *node)
 	}
 }
 
-
+// Calculate the memory size of the list nodes.
+inline static size_t IO_Event_List_memory_size(const struct IO_Event_List *list)
+{
+	size_t memsize = 0;
+	
+	const struct IO_Event_List *node = list->tail;
+	while (node != list) {
+		memsize += sizeof(struct IO_Event_List);
+		node = node->tail;
+	}
+	
+	return memsize;
+}
+
+// Return true if the list is empty.
+inline static int IO_Event_List_empty(const struct IO_Event_List *list)
 {
 	return list->head == list->tail;
 }
 
+// Enumerate all items in the list, assuming the list will not be modified during iteration.
 inline static void IO_Event_List_immutable_each(struct IO_Event_List *list, void (*callback)(struct IO_Event_List *node))
 {
 	struct IO_Event_List *node = list->tail;
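
`IO_Event_List` is an intrusive, circular doubly-linked list whose head node doubles as a sentinel, which is why the new `IO_Event_List_memory_size` can simply follow `tail` pointers until it arrives back at `list` itself, and why an empty list is one whose sentinel points at itself. A self-contained model of that traversal (an assumption-level reconstruction with its own names, not code copied from the gem):

#include <stdio.h>
#include <stddef.h>

struct Node { struct Node *head, *tail; };

static void list_initialize(struct Node *list)
{
	list->head = list->tail = list; // Empty: sentinel links to itself.
}

static void list_append(struct Node *list, struct Node *node)
{
	node->tail = list;
	node->head = list->head;
	list->head->tail = node;
	list->head = node;
}

static size_t list_count(const struct Node *list)
{
	size_t count = 0;
	// Terminates because the list is circular: we must return to the sentinel.
	for (const struct Node *node = list->tail; node != list; node = node->tail)
		count += 1;
	return count;
}

int main(void)
{
	struct Node list, a, b;
	list_initialize(&list);
	list_append(&list, &a);
	list_append(&list, &b);
	printf("%zu\n", list_count(&list)); // 2
	return 0;
}
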
data/ext/io/event/selector/selector.c
CHANGED
@@ -287,8 +287,7 @@ void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber
 	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
 	
-	waiting->fiber
-	RB_OBJ_WRITTEN(backend->self, Qundef, fiber);
+	RB_OBJ_WRITE(backend->self, &waiting->fiber, fiber);
 	
 	queue_push(backend, waiting);
 }
data/ext/io/event/selector/selector.h
CHANGED
@@ -152,5 +152,5 @@ int IO_Event_Selector_queue_flush(struct IO_Event_Selector *backend);
 void IO_Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
 void IO_Event_Selector_current_time(struct timespec *time);
 
-#define
-#define
+#define IO_EVENT_PRINTF_TIMESPEC "%lld.%.9ld"
+#define IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
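
These two macros (replacing previously truncated definitions) let a `struct timespec` be spliced into an `fprintf` format string via compile-time string-literal concatenation; the `(long long)` cast keeps `%lld` correct regardless of the platform's `time_t` width. A minimal usage sketch:

#include <stdio.h>
#include <time.h>

#define IO_EVENT_PRINTF_TIMESPEC "%lld.%.9ld"
#define IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(ts) (long long)((ts).tv_sec), (ts).tv_nsec

int main(void)
{
	struct timespec timeout = {.tv_sec = 1, .tv_nsec = 500000000};
	
	// Expands to: fprintf(stderr, "timeout=%lld.%.9ld\n", (long long)(...), ...);
	fprintf(stderr, "timeout=" IO_EVENT_PRINTF_TIMESPEC "\n", IO_EVENT_PRINTF_TIMESPEC_ARGUMENTS(timeout));
	// Prints: timeout=1.500000000
	return 0;
}
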
data/ext/io/event/selector/uring.c
CHANGED
@@ -35,6 +35,7 @@
 enum {
 	DEBUG = 0,
 	DEBUG_COMPLETION = 0,
+	DEBUG_IO_READ = 1,
 };
 
 enum {URING_ENTRIES = 64};
@@ -138,6 +139,7 @@ size_t IO_Event_Selector_URing_Type_size(const void *_selector)
 	
 	return sizeof(struct IO_Event_Selector_URing)
 		+ IO_Event_Array_memory_size(&selector->completions)
+		+ IO_Event_List_memory_size(&selector->free_list)
 	;
 }
 
@@ -236,7 +238,10 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 	
 	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
 	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
-	IO_Event_Array_allocate(&selector->completions,
+	int result = IO_Event_Array_allocate(&selector->completions, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_URing_Completion));
+	if (result < 0) {
+		rb_sys_fail("IO_Event_Selector_URing_allocate:IO_Event_Array_allocate");
+	}
 	
 	return instance;
 }
@@ -482,6 +487,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct process_wait_arguments process_wait_arguments = {
@@ -585,6 +592,8 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(self, Qundef, fiber);
+	
 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
@@ -628,6 +637,7 @@ struct io_read_arguments {
 	struct IO_Event_Selector_URing *selector;
 	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
+	off_t offset;
 	char *buffer;
 	size_t length;
 };
@@ -641,7 +651,7 @@ io_read_submit(VALUE _arguments)
 	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length,
+	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
 	io_uring_submit_now(selector);
 	
@@ -671,18 +681,21 @@ io_read_ensure(VALUE _arguments)
 }
 
 static int
-io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
 {
 	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
+	
 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_read_arguments io_read_arguments = {
 		.selector = selector,
 		.waiting = &waiting,
 		.descriptor = descriptor,
+		.offset = offset,
 		.buffer = buffer,
 		.length = length
 	};
@@ -705,10 +718,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
 	size_t total = 0;
+	off_t from = io_seekable(descriptor);
 	
 	size_t maximum_size = size - offset;
 	while (maximum_size) {
-		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
 		
 		if (result > 0) {
 			total += result;
@@ -742,12 +756,52 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
 	return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
 }
 
+VALUE IO_Event_Selector_URing_io_pread(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+	
+	int descriptor = IO_Event_Selector_io_descriptor(io);
+	
+	void *base;
+	size_t size;
+	rb_io_buffer_get_bytes_for_writing(buffer, &base, &size);
+	
+	size_t length = NUM2SIZET(_length);
+	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
+	off_t from = NUM2OFFT(_from);
+	
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
+		
+		if (result > 0) {
+			total += result;
+			offset += result;
+			from += result;
+			if ((size_t)result >= length) break;
+			length -= result;
+		} else if (result == 0) {
+			break;
+		} else if (length > 0 && IO_Event_try_again(-result)) {
+			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_READABLE));
+		} else {
+			return rb_fiber_scheduler_io_result(-1, -result);
+		}
+		
+		maximum_size = size - offset;
+	}
+	
+	return rb_fiber_scheduler_io_result(total, 0);
+}
+
 #pragma mark - IO#write
 
 struct io_write_arguments {
 	struct IO_Event_Selector_URing *selector;
 	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
+	off_t offset;
 	char *buffer;
 	size_t length;
 };
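
In the `io_pread` loop above, a negative `result` is a negated errno from the completion queue, which is why the retry branch tests `IO_Event_try_again(-result)` before suspending the fiber in `io_wait`. A small sketch of that convention (the errno set shown is an assumption for illustration; `IO_Event_try_again`'s exact test lives elsewhere in the gem):

#include <errno.h>
#include <stddef.h>

// result: a CQE-style value, >= 0 bytes transferred or -errno on failure.
// Returns 1 if the operation should be retried after waiting on the descriptor.
static int should_retry(int result, size_t remaining)
{
	if (result >= 0) return 0; // Success or EOF: nothing to retry.
	
	int error = -result; // Undo io_uring's negated-errno convention.
	
	// Assumed membership test; the gem's IO_Event_try_again may differ:
	return remaining > 0 && (error == EAGAIN || error == EWOULDBLOCK || error == EINTR);
}
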
@@ -761,7 +815,7 @@ io_write_submit(VALUE _argument)
 	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
 	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length,
+	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, arguments->offset);
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
 	io_uring_submit_pending(selector);
 	
@@ -791,18 +845,21 @@ io_write_ensure(VALUE _argument)
 }
 
 static int
-io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length, off_t offset)
 {
 	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
 	};
 	
+	RB_OBJ_WRITTEN(selector->backend.self, Qundef, fiber);
+	
 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
 	struct io_write_arguments arguments = {
 		.selector = selector,
 		.waiting = &waiting,
 		.descriptor = descriptor,
+		.offset = offset,
 		.buffer = buffer,
 		.length = length,
 	};
@@ -825,6 +882,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
 	size_t total = 0;
+	off_t from = io_seekable(descriptor);
 	
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
@@ -832,7 +890,7 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 	
 	size_t maximum_size = size - offset;
 	while (maximum_size) {
-		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
 		
 		if (result > 0) {
 			total += result;
@@ -866,6 +924,49 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
 	return IO_Event_Selector_URing_io_write(self, argv[0], argv[1], argv[2], argv[3], _offset);
 }
 
+VALUE IO_Event_Selector_URing_io_pwrite(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _from, VALUE _length, VALUE _offset) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+	
+	int descriptor = IO_Event_Selector_io_descriptor(io);
+	
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_bytes_for_reading(buffer, &base, &size);
+	
+	size_t length = NUM2SIZET(_length);
+	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
+	off_t from = NUM2OFFT(_from);
+	
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
+	
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
+		
+		if (result > 0) {
+			total += result;
+			offset += result;
+			from += result;
+			if ((size_t)result >= length) break;
+			length -= result;
+		} else if (result == 0) {
+			break;
+		} else if (length > 0 && IO_Event_try_again(-result)) {
+			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_WRITABLE));
+		} else {
+			return rb_fiber_scheduler_io_result(-1, -result);
+		}
+		
+		maximum_size = size - offset;
+	}
+	
+	return rb_fiber_scheduler_io_result(total, 0);
+}
+
 #endif
 
 #pragma mark - IO#close
@@ -1117,6 +1218,8 @@ void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
 #ifdef HAVE_RUBY_IO_BUFFER_H
 	rb_define_method(IO_Event_Selector_URing, "io_read", IO_Event_Selector_URing_io_read_compatible, -1);
 	rb_define_method(IO_Event_Selector_URing, "io_write", IO_Event_Selector_URing_io_write_compatible, -1);
+	rb_define_method(IO_Event_Selector_URing, "io_pread", IO_Event_Selector_URing_io_pread, 6);
+	rb_define_method(IO_Event_Selector_URing, "io_pwrite", IO_Event_Selector_URing_io_pwrite, 6);
 #endif
 	
 	rb_define_method(IO_Event_Selector_URing, "io_close", IO_Event_Selector_URing_io_close, 1);
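
The new `io_pread`/`io_pwrite` entry points thread an explicit `off_t` through to `io_uring_prep_read`/`io_uring_prep_write`, while the plain `io_read`/`io_write` paths pass whatever `io_seekable(descriptor)` returns. In io_uring, a non-negative offset behaves like pread(2)/pwrite(2), and an offset of -1 means "use and advance the current file position", like read(2)/write(2). A hedged liburing sketch of that convention (separate from the gem; `example.txt` is a placeholder):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	if (io_uring_queue_init(8, &ring, 0) < 0) return 1;
	
	int fd = open("example.txt", O_RDONLY); // Hypothetical input file.
	if (fd < 0) return 1;
	
	char buffer[64];
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	
	// offset = 16: read like pread(2), without moving the file position.
	// offset = -1 would read at (and advance) the current file position.
	io_uring_prep_read(sqe, fd, buffer, sizeof(buffer), 16);
	io_uring_submit(&ring);
	
	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	printf("result=%d\n", cqe->res); // Bytes read, or -errno on failure.
	io_uring_cqe_seen(&ring, cqe);
	
	io_uring_queue_exit(&ring);
	return 0;
}
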
data/lib/io/event/version.rb
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.7.0
+  version: 1.7.2
 platform: ruby
 authors:
 - Samuel Williams
@@ -45,7 +45,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2024-10-
+date: 2024-10-16 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
@@ -104,7 +104,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: An event loop.
metadata.gz.sig
CHANGED
Binary file