io-event 1.3.2 → 1.4.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/io/event/selector/epoll.c +51 -20
- data/ext/io/event/selector/kqueue.c +54 -26
- data/ext/io/event/selector/list.h +2 -1
- data/ext/io/event/selector/selector.c +2 -7
- data/ext/io/event/selector/selector.h +3 -2
- data/ext/io/event/selector/uring.c +4 -1
- data/lib/io/event/selector/select.rb +4 -0
- data/lib/io/event/support.rb +4 -1
- data/lib/io/event/version.rb +1 -1
- data/readme.md +1 -1
- data.tar.gz.sig +0 -0
- metadata +4 -4
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1f53f20e456a5b4a5ef56e4293e9f8f080e30c16b6d3cd7022d7d8da22ebfe84
+  data.tar.gz: cf6186d1eef0725483af5a47141dbf50bd6bcc96513bfd528c35dc693f4d7db2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ff231970a60cedcec409eb1294de3c6a29eff82440bab558cc3618fc537b5461fb4514fbdc401d9a21630193a8ab293529579c191dc5d446c5eb45eccc3c4d4b
+  data.tar.gz: 73a8a5cc5eb62fdc231b2d45c6591e367758ebb0e48a6a179a91877a08d6f0b9cd86ecf8c179ddab5f1cb81c0b05c9c8c345e0983de3e866ad420e189f7bbc4f
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/io/event/selector/epoll.c
CHANGED

@@ -446,6 +446,7 @@ struct process_wait_arguments {
 	struct IO_Event_Selector_EPoll *selector;
 	struct IO_Event_Selector_EPoll_Waiting *waiting;
 	int pid;
+	int flags;
 	int descriptor;
 };
 
@@ -456,7 +457,7 @@ VALUE process_wait_transfer(VALUE _arguments) {
 	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 	
 	if (arguments->waiting->ready) {
-		return IO_Event_Selector_process_status_wait(arguments->pid);
+		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
 	} else {
 		return Qfalse;
 	}
@@ -480,7 +481,7 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 	
 	pid_t pid = NUM2PIDT(_pid);
-	
+	int flags = NUM2INT(_flags);
 	
 	int descriptor = pidfd_open(pid, 0);
 	
@@ -506,6 +507,7 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 	struct process_wait_arguments process_wait_arguments = {
 		.selector = selector,
 		.pid = pid,
+		.flags = flags,
 		.descriptor = descriptor,
 		.waiting = &waiting,
 	};
@@ -802,9 +804,11 @@ struct select_arguments {
 	
 	int count;
 	struct epoll_event events[EPOLL_MAX_EVENTS];
-	
+	
 	struct timespec * timeout;
 	struct timespec storage;
+	
+	struct IO_Event_List saved;
 };
 
 static int make_timeout_ms(struct timespec * timeout) {
@@ -881,7 +885,7 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 }
 
 static
-int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event)
+int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event, struct IO_Event_List *saved)
 {
 	int descriptor = event->data.fd;
 	
@@ -891,29 +895,32 @@ int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, con
 	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
 	struct IO_Event_List *list = &epoll_descriptor->list;
 	struct IO_Event_List *node = list->tail;
-	struct IO_Event_List saved = {NULL, NULL};
 	
 	// Reset the events back to 0 so that we can re-arm if necessary:
 	epoll_descriptor->waiting_events = 0;
 	
+	if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d epoll_descriptor=%p\n", descriptor, ready_events, epoll_descriptor);
+	
 	// It's possible (but unlikely) that the address of list will changing during iteration.
 	while (node != list) {
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: node=%p list=%p type=%p\n", node, list, node->type);
+		
 		struct IO_Event_Selector_EPoll_Waiting *waiting = (struct IO_Event_Selector_EPoll_Waiting *)node;
 		
 		// Compute the intersection of the events we are waiting for and the events that occured:
 		enum IO_Event matching_events = waiting->events & ready_events;
 		
-		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, matching_events=%d\n", descriptor, ready_events, matching_events);
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, waiting_events=%d, matching_events=%d\n", descriptor, ready_events, waiting->events, matching_events);
 		
 		if (matching_events) {
-			IO_Event_List_append(node, &saved);
+			IO_Event_List_append(node, saved);
 			
 			// Resume the fiber:
 			waiting->ready = matching_events;
 			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
 			
-			node = saved.tail;
-			IO_Event_List_pop(&saved);
+			node = saved->tail;
+			IO_Event_List_pop(saved);
 		} else {
 			// We are still waiting for the events:
 			epoll_descriptor->waiting_events |= waiting->events;
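Note on the `saved` parameter above: resuming a fiber can unlink or re-link the current node, so the loop parks a marker node immediately after the current position before transferring control, then continues from the marker once the fiber yields back. A standalone sketch of that technique, using a hypothetical minimal list rather than the library's `IO_Event_List` API:

#include <stdio.h>
#include <stddef.h>

// Hypothetical circular doubly-linked list; `next` plays the role of `tail` above.
struct node { struct node *prev, *next; int value; };

static void insert_after(struct node *position, struct node *node) {
	node->prev = position;
	node->next = position->next;
	position->next->prev = node;
	position->next = node;
}

static void unlink_node(struct node *node) {
	node->prev->next = node->next;
	node->next->prev = node->prev;
	node->prev = node->next = NULL;
}

// Stand-in for resuming a fiber: may mutate the list, here by unlinking the node.
static void visit(struct node *node) {
	printf("visit %d\n", node->value);
	unlink_node(node);
}

static void traverse(struct node *list) {
	struct node sentinel = {NULL, NULL, 0};
	struct node *node = list->next;
	
	while (node != list) {
		insert_after(node, &sentinel); // park a marker after the current node
		visit(node);                   // may unlink `node` or otherwise edit the list
		node = sentinel.next;          // continue from the marker, not the stale node
		unlink_node(&sentinel);
	}
}

int main(void) {
	struct node list = {&list, &list, 0};
	struct node a = {NULL, NULL, 1}, b = {NULL, NULL, 2}, c = {NULL, NULL, 3};
	insert_after(&list, &a);
	insert_after(&a, &b);
	insert_after(&b, &c);
	traverse(&list); // visits 1, 2, 3 even though each visit unlinks its node
	return 0;
}

Hoisting `saved` out of this function and into `struct select_arguments` is what allows the cleanup introduced later in this diff to unlink it even if a resumed fiber raises.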
@@ -924,6 +931,36 @@ int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, con
 	return IO_Event_Selector_EPoll_Descriptor_update(selector, epoll_descriptor->io, descriptor, epoll_descriptor);
 }
 
+static
+VALUE select_handle_events(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	struct IO_Event_Selector_EPoll *selector = arguments->selector;
+	
+	for (int i = 0; i < arguments->count; i += 1) {
+		const struct epoll_event *event = &arguments->events[i];
+		if (DEBUG) fprintf(stderr, "-> fd=%d events=%d\n", event->data.fd, event->events);
+		
+		if (event->data.fd >= 0) {
+			IO_Event_Selector_EPoll_handle(selector, event, &arguments->saved);
+		} else {
+			IO_Event_Interrupt_clear(&selector->interrupt);
+		}
+	}
+	
+	return INT2NUM(arguments->count);
+}
+
+static
+VALUE select_handle_events_ensure(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	
+	IO_Event_List_free(&arguments->saved);
+	
+	return Qnil;
+}
+
 // TODO This function is not re-entrant and we should document and assert as such.
 VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 	struct IO_Event_Selector_EPoll *selector = NULL;
@@ -937,6 +974,7 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 			.tv_sec = 0,
 			.tv_nsec = 0
 		},
+		.saved = {},
 	};
 	
 	arguments.timeout = &arguments.storage;
@@ -958,18 +996,11 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 		}
 	}
 	
-	for (int i = 0; i < arguments.count; i += 1) {
-		const struct epoll_event *event = &arguments.events[i];
-		if (DEBUG) fprintf(stderr, "-> fd=%d events=%d\n", event->data.fd, event->events);
-		
-		if (event->data.fd >= 0) {
-			IO_Event_Selector_EPoll_handle(selector, event);
-		} else {
-			IO_Event_Interrupt_clear(&selector->interrupt);
-		}
+	if (arguments.count) {
+		return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+	} else {
+		return RB_INT2NUM(0);
 	}
-	
-	return INT2NUM(arguments.count);
 }
 
 VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
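The dispatch loop formerly inlined here now lives in `select_handle_events`, and `rb_ensure` guarantees that `select_handle_events_ensure` unlinks the `saved` marker even when a resumed fiber raises. `rb_ensure` is the C-API counterpart of Ruby's `begin`/`ensure`. A minimal sketch of the pattern, with hypothetical `do_work`/`do_cleanup` names:

#include <ruby.h>

static VALUE do_work(VALUE argument) {
	// Body: may raise, e.g. when a resumed fiber re-raises an exception.
	return Qtrue;
}

static VALUE do_cleanup(VALUE argument) {
	// Runs unconditionally, exception or not, like Ruby's `ensure`.
	return Qnil;
}

static VALUE call_with_cleanup(VALUE argument) {
	return rb_ensure(do_work, argument, do_cleanup, argument);
}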
data/ext/io/event/selector/kqueue.c
CHANGED

@@ -433,6 +433,7 @@ struct process_wait_arguments {
 	struct IO_Event_Selector_KQueue *selector;
 	struct IO_Event_Selector_KQueue_Waiting *waiting;
 	pid_t pid;
+	int flags;
 };
 
 static
@@ -461,7 +462,7 @@ VALUE process_wait_transfer(VALUE _arguments) {
 	
 	if (arguments->waiting->ready) {
 		process_prewait(arguments->pid);
-		return IO_Event_Selector_process_status_wait(arguments->pid);
+		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
 	} else {
 		return Qfalse;
 	}
@@ -483,6 +484,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 	
 	pid_t pid = NUM2PIDT(_pid);
+	int flags = NUM2INT(_flags);
 	
 	struct IO_Event_Selector_KQueue_Waiting waiting = {
 		.list = {.type = &IO_Event_Selector_KQueue_process_wait_list_type},
@@ -494,6 +496,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.selector = selector,
 		.waiting = &waiting,
 		.pid = pid,
+		.flags = flags,
 	};
 	
 	int result = IO_Event_Selector_KQueue_Waiting_register(selector, pid, &waiting);
@@ -502,7 +505,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 	if (errno == ESRCH) {
 		process_prewait(pid);
 		
-		return IO_Event_Selector_process_status_wait(pid);
+		return IO_Event_Selector_process_status_wait(pid, flags);
 	}
 	
 	rb_sys_fail("IO_Event_Selector_KQueue_process_wait:IO_Event_Selector_KQueue_Waiting_register");
@@ -818,6 +821,8 @@ struct select_arguments {
 	
 	struct timespec storage;
 	struct timespec *timeout;
+	
+	struct IO_Event_List saved;
 };
 
 static
@@ -859,7 +864,7 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 }
 
 static
-int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor)
+int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor, struct IO_Event_List *saved)
 {
 	// This is the mask of all events that occured for the given descriptor:
 	enum IO_Event ready_events = kqueue_descriptor->ready_events;
@@ -874,7 +879,6 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u
 	
 	struct IO_Event_List *list = &kqueue_descriptor->list;
 	struct IO_Event_List *node = list->tail;
-	struct IO_Event_List saved = {NULL, NULL};
 	
 	// Reset the events back to 0 so that we can re-arm if necessary:
 	kqueue_descriptor->waiting_events = 0;
@@ -888,13 +892,13 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u
 		if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_handle: identifier=%lu, ready_events=%d, matching_events=%d\n", identifier, ready_events, matching_events);
 		
 		if (matching_events) {
-			IO_Event_List_append(node, &saved);
+			IO_Event_List_append(node, saved);
 			
 			waiting->ready = matching_events;
 			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
 			
-			node = saved.tail;
-			IO_Event_List_pop(&saved);
+			node = saved->tail;
+			IO_Event_List_pop(saved);
 		} else {
 			kqueue_descriptor->waiting_events |= waiting->events;
 			node = node->tail;
@@ -904,6 +908,43 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u
 	return IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
 }
 
+static
+VALUE select_handle_events(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	struct IO_Event_Selector_KQueue *selector = arguments->selector;
+	
+	for (int i = 0; i < arguments->count; i += 1) {
+		if (arguments->events[i].udata) {
+			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+			kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments->events[i].filter);
+		}
+	}
+	
+	for (int i = 0; i < arguments->count; i += 1) {
+		if (arguments->events[i].udata) {
+			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+			IO_Event_Selector_KQueue_handle(selector, arguments->events[i].ident, kqueue_descriptor, &arguments->saved);
+		} else {
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+			IO_Event_Interrupt_clear(&selector->interrupt);
+#endif
+		}
+	}
+	
+	return RB_INT2NUM(arguments->count);
+}
+
+static
+VALUE select_handle_events_ensure(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	
+	IO_Event_List_free(&arguments->saved);
+	
+	return Qnil;
+}
+
 VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	struct IO_Event_Selector_KQueue *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
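The kqueue variant of `select_handle_events` makes two passes because one descriptor can appear multiple times in a single kevent batch (for example, as separate EVFILT_READ and EVFILT_WRITE entries). The first pass merges every filter into `ready_events`; the second wakes each waiter once with the complete mask. A trivial standalone sketch of the merge, with assumed event bit values:

#include <stdio.h>

// Assumed event bits, for illustration only:
enum { READABLE = 1, WRITABLE = 2 };

int main(void) {
	// Two kevent entries for the same descriptor in one batch:
	int filters[] = {READABLE, WRITABLE};
	int ready_events = 0;
	
	// Pass 1: merge all filters into one mask.
	for (int i = 0; i < 2; i += 1) {
		ready_events |= filters[i];
	}
	
	// Pass 2 would now notify each waiter once, with the full mask:
	printf("ready_events=%d\n", ready_events); // 3 = READABLE | WRITABLE
	return 0;
}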
@@ -916,7 +957,8 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 		.storage = {
 			.tv_sec = 0,
 			.tv_nsec = 0
-		}
+		},
+		.saved = {},
 	};
 	
 	arguments.timeout = &arguments.storage;
@@ -948,25 +990,11 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 		}
 	}
 	
-	for (int i = 0; i < arguments.count; i += 1) {
-		if (arguments.events[i].udata) {
-			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
-			kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments.events[i].filter);
-		}
-	}
-	
-	for (int i = 0; i < arguments.count; i += 1) {
-		if (arguments.events[i].udata) {
-			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
-			IO_Event_Selector_KQueue_handle(selector, arguments.events[i].ident, kqueue_descriptor);
-		} else {
-#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
-			IO_Event_Interrupt_clear(&selector->interrupt);
-#endif
-		}
+	if (arguments.count) {
+		return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+	} else {
+		return RB_INT2NUM(0);
 	}
-	
-	return RB_INT2NUM(arguments.count);
 }
 
 VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
data/ext/io/event/selector/list.h
CHANGED

@@ -21,6 +21,7 @@ inline static void IO_Event_List_initialize(struct IO_Event_List *list)
 inline static void IO_Event_List_clear(struct IO_Event_List *list)
 {
 	list->head = list->tail = NULL;
+	list->type = 0;
 }
 
 // Append an item to the end of the list.
@@ -64,7 +65,7 @@ inline static void IO_Event_List_pop(struct IO_Event_List *node)
 
 inline static void IO_Event_List_free(struct IO_Event_List *node)
 {
-	if (node->head) {
+	if (node->head && node->tail) {
 		IO_Event_List_pop(node);
 	}
 }
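Two small hardening changes here: `IO_Event_List_clear` now also resets `type`, which the new epoll debug output dereferences, and `IO_Event_List_free` only pops a node when both links are set, so freeing a node that was never appended (such as a zero-initialized `saved` when no events matched) is a safe no-op. A sketch of the implied invariant, with the field layout assumed from the hunks above:

#include <assert.h>

// Assumed layout, for illustration only; `head`/`tail` are NULL while detached.
struct IO_Event_List {
	struct IO_Event_List *head, *tail;
	const void *type;
};

int main(void) {
	// `.saved = {}` in select_arguments zero-initializes the node, so freeing
	// it without ever linking it must be a no-op rather than a NULL dereference:
	struct IO_Event_List saved = {0};
	assert(!(saved.head && saved.tail)); // the guard in free rejects detached nodes
	return 0;
}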
data/ext/io/event/selector/selector.c
CHANGED

@@ -25,10 +25,6 @@ static const int DEBUG = 0;
 
 static ID id_transfer, id_alive_p;
 
-#ifndef HAVE_RB_PROCESS_STATUS_WAIT
-static VALUE process_wnohang;
-#endif
-
 VALUE IO_Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
 	// TODO Consider introducing something like `rb_fiber_scheduler_transfer(...)`.
 	#ifdef HAVE__RB_FIBER_TRANSFER
@@ -76,9 +72,9 @@ int IO_Event_Selector_io_descriptor(VALUE io) {
 static ID id_wait;
 static VALUE rb_Process_Status = Qnil;
 
-VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid)
+VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags)
 {
-	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), process_wnohang);
+	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(flags | WNOHANG));
 }
 #endif
 
@@ -157,7 +153,6 @@ void Init_IO_Event_Selector(VALUE IO_Event_Selector) {
 
 #ifndef HAVE_RB_PROCESS_STATUS_WAIT
 	id_wait = rb_intern("wait");
-	process_wnohang = rb_const_get(rb_mProcess, rb_intern("WNOHANG"));
 	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
 	rb_gc_register_mark_object(rb_Process_Status);
 #endif
data/ext/io/event/selector/selector.h
CHANGED

@@ -66,10 +66,11 @@ VALUE IO_Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
 int IO_Event_Selector_io_descriptor(VALUE io);
 #endif
 
+// Reap a process without hanging.
 #ifdef HAVE_RB_PROCESS_STATUS_WAIT
-#define IO_Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+#define IO_Event_Selector_process_status_wait(pid, flags) rb_process_status_wait(pid, flags | WNOHANG)
 #else
-VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid);
+VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags);
 #endif
 
 int IO_Event_Selector_nonblock_set(int file_descriptor);
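Both branches now force `WNOHANG`, so reaping can never block the event loop: the selectors only call this after the kernel (pidfd, kqueue's NOTE_EXIT, or io_uring) reports the child's exit, and any caller-supplied flags are OR'd with `WNOHANG` at this boundary. A hedged usage sketch (it relies on the declaration above, so it is illustrative rather than standalone):

#include <ruby.h>
#include <sys/wait.h> // WNOHANG

// Whichever branch is compiled in, the effective flags are `flags | WNOHANG`,
// so a spurious wakeup yields nil rather than a blocked event loop.
static VALUE reap_child(rb_pid_t pid)
{
	return IO_Event_Selector_process_status_wait(pid, 0);
}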
data/ext/io/event/selector/uring.c
CHANGED

@@ -428,6 +428,7 @@ struct process_wait_arguments {
 	struct IO_Event_Selector_URing_Waiting *waiting;
 	
 	pid_t pid;
+	int flags;
 	int descriptor;
 };
 
@@ -438,7 +439,7 @@ VALUE process_wait_transfer(VALUE _arguments) {
 	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 	
 	if (arguments->waiting->result) {
-		return IO_Event_Selector_process_status_wait(arguments->pid);
+		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
 	} else {
 		return Qfalse;
 	}
@@ -460,6 +461,7 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 	
 	pid_t pid = NUM2PIDT(_pid);
+	int flags = NUM2INT(_flags);
 	
 	int descriptor = pidfd_open(pid, 0);
 	if (descriptor < 0) {
@@ -477,6 +479,7 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.selector = selector,
 		.waiting = &waiting,
 		.pid = pid,
+		.flags = flags,
 		.descriptor = descriptor,
 	};
 
data/lib/io/event/selector/select.rb
CHANGED

@@ -280,7 +280,9 @@ module IO::Event
 			elsif Support.fiber_scheduler_v1?
 				# Ruby <= 3.1, limited IO::Buffer support.
 				def io_read(fiber, _io, buffer, length, offset = 0)
+					# We need to avoid any internal buffering, so we use a duplicated IO object:
 					io = IO.for_fd(_io.fileno, autoclose: false)
+					
 					total = 0
 					
 					maximum_size = buffer.size - offset
@@ -321,7 +323,9 @@ module IO::Event
 				end
 				
 				def io_write(fiber, _io, buffer, length, offset = 0)
+					# We need to avoid any internal buffering, so we use a duplicated IO object:
 					io = IO.for_fd(_io.fileno, autoclose: false)
+					
 					total = 0
 					
 					maximum_size = buffer.size - offset
data/lib/io/event/support.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 # Released under the MIT License.
-# Copyright, 2022, by Samuel Williams.
+# Copyright, 2022-2023, by Samuel Williams.
 
 class IO
 	module Event
@@ -20,6 +20,9 @@ class IO
 		
 		def self.fiber_scheduler_v3?
 			if fiber_scheduler_v2?
+				return true if RUBY_VERSION >= "3.3"
+				
+				# Feature detection if required:
 				begin
 					IO::Buffer.new.slice(0, 0).write(STDOUT)
 					return true
data/lib/io/event/version.rb
CHANGED
data/readme.md
CHANGED
@@ -28,4 +28,4 @@ This project uses the [Developer Certificate of Origin](https://developercertifi
 
 ### Contributor Covenant
 
-This project is governed by [Contributor Covenant](https://www.contributor-covenant.org/). All contributors and participants agree to abide by its terms.
+This project is governed by the [Contributor Covenant](https://www.contributor-covenant.org/). All contributors and participants agree to abide by its terms.
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.3.2
+  version: 1.4.0
 platform: ruby
 authors:
 - Samuel Williams
@@ -42,7 +42,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2023-
+date: 2023-12-28 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
@@ -90,14 +90,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '3.
+      version: '3.1'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.3
 signing_key:
 specification_version: 4
 summary: An event loop.
metadata.gz.sig
CHANGED
Binary file