io-event 1.2.2 → 1.3.3
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +7 -24
- data/ext/io/event/selector/array.h +135 -0
- data/ext/io/event/selector/epoll.c +474 -204
- data/ext/io/event/selector/kqueue.c +513 -222
- data/ext/io/event/selector/list.h +88 -0
- data/ext/io/event/selector/selector.c +16 -21
- data/ext/io/event/selector/selector.h +23 -8
- data/ext/io/event/selector/uring.c +459 -223
- data/lib/io/event/interrupt.rb +1 -1
- data/lib/io/event/selector/nonblock.rb +1 -1
- data/lib/io/event/selector/select.rb +123 -22
- data/lib/io/event/selector.rb +2 -6
- data/lib/io/event/support.rb +11 -0
- data/lib/io/event/version.rb +2 -2
- data/lib/io/event.rb +1 -1
- data/license.md +2 -1
- data/readme.md +13 -5
- data.tar.gz.sig +0 -0
- metadata +8 -61
- metadata.gz.sig +0 -0
data/ext/io/event/selector/list.h
@@ -0,0 +1,88 @@
+// Released under the MIT License.
+// Copyright, 2023, by Samuel Williams.
+
+#include <stdio.h>
+#include <assert.h>
+
+struct IO_Event_List_Type {
+};
+
+struct IO_Event_List {
+	struct IO_Event_List *head, *tail;
+	struct IO_Event_List_Type *type;
+};
+
+inline static void IO_Event_List_initialize(struct IO_Event_List *list)
+{
+	list->head = list->tail = list;
+	list->type = 0;
+}
+
+inline static void IO_Event_List_clear(struct IO_Event_List *list)
+{
+	list->head = list->tail = NULL;
+	list->type = 0;
+}
+
+// Append an item to the end of the list.
+inline static void IO_Event_List_append(struct IO_Event_List *list, struct IO_Event_List *node)
+{
+	assert(node->head == NULL);
+	assert(node->tail == NULL);
+	
+	struct IO_Event_List *head = list->head;
+	node->tail = list;
+	node->head = head;
+	list->head = node;
+	head->tail = node;
+}
+
+inline static void IO_Event_List_prepend(struct IO_Event_List *list, struct IO_Event_List *node)
+{
+	assert(node->head == NULL);
+	assert(node->tail == NULL);
+	
+	struct IO_Event_List *tail = list->tail;
+	node->head = list;
+	node->tail = tail;
+	list->tail = node;
+	tail->head = node;
+}
+
+// Pop an item from the list.
+inline static void IO_Event_List_pop(struct IO_Event_List *node)
+{
+	assert(node->head != NULL);
+	assert(node->tail != NULL);
+	
+	struct IO_Event_List *head = node->head;
+	struct IO_Event_List *tail = node->tail;
+	
+	head->tail = tail;
+	tail->head = head;
+	node->head = node->tail = NULL;
+}
+
+inline static void IO_Event_List_free(struct IO_Event_List *node)
+{
+	if (node->head && node->tail) {
+		IO_Event_List_pop(node);
+	}
+}
+
+inline static int IO_Event_List_empty(struct IO_Event_List *list)
+{
+	return list->head == list->tail;
+}
+
+inline static void IO_Event_List_immutable_each(struct IO_Event_List *list, void (*callback)(struct IO_Event_List *node))
+{
+	struct IO_Event_List *node = list->tail;
+	
+	while (node != list) {
+		if (node->type)
+			callback(node);
+		
+		node = node->tail;
+	}
+}
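The new `list.h` is a circular, intrusive doubly-linked list: an empty list points `head` and `tail` back at itself, and nodes are embedded directly in the structures they link. A minimal usage sketch (not from the gem; the `Waiter` struct, `Waiter_Type`, and the bare `"list.h"` include path are illustrative assumptions):

```c
#include <stdio.h>
#include "list.h" // Assumed include path for the header above.

struct Waiter {
	// Embedded as the first member, so a node pointer can be cast back
	// to the containing struct:
	struct IO_Event_List list;
	int id;
};

// `immutable_each` only visits nodes with a non-NULL type:
static struct IO_Event_List_Type Waiter_Type;

static void print_waiter(struct IO_Event_List *node)
{
	struct Waiter *waiter = (struct Waiter *)node;
	printf("waiter %d\n", waiter->id);
}

int main(void)
{
	struct IO_Event_List list;
	IO_Event_List_initialize(&list);
	
	// Nodes must start zeroed, since append/prepend assert head/tail are NULL:
	struct Waiter a = {.list = {NULL, NULL, &Waiter_Type}, .id = 1};
	struct Waiter b = {.list = {NULL, NULL, &Waiter_Type}, .id = 2};
	
	IO_Event_List_append(&list, &a.list);
	IO_Event_List_append(&list, &b.list);
	
	// Walks from the tail sentinel in insertion order: waiter 1, waiter 2.
	IO_Event_List_immutable_each(&list, print_waiter);
	
	// Unlinks the nodes (a no-op if already detached):
	IO_Event_List_free(&a.list);
	IO_Event_List_free(&b.list);
	
	return 0;
}
```

Because the node is the first member, the callback can recover the containing struct with a cast; the reworked epoll and kqueue selectors in this release appear to use the same intrusive pattern to chain waiting fibers off a descriptor without extra allocations.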
data/ext/io/event/selector/selector.c
@@ -25,10 +25,6 @@ static const int DEBUG = 0;
 
 static ID id_transfer, id_alive_p;
 
-#ifndef HAVE_RB_PROCESS_STATUS_WAIT
-static VALUE process_wnohang;
-#endif
-
 VALUE IO_Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
 	// TODO Consider introducing something like `rb_fiber_scheduler_transfer(...)`.
 	#ifdef HAVE__RB_FIBER_TRANSFER
@@ -76,9 +72,9 @@ int IO_Event_Selector_io_descriptor(VALUE io) {
 static ID id_wait;
 static VALUE rb_Process_Status = Qnil;
 
-VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid)
+VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags)
 {
-	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), process_wnohang);
+	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(flags | WNOHANG));
 }
 #endif
 
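Both `Process::Status.wait` paths now force `WNOHANG` into the flags, so reaping a child can never block the event loop; the selector polls and resumes the waiting fiber later if the child is still running. A standalone POSIX sketch (plain `waitpid`, not gem code) of the semantics this relies on:

```c
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	
	if (pid == 0) {
		// Child: still running when the parent polls the first time.
		sleep(1);
		_exit(0);
	}
	
	int status = 0;
	
	// With WNOHANG, this returns 0 immediately instead of blocking:
	pid_t result = waitpid(pid, &status, WNOHANG);
	printf("first poll: %d\n", (int)result);
	
	// A blocking wait eventually reaps the child and returns its pid:
	result = waitpid(pid, &status, 0);
	printf("reaped: %d\n", (int)result);
	
	return 0;
}
```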
@@ -157,7 +153,6 @@ void Init_IO_Event_Selector(VALUE IO_Event_Selector) {
 
 #ifndef HAVE_RB_PROCESS_STATUS_WAIT
 	id_wait = rb_intern("wait");
-	process_wnohang = rb_const_get(rb_mProcess, rb_intern("WNOHANG"));
 	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
 	rb_gc_register_mark_object(rb_Process_Status);
 #endif
@@ -174,23 +169,23 @@ struct wait_and_transfer_arguments {
 };
 
 static void queue_pop(struct IO_Event_Selector *backend, struct IO_Event_Selector_Queue *waiting) {
-	if (waiting->behind) {
-		waiting->behind->infront = waiting->infront;
+	if (waiting->head) {
+		waiting->head->tail = waiting->tail;
 	} else {
-		backend->waiting = waiting->infront;
+		backend->waiting = waiting->tail;
 	}
 	
-	if (waiting->infront) {
-		waiting->infront->behind = waiting->behind;
+	if (waiting->tail) {
+		waiting->tail->head = waiting->head;
 	} else {
-		backend->ready = waiting->behind;
+		backend->ready = waiting->head;
 	}
 }
 
 static void queue_push(struct IO_Event_Selector *backend, struct IO_Event_Selector_Queue *waiting) {
 	if (backend->waiting) {
-		backend->waiting->behind = waiting;
-		waiting->infront = backend->waiting;
+		backend->waiting->head = waiting;
+		waiting->tail = backend->waiting;
 	} else {
 		backend->ready = waiting;
 	}
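The queue links are renamed from `behind`/`infront` to `head`/`tail`, matching the naming in the new `list.h`. A reduced model (simplified structs, not the gem's code; the trailing `backend->waiting = waiting;` assignment is assumed from context outside the hunk) of how pushes chain entries between the `ready` and `waiting` ends:

```c
#include <assert.h>
#include <stddef.h>

struct Queue {
	struct Queue *head, *tail;
};

struct Backend {
	struct Queue *ready;   // Oldest entry; consumed first.
	struct Queue *waiting; // Newest entry; pushes land here.
};

// Mirrors the push logic above: link the new entry behind the current
// newest one, or make it the first ready entry if the queue was empty.
static void queue_push(struct Backend *backend, struct Queue *waiting)
{
	waiting->head = waiting->tail = NULL;
	
	if (backend->waiting) {
		backend->waiting->head = waiting;
		waiting->tail = backend->waiting;
	} else {
		backend->ready = waiting;
	}
	
	backend->waiting = waiting; // Assumed trailing assignment.
}

int main(void)
{
	struct Backend backend = {NULL, NULL};
	struct Queue first, second;
	
	queue_push(&backend, &first);
	queue_push(&backend, &second);
	
	assert(backend.ready == &first);   // First in, first to run.
	assert(backend.waiting == &second);
	assert(first.head == &second && second.tail == &first);
	
	return 0;
}
```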
@@ -221,8 +216,8 @@ VALUE IO_Event_Selector_resume(struct IO_Event_Selector *backend, int argc, VALUE *argv)
 	rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
 	
 	struct IO_Event_Selector_Queue waiting = {
-		.behind = NULL,
-		.infront = NULL,
+		.head = NULL,
+		.tail = NULL,
 		.flags = IO_EVENT_SELECTOR_QUEUE_FIBER,
 		.fiber = rb_fiber_current()
 	};
@@ -254,8 +249,8 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE *argv)
 	rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
 	
 	struct IO_Event_Selector_Queue waiting = {
-		.behind = NULL,
-		.infront = NULL,
+		.head = NULL,
+		.tail = NULL,
 		.flags = IO_EVENT_SELECTOR_QUEUE_FIBER,
 		.fiber = rb_fiber_current()
 	};
@@ -276,8 +271,8 @@ void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber)
 {
 	struct IO_Event_Selector_Queue *waiting = malloc(sizeof(struct IO_Event_Selector_Queue));
 	
-	waiting->behind = NULL;
-	waiting->infront = NULL;
+	waiting->head = NULL;
+	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
 	waiting->fiber = fiber;
 	
data/ext/io/event/selector/selector.h
@@ -40,7 +40,10 @@ enum IO_Event {
 	IO_EVENT_PRIORITY = 2,
 	IO_EVENT_WRITABLE = 4,
 	IO_EVENT_ERROR = 8,
-	IO_EVENT_HANGUP = 16
+	IO_EVENT_HANGUP = 16,
+	
+	// Used by kqueue to differentiate between process exit and file descriptor events:
+	IO_EVENT_EXIT = 32,
 };
 
 void Init_IO_Event_Selector(VALUE IO_Event_Selector);
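The enum gains `IO_EVENT_EXIT` because kqueue delivers process lifecycle events and descriptor readiness through the same queue, so results must record which kind fired. A minimal kqueue sketch (BSD/macOS APIs, not gem code) of a process-exit event arriving alongside descriptor filters:

```c
#include <stdio.h>
#include <sys/event.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int kq = kqueue();
	
	pid_t pid = fork();
	if (pid == 0) {
		// Child: exit shortly after the parent registers interest.
		usleep(100000);
		_exit(0);
	}
	
	// Watch for the child's exit; the same kqueue could also be
	// monitoring EVFILT_READ/EVFILT_WRITE filters for descriptors:
	struct kevent change;
	EV_SET(&change, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, NULL);
	kevent(kq, &change, 1, NULL, 0, NULL);
	
	struct kevent event;
	if (kevent(kq, NULL, 0, &event, 1, NULL) == 1 && event.filter == EVFILT_PROC) {
		printf("process %d exited\n", (int)event.ident);
	}
	
	waitpid(pid, NULL, 0); // Reap the child.
	
	return 0;
}
```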
@@ -63,10 +66,11 @@ VALUE IO_Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
 int IO_Event_Selector_io_descriptor(VALUE io);
 #endif
 
+// Reap a process without hanging.
 #ifdef HAVE_RB_PROCESS_STATUS_WAIT
-#define IO_Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+#define IO_Event_Selector_process_status_wait(pid, flags) rb_process_status_wait(pid, flags | WNOHANG)
 #else
-VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid);
+VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags);
 #endif
 
 int IO_Event_Selector_nonblock_set(int file_descriptor);
@@ -78,8 +82,8 @@ enum IO_Event_Selector_Queue_Flags {
 };
 
 struct IO_Event_Selector_Queue {
-	struct IO_Event_Selector_Queue *behind;
-	struct IO_Event_Selector_Queue *infront;
+	struct IO_Event_Selector_Queue *head;
+	struct IO_Event_Selector_Queue *tail;
 	
 	enum IO_Event_Selector_Queue_Flags flags;
 	
@@ -106,12 +110,23 @@ void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE loop)
 
 static inline
 void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
-	rb_gc_mark(backend->loop);
+	rb_gc_mark_movable(backend->loop);
+	
+	struct IO_Event_Selector_Queue *ready = backend->ready;
+	while (ready) {
+		rb_gc_mark_movable(ready->fiber);
+		ready = ready->head;
+	}
+}
+
+static inline
+void IO_Event_Selector_compact(struct IO_Event_Selector *backend) {
+	backend->loop = rb_gc_location(backend->loop);
 	
 	struct IO_Event_Selector_Queue *ready = backend->ready;
 	while (ready) {
-		rb_gc_mark(ready->fiber);
-		ready = ready->behind;
+		ready->fiber = rb_gc_location(ready->fiber);
+		ready = ready->head;
 	}
 }
 
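Splitting marking into `mark` and `compact` phases, using `rb_gc_mark_movable` plus `rb_gc_location`, lets Ruby's compacting GC relocate the loop and queued fiber objects and then fix up the stored references. A hedged sketch (simplified names and include path, not the gem's exact wiring) of how a selector's typed data might connect the pair:

```c
#include <ruby.h>
#include "selector.h" // Assumed include path for the declarations above.

struct Example_Selector {
	struct IO_Event_Selector backend;
};

static void Example_Selector_mark(void *_selector)
{
	struct Example_Selector *selector = _selector;
	// Marks the loop and queued fibers as movable:
	IO_Event_Selector_mark(&selector->backend);
}

static void Example_Selector_compact(void *_selector)
{
	struct Example_Selector *selector = _selector;
	// Rewrites the stored VALUEs to their post-compaction locations:
	IO_Event_Selector_compact(&selector->backend);
}

static const rb_data_type_t Example_Selector_Type = {
	.wrap_struct_name = "IO::Event::Selector::Example",
	.function = {
		.dmark = Example_Selector_mark,
		.dcompact = Example_Selector_compact,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
```

Without a `dcompact` callback, a type must pin its references with plain `rb_gc_mark`; providing both callbacks is what allows the GC to move these objects.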