io-event 1.2.3 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +5 -3
- data/ext/io/event/selector/array.h +135 -0
- data/ext/io/event/selector/epoll.c +435 -196
- data/ext/io/event/selector/kqueue.c +481 -218
- data/ext/io/event/selector/list.h +87 -0
- data/ext/io/event/selector/selector.c +14 -14
- data/ext/io/event/selector/selector.h +20 -6
- data/ext/io/event/selector/uring.c +399 -216
- data/lib/io/event/selector/select.rb +34 -14
- data/lib/io/event/selector.rb +1 -5
- data/lib/io/event/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +4 -2
- metadata.gz.sig +0 -0
data/ext/io/event/selector/list.h (new file):

```diff
@@ -0,0 +1,87 @@
+// Released under the MIT License.
+// Copyright, 2023, by Samuel Williams.
+
+#include <stdio.h>
+#include <assert.h>
+
+struct IO_Event_List_Type {
+};
+
+struct IO_Event_List {
+	struct IO_Event_List *head, *tail;
+	struct IO_Event_List_Type *type;
+};
+
+inline static void IO_Event_List_initialize(struct IO_Event_List *list)
+{
+	list->head = list->tail = list;
+	list->type = 0;
+}
+
+inline static void IO_Event_List_clear(struct IO_Event_List *list)
+{
+	list->head = list->tail = NULL;
+}
+
+// Append an item to the end of the list.
+inline static void IO_Event_List_append(struct IO_Event_List *list, struct IO_Event_List *node)
+{
+	assert(node->head == NULL);
+	assert(node->tail == NULL);
+	
+	struct IO_Event_List *head = list->head;
+	node->tail = list;
+	node->head = head;
+	list->head = node;
+	head->tail = node;
+}
+
+inline static void IO_Event_List_prepend(struct IO_Event_List *list, struct IO_Event_List *node)
+{
+	assert(node->head == NULL);
+	assert(node->tail == NULL);
+	
+	struct IO_Event_List *tail = list->tail;
+	node->head = list;
+	node->tail = tail;
+	list->tail = node;
+	tail->head = node;
+}
+
+// Pop an item from the list.
+inline static void IO_Event_List_pop(struct IO_Event_List *node)
+{
+	assert(node->head != NULL);
+	assert(node->tail != NULL);
+	
+	struct IO_Event_List *head = node->head;
+	struct IO_Event_List *tail = node->tail;
+	
+	head->tail = tail;
+	tail->head = head;
+	node->head = node->tail = NULL;
+}
+
+inline static void IO_Event_List_free(struct IO_Event_List *node)
+{
+	if (node->head != node->tail) {
+		IO_Event_List_pop(node);
+	}
+}
+
+inline static int IO_Event_List_empty(struct IO_Event_List *list)
+{
+	return list->head == list->tail;
+}
+
+inline static void IO_Event_List_immutable_each(struct IO_Event_List *list, void (*callback)(struct IO_Event_List *node))
+{
+	struct IO_Event_List *node = list->tail;
+	
+	while (node != list) {
+		if (node->type)
+			callback(node);
+		
+		node = node->tail;
+	}
+}
```
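The new `list.h` implements an intrusive, circular doubly-linked list: an empty list is a sentinel whose `head` and `tail` point back at itself, nodes are embedded directly in their owning structures, and `type` doubles as a validity marker (the iteration callback is skipped for nodes with `type == 0`, such as the sentinel). A minimal usage sketch, not taken from the gem (the `Item` struct and all names below are hypothetical):

```c
#include <stdio.h>
#include "list.h" // data/ext/io/event/selector/list.h, as added above

// An item embeds the list node as its first member, so a node pointer
// can be cast directly back to the containing item.
struct Item {
	struct IO_Event_List node;
	int value;
};

// A non-zero type marks real nodes; the sentinel keeps type == 0.
static struct IO_Event_List_Type Item_Type;

static void Item_print(struct IO_Event_List *node)
{
	printf("item: %d\n", ((struct Item *)node)->value);
}

int main(void)
{
	struct IO_Event_List list;
	IO_Event_List_initialize(&list); // sentinel points at itself
	
	// Nodes must start with head/tail == NULL: append asserts this.
	struct Item a = {.node = {.type = &Item_Type}, .value = 1};
	struct Item b = {.node = {.type = &Item_Type}, .value = 2};
	
	IO_Event_List_append(&list, &a.node);
	IO_Event_List_append(&list, &b.node);
	
	// Walks from list->tail following ->tail links until it returns to
	// the sentinel; prints "item: 1" then "item: 2".
	IO_Event_List_immutable_each(&list, Item_print);
	
	// Unlink a node; its head/tail reset to NULL so it can be re-appended.
	IO_Event_List_pop(&a.node);
	
	return 0;
}
```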
data/ext/io/event/selector/selector.c:

```diff
@@ -174,23 +174,23 @@ struct wait_and_transfer_arguments {
 };
 
 static void queue_pop(struct IO_Event_Selector *backend, struct IO_Event_Selector_Queue *waiting) {
-	if (waiting->behind) {
-		waiting->behind->infront = waiting->infront;
+	if (waiting->head) {
+		waiting->head->tail = waiting->tail;
 	} else {
-		backend->waiting = waiting->infront;
+		backend->waiting = waiting->tail;
 	}
 	
-	if (waiting->infront) {
-		waiting->infront->behind = waiting->behind;
+	if (waiting->tail) {
+		waiting->tail->head = waiting->head;
 	} else {
-		backend->ready = waiting->behind;
+		backend->ready = waiting->head;
 	}
 }
 
 static void queue_push(struct IO_Event_Selector *backend, struct IO_Event_Selector_Queue *waiting) {
 	if (backend->waiting) {
-		backend->waiting->behind = waiting;
-		waiting->infront = backend->waiting;
+		backend->waiting->head = waiting;
+		waiting->tail = backend->waiting;
 	} else {
 		backend->ready = waiting;
 	}
```
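These are straightforward renames: the queue's link pointers become `head`/`tail`, matching the naming used by the new `list.h`. Unlike the circular list above, though, this queue is NULL-terminated, with `backend->waiting` and `backend->ready` as its two endpoints, which is why `queue_pop` needs an `else` branch for boundary nodes. A self-contained sketch of the same shape (hypothetical `Node`/`Deque` types, not the gem's code):

```c
#include <assert.h>
#include <stddef.h>

// Miniature of the selector queue: a NULL-terminated doubly-linked
// deque with explicit endpoints.
struct Node {
	struct Node *head, *tail;
};

struct Deque {
	struct Node *waiting; // newest entry
	struct Node *ready;   // oldest entry
};

// Same shape as queue_pop above: interior nodes splice their neighbours
// together; boundary nodes move the deque's endpoints instead.
static void deque_pop(struct Deque *deque, struct Node *node)
{
	if (node->head) {
		node->head->tail = node->tail;
	} else {
		deque->waiting = node->tail;
	}
	
	if (node->tail) {
		node->tail->head = node->head;
	} else {
		deque->ready = node->head;
	}
}

int main(void)
{
	// Two nodes: b at the waiting (newest) end, a at the ready (oldest) end.
	struct Node a = {NULL, NULL}, b = {NULL, NULL};
	struct Deque deque = {.waiting = &b, .ready = &a};
	b.tail = &a;
	a.head = &b;
	
	deque_pop(&deque, &a); // boundary pop: the ready endpoint moves to b
	assert(deque.ready == &b && b.tail == NULL);
	
	deque_pop(&deque, &b); // last node: both endpoints become NULL
	assert(deque.waiting == NULL && deque.ready == NULL);
	
	return 0;
}
```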
```diff
@@ -221,8 +221,8 @@ VALUE IO_Event_Selector_resume(struct IO_Event_Selector *backend, int argc, VALUE *argv)
 	rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
 	
 	struct IO_Event_Selector_Queue waiting = {
-		.behind = NULL,
-		.infront = NULL,
+		.head = NULL,
+		.tail = NULL,
 		.flags = IO_EVENT_SELECTOR_QUEUE_FIBER,
 		.fiber = rb_fiber_current()
 	};
```
```diff
@@ -254,8 +254,8 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE *argv)
 	rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
 	
 	struct IO_Event_Selector_Queue waiting = {
-		.behind = NULL,
-		.infront = NULL,
+		.head = NULL,
+		.tail = NULL,
 		.flags = IO_EVENT_SELECTOR_QUEUE_FIBER,
 		.fiber = rb_fiber_current()
 	};
```
```diff
@@ -276,8 +276,8 @@ void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber)
 {
 	struct IO_Event_Selector_Queue *waiting = malloc(sizeof(struct IO_Event_Selector_Queue));
 	
-	waiting->behind = NULL;
-	waiting->infront = NULL;
+	waiting->head = NULL;
+	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
 	waiting->fiber = fiber;
 	
```
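Across the three hunks above, note the two allocation strategies for queue entries: `IO_Event_Selector_resume` and `IO_Event_Selector_raise` build theirs on the C stack with the `IO_EVENT_SELECTOR_QUEUE_FIBER` flag (the stack frame persists while the fiber is suspended, so no heap allocation is needed), while `IO_Event_Selector_queue_push` must `malloc` an `IO_EVENT_SELECTOR_QUEUE_INTERNAL` entry, because it returns immediately and the entry has to survive until the event loop drains the queue, presumably being freed at that point.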
data/ext/io/event/selector/selector.h:

```diff
@@ -40,7 +40,10 @@ enum IO_Event {
 	IO_EVENT_PRIORITY = 2,
 	IO_EVENT_WRITABLE = 4,
 	IO_EVENT_ERROR = 8,
-	IO_EVENT_HANGUP = 16
+	IO_EVENT_HANGUP = 16,
+	
+	// Used by kqueue to differentiate between process exit and file descriptor events:
+	IO_EVENT_EXIT = 32,
 };
 
 void Init_IO_Event_Selector(VALUE IO_Event_Selector);
```
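The event constants are powers of two, so a single mask can carry several conditions at once; the new `IO_EVENT_EXIT` lets the kqueue backend report a process exit distinctly from file descriptor readiness. A small illustration (the enum body is copied from the hunk above, except `IO_EVENT_READABLE = 1`, which is assumed from the surrounding values; the `main` harness is hypothetical):

```c
#include <stdio.h>

enum IO_Event {
	IO_EVENT_READABLE = 1,
	IO_EVENT_PRIORITY = 2,
	IO_EVENT_WRITABLE = 4,
	IO_EVENT_ERROR = 8,
	IO_EVENT_HANGUP = 16,
	
	// Used by kqueue to differentiate between process exit and file descriptor events:
	IO_EVENT_EXIT = 32,
};

int main(void)
{
	// One mask can carry several conditions at once:
	int events = IO_EVENT_READABLE | IO_EVENT_HANGUP;
	
	if (events & IO_EVENT_EXIT) {
		printf("process exited\n"); // kqueue EVFILT_PROC-style event
	} else {
		printf("file descriptor events: %d\n", events);
	}
	
	return 0;
}
```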
```diff
@@ -78,8 +81,8 @@ enum IO_Event_Selector_Queue_Flags {
 };
 
 struct IO_Event_Selector_Queue {
-	struct IO_Event_Selector_Queue *behind;
-	struct IO_Event_Selector_Queue *infront;
+	struct IO_Event_Selector_Queue *head;
+	struct IO_Event_Selector_Queue *tail;
 	
 	enum IO_Event_Selector_Queue_Flags flags;
 	
```
```diff
@@ -106,12 +109,23 @@ void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE loop)
 
 static inline
 void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
-	rb_gc_mark(backend->loop);
+	rb_gc_mark_movable(backend->loop);
+	
+	struct IO_Event_Selector_Queue *ready = backend->ready;
+	while (ready) {
+		rb_gc_mark_movable(ready->fiber);
+		ready = ready->head;
+	}
+}
+
+static inline
+void IO_Event_Selector_compact(struct IO_Event_Selector *backend) {
+	backend->loop = rb_gc_location(backend->loop);
 	
 	struct IO_Event_Selector_Queue *ready = backend->ready;
 	while (ready) {
-		rb_gc_mark(ready->fiber);
-		ready = ready->behind;
+		ready->fiber = rb_gc_location(ready->fiber);
+		ready = ready->head;
 	}
 }
 
```
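`rb_gc_mark_movable` (instead of plain `rb_gc_mark`) tells Ruby's compacting GC that the selector can tolerate its references being relocated, and the new `IO_Event_Selector_compact` re-fetches their post-compaction addresses via `rb_gc_location`. For such a pair to run, the wrapping TypedData object registers them as its `dmark`/`dcompact` callbacks; a generic sketch of that wiring (hypothetical `Example` type, not the gem's actual backend definitions):

```c
#include <ruby.h>
#include <stdlib.h>

// Hypothetical wrapped struct holding a VALUE that compaction may move:
struct Example {
	VALUE loop;
};

static void Example_Type_mark(void *data)
{
	struct Example *example = data;
	
	// Mark without pinning: the GC may relocate this object.
	rb_gc_mark_movable(example->loop);
}

static void Example_Type_compact(void *data)
{
	struct Example *example = data;
	
	// After compaction, ask the GC where the object ended up.
	example->loop = rb_gc_location(example->loop);
}

static void Example_Type_free(void *data)
{
	free(data);
}

static const rb_data_type_t Example_Type = {
	.wrap_struct_name = "IO::Event::Example",
	.function = {
		.dmark = Example_Type_mark,
		.dcompact = Example_Type_compact, // requires Ruby 2.7+
		.dfree = Example_Type_free,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
```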