io-event 1.2.2 → 1.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +7 -24
- data/ext/io/event/selector/array.h +135 -0
- data/ext/io/event/selector/epoll.c +474 -204
- data/ext/io/event/selector/kqueue.c +513 -222
- data/ext/io/event/selector/list.h +88 -0
- data/ext/io/event/selector/selector.c +16 -21
- data/ext/io/event/selector/selector.h +23 -8
- data/ext/io/event/selector/uring.c +459 -223
- data/lib/io/event/interrupt.rb +1 -1
- data/lib/io/event/selector/nonblock.rb +1 -1
- data/lib/io/event/selector/select.rb +123 -22
- data/lib/io/event/selector.rb +2 -6
- data/lib/io/event/support.rb +11 -0
- data/lib/io/event/version.rb +2 -2
- data/lib/io/event.rb +1 -1
- data/license.md +2 -1
- data/readme.md +13 -5
- data.tar.gz.sig +0 -0
- metadata +8 -61
- metadata.gz.sig +0 -0
--- 1.2.2/data/ext/io/event/selector/kqueue.c
+++ 1.3.3/data/ext/io/event/selector/kqueue.c
@@ -20,11 +20,17 @@
 
 #include "kqueue.h"
 #include "selector.h"
+#include "list.h"
+#include "array.h"
 
 #include <sys/event.h>
 #include <sys/ioctl.h>
 #include <time.h>
 #include <errno.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+#include "../interrupt.h"
 
 enum {
 	DEBUG = 0,
@@ -33,49 +39,144 @@ enum {
 	DEBUG_IO_WAIT = 0
 };
 
+#ifndef EVFILT_USER
+#define IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+#endif
+
 static VALUE IO_Event_Selector_KQueue = Qnil;
 
 enum {KQUEUE_MAX_EVENTS = 64};
 
-
+// This represents an actual fiber waiting for a specific event.
+struct IO_Event_Selector_KQueue_Waiting
+{
+	struct IO_Event_List list;
+	
+	// The events the fiber is waiting for.
+	enum IO_Event events;
+	
+	// The events that are currently ready.
+	enum IO_Event ready;
+	
+	// The fiber value itself.
+	VALUE fiber;
+};
+
+struct IO_Event_Selector_KQueue
+{
 	struct IO_Event_Selector backend;
 	int descriptor;
-
 	int blocked;
+	
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+	struct IO_Event_Interrupt interrupt;
+#endif
+	struct IO_Event_Array descriptors;
+};
+
+// This represents zero or more fibers waiting for a specific descriptor.
+struct IO_Event_Selector_KQueue_Descriptor
+{
+	struct IO_Event_List list;
+	
+	// The union of all events we are waiting for:
+	enum IO_Event waiting_events;
+	
+	// The union of events we are registered for:
+	enum IO_Event registered_events;
+	
+	// The events that are currently ready:
+	enum IO_Event ready_events;
 };
 
-
+static
+void IO_Event_Selector_KQueue_Waiting_mark(struct IO_Event_List *_waiting)
+{
+	struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;
+	
+	if (waiting->fiber) {
+		rb_gc_mark_movable(waiting->fiber);
+	}
+}
+
+static
+void IO_Event_Selector_KQueue_Descriptor_mark(void *_descriptor)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
+	
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_KQueue_Waiting_mark);
+}
+
+static
+void IO_Event_Selector_KQueue_Type_mark(void *_selector)
+{
+	struct IO_Event_Selector_KQueue *selector = _selector;
+	IO_Event_Selector_mark(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_KQueue_Descriptor_mark);
+}
+
+static
+void IO_Event_Selector_KQueue_Waiting_compact(struct IO_Event_List *_waiting)
+{
+	struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;
+	
+	if (waiting->fiber) {
+		waiting->fiber = rb_gc_location(waiting->fiber);
+	}
+}
+
+static
+void IO_Event_Selector_KQueue_Descriptor_compact(void *_descriptor)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
+	
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_KQueue_Waiting_compact);
+}
+
+static
+void IO_Event_Selector_KQueue_Type_compact(void *_selector)
 {
-	struct IO_Event_Selector_KQueue *
-
+	struct IO_Event_Selector_KQueue *selector = _selector;
+	IO_Event_Selector_compact(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_KQueue_Descriptor_compact);
 }
 
 static
-void close_internal(struct IO_Event_Selector_KQueue *
-
-
-
+void close_internal(struct IO_Event_Selector_KQueue *selector)
+{
+	if (selector->descriptor >= 0) {
+		close(selector->descriptor);
+		selector->descriptor = -1;
 	}
 }
 
-
+static
+void IO_Event_Selector_KQueue_Type_free(void *_selector)
 {
-	struct IO_Event_Selector_KQueue *
+	struct IO_Event_Selector_KQueue *selector = _selector;
+	
+	close_internal(selector);
 
-
+	IO_Event_Array_free(&selector->descriptors);
 
-	free(
+	free(selector);
 }
 
-
+static
+size_t IO_Event_Selector_KQueue_Type_size(const void *_selector)
 {
-
+	const struct IO_Event_Selector_KQueue *selector = _selector;
+	
+	return sizeof(struct IO_Event_Selector_KQueue)
+		+ IO_Event_Array_memory_size(&selector->descriptors)
+	;
 }
 
 static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
 	.wrap_struct_name = "IO_Event::Backend::KQueue",
 	.function = {
 		.dmark = IO_Event_Selector_KQueue_Type_mark,
+		.dcompact = IO_Event_Selector_KQueue_Type_compact,
 		.dfree = IO_Event_Selector_KQueue_Type_free,
 		.dsize = IO_Event_Selector_KQueue_Type_size,
 	},
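Structurally, the hunk above replaces the old flat selector state with three cooperating records: the selector owns an `IO_Event_Array` of per-descriptor records, each descriptor record carries three event masks (`waiting_events`, `registered_events`, `ready_events`) plus an intrusive `IO_Event_List` of waiters, and each waiter is a `Waiting` node holding the fiber and its requested events. Because every filter is registered with `EV_ONESHOT`, firing disarms it in the kernel, and the masks are how the selector tracks what must be re-armed. A standalone sketch of that bookkeeping, with an illustrative struct and no real kevent calls (this is not the gem's API):

#include <stdio.h>

enum {IO_EVENT_READABLE = 1, IO_EVENT_WRITABLE = 2, IO_EVENT_EXIT = 8};

struct Descriptor {
	int waiting_events;    /* union of events all waiting fibers want */
	int registered_events; /* events currently armed in the kernel */
	int ready_events;      /* events the last kevent() call reported */
};

static void update(struct Descriptor *d)
{
	if (d->waiting_events) {
		/* the real code issues kevent(EV_ADD | EV_ONESHOT, ...) for each
		   wanted filter, then records what is now armed: */
		d->registered_events = d->waiting_events;
	}
}

static void handle(struct Descriptor *d, int still_wanted)
{
	int ready = d->ready_events;
	d->ready_events = 0;
	d->registered_events &= ~ready; /* one-shot: fired filters are disarmed */
	
	/* The real code rebuilds waiting_events by walking the waiter list;
	   here the surviving interest is passed in directly: */
	d->waiting_events = still_wanted;
	
	update(d); /* re-arm whatever is still wanted */
}

int main(void)
{
	struct Descriptor d = {
		.waiting_events = 0,
		.registered_events = IO_EVENT_READABLE | IO_EVENT_WRITABLE,
		.ready_events = IO_EVENT_READABLE, /* readability just fired */
	};
	
	handle(&d, IO_EVENT_WRITABLE); /* one fiber still waits for writability */
	printf("registered_events = %d\n", d.registered_events); /* 2 */
	return 0;
}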
@@ -83,301 +184,390 @@ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
+inline static
+struct IO_Event_Selector_KQueue_Descriptor * IO_Event_Selector_KQueue_Descriptor_lookup(struct IO_Event_Selector_KQueue *selector, uintptr_t descriptor)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = IO_Event_Array_lookup(&selector->descriptors, descriptor);
+	
+	if (!kqueue_descriptor) {
+		rb_sys_fail("IO_Event_Selector_KQueue_Descriptor_lookup:IO_Event_Array_lookup");
+	}
+	
+	return kqueue_descriptor;
+}
+
+inline static
+enum IO_Event events_from_kevent_filter(int filter)
+{
+	switch (filter) {
+		case EVFILT_READ:
+			return IO_EVENT_READABLE;
+		case EVFILT_WRITE:
+			return IO_EVENT_WRITABLE;
+		case EVFILT_PROC:
+			return IO_EVENT_EXIT;
+		default:
+			return 0;
+	}
+}
+
+inline static
+int IO_Event_Selector_KQueue_Descriptor_update(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor)
+{
+	int count = 0;
+	struct kevent kevents[3] = {0};
+	
+	if (kqueue_descriptor->waiting_events & IO_EVENT_READABLE) {
+		kevents[count].ident = identifier;
+		kevents[count].filter = EVFILT_READ;
+		kevents[count].flags = EV_ADD | EV_ONESHOT;
+		kevents[count].udata = (void *)kqueue_descriptor;
+		// #ifdef EV_OOBAND
+		// if (events & IO_EVENT_PRIORITY) {
+		// 	kevents[count].flags |= EV_OOBAND;
+		// }
+		// #endif
+		count++;
+	}
+	
+	if (kqueue_descriptor->waiting_events & IO_EVENT_WRITABLE) {
+		kevents[count].ident = identifier;
+		kevents[count].filter = EVFILT_WRITE;
+		kevents[count].flags = EV_ADD | EV_ONESHOT;
+		kevents[count].udata = (void *)kqueue_descriptor;
+		count++;
+	}
+	
+	if (kqueue_descriptor->waiting_events & IO_EVENT_EXIT) {
+		kevents[count].ident = identifier;
+		kevents[count].filter = EVFILT_PROC;
+		kevents[count].flags = EV_ADD | EV_ONESHOT;
+		kevents[count].fflags = NOTE_EXIT;
+		kevents[count].udata = (void *)kqueue_descriptor;
+		count++;
+	}
+	
+	if (count == 0) {
+		return 0;
+	}
+	
+	int result = kevent(selector->descriptor, kevents, count, NULL, 0, NULL);
+	
+	if (result == -1) {
+		return result;
+	}
+	
+	kqueue_descriptor->registered_events = kqueue_descriptor->waiting_events;
+	
+	return result;
+}
+
+inline static
+int IO_Event_Selector_KQueue_Waiting_register(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Waiting *waiting)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = IO_Event_Selector_KQueue_Descriptor_lookup(selector, identifier);
+	
+	// We are waiting for these events:
+	kqueue_descriptor->waiting_events |= waiting->events;
+	
+	int result = IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
+	if (result == -1) return -1;
+	
+	IO_Event_List_prepend(&kqueue_descriptor->list, &waiting->list);
+	
+	return result;
+}
+
+inline static
+void IO_Event_Selector_KQueue_Waiting_cancel(struct IO_Event_Selector_KQueue_Waiting *waiting)
+{
+	IO_Event_List_pop(&waiting->list);
+	waiting->fiber = 0;
+}
+
+void IO_Event_Selector_KQueue_Descriptor_initialize(void *element)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = element;
+	IO_Event_List_initialize(&kqueue_descriptor->list);
+	kqueue_descriptor->waiting_events = 0;
+	kqueue_descriptor->registered_events = 0;
+	kqueue_descriptor->ready_events = 0;
+}
+
+void IO_Event_Selector_KQueue_Descriptor_free(void *element)
+{
+	struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = element;
+	
+	IO_Event_List_free(&kqueue_descriptor->list);
+}
+
 VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
-	struct IO_Event_Selector_KQueue *
-	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
+	
+	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	selector->descriptor = -1;
+	selector->blocked = 0;
 
-
-
-
+	selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
+	selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
+	IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
 
 	return instance;
 }
 
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_KQueue *selector) {
+	int descriptor = IO_Event_Interrupt_descriptor(interrupt);
+	
+	struct kevent kev = {
+		.filter = EVFILT_READ,
+		.ident = descriptor,
+		.flags = EV_ADD | EV_CLEAR,
+	};
+	
+	int result = kevent(selector->descriptor, &kev, 1, NULL, 0, NULL);
+	
+	if (result == -1) {
+		rb_sys_fail("IO_Event_Interrupt_add:kevent");
+	}
+}
+#endif
+
 VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	IO_Event_Selector_initialize(&
+	IO_Event_Selector_initialize(&selector->backend, loop);
 	int result = kqueue();
 
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Selector_KQueue_initialize:kqueue");
 	} else {
+		// Make sure the descriptor is closed on exec.
 		ioctl(result, FIOCLEX);
-		data->descriptor = result;
 
-
+		selector->descriptor = result;
+		
+		rb_update_max_fd(selector->descriptor);
 	}
 
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+	IO_Event_Interrupt_open(&selector->interrupt);
+	IO_Event_Interrupt_add(&selector->interrupt, selector);
+#endif
+
 	return self;
 }
 
 VALUE IO_Event_Selector_KQueue_loop(VALUE self) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return
+	return selector->backend.loop;
 }
 
 VALUE IO_Event_Selector_KQueue_close(VALUE self) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
+	
+	close_internal(selector);
 
-
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+	IO_Event_Interrupt_close(&selector->interrupt);
+#endif
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_KQueue_transfer(VALUE self)
 {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return IO_Event_Selector_fiber_transfer(
+	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 }
 
 VALUE IO_Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return IO_Event_Selector_resume(&
+	return IO_Event_Selector_resume(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_KQueue_yield(VALUE self)
 {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return IO_Event_Selector_yield(&
+	return IO_Event_Selector_yield(&selector->backend);
 }
 
 VALUE IO_Event_Selector_KQueue_push(VALUE self, VALUE fiber)
 {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	IO_Event_Selector_queue_push(&
+	IO_Event_Selector_queue_push(&selector->backend, fiber);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return IO_Event_Selector_raise(&
+	return IO_Event_Selector_raise(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_KQueue_ready_p(VALUE self) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	return
+	return selector->backend.ready ? Qtrue : Qfalse;
 }
 
 struct process_wait_arguments {
-	struct IO_Event_Selector_KQueue *
+	struct IO_Event_Selector_KQueue *selector;
+	struct IO_Event_Selector_KQueue_Waiting *waiting;
 	pid_t pid;
 	int flags;
 };
 
 static
-
-
-
-
-
-
-
-
-
-
-
+void process_prewait(pid_t pid) {
+#if defined(WNOWAIT)
+	// FreeBSD seems to have an issue where kevent() can return an EVFILT_PROC/NOTE_EXIT event for a process even though a wait with WNOHANG on it immediately after will not return it (but it does after a small delay). Similarly, OpenBSD/NetBSD seem to sometimes fail the kevent() call with ESRCH (indicating the process has already terminated) even though a WNOHANG may not return it immediately after.
+	// To deal with this, do a hanging WNOWAIT wait on the process to make sure it is "terminated enough" for future WNOHANG waits to return it.
+	// Using waitid() for this because OpenBSD only supports WNOWAIT with waitid().
+	int result;
+	do {
+		siginfo_t info;
+		result = waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
+		// This can sometimes get interrupted by SIGCHLD.
+	} while (result == -1 && errno == EINTR);
 	if (result == -1) {
-
-		if (errno == ESRCH) {
-			return 0;
-		}
-
-		rb_sys_fail("process_add_filters:kevent");
+		rb_sys_fail("process_prewait:waitid");
 	}
-
-	return 1;
-}
-
-static
-void process_remove_filters(int descriptor, int ident) {
-	struct kevent event = {0};
-
-	event.ident = ident;
-	event.filter = EVFILT_PROC;
-	event.flags = EV_DELETE | EV_UDATA_SPECIFIC;
-	event.fflags = NOTE_EXIT;
-
-	// Ignore the result.
-	kevent(descriptor, &event, 1, NULL, 0, NULL);
+#endif
 }
 
 static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	IO_Event_Selector_fiber_transfer(arguments->
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-
+	if (arguments->waiting->ready) {
+		process_prewait(arguments->pid);
+		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
+	} else {
+		return Qfalse;
+	}
 }
 
 static
-VALUE
+VALUE process_wait_ensure(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-
+	IO_Event_Selector_KQueue_Waiting_cancel(arguments->waiting);
 
-
+	return Qnil;
 }
 
-
-	struct IO_Event_Selector_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
-
-	struct process_wait_arguments process_wait_arguments = {
-		.data = data,
-		.pid = NUM2PIDT(pid),
-		.flags = RB_NUM2INT(flags),
-	};
-
-	VALUE result = Qnil;
-
-	// This loop should not be needed but I have seen a race condition between NOTE_EXIT and `waitpid`, thus the result would be (unexpectedly) nil. So we put this in a loop to retry if the race condition shows up:
-	while (NIL_P(result)) {
-		int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
-
-		if (waiting) {
-			result = rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
-		} else {
-			result = IO_Event_Selector_process_status_wait(process_wait_arguments.pid);
-		}
-	}
-
-	return result;
-}
+struct IO_Event_List_Type IO_Event_Selector_KQueue_process_wait_list_type = {};
 
-
-
-
-	struct kevent kevents[2] = {0};
+VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-
-
-		kevents[count].filter = EVFILT_READ;
-		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
-		kevents[count].udata = (void*)fiber;
-
-		// #ifdef EV_OOBAND
-		// if (events & PRIORITY) {
-		// 	kevents[count].flags |= EV_OOBAND;
-		// }
-		// #endif
-
-		count++;
-	}
+	pid_t pid = NUM2PIDT(_pid);
+	int flags = NUM2INT(_flags);
 
-
-
-
-
-
-		count++;
-	}
+	struct IO_Event_Selector_KQueue_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_KQueue_process_wait_list_type},
+		.fiber = fiber,
+		.events = IO_EVENT_EXIT,
+	};
 
-
+	struct process_wait_arguments process_wait_arguments = {
+		.selector = selector,
+		.waiting = &waiting,
+		.pid = pid,
+		.flags = flags,
+	};
 
+	int result = IO_Event_Selector_KQueue_Waiting_register(selector, pid, &waiting);
 	if (result == -1) {
-
-
-
-
-
-
-
-static
-void io_remove_filters(int descriptor, int ident, int events) {
-	int count = 0;
-	struct kevent kevents[2] = {0};
-
-	if (events & IO_EVENT_READABLE) {
-		kevents[count].ident = ident;
-		kevents[count].filter = EVFILT_READ;
-		kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
+		// OpenBSD/NetBSD return ESRCH when attempting to register an EVFILT_PROC event for a zombie process.
+		if (errno == ESRCH) {
+			process_prewait(pid);
+			
+			return IO_Event_Selector_process_status_wait(pid, flags);
+		}
 
-
-	}
-
-	if (events & IO_EVENT_WRITABLE) {
-		kevents[count].ident = ident;
-		kevents[count].filter = EVFILT_WRITE;
-		kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
-		count++;
+		rb_sys_fail("IO_Event_Selector_KQueue_process_wait:IO_Event_Selector_KQueue_Waiting_register");
 	}
 
-
-	kevent(descriptor, kevents, count, NULL, 0, NULL);
+	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
 struct io_wait_arguments {
-	struct IO_Event_Selector_KQueue *
-
-	int descriptor;
+	struct IO_Event_Selector_KQueue *selector;
+	struct IO_Event_Selector_KQueue_Waiting *waiting;
 };
 
 static
-VALUE
+VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
+	IO_Event_Selector_KQueue_Waiting_cancel(arguments->waiting);
 
-
-}
-
-static inline
-int events_from_kqueue_filter(int filter) {
-	if (filter == EVFILT_READ) return IO_EVENT_READABLE;
-	if (filter == EVFILT_WRITE) return IO_EVENT_WRITABLE;
-
-	return 0;
+	return Qnil;
 }
 
 static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-
-
+	if (arguments->waiting->ready) {
+		return RB_INT2NUM(arguments->waiting->ready);
+	} else {
 		return Qfalse;
 	}
-
-	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
 }
 
+struct IO_Event_List_Type IO_Event_Selector_KQueue_io_wait_list_type = {};
+
 VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
+	struct IO_Event_Selector_KQueue_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_KQueue_io_wait_list_type},
+		.fiber = fiber,
+		.events = RB_NUM2INT(events),
+	};
+	
+	int result = IO_Event_Selector_KQueue_Waiting_register(selector, descriptor, &waiting);
+	if (result == -1) {
+		rb_sys_fail("IO_Event_Selector_KQueue_io_wait:IO_Event_Selector_KQueue_Waiting_register");
+	}
+	
 	struct io_wait_arguments io_wait_arguments = {
-		.
-		.
-		.descriptor = descriptor,
+		.selector = selector,
+		.waiting = &waiting,
 	};
 
 	if (DEBUG_IO_WAIT) fprintf(stderr, "IO_Event_Selector_KQueue_io_wait descriptor=%d\n", descriptor);
 
-	return
+	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
 }
 
 #ifdef HAVE_RUBY_IO_BUFFER_H
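The `process_prewait` function added above works around the BSD races described in its comments: after a `NOTE_EXIT` event (or an `ESRCH` failure when registering against an already-dead child), a blocking `waitid(..., WEXITED | WNOWAIT)` makes sure the child is waitable without consuming its status, so the non-blocking wait that follows can succeed. A standalone demonstration of the same pattern (this is an illustration, not code from the gem):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) _exit(42); /* child exits immediately */
	
	siginfo_t info;
	int result;
	do {
		/* WNOWAIT: block until the child is "terminated enough", but
		   leave it in a waitable state for the real wait below. */
		result = waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
	} while (result == -1 && errno == EINTR);
	if (result == -1) {perror("waitid"); return 1;}
	
	/* A subsequent non-blocking wait now reliably reaps the child: */
	int status = 0;
	if (waitpid(pid, &status, WNOHANG) == pid) {
		printf("child exited with %d\n", WEXITSTATUS(status));
	}
	return 0;
}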
@@ -406,16 +596,18 @@ VALUE io_read_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
 	if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld)\n", arguments->descriptor, offset, maximum_size);
 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 		if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -428,10 +620,12 @@ VALUE io_read_loop(VALUE _arguments) {
 			if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+		
+		maximum_size = size - offset;
 	}
 
 	if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static
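In the two hunks above, `io_read_loop` now accumulates the transferred byte count in `total` and reports that, rather than deriving the result from `offset`, which diverges from bytes-transferred whenever the loop starts at a nonzero buffer offset (the write loop below gets the same treatment). A trivial standalone illustration of the discrepancy (numbers are made up):

#include <stdio.h>

int main(void)
{
	size_t offset = 100;       /* caller asked for I/O starting at offset 100 */
	size_t total = 0;          /* bytes actually transferred */
	size_t reads[] = {10, 20}; /* two partial reads */
	
	for (int i = 0; i < 2; i++) {
		total += reads[i];
		offset += reads[i];
	}
	
	/* offset=130 includes the starting position; total=30 is the value a
	   fiber scheduler result should report. */
	printf("offset=%zu total=%zu\n", offset, total);
	return 0;
}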
@@ -444,8 +638,8 @@ VALUE io_read_ensure(VALUE _arguments) {
 }
 
 VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -504,6 +698,7 @@ VALUE io_write_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
@@ -511,13 +706,14 @@ VALUE io_write_loop(VALUE _arguments) {
 
 	if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld, length=%zu)\n", arguments->descriptor, offset, maximum_size, length);
 		ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
 		if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -530,10 +726,12 @@ VALUE io_write_loop(VALUE _arguments) {
 			if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+		
+		maximum_size = size - offset;
 	}
 
 	if (DEBUG_IO_READ) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 };
 
 static
@@ -546,8 +744,8 @@ VALUE io_write_ensure(VALUE _arguments) {
 };
 
 VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -616,30 +814,32 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-	struct IO_Event_Selector_KQueue *
+	struct IO_Event_Selector_KQueue *selector;
 
 	int count;
 	struct kevent events[KQUEUE_MAX_EVENTS];
 
 	struct timespec storage;
 	struct timespec *timeout;
+	
+	struct IO_Event_List saved;
 };
 
 static
 void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 
-	arguments->count = kevent(arguments->
+	arguments->count = kevent(arguments->selector->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
 
 	return NULL;
 }
 
 static
 void select_internal_without_gvl(struct select_arguments *arguments) {
-	arguments->
+	arguments->selector->blocked = 1;
 
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-	arguments->
+	arguments->selector->blocked = 0;
 
 	if (arguments->count == -1) {
 		if (errno != EINTR) {
@@ -663,19 +863,102 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 	}
 }
 
+static
+int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor, struct IO_Event_List *saved)
+{
+	// This is the mask of all events that occured for the given descriptor:
+	enum IO_Event ready_events = kqueue_descriptor->ready_events;
+	
+	if (ready_events) {
+		kqueue_descriptor->ready_events = 0;
+		// Since we use one-shot semantics, we need to re-arm the events that are ready if needed:
+		kqueue_descriptor->registered_events &= ~ready_events;
+	} else {
+		return 0;
+	}
+	
+	struct IO_Event_List *list = &kqueue_descriptor->list;
+	struct IO_Event_List *node = list->tail;
+	
+	// Reset the events back to 0 so that we can re-arm if necessary:
+	kqueue_descriptor->waiting_events = 0;
+	
+	// It's possible (but unlikely) that the address of list will changing during iteration.
+	while (node != list) {
+		struct IO_Event_Selector_KQueue_Waiting *waiting = (struct IO_Event_Selector_KQueue_Waiting *)node;
+		
+		enum IO_Event matching_events = waiting->events & ready_events;
+		
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_handle: identifier=%lu, ready_events=%d, matching_events=%d\n", identifier, ready_events, matching_events);
+		
+		if (matching_events) {
+			IO_Event_List_append(node, saved);
+			
+			waiting->ready = matching_events;
+			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+			
+			node = saved->tail;
+			IO_Event_List_pop(saved);
+		} else {
+			kqueue_descriptor->waiting_events |= waiting->events;
+			node = node->tail;
+		}
+	}
+	
+	return IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
+}
+
+static
+VALUE select_handle_events(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	struct IO_Event_Selector_KQueue *selector = arguments->selector;
+	
+	for (int i = 0; i < arguments->count; i += 1) {
+		if (arguments->events[i].udata) {
+			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+			kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments->events[i].filter);
+		}
+	}
+	
+	for (int i = 0; i < arguments->count; i += 1) {
+		if (arguments->events[i].udata) {
+			struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+			IO_Event_Selector_KQueue_handle(selector, arguments->events[i].ident, kqueue_descriptor, &arguments->saved);
+		} else {
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+			IO_Event_Interrupt_clear(&selector->interrupt);
+#endif
+		}
+	}
+	
+	return RB_INT2NUM(arguments->count);
+}
+
+static
+VALUE select_handle_events_ensure(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	
+	IO_Event_List_free(&arguments->saved);
+	
+	return Qnil;
+}
+
 VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	int ready = IO_Event_Selector_queue_flush(&
+	int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
 	struct select_arguments arguments = {
-		.
+		.selector = selector,
 		.count = KQUEUE_MAX_EVENTS,
 		.storage = {
 			.tv_sec = 0,
 			.tv_nsec = 0
-		}
+		},
+		.saved = {},
 	};
 
 	arguments.timeout = &arguments.storage;
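`IO_Event_Selector_KQueue_handle` above resumes fibers while walking the descriptor's intrusive waiter list, and a resumed fiber may unlink its own node. The `saved` placeholder node keeps the iteration position stable: it is linked in right after the current node before the fiber transfer, and the walk resumes from it afterwards. A standalone sketch of that trick on a circular doubly-linked list (simplified primitives, not the gem's `list.h`):

#include <stdio.h>

/* Circular intrusive list: tail = next, head = previous. */
struct List {struct List *head, *tail;};

static void list_init(struct List *list) {list->head = list->tail = list;}

static void list_insert_after(struct List *node, struct List *item)
{
	item->head = node;
	item->tail = node->tail;
	node->tail->head = item;
	node->tail = item;
}

static void list_pop(struct List *item)
{
	item->head->tail = item->tail;
	item->tail->head = item->head;
	item->head = item->tail = item;
}

int main(void)
{
	struct List root, a, b, c;
	list_init(&root);
	list_insert_after(&root, &a);
	list_insert_after(&a, &b);
	list_insert_after(&b, &c);
	
	struct List saved; /* placeholder, like `arguments->saved` in the diff */
	list_init(&saved);
	
	struct List *node = root.tail;
	while (node != &root) {
		/* Park the placeholder after the current node, then do something
		   that may unlink it - here we unlink it ourselves, as a resumed
		   fiber's cancel path might: */
		list_insert_after(node, &saved);
		list_pop(node);
		
		node = saved.tail; /* resume iteration from the placeholder */
		list_pop(&saved);
	}
	
	printf("list empty: %d\n", root.tail == &root); /* 1 */
	return 0;
}

Because the placeholder lives in `select_arguments`, `select_handle_events_ensure` can unlink it even when a resumed fiber raises, leaving the list consistent.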
@@ -696,7 +979,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 	// 2. Didn't process any events from non-blocking select (above), and
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
-	if (!ready && !arguments.count && !
+	if (!ready && !arguments.count && !selector->backend.ready) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
 		if (!timeout_nonblocking(arguments.timeout)) {
@@ -707,34 +990,42 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 		}
 	}
 
-
-
-
-
-
-		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
-	}
+	if (arguments.count) {
+		return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+	} else {
+		return RB_INT2NUM(0);
 	}
-
-	return INT2NUM(arguments.count);
 }
 
 VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
-	struct IO_Event_Selector_KQueue *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type,
+	struct IO_Event_Selector_KQueue *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	if (
+	if (selector->blocked) {
+#ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+		IO_Event_Interrupt_signal(&selector->interrupt);
+#else
 		struct kevent trigger = {0};
 
 		trigger.filter = EVFILT_USER;
-		trigger.flags = EV_ADD | EV_CLEAR
+		trigger.flags = EV_ADD | EV_CLEAR;
+		
+		int result = kevent(selector->descriptor, &trigger, 1, NULL, 0, NULL);
+		
+		if (result == -1) {
+			rb_sys_fail("IO_Event_Selector_KQueue_wakeup:kevent");
+		}
+		
+		// FreeBSD apparently only works if the NOTE_TRIGGER is done as a separate call.
+		trigger.flags = 0;
 		trigger.fflags = NOTE_TRIGGER;
 
-
+		result = kevent(selector->descriptor, &trigger, 1, NULL, 0, NULL);
 
 		if (result == -1) {
 			rb_sys_fail("IO_Event_Selector_KQueue_wakeup:kevent");
 		}
+#endif
 
 		return Qtrue;
 	}
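A note on the wakeup path above: where `EVFILT_USER` is unavailable the selector now signals a separate interrupt object instead, and where it is available it splits registration and `NOTE_TRIGGER` into two `kevent()` calls, per the FreeBSD observation in the comment. A standalone sketch of that two-step user-event wakeup (BSD/macOS only; this is an illustration, not the gem's code):

#include <stdio.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

int main(void)
{
	int kq = kqueue();
	if (kq == -1) {perror("kqueue"); return 1;}
	
	struct kevent trigger = {0};
	trigger.ident = 0;
	trigger.filter = EVFILT_USER;
	trigger.flags = EV_ADD | EV_CLEAR;
	
	/* Step 1: register the user event. */
	if (kevent(kq, &trigger, 1, NULL, 0, NULL) == -1) {perror("kevent"); return 1;}
	
	/* Step 2: fire NOTE_TRIGGER with a separate call, mirroring the diff. */
	trigger.flags = 0;
	trigger.fflags = NOTE_TRIGGER;
	if (kevent(kq, &trigger, 1, NULL, 0, NULL) == -1) {perror("kevent"); return 1;}
	
	/* The blocking wait returns immediately because the event is pending. */
	struct kevent event;
	int count = kevent(kq, NULL, 0, &event, 1, NULL);
	printf("count=%d filter=%d\n", count, (int)event.filter);
	return 0;
}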