io-event 1.2.2 → 1.3.3
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +7 -24
- data/ext/io/event/selector/array.h +135 -0
- data/ext/io/event/selector/epoll.c +474 -204
- data/ext/io/event/selector/kqueue.c +513 -222
- data/ext/io/event/selector/list.h +88 -0
- data/ext/io/event/selector/selector.c +16 -21
- data/ext/io/event/selector/selector.h +23 -8
- data/ext/io/event/selector/uring.c +459 -223
- data/lib/io/event/interrupt.rb +1 -1
- data/lib/io/event/selector/nonblock.rb +1 -1
- data/lib/io/event/selector/select.rb +123 -22
- data/lib/io/event/selector.rb +2 -6
- data/lib/io/event/support.rb +11 -0
- data/lib/io/event/version.rb +2 -2
- data/lib/io/event.rb +1 -1
- data/license.md +2 -1
- data/readme.md +13 -5
- data.tar.gz.sig +0 -0
- metadata +8 -61
- metadata.gz.sig +0 -0
@@ -20,6 +20,8 @@
 
 #include "kqueue.h"
 #include "selector.h"
+#include "list.h"
+#include "array.h"
 
 #include <sys/epoll.h>
 #include <time.h>
@@ -36,47 +38,145 @@ static VALUE IO_Event_Selector_EPoll = Qnil;
 
 enum {EPOLL_MAX_EVENTS = 64};
 
-
+// This represents an actual fiber waiting for a specific event.
+struct IO_Event_Selector_EPoll_Waiting
+{
+	struct IO_Event_List list;
+
+	// The events the fiber is waiting for.
+	enum IO_Event events;
+
+	// The events that are currently ready.
+	enum IO_Event ready;
+
+	// The fiber value itself.
+	VALUE fiber;
+};
+
+struct IO_Event_Selector_EPoll
+{
 	struct IO_Event_Selector backend;
 	int descriptor;
 	int blocked;
+
 	struct IO_Event_Interrupt interrupt;
+	struct IO_Event_Array descriptors;
+};
+
+// This represents zero or more fibers waiting for a specific descriptor.
+struct IO_Event_Selector_EPoll_Descriptor
+{
+	struct IO_Event_List list;
+
+	// The last IO object that was used to register events.
+	VALUE io;
+
+	// The union of all events we are waiting for:
+	enum IO_Event waiting_events;
+
+	// The union of events we are registered for:
+	enum IO_Event registered_events;
 };
 
-
+static
+void IO_Event_Selector_EPoll_Waiting_mark(struct IO_Event_List *_waiting)
 {
-	struct
-
+	struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;
+
+	if (waiting->fiber) {
+		rb_gc_mark_movable(waiting->fiber);
+	}
 }
 
 static
-void
-
-
-
-
-
+void IO_Event_Selector_EPoll_Descriptor_mark(void *_descriptor)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
+
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_mark);
+
+	if (descriptor->io) {
+		rb_gc_mark_movable(descriptor->io);
 	}
 }
 
-
+static
+void IO_Event_Selector_EPoll_Type_mark(void *_selector)
 {
-	struct IO_Event_Selector_EPoll *
+	struct IO_Event_Selector_EPoll *selector = _selector;
 
-
+	IO_Event_Selector_mark(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_EPoll_Descriptor_mark);
+}
+
+static
+void IO_Event_Selector_EPoll_Waiting_compact(struct IO_Event_List *_waiting)
+{
+	struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;
 
-
+	if (waiting->fiber) {
+		waiting->fiber = rb_gc_location(waiting->fiber);
+	}
 }
 
-
+static
+void IO_Event_Selector_EPoll_Descriptor_compact(void *_descriptor)
 {
-
+	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
+
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_compact);
+
+	if (descriptor->io) {
+		descriptor->io = rb_gc_location(descriptor->io);
+	}
+}
+
+static
+void IO_Event_Selector_EPoll_Type_compact(void *_selector)
+{
+	struct IO_Event_Selector_EPoll *selector = _selector;
+
+	IO_Event_Selector_compact(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_EPoll_Descriptor_compact);
+}
+
+static
+void close_internal(struct IO_Event_Selector_EPoll *selector)
+{
+	if (selector->descriptor >= 0) {
+		close(selector->descriptor);
+		selector->descriptor = -1;
+
+		IO_Event_Interrupt_close(&selector->interrupt);
+	}
+}
+static
+void IO_Event_Selector_EPoll_Type_free(void *_selector)
+{
+	struct IO_Event_Selector_EPoll *selector = _selector;
+
+	close_internal(selector);
+
+	IO_Event_Array_free(&selector->descriptors);
+
+	free(selector);
+}
+
+static
+size_t IO_Event_Selector_EPoll_Type_size(const void *_selector)
+{
+	const struct IO_Event_Selector_EPoll *selector = _selector;
+
+	return sizeof(struct IO_Event_Selector_EPoll)
+		+ IO_Event_Array_memory_size(&selector->descriptors)
+	;
 }
 
 static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 	.wrap_struct_name = "IO_Event::Backend::EPoll",
 	.function = {
 		.dmark = IO_Event_Selector_EPoll_Type_mark,
+		.dcompact = IO_Event_Selector_EPoll_Type_compact,
 		.dfree = IO_Event_Selector_EPoll_Type_free,
 		.dsize = IO_Event_Selector_EPoll_Type_size,
 	},
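The new `IO_Event_Selector_EPoll_Waiting` and `IO_Event_Selector_EPoll_Descriptor` structures replace the old one-registration-per-fiber model: each file descriptor now has a single record that owns an intrusive list of waiting fibers, together with the union of the events they are waiting for and the events currently registered with epoll. A minimal standalone sketch of that bookkeeping idea (hypothetical names and list implementation, not the gem's actual types or `IO_Event_List` API):

```c
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

struct waiter {
	struct list_node node; // first member, so a list_node* can be cast back to a waiter*
	int events;            // bitmask of events this fiber is waiting for
};

struct descriptor_record {
	struct list_node waiters; // circular intrusive list of waiters
	int waiting_events;       // union of all waiters' events
	int registered_events;    // what is currently registered with epoll
};

static void list_init(struct list_node *head) {
	head->prev = head->next = head;
}

static void list_push(struct list_node *head, struct list_node *node) {
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

int main(void) {
	struct descriptor_record record = {.waiting_events = 0, .registered_events = 0};
	list_init(&record.waiters);

	struct waiter a = {.events = 1}, b = {.events = 2}; // e.g. READABLE, WRITABLE
	list_push(&record.waiters, &a.node);
	list_push(&record.waiters, &b.node);
	record.waiting_events = a.events | b.events;

	// When epoll reports readiness, visit every fiber waiting on this descriptor:
	for (struct list_node *node = record.waiters.next; node != &record.waiters; node = node->next) {
		struct waiter *waiter = (struct waiter *)node;
		printf("waiter events=%d\n", waiter->events);
	}

	return 0;
}
```

Because the list links live inside each waiter, adding and cancelling a wait needs no heap allocation.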
@@ -84,25 +184,172 @@ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
+inline static
+struct IO_Event_Selector_EPoll_Descriptor * IO_Event_Selector_EPoll_Descriptor_lookup(struct IO_Event_Selector_EPoll *selector, int descriptor)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Array_lookup(&selector->descriptors, descriptor);
+
+	if (!epoll_descriptor) {
+		rb_sys_fail("IO_Event_Selector_EPoll_Descriptor_lookup:IO_Event_Array_lookup");
+	}
+
+	return epoll_descriptor;
+}
+
+static inline
+uint32_t epoll_flags_from_events(int events)
+{
+	uint32_t flags = 0;
+
+	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
+	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
+	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
+
+	flags |= EPOLLHUP;
+	flags |= EPOLLERR;
+
+	if (DEBUG) fprintf(stderr, "epoll_flags_from_events events=%d flags=%d\n", events, flags);
+
+	return flags;
+}
+
+static inline
+int events_from_epoll_flags(uint32_t flags)
+{
+	int events = 0;
+
+	if (DEBUG) fprintf(stderr, "events_from_epoll_flags flags=%d\n", flags);
+
+	// Occasionally, (and noted specifically when dealing with child processes stdout), flags will only be POLLHUP. In this case, we arm the file descriptor for reading so that the HUP will be noted, rather than potentially ignored, since there is no dedicated event for it.
+	// if (flags & (EPOLLIN)) events |= IO_EVENT_READABLE;
+	if (flags & (EPOLLIN|EPOLLHUP|EPOLLERR)) events |= IO_EVENT_READABLE;
+	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
+	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
+
+	return events;
+}
+
+inline static
+int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *selector, VALUE io, int descriptor, struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor)
+{
+	if (epoll_descriptor->io == io) {
+		if (epoll_descriptor->registered_events == epoll_descriptor->waiting_events) {
+			// All the events we are interested in are already registered.
+			return 0;
+		}
+	} else {
+		// The IO has changed, we need to reset the state:
+		epoll_descriptor->registered_events = 0;
+		epoll_descriptor->io = io;
+	}
+
+	if (epoll_descriptor->waiting_events == 0) {
+		if (epoll_descriptor->registered_events) {
+			// We are no longer interested in any events.
+			epoll_ctl(selector->descriptor, EPOLL_CTL_DEL, descriptor, NULL);
+			epoll_descriptor->registered_events = 0;
+		}
+
+		epoll_descriptor->io = 0;
+
+		return 0;
+	}
+
+	// We need to register for additional events:
+	struct epoll_event event = {
+		.events = epoll_flags_from_events(epoll_descriptor->waiting_events),
+		.data = {.fd = descriptor},
+	};
+
+	int operation;
+
+	if (epoll_descriptor->registered_events) {
+		operation = EPOLL_CTL_MOD;
+	} else {
+		operation = EPOLL_CTL_ADD;
+	}
+
+	int result = epoll_ctl(selector->descriptor, operation, descriptor, &event);
+	if (result == -1) {
+		if (errno == ENOENT) {
+			result = epoll_ctl(selector->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+		} else if (errno == EEXIST) {
+			result = epoll_ctl(selector->descriptor, EPOLL_CTL_MOD, descriptor, &event);
+		}
+
+		if (result == -1) {
+			return -1;
+		}
+	}
+
+	epoll_descriptor->registered_events = epoll_descriptor->waiting_events;
+
+	return 1;
+}
+
+inline static
+int IO_Event_Selector_EPoll_Waiting_register(struct IO_Event_Selector_EPoll *selector, VALUE io, int descriptor, struct IO_Event_Selector_EPoll_Waiting *waiting)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
+
+	// We are waiting for these events:
+	epoll_descriptor->waiting_events |= waiting->events;
+
+	int result = IO_Event_Selector_EPoll_Descriptor_update(selector, io, descriptor, epoll_descriptor);
+	if (result == -1) return -1;
+
+	IO_Event_List_prepend(&epoll_descriptor->list, &waiting->list);
+
+	return result;
+}
+
+inline static
+void IO_Event_Selector_EPoll_Waiting_cancel(struct IO_Event_Selector_EPoll_Waiting *waiting)
+{
+	IO_Event_List_pop(&waiting->list);
+	waiting->fiber = 0;
+}
+
+void IO_Event_Selector_EPoll_Descriptor_initialize(void *element)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = element;
+	IO_Event_List_initialize(&epoll_descriptor->list);
+	epoll_descriptor->io = 0;
+	epoll_descriptor->waiting_events = 0;
+	epoll_descriptor->registered_events = 0;
+}
+
+void IO_Event_Selector_EPoll_Descriptor_free(void *element)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = element;
+
+	IO_Event_List_free(&epoll_descriptor->list);
+}
+
 VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
-	struct IO_Event_Selector_EPoll *
-	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_initialize(&
-
+	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	selector->descriptor = -1;
+	selector->blocked = 0;
+
+	selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
+	selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
+	IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
 
 	return instance;
 }
 
-void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_EPoll *
+void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_EPoll *selector) {
 	int descriptor = IO_Event_Interrupt_descriptor(interrupt);
 
 	struct epoll_event event = {
 		.events = EPOLLIN|EPOLLRDHUP,
-		.data = {.
+		.data = {.fd = -1},
 	};
 
-	int result = epoll_ctl(
+	int result = epoll_ctl(selector->descriptor, EPOLL_CTL_ADD, descriptor, &event);
 
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Interrupt_add:epoll_ctl");
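`IO_Event_Selector_EPoll_Descriptor_update` above picks `EPOLL_CTL_ADD` or `EPOLL_CTL_MOD` based on its cached `registered_events`, and falls back to the other operation when the kernel reports `ENOENT` or `EEXIST`, so the cache can never drift permanently out of sync with the epoll set. A small self-contained sketch of that retry pattern (hypothetical helper name, plain epoll, no gem types):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

// Try the operation we expect to be correct, then retry with the other one
// if the kernel disagrees with our cached registration state.
static int update_registration(int epoll_fd, int fd, uint32_t flags, int already_registered) {
	struct epoll_event event = {
		.events = flags,
		.data = {.fd = fd},
	};

	int operation = already_registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
	int result = epoll_ctl(epoll_fd, operation, fd, &event);

	if (result == -1) {
		if (errno == ENOENT) {
			// We thought the descriptor was registered, but it wasn't: add it.
			result = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event);
		} else if (errno == EEXIST) {
			// We thought the descriptor was new, but it was already registered: modify it.
			result = epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event);
		}
	}

	return result;
}

int main(void) {
	int epoll_fd = epoll_create1(EPOLL_CLOEXEC);

	// Claim (wrongly) that stdin is already registered; the ENOENT fallback repairs it.
	int result = update_registration(epoll_fd, STDIN_FILENO, EPOLLIN, 1);
	printf("registration result: %d\n", result);

	close(epoll_fd);
	return 0;
}
```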
@@ -110,94 +357,95 @@ void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Even
 	}
 }
 
 VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_initialize(&
+	IO_Event_Selector_initialize(&selector->backend, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Selector_EPoll_initialize:epoll_create");
 	} else {
-
+		selector->descriptor = result;
 
-		rb_update_max_fd(
+		rb_update_max_fd(selector->descriptor);
 	}
 
-	IO_Event_Interrupt_open(&
-	IO_Event_Interrupt_add(&
+	IO_Event_Interrupt_open(&selector->interrupt);
+	IO_Event_Interrupt_add(&selector->interrupt, selector);
 
 	return self;
 }
 
 VALUE IO_Event_Selector_EPoll_loop(VALUE self) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return
+	return selector->backend.loop;
 }
 
 VALUE IO_Event_Selector_EPoll_close(VALUE self) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	close_internal(
+	close_internal(selector);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
 {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_fiber_transfer(
+	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 }
 
 VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_resume(&
+	return IO_Event_Selector_resume(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_EPoll_yield(VALUE self)
 {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_yield(&
+	return IO_Event_Selector_yield(&selector->backend);
 }
 
 VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
 {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_queue_push(&
+	IO_Event_Selector_queue_push(&selector->backend, fiber);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_raise(&
+	return IO_Event_Selector_raise(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return
+	return selector->backend.ready ? Qtrue : Qfalse;
 }
 
 struct process_wait_arguments {
-	struct IO_Event_Selector_EPoll *
-
+	struct IO_Event_Selector_EPoll *selector;
+	struct IO_Event_Selector_EPoll_Waiting *waiting;
+	int pid;
 	int flags;
 	int descriptor;
 };
@@ -206,106 +454,77 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	IO_Event_Selector_fiber_transfer(arguments->
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-
+	if (arguments->waiting->ready) {
+		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
+	} else {
+		return Qfalse;
+	}
 }
 
 static
 VALUE process_wait_ensure(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	// epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
-
 	close(arguments->descriptor);
 
+	IO_Event_Selector_EPoll_Waiting_cancel(arguments->waiting);
+
 	return Qnil;
 }
 
-
-
-
+struct IO_Event_List_Type IO_Event_Selector_EPoll_process_wait_list_type = {};
+
+VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-
-
-		.pid = NUM2PIDT(pid),
-		.flags = NUM2INT(flags),
-	};
+	pid_t pid = NUM2PIDT(_pid);
+	int flags = NUM2INT(_flags);
 
-
+	int descriptor = pidfd_open(pid, 0);
 
-	if (
+	if (descriptor == -1) {
 		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:pidfd_open");
 	}
 
-	rb_update_max_fd(
+	rb_update_max_fd(descriptor);
 
-	struct
-		.
-		.
+	struct IO_Event_Selector_EPoll_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_EPoll_process_wait_list_type},
+		.fiber = fiber,
+		.events = IO_EVENT_READABLE,
 	};
 
-	int result =
+	int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
 
 	if (result == -1) {
-		close(
-		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:
+		close(descriptor);
+		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:IO_Event_Selector_EPoll_Waiting_register");
 	}
 
-
-
-
-
-
-
-
-	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
-	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
-	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
-
-	flags |= EPOLLHUP;
-	flags |= EPOLLERR;
-
-	// Immediately remove this descriptor after reading one event:
-	flags |= EPOLLONESHOT;
-
-	if (DEBUG) fprintf(stderr, "epoll_flags_from_events events=%d flags=%d\n", events, flags);
-
-	return flags;
-}
-
-static inline
-int events_from_epoll_flags(uint32_t flags) {
-	int events = 0;
-
-	if (DEBUG) fprintf(stderr, "events_from_epoll_flags flags=%d\n", flags);
-
-	// Occasionally, (and noted specifically when dealing with child processes stdout), flags will only be POLLHUP. In this case, we arm the file descriptor for reading so that the HUP will be noted, rather than potentially ignored, since there is no dedicated event for it.
-	// if (flags & (EPOLLIN)) events |= IO_EVENT_READABLE;
-	if (flags & (EPOLLIN|EPOLLHUP|EPOLLERR)) events |= IO_EVENT_READABLE;
-	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
-	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
+	struct process_wait_arguments process_wait_arguments = {
+		.selector = selector,
+		.pid = pid,
+		.flags = flags,
+		.descriptor = descriptor,
+		.waiting = &waiting,
+	};
 
-	return
+	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
 struct io_wait_arguments {
-	struct IO_Event_Selector_EPoll *
-
-	int duplicate;
+	struct IO_Event_Selector_EPoll *selector;
+	struct IO_Event_Selector_EPoll_Waiting *waiting;
 };
 
 static
 VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
-		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
-
-		close(arguments->duplicate);
-	} else {
-		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
-	}
+	IO_Event_Selector_EPoll_Waiting_cancel(arguments->waiting);
 
 	return Qnil;
 };
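The rewritten `process_wait` path opens a pidfd for the child and registers it with the same epoll instance, so a child exit is delivered like any other readable event instead of requiring a dedicated wait thread. A standalone sketch of that mechanism (assumes Linux 5.3+ and recent kernel headers; it uses the raw `SYS_pidfd_open` syscall since older glibc does not export a `pidfd_open` wrapper, and it omits error handling):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) _exit(42); // child exits immediately

	int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);
	int epoll_fd = epoll_create1(EPOLL_CLOEXEC);

	struct epoll_event event = {.events = EPOLLIN, .data = {.fd = pidfd}};
	epoll_ctl(epoll_fd, EPOLL_CTL_ADD, pidfd, &event);

	// The pidfd is reported readable once the child has terminated:
	struct epoll_event ready;
	epoll_wait(epoll_fd, &ready, 1, -1);

	int status = 0;
	waitpid(pid, &status, 0);
	printf("child exited with status %d\n", WEXITSTATUS(status));

	close(pidfd);
	close(epoll_fd);
	return 0;
}
```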
@@ -314,72 +533,44 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
-
-	if (DEBUG) fprintf(stderr, "io_wait_transfer errno=%d\n", errno);
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-
-
-
+	if (arguments->waiting->ready) {
+		return RB_INT2NUM(arguments->waiting->ready);
+	} else {
 		return Qfalse;
 	}
-
-	if (DEBUG) fprintf(stderr, "io_wait_transfer flags=%d\n", NUM2INT(result));
-
-	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
 };
 
+struct IO_Event_List_Type IO_Event_Selector_EPoll_io_wait_list_type = {};
+
 VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
-
-	struct epoll_event event = {0};
-
-	int descriptor = IO_Event_Selector_io_descriptor(io);
-	int duplicate = -1;
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-
-	event.data.ptr = (void*)fiber;
+	int descriptor = IO_Event_Selector_io_descriptor(io);
 
-
-
-
-
+	struct IO_Event_Selector_EPoll_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_EPoll_io_wait_list_type},
+		.fiber = fiber,
+		.events = RB_NUM2INT(events),
+	};
 
-
-		// The file descriptor was already inserted into epoll.
-		duplicate = dup(descriptor);
-
-		if (duplicate == -1) {
-			rb_sys_fail("IO_Event_Selector_EPoll_io_wait:dup");
-		}
-
-		descriptor = duplicate;
-
-		rb_update_max_fd(descriptor);
-
-		result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
-	}
+	int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
 
 	if (result == -1) {
-		// If we duplicated the file descriptor, ensure it's closed:
-		if (duplicate >= 0) {
-			close(duplicate);
-		}
-
 		if (errno == EPERM) {
-			IO_Event_Selector_queue_push(&
-			IO_Event_Selector_yield(&
+			IO_Event_Selector_queue_push(&selector->backend, fiber);
+			IO_Event_Selector_yield(&selector->backend);
 			return events;
 		}
 
-		rb_sys_fail("IO_Event_Selector_EPoll_io_wait:
+		rb_sys_fail("IO_Event_Selector_EPoll_io_wait:IO_Event_Selector_EPoll_Waiting_register");
 	}
 
 	struct io_wait_arguments io_wait_arguments = {
-		.
-		.
-		.duplicate = duplicate
+		.selector = selector,
+		.waiting = &waiting,
 	};
 
 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
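The `EPERM` branch above exists because some descriptors, most notably regular files, cannot be added to an epoll set; in that case the fiber is simply queued and the requested events are reported back as ready. A quick standalone check of that kernel behaviour (temporary file created with `tmpfile`, purely illustrative):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void) {
	int epoll_fd = epoll_create1(EPOLL_CLOEXEC);

	FILE *file = tmpfile();      // a regular file on disk
	int file_fd = fileno(file);

	struct epoll_event event = {.events = EPOLLIN, .data = {.fd = file_fd}};

	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, file_fd, &event) == -1) {
		// Expected: EPERM ("Operation not permitted") — regular files are always "ready".
		printf("epoll_ctl failed: %s\n", strerror(errno));
	}

	fclose(file);
	close(epoll_fd);
	return 0;
}
```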
@@ -411,12 +602,14 @@ VALUE io_read_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -427,9 +620,11 @@ VALUE io_read_loop(VALUE _arguments) {
 		} else {
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static
@@ -499,16 +694,18 @@ VALUE io_write_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
 	}
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -519,9 +716,11 @@ VALUE io_write_loop(VALUE _arguments) {
 		} else {
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 };
 
 static
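Both `io_read_loop` and `io_write_loop` now accumulate a running `total` and report that, rather than deriving the result from the final offset. The same accumulation pattern, extracted into a plain `write(2)` loop with no fiber scheduling (hypothetical helper, simplified error handling):

```c
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

// Keep writing until `length` bytes have gone out (or an error occurs),
// returning the total number of bytes actually written.
static ssize_t write_all(int fd, const char *base, size_t size, size_t offset, size_t length) {
	size_t total = 0;
	size_t maximum_size = size - offset;

	while (maximum_size) {
		ssize_t result = write(fd, base + offset, maximum_size);

		if (result > 0) {
			total += result;
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else {
			return -1; // a real caller would inspect errno (EAGAIN, EINTR, ...)
		}

		maximum_size = size - offset;
	}

	return total;
}

int main(void) {
	const char message[] = "hello\n";
	ssize_t result = write_all(STDOUT_FILENO, message, sizeof(message) - 1, 0, sizeof(message) - 1);
	return result == -1 ? 1 : 0;
}
```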
@@ -601,13 +800,15 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-	struct IO_Event_Selector_EPoll *
+	struct IO_Event_Selector_EPoll *selector;
 
 	int count;
 	struct epoll_event events[EPOLL_MAX_EVENTS];
-
+
 	struct timespec * timeout;
 	struct timespec storage;
+
+	struct IO_Event_List saved;
 };
 
 static int make_timeout_ms(struct timespec * timeout) {
@@ -636,7 +837,7 @@ void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 
 #if defined(HAVE_EPOLL_PWAIT2)
-	arguments->count = epoll_pwait2(arguments->
+	arguments->count = epoll_pwait2(arguments->selector->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout, NULL);
 
 	// Comment out the above line and enable the below lines to test ENOSYS code path.
 	// arguments->count = -1;
@@ -650,16 +851,16 @@ void * select_internal(void *_arguments) {
 	}
 #endif
 
-	arguments->count = epoll_wait(arguments->
+	arguments->count = epoll_wait(arguments->selector->descriptor, arguments->events, EPOLL_MAX_EVENTS, make_timeout_ms(arguments->timeout));
 
 	return NULL;
 }
 
 static
 void select_internal_without_gvl(struct select_arguments *arguments) {
-	arguments->
+	arguments->selector->blocked = 1;
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-	arguments->
+	arguments->selector->blocked = 0;
 
 	if (arguments->count == -1) {
 		if (errno != EINTR) {
@@ -683,18 +884,97 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 	}
 }
 
+static
+int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event, struct IO_Event_List *saved)
+{
+	int descriptor = event->data.fd;
+
+	// This is the mask of all events that occured for the given descriptor:
+	enum IO_Event ready_events = events_from_epoll_flags(event->events);
+
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
+	struct IO_Event_List *list = &epoll_descriptor->list;
+	struct IO_Event_List *node = list->tail;
+
+	// Reset the events back to 0 so that we can re-arm if necessary:
+	epoll_descriptor->waiting_events = 0;
+
+	if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d epoll_descriptor=%p\n", descriptor, ready_events, epoll_descriptor);
+
+	// It's possible (but unlikely) that the address of list will changing during iteration.
+	while (node != list) {
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: node=%p list=%p type=%p\n", node, list, node->type);
+
+		struct IO_Event_Selector_EPoll_Waiting *waiting = (struct IO_Event_Selector_EPoll_Waiting *)node;
+
+		// Compute the intersection of the events we are waiting for and the events that occured:
+		enum IO_Event matching_events = waiting->events & ready_events;
+
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, waiting_events=%d, matching_events=%d\n", descriptor, ready_events, waiting->events, matching_events);
+
+		if (matching_events) {
+			IO_Event_List_append(node, saved);
+
+			// Resume the fiber:
+			waiting->ready = matching_events;
+			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+
+			node = saved->tail;
+			IO_Event_List_pop(saved);
+		} else {
+			// We are still waiting for the events:
+			epoll_descriptor->waiting_events |= waiting->events;
+			node = node->tail;
+		}
+	}
+
+	return IO_Event_Selector_EPoll_Descriptor_update(selector, epoll_descriptor->io, descriptor, epoll_descriptor);
+}
+
+static
+VALUE select_handle_events(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+	struct IO_Event_Selector_EPoll *selector = arguments->selector;
+
+	for (int i = 0; i < arguments->count; i += 1) {
+		const struct epoll_event *event = &arguments->events[i];
+		if (DEBUG) fprintf(stderr, "-> fd=%d events=%d\n", event->data.fd, event->events);
+
+		if (event->data.fd >= 0) {
+			IO_Event_Selector_EPoll_handle(selector, event, &arguments->saved);
+		} else {
+			IO_Event_Interrupt_clear(&selector->interrupt);
+		}
+	}
+
+	return INT2NUM(arguments->count);
+}
+
+static
+VALUE select_handle_events_ensure(VALUE _arguments)
+{
+	struct select_arguments *arguments = (struct select_arguments *)_arguments;
+
+	IO_Event_List_free(&arguments->saved);
+
+	return Qnil;
+}
+
+// TODO This function is not re-entrant and we should document and assert as such.
 VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	int ready = IO_Event_Selector_queue_flush(&
+	int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
 	struct select_arguments arguments = {
-		.
+		.selector = selector,
 		.storage = {
 			.tv_sec = 0,
 			.tv_nsec = 0
 		},
+		.saved = {},
 	};
 
 	arguments.timeout = &arguments.storage;
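`IO_Event_Selector_EPoll_handle` wakes fibers while iterating the descriptor's waiter list, and a woken fiber may unlink itself (or other waiters) before control returns; the `saved` node is spliced in next to the current waiter so iteration can safely resume from it afterwards. A standalone sketch of that sentinel technique (hypothetical list implementation, not the gem's `IO_Event_List` API):

```c
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *head) { head->prev = head->next = head; }

static void list_insert_after(struct list_node *position, struct list_node *node) {
	node->prev = position;
	node->next = position->next;
	position->next->prev = node;
	position->next = node;
}

static void list_remove(struct list_node *node) {
	node->prev->next = node->next;
	node->next->prev = node->prev;
	node->prev = node->next = node;
}

struct waiter { struct list_node node; int id; };

static void wake(struct waiter *waiter) {
	// A real selector would transfer to the fiber here; the fiber may unlink
	// itself (and possibly other waiters) before control returns.
	printf("waking waiter %d\n", waiter->id);
	list_remove(&waiter->node);
}

int main(void) {
	struct list_node list, saved;
	list_init(&list);
	list_init(&saved);

	struct waiter waiters[3] = {{.id = 0}, {.id = 1}, {.id = 2}};
	for (int i = 0; i < 3; i += 1) list_insert_after(list.prev, &waiters[i].node);

	struct list_node *node = list.next;
	while (node != &list) {
		struct waiter *waiter = (struct waiter *)node;

		list_insert_after(node, &saved); // remember where to resume
		wake(waiter);                    // may mutate the list arbitrarily
		node = saved.next;               // continue after the sentinel
		list_remove(&saved);
	}

	return 0;
}
```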
@@ -707,7 +987,7 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 	// 2. Didn't process any events from non-blocking select (above), and
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
-	if (!ready && !arguments.count && !
+	if (!ready && !arguments.count && !selector->backend.ready) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
 		if (!timeout_nonblocking(arguments.timeout)) {
@@ -716,30 +996,20 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 		}
 	}
 
-
-
-
-
-		if (event->data.ptr) {
-			VALUE fiber = (VALUE)event->data.ptr;
-			VALUE result = INT2NUM(event->events);
-
-			IO_Event_Selector_fiber_transfer(fiber, 1, &result);
-		} else {
-			IO_Event_Interrupt_clear(&data->interrupt);
-		}
+	if (arguments.count) {
+		return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+	} else {
+		return RB_INT2NUM(0);
 	}
-
-	return INT2NUM(arguments.count);
 }
 
 VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
-	struct IO_Event_Selector_EPoll *
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type,
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
 	// If we are blocking, we can schedule a nop event to wake up the selector:
-	if (
-		IO_Event_Interrupt_signal(&
+	if (selector->blocked) {
+		IO_Event_Interrupt_signal(&selector->interrupt);
 
 		return Qtrue;
 	}