io-event-machty 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/IO_Event.bundle +0 -0
- data/ext/Makefile +268 -0
- data/ext/event.o +0 -0
- data/ext/extconf.h +10 -0
- data/ext/extconf.rb +64 -0
- data/ext/interrupt.o +0 -0
- data/ext/io/event/event.c +52 -0
- data/ext/io/event/event.h +39 -0
- data/ext/io/event/interrupt.c +108 -0
- data/ext/io/event/interrupt.h +47 -0
- data/ext/io/event/selector/epoll.c +657 -0
- data/ext/io/event/selector/epoll.h +27 -0
- data/ext/io/event/selector/kqueue.c +742 -0
- data/ext/io/event/selector/kqueue.h +27 -0
- data/ext/io/event/selector/pidfd.c +36 -0
- data/ext/io/event/selector/selector.c +294 -0
- data/ext/io/event/selector/selector.h +130 -0
- data/ext/io/event/selector/uring.c +722 -0
- data/ext/io/event/selector/uring.h +27 -0
- data/ext/kqueue.o +0 -0
- data/ext/mkmf.log +273 -0
- data/ext/selector.o +0 -0
- data/lib/io/event/debug/selector.rb +161 -0
- data/lib/io/event/interrupt.rb +57 -0
- data/lib/io/event/selector/select.rb +270 -0
- data/lib/io/event/selector.rb +54 -0
- data/lib/io/event/version.rb +23 -0
- data/lib/io/event.rb +29 -0
- metadata +127 -0
data/ext/io/event/selector/kqueue.c
@@ -0,0 +1,742 @@
+// Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+#include "kqueue.h"
+#include "selector.h"
+
+#include <sys/event.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <errno.h>
+
+enum {
+    DEBUG = 0,
+    DEBUG_IO_READ = 0,
+    DEBUG_IO_WRITE = 0,
+    DEBUG_IO_WAIT = 0
+};
+
+static VALUE IO_Event_Selector_KQueue = Qnil;
+
+enum {KQUEUE_MAX_EVENTS = 64};
+
+struct IO_Event_Selector_KQueue {
+    struct IO_Event_Selector backend;
+    int descriptor;
+
+    int blocked;
+};
+
+void IO_Event_Selector_KQueue_Type_mark(void *_data)
+{
+    struct IO_Event_Selector_KQueue *data = _data;
+    IO_Event_Selector_mark(&data->backend);
+}
+
+static
+void close_internal(struct IO_Event_Selector_KQueue *data) {
+    if (data->descriptor >= 0) {
+        close(data->descriptor);
+        data->descriptor = -1;
+    }
+}
+
+void IO_Event_Selector_KQueue_Type_free(void *_data)
+{
+    struct IO_Event_Selector_KQueue *data = _data;
+
+    close_internal(data);
+
+    free(data);
+}
+
+size_t IO_Event_Selector_KQueue_Type_size(const void *data)
+{
+    return sizeof(struct IO_Event_Selector_KQueue);
+}
+
+static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
+    .wrap_struct_name = "IO_Event::Backend::KQueue",
+    .function = {
+        .dmark = IO_Event_Selector_KQueue_Type_mark,
+        .dfree = IO_Event_Selector_KQueue_Type_free,
+        .dsize = IO_Event_Selector_KQueue_Type_size,
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+};
+
+VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    IO_Event_Selector_initialize(&data->backend, Qnil);
+    data->descriptor = -1;
+    data->blocked = 0;
+
+    return instance;
+}
+
+VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    IO_Event_Selector_initialize(&data->backend, loop);
+    int result = kqueue();
+
+    if (result == -1) {
+        rb_sys_fail("IO_Event_Selector_KQueue_initialize:kqueue");
+    } else {
+        ioctl(result, FIOCLEX);
+        data->descriptor = result;
+
+        rb_update_max_fd(data->descriptor);
+    }
+
+    return self;
+}
+
+VALUE IO_Event_Selector_KQueue_loop(VALUE self) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return data->backend.loop;
+}
+
+VALUE IO_Event_Selector_KQueue_close(VALUE self) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    close_internal(data);
+
+    return Qnil;
+}
+
+VALUE IO_Event_Selector_KQueue_transfer(VALUE self)
+{
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+}
+
+VALUE IO_Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
+{
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return IO_Event_Selector_resume(&data->backend, argc, argv);
+}
+
+VALUE IO_Event_Selector_KQueue_yield(VALUE self)
+{
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return IO_Event_Selector_yield(&data->backend);
+}
+
+VALUE IO_Event_Selector_KQueue_push(VALUE self, VALUE fiber)
+{
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    IO_Event_Selector_queue_push(&data->backend, fiber);
+
+    return Qnil;
+}
+
+VALUE IO_Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+{
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return IO_Event_Selector_raise(&data->backend, argc, argv);
+}
+
+VALUE IO_Event_Selector_KQueue_ready_p(VALUE self) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    return data->backend.ready ? Qtrue : Qfalse;
+}
+
+struct process_wait_arguments {
+    struct IO_Event_Selector_KQueue *data;
+    pid_t pid;
+    int flags;
+};
+
+static
+int process_add_filters(int descriptor, int ident, VALUE fiber) {
+    struct kevent event = {0};
+
+    event.ident = ident;
+    event.filter = EVFILT_PROC;
+    event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
+    event.fflags = NOTE_EXIT;
+    event.udata = (void*)fiber;
+
+    int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
+
+    if (result == -1) {
+        // No such process - the process has probably already terminated:
+        if (errno == ESRCH) {
+            return 0;
+        }
+
+        rb_sys_fail("process_add_filters:kevent");
+    }
+
+    return 1;
+}
+
+static
+void process_remove_filters(int descriptor, int ident) {
+    struct kevent event = {0};
+
+    event.ident = ident;
+    event.filter = EVFILT_PROC;
+    event.flags = EV_DELETE | EV_UDATA_SPECIFIC;
+    event.fflags = NOTE_EXIT;
+
+    // Ignore the result.
+    kevent(descriptor, &event, 1, NULL, 0, NULL);
+}
+
+static
+VALUE process_wait_transfer(VALUE _arguments) {
+    struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+    IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+    return IO_Event_Selector_process_status_wait(arguments->pid);
+}
+
+static
+VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+    struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+    process_remove_filters(arguments->data->descriptor, arguments->pid);
+
+    rb_exc_raise(exception);
+}
+
+VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    struct process_wait_arguments process_wait_arguments = {
+        .data = data,
+        .pid = NUM2PIDT(pid),
+        .flags = RB_NUM2INT(flags),
+    };
+
+    VALUE result = Qnil;
+
+    // This loop should not be needed but I have seen a race condition between NOTE_EXIT and `waitpid`, thus the result would be (unexpectedly) nil. So we put this in a loop to retry if the race condition shows up:
+    while (NIL_P(result)) {
+        int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
+
+        if (waiting) {
+            result = rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
+        } else {
+            result = IO_Event_Selector_process_status_wait(process_wait_arguments.pid);
+        }
+    }
+
+    return result;
+}
+
+static
+int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+    int count = 0;
+    struct kevent kevents[2] = {0};
+
+    if (events & IO_EVENT_READABLE) {
+        kevents[count].ident = ident;
+        kevents[count].filter = EVFILT_READ;
+        kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
+        kevents[count].udata = (void*)fiber;
+
+        // #ifdef EV_OOBAND
+        // if (events & PRIORITY) {
+        //     kevents[count].flags |= EV_OOBAND;
+        // }
+        // #endif
+
+        count++;
+    }
+
+    if (events & IO_EVENT_WRITABLE) {
+        kevents[count].ident = ident;
+        kevents[count].filter = EVFILT_WRITE;
+        kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
+        kevents[count].udata = (void*)fiber;
+        count++;
+    }
+
+    int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+
+    if (result == -1) {
+        rb_sys_fail("io_add_filters:kevent");
+    }
+
+    return events;
+}
+
+static
+void io_remove_filters(int descriptor, int ident, int events) {
+    int count = 0;
+    struct kevent kevents[2] = {0};
+
+    if (events & IO_EVENT_READABLE) {
+        kevents[count].ident = ident;
+        kevents[count].filter = EVFILT_READ;
+        kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
+
+        count++;
+    }
+
+    if (events & IO_EVENT_WRITABLE) {
+        kevents[count].ident = ident;
+        kevents[count].filter = EVFILT_WRITE;
+        kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
+        count++;
+    }
+
+    // Ignore the result.
+    kevent(descriptor, kevents, count, NULL, 0, NULL);
+}
+
+struct io_wait_arguments {
+    struct IO_Event_Selector_KQueue *data;
+    int events;
+    int descriptor;
+};
+
+static
+VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+    struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+    io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
+
+    rb_exc_raise(exception);
+}
+
+static inline
+int events_from_kqueue_filter(int filter) {
+    if (filter == EVFILT_READ) return IO_EVENT_READABLE;
+    if (filter == EVFILT_WRITE) return IO_EVENT_WRITABLE;
+
+    return 0;
+}
+
+static
+VALUE io_wait_transfer(VALUE _arguments) {
+    struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+    VALUE result = IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+    // If the fiber is being cancelled, it might be resumed with nil:
+    if (!RTEST(result)) {
+        return Qfalse;
+    }
+
+    return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
+}
+
+VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    int descriptor = IO_Event_Selector_io_descriptor(io);
+
+    struct io_wait_arguments io_wait_arguments = {
+        .events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
+        .data = data,
+        .descriptor = descriptor,
+    };
+
+    if (DEBUG_IO_WAIT) fprintf(stderr, "IO_Event_Selector_KQueue_io_wait descriptor=%d\n", descriptor);
+
+    return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+}
+
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
+struct io_read_arguments {
+    VALUE self;
+    VALUE fiber;
+    VALUE io;
+
+    int flags;
+
+    int descriptor;
+
+    VALUE buffer;
+    size_t length;
+};
+
+static
+VALUE io_read_loop(VALUE _arguments) {
+    struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+    void *base;
+    size_t size;
+    rb_io_buffer_get_bytes_for_writing(arguments->buffer, &base, &size);
+
+    size_t offset = 0;
+    size_t length = arguments->length;
+
+    if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
+
+    while (true) {
+        size_t maximum_size = size - offset;
+        if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld)\n", arguments->descriptor, offset, maximum_size);
+        ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+        if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
+
+        if (result > 0) {
+            offset += result;
+            if ((size_t)result >= length) break;
+            length -= result;
+        } else if (result == 0) {
+            break;
+        } else if (length > 0 && IO_Event_try_again(errno)) {
+            if (DEBUG_IO_READ) fprintf(stderr, "IO_Event_Selector_KQueue_io_wait(fd=%d, length=%zu)\n", arguments->descriptor, length);
+            IO_Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_READABLE));
+        } else {
+            if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
+            return rb_fiber_scheduler_io_result(-1, errno);
+        }
+    }
+
+    if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
+    return rb_fiber_scheduler_io_result(offset, 0);
+}
+
+static
+VALUE io_read_ensure(VALUE _arguments) {
+    struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+    IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+    return Qnil;
+}
+
+VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    int descriptor = IO_Event_Selector_io_descriptor(io);
+
+    size_t length = NUM2SIZET(_length);
+
+    struct io_read_arguments io_read_arguments = {
+        .self = self,
+        .fiber = fiber,
+        .io = io,
+
+        .flags = IO_Event_Selector_nonblock_set(descriptor),
+        .descriptor = descriptor,
+        .buffer = buffer,
+        .length = length,
+    };
+
+    return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+}
+
+struct io_write_arguments {
+    VALUE self;
+    VALUE fiber;
+    VALUE io;
+
+    int flags;
+
+    int descriptor;
+
+    VALUE buffer;
+    size_t length;
+};
+
+static
+VALUE io_write_loop(VALUE _arguments) {
+    struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+    const void *base;
+    size_t size;
+    rb_io_buffer_get_bytes_for_reading(arguments->buffer, &base, &size);
+
+    size_t offset = 0;
+    size_t length = arguments->length;
+
+    if (length > size) {
+        rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+    }
+
+    if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
+
+    while (true) {
+        size_t maximum_size = size - offset;
+        if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld, length=%zu)\n", arguments->descriptor, offset, maximum_size, length);
+        ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
+        if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
+
+        if (result > 0) {
+            offset += result;
+            if ((size_t)result >= length) break;
+            length -= result;
+        } else if (result == 0) {
+            break;
+        } else if (length > 0 && IO_Event_try_again(errno)) {
+            if (DEBUG_IO_WRITE) fprintf(stderr, "IO_Event_Selector_KQueue_io_wait(fd=%d, length=%zu)\n", arguments->descriptor, length);
+            IO_Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_READABLE));
+        } else {
+            if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
+            return rb_fiber_scheduler_io_result(-1, errno);
+        }
+    }
+
+    if (DEBUG_IO_READ) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
+    return rb_fiber_scheduler_io_result(offset, 0);
+};
+
+static
+VALUE io_write_ensure(VALUE _arguments) {
+    struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+    IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+    return Qnil;
+};
+
+VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    int descriptor = IO_Event_Selector_io_descriptor(io);
+
+    size_t length = NUM2SIZET(_length);
+
+    struct io_write_arguments io_write_arguments = {
+        .self = self,
+        .fiber = fiber,
+        .io = io,
+
+        .flags = IO_Event_Selector_nonblock_set(descriptor),
+        .descriptor = descriptor,
+        .buffer = buffer,
+        .length = length,
+    };
+
+    return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+}
+
+#endif
+
+static
+struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
+    if (duration == Qnil) {
+        return NULL;
+    }
+
+    if (FIXNUM_P(duration)) {
+        storage->tv_sec = NUM2TIMET(duration);
+        storage->tv_nsec = 0;
+
+        return storage;
+    }
+
+    else if (RB_FLOAT_TYPE_P(duration)) {
+        double value = RFLOAT_VALUE(duration);
+        time_t seconds = value;
+
+        storage->tv_sec = seconds;
+        storage->tv_nsec = (value - seconds) * 1000000000L;
+
+        return storage;
+    }
+
+    rb_raise(rb_eRuntimeError, "unable to convert timeout");
+}
+
+static
+int timeout_nonblocking(struct timespec * timespec) {
+    return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+}
+
+struct select_arguments {
+    struct IO_Event_Selector_KQueue *data;
+
+    int count;
+    struct kevent events[KQUEUE_MAX_EVENTS];
+
+    struct timespec storage;
+    struct timespec *timeout;
+};
+
+static
+void * select_internal(void *_arguments) {
+    struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+    arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+
+    return NULL;
+}
+
+static
+void select_internal_without_gvl(struct select_arguments *arguments) {
+    arguments->data->blocked = 1;
+
+    rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+    arguments->data->blocked = 0;
+
+    if (arguments->count == -1) {
+        if (errno != EINTR) {
+            rb_sys_fail("select_internal_without_gvl:kevent");
+        } else {
+            arguments->count = 0;
+        }
+    }
+}
+
+static
+void select_internal_with_gvl(struct select_arguments *arguments) {
+    select_internal((void *)arguments);
+
+    if (arguments->count == -1) {
+        if (errno != EINTR) {
+            rb_sys_fail("select_internal_with_gvl:kevent");
+        } else {
+            arguments->count = 0;
+        }
+    }
+}
+
+VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    int ready = IO_Event_Selector_queue_flush(&data->backend);
+
+    struct select_arguments arguments = {
+        .data = data,
+        .count = KQUEUE_MAX_EVENTS,
+        .storage = {
+            .tv_sec = 0,
+            .tv_nsec = 0
+        }
+    };
+
+    arguments.timeout = &arguments.storage;
+
+    // We break this implementation into two parts.
+    // (1) count = kevent(..., timeout = 0)
+    // (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
+    // This allows us to avoid releasing and reacquiring the GVL.
+    // Non-comprehensive testing shows this gives a 1.5x speedup.
+
+    // First do the syscall with no timeout to get any immediately available events:
+    if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl timeout=" PRINTF_TIMESPEC "\r\n", PRINTF_TIMESPEC_ARGS(arguments.storage));
+    select_internal_with_gvl(&arguments);
+    if (DEBUG) fprintf(stderr, "\r\nselect_internal_with_gvl done\r\n");
+
+    // If we:
+    // 1. Didn't process any ready fibers, and
+    // 2. Didn't process any events from non-blocking select (above), and
+    // 3. There are no items in the ready list,
+    // then we can perform a blocking select.
+    if (!ready && !arguments.count && !data->backend.ready) {
+        arguments.timeout = make_timeout(duration, &arguments.storage);
+
+        if (!timeout_nonblocking(arguments.timeout)) {
+            arguments.count = KQUEUE_MAX_EVENTS;
+
+            if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_select timeout=" PRINTF_TIMESPEC "\n", PRINTF_TIMESPEC_ARGS(arguments.storage));
+            select_internal_without_gvl(&arguments);
+        }
+    }
+
+    for (int i = 0; i < arguments.count; i += 1) {
+        if (arguments.events[i].udata) {
+            VALUE fiber = (VALUE)arguments.events[i].udata;
+            VALUE result = INT2NUM(arguments.events[i].filter);
+
+            IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+        }
+    }
+
+    return INT2NUM(arguments.count);
+}
+
+VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
+    struct IO_Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+    if (data->blocked) {
+        struct kevent trigger = {0};
+
+        trigger.filter = EVFILT_USER;
+        trigger.flags = EV_ADD | EV_CLEAR | EV_UDATA_SPECIFIC;
+        trigger.fflags = NOTE_TRIGGER;
+
+        int result = kevent(data->descriptor, &trigger, 1, NULL, 0, NULL);
+
+        if (result == -1) {
+            rb_sys_fail("IO_Event_Selector_KQueue_wakeup:kevent");
+        }
+
+        return Qtrue;
+    }
+
+    return Qfalse;
+}
+
+void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
+    IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
+    rb_gc_register_mark_object(IO_Event_Selector_KQueue);
+
+    rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
+    rb_define_method(IO_Event_Selector_KQueue, "initialize", IO_Event_Selector_KQueue_initialize, 1);
+
+    rb_define_method(IO_Event_Selector_KQueue, "loop", IO_Event_Selector_KQueue_loop, 0);
+
+    rb_define_method(IO_Event_Selector_KQueue, "transfer", IO_Event_Selector_KQueue_transfer, 0);
+    rb_define_method(IO_Event_Selector_KQueue, "resume", IO_Event_Selector_KQueue_resume, -1);
+    rb_define_method(IO_Event_Selector_KQueue, "yield", IO_Event_Selector_KQueue_yield, 0);
+    rb_define_method(IO_Event_Selector_KQueue, "push", IO_Event_Selector_KQueue_push, 1);
+    rb_define_method(IO_Event_Selector_KQueue, "raise", IO_Event_Selector_KQueue_raise, -1);
+
+    rb_define_method(IO_Event_Selector_KQueue, "ready?", IO_Event_Selector_KQueue_ready_p, 0);
+
+    rb_define_method(IO_Event_Selector_KQueue, "select", IO_Event_Selector_KQueue_select, 1);
+    rb_define_method(IO_Event_Selector_KQueue, "wakeup", IO_Event_Selector_KQueue_wakeup, 0);
+    rb_define_method(IO_Event_Selector_KQueue, "close", IO_Event_Selector_KQueue_close, 0);
+
+    rb_define_method(IO_Event_Selector_KQueue, "io_wait", IO_Event_Selector_KQueue_io_wait, 3);
+
+#ifdef HAVE_RUBY_IO_BUFFER_H
+    rb_define_method(IO_Event_Selector_KQueue, "io_read", IO_Event_Selector_KQueue_io_read, 4);
+    rb_define_method(IO_Event_Selector_KQueue, "io_write", IO_Event_Selector_KQueue_io_write, 4);
+#endif
+
+    rb_define_method(IO_Event_Selector_KQueue, "process_wait", IO_Event_Selector_KQueue_process_wait, 3);
+}