event 0.4.4 → 0.8.0

@@ -1,347 +0,0 @@
- // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
- //
- // Permission is hereby granted, free of charge, to any person obtaining a copy
- // of this software and associated documentation files (the "Software"), to deal
- // in the Software without restriction, including without limitation the rights
- // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- // copies of the Software, and to permit persons to whom the Software is
- // furnished to do so, subject to the following conditions:
- //
- // The above copyright notice and this permission notice shall be included in
- // all copies or substantial portions of the Software.
- //
- // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- // THE SOFTWARE.
-
- #include "kqueue.h"
- #include "backend.h"
-
- #include <sys/event.h>
- #include <sys/ioctl.h>
- #include <time.h>
-
- static VALUE Event_Backend_KQueue = Qnil;
- static ID id_fileno, id_transfer;
-
- enum {KQUEUE_MAX_EVENTS = 64};
-
- struct Event_Backend_KQueue {
-     VALUE loop;
-     int descriptor;
- };
-
- void Event_Backend_KQueue_Type_mark(void *_data)
- {
-     struct Event_Backend_KQueue *data = _data;
-     rb_gc_mark(data->loop);
- }
-
- static
- void close_internal(struct Event_Backend_KQueue *data) {
-     if (data->descriptor >= 0) {
-         close(data->descriptor);
-         data->descriptor = -1;
-     }
- }
-
- void Event_Backend_KQueue_Type_free(void *_data)
- {
-     struct Event_Backend_KQueue *data = _data;
-
-     close_internal(data);
-
-     free(data);
- }
-
- size_t Event_Backend_KQueue_Type_size(const void *data)
- {
-     return sizeof(struct Event_Backend_KQueue);
- }
-
- static const rb_data_type_t Event_Backend_KQueue_Type = {
-     .wrap_struct_name = "Event::Backend::KQueue",
-     .function = {
-         .dmark = Event_Backend_KQueue_Type_mark,
-         .dfree = Event_Backend_KQueue_Type_free,
-         .dsize = Event_Backend_KQueue_Type_size,
-     },
-     .data = NULL,
-     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
- };
-
- VALUE Event_Backend_KQueue_allocate(VALUE self) {
-     struct Event_Backend_KQueue *data = NULL;
-     VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
-     data->loop = Qnil;
-     data->descriptor = -1;
-
-     return instance;
- }
-
- VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
-     struct Event_Backend_KQueue *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
-     data->loop = loop;
-     int result = kqueue();
-
-     if (result == -1) {
-         rb_sys_fail("kqueue");
-     } else {
-         ioctl(result, FIOCLEX);
-         data->descriptor = result;
-
-         rb_update_max_fd(data->descriptor);
-     }
-
-     return self;
- }
-
- VALUE Event_Backend_KQueue_close(VALUE self) {
-     struct Event_Backend_KQueue *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
-     close_internal(data);
-
-     return Qnil;
- }
-
- static
- int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
-     int count = 0;
-     struct kevent kevents[2] = {0};
-
-     if (events & READABLE) {
-         kevents[count].ident = ident;
-         kevents[count].filter = EVFILT_READ;
-         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
-         kevents[count].udata = (void*)fiber;
-
-         // #ifdef EV_OOBAND
-         // if (events & PRIORITY) {
-         // kevents[count].flags |= EV_OOBAND;
-         // }
-         // #endif
-
-         count++;
-     }
-
-     if (events & WRITABLE) {
-         kevents[count].ident = ident;
-         kevents[count].filter = EVFILT_WRITE;
-         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
-         kevents[count].udata = (void*)fiber;
-         count++;
-     }
-
-     int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
-
-     if (result == -1) {
-         rb_sys_fail("kevent(register)");
-     }
-
-     return events;
- }
-
- static
- void io_remove_filters(int descriptor, int ident, int events) {
-     int count = 0;
-     struct kevent kevents[2] = {0};
-
-     if (events & READABLE) {
-         kevents[count].ident = ident;
-         kevents[count].filter = EVFILT_READ;
-         kevents[count].flags = EV_DELETE;
-
-         count++;
-     }
-
-     if (events & WRITABLE) {
-         kevents[count].ident = ident;
-         kevents[count].filter = EVFILT_WRITE;
-         kevents[count].flags = EV_DELETE;
-         count++;
-     }
-
-     // Ignore the result.
-     kevent(descriptor, kevents, count, NULL, 0, NULL);
- }
-
- struct io_wait_arguments {
-     struct Event_Backend_KQueue *data;
-     int events;
-     int descriptor;
- };
-
- static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
-     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-
-     io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
-
-     rb_exc_raise(exception);
- };
-
- static inline
- int events_from_kqueue_filter(int filter) {
-     if (filter == EVFILT_READ) return READABLE;
-     if (filter == EVFILT_WRITE) return WRITABLE;
-
-     return 0;
- }
-
- static
- VALUE io_wait_transfer(VALUE _arguments) {
-     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-
-     VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
-
-     return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
- };
-
- VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-     struct Event_Backend_KQueue *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-
-     struct io_wait_arguments io_wait_arguments = {
-         .events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
-         .data = data,
-         .descriptor = descriptor,
-     };
-
-     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
- }
-
- static
- struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
-     if (duration == Qnil) {
-         return NULL;
-     }
-
-     if (FIXNUM_P(duration)) {
-         storage->tv_sec = NUM2TIMET(duration);
-         storage->tv_nsec = 0;
-
-         return storage;
-     }
-
-     else if (RB_FLOAT_TYPE_P(duration)) {
-         double value = RFLOAT_VALUE(duration);
-         time_t seconds = value;
-
-         storage->tv_sec = seconds;
-         storage->tv_nsec = (value - seconds) * 1000000000L;
-
-         return storage;
-     }
-
-     rb_raise(rb_eRuntimeError, "unable to convert timeout");
- }
-
- static
- int timeout_nonblocking(struct timespec * timespec) {
-     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
- }
-
- struct select_arguments {
-     struct Event_Backend_KQueue *data;
-
-     int count;
-     struct kevent events[KQUEUE_MAX_EVENTS];
-
-     struct timespec storage;
-     struct timespec *timeout;
- };
-
- static
- void * select_internal(void *_arguments) {
-     struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
-     arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
-
-     return NULL;
- }
-
- static
- void select_internal_without_gvl(struct select_arguments *arguments) {
-     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-
-     if (arguments->count == -1) {
-         rb_sys_fail("select_internal_without_gvl:kevent");
-     }
- }
-
- static
- void select_internal_with_gvl(struct select_arguments *arguments) {
-     select_internal((void *)arguments);
-
-     if (arguments->count == -1) {
-         rb_sys_fail("select_internal_with_gvl:kevent");
-     }
- }
-
- VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
-     struct Event_Backend_KQueue *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
-     struct select_arguments arguments = {
-         .data = data,
-         .count = KQUEUE_MAX_EVENTS,
-         .storage = {
-             .tv_sec = 0,
-             .tv_nsec = 0
-         }
-     };
-
-     // We break this implementation into two parts.
-     // (1) count = kevent(..., timeout = 0)
-     // (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
-     // This allows us to avoid releasing and reacquiring the GVL.
-     // Non-comprehensive testing shows this gives a 1.5x speedup.
-     arguments.timeout = &arguments.storage;
-
-     // First do the syscall with no timeout to get any immediately available events:
-     select_internal_with_gvl(&arguments);
-
-     // If there were no pending events, if we have a timeout, wait for more events:
-     if (arguments.count == 0) {
-         arguments.timeout = make_timeout(duration, &arguments.storage);
-
-         if (!timeout_nonblocking(arguments.timeout)) {
-             arguments.count = KQUEUE_MAX_EVENTS;
-
-             select_internal_without_gvl(&arguments);
-         }
-     }
-
-     for (int i = 0; i < arguments.count; i += 1) {
-         VALUE fiber = (VALUE)arguments.events[i].udata;
-         VALUE result = INT2NUM(arguments.events[i].filter);
-
-         Event_Backend_resume_safe(fiber, result);
-     }
-
-     return INT2NUM(arguments.count);
- }
-
- void Init_Event_Backend_KQueue(VALUE Event_Backend) {
-     id_fileno = rb_intern("fileno");
-     id_transfer = rb_intern("transfer");
-
-     Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
-
-     rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
-     rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
-     rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
-
-     rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
-     rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
- }
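
The removed KQueue backend above registers one-shot EVFILT_READ/EVFILT_WRITE filters with the waiting fiber stashed in udata, harvests immediately available events with a zero-timeout kevent while still holding the GVL, and only releases the GVL for a blocking kevent when nothing was ready. The following standalone sketch, which is not part of the gem, shows that same one-shot registration and zero-timeout harvest against a plain file descriptor; the pipe, the "ready" marker, and the omitted error handling are illustrative assumptions, and it assumes a macOS/FreeBSD-style kqueue where udata is a pointer.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    int kq = kqueue();
    int fds[2];
    pipe(fds);

    // Register interest in readability of fds[0]; EV_ONESHOT means the filter
    // is delivered at most once and then dropped, so it is re-armed per wait,
    // just as io_add_filters is called on every io_wait.
    struct kevent change;
    EV_SET(&change, fds[0], EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, "ready");
    kevent(kq, &change, 1, NULL, 0, NULL);

    // Make the pipe readable, then collect the event. A zero timeout, as in
    // select_internal_with_gvl, only reports events that are already pending.
    write(fds[1], "x", 1);
    struct kevent event;
    struct timespec timeout = {0, 0};
    int count = kevent(kq, NULL, 0, &event, 1, &timeout);

    if (count > 0) {
        // udata round-trips untouched; the backend stores the fiber here.
        printf("filter=%d udata=%s\n", (int)event.filter, (const char *)event.udata);
    }

    close(fds[0]); close(fds[1]); close(kq);
    return 0;
}
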
@@ -1,432 +0,0 @@
- // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
- //
- // Permission is hereby granted, free of charge, to any person obtaining a copy
- // of this software and associated documentation files (the "Software"), to deal
- // in the Software without restriction, including without limitation the rights
- // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- // copies of the Software, and to permit persons to whom the Software is
- // furnished to do so, subject to the following conditions:
- //
- // The above copyright notice and this permission notice shall be included in
- // all copies or substantial portions of the Software.
- //
- // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- // THE SOFTWARE.
-
- #include "uring.h"
- #include "backend.h"
-
- #include <liburing.h>
- #include <poll.h>
- #include <time.h>
-
- static VALUE Event_Backend_URing = Qnil;
- static ID id_fileno, id_transfer;
-
- enum {URING_ENTRIES = 128};
- enum {URING_MAX_EVENTS = 128};
-
- struct Event_Backend_URing {
-     VALUE loop;
-     struct io_uring ring;
- };
-
- void Event_Backend_URing_Type_mark(void *_data)
- {
-     struct Event_Backend_URing *data = _data;
-     rb_gc_mark(data->loop);
- }
-
- static
- void close_internal(struct Event_Backend_URing *data) {
-     if (data->ring.ring_fd >= 0) {
-         io_uring_queue_exit(&data->ring);
-         data->ring.ring_fd = -1;
-     }
- }
-
- void Event_Backend_URing_Type_free(void *_data)
- {
-     struct Event_Backend_URing *data = _data;
-
-     close_internal(data);
-
-     free(data);
- }
-
- size_t Event_Backend_URing_Type_size(const void *data)
- {
-     return sizeof(struct Event_Backend_URing);
- }
-
- static const rb_data_type_t Event_Backend_URing_Type = {
-     .wrap_struct_name = "Event::Backend::URing",
-     .function = {
-         .dmark = Event_Backend_URing_Type_mark,
-         .dfree = Event_Backend_URing_Type_free,
-         .dsize = Event_Backend_URing_Type_size,
-     },
-     .data = NULL,
-     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
- };
-
- VALUE Event_Backend_URing_allocate(VALUE self) {
-     struct Event_Backend_URing *data = NULL;
-     VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     data->loop = Qnil;
-     data->ring.ring_fd = -1;
-
-     return instance;
- }
-
- VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     data->loop = loop;
-
-     int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
-
-     if (result < 0) {
-         rb_syserr_fail(-result, "io_uring_queue_init");
-     }
-
-     rb_update_max_fd(data->ring.ring_fd);
-
-     return self;
- }
-
- VALUE Event_Backend_URing_close(VALUE self) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     close_internal(data);
-
-     return Qnil;
- }
-
- static inline
- short poll_flags_from_events(int events) {
-     short flags = 0;
-
-     if (events & READABLE) flags |= POLLIN;
-     if (events & PRIORITY) flags |= POLLPRI;
-     if (events & WRITABLE) flags |= POLLOUT;
-
-     flags |= POLLERR;
-     flags |= POLLHUP;
-
-     return flags;
- }
-
- static inline
- int events_from_poll_flags(short flags) {
-     int events = 0;
-
-     if (flags & POLLIN) events |= READABLE;
-     if (flags & POLLPRI) events |= PRIORITY;
-     if (flags & POLLOUT) events |= WRITABLE;
-
-     return events;
- }
-
- struct io_wait_arguments {
-     struct Event_Backend_URing *data;
-     VALUE fiber;
-     short flags;
- };
-
- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
-     struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
-
-     while (sqe == NULL) {
-         sqe = io_uring_get_sqe(&data->ring);
-     }
-
-     return sqe;
- }
-
- static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
-     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-     struct Event_Backend_URing *data = arguments->data;
-
-     struct io_uring_sqe *sqe = io_get_sqe(data);
-
-     // fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
-
-     io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
-     io_uring_submit(&data->ring);
-
-     rb_exc_raise(exception);
- };
-
- static
- VALUE io_wait_transfer(VALUE _arguments) {
-     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-     struct Event_Backend_URing *data = arguments->data;
-
-     VALUE result = rb_funcall(data->loop, id_transfer, 0);
-
-     // We explicitly filter the resulting events based on the requested events.
-     // In some cases, poll will report events we didn't ask for.
-     short flags = arguments->flags & NUM2INT(result);
-
-     return INT2NUM(events_from_poll_flags(flags));
- };
-
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_get_sqe(data);
-
-     if (!sqe) return INT2NUM(0);
-
-     short flags = poll_flags_from_events(NUM2INT(events));
-
-     // fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
-
-     io_uring_prep_poll_add(sqe, descriptor, flags);
-     io_uring_sqe_set_data(sqe, (void*)fiber);
-     // fprintf(stderr, "io_uring_submit\n");
-     // io_uring_submit(&data->ring);
-
-     struct io_wait_arguments io_wait_arguments = {
-         .data = data,
-         .fiber = fiber,
-         .flags = flags
-     };
-
-     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
- }
-
- inline static
- void resize_to_capacity(VALUE string, size_t offset, size_t length) {
-     size_t current_length = RSTRING_LEN(string);
-     long difference = (long)(offset + length) - (long)current_length;
-
-     difference += 1;
-
-     if (difference > 0) {
-         rb_str_modify_expand(string, difference);
-     } else {
-         rb_str_modify(string);
-     }
- }
-
- inline static
- void resize_to_fit(VALUE string, size_t offset, size_t length) {
-     size_t current_length = RSTRING_LEN(string);
-
-     if (current_length < (offset + length)) {
-         rb_str_set_len(string, offset + length);
-     }
- }
-
- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
-
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_get_sqe(data);
-
-     struct iovec iovecs[1];
-     iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
-     iovecs[0].iov_len = NUM2SIZET(length);
-
-     io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
-     io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit(&data->ring);
-
-     // fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
-
-     int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
-
-     if (result < 0) {
-         rb_syserr_fail(-result, strerror(-result));
-     }
-
-     resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
-
-     return INT2NUM(result);
- }
-
- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
-         rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
-     }
-
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_get_sqe(data);
-
-     struct iovec iovecs[1];
-     iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
-     iovecs[0].iov_len = NUM2SIZET(length);
-
-     io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
-     io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit(&data->ring);
-
-     // fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
-
-     int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
-
-     if (result < 0) {
-         rb_syserr_fail(-result, strerror(-result));
-     }
-
-     return INT2NUM(result);
- }
-
- static
- struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
-     if (duration == Qnil) {
-         return NULL;
-     }
-
-     if (FIXNUM_P(duration)) {
-         storage->tv_sec = NUM2TIMET(duration);
-         storage->tv_nsec = 0;
-
-         return storage;
-     }
-
-     else if (RB_FLOAT_TYPE_P(duration)) {
-         double value = RFLOAT_VALUE(duration);
-         time_t seconds = value;
-
-         storage->tv_sec = seconds;
-         storage->tv_nsec = (value - seconds) * 1000000000L;
-
-         return storage;
-     }
-
-     rb_raise(rb_eRuntimeError, "unable to convert timeout");
- }
-
- static
- int timeout_nonblocking(struct __kernel_timespec *timespec) {
-     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
- }
-
- struct select_arguments {
-     struct Event_Backend_URing *data;
-
-     int count;
-     struct io_uring_cqe **cqes;
-
-     struct __kernel_timespec storage;
-     struct __kernel_timespec *timeout;
- };
-
- static
- void * select_internal(void *_arguments) {
-     struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
-     io_uring_submit(&arguments->data->ring);
-
-     int result = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
-
-     if (result == -ETIME) {
-         // If waiting resulted in a timeout, there are 0 events.
-         arguments->count = 0;
-     } else if (result == 0) {
-         // Otherwise, there was no error, at least 1 event was reported. So we ask for them all.
-         arguments->count = io_uring_peek_batch_cqe(&arguments->data->ring, arguments->cqes, URING_MAX_EVENTS);
-     } else {
-         arguments->count = result;
-     }
-
-     return NULL;
- }
-
- static
- int select_internal_without_gvl(struct select_arguments *arguments) {
-     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-
-     if (arguments->count < 0) {
-         rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
-     }
-
-     return arguments->count;
- }
-
- VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
-     struct io_uring_cqe *cqes[URING_MAX_EVENTS];
-
-     // This is a non-blocking operation:
-     int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
-
-     if (result < 0) {
-         rb_syserr_fail(-result, strerror(-result));
-     } else if (result == 0) {
-         // We might need to wait for events:
-         struct select_arguments arguments = {
-             .data = data,
-             .cqes = cqes,
-             .timeout = NULL,
-         };
-
-         arguments.timeout = make_timeout(duration, &arguments.storage);
-
-         if (!timeout_nonblocking(arguments.timeout)) {
-             result = select_internal_without_gvl(&arguments);
-         } else {
-             io_uring_submit(&data->ring);
-             result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
-         }
-     }
-
-     // fprintf(stderr, "cqes count=%d\n", result);
-
-     for (int i = 0; i < result; i += 1) {
-         // If the operation was cancelled, or the operation has no user data (fiber):
-         if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
-             continue;
-         }
-
-         VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
-         VALUE result = INT2NUM(cqes[i]->res);
-
-         // fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
-
-         io_uring_cqe_seen(&data->ring, cqes[i]);
-
-         Event_Backend_resume_safe(fiber, result);
-     }
-
-     return INT2NUM(result);
- }
-
- void Init_Event_Backend_URing(VALUE Event_Backend) {
-     id_fileno = rb_intern("fileno");
-     id_transfer = rb_intern("transfer");
-
-     Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
-
-     rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
-     rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
-     rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
-
-     rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
-     rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
-
-     rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
-     rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
- }
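
The removed URing backend follows the same shape on Linux: every operation claims an SQE, attaches the fiber as user data, submits, and the reactor later matches completions back to fibers via io_uring_cqe_get_data before marking them seen. The standalone sketch below is not part of the gem; it needs liburing and linking with -luring, and the pipe, the "ready" marker, and the omitted error handling are illustrative assumptions. It shows the submit-and-harvest round trip for a single poll, the primitive that Event_Backend_URing_io_wait is built on.

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    struct io_uring ring;
    io_uring_queue_init(8, &ring, 0);   // a small ring is enough for the sketch

    int fds[2];
    pipe(fds);
    write(fds[1], "x", 1);              // make fds[0] readable up front

    // Equivalent of Event_Backend_URing_io_wait: a one-shot poll for readability,
    // with opaque user data attached to the submission.
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_poll_add(sqe, fds[0], POLLIN);
    io_uring_sqe_set_data(sqe, (void *)"ready");
    io_uring_submit(&ring);

    // Equivalent of select_internal: block until at least one completion arrives.
    struct io_uring_cqe *cqe;
    io_uring_wait_cqe(&ring, &cqe);

    // cqe->res carries the poll flags (or a negative errno); user_data round-trips.
    printf("res=%d user_data=%s\n", cqe->res, (const char *)io_uring_cqe_get_data(cqe));
    io_uring_cqe_seen(&ring, cqe);

    io_uring_queue_exit(&ring);
    close(fds[0]); close(fds[1]);
    return 0;
}

In the backend proper, io_uring_wait_cqes with a kernel timespec plays the role of the blocking wait here, completions are peeked in batches of up to URING_MAX_EVENTS, and -ECANCELED entries left behind by poll_remove are skipped before resuming fibers.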