event 0.5.0 → 0.8.1

@@ -22,6 +22,6 @@
 
  #include <ruby.h>
 
- #define EVENT_BACKEND_KQUEUE
+ #define EVENT_SELECTOR_KQUEUE
 
- void Init_Event_Backend_KQueue(VALUE Event_Backend);
+ void Init_Event_Selector_KQueue(VALUE Event_Selector);
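This first hunk only renames the kqueue selector's guard macro and init entry point from the old Backend naming to the new Selector naming. As a rough illustration of what such an entry point is for, a hypothetical extension init function (the names below are illustrative, not taken from this diff) might register the selector like this:

#include <ruby.h>

// Hypothetical wiring: expose the kqueue selector under an Event::Selector module.
void Init_event_example(void) {
	VALUE Event = rb_define_module("Event");
	VALUE Event_Selector = rb_define_module_under(Event, "Selector");
	
	#ifdef EVENT_SELECTOR_KQUEUE
	Init_Event_Selector_KQueue(Event_Selector);
	#endif
}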
File without changes
@@ -0,0 +1,263 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "selector.h"
+ #include <fcntl.h>
+
+ static ID id_transfer, id_alive_p;
+
+ #ifndef HAVE__RB_FIBER_TRANSFER
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
+ return rb_funcallv(fiber, id_transfer, argc, argv);
+ }
+ #endif
+
+ #ifndef HAVE__RB_FIBER_RAISE
+ static ID id_raise;
+
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv) {
+ return rb_funcallv(fiber, id_raise, argc, argv);
+ }
+ #endif
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ static ID id_fileno;
+
+ int Event_Selector_io_descriptor(VALUE io) {
+ return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+ }
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+ static ID id_wait;
+ static VALUE rb_Process_Status = Qnil;
+
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid)
+ {
+ return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
+ }
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor)
+ {
+ int flags = fcntl(file_descriptor, F_GETFL, 0);
+
+ if (!(flags & O_NONBLOCK)) {
+ fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
+ }
+
+ return flags;
+ }
+
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags)
+ {
+ if (!(flags & O_NONBLOCK)) {
+ fcntl(file_descriptor, F_SETFL, flags & ~O_NONBLOCK);
+ }
+ }
+
+ void Init_Event_Selector(VALUE Event_Selector) {
+ id_transfer = rb_intern("transfer");
+ id_alive_p = rb_intern("alive?");
+
+ #ifndef HAVE__RB_FIBER_RAISE
+ id_raise = rb_intern("raise");
+ #endif
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ id_fileno = rb_intern("fileno");
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+ id_wait = rb_intern("wait");
+ rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
+ #endif
+ }
+
+ struct wait_and_transfer_arguments {
+ int argc;
+ VALUE *argv;
+
+ struct Event_Selector *backend;
+ struct Event_Selector_Queue *waiting;
+ };
+
+ static void queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+ if (waiting->behind) {
+ waiting->behind->infront = waiting->infront;
+ } else {
+ backend->waiting = waiting->infront;
+ }
+
+ if (waiting->infront) {
+ waiting->infront->behind = waiting->behind;
+ } else {
+ backend->ready = waiting->behind;
+ }
+ }
+
+ static void queue_push(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+ if (backend->waiting) {
+ backend->waiting->behind = waiting;
+ waiting->infront = backend->waiting;
+ } else {
+ backend->ready = waiting;
+ }
+
+ backend->waiting = waiting;
+ }
+
+ static VALUE wait_and_transfer(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ VALUE fiber = arguments->argv[0];
+ int argc = arguments->argc - 1;
+ VALUE *argv = arguments->argv + 1;
+
+ return Event_Selector_fiber_transfer(fiber, argc, argv);
+ }
+
+ static VALUE wait_and_transfer_ensure(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ queue_pop(arguments->backend, arguments->waiting);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+ rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
+
+ struct Event_Selector_Queue waiting = {
+ .behind = NULL,
+ .infront = NULL,
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
+ .fiber = rb_fiber_current()
+ };
+
+ queue_push(backend, &waiting);
+
+ struct wait_and_transfer_arguments arguments = {
+ .argc = argc,
+ .argv = argv,
+ .backend = backend,
+ .waiting = &waiting,
+ };
+
+ return rb_ensure(wait_and_transfer, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ static VALUE wait_and_raise(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ VALUE fiber = arguments->argv[0];
+ int argc = arguments->argc - 1;
+ VALUE *argv = arguments->argv + 1;
+
+ return Event_Selector_fiber_raise(fiber, argc, argv);
+ }
+
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+ rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
+
+ struct Event_Selector_Queue waiting = {
+ .behind = NULL,
+ .infront = NULL,
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
+ .fiber = rb_fiber_current()
+ };
+
+ queue_push(backend, &waiting);
+
+ struct wait_and_transfer_arguments arguments = {
+ .argc = argc,
+ .argv = argv,
+ .backend = backend,
+ .waiting = &waiting,
+ };
+
+ return rb_ensure(wait_and_raise, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber)
+ {
+ struct Event_Selector_Queue *waiting = malloc(sizeof(struct Event_Selector_Queue));
+
+ waiting->behind = NULL;
+ waiting->infront = NULL;
+ waiting->flags = EVENT_SELECTOR_QUEUE_INTERNAL;
+ waiting->fiber = fiber;
+
+ queue_push(backend, waiting);
+ }
+
+ static inline
+ void Event_Selector_queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *ready)
+ {
+ if (ready->flags & EVENT_SELECTOR_QUEUE_FIBER) {
+ Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
+ } else {
+ VALUE fiber = ready->fiber;
+ queue_pop(backend, ready);
+ free(ready);
+
+ if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
+ rb_funcall(fiber, id_transfer, 0);
+ }
+ }
+ }
+
+ int Event_Selector_queue_flush(struct Event_Selector *backend)
+ {
+ int count = 0;
+
+ // Get the current tail and head of the queue:
+ struct Event_Selector_Queue *waiting = backend->waiting;
+
+ // Process from head to tail in order:
+ // During this, more items may be appended to tail.
+ while (backend->ready) {
+ struct Event_Selector_Queue *ready = backend->ready;
+
+ count += 1;
+ Event_Selector_queue_pop(backend, ready);
+
+ if (ready == waiting) break;
+ }
+
+ return count;
+ }
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
+ {
+ if ((stop->tv_nsec - start->tv_nsec) < 0) {
+ duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
+ } else {
+ duration->tv_sec = stop->tv_sec - start->tv_sec;
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
+ }
+ }
+
+ void Event_Selector_current_time(struct timespec *time) {
+ clock_gettime(CLOCK_MONOTONIC, time);
+ }
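The two helpers at the end of this file form a small monotonic-clock timing pair. A minimal usage sketch, assuming the declarations above are visible (the function name is hypothetical, not part of the gem):

#include <stdio.h>
#include <time.h>

void example_measure(void) {
	struct timespec start, stop, duration;
	
	Event_Selector_current_time(&start);
	// ... for example, one blocking call into the selector ...
	Event_Selector_current_time(&stop);
	
	// elapsed_time borrows from tv_sec when the nanosecond difference is negative,
	// e.g. stop = 2s + 100000000ns, start = 1s + 900000000ns => 0s + 200000000ns.
	Event_Selector_elapsed_time(&start, &stop, &duration);
	
	printf("%lld.%09ld seconds\n", (long long)duration.tv_sec, duration.tv_nsec);
}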
@@ -0,0 +1,127 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <ruby.h>
+ #include <ruby/thread.h>
+ #include <ruby/io.h>
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ #include <ruby/io/buffer.h>
+ #endif
+
+ #include <time.h>
+
+ enum Event {
+ EVENT_READABLE = 1,
+ EVENT_PRIORITY = 2,
+ EVENT_WRITABLE = 4,
+ EVENT_ERROR = 8,
+ EVENT_HANGUP = 16
+ };
+
+ void Init_Event_Selector(VALUE Event_Selector);
+
+ #ifdef HAVE__RB_FIBER_TRANSFER
+ #define Event_Selector_fiber_transfer(fiber, argc, argv) rb_fiber_transfer(fiber, argc, argv)
+ #else
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);
+ #endif
+
+ #ifdef HAVE__RB_FIBER_RAISE
+ #define Event_Selector_fiber_raise(fiber, argc, argv) rb_fiber_raise(fiber, argc, argv)
+ #else
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
+ #endif
+
+ #ifdef HAVE_RB_IO_DESCRIPTOR
+ #define Event_Selector_io_descriptor(io) rb_io_descriptor(io)
+ #else
+ int Event_Selector_io_descriptor(VALUE io);
+ #endif
+
+ #ifdef HAVE_RB_PROCESS_STATUS_WAIT
+ #define Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+ #else
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid);
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor);
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags);
+
+ enum Event_Selector_Queue_Flags {
+ EVENT_SELECTOR_QUEUE_FIBER = 1,
+ EVENT_SELECTOR_QUEUE_INTERNAL = 2,
+ };
+
+ struct Event_Selector_Queue {
+ struct Event_Selector_Queue *behind;
+ struct Event_Selector_Queue *infront;
+
+ enum Event_Selector_Queue_Flags flags;
+
+ VALUE fiber;
+ };
+
+ struct Event_Selector {
+ VALUE loop;
+
+ struct Event_Selector_Queue *free;
+
+ // Append to waiting.
+ struct Event_Selector_Queue *waiting;
+ // Process from ready.
+ struct Event_Selector_Queue *ready;
+ };
+
+ static inline
+ void Event_Selector_initialize(struct Event_Selector *backend, VALUE loop) {
+ backend->loop = loop;
+ backend->waiting = NULL;
+ backend->ready = NULL;
+ }
+
+ static inline
+ void Event_Selector_mark(struct Event_Selector *backend) {
+ rb_gc_mark(backend->loop);
+
+ struct Event_Selector_Queue *ready = backend->ready;
+ while (ready) {
+ rb_gc_mark(ready->fiber);
+ ready = ready->behind;
+ }
+ }
+
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv);
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv);
+
+ static inline
+ VALUE Event_Selector_yield(struct Event_Selector *backend)
+ {
+ return Event_Selector_wait_and_transfer(backend, 1, &backend->loop);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber);
+ int Event_Selector_queue_flush(struct Event_Selector *backend);
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+ void Event_Selector_current_time(struct timespec *time);
+
+ #define PRINTF_TIMESPEC "%lld.%.9ld"
+ #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
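Concrete selectors are expected to embed struct Event_Selector and delegate to the inline helpers above; the io_uring selector in the next file follows exactly this pattern. A minimal sketch of the shape, assuming this header is included (the Example names are hypothetical):

// Sketch only: the general shape of a concrete selector.
struct Event_Selector_Example {
	struct Event_Selector backend; // shared fiber/queue state, marked via Event_Selector_mark
	int descriptor;                // OS-specific handle (epoll fd, kqueue fd, ...)
};

static void example_initialize(struct Event_Selector_Example *selector, VALUE loop) {
	Event_Selector_initialize(&selector->backend, loop);
	selector->descriptor = -1;
}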
@@ -0,0 +1,638 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "uring.h"
+ #include "selector.h"
+
+ #include <liburing.h>
+ #include <poll.h>
+ #include <time.h>
+
+ #include "pidfd.c"
+
+ static const int DEBUG = 0;
+
+ static VALUE Event_Selector_URing = Qnil;
+
+ enum {URING_ENTRIES = 64};
+
+ struct Event_Selector_URing {
+ struct Event_Selector backend;
+ struct io_uring ring;
+ size_t pending;
+ };
+
+ void Event_Selector_URing_Type_mark(void *_data)
+ {
+ struct Event_Selector_URing *data = _data;
+ Event_Selector_mark(&data->backend);
+ }
+
+ static
+ void close_internal(struct Event_Selector_URing *data) {
+ if (data->ring.ring_fd >= 0) {
+ io_uring_queue_exit(&data->ring);
+ data->ring.ring_fd = -1;
+ }
+ }
+
+ void Event_Selector_URing_Type_free(void *_data)
+ {
+ struct Event_Selector_URing *data = _data;
+
+ close_internal(data);
+
+ free(data);
+ }
+
+ size_t Event_Selector_URing_Type_size(const void *data)
+ {
+ return sizeof(struct Event_Selector_URing);
+ }
+
+ static const rb_data_type_t Event_Selector_URing_Type = {
+ .wrap_struct_name = "Event::Backend::URing",
+ .function = {
+ .dmark = Event_Selector_URing_Type_mark,
+ .dfree = Event_Selector_URing_Type_free,
+ .dsize = Event_Selector_URing_Type_size,
+ },
+ .data = NULL,
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE Event_Selector_URing_allocate(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_initialize(&data->backend, Qnil);
+ data->ring.ring_fd = -1;
+
+ data->pending = 0;
+
+ return instance;
+ }
+
+ VALUE Event_Selector_URing_initialize(VALUE self, VALUE loop) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_initialize(&data->backend, loop);
+ int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+
+ if (result < 0) {
+ rb_syserr_fail(-result, "io_uring_queue_init");
+ }
+
+ rb_update_max_fd(data->ring.ring_fd);
+
+ return self;
+ }
+
+ VALUE Event_Selector_URing_close(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ close_internal(data);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_transfer(int argc, VALUE *argv, VALUE self)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_wait_and_transfer(&data->backend, argc, argv);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_yield(VALUE self)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_yield(&data->backend);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_push(VALUE self, VALUE fiber)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_queue_push(&data->backend, fiber);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+ }
+
+ VALUE Event_Selector_URing_ready_p(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ static
+ int io_uring_submit_flush(struct Event_Selector_URing *data) {
+ if (data->pending) {
+ if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
+
+ // Try to submit:
+ int result = io_uring_submit(&data->ring);
+
+ if (result >= 0) {
+ // If it was submitted, reset pending count:
+ data->pending = 0;
+ } else if (result != -EBUSY && result != -EAGAIN) {
+ rb_syserr_fail(-result, "io_uring_submit_flush");
+ }
+
+ return result;
+ }
+
+ return 0;
+ }
+
+ static
+ int io_uring_submit_now(struct Event_Selector_URing *data) {
+ while (true) {
+ int result = io_uring_submit(&data->ring);
+
+ if (result >= 0) {
+ data->pending = 0;
+ return result;
+ }
+
+ if (result == -EBUSY || result == -EAGAIN) {
+ Event_Selector_yield(&data->backend);
+ } else {
+ rb_syserr_fail(-result, "io_uring_submit_now");
+ }
+ }
+ }
+
+ static
+ void io_uring_submit_pending(struct Event_Selector_URing *data) {
+ data->pending += 1;
+ }
+
+ struct io_uring_sqe * io_get_sqe(struct Event_Selector_URing *data) {
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+
+ while (sqe == NULL) {
+ // The submit queue is full, we need to drain it:
+ io_uring_submit_now(data);
+
+ sqe = io_uring_get_sqe(&data->ring);
+ }
+
+ return sqe;
+ }
+
+ struct process_wait_arguments {
+ struct Event_Selector_URing *data;
+ pid_t pid;
+ int flags;
+ int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ return Event_Selector_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ close(arguments->descriptor);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ struct process_wait_arguments process_wait_arguments = {
+ .data = data,
+ .pid = NUM2PIDT(pid),
+ .flags = NUM2INT(flags),
+ };
+
+ process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+ rb_update_max_fd(process_wait_arguments.descriptor);
+
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_pending(data);
+
+ return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
+
+ static inline
+ short poll_flags_from_events(int events) {
+ short flags = 0;
+
+ if (events & EVENT_READABLE) flags |= POLLIN;
+ if (events & EVENT_PRIORITY) flags |= POLLPRI;
+ if (events & EVENT_WRITABLE) flags |= POLLOUT;
+
+ flags |= POLLERR;
+ flags |= POLLHUP;
+
+ return flags;
+ }
+
+ static inline
+ int events_from_poll_flags(short flags) {
+ int events = 0;
+
+ if (flags & POLLIN) events |= EVENT_READABLE;
+ if (flags & POLLPRI) events |= EVENT_PRIORITY;
+ if (flags & POLLOUT) events |= EVENT_WRITABLE;
+
+ return events;
+ }
+
+ struct io_wait_arguments {
+ struct Event_Selector_URing *data;
+ VALUE fiber;
+ short flags;
+ };
+
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ struct Event_Selector_URing *data = arguments->data;
+
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
+
+ io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+ io_uring_submit_now(data);
+
+ rb_exc_raise(exception);
+ };
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ struct Event_Selector_URing *data = arguments->data;
+
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ if (DEBUG) fprintf(stderr, "io_wait:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+ // We explicitly filter the resulting events based on the requested events.
+ // In some cases, poll will report events we didn't ask for.
+ short flags = arguments->flags & NUM2INT(result);
+
+ return INT2NUM(events_from_poll_flags(flags));
+ };
+
+ VALUE Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ int descriptor = Event_Selector_io_descriptor(io);
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ short flags = poll_flags_from_events(NUM2INT(events));
+
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
+
+ io_uring_prep_poll_add(sqe, descriptor, flags);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+
+ // If we are going to wait, we assume that we are waiting for a while:
+ io_uring_submit_pending(data);
+
+ struct io_wait_arguments io_wait_arguments = {
+ .data = data,
+ .fiber = fiber,
+ .flags = flags
+ };
+
+ return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ static int io_read(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
+
+ io_uring_prep_read(sqe, descriptor, buffer, length, 0);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_now(data);
+
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ if (DEBUG) fprintf(stderr, "io_read:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+ return RB_NUM2INT(result);
+ }
+
+ VALUE Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ int descriptor = Event_Selector_io_descriptor(io);
+
+ void *base;
+ size_t size;
+ rb_io_buffer_get_mutable(buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = NUM2SIZET(_length);
+
+ while (length > 0) {
+ size_t maximum_size = size - offset;
+ int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ offset += result;
+ if ((size_t)result >= length) break;
+ length -= result;
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_READABLE));
+ } else {
+ rb_syserr_fail(-result, strerror(-result));
+ }
+ }
+
+ return SIZET2NUM(offset);
+ }
+
+ static
+ int io_write(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
+
+ io_uring_prep_write(sqe, descriptor, buffer, length, 0);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_pending(data);
+
+ int result = RB_NUM2INT(Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
+ if (DEBUG) fprintf(stderr, "io_write:Event_Selector_fiber_transfer -> %d\n", result);
+
+ return result;
+ }
+
+ VALUE Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ int descriptor = Event_Selector_io_descriptor(io);
+
+ const void *base;
+ size_t size;
+ rb_io_buffer_get_immutable(buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = NUM2SIZET(_length);
+
+ if (length > size) {
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ }
+
+ while (length > 0) {
+ int result = io_write(data, fiber, descriptor, (char*)base+offset, length);
+
+ if (result >= 0) {
+ offset += result;
+ if ((size_t)result >= length) break;
+ length -= result;
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_WRITABLE));
+ } else {
+ rb_syserr_fail(-result, strerror(-result));
+ }
+ }
+
+ return SIZET2NUM(offset);
+ }
+
+ #endif
+
+ static const int ASYNC_CLOSE = 1;
+
+ VALUE Event_Selector_URing_io_close(VALUE self, VALUE io) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ int descriptor = Event_Selector_io_descriptor(io);
+
+ if (ASYNC_CLOSE) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ io_uring_prep_close(sqe, descriptor);
+ io_uring_sqe_set_data(sqe, NULL);
+ io_uring_submit_now(data);
+ } else {
+ close(descriptor);
+ }
+
+ // We don't wait for the result of close since it has no use in practice:
+ return Qtrue;
+ }
+
+ static
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
+ if (duration == Qnil) {
+ return NULL;
+ }
+
+ if (FIXNUM_P(duration)) {
+ storage->tv_sec = NUM2TIMET(duration);
+ storage->tv_nsec = 0;
+
+ return storage;
+ }
+
+ else if (RB_FLOAT_TYPE_P(duration)) {
+ double value = RFLOAT_VALUE(duration);
+ time_t seconds = value;
+
+ storage->tv_sec = seconds;
+ storage->tv_nsec = (value - seconds) * 1000000000L;
+
+ return storage;
+ }
+
+ rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
+ return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+ struct Event_Selector_URing *data;
+
+ int result;
+
+ struct __kernel_timespec storage;
+ struct __kernel_timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+ struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+ io_uring_submit_flush(arguments->data);
+
+ struct io_uring_cqe *cqe = NULL;
+ arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
+
+ return NULL;
+ }
+
+ static
+ int select_internal_without_gvl(struct select_arguments *arguments) {
+ rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+ if (arguments->result == -ETIME) {
+ arguments->result = 0;
+ } else if (arguments->result < 0) {
+ rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqes");
+ } else {
+ // At least 1 event is waiting:
+ arguments->result = 1;
+ }
+
+ return arguments->result;
+ }
+
+ static inline
+ unsigned select_process_completions(struct io_uring *ring) {
+ unsigned completed = 0;
+ unsigned head;
+ struct io_uring_cqe *cqe;
+
+ io_uring_for_each_cqe(ring, head, cqe) {
+ ++completed;
+
+ // If the operation was cancelled, or the operation has no user data (fiber):
+ if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ io_uring_cq_advance(ring, 1);
+ continue;
+ }
+
+ VALUE fiber = (VALUE)cqe->user_data;
+ VALUE result = RB_INT2NUM(cqe->res);
+
+ if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
+ io_uring_cq_advance(ring, 1);
+
+ Event_Selector_fiber_transfer(fiber, 1, &result);
+ }
+
+ // io_uring_cq_advance(ring, completed);
+
+ if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
+
+ return completed;
+ }
+
+ VALUE Event_Selector_URing_select(VALUE self, VALUE duration) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ int ready = Event_Selector_queue_flush(&data->backend);
+
+ int result = select_process_completions(&data->ring);
+
+ // If the ready list was empty and we didn't process any completions:
+ if (!ready && result == 0) {
+ // We might need to wait for events:
+ struct select_arguments arguments = {
+ .data = data,
+ .timeout = NULL,
+ };
+
+ arguments.timeout = make_timeout(duration, &arguments.storage);
+
+ if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+ // This is a blocking operation, we wait for events:
+ result = select_internal_without_gvl(&arguments);
+ } else {
+ // The timeout specified required "nonblocking" behaviour so we just flush the SQ if required:
+ io_uring_submit_flush(data);
+ }
+
+ // After waiting/flushing the SQ, check if there are any completions:
+ result = select_process_completions(&data->ring);
+ }
+
+ return RB_INT2NUM(result);
+ }
+
+ void Init_Event_Selector_URing(VALUE Event_Selector) {
+ Event_Selector_URing = rb_define_class_under(Event_Selector, "URing", rb_cObject);
+
+ rb_define_alloc_func(Event_Selector_URing, Event_Selector_URing_allocate);
+ rb_define_method(Event_Selector_URing, "initialize", Event_Selector_URing_initialize, 1);
+
+ rb_define_method(Event_Selector_URing, "transfer", Event_Selector_URing_transfer, -1);
+ rb_define_method(Event_Selector_URing, "yield", Event_Selector_URing_yield, 0);
+ rb_define_method(Event_Selector_URing, "push", Event_Selector_URing_push, 1);
+ rb_define_method(Event_Selector_URing, "raise", Event_Selector_URing_raise, -1);
+
+ rb_define_method(Event_Selector_URing, "ready?", Event_Selector_URing_ready_p, 0);
+
+ rb_define_method(Event_Selector_URing, "select", Event_Selector_URing_select, 1);
+ rb_define_method(Event_Selector_URing, "close", Event_Selector_URing_close, 0);
+
+ rb_define_method(Event_Selector_URing, "io_wait", Event_Selector_URing_io_wait, 3);
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ rb_define_method(Event_Selector_URing, "io_read", Event_Selector_URing_io_read, 4);
+ rb_define_method(Event_Selector_URing, "io_write", Event_Selector_URing_io_write, 4);
+ #endif
+
+ rb_define_method(Event_Selector_URing, "io_close", Event_Selector_URing_io_close, 1);
+
+ rb_define_method(Event_Selector_URing, "process_wait", Event_Selector_URing_process_wait, 3);
+ }
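One detail worth noting in this file: poll, write and process-wait requests are only counted via io_uring_submit_pending and written to the kernel in a batch by io_uring_submit_flush at the start of select, while reads and closes go through io_uring_submit_now immediately (io_get_sqe also falls back to io_uring_submit_now whenever the submission queue fills up). A hedged sketch of the deferred pattern, reusing the helpers defined above (illustrative, not a quote from the file):

// Illustrative only: queueing a poll request that will be submitted by the next select().
static void example_enqueue_poll(struct Event_Selector_URing *data, VALUE fiber, int descriptor) {
	struct io_uring_sqe *sqe = io_get_sqe(data); // may call io_uring_submit_now if the queue is full
	
	io_uring_prep_poll_add(sqe, descriptor, POLLIN);
	io_uring_sqe_set_data(sqe, (void*)fiber);
	
	io_uring_submit_pending(data); // deferred; flushed by io_uring_submit_flush in select()
}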