event 0.4.4 → 0.8.0

@@ -22,6 +22,6 @@
22
22
 
23
23
  #include <ruby.h>
24
24
 
25
- #define EVENT_BACKEND_KQUEUE
25
+ #define EVENT_SELECTOR_KQUEUE
26
26
 
27
- void Init_Event_Backend_KQueue(VALUE Event_Backend);
27
+ void Init_Event_Selector_KQueue(VALUE Event_Selector);
@@ -18,19 +18,19 @@
18
18
  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
19
  // THE SOFTWARE.
20
20
 
21
- #include <ruby.h>
22
- #include <ruby/thread.h>
21
+ #include <sys/types.h>
22
+ #include <sys/syscall.h>
23
+ #include <unistd.h>
24
+ #include <poll.h>
25
+ #include <stdlib.h>
26
+ #include <stdio.h>
23
27
 
24
- enum Event {
25
- READABLE = 1,
26
- PRIORITY = 2,
27
- WRITABLE = 4,
28
- ERROR = 8,
29
- HANGUP = 16
30
- };
28
+ #ifndef __NR_pidfd_open
29
+ #define __NR_pidfd_open 434 /* System call # on most architectures */
30
+ #endif
31
31
 
32
- void
33
- Init_Event_Backend();
34
-
35
- VALUE
36
- Event_Backend_resume_safe(VALUE fiber, VALUE argument);
32
+ static int
33
+ pidfd_open(pid_t pid, unsigned int flags)
34
+ {
35
+ return syscall(__NR_pidfd_open, pid, flags);
36
+ }
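For reference, a pidfd returned by the wrapper above becomes readable once the target process terminates, which is what the io_uring selector later relies on when it submits the descriptor with io_uring_prep_poll_add. A minimal standalone sketch of the same idea using plain poll(2) (illustrative only, not part of the gem; requires Linux 5.3+ or the syscall fallback shown above):

#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <poll.h>
#include <stdio.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif

static int pidfd_open(pid_t pid, unsigned int flags) {
	return syscall(__NR_pidfd_open, pid, flags);
}

int main(void) {
	pid_t pid = fork();
	if (pid == 0) _exit(0); // Child exits immediately.

	int descriptor = pidfd_open(pid, 0);
	if (descriptor < 0) {
		perror("pidfd_open");
		return 1;
	}

	// The descriptor becomes readable (POLLIN) when the process exits:
	struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
	poll(&pfd, 1, -1);
	printf("child %d exited\n", (int)pid);

	close(descriptor);
	return 0;
}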
@@ -0,0 +1,263 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
+ #include "selector.h"
22
+ #include <fcntl.h>
23
+
24
+ static ID id_transfer, id_alive_p;
25
+
26
+ #ifndef HAVE__RB_FIBER_TRANSFER
27
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
28
+ return rb_funcallv(fiber, id_transfer, argc, argv);
29
+ }
30
+ #endif
31
+
32
+ #ifndef HAVE__RB_FIBER_RAISE
33
+ static ID id_raise;
34
+
35
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv) {
36
+ return rb_funcallv(fiber, id_raise, argc, argv);
37
+ }
38
+ #endif
39
+
40
+ #ifndef HAVE_RB_IO_DESCRIPTOR
41
+ static ID id_fileno;
42
+
43
+ int Event_Selector_io_descriptor(VALUE io) {
44
+ return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
45
+ }
46
+ #endif
47
+
48
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
49
+ static ID id_wait;
50
+ static VALUE rb_Process_Status = Qnil;
51
+
52
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid)
53
+ {
54
+ return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
55
+ }
56
+ #endif
57
+
58
+ int Event_Selector_nonblock_set(int file_descriptor)
59
+ {
60
+ int flags = fcntl(file_descriptor, F_GETFL, 0);
61
+
62
+ if (!(flags & O_NONBLOCK)) {
63
+ fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
64
+ }
65
+
66
+ return flags;
67
+ }
68
+
69
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags)
70
+ {
71
+ if (!(flags & O_NONBLOCK)) {
72
+ fcntl(file_descriptor, F_SETFL, flags & ~flags);
73
+ }
74
+ }
75
+
76
+ void Init_Event_Selector(VALUE Event_Selector) {
77
+ id_transfer = rb_intern("transfer");
78
+ id_alive_p = rb_intern("alive?");
79
+
80
+ #ifndef HAVE__RB_FIBER_RAISE
81
+ id_raise = rb_intern("raise");
82
+ #endif
83
+
84
+ #ifndef HAVE_RB_IO_DESCRIPTOR
85
+ id_fileno = rb_intern("fileno");
86
+ #endif
87
+
88
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
89
+ id_wait = rb_intern("wait");
90
+ rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
91
+ #endif
92
+ }
93
+
94
+ struct wait_and_transfer_arguments {
95
+ int argc;
96
+ VALUE *argv;
97
+
98
+ struct Event_Selector *backend;
99
+ struct Event_Selector_Queue *waiting;
100
+ };
101
+
102
+ static void queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
103
+ if (waiting->behind) {
104
+ waiting->behind->infront = waiting->infront;
105
+ } else {
106
+ backend->waiting = waiting->infront;
107
+ }
108
+
109
+ if (waiting->infront) {
110
+ waiting->infront->behind = waiting->behind;
111
+ } else {
112
+ backend->ready = waiting->behind;
113
+ }
114
+ }
115
+
116
+ static void queue_push(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
117
+ if (backend->waiting) {
118
+ backend->waiting->behind = waiting;
119
+ waiting->infront = backend->waiting;
120
+ } else {
121
+ backend->ready = waiting;
122
+ }
123
+
124
+ backend->waiting = waiting;
125
+ }
126
+
127
+ static VALUE wait_and_transfer(VALUE _arguments) {
128
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
129
+
130
+ VALUE fiber = arguments->argv[0];
131
+ int argc = arguments->argc - 1;
132
+ VALUE *argv = arguments->argv + 1;
133
+
134
+ return Event_Selector_fiber_transfer(fiber, argc, argv);
135
+ }
136
+
137
+ static VALUE wait_and_transfer_ensure(VALUE _arguments) {
138
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
139
+
140
+ queue_pop(arguments->backend, arguments->waiting);
141
+
142
+ return Qnil;
143
+ }
144
+
145
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv)
146
+ {
147
+ rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
148
+
149
+ struct Event_Selector_Queue waiting = {
150
+ .behind = NULL,
151
+ .infront = NULL,
152
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
153
+ .fiber = rb_fiber_current()
154
+ };
155
+
156
+ queue_push(backend, &waiting);
157
+
158
+ struct wait_and_transfer_arguments arguments = {
159
+ .argc = argc,
160
+ .argv = argv,
161
+ .backend = backend,
162
+ .waiting = &waiting,
163
+ };
164
+
165
+ return rb_ensure(wait_and_transfer, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
166
+ }
167
+
168
+ static VALUE wait_and_raise(VALUE _arguments) {
169
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
170
+
171
+ VALUE fiber = arguments->argv[0];
172
+ int argc = arguments->argc - 1;
173
+ VALUE *argv = arguments->argv + 1;
174
+
175
+ return Event_Selector_fiber_raise(fiber, argc, argv);
176
+ }
177
+
178
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv)
179
+ {
180
+ rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
181
+
182
+ struct Event_Selector_Queue waiting = {
183
+ .behind = NULL,
184
+ .infront = NULL,
185
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
186
+ .fiber = rb_fiber_current()
187
+ };
188
+
189
+ queue_push(backend, &waiting);
190
+
191
+ struct wait_and_transfer_arguments arguments = {
192
+ .argc = argc,
193
+ .argv = argv,
194
+ .backend = backend,
195
+ .waiting = &waiting,
196
+ };
197
+
198
+ return rb_ensure(wait_and_raise, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
199
+ }
200
+
201
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber)
202
+ {
203
+ struct Event_Selector_Queue *waiting = malloc(sizeof(struct Event_Selector_Queue));
204
+
205
+ waiting->behind = NULL;
206
+ waiting->infront = NULL;
207
+ waiting->flags = EVENT_SELECTOR_QUEUE_INTERNAL;
208
+ waiting->fiber = fiber;
209
+
210
+ queue_push(backend, waiting);
211
+ }
212
+
213
+ static inline
214
+ void Event_Selector_queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *ready)
215
+ {
216
+ if (ready->flags & EVENT_SELECTOR_QUEUE_FIBER) {
217
+ Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
218
+ } else {
219
+ VALUE fiber = ready->fiber;
220
+ queue_pop(backend, ready);
221
+ free(ready);
222
+
223
+ if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
224
+ rb_funcall(fiber, id_transfer, 0);
225
+ }
226
+ }
227
+ }
228
+
229
+ int Event_Selector_queue_flush(struct Event_Selector *backend)
230
+ {
231
+ int count = 0;
232
+
233
+ // Capture the current tail of the queue:
234
+ struct Event_Selector_Queue *waiting = backend->waiting;
235
+
236
+ // Process from head to tail in order:
237
+ // During this, more items may be appended to tail.
238
+ while (backend->ready) {
239
+ struct Event_Selector_Queue *ready = backend->ready;
240
+
241
+ count += 1;
242
+ Event_Selector_queue_pop(backend, ready);
243
+
244
+ if (ready == waiting) break;
245
+ }
246
+
247
+ return count;
248
+ }
249
+
250
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
251
+ {
252
+ if ((stop->tv_nsec - start->tv_nsec) < 0) {
253
+ duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
254
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
255
+ } else {
256
+ duration->tv_sec = stop->tv_sec - start->tv_sec;
257
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
258
+ }
259
+ }
260
+
261
+ void Event_Selector_current_time(struct timespec *time) {
262
+ clock_gettime(CLOCK_MONOTONIC, time);
263
+ }
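The nonblock helpers above follow a save/set/restore pattern around an individual blocking call. A small hypothetical caller (descriptor, buffer and size are assumed to exist; this is a sketch of the intended usage, not code from the gem):

// Ensure O_NONBLOCK is set, remembering the original flags:
int flags = Event_Selector_nonblock_set(descriptor);

// The read now returns -1 with errno == EAGAIN instead of blocking:
ssize_t result = read(descriptor, buffer, size);

// Put the descriptor back the way we found it:
Event_Selector_nonblock_restore(descriptor, flags);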
@@ -0,0 +1,127 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
+ #include <ruby.h>
22
+ #include <ruby/thread.h>
23
+ #include <ruby/io.h>
24
+
25
+ #ifdef HAVE_RUBY_IO_BUFFER_H
26
+ #include <ruby/io/buffer.h>
27
+ #endif
28
+
29
+ #include <time.h>
30
+
31
+ enum Event {
32
+ EVENT_READABLE = 1,
33
+ EVENT_PRIORITY = 2,
34
+ EVENT_WRITABLE = 4,
35
+ EVENT_ERROR = 8,
36
+ EVENT_HANGUP = 16
37
+ };
38
+
39
+ void Init_Event_Selector(VALUE Event_Selector);
40
+
41
+ #ifdef HAVE__RB_FIBER_TRANSFER
42
+ #define Event_Selector_fiber_transfer(fiber, argc, argv) rb_fiber_transfer(fiber, argc, argv)
43
+ #else
44
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);
45
+ #endif
46
+
47
+ #ifdef HAVE__RB_FIBER_RAISE
48
+ #define Event_Selector_fiber_raise(fiber, argc, argv) rb_fiber_raise(fiber, argc, argv)
49
+ #else
50
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
51
+ #endif
52
+
53
+ #ifdef HAVE_RB_IO_DESCRIPTOR
54
+ #define Event_Selector_io_descriptor(io) rb_io_descriptor(io)
55
+ #else
56
+ int Event_Selector_io_descriptor(VALUE io);
57
+ #endif
58
+
59
+ #ifdef HAVE_RB_PROCESS_STATUS_WAIT
60
+ #define Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
61
+ #else
62
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid);
63
+ #endif
64
+
65
+ int Event_Selector_nonblock_set(int file_descriptor);
66
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags);
67
+
68
+ enum Event_Selector_Queue_Flags {
69
+ EVENT_SELECTOR_QUEUE_FIBER = 1,
70
+ EVENT_SELECTOR_QUEUE_INTERNAL = 2,
71
+ };
72
+
73
+ struct Event_Selector_Queue {
74
+ struct Event_Selector_Queue *behind;
75
+ struct Event_Selector_Queue *infront;
76
+
77
+ enum Event_Selector_Queue_Flags flags;
78
+
79
+ VALUE fiber;
80
+ };
81
+
82
+ struct Event_Selector {
83
+ VALUE loop;
84
+
85
+ struct Event_Selector_Queue *free;
86
+
87
+ // Append to waiting.
88
+ struct Event_Selector_Queue *waiting;
89
+ // Process from ready.
90
+ struct Event_Selector_Queue *ready;
91
+ };
92
+
93
+ static inline
94
+ void Event_Selector_initialize(struct Event_Selector *backend, VALUE loop) {
95
+ backend->loop = loop;
96
+ backend->waiting = NULL;
97
+ backend->ready = NULL;
98
+ }
99
+
100
+ static inline
101
+ void Event_Selector_mark(struct Event_Selector *backend) {
102
+ rb_gc_mark(backend->loop);
103
+
104
+ struct Event_Selector_Queue *ready = backend->ready;
105
+ while (ready) {
106
+ rb_gc_mark(ready->fiber);
107
+ ready = ready->behind;
108
+ }
109
+ }
110
+
111
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv);
112
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv);
113
+
114
+ static inline
115
+ VALUE Event_Selector_yield(struct Event_Selector *backend)
116
+ {
117
+ return Event_Selector_wait_and_transfer(backend, 1, &backend->loop);
118
+ }
119
+
120
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber);
121
+ int Event_Selector_queue_flush(struct Event_Selector *backend);
122
+
123
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
124
+ void Event_Selector_current_time(struct timespec *time);
125
+
126
+ #define PRINTF_TIMESPEC "%lld.%.9ld"
127
+ #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
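The timing helpers and printf macros declared above compose as follows; a short sketch (not from the gem) timing an arbitrary piece of work on the monotonic clock:

struct timespec start, stop, duration;

Event_Selector_current_time(&start);
// ... do some work ...
Event_Selector_current_time(&stop);

Event_Selector_elapsed_time(&start, &stop, &duration);
fprintf(stderr, "elapsed: " PRINTF_TIMESPEC "s\n", PRINTF_TIMESPEC_ARGS(duration));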
@@ -0,0 +1,651 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
+ #include "uring.h"
22
+ #include "selector.h"
23
+
24
+ #include <liburing.h>
25
+ #include <poll.h>
26
+ #include <time.h>
27
+
28
+ #include "pidfd.c"
29
+
30
+ static const int DEBUG = 0;
31
+
32
+ // This option controls whether to call `io_uring_submit()` after every operation:
33
+ static const int EARLY_SUBMIT = 1;
34
+
35
+ static VALUE Event_Selector_URing = Qnil;
36
+
37
+ enum {URING_ENTRIES = 64};
38
+
39
+ struct Event_Selector_URing {
40
+ struct Event_Selector backend;
41
+ struct io_uring ring;
42
+ size_t pending;
43
+ };
44
+
45
+ void Event_Selector_URing_Type_mark(void *_data)
46
+ {
47
+ struct Event_Selector_URing *data = _data;
48
+ Event_Selector_mark(&data->backend);
49
+ }
50
+
51
+ static
52
+ void close_internal(struct Event_Selector_URing *data) {
53
+ if (data->ring.ring_fd >= 0) {
54
+ io_uring_queue_exit(&data->ring);
55
+ data->ring.ring_fd = -1;
56
+ }
57
+ }
58
+
59
+ void Event_Selector_URing_Type_free(void *_data)
60
+ {
61
+ struct Event_Selector_URing *data = _data;
62
+
63
+ close_internal(data);
64
+
65
+ free(data);
66
+ }
67
+
68
+ size_t Event_Selector_URing_Type_size(const void *data)
69
+ {
70
+ return sizeof(struct Event_Selector_URing);
71
+ }
72
+
73
+ static const rb_data_type_t Event_Selector_URing_Type = {
74
+ .wrap_struct_name = "Event::Backend::URing",
75
+ .function = {
76
+ .dmark = Event_Selector_URing_Type_mark,
77
+ .dfree = Event_Selector_URing_Type_free,
78
+ .dsize = Event_Selector_URing_Type_size,
79
+ },
80
+ .data = NULL,
81
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY,
82
+ };
83
+
84
+ VALUE Event_Selector_URing_allocate(VALUE self) {
85
+ struct Event_Selector_URing *data = NULL;
86
+ VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
87
+
88
+ Event_Selector_initialize(&data->backend, Qnil);
89
+ data->ring.ring_fd = -1;
90
+
91
+ data->pending = 0;
92
+
93
+ return instance;
94
+ }
95
+
96
+ VALUE Event_Selector_URing_initialize(VALUE self, VALUE loop) {
97
+ struct Event_Selector_URing *data = NULL;
98
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
99
+
100
+ Event_Selector_initialize(&data->backend, loop);
101
+ int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
102
+
103
+ if (result < 0) {
104
+ rb_syserr_fail(-result, "io_uring_queue_init");
105
+ }
106
+
107
+ rb_update_max_fd(data->ring.ring_fd);
108
+
109
+ return self;
110
+ }
111
+
112
+ VALUE Event_Selector_URing_close(VALUE self) {
113
+ struct Event_Selector_URing *data = NULL;
114
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
115
+
116
+ close_internal(data);
117
+
118
+ return Qnil;
119
+ }
120
+
121
+ VALUE Event_Selector_URing_transfer(int argc, VALUE *argv, VALUE self)
122
+ {
123
+ struct Event_Selector_URing *data = NULL;
124
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
125
+
126
+ Event_Selector_wait_and_transfer(&data->backend, argc, argv);
127
+
128
+ return Qnil;
129
+ }
130
+
131
+ VALUE Event_Selector_URing_yield(VALUE self)
132
+ {
133
+ struct Event_Selector_URing *data = NULL;
134
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
135
+
136
+ Event_Selector_yield(&data->backend);
137
+
138
+ return Qnil;
139
+ }
140
+
141
+ VALUE Event_Selector_URing_push(VALUE self, VALUE fiber)
142
+ {
143
+ struct Event_Selector_URing *data = NULL;
144
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
145
+
146
+ Event_Selector_queue_push(&data->backend, fiber);
147
+
148
+ return Qnil;
149
+ }
150
+
151
+ VALUE Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
152
+ {
153
+ struct Event_Selector_URing *data = NULL;
154
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
155
+
156
+ return Event_Selector_wait_and_raise(&data->backend, argc, argv);
157
+ }
158
+
159
+ VALUE Event_Selector_URing_ready_p(VALUE self) {
160
+ struct Event_Selector_URing *data = NULL;
161
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
162
+
163
+ return data->backend.ready ? Qtrue : Qfalse;
164
+ }
165
+
166
+ static
167
+ int io_uring_submit_flush(struct Event_Selector_URing *data) {
168
+ if (data->pending) {
169
+ if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
170
+
171
+ // Try to submit:
172
+ int result = io_uring_submit(&data->ring);
173
+
174
+ if (result >= 0) {
175
+ // If it was submitted, reset pending count:
176
+ data->pending = 0;
177
+ } else if (result != -EBUSY && result != -EAGAIN) {
178
+ rb_syserr_fail(-result, "io_uring_submit_flush");
179
+ }
180
+
181
+ return result;
182
+ }
183
+
184
+ return 0;
185
+ }
186
+
187
+ static
188
+ int io_uring_submit_now(struct Event_Selector_URing *data) {
189
+ while (true) {
190
+ int result = io_uring_submit(&data->ring);
191
+
192
+ if (result >= 0) {
193
+ data->pending = 0;
194
+ return result;
195
+ }
196
+
197
+ if (result == -EBUSY || result == -EAGAIN) {
198
+ Event_Selector_yield(&data->backend);
199
+ } else {
200
+ rb_syserr_fail(-result, "io_uring_submit_now");
201
+ }
202
+ }
203
+ }
204
+
205
+ static
206
+ void io_uring_submit_pending(struct Event_Selector_URing *data) {
207
+ if (EARLY_SUBMIT) {
208
+ io_uring_submit_now(data);
209
+ } else {
210
+ data->pending += 1;
211
+ }
212
+ }
213
+
214
+ struct io_uring_sqe * io_get_sqe(struct Event_Selector_URing *data) {
215
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
216
+
217
+ while (sqe == NULL) {
218
+ // The submit queue is full, we need to drain it:
219
+ io_uring_submit_now(data);
220
+
221
+ sqe = io_uring_get_sqe(&data->ring);
222
+ }
223
+
224
+ return sqe;
225
+ }
226
+
227
+ struct process_wait_arguments {
228
+ struct Event_Selector_URing *data;
229
+ pid_t pid;
230
+ int flags;
231
+ int descriptor;
232
+ };
233
+
234
+ static
235
+ VALUE process_wait_transfer(VALUE _arguments) {
236
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
237
+
238
+ Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
239
+
240
+ return Event_Selector_process_status_wait(arguments->pid);
241
+ }
242
+
243
+ static
244
+ VALUE process_wait_ensure(VALUE _arguments) {
245
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
246
+
247
+ close(arguments->descriptor);
248
+
249
+ return Qnil;
250
+ }
251
+
252
+ VALUE Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
253
+ struct Event_Selector_URing *data = NULL;
254
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
255
+
256
+ struct process_wait_arguments process_wait_arguments = {
257
+ .data = data,
258
+ .pid = NUM2PIDT(pid),
259
+ .flags = NUM2INT(flags),
260
+ };
261
+
262
+ process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
263
+ rb_update_max_fd(process_wait_arguments.descriptor);
264
+
265
+ struct io_uring_sqe *sqe = io_get_sqe(data);
266
+
267
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
268
+ io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
269
+ io_uring_sqe_set_data(sqe, (void*)fiber);
270
+ io_uring_submit_pending(data);
271
+
272
+ return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
273
+ }
274
+
275
+ static inline
276
+ short poll_flags_from_events(int events) {
277
+ short flags = 0;
278
+
279
+ if (events & EVENT_READABLE) flags |= POLLIN;
280
+ if (events & EVENT_PRIORITY) flags |= POLLPRI;
281
+ if (events & EVENT_WRITABLE) flags |= POLLOUT;
282
+
283
+ flags |= POLLERR;
284
+ flags |= POLLHUP;
285
+
286
+ return flags;
287
+ }
288
+
289
+ static inline
290
+ int events_from_poll_flags(short flags) {
291
+ int events = 0;
292
+
293
+ if (flags & POLLIN) events |= EVENT_READABLE;
294
+ if (flags & POLLPRI) events |= EVENT_PRIORITY;
295
+ if (flags & POLLOUT) events |= EVENT_WRITABLE;
296
+
297
+ return events;
298
+ }
299
+
300
+ struct io_wait_arguments {
301
+ struct Event_Selector_URing *data;
302
+ VALUE fiber;
303
+ short flags;
304
+ };
305
+
306
+ static
307
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
308
+ struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
309
+ struct Event_Selector_URing *data = arguments->data;
310
+
311
+ struct io_uring_sqe *sqe = io_get_sqe(data);
312
+
313
+ if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
314
+
315
+ io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
316
+ io_uring_submit_now(data);
317
+
318
+ rb_exc_raise(exception);
319
+ };
320
+
321
+ static
322
+ VALUE io_wait_transfer(VALUE _arguments) {
323
+ struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
324
+ struct Event_Selector_URing *data = arguments->data;
325
+
326
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
327
+ if (DEBUG) fprintf(stderr, "io_wait:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
328
+
329
+ // We explicitly filter the resulting events based on the requested events.
330
+ // In some cases, poll will report events we didn't ask for.
331
+ short flags = arguments->flags & NUM2INT(result);
332
+
333
+ return INT2NUM(events_from_poll_flags(flags));
334
+ };
335
+
336
+ VALUE Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
337
+ struct Event_Selector_URing *data = NULL;
338
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
339
+
340
+ int descriptor = Event_Selector_io_descriptor(io);
341
+ struct io_uring_sqe *sqe = io_get_sqe(data);
342
+
343
+ short flags = poll_flags_from_events(NUM2INT(events));
344
+
345
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
346
+
347
+ io_uring_prep_poll_add(sqe, descriptor, flags);
348
+ io_uring_sqe_set_data(sqe, (void*)fiber);
349
+ io_uring_submit_pending(data);
350
+
351
+ struct io_wait_arguments io_wait_arguments = {
352
+ .data = data,
353
+ .fiber = fiber,
354
+ .flags = flags
355
+ };
356
+
357
+ return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
358
+ }
359
+
360
+ #ifdef HAVE_RUBY_IO_BUFFER_H
361
+
362
+ static int io_read(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
363
+ struct io_uring_sqe *sqe = io_get_sqe(data);
364
+
365
+ if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
366
+
367
+ io_uring_prep_read(sqe, descriptor, buffer, length, 0);
368
+ io_uring_sqe_set_data(sqe, (void*)fiber);
369
+ io_uring_submit_pending(data);
370
+
371
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
372
+ if (DEBUG) fprintf(stderr, "io_read:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
373
+
374
+ return RB_NUM2INT(result);
375
+ }
376
+
377
+ VALUE Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
378
+ struct Event_Selector_URing *data = NULL;
379
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
380
+
381
+ int descriptor = Event_Selector_io_descriptor(io);
382
+
383
+ void *base;
384
+ size_t size;
385
+ rb_io_buffer_get_mutable(buffer, &base, &size);
386
+
387
+ size_t offset = 0;
388
+ size_t length = NUM2SIZET(_length);
389
+
390
+ while (length > 0) {
391
+ size_t maximum_size = size - offset;
392
+ int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
393
+
394
+ if (result == 0) {
395
+ break;
396
+ } else if (result > 0) {
397
+ offset += result;
398
+ if ((size_t)result >= length) break;
399
+ length -= result;
400
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
401
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_READABLE));
402
+ } else {
403
+ rb_syserr_fail(-result, strerror(-result));
404
+ }
405
+ }
406
+
407
+ return SIZET2NUM(offset);
408
+ }
409
+
410
+ static
411
+ int io_write(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
412
+ struct io_uring_sqe *sqe = io_get_sqe(data);
413
+
414
+ if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
415
+
416
+ io_uring_prep_write(sqe, descriptor, buffer, length, 0);
417
+ io_uring_sqe_set_data(sqe, (void*)fiber);
418
+ io_uring_submit_pending(data);
419
+
420
+ int result = RB_NUM2INT(Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
421
+ if (DEBUG) fprintf(stderr, "io_write:Event_Selector_fiber_transfer -> %d\n", result);
422
+
423
+ return result;
424
+ }
425
+
426
+ VALUE Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
427
+ struct Event_Selector_URing *data = NULL;
428
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
429
+
430
+ int descriptor = Event_Selector_io_descriptor(io);
431
+
432
+ const void *base;
433
+ size_t size;
434
+ rb_io_buffer_get_immutable(buffer, &base, &size);
435
+
436
+ size_t offset = 0;
437
+ size_t length = NUM2SIZET(_length);
438
+
439
+ if (length > size) {
440
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
441
+ }
442
+
443
+ while (length > 0) {
444
+ int result = io_write(data, fiber, descriptor, (char*)base+offset, length);
445
+
446
+ if (result >= 0) {
447
+ offset += result;
448
+ if ((size_t)result >= length) break;
449
+ length -= result;
450
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
451
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_WRITABLE));
452
+ } else {
453
+ rb_syserr_fail(-result, strerror(-result));
454
+ }
455
+ }
456
+
457
+ return SIZET2NUM(offset);
458
+ }
459
+
460
+ #endif
461
+
462
+ static const int ASYNC_CLOSE = 2;
463
+
464
+ VALUE Event_Selector_URing_io_close(VALUE self, VALUE io) {
465
+ struct Event_Selector_URing *data = NULL;
466
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
467
+
468
+ int descriptor = Event_Selector_io_descriptor(io);
469
+
470
+ if (ASYNC_CLOSE) {
471
+ struct io_uring_sqe *sqe = io_get_sqe(data);
472
+
473
+ io_uring_prep_close(sqe, descriptor);
474
+ io_uring_sqe_set_data(sqe, NULL);
475
+ if (ASYNC_CLOSE == 1)
476
+ io_uring_submit_now(data);
477
+ else if (ASYNC_CLOSE == 2)
478
+ io_uring_submit_pending(data);
479
+ } else {
480
+ close(descriptor);
481
+ }
482
+
483
+ // We don't wait for the result of close since it has no use in practice:
484
+ return Qtrue;
485
+ }
486
+
487
+ static
488
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
489
+ if (duration == Qnil) {
490
+ return NULL;
491
+ }
492
+
493
+ if (FIXNUM_P(duration)) {
494
+ storage->tv_sec = NUM2TIMET(duration);
495
+ storage->tv_nsec = 0;
496
+
497
+ return storage;
498
+ }
499
+
500
+ else if (RB_FLOAT_TYPE_P(duration)) {
501
+ double value = RFLOAT_VALUE(duration);
502
+ time_t seconds = value;
503
+
504
+ storage->tv_sec = seconds;
505
+ storage->tv_nsec = (value - seconds) * 1000000000L;
506
+
507
+ return storage;
508
+ }
509
+
510
+ rb_raise(rb_eRuntimeError, "unable to convert timeout");
511
+ }
512
+
513
+ static
514
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
515
+ return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
516
+ }
517
+
518
+ struct select_arguments {
519
+ struct Event_Selector_URing *data;
520
+
521
+ int result;
522
+
523
+ struct __kernel_timespec storage;
524
+ struct __kernel_timespec *timeout;
525
+ };
526
+
527
+ static
528
+ void * select_internal(void *_arguments) {
529
+ struct select_arguments * arguments = (struct select_arguments *)_arguments;
530
+
531
+ io_uring_submit_flush(arguments->data);
532
+
533
+ struct io_uring_cqe *cqe = NULL;
534
+ arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
535
+
536
+ return NULL;
537
+ }
538
+
539
+ static
540
+ int select_internal_without_gvl(struct select_arguments *arguments) {
541
+ rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
542
+
543
+ if (arguments->result == -ETIME) {
544
+ arguments->result = 0;
545
+ } else if (arguments->result < 0) {
546
+ rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqes");
547
+ } else {
548
+ // At least 1 event is waiting:
549
+ arguments->result = 1;
550
+ }
551
+
552
+ return arguments->result;
553
+ }
554
+
555
+ static inline
556
+ unsigned select_process_completions(struct io_uring *ring) {
557
+ unsigned completed = 0;
558
+ unsigned head;
559
+ struct io_uring_cqe *cqe;
560
+
561
+ io_uring_for_each_cqe(ring, head, cqe) {
562
+ ++completed;
563
+
564
+ // If the operation was cancelled, or the operation has no user data (fiber):
565
+ if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
566
+ io_uring_cq_advance(ring, 1);
567
+ continue;
568
+ }
569
+
570
+ VALUE fiber = (VALUE)cqe->user_data;
571
+ VALUE result = RB_INT2NUM(cqe->res);
572
+
573
+ if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
574
+
575
+ io_uring_cq_advance(ring, 1);
576
+
577
+ Event_Selector_fiber_transfer(fiber, 1, &result);
578
+ }
579
+
580
+ // io_uring_cq_advance(ring, completed);
581
+
582
+ if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
583
+
584
+ return completed;
585
+ }
586
+
587
+ VALUE Event_Selector_URing_select(VALUE self, VALUE duration) {
588
+ struct Event_Selector_URing *data = NULL;
589
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
590
+
591
+ Event_Selector_queue_flush(&data->backend);
592
+
593
+ int result = 0;
594
+
595
+ // There can only be events waiting if we have been submitting them early:
596
+ if (EARLY_SUBMIT) {
597
+ result = select_process_completions(&data->ring);
598
+ }
599
+
600
+ // If we aren't submitting events early, we need to submit them and/or wait for them:
601
+ if (result == 0) {
602
+ // We might need to wait for events:
603
+ struct select_arguments arguments = {
604
+ .data = data,
605
+ .timeout = NULL,
606
+ };
607
+
608
+ arguments.timeout = make_timeout(duration, &arguments.storage);
609
+
610
+ if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
611
+ // This is a blocking operation, we wait for events:
612
+ result = select_internal_without_gvl(&arguments);
613
+ } else {
614
+ // The specified timeout requires non-blocking behaviour, so we just flush the submission queue if needed:
615
+ io_uring_submit_flush(data);
616
+ }
617
+
618
+ // After waiting/flushing the SQ, check if there are any completions:
619
+ result = select_process_completions(&data->ring);
620
+ }
621
+
622
+ return RB_INT2NUM(result);
623
+ }
624
+
625
+ void Init_Event_Selector_URing(VALUE Event_Selector) {
626
+ Event_Selector_URing = rb_define_class_under(Event_Selector, "URing", rb_cObject);
627
+
628
+ rb_define_alloc_func(Event_Selector_URing, Event_Selector_URing_allocate);
629
+ rb_define_method(Event_Selector_URing, "initialize", Event_Selector_URing_initialize, 1);
630
+
631
+ rb_define_method(Event_Selector_URing, "transfer", Event_Selector_URing_transfer, -1);
632
+ rb_define_method(Event_Selector_URing, "yield", Event_Selector_URing_yield, 0);
633
+ rb_define_method(Event_Selector_URing, "push", Event_Selector_URing_push, 1);
634
+ rb_define_method(Event_Selector_URing, "raise", Event_Selector_URing_raise, -1);
635
+
636
+ rb_define_method(Event_Selector_URing, "ready?", Event_Selector_URing_ready_p, 0);
637
+
638
+ rb_define_method(Event_Selector_URing, "select", Event_Selector_URing_select, 1);
639
+ rb_define_method(Event_Selector_URing, "close", Event_Selector_URing_close, 0);
640
+
641
+ rb_define_method(Event_Selector_URing, "io_wait", Event_Selector_URing_io_wait, 3);
642
+
643
+ #ifdef HAVE_RUBY_IO_BUFFER_H
644
+ rb_define_method(Event_Selector_URing, "io_read", Event_Selector_URing_io_read, 4);
645
+ rb_define_method(Event_Selector_URing, "io_write", Event_Selector_URing_io_write, 4);
646
+ #endif
647
+
648
+ rb_define_method(Event_Selector_URing, "io_close", Event_Selector_URing_io_close, 1);
649
+
650
+ rb_define_method(Event_Selector_URing, "process_wait", Event_Selector_URing_process_wait, 3);
651
+ }
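These initializers are presumably wired together by the extension entry point (event.c, which is not part of this diff). A hypothetical sketch, assuming the extension is built as event.so and the module nesting implied by the Init_* signatures above:

#include <ruby.h>

void Init_Event_Selector(VALUE Event_Selector);
void Init_Event_Selector_URing(VALUE Event_Selector);

void Init_event(void)
{
	VALUE Event = rb_define_module("Event");
	VALUE Event_Selector = rb_define_module_under(Event, "Selector");

	Init_Event_Selector(Event_Selector);
	Init_Event_Selector_URing(Event_Selector);
}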