event 0.7.0 → 0.9.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -22,6 +22,6 @@
 
  #include <ruby.h>
 
- #define EVENT_BACKEND_KQUEUE
+ #define EVENT_SELECTOR_KQUEUE
 
- void Init_Event_Backend_KQueue(VALUE Event_Backend);
+ void Init_Event_Selector_KQueue(VALUE Event_Selector);
@@ -0,0 +1,285 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "selector.h"
+ #include <fcntl.h>
+
+ static ID id_transfer, id_alive_p;
+
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
+     // TODO Consider introducing something like `rb_fiber_scheduler_transfer(...)`.
+ #ifdef HAVE__RB_FIBER_TRANSFER
+     if (RTEST(rb_fiber_alive_p(fiber))) {
+         return rb_fiber_transfer(fiber, argc, argv);
+     }
+ #else
+     if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
+         return rb_funcallv(fiber, id_transfer, argc, argv);
+     }
+ #endif
+
+     return Qnil;
+ }
+
+ #ifndef HAVE__RB_FIBER_RAISE
+ static ID id_raise;
+
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv) {
+     return rb_funcallv(fiber, id_raise, argc, argv);
+ }
+ #endif
+
+ #ifndef HAVE_RB_FIBER_CURRENT
+ static ID id_current;
+
+ static VALUE rb_fiber_current() {
+     return rb_funcall(rb_cFiber, id_current, 0);
+ }
+ #endif
+
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ static ID id_fileno;
+
+ int Event_Selector_io_descriptor(VALUE io) {
+     return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+ }
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+ static ID id_wait;
+ static VALUE rb_Process_Status = Qnil;
+
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid)
+ {
+     return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
+ }
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor)
+ {
+     int flags = fcntl(file_descriptor, F_GETFL, 0);
+
+     if (!(flags & O_NONBLOCK)) {
+         fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
+     }
+
+     return flags;
+ }
+
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags)
+ {
+     if (!(flags & O_NONBLOCK)) {
+         fcntl(file_descriptor, F_SETFL, flags & ~O_NONBLOCK);
+     }
+ }
+
+ void Init_Event_Selector(VALUE Event_Selector) {
+     id_transfer = rb_intern("transfer");
+     id_alive_p = rb_intern("alive?");
+
+ #ifndef HAVE__RB_FIBER_RAISE
+     id_raise = rb_intern("raise");
+ #endif
+
+ #ifndef HAVE_RB_FIBER_CURRENT
+     id_current = rb_intern("current");
+ #endif
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+     id_fileno = rb_intern("fileno");
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+     id_wait = rb_intern("wait");
+     rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
+ #endif
+ }
+
+ struct wait_and_transfer_arguments {
+     int argc;
+     VALUE *argv;
+
+     struct Event_Selector *backend;
+     struct Event_Selector_Queue *waiting;
+ };
+
+ static void queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+     if (waiting->behind) {
+         waiting->behind->infront = waiting->infront;
+     } else {
+         backend->waiting = waiting->infront;
+     }
+
+     if (waiting->infront) {
+         waiting->infront->behind = waiting->behind;
+     } else {
+         backend->ready = waiting->behind;
+     }
+ }
+
+ static void queue_push(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+     if (backend->waiting) {
+         backend->waiting->behind = waiting;
+         waiting->infront = backend->waiting;
+     } else {
+         backend->ready = waiting;
+     }
+
+     backend->waiting = waiting;
+ }
+
+ static VALUE wait_and_transfer(VALUE _arguments) {
+     struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+     VALUE fiber = arguments->argv[0];
+     int argc = arguments->argc - 1;
+     VALUE *argv = arguments->argv + 1;
+
+     return Event_Selector_fiber_transfer(fiber, argc, argv);
+ }
+
+ static VALUE wait_and_transfer_ensure(VALUE _arguments) {
+     struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+     queue_pop(arguments->backend, arguments->waiting);
+
+     return Qnil;
+ }
+
+ VALUE Event_Selector_resume(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+     rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
+
+     struct Event_Selector_Queue waiting = {
+         .behind = NULL,
+         .infront = NULL,
+         .flags = EVENT_SELECTOR_QUEUE_FIBER,
+         .fiber = rb_fiber_current()
+     };
+
+     queue_push(backend, &waiting);
+
+     struct wait_and_transfer_arguments arguments = {
+         .argc = argc,
+         .argv = argv,
+         .backend = backend,
+         .waiting = &waiting,
+     };
+
+     return rb_ensure(wait_and_transfer, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ static VALUE wait_and_raise(VALUE _arguments) {
+     struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+     VALUE fiber = arguments->argv[0];
+     int argc = arguments->argc - 1;
+     VALUE *argv = arguments->argv + 1;
+
+     return Event_Selector_fiber_raise(fiber, argc, argv);
+ }
+
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+     rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
+
+     struct Event_Selector_Queue waiting = {
+         .behind = NULL,
+         .infront = NULL,
+         .flags = EVENT_SELECTOR_QUEUE_FIBER,
+         .fiber = rb_fiber_current()
+     };
+
+     queue_push(backend, &waiting);
+
+     struct wait_and_transfer_arguments arguments = {
+         .argc = argc,
+         .argv = argv,
+         .backend = backend,
+         .waiting = &waiting,
+     };
+
+     return rb_ensure(wait_and_raise, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber)
+ {
+     struct Event_Selector_Queue *waiting = malloc(sizeof(struct Event_Selector_Queue));
+
+     waiting->behind = NULL;
+     waiting->infront = NULL;
+     waiting->flags = EVENT_SELECTOR_QUEUE_INTERNAL;
+     waiting->fiber = fiber;
+
+     queue_push(backend, waiting);
+ }
+
+ static inline
+ void Event_Selector_queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *ready)
+ {
+     if (ready->flags & EVENT_SELECTOR_QUEUE_FIBER) {
+         Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
+     } else {
+         VALUE fiber = ready->fiber;
+         queue_pop(backend, ready);
+         free(ready);
+
+         if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
+             rb_funcall(fiber, id_transfer, 0);
+         }
+     }
+ }
+
+ int Event_Selector_queue_flush(struct Event_Selector *backend)
+ {
+     int count = 0;
+
+     // Get the current tail and head of the queue:
+     struct Event_Selector_Queue *waiting = backend->waiting;
+
+     // Process from head to tail in order:
+     // During this, more items may be appended to tail.
+     while (backend->ready) {
+         struct Event_Selector_Queue *ready = backend->ready;
+
+         count += 1;
+         Event_Selector_queue_pop(backend, ready);
+
+         if (ready == waiting) break;
+     }
+
+     return count;
+ }
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
+ {
+     if ((stop->tv_nsec - start->tv_nsec) < 0) {
+         duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
+         duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
+     } else {
+         duration->tv_sec = stop->tv_sec - start->tv_sec;
+         duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
+     }
+ }
+
+ void Event_Selector_current_time(struct timespec *time) {
+     clock_gettime(CLOCK_MONOTONIC, time);
+ }
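
The pair `Event_Selector_nonblock_set` / `Event_Selector_nonblock_restore` in the new file saves the descriptor's original `fcntl` flags and puts them back afterwards. A minimal usage sketch, assuming only the two declarations above (the wrapper function itself is hypothetical, not part of the gem):

#include <fcntl.h>
#include <unistd.h>

int Event_Selector_nonblock_set(int file_descriptor);
void Event_Selector_nonblock_restore(int file_descriptor, int flags);

// Hypothetical helper: perform one nonblocking read, then leave the
// descriptor's flags exactly as we found them.
static ssize_t read_nonblocking(int descriptor, void *buffer, size_t size) {
    int flags = Event_Selector_nonblock_set(descriptor); // returns the original flags
    ssize_t result = read(descriptor, buffer, size);     // may fail with EAGAIN
    Event_Selector_nonblock_restore(descriptor, flags);  // no-op if O_NONBLOCK was already set
    return result;
}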
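Similarly, `Event_Selector_elapsed_time` implements `timespec` subtraction with a borrow: when the nanosecond difference is negative, one second is moved into the nanosecond field. A small worked example (the values are invented for illustration; the printf macros are copied from selector.h below):

#include <stdio.h>
#include <time.h>

void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
#define PRINTF_TIMESPEC "%lld.%.9ld"
#define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec

int main(void) {
    // 3.1s - 1.9s: the nanosecond difference is negative, so one second is
    // borrowed: 1 second + 200000000 nanoseconds.
    struct timespec start = {.tv_sec = 1, .tv_nsec = 900000000};
    struct timespec stop = {.tv_sec = 3, .tv_nsec = 100000000};
    struct timespec duration;

    Event_Selector_elapsed_time(&start, &stop, &duration);
    printf(PRINTF_TIMESPEC "\n", PRINTF_TIMESPEC_ARGS(duration)); // prints 1.200000000
    return 0;
}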
@@ -0,0 +1,123 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <ruby.h>
+ #include <ruby/thread.h>
+ #include <ruby/io.h>
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ #include <ruby/io/buffer.h>
+ #endif
+
+ #include <time.h>
+
+ enum Event {
+     EVENT_READABLE = 1,
+     EVENT_PRIORITY = 2,
+     EVENT_WRITABLE = 4,
+     EVENT_ERROR = 8,
+     EVENT_HANGUP = 16
+ };
+
+ void Init_Event_Selector();
+
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);
+
+ #ifdef HAVE__RB_FIBER_RAISE
+ #define Event_Selector_fiber_raise(fiber, argc, argv) rb_fiber_raise(fiber, argc, argv)
+ #else
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
+ #endif
+
+ #ifdef HAVE_RB_IO_DESCRIPTOR
+ #define Event_Selector_io_descriptor(io) rb_io_descriptor(io)
+ #else
+ int Event_Selector_io_descriptor(VALUE io);
+ #endif
+
+ #ifdef HAVE_RB_PROCESS_STATUS_WAIT
+ #define Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+ #else
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid);
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor);
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags);
+
+ enum Event_Selector_Queue_Flags {
+     EVENT_SELECTOR_QUEUE_FIBER = 1,
+     EVENT_SELECTOR_QUEUE_INTERNAL = 2,
+ };
+
+ struct Event_Selector_Queue {
+     struct Event_Selector_Queue *behind;
+     struct Event_Selector_Queue *infront;
+
+     enum Event_Selector_Queue_Flags flags;
+
+     VALUE fiber;
+ };
+
+ struct Event_Selector {
+     VALUE loop;
+
+     struct Event_Selector_Queue *free;
+
+     // Append to waiting.
+     struct Event_Selector_Queue *waiting;
+     // Process from ready.
+     struct Event_Selector_Queue *ready;
+ };
+
+ static inline
+ void Event_Selector_initialize(struct Event_Selector *backend, VALUE loop) {
+     backend->loop = loop;
+     backend->waiting = NULL;
+     backend->ready = NULL;
+ }
+
+ static inline
+ void Event_Selector_mark(struct Event_Selector *backend) {
+     rb_gc_mark(backend->loop);
+
+     struct Event_Selector_Queue *ready = backend->ready;
+     while (ready) {
+         rb_gc_mark(ready->fiber);
+         ready = ready->behind;
+     }
+ }
+
+ VALUE Event_Selector_resume(struct Event_Selector *backend, int argc, VALUE *argv);
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv);
+
+ static inline
+ VALUE Event_Selector_yield(struct Event_Selector *backend)
+ {
+     return Event_Selector_resume(backend, 1, &backend->loop);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber);
+ int Event_Selector_queue_flush(struct Event_Selector *backend);
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+ void Event_Selector_current_time(struct timespec *time);
+
+ #define PRINTF_TIMESPEC "%lld.%.9ld"
+ #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
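
This header is written so that each concrete selector embeds `struct Event_Selector` as its first member and delegates to the inline helpers; the uring backend in the next hunks follows exactly this shape. A minimal sketch of the convention, assuming the header above (the `Example` backend is hypothetical):

#include "selector.h"

// Hypothetical backend showing the embedding convention:
struct Event_Selector_Example {
    struct Event_Selector backend; // embedded first, so the generic helpers apply
    int descriptor;                // backend-specific state would follow
};

static void Event_Selector_Example_initialize(struct Event_Selector_Example *data, VALUE loop) {
    Event_Selector_initialize(&data->backend, loop); // stores the loop, clears both queues
    data->descriptor = -1;
}

static void Event_Selector_Example_mark(void *_data) {
    struct Event_Selector_Example *data = _data;
    Event_Selector_mark(&data->backend); // GC-marks the loop and every queued fiber
}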
@@ -19,7 +19,7 @@
  // THE SOFTWARE.
 
  #include "uring.h"
- #include "backend.h"
+ #include "selector.h"
 
  #include <liburing.h>
  #include <poll.h>
@@ -29,63 +29,60 @@
 
  static const int DEBUG = 0;
 
- // This option controls whether to all `io_uring_submit()` after every operation:
- static const int EARLY_SUBMIT = 1;
-
- static VALUE Event_Backend_URing = Qnil;
+ static VALUE Event_Selector_URing = Qnil;
 
  enum {URING_ENTRIES = 64};
 
- struct Event_Backend_URing {
-     struct Event_Backend backend;
+ struct Event_Selector_URing {
+     struct Event_Selector backend;
      struct io_uring ring;
      size_t pending;
  };
 
- void Event_Backend_URing_Type_mark(void *_data)
+ void Event_Selector_URing_Type_mark(void *_data)
  {
-     struct Event_Backend_URing *data = _data;
-     Event_Backend_mark(&data->backend);
+     struct Event_Selector_URing *data = _data;
+     Event_Selector_mark(&data->backend);
  }
 
  static
- void close_internal(struct Event_Backend_URing *data) {
+ void close_internal(struct Event_Selector_URing *data) {
      if (data->ring.ring_fd >= 0) {
          io_uring_queue_exit(&data->ring);
          data->ring.ring_fd = -1;
      }
  }
 
- void Event_Backend_URing_Type_free(void *_data)
+ void Event_Selector_URing_Type_free(void *_data)
  {
-     struct Event_Backend_URing *data = _data;
+     struct Event_Selector_URing *data = _data;
 
      close_internal(data);
 
      free(data);
  }
 
- size_t Event_Backend_URing_Type_size(const void *data)
+ size_t Event_Selector_URing_Type_size(const void *data)
  {
-     return sizeof(struct Event_Backend_URing);
+     return sizeof(struct Event_Selector_URing);
  }
 
- static const rb_data_type_t Event_Backend_URing_Type = {
+ static const rb_data_type_t Event_Selector_URing_Type = {
      .wrap_struct_name = "Event::Backend::URing",
      .function = {
-         .dmark = Event_Backend_URing_Type_mark,
-         .dfree = Event_Backend_URing_Type_free,
-         .dsize = Event_Backend_URing_Type_size,
+         .dmark = Event_Selector_URing_Type_mark,
+         .dfree = Event_Selector_URing_Type_free,
+         .dsize = Event_Selector_URing_Type_size,
      },
      .data = NULL,
      .flags = RUBY_TYPED_FREE_IMMEDIATELY,
  };
 
- VALUE Event_Backend_URing_allocate(VALUE self) {
-     struct Event_Backend_URing *data = NULL;
-     VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_allocate(VALUE self) {
+     struct Event_Selector_URing *data = NULL;
+     VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     Event_Backend_initialize(&data->backend, Qnil);
+     Event_Selector_initialize(&data->backend, Qnil);
      data->ring.ring_fd = -1;
 
      data->pending = 0;
@@ -93,11 +90,11 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
      return instance;
  }
 
- VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_initialize(VALUE self, VALUE loop) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     Event_Backend_initialize(&data->backend, loop);
+     Event_Selector_initialize(&data->backend, loop);
      int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
 
      if (result < 0) {
@@ -109,44 +106,62 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
      return self;
  }
 
- VALUE Event_Backend_URing_close(VALUE self) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_close(VALUE self) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
      close_internal(data);
 
      return Qnil;
  }
 
- VALUE Event_Backend_URing_transfer(VALUE self, VALUE fiber)
+ VALUE Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
  {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     Event_Backend_wait_and_transfer(&data->backend, fiber);
+     Event_Selector_resume(&data->backend, argc, argv);
 
      return Qnil;
  }
 
- VALUE Event_Backend_URing_defer(VALUE self)
+ VALUE Event_Selector_URing_yield(VALUE self)
  {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     Event_Backend_defer(&data->backend);
+     Event_Selector_yield(&data->backend);
 
      return Qnil;
  }
 
- VALUE Event_Backend_URing_ready_p(VALUE self) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_push(VALUE self, VALUE fiber)
+ {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+     Event_Selector_queue_push(&data->backend, fiber);
+
+     return Qnil;
+ }
+
+ VALUE Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
+ {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+     return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+ }
+
+ VALUE Event_Selector_URing_ready_p(VALUE self) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
      return data->backend.ready ? Qtrue : Qfalse;
  }
 
  static
- int io_uring_submit_flush(struct Event_Backend_URing *data) {
+ int io_uring_submit_flush(struct Event_Selector_URing *data) {
      if (data->pending) {
          if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
 
@@ -167,7 +182,7 @@ int io_uring_submit_flush(struct Event_Backend_URing *data) {
  }
 
  static
- int io_uring_submit_now(struct Event_Backend_URing *data) {
+ int io_uring_submit_now(struct Event_Selector_URing *data) {
      while (true) {
          int result = io_uring_submit(&data->ring);
 
@@ -177,7 +192,7 @@ int io_uring_submit_now(struct Event_Backend_URing *data) {
          }
 
          if (result == -EBUSY || result == -EAGAIN) {
-             Event_Backend_defer(&data->backend);
+             Event_Selector_yield(&data->backend);
          } else {
              rb_syserr_fail(-result, "io_uring_submit_now");
          }
@@ -185,15 +200,11 @@ int io_uring_submit_now(struct Event_Backend_URing *data) {
  }
 
  static
- void io_uring_submit_pending(struct Event_Backend_URing *data) {
-     if (EARLY_SUBMIT) {
-         io_uring_submit_now(data);
-     } else {
-         data->pending += 1;
-     }
+ void io_uring_submit_pending(struct Event_Selector_URing *data) {
+     data->pending += 1;
  }
 
- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+ struct io_uring_sqe * io_get_sqe(struct Event_Selector_URing *data) {
      struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
 
      while (sqe == NULL) {
@@ -207,7 +218,7 @@ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
  }
 
  struct process_wait_arguments {
-     struct Event_Backend_URing *data;
+     struct Event_Selector_URing *data;
      pid_t pid;
      int flags;
      int descriptor;
@@ -217,9 +228,9 @@ static
  VALUE process_wait_transfer(VALUE _arguments) {
      struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-     Event_Backend_fiber_transfer(arguments->data->backend.loop);
+     Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
-     return Event_Backend_process_status_wait(arguments->pid);
+     return Event_Selector_process_status_wait(arguments->pid);
  }
 
  static
@@ -231,9 +242,9 @@ VALUE process_wait_ensure(VALUE _arguments) {
      return Qnil;
  }
 
- VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
      struct process_wait_arguments process_wait_arguments = {
          .data = data,
@@ -246,7 +257,7 @@ VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE
 
      struct io_uring_sqe *sqe = io_get_sqe(data);
 
-     if (DEBUG) fprintf(stderr, "Event_Backend_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+     if (DEBUG) fprintf(stderr, "Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
      io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
      io_uring_sqe_set_data(sqe, (void*)fiber);
      io_uring_submit_pending(data);
@@ -258,9 +269,9 @@ static inline
  short poll_flags_from_events(int events) {
      short flags = 0;
 
-     if (events & READABLE) flags |= POLLIN;
-     if (events & PRIORITY) flags |= POLLPRI;
-     if (events & WRITABLE) flags |= POLLOUT;
+     if (events & EVENT_READABLE) flags |= POLLIN;
+     if (events & EVENT_PRIORITY) flags |= POLLPRI;
+     if (events & EVENT_WRITABLE) flags |= POLLOUT;
 
      flags |= POLLERR;
      flags |= POLLHUP;
@@ -272,15 +283,15 @@ static inline
  int events_from_poll_flags(short flags) {
      int events = 0;
 
-     if (flags & POLLIN) events |= READABLE;
-     if (flags & POLLPRI) events |= PRIORITY;
-     if (flags & POLLOUT) events |= WRITABLE;
+     if (flags & POLLIN) events |= EVENT_READABLE;
+     if (flags & POLLPRI) events |= EVENT_PRIORITY;
+     if (flags & POLLOUT) events |= EVENT_WRITABLE;
 
      return events;
  }
 
  struct io_wait_arguments {
-     struct Event_Backend_URing *data;
+     struct Event_Selector_URing *data;
      VALUE fiber;
      short flags;
  };
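
These two helpers translate between the gem's `enum Event` bits and `poll(2)` bits in both directions; `POLLERR` and `POLLHUP` are always requested, and because a hang-up arrives together with `POLLIN`, it maps back to `EVENT_READABLE`. A standalone round-trip check (the definitions are duplicated from the hunks above for illustration):

#include <poll.h>
#include <assert.h>

// Event bits and conversions duplicated from selector.h / uring.c above:
enum Event {EVENT_READABLE = 1, EVENT_PRIORITY = 2, EVENT_WRITABLE = 4};

static short poll_flags_from_events(int events) {
    short flags = POLLERR | POLLHUP; // errors and hang-ups are always requested
    if (events & EVENT_READABLE) flags |= POLLIN;
    if (events & EVENT_PRIORITY) flags |= POLLPRI;
    if (events & EVENT_WRITABLE) flags |= POLLOUT;
    return flags;
}

static int events_from_poll_flags(short flags) {
    int events = 0;
    if (flags & POLLIN) events |= EVENT_READABLE;
    if (flags & POLLPRI) events |= EVENT_PRIORITY;
    if (flags & POLLOUT) events |= EVENT_WRITABLE;
    return events;
}

int main(void) {
    // A hang-up is reported as POLLIN|POLLHUP and maps back to EVENT_READABLE,
    // so a fiber waiting for readability still wakes up on EOF:
    assert(events_from_poll_flags(POLLIN | POLLHUP) == EVENT_READABLE);
    // POLLERR/POLLHUP have no event bits, so the round trip is exact:
    assert(events_from_poll_flags(poll_flags_from_events(EVENT_WRITABLE)) == EVENT_WRITABLE);
    return 0;
}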
@@ -288,24 +299,25 @@ struct io_wait_arguments {
  static
  VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
      struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-     struct Event_Backend_URing *data = arguments->data;
+     struct Event_Selector_URing *data = arguments->data;
 
      struct io_uring_sqe *sqe = io_get_sqe(data);
 
      if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
 
      io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
-
+     io_uring_submit_now(data);
+
      rb_exc_raise(exception);
  };
 
  static
  VALUE io_wait_transfer(VALUE _arguments) {
      struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-     struct Event_Backend_URing *data = arguments->data;
+     struct Event_Selector_URing *data = arguments->data;
 
-     VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
-     if (DEBUG) fprintf(stderr, "io_wait:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+     VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+     if (DEBUG) fprintf(stderr, "io_wait:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
 
      // We explicitly filter the resulting events based on the requested events.
      // In some cases, poll will report events we didn't ask for.
@@ -314,19 +326,21 @@ VALUE io_wait_transfer(VALUE _arguments) {
      return INT2NUM(events_from_poll_flags(flags));
  };
 
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     int descriptor = Event_Backend_io_descriptor(io);
+     int descriptor = Event_Selector_io_descriptor(io);
      struct io_uring_sqe *sqe = io_get_sqe(data);
 
      short flags = poll_flags_from_events(NUM2INT(events));
 
-     if (DEBUG) fprintf(stderr, "Event_Backend_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
+     if (DEBUG) fprintf(stderr, "Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 
      io_uring_prep_poll_add(sqe, descriptor, flags);
      io_uring_sqe_set_data(sqe, (void*)fiber);
+
+     // If we are going to wait, we assume that we are waiting for a while:
      io_uring_submit_pending(data);
 
      struct io_wait_arguments io_wait_arguments = {
@@ -340,26 +354,26 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 
  #ifdef HAVE_RUBY_IO_BUFFER_H
 
- static int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ static int io_read(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
      struct io_uring_sqe *sqe = io_get_sqe(data);
 
      if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
 
      io_uring_prep_read(sqe, descriptor, buffer, length, 0);
      io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit_pending(data);
+     io_uring_submit_now(data);
 
-     VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
-     if (DEBUG) fprintf(stderr, "io_read:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+     VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+     if (DEBUG) fprintf(stderr, "io_read:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
 
      return RB_NUM2INT(result);
  }
 
- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     int descriptor = Event_Backend_io_descriptor(io);
+     int descriptor = Event_Selector_io_descriptor(io);
 
      void *base;
      size_t size;
@@ -379,7 +393,7 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
              if ((size_t)result >= length) break;
              length -= result;
          } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
-             Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(READABLE));
+             Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_READABLE));
          } else {
              rb_syserr_fail(-result, strerror(-result));
          }
@@ -389,7 +403,7 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
  }
 
  static
- int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ int io_write(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
      struct io_uring_sqe *sqe = io_get_sqe(data);
 
      if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
@@ -398,17 +412,17 @@ int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char
      io_uring_sqe_set_data(sqe, (void*)fiber);
      io_uring_submit_pending(data);
 
-     int result = RB_NUM2INT(Event_Backend_fiber_transfer(data->backend.loop));
-     if (DEBUG) fprintf(stderr, "io_write:Event_Backend_fiber_transfer -> %d\n", result);
+     int result = RB_NUM2INT(Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
+     if (DEBUG) fprintf(stderr, "io_write:Event_Selector_fiber_transfer -> %d\n", result);
 
      return result;
  }
 
- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     int descriptor = Event_Backend_io_descriptor(io);
+     int descriptor = Event_Selector_io_descriptor(io);
 
      const void *base;
      size_t size;
@@ -429,7 +443,7 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
              if ((size_t)result >= length) break;
              length -= result;
          } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
-             Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(WRITABLE));
+             Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_WRITABLE));
          } else {
              rb_syserr_fail(-result, strerror(-result));
          }
@@ -440,23 +454,20 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
 
  #endif
 
- static const int ASYNC_CLOSE = 2;
+ static const int ASYNC_CLOSE = 1;
 
- VALUE Event_Backend_URing_io_close(VALUE self, VALUE io) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_close(VALUE self, VALUE io) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     int descriptor = Event_Backend_io_descriptor(io);
+     int descriptor = Event_Selector_io_descriptor(io);
 
      if (ASYNC_CLOSE) {
          struct io_uring_sqe *sqe = io_get_sqe(data);
 
          io_uring_prep_close(sqe, descriptor);
          io_uring_sqe_set_data(sqe, NULL);
-         if (ASYNC_CLOSE == 1)
-             io_uring_submit_now(data);
-         else if (ASYNC_CLOSE == 2)
-             io_uring_submit_pending(data);
+         io_uring_submit_now(data);
      } else {
          close(descriptor);
      }
@@ -497,7 +508,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
  }
 
  struct select_arguments {
-     struct Event_Backend_URing *data;
+     struct Event_Selector_URing *data;
 
      int result;
 
@@ -552,34 +563,29 @@ unsigned select_process_completions(struct io_uring *ring) {
          VALUE result = RB_INT2NUM(cqe->res);
 
          if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
-
+
          io_uring_cq_advance(ring, 1);
-
-         Event_Backend_fiber_transfer_result(fiber, result);
+
+         Event_Selector_fiber_transfer(fiber, 1, &result);
      }
 
      // io_uring_cq_advance(ring, completed);
-
+
      if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
-
+
      return completed;
  }
 
- VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_select(VALUE self, VALUE duration) {
+     struct Event_Selector_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
-     Event_Backend_ready_pop(&data->backend);
+     int ready = Event_Selector_queue_flush(&data->backend);
 
-     int result = 0;
-
-     // There can only be events waiting if we have been submitting them early:
-     if (EARLY_SUBMIT) {
-         result = select_process_completions(&data->ring);
-     }
+     int result = select_process_completions(&data->ring);
 
-     // If we aren't submitting events early, we need to submit them and/or wait for them:
-     if (result == 0) {
+     // If the ready list was empty and we didn't process any completions:
+     if (!ready && result == 0) {
          // We might need to wait for events:
          struct select_arguments arguments = {
              .data = data,
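
The new `select` first flushes the ready queue, then reaps any completions that are already available before deciding whether to block. The reaping loop hands each CQE's result to the fiber stored in its `user_data`. A hedged sketch of that loop using liburing's peek API, assuming the declarations above (`io_close` enqueues a NULL `user_data`, so it is skipped here):

#include <liburing.h>
#include <ruby.h>

VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);

// Sketch of the completion-reaping pattern used by select_process_completions:
static unsigned reap_completions(struct io_uring *ring) {
    unsigned completed = 0;
    struct io_uring_cqe *cqe = NULL;

    while (io_uring_peek_cqe(ring, &cqe) == 0) {
        VALUE fiber = (VALUE)cqe->user_data; // NULL for fire-and-forget close()
        VALUE result = RB_INT2NUM(cqe->res);

        io_uring_cq_advance(ring, 1); // release the CQE before transferring away
        completed += 1;

        if (fiber) Event_Selector_fiber_transfer(fiber, 1, &result);
    }

    return completed;
}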
@@ -603,25 +609,30 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
      return RB_INT2NUM(result);
  }
 
- void Init_Event_Backend_URing(VALUE Event_Backend) {
-     Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
+ void Init_Event_Selector_URing(VALUE Event_Selector) {
+     Event_Selector_URing = rb_define_class_under(Event_Selector, "URing", rb_cObject);
+
+     rb_define_alloc_func(Event_Selector_URing, Event_Selector_URing_allocate);
+     rb_define_method(Event_Selector_URing, "initialize", Event_Selector_URing_initialize, 1);
+
+     rb_define_method(Event_Selector_URing, "resume", Event_Selector_URing_resume, -1);
+     rb_define_method(Event_Selector_URing, "yield", Event_Selector_URing_yield, 0);
+     rb_define_method(Event_Selector_URing, "push", Event_Selector_URing_push, 1);
+     rb_define_method(Event_Selector_URing, "raise", Event_Selector_URing_raise, -1);
+
+     rb_define_method(Event_Selector_URing, "ready?", Event_Selector_URing_ready_p, 0);
 
-     rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
-     rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
-     rb_define_method(Event_Backend_URing, "transfer", Event_Backend_URing_transfer, 1);
-     rb_define_method(Event_Backend_URing, "defer", Event_Backend_URing_defer, 0);
-     rb_define_method(Event_Backend_URing, "ready?", Event_Backend_URing_ready_p, 0);
-     rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
-     rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
+     rb_define_method(Event_Selector_URing, "select", Event_Selector_URing_select, 1);
+     rb_define_method(Event_Selector_URing, "close", Event_Selector_URing_close, 0);
 
-     rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
+     rb_define_method(Event_Selector_URing, "io_wait", Event_Selector_URing_io_wait, 3);
 
  #ifdef HAVE_RUBY_IO_BUFFER_H
-     rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 4);
-     rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 4);
+     rb_define_method(Event_Selector_URing, "io_read", Event_Selector_URing_io_read, 4);
+     rb_define_method(Event_Selector_URing, "io_write", Event_Selector_URing_io_write, 4);
  #endif
 
-     rb_define_method(Event_Backend_URing, "io_close", Event_Backend_URing_io_close, 1);
+     rb_define_method(Event_Selector_URing, "io_close", Event_Selector_URing_io_close, 1);
 
-     rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
+     rb_define_method(Event_Selector_URing, "process_wait", Event_Selector_URing_process_wait, 3);
  }
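
Neither file in this diff contains the extension entry point, so the wiring below is an illustrative assumption: a hypothetical init function registering the selectors under a module, mirroring the `Init_Event_Selector_KQueue(VALUE Event_Selector)` declaration from the first hunk (the module names and guard macros are guesses, not confirmed by the diff):

#include <ruby.h>

void Init_Event_Selector(VALUE Event_Selector);
void Init_Event_Selector_URing(VALUE Event_Selector);
void Init_Event_Selector_KQueue(VALUE Event_Selector);

// Hypothetical entry point, not part of this diff:
void Init_event_ext(void) {
    VALUE Event = rb_define_module("Event");
    VALUE Event_Selector = rb_define_module_under(Event, "Selector");

    Init_Event_Selector(Event_Selector);
#ifdef EVENT_SELECTOR_URING
    Init_Event_Selector_URing(Event_Selector);
#endif
#ifdef EVENT_SELECTOR_KQUEUE
    Init_Event_Selector_KQueue(Event_Selector);
#endif
}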