event 0.1.2 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,6 +19,7 @@
  // THE SOFTWARE.
  
  #include "kqueue.h"
+ #include "backend.h"
  
  #include <sys/event.h>
  #include <sys/ioctl.h>
@@ -27,9 +28,7 @@
  static VALUE Event_Backend_KQueue = Qnil;
  static ID id_fileno, id_transfer;
  
- static const int READABLE = 1, PRIORITY = 2, WRITABLE = 4;
- 
- static const unsigned KQUEUE_MAX_EVENTS = 1024;
+ enum {KQUEUE_MAX_EVENTS = 64};
  
  struct Event_Backend_KQueue {
  	VALUE loop;
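
The READABLE, PRIORITY and WRITABLE constants removed above are not gone; both backends now share them via the new backend.h included in the first hunk. That header is not part of this diff, so the following is only a hypothetical sketch, inferred from the removed values (1, 2, 4):

    // Hypothetical sketch of the shared backend.h (the header itself is not in this diff).
    // Only the values 1, 2 and 4 are inferred from the constants removed above.
    #ifndef EVENT_BACKEND_H
    #define EVENT_BACKEND_H
    
    enum {
    	READABLE = 1,
    	PRIORITY = 2,
    	WRITABLE = 4
    };
    
    #endif
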
@@ -42,13 +41,19 @@ void Event_Backend_KQueue_Type_mark(void *_data)
  	rb_gc_mark(data->loop);
  }
  
+ static
+ void close_internal(struct Event_Backend_KQueue *data) {
+ 	if (data->descriptor >= 0) {
+ 		close(data->descriptor);
+ 		data->descriptor = -1;
+ 	}
+ }
+ 
  void Event_Backend_KQueue_Type_free(void *_data)
  {
  	struct Event_Backend_KQueue *data = _data;
  
- 	if (data->descriptor >= 0) {
- 		close(data->descriptor);
- 	}
+ 	close_internal(data);
  
  	free(data);
  }
@@ -98,41 +103,121 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
  	return self;
  }
  
- VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ VALUE Event_Backend_KQueue_close(VALUE self) {
  	struct Event_Backend_KQueue *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
  
- 	struct kevent event;
- 	u_short flags = 0;
- 
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ 	close_internal(data);
  
- 	int mask = NUM2INT(events);
+ 	return Qnil;
+ }
+ 
+ static
+ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
  
- 	if (mask & READABLE) {
- 		flags |= EVFILT_READ;
+ 	if (events & READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+ 
+ 		// #ifdef EV_OOBAND
+ 		// if (events & PRIORITY) {
+ 		// 	kevents[count].flags |= EV_OOBAND;
+ 		// }
+ 		// #endif
+ 
+ 		count++;
  	}
  
- 	if (mask & PRIORITY) {
- 		flags |= EV_OOBAND;
+ 	if (events & WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+ 		count++;
  	}
  
- 	if (mask & WRITABLE) {
- 		flags |= EVFILT_WRITE;
+ 	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+ 
+ 	if (result == -1) {
+ 		rb_sys_fail("kevent(register)");
  	}
+ 
+ 	return events;
+ }
  
- 	EV_SET(&event, descriptor, flags, EV_ADD|EV_ENABLE|EV_ONESHOT, 0, 0, (void*)fiber);
+ static
+ void io_remove_filters(int descriptor, int ident, int events) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
  
- 	// A better approach is to batch all changes:
- 	int result = kevent(data->descriptor, &event, 1, NULL, 0, NULL);
+ 	if (events & READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_DELETE;
+ 
+ 		count++;
+ 	}
  
- 	if (result == -1) {
- 		rb_sys_fail("kevent");
+ 	if (events & WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_DELETE;
+ 		count++;
  	}
  
- 	rb_funcall(data->loop, id_transfer, 0);
+ 	// Ignore the result.
+ 	kevent(descriptor, kevents, count, NULL, 0, NULL);
+ }
+ 
+ struct io_wait_arguments {
+ 	struct Event_Backend_KQueue *data;
+ 	int events;
+ 	int descriptor;
+ };
+ 
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 
+ 	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
  
- 	return Qnil;
+ 	rb_exc_raise(exception);
+ };
+ 
+ static inline
+ int events_from_kqueue_filter(int filter) {
+ 	if (filter == EVFILT_READ) return READABLE;
+ 	if (filter == EVFILT_WRITE) return WRITABLE;
+ 
+ 	return 0;
+ }
+ 
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 
+ 	VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
+ 
+ 	return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
+ };
+ 
+ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct Event_Backend_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+ 
+ 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ 
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+ 		.data = data,
+ 		.descriptor = descriptor,
+ 	};
+ 
+ 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }
  
  static
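
A note on the registration style above: io_add_filters fills each struct kevent field by hand and relies on the `{0}` initializer to zero fflags and data. The standard BSD EV_SET macro would express the same registration in one line; this is only an equivalent sketch, not what the release ships:

    // Equivalent one-shot READABLE registration using the EV_SET macro (sketch only):
    EV_SET(&kevents[count], ident, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, (void*)fiber);
    count++;
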
@@ -150,7 +235,7 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
  
  	else if (RB_FLOAT_TYPE_P(duration)) {
  		double value = RFLOAT_VALUE(duration);
- 		time_t seconds = duration;
+ 		time_t seconds = value;
  
  		storage->tv_sec = seconds;
  		storage->tv_nsec = (value - seconds) * 1000000000L;
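
This small change fixes a real bug: `time_t seconds = duration;` truncated the raw VALUE handle of the Ruby Float rather than its numeric value, so the computed timespec was garbage. A worked example of the corrected conversion, assuming a duration of 1.5 seconds:

    // Corrected conversion for duration = 1.5 (illustration only):
    double value = 1.5;      // RFLOAT_VALUE(duration)
    time_t seconds = value;  // truncates to 1
    // storage->tv_sec  == 1
    // storage->tv_nsec == (1.5 - 1) * 1000000000L == 500000000
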
@@ -161,25 +246,89 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
  	rb_raise(rb_eRuntimeError, "unable to convert timeout");
  }
  
+ static
+ int timeout_nonblocking(struct timespec * timespec) {
+ 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+ 
+ struct select_arguments {
+ 	struct Event_Backend_KQueue *data;
+ 
+ 	int count;
+ 	struct kevent events[KQUEUE_MAX_EVENTS];
+ 
+ 	struct timespec storage;
+ 	struct timespec *timeout;
+ };
+ 
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+ 
+ 	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+ 
+ 	return NULL;
+ }
+ 
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+ 
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_without_gvl:kevent");
+ 	}
+ }
+ 
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+ 	select_internal((void *)arguments);
+ 
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_with_gvl:kevent");
+ 	}
+ }
+ 
  VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
  	struct Event_Backend_KQueue *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
  
- 	struct kevent events[KQUEUE_MAX_EVENTS];
- 	struct timespec storage;
+ 	struct select_arguments arguments = {
+ 		.data = data,
+ 		.count = KQUEUE_MAX_EVENTS,
+ 		.storage = {
+ 			.tv_sec = 0,
+ 			.tv_nsec = 0
+ 		}
+ 	};
  
- 	int count = kevent(data->descriptor, NULL, 0, events, KQUEUE_MAX_EVENTS, make_timeout(duration, &storage));
+ 	// We break this implementation into two parts.
+ 	// (1) count = kevent(..., timeout = 0)
+ 	// (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
+ 	// This allows us to avoid releasing and reacquiring the GVL.
+ 	// Non-comprehensive testing shows this gives a 1.5x speedup.
+ 	arguments.timeout = &arguments.storage;
  
- 	if (count == -1) {
- 		rb_sys_fail("kevent");
+ 	// First do the syscall with no timeout to get any immediately available events:
+ 	select_internal_with_gvl(&arguments);
+ 
+ 	// If there were no pending events, if we have a timeout, wait for more events:
+ 	if (arguments.count == 0) {
+ 		arguments.timeout = make_timeout(duration, &arguments.storage);
+ 
+ 		if (!timeout_nonblocking(arguments.timeout)) {
+ 			arguments.count = KQUEUE_MAX_EVENTS;
+ 
+ 			select_internal_without_gvl(&arguments);
+ 		}
  	}
  
- 	for (int i = 0; i < count; i += 1) {
- 		VALUE fiber = (VALUE)events[i].udata;
- 		rb_funcall(fiber, id_transfer, 0);
+ 	for (int i = 0; i < arguments.count; i += 1) {
+ 		VALUE fiber = (VALUE)arguments.events[i].udata;
+ 		VALUE result = INT2NUM(arguments.events[i].filter);
+ 		rb_funcall(fiber, id_transfer, 1, result);
  	}
  
- 	return INT2NUM(count);
+ 	return INT2NUM(arguments.count);
  }
  
  
@@ -190,6 +339,7 @@ void Init_Event_Backend_KQueue(VALUE Event_Backend) {
  
  	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
  	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+ 	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
  
  	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
  	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
@@ -19,21 +19,21 @@
  // THE SOFTWARE.
  
  #include "uring.h"
+ #include "backend.h"
  
  #include <liburing.h>
+ #include <poll.h>
  #include <time.h>
  
  static VALUE Event_Backend_URing = Qnil;
  static ID id_fileno, id_transfer;
  
- static const int READABLE = 1, PRIORITY = 2, WRITABLE = 4;
- 
- static const int URING_ENTRIES = 1024;
- static const int URING_MAX_EVENTS = 1024;
+ enum {URING_ENTRIES = 128};
+ enum {URING_MAX_EVENTS = 128};
  
  struct Event_Backend_URing {
  	VALUE loop;
- 	struct io_uring* ring;
+ 	struct io_uring ring;
  };
  
  void Event_Backend_URing_Type_mark(void *_data)
@@ -42,14 +42,19 @@ void Event_Backend_URing_Type_mark(void *_data)
  	rb_gc_mark(data->loop);
  }
  
+ static
+ void close_internal(struct Event_Backend_URing *data) {
+ 	if (data->ring.ring_fd >= 0) {
+ 		io_uring_queue_exit(&data->ring);
+ 		data->ring.ring_fd = -1;
+ 	}
+ }
+ 
  void Event_Backend_URing_Type_free(void *_data)
  {
  	struct Event_Backend_URing *data = _data;
  
- 	if (data->ring) {
- 		io_uring_queue_exit(data->ring);
- 		xfree(data->ring);
- 	}
+ 	close_internal(data);
  
  	free(data);
  }
@@ -75,7 +80,7 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
  	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
  	data->loop = Qnil;
- 	data->ring = NULL;
+ 	data->ring.ring_fd = -1;
  
  	return instance;
  }
@@ -85,85 +90,130 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
  	data->loop = loop;
- 	data->ring = xmalloc(sizeof(struct io_uring));
  
- 	int result = io_uring_queue_init(URING_ENTRIES, data->ring, 0);
+ 	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
  
- 	if (result == -1) {
- 		rb_sys_fail("io_uring_queue_init");
+ 	if (result < 0) {
+ 		rb_syserr_fail(-result, "io_uring_queue_init");
  	}
  
+ 	rb_update_max_fd(data->ring.ring_fd);
+ 
  	return self;
  }
  
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ VALUE Event_Backend_URing_close(VALUE self) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
+ 	close_internal(data);
  
- 	int mask = NUM2INT(events);
+ 	return Qnil;
+ }
+ 
+ static inline
+ short poll_flags_from_events(int events) {
  	short flags = 0;
  
- 	if (mask & READABLE) {
- 		flags |= POLL_IN;
- 	}
+ 	if (events & READABLE) flags |= POLLIN;
+ 	if (events & PRIORITY) flags |= POLLPRI;
+ 	if (events & WRITABLE) flags |= POLLOUT;
  
- 	if (mask & PRIORITY) {
- 		flags |= POLL_PRI;
- 	}
+ 	flags |= POLLERR;
+ 	flags |= POLLHUP;
  
- 	if (mask & WRITABLE) {
- 		flags |= POLL_OUT;
- 	}
- 
- 	// fprintf(stderr, "poll_add(%p, %d, %d)\n", sqe, descriptor, flags);
+ 	return flags;
+ }
  
- 	io_uring_prep_poll_add(sqe, descriptor, flags);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(data->ring);
+ static inline
+ int events_from_poll_flags(short flags) {
+ 	int events = 0;
+ 
+ 	if (flags & POLLIN) events |= READABLE;
+ 	if (flags & POLLPRI) events |= PRIORITY;
+ 	if (flags & POLLOUT) events |= WRITABLE;
  
- 	// fprintf(stderr, "count = %d, errno = %d\n", count, errno);
+ 	return events;
+ }
+ 
+ struct io_wait_arguments {
+ 	struct Event_Backend_URing *data;
+ 	VALUE fiber;
+ 	short flags;
+ };
  
- 	rb_funcall(data->loop, id_transfer, 0);
+ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+ 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
  
- 	return Qnil;
+ 	while (sqe == NULL) {
+ 		sqe = io_uring_get_sqe(&data->ring);
+ 	}
+ 
+ 	return sqe;
  }
  
  static
- struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
- 	if (duration == Qnil) {
- 		return NULL;
- 	}
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 	struct Event_Backend_URing *data = arguments->data;
  
- 	if (FIXNUM_P(duration)) {
- 		storage->tv_sec = NUM2TIMET(duration);
- 		storage->tv_nsec = 0;
- 
- 		return storage;
- 	}
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
  
- 	else if (RB_FLOAT_TYPE_P(duration)) {
- 		double value = RFLOAT_VALUE(duration);
- 		time_t seconds = duration;
- 
- 		storage->tv_sec = seconds;
- 		storage->tv_nsec = (value - seconds) * 1000000000L;
- 
- 		return storage;
- 	}
+ 	// fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
  
- 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ 	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+ 	io_uring_submit(&data->ring);
+ 
+ 	rb_exc_raise(exception);
+ };
+ 
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 	struct Event_Backend_URing *data = arguments->data;
+ 
+ 	VALUE result = rb_funcall(data->loop, id_transfer, 0);
+ 
+ 	// We explicitly filter the resulting events based on the requested events.
+ 	// In some cases, poll will report events we didn't ask for.
+ 	short flags = arguments->flags & NUM2INT(result);
+ 
+ 	return INT2NUM(events_from_poll_flags(flags));
+ };
+ 
+ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct Event_Backend_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ 
+ 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 
+ 	if (!sqe) return INT2NUM(0);
+ 
+ 	short flags = poll_flags_from_events(NUM2INT(events));
+ 
+ 	// fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
+ 
+ 	io_uring_prep_poll_add(sqe, descriptor, flags);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	io_uring_submit(&data->ring);
+ 
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.data = data,
+ 		.fiber = fiber,
+ 		.flags = flags
+ 	};
+ 
+ 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }
  
  inline static
  void resize_to_capacity(VALUE string, size_t offset, size_t length) {
  	size_t current_length = RSTRING_LEN(string);
  	long difference = (long)(offset + length) - (long)current_length;
-
+
  	difference += 1;
-
+
  	if (difference > 0) {
  		rb_str_modify_expand(string, difference);
  	} else {
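
The poll_flags_from_events and events_from_poll_flags helpers added in the hunk above translate between the gem's event bits and poll(2) flags; POLLERR and POLLHUP are always added to the request but contribute no event bit on the way back. A small round-trip illustration, not part of the shipped code:

    // Round trip through the helpers above (illustration only):
    short flags = poll_flags_from_events(READABLE | WRITABLE);
    // flags == POLLIN | POLLOUT | POLLERR | POLLHUP
    
    int events = events_from_poll_flags(flags);
    // events == READABLE | WRITABLE (POLLERR/POLLHUP map to nothing)
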
@@ -174,7 +224,7 @@ void resize_to_capacity(VALUE string, size_t offset, size_t length) {
  inline static
  void resize_to_fit(VALUE string, size_t offset, size_t length) {
  	size_t current_length = RSTRING_LEN(string);
-
+
  	if (current_length < (offset + length)) {
  		rb_str_set_len(string, offset + length);
  	}
@@ -183,92 +233,174 @@ void resize_to_fit(VALUE string, size_t offset, size_t length) {
  VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
+
  	resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
-
+
  	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
- 
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 
  	struct iovec iovecs[1];
  	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
  	iovecs[0].iov_len = NUM2SIZET(length);
-
+
  	io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
  	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(data->ring);
+ 	io_uring_submit(&data->ring);
+ 
+ 	// fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
  
  	int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
-
+
  	if (result < 0) {
  		rb_syserr_fail(-result, strerror(-result));
  	}
-
+
  	resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
-
+
  	return INT2NUM(result);
  }
  
  VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
+
  	if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
  		rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
  	}
-
+
  	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
- 
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 
  	struct iovec iovecs[1];
  	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
  	iovecs[0].iov_len = NUM2SIZET(length);
-
+
  	io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
  	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(data->ring);
+ 	io_uring_submit(&data->ring);
+ 
+ 	// fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
  
  	int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
-
+
  	if (result < 0) {
  		rb_syserr_fail(-result, strerror(-result));
  	}
-
+
  	return INT2NUM(result);
  }
  
+ static
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
+ 	if (duration == Qnil) {
+ 		return NULL;
+ 	}
+ 
+ 	if (FIXNUM_P(duration)) {
+ 		storage->tv_sec = NUM2TIMET(duration);
+ 		storage->tv_nsec = 0;
+ 
+ 		return storage;
+ 	}
+ 
+ 	else if (RB_FLOAT_TYPE_P(duration)) {
+ 		double value = RFLOAT_VALUE(duration);
+ 		time_t seconds = value;
+ 
+ 		storage->tv_sec = seconds;
+ 		storage->tv_nsec = (value - seconds) * 1000000000L;
+ 
+ 		return storage;
+ 	}
+ 
+ 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+ 
+ static
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
+ 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+ 
+ struct select_arguments {
+ 	struct Event_Backend_URing *data;
+ 
+ 	int count;
+ 	struct io_uring_cqe **cqes;
+ 
+ 	struct __kernel_timespec storage;
+ 	struct __kernel_timespec *timeout;
+ };
+ 
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+ 
+ 	arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
+ 
+ 	// If waiting resulted in a timeout, there are 0 events.
+ 	if (arguments->count == -ETIME) {
+ 		arguments->count = 0;
+ 	}
+ 
+ 	return NULL;
+ }
+ 
+ static
+ int select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+ 
+ 	if (arguments->count < 0) {
+ 		rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+ 	}
+ 
+ 	return arguments->count;
+ }
+ 
  VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
  	struct io_uring_cqe *cqes[URING_MAX_EVENTS];
- 	struct __kernel_timespec storage;
- 
- 	if (duration != Qnil) {
- 		int result = io_uring_wait_cqe_timeout(data->ring, cqes, make_timeout(duration, &storage));
+ 
+ 	// This is a non-blocking operation:
+ 	int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
+ 
+ 	if (result < 0) {
+ 		rb_syserr_fail(-result, strerror(-result));
+ 	} else if (result == 0) {
+ 		// We might need to wait for events:
+ 		struct select_arguments arguments = {
+ 			.data = data,
+ 			.cqes = cqes,
+ 			.timeout = NULL,
+ 		};
+ 
+ 		arguments.timeout = make_timeout(duration, &arguments.storage);
  
- 		if (result == -ETIME) {
- 			// Timeout.
- 		} else if (result < 0) {
- 			rb_syserr_fail(-result, strerror(-result));
+ 		if (!timeout_nonblocking(arguments.timeout)) {
+ 			result = select_internal_without_gvl(&arguments);
  		}
  	}
  
- 	int count = io_uring_peek_batch_cqe(data->ring, cqes, URING_MAX_EVENTS);
+ 	// fprintf(stderr, "cqes count=%d\n", result);
  
- 	if (count == -1) {
- 		rb_sys_fail("io_uring_peek_batch_cqe");
- 	}
- 
- 	for (int i = 0; i < count; i += 1) {
+ 	for (int i = 0; i < result; i += 1) {
+ 		// If the operation was cancelled, or the operation has no user data (fiber):
+ 		if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
+ 			continue;
+ 		}
+ 
  		VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
  		VALUE result = INT2NUM(cqes[i]->res);
- 
- 		io_uring_cqe_seen(data->ring, cqes[i]);
+ 
+ 		// fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
+ 
+ 		io_uring_cqe_seen(&data->ring, cqes[i]);
  
  		rb_funcall(fiber, id_transfer, 1, result);
  	}
  
- 	return INT2NUM(count);
+ 	return INT2NUM(result);
  }
  
  void Init_Event_Backend_URing(VALUE Event_Backend) {
@@ -279,10 +411,11 @@ void Init_Event_Backend_URing(VALUE Event_Backend) {
  
  	rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
  	rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+ 	rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
  
  	rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
  	rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
-
+
  	rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
  	rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
  }