event 0.2.1 → 0.4.1

The changes below cover the kqueue backend first, then the io_uring backend.

@@ -28,14 +28,7 @@
  static VALUE Event_Backend_KQueue = Qnil;
  static ID id_fileno, id_transfer;
  
- static const int
-     READABLE = 1,
-     PRIORITY = 2,
-     WRITABLE = 4,
-     ERROR = 8,
-     HANGUP = 16;
- 
- static const unsigned KQUEUE_MAX_EVENTS = 1024;
+ enum {KQUEUE_MAX_EVENTS = 64};
  
  struct Event_Backend_KQueue {
      VALUE loop;
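
Two things changed here: the event-constant definitions (READABLE and friends) moved out of this file, and KQUEUE_MAX_EVENTS went from `static const` to an `enum` while shrinking from 1024 to 64. The `enum` is not cosmetic: in C, a `static const` variable is not an integer constant expression, so it cannot size an array member inside a struct, which the new `select_arguments` struct further down needs. A minimal sketch (the `example_arguments` struct is a hypothetical stand-in):

```c
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

enum {KQUEUE_MAX_EVENTS = 64}; // an enumerator is an integer constant expression

struct example_arguments {
	int count;
	// A struct member needs a constant expression for its bound; a
	// `static const unsigned` here would not compile in strict C:
	struct kevent events[KQUEUE_MAX_EVENTS];
};
```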
@@ -48,13 +41,19 @@ void Event_Backend_KQueue_Type_mark(void *_data)
      rb_gc_mark(data->loop);
  }
  
+ static
+ void close_internal(struct Event_Backend_KQueue *data) {
+     if (data->descriptor >= 0) {
+         close(data->descriptor);
+         data->descriptor = -1;
+     }
+ }
+ 
  void Event_Backend_KQueue_Type_free(void *_data)
  {
      struct Event_Backend_KQueue *data = _data;
  
-     if (data->descriptor >= 0) {
-         close(data->descriptor);
-     }
+     close_internal(data);
  
      free(data);
  }
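
Extracting `close_internal` makes teardown idempotent: the descriptor is reset to -1 after the first close, so the same helper can back both the GC free function and the user-callable `#close` added below without risking a double close. A reduced sketch of the pattern (the `backend` name is hypothetical):

```c
#include <unistd.h>

struct backend {
	int descriptor;
};

static void backend_close(struct backend *data) {
	if (data->descriptor >= 0) {
		close(data->descriptor); // release the kernel resource exactly once
		data->descriptor = -1;   // later calls become no-ops
	}
}
```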
@@ -104,50 +103,121 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
      return self;
  }
  
- static inline
- u_short kqueue_filter_from_events(int events) {
-     u_short filter = 0;
+ VALUE Event_Backend_KQueue_close(VALUE self) {
+     struct Event_Backend_KQueue *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
  
-     if (events & READABLE) filter |= EVFILT_READ;
-     if (events & PRIORITY) filter |= EV_OOBAND;
-     if (events & WRITABLE) filter |= EVFILT_WRITE;
+     close_internal(data);
  
-     return filter;
+     return Qnil;
  }
  
- static inline
- int events_from_kqueue_filter(u_short filter) {
-     int events = 0;
+ static
+ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+     int count = 0;
+     struct kevent kevents[2] = {0};
  
-     if (filter & EVFILT_READ) events |= READABLE;
-     if (filter & EV_OOBAND) events |= PRIORITY;
-     if (filter & EVFILT_WRITE) events |= WRITABLE;
+     if (events & READABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_READ;
+         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+         kevents[count].udata = (void*)fiber;
+ 
+         // #ifdef EV_OOBAND
+         // if (events & PRIORITY) {
+         //     kevents[count].flags |= EV_OOBAND;
+         // }
+         // #endif
+ 
+         count++;
+     }
+ 
+     if (events & WRITABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_WRITE;
+         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+         kevents[count].udata = (void*)fiber;
+         count++;
+     }
  
-     return INT2NUM(events);
+     int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+ 
+     if (result == -1) {
+         rb_sys_fail("kevent(register)");
+     }
+ 
+     return events;
  }
  
- VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-     struct Event_Backend_KQueue *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+ static
+ void io_remove_filters(int descriptor, int ident, int events) {
+     int count = 0;
+     struct kevent kevents[2] = {0};
  
-     struct kevent event = {0};
+     if (events & READABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_READ;
+         kevents[count].flags = EV_DELETE;
+ 
+         count++;
+     }
  
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+     if (events & WRITABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_WRITE;
+         kevents[count].flags = EV_DELETE;
+         count++;
+     }
  
-     event.ident = descriptor;
-     event.filter = kqueue_filters_from_events(events);
-     event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
-     event.udata = (void*)fiber;
+     // Ignore the result.
+     kevent(descriptor, kevents, count, NULL, 0, NULL);
+ }
+ 
+ struct io_wait_arguments {
+     struct Event_Backend_KQueue *data;
+     int events;
+     int descriptor;
+ };
+ 
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
  
-     // A better approach is to batch all changes:
-     int result = kevent(data->descriptor, &event, 1, NULL, 0, NULL);
+     io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
  
-     if (result == -1) {
-         rb_sys_fail("kevent");
-     }
+     rb_exc_raise(exception);
+ };
+ 
+ static inline
+ int events_from_kqueue_filter(int filter) {
+     if (filter == EVFILT_READ) return READABLE;
+     if (filter == EVFILT_WRITE) return WRITABLE;
+ 
+     return 0;
+ }
+ 
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 
+     VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
  
-     VALUE result = rb_funcall(data->loop, id_transfer, 0);
      return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
+ };
+ 
+ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+     struct Event_Backend_KQueue *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+ 
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ 
+     struct io_wait_arguments io_wait_arguments = {
+         .events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+         .data = data,
+         .descriptor = descriptor,
+     };
+ 
+     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }
  
  static
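
The rewritten `io_wait` splits the happy path (`io_wait_transfer`) from cleanup (`io_wait_rescue`). The filters are registered with `EV_ONESHOT`, so the kernel removes them after they fire; the rescue path covers the other case, where the waiting fiber is resumed with an exception and the registration would otherwise leak. This is the standard `rb_rescue` idiom for C extensions; a condensed sketch using the names from the diff (the `wait_with_cleanup` wrapper itself is hypothetical):

```c
#include <ruby.h>

// rb_rescue runs the first callback; if it raises, the second callback
// receives the exception before the stack unwinds, like begin/rescue.
static VALUE wait_with_cleanup(struct io_wait_arguments *arguments) {
	return rb_rescue(
		io_wait_transfer, (VALUE)arguments, // suspend on the event loop
		io_wait_rescue, (VALUE)arguments    // on exception: remove filters, re-raise
	);
}
```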
@@ -165,7 +235,7 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
  
      else if (RB_FLOAT_TYPE_P(duration)) {
          double value = RFLOAT_VALUE(duration);
-         time_t seconds = duration;
+         time_t seconds = value;
  
          storage->tv_sec = seconds;
          storage->tv_nsec = (value - seconds) * 1000000000L;
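
This one-line change fixes a real bug: `duration` is a `VALUE`, Ruby's tagged machine word, so `time_t seconds = duration;` compiled cleanly but stored tag bits rather than a second count. The fix truncates the unboxed double instead. Isolated for illustration (the `duration_to_timespec` helper is hypothetical):

```c
#include <time.h>

static void duration_to_timespec(double value, struct timespec *storage) {
	time_t seconds = (time_t)value; // whole seconds, truncated toward zero

	storage->tv_sec = seconds;
	// The fractional remainder becomes nanoseconds:
	storage->tv_nsec = (long)((value - seconds) * 1000000000L);
}
```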
@@ -176,26 +246,89 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
      rb_raise(rb_eRuntimeError, "unable to convert timeout");
  }
  
+ static
+ int timeout_nonblocking(struct timespec * timespec) {
+     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+ 
+ struct select_arguments {
+     struct Event_Backend_KQueue *data;
+ 
+     int count;
+     struct kevent events[KQUEUE_MAX_EVENTS];
+ 
+     struct timespec storage;
+     struct timespec *timeout;
+ };
+ 
+ static
+ void * select_internal(void *_arguments) {
+     struct select_arguments * arguments = (struct select_arguments *)_arguments;
+ 
+     arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+ 
+     return NULL;
+ }
+ 
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+ 
+     if (arguments->count == -1) {
+         rb_sys_fail("select_internal_without_gvl:kevent");
+     }
+ }
+ 
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+     select_internal((void *)arguments);
+ 
+     if (arguments->count == -1) {
+         rb_sys_fail("select_internal_with_gvl:kevent");
+     }
+ }
+ 
  VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
      struct Event_Backend_KQueue *data = NULL;
      TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
  
-     struct kevent events[KQUEUE_MAX_EVENTS];
-     struct timespec storage;
+     struct select_arguments arguments = {
+         .data = data,
+         .count = KQUEUE_MAX_EVENTS,
+         .storage = {
+             .tv_sec = 0,
+             .tv_nsec = 0
+         }
+     };
  
-     int count = kevent(data->descriptor, NULL, 0, events, KQUEUE_MAX_EVENTS, make_timeout(duration, &storage));
+     // We break this implementation into two parts.
+     // (1) count = kevent(..., timeout = 0)
+     // (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
+     // This allows us to avoid releasing and reacquiring the GVL.
+     // Non-comprehensive testing shows this gives a 1.5x speedup.
+     arguments.timeout = &arguments.storage;
  
-     if (count == -1) {
-         rb_sys_fail("kevent");
+     // First do the syscall with no timeout to get any immediately available events:
+     select_internal_with_gvl(&arguments);
+ 
+     // If there were no pending events, if we have a timeout, wait for more events:
+     if (arguments.count == 0) {
+         arguments.timeout = make_timeout(duration, &arguments.storage);
+ 
+         if (!timeout_nonblocking(arguments.timeout)) {
+             arguments.count = KQUEUE_MAX_EVENTS;
+ 
+             select_internal_without_gvl(&arguments);
+         }
      }
  
-     for (int i = 0; i < count; i += 1) {
-         VALUE fiber = (VALUE)events[i].udata;
-         VALUE result = INT2NUM(events[i].filter);
+     for (int i = 0; i < arguments.count; i += 1) {
+         VALUE fiber = (VALUE)arguments.events[i].udata;
+         VALUE result = INT2NUM(arguments.events[i].filter);
          rb_funcall(fiber, id_transfer, 1, result);
      }
  
-     return INT2NUM(count);
+     return INT2NUM(arguments.count);
  }
  
  void Init_Event_Backend_KQueue(VALUE Event_Backend) {
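
This is the core of the release: `select` first issues a zero-timeout `kevent` while still holding the GVL, and only falls back to `rb_thread_call_without_gvl` when nothing was immediately ready and the caller allows blocking. Releasing and reacquiring the GVL is expensive, so skipping it on the hot path is where the quoted 1.5x speedup comes from. The worker must be a `void *(*)(void *)` function that touches no Ruby objects. A reduced sketch of the shape, with hypothetical `poll_*` names:

```c
#include <sys/types.h>
#include <sys/event.h>
#include <ruby/thread.h>

enum {POLL_MAX_EVENTS = 64};

struct poll_arguments {
	int descriptor;
	int count;
	struct kevent events[POLL_MAX_EVENTS];
	struct timespec *timeout; // NULL or zero means "do not sleep"
};

// Runs without the GVL: must not touch Ruby objects or raise.
static void *poll_blocking(void *_arguments) {
	struct poll_arguments *arguments = _arguments;

	arguments->count = kevent(arguments->descriptor, NULL, 0,
		arguments->events, arguments->count, arguments->timeout);

	return NULL;
}

static void poll_events(struct poll_arguments *arguments, int blocking) {
	if (blocking) {
		// Pay for releasing the GVL only when we may actually sleep:
		rb_thread_call_without_gvl(poll_blocking, arguments, RUBY_UBF_IO, NULL);
	} else {
		poll_blocking(arguments); // zero timeout: cheap, keep the GVL
	}
}
```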
@@ -206,6 +339,7 @@ void Init_Event_Backend_KQueue(VALUE Event_Backend) {
  
      rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
      rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+     rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
  
      rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
      rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
The remaining hunks apply the same treatment to the io_uring backend:

@@ -28,12 +28,12 @@
  static VALUE Event_Backend_URing = Qnil;
  static ID id_fileno, id_transfer;
  
- static const int URING_ENTRIES = 1024;
- static const int URING_MAX_EVENTS = 1024;
+ enum {URING_ENTRIES = 128};
+ enum {URING_MAX_EVENTS = 128};
  
  struct Event_Backend_URing {
      VALUE loop;
-     struct io_uring* ring;
+     struct io_uring ring;
  };
  
  void Event_Backend_URing_Type_mark(void *_data)
@@ -42,14 +42,19 @@ void Event_Backend_URing_Type_mark(void *_data)
      rb_gc_mark(data->loop);
  }
  
+ static
+ void close_internal(struct Event_Backend_URing *data) {
+     if (data->ring.ring_fd >= 0) {
+         io_uring_queue_exit(&data->ring);
+         data->ring.ring_fd = -1;
+     }
+ }
+ 
  void Event_Backend_URing_Type_free(void *_data)
  {
      struct Event_Backend_URing *data = _data;
  
-     if (data->ring) {
-         io_uring_queue_exit(data->ring);
-         xfree(data->ring);
-     }
+     close_internal(data);
  
      free(data);
  }
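
The io_uring backend gets the same `close_internal` refactor, plus a structural change: `struct io_uring` is now embedded by value, removing an `xmalloc`/`xfree` pair and a pointer indirection on every ring access. Its `ring_fd` field doubles as the liveness flag, with -1 meaning not initialized or already closed. A sketch under those assumptions (the `backend` struct is hypothetical):

```c
#include <liburing.h>

struct backend {
	struct io_uring ring; // embedded by value: no separate allocation
};

static void backend_close(struct backend *data) {
	if (data->ring.ring_fd >= 0) {
		io_uring_queue_exit(&data->ring); // unmaps the rings and closes the fd
		data->ring.ring_fd = -1;          // mark closed; safe to call again
	}
}
```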
@@ -75,7 +80,7 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
      VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
      data->loop = Qnil;
-     data->ring = NULL;
+     data->ring.ring_fd = -1;
  
      return instance;
  }
@@ -85,17 +90,27 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
      TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
      data->loop = loop;
-     data->ring = xmalloc(sizeof(struct io_uring));
  
-     int result = io_uring_queue_init(URING_ENTRIES, data->ring, 0);
+     int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
  
-     if (result == -1) {
-         rb_sys_fail("io_uring_queue_init");
+     if (result < 0) {
+         rb_syserr_fail(-result, "io_uring_queue_init");
      }
  
+     rb_update_max_fd(data->ring.ring_fd);
+ 
      return self;
  }
  
+ VALUE Event_Backend_URing_close(VALUE self) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ 
+     close_internal(data);
+ 
+     return Qnil;
+ }
+ 
  static inline
  short poll_flags_from_events(int events) {
      short flags = 0;
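
Two fixes ride along in `initialize`. First, liburing reports failure as a negative errno return value rather than -1 with `errno` set, so the old `result == -1` check could never fire correctly; the replacement checks `result < 0` and raises via `rb_syserr_fail(-result, ...)`. Second, `rb_update_max_fd` tells the Ruby runtime about a descriptor it did not open itself. A sketch of the error convention (the `check_uring` helper is hypothetical):

```c
#include <ruby.h>

// liburing-style calls return zero or positive on success, -errno on failure:
static int check_uring(int result, const char *message) {
	if (result < 0) {
		rb_syserr_fail(-result, message); // raises the matching Errno::* exception
	}

	return result;
}
```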
@@ -121,63 +136,84 @@ int events_from_poll_flags(short flags) {
      return events;
  }
  
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-     struct Event_Backend_URing *data = NULL;
-     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ struct io_wait_arguments {
+     struct Event_Backend_URing *data;
+     VALUE fiber;
+     short flags;
+ };
+ 
+ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
  
-     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
+     while (sqe == NULL) {
+         sqe = io_uring_get_sqe(&data->ring);
+     }
  
-     short flags = poll_flags_from_events(NUM2INT(events));
+     return sqe;
+ }
+ 
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+     struct Event_Backend_URing *data = arguments->data;
  
-     // fprintf(stderr, "poll_add(%p, %d, %d)\n", sqe, descriptor, flags);
+     struct io_uring_sqe *sqe = io_get_sqe(data);
  
-     io_uring_prep_poll_add(sqe, descriptor, flags);
-     io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit(data->ring);
+     // fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
+ 
+     io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+     io_uring_submit(&data->ring);
+ 
+     rb_exc_raise(exception);
+ };
+ 
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+     struct Event_Backend_URing *data = arguments->data;
  
      VALUE result = rb_funcall(data->loop, id_transfer, 0);
  
      // We explicitly filter the resulting events based on the requested events.
      // In some cases, poll will report events we didn't ask for.
-     flags &= NUM2INT(result);
+     short flags = arguments->flags & NUM2INT(result);
  
      return INT2NUM(events_from_poll_flags(flags));
- }
+ };
  
- static
- struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
-     if (duration == Qnil) {
-         return NULL;
-     }
+ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
-     if (FIXNUM_P(duration)) {
-         storage->tv_sec = NUM2TIMET(duration);
-         storage->tv_nsec = 0;
- 
-         return storage;
-     }
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+     struct io_uring_sqe *sqe = io_get_sqe(data);
  
-     else if (RB_FLOAT_TYPE_P(duration)) {
-         double value = RFLOAT_VALUE(duration);
-         time_t seconds = duration;
- 
-         storage->tv_sec = seconds;
-         storage->tv_nsec = (value - seconds) * 1000000000L;
- 
-         return storage;
-     }
+     if (!sqe) return INT2NUM(0);
  
-     rb_raise(rb_eRuntimeError, "unable to convert timeout");
+     short flags = poll_flags_from_events(NUM2INT(events));
+ 
+     // fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
+ 
+     io_uring_prep_poll_add(sqe, descriptor, flags);
+     io_uring_sqe_set_data(sqe, (void*)fiber);
+     io_uring_submit(&data->ring);
+ 
+     struct io_wait_arguments io_wait_arguments = {
+         .data = data,
+         .fiber = fiber,
+         .flags = flags
+     };
+ 
+     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }
  
  inline static
  void resize_to_capacity(VALUE string, size_t offset, size_t length) {
      size_t current_length = RSTRING_LEN(string);
      long difference = (long)(offset + length) - (long)current_length;
- 
+ 
      difference += 1;
- 
+ 
      if (difference > 0) {
          rb_str_modify_expand(string, difference);
      } else {
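
Two supporting pieces appear in this hunk: `io_get_sqe` spins until the submission queue has a free slot, and `io_wait_rescue` cancels an abandoned poll. Cancellation is keyed on `user_data`, the fiber pointer the poll was submitted with, so exactly one pending operation is removed. A condensed sketch (the `cancel_poll` wrapper is hypothetical; note that newer liburing versions type this parameter as `__u64` rather than `void *`):

```c
#include <liburing.h>

static void cancel_poll(struct io_uring *ring, void *user_data) {
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	// The submission queue may be momentarily full; retry as io_get_sqe does.
	while (sqe == NULL) {
		sqe = io_uring_get_sqe(ring);
	}

	io_uring_prep_poll_remove(sqe, user_data); // match the pending poll by user_data
	io_uring_submit(ring);
}
```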
@@ -188,7 +224,7 @@ void resize_to_capacity(VALUE string, size_t offset, size_t length) {
  
  inline static
  void resize_to_fit(VALUE string, size_t offset, size_t length) {
      size_t current_length = RSTRING_LEN(string);
- 
+ 
      if (current_length < (offset + length)) {
          rb_str_set_len(string, offset + length);
      }
@@ -197,95 +233,169 @@ void resize_to_fit(VALUE string, size_t offset, size_t length) {
  VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
      struct Event_Backend_URing *data = NULL;
      TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
- 
+ 
      resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
- 
+ 
      int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
- 
+     struct io_uring_sqe *sqe = io_get_sqe(data);
+ 
      struct iovec iovecs[1];
      iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
      iovecs[0].iov_len = NUM2SIZET(length);
- 
+ 
      io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
      io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit(data->ring);
- 
+     io_uring_submit(&data->ring);
+ 
      // fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
- 
+ 
      int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
- 
+ 
      if (result < 0) {
          rb_syserr_fail(-result, strerror(-result));
      }
- 
+ 
      resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
- 
+ 
      return INT2NUM(result);
  }
  
  VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
      struct Event_Backend_URing *data = NULL;
      TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
- 
+ 
      if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
          rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
      }
- 
+ 
      int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-     struct io_uring_sqe *sqe = io_uring_get_sqe(data->ring);
- 
+     struct io_uring_sqe *sqe = io_get_sqe(data);
+ 
      struct iovec iovecs[1];
      iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
      iovecs[0].iov_len = NUM2SIZET(length);
- 
+ 
      io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
      io_uring_sqe_set_data(sqe, (void*)fiber);
-     io_uring_submit(data->ring);
+     io_uring_submit(&data->ring);
  
      // fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
  
      int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
- 
+ 
      if (result < 0) {
          rb_syserr_fail(-result, strerror(-result));
      }
- 
+ 
      return INT2NUM(result);
  }
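
`io_read` and `io_write` share one flow: prepare an SQE, stash the waiting fiber in `user_data`, submit, then suspend by transferring to the event loop; `select` later resumes the fiber with the CQE result, a byte count on success or a negative errno. A condensed sketch of that round trip, reusing `struct Event_Backend_URing` and `id_transfer` from the diff (the `submit_and_wait` helper itself is hypothetical):

```c
static int submit_and_wait(struct Event_Backend_URing *data,
	struct io_uring_sqe *sqe, VALUE fiber)
{
	io_uring_sqe_set_data(sqe, (void*)fiber); // identifies the waiter in the CQE
	io_uring_submit(&data->ring);             // hand the request to the kernel

	// Suspend this fiber; the select loop resumes it with cqe->res:
	return NUM2INT(rb_funcall(data->loop, id_transfer, 0));
}
```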
  
+ static
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
+     if (duration == Qnil) {
+         return NULL;
+     }
+ 
+     if (FIXNUM_P(duration)) {
+         storage->tv_sec = NUM2TIMET(duration);
+         storage->tv_nsec = 0;
+ 
+         return storage;
+     }
+ 
+     else if (RB_FLOAT_TYPE_P(duration)) {
+         double value = RFLOAT_VALUE(duration);
+         time_t seconds = value;
+ 
+         storage->tv_sec = seconds;
+         storage->tv_nsec = (value - seconds) * 1000000000L;
+ 
+         return storage;
+     }
+ 
+     rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+ 
+ static
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
+     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+ 
+ struct select_arguments {
+     struct Event_Backend_URing *data;
+ 
+     int count;
+     struct io_uring_cqe **cqes;
+ 
+     struct __kernel_timespec storage;
+     struct __kernel_timespec *timeout;
+ };
+ 
+ static
+ void * select_internal(void *_arguments) {
+     struct select_arguments * arguments = (struct select_arguments *)_arguments;
+ 
+     arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
+ 
+     // If waiting resulted in a timeout, there are 0 events.
+     if (arguments->count == -ETIME) {
+         arguments->count = 0;
+     }
+ 
+     return NULL;
+ }
+ 
+ static
+ int select_internal_without_gvl(struct select_arguments *arguments) {
+     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+ 
+     if (arguments->count < 0) {
+         rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+     }
+ 
+     return arguments->count;
+ }
+ 
  VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
      struct Event_Backend_URing *data = NULL;
      TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
  
      struct io_uring_cqe *cqes[URING_MAX_EVENTS];
-     struct __kernel_timespec storage;
  
-     int result = io_uring_peek_batch_cqe(data->ring, cqes, URING_MAX_EVENTS);
- 
-     // fprintf(stderr, "result = %d\n", result);
+     // This is a non-blocking operation:
+     int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
  
      if (result < 0) {
          rb_syserr_fail(-result, strerror(-result));
      } else if (result == 0) {
-         result = io_uring_wait_cqes(data->ring, cqes, 1, make_timeout(duration, &storage), NULL);
+         // We might need to wait for events:
+         struct select_arguments arguments = {
+             .data = data,
+             .cqes = cqes,
+             .timeout = NULL,
+         };
  
-         // fprintf(stderr, "result (timeout) = %d\n", result);
+         arguments.timeout = make_timeout(duration, &arguments.storage);
  
-         if (result == -ETIME) {
-             result = 0;
-         } else if (result < 0) {
-             rb_syserr_fail(-result, strerror(-result));
+         if (!timeout_nonblocking(arguments.timeout)) {
+             result = select_internal_without_gvl(&arguments);
          }
      }
  
+     // fprintf(stderr, "cqes count=%d\n", result);
+ 
      for (int i = 0; i < result; i += 1) {
+         // If the operation was cancelled, or the operation has no user data (fiber):
+         if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
+             continue;
+         }
+ 
          VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
          VALUE result = INT2NUM(cqes[i]->res);
  
-         // fprintf(stderr, "cqes[i]->res = %d\n", cqes[i]->res);
+         // fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
  
-         io_uring_cqe_seen(data->ring, cqes[i]);
+         io_uring_cqe_seen(&data->ring, cqes[i]);
  
          rb_funcall(fiber, id_transfer, 1, result);
      }
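
The select rewrite mirrors the kqueue one: `io_uring_peek_batch_cqe` harvests already-complete CQEs without blocking, and only an empty result leads to a GVL-free `io_uring_wait_cqes`. Two wrinkles are handled: `-ETIME` from a timed-out wait just means zero events, and completions for cancelled polls (`-ECANCELED`, or a null `user_data`) must not resume a fiber. A reduced sketch of the dispatch loop (the `dispatch_completions` helper is hypothetical; it consumes every CQE before skipping, since an unconsumed CQE occupies a completion-queue slot, whereas the diff as written skips cancelled entries before calling `io_uring_cqe_seen`):

```c
#include <errno.h>
#include <liburing.h>
#include <ruby.h>

static void dispatch_completions(struct io_uring *ring,
	struct io_uring_cqe **cqes, int count, ID id_transfer)
{
	for (int i = 0; i < count; i += 1) {
		VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
		int res = cqes[i]->res;

		io_uring_cqe_seen(ring, cqes[i]); // always release the CQE slot

		// Cancelled or ownerless completions have no fiber to resume:
		if (res == -ECANCELED || fiber == 0) continue;

		rb_funcall(fiber, id_transfer, 1, INT2NUM(res));
	}
}
```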
@@ -301,10 +411,11 @@ void Init_Event_Backend_URing(VALUE Event_Backend) {
  
      rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
      rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+     rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
  
      rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
      rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
- 
+ 
      rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
      rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
  }