event 0.4.1 → 0.6.0

@@ -24,9 +24,10 @@
  #include <sys/event.h>
  #include <sys/ioctl.h>
  #include <time.h>
+ #include <errno.h>

  static VALUE Event_Backend_KQueue = Qnil;
- static ID id_fileno, id_transfer;
+ static ID id_fileno;

  enum {KQUEUE_MAX_EVENTS = 64};

@@ -112,6 +113,86 @@ VALUE Event_Backend_KQueue_close(VALUE self) {
  	return Qnil;
  }

+ struct process_wait_arguments {
+ 	struct Event_Backend_KQueue *data;
+ 	pid_t pid;
+ 	int flags;
+ };
+
+ static
+ int process_add_filters(int descriptor, int ident, VALUE fiber) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 	event.fflags = NOTE_EXIT;
+ 	event.udata = (void*)fiber;
+
+ 	int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
+
+ 	if (result == -1) {
+ 		// No such process - the process has probably already terminated:
+ 		if (errno == ESRCH) {
+ 			return 0;
+ 		}
+
+ 		rb_sys_fail("kevent(process_add_filters)");
+ 	}
+
+ 	return 1;
+ }
+
+ static
+ void process_remove_filters(int descriptor, int ident) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_DELETE;
+ 	event.fflags = NOTE_EXIT;
+
+ 	// Ignore the result.
+ 	kevent(descriptor, &event, 1, NULL, 0, NULL);
+ }
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	Event_Backend_transfer(arguments->data->loop);
+
+ 	return Event_Backend_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	process_remove_filters(arguments->data->descriptor, arguments->pid);
+
+ 	rb_exc_raise(exception);
+ }
+
+ VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct Event_Backend_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = RB_NUM2INT(flags),
+ 	};
+
+ 	int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
+
+ 	if (waiting) {
+ 		return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
+ 	} else {
+ 		return Event_Backend_process_status_wait(process_wait_arguments.pid);
+ 	}
+ }
+
  static
  int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
  	int count = 0;
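Note: the new process_wait above registers a one-shot EVFILT_PROC/NOTE_EXIT filter for the target pid and suspends the fiber until the kernel reports the exit; if registration fails with ESRCH the process has already terminated, so the status is reaped immediately. A minimal standalone sketch of the same kqueue technique outside the Ruby glue (hypothetical example, not part of the gem):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) _exit(42); // Child exits immediately.

	int kq = kqueue();

	// Register a one-shot filter that fires when the process exits:
	struct kevent change, event;
	EV_SET(&change, pid, EVFILT_PROC, EV_ADD | EV_ENABLE | EV_ONESHOT, NOTE_EXIT, 0, NULL);

	// Block until the NOTE_EXIT event is delivered:
	kevent(kq, &change, 1, &event, 1, NULL);

	int status = 0;
	waitpid(pid, &status, 0);
	printf("child %d exited with status %d\n", (int)pid, WEXITSTATUS(status));

	return 0;
}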
@@ -143,7 +224,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
  	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);

  	if (result == -1) {
- 		rb_sys_fail("kevent(register)");
+ 		rb_sys_fail("kevent(io_add_filters)");
  	}

  	return events;
@@ -186,7 +267,7 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
  	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);

  	rb_exc_raise(exception);
- };
+ }

  static inline
  int events_from_kqueue_filter(int filter) {
@@ -200,19 +281,19 @@ static
  VALUE io_wait_transfer(VALUE _arguments) {
  	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;

- 	VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
+ 	VALUE result = Event_Backend_transfer(arguments->data->loop);

- 	return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
- };
+ 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
+ }

  VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
  	struct Event_Backend_KQueue *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);

- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ 	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));

  	struct io_wait_arguments io_wait_arguments = {
- 		.events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+ 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
  		.data = data,
  		.descriptor = descriptor,
  	};
@@ -220,6 +301,154 @@ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE even
  	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }

+ struct io_read_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t offset;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	size_t offset = arguments->offset;
+ 	size_t length = arguments->length;
+ 	size_t total = 0;
+
+ 	while (length > 0) {
+ 		char *buffer = Event_Backend_resize_to_capacity(arguments->buffer, offset, length);
+ 		ssize_t result = read(arguments->descriptor, buffer+offset, length);
+
+ 		if (result >= 0) {
+ 			offset += result;
+ 			length -= result;
+ 			total += result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
+ 		} else {
+ 			rb_sys_fail("Event_Backend_KQueue_io_read");
+ 		}
+ 	}
+
+ 	Event_Backend_resize_to_fit(arguments->buffer, arguments->offset, arguments->length);
+
+ 	return SIZET2NUM(total);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
+ 	struct Event_Backend_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ 	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+
+ 	size_t offset = NUM2SIZET(_offset);
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_read_arguments io_read_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = Event_Backend_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.offset = offset,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
+
+ struct io_write_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t offset;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_write_loop(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	size_t offset = arguments->offset;
+ 	size_t length = arguments->length;
+ 	size_t total = 0;
+
+ 	while (length > 0) {
+ 		char *buffer = Event_Backend_verify_size(arguments->buffer, offset, length);
+ 		ssize_t result = write(arguments->descriptor, buffer+offset, length);
+
+ 		if (result >= 0) {
+ 			length -= result;
+ 			offset += result;
+ 			total += result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
+ 		} else {
+ 			rb_sys_fail("Event_Backend_KQueue_io_write");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(total);
+ };
+
+ static
+ VALUE io_write_ensure(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ };
+
+ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
+ 	struct Event_Backend_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+
+ 	size_t offset = NUM2SIZET(_offset);
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_write_arguments io_write_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = Event_Backend_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.offset = offset,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+ }
+
  static
  struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
  	if (duration == Qnil) {
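Note: the io_read/io_write loops above rely on Event_Backend_nonblock_set, Event_Backend_nonblock_restore, Event_Backend_resize_to_capacity, Event_Backend_resize_to_fit and Event_Backend_verify_size, shared helpers that live outside this diff. As a rough idea of what the nonblocking pair does (a hypothetical sketch, the gem's actual implementation may differ), it saves and restores the descriptor's O_NONBLOCK flag around the loop:

#include <fcntl.h>

// Hypothetical sketch of the shared helpers referenced above (not shown in this diff).
static int Event_Backend_nonblock_set(int descriptor) {
	int flags = fcntl(descriptor, F_GETFL, 0);

	if (!(flags & O_NONBLOCK)) {
		fcntl(descriptor, F_SETFL, flags | O_NONBLOCK);
	}

	// Return the original flags so they can be restored later:
	return flags;
}

static void Event_Backend_nonblock_restore(int descriptor, int flags) {
	// Only clear O_NONBLOCK if the descriptor was not already non-blocking:
	if (!(flags & O_NONBLOCK)) {
		fcntl(descriptor, F_SETFL, flags & ~O_NONBLOCK);
	}
}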
@@ -325,7 +554,8 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
  	for (int i = 0; i < arguments.count; i += 1) {
  		VALUE fiber = (VALUE)arguments.events[i].udata;
  		VALUE result = INT2NUM(arguments.events[i].filter);
- 		rb_funcall(fiber, id_transfer, 1, result);
+
+ 		Event_Backend_transfer_result(fiber, result);
  	}

  	return INT2NUM(arguments.count);
@@ -333,14 +563,17 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {

  void Init_Event_Backend_KQueue(VALUE Event_Backend) {
  	id_fileno = rb_intern("fileno");
- 	id_transfer = rb_intern("transfer");

  	Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);

  	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
  	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+ 	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
  	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);

  	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
- 	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
+ 	rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 5);
+ 	rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 5);
+
+ 	rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
  }
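Note: throughout both backends this release replaces direct rb_funcall(..., id_transfer, ...) calls with Event_Backend_transfer and Event_Backend_transfer_result, shared helpers defined outside this diff. A plausible sketch of what they wrap (hypothetical; the real helpers may add guards or extra argument handling):

// Hypothetical sketch of the shared fiber-transfer helpers (not shown in this diff).
static inline VALUE Event_Backend_transfer(VALUE fiber) {
	// Suspend the current fiber and yield control to the given fiber (usually the event loop):
	return rb_funcall(fiber, rb_intern("transfer"), 0);
}

static inline VALUE Event_Backend_transfer_result(VALUE fiber, VALUE result) {
	// Resume the waiting fiber, handing it the result of the completed operation:
	return rb_funcall(fiber, rb_intern("transfer"), 1, result);
}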
@@ -0,0 +1,36 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <sys/types.h>
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <poll.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+
+ #ifndef __NR_pidfd_open
+ #define __NR_pidfd_open 434 /* System call # on most architectures */
+ #endif
+
+ static int
+ pidfd_open(pid_t pid, unsigned int flags)
+ {
+ 	return syscall(__NR_pidfd_open, pid, flags);
+ }
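Note: the new file above (pulled into the io_uring backend below via #include "pidfd.c") wraps the pidfd_open system call, which returns a descriptor that becomes readable when the target process exits. A minimal standalone sketch of the technique using plain poll (hypothetical example, not part of the gem; the backend below polls the descriptor through io_uring instead):

#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <poll.h>
#include <stdio.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif

int main(void) {
	pid_t pid = fork();
	if (pid == 0) _exit(0); // Child exits immediately.

	// Obtain a descriptor referring to the child process (Linux 5.3+):
	int fd = syscall(__NR_pidfd_open, pid, 0);

	// The descriptor becomes readable once the process has terminated:
	struct pollfd pfd = {.fd = fd, .events = POLLIN};
	poll(&pfd, 1, -1);

	int status = 0;
	waitpid(pid, &status, 0);
	close(fd);

	printf("child %d exited with status %d\n", (int)pid, WEXITSTATUS(status));

	return 0;
}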
@@ -25,8 +25,10 @@
  #include <poll.h>
  #include <time.h>

+ #include "pidfd.c"
+
  static VALUE Event_Backend_URing = Qnil;
- static ID id_fileno, id_transfer;
+ static ID id_fileno;

  enum {URING_ENTRIES = 128};
  enum {URING_MAX_EVENTS = 128};
@@ -111,6 +113,66 @@ VALUE Event_Backend_URing_close(VALUE self) {
  	return Qnil;
  }

+ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+ 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+
+ 	while (sqe == NULL) {
+ 		io_uring_submit(&data->ring);
+ 		sqe = io_uring_get_sqe(&data->ring);
+ 	}
+
+ 	// fprintf(stderr, "io_get_sqe -> %p\n", sqe);
+
+ 	return sqe;
+ }
+
+ struct process_wait_arguments {
+ 	struct Event_Backend_URing *data;
+ 	pid_t pid;
+ 	int flags;
+ 	int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	Event_Backend_transfer(arguments->data->loop);
+
+ 	return Event_Backend_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	close(arguments->descriptor);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct Event_Backend_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = NUM2INT(flags),
+ 	};
+
+ 	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+ 	rb_update_max_fd(process_wait_arguments.descriptor);
+
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	assert(sqe);
+
+ 	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+
+ 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
+
  static inline
  short poll_flags_from_events(int events) {
  	short flags = 0;
@@ -142,22 +204,13 @@ struct io_wait_arguments {
  	short flags;
  };

- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
-
- 	while (sqe == NULL) {
- 		sqe = io_uring_get_sqe(&data->ring);
- 	}
-
- 	return sqe;
- }
-
  static
  VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
  	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
  	struct Event_Backend_URing *data = arguments->data;

  	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	assert(sqe);

  	// fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);

@@ -171,8 +224,8 @@ static
  VALUE io_wait_transfer(VALUE _arguments) {
  	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
  	struct Event_Backend_URing *data = arguments->data;
-
- 	VALUE result = rb_funcall(data->loop, id_transfer, 0);
+
+ 	VALUE result = Event_Backend_transfer(data->loop);

  	// We explicitly filter the resulting events based on the requested events.
  	// In some cases, poll will report events we didn't ask for.
@@ -187,8 +240,7 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event

  	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
  	struct io_uring_sqe *sqe = io_get_sqe(data);
-
- 	if (!sqe) return INT2NUM(0);
+ 	assert(sqe);

  	short flags = poll_flags_from_events(NUM2INT(events));

@@ -196,7 +248,8 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event

  	io_uring_prep_poll_add(sqe, descriptor, flags);
  	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(&data->ring);
+ 	// fprintf(stderr, "io_uring_submit\n");
+ 	// io_uring_submit(&data->ring);

  	struct io_wait_arguments io_wait_arguments = {
  		.data = data,
@@ -207,87 +260,98 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
  	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }

- inline static
- void resize_to_capacity(VALUE string, size_t offset, size_t length) {
- 	size_t current_length = RSTRING_LEN(string);
- 	long difference = (long)(offset + length) - (long)current_length;
+ static
+ int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	assert(sqe);

- 	difference += 1;
+ 	struct iovec iovecs[1];
+ 	iovecs[0].iov_base = buffer;
+ 	iovecs[0].iov_len = length;

- 	if (difference > 0) {
- 		rb_str_modify_expand(string, difference);
- 	} else {
- 		rb_str_modify(string);
- 	}
- }
-
- inline static
- void resize_to_fit(VALUE string, size_t offset, size_t length) {
- 	size_t current_length = RSTRING_LEN(string);
+ 	io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	io_uring_submit(&data->ring);

- 	if (current_length < (offset + length)) {
- 		rb_str_set_len(string, offset + length);
- 	}
+ 	return NUM2INT(Event_Backend_transfer(data->loop));
  }

- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE _buffer, VALUE _offset, VALUE _length) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- 	resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
+ 	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));

- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
-
- 	struct iovec iovecs[1];
- 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- 	iovecs[0].iov_len = NUM2SIZET(length);
-
- 	io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(&data->ring);
+ 	size_t offset = NUM2SIZET(_offset);
+ 	size_t length = NUM2SIZET(_length);

- 	// fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+ 	size_t start = offset;
+ 	size_t total = 0;

- 	int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
-
- 	if (result < 0) {
- 		rb_syserr_fail(-result, strerror(-result));
+ 	while (length > 0) {
+ 		char *buffer = Event_Backend_resize_to_capacity(_buffer, offset, length);
+ 		int result = io_read(data, fiber, descriptor, buffer+offset, length);
+
+ 		if (result >= 0) {
+ 			offset += result;
+ 			length -= result;
+ 			total += result;
+ 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ 			Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(READABLE));
+ 		} else {
+ 			rb_syserr_fail(-result, strerror(-result));
+ 		}
  	}

- 	resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
+ 	Event_Backend_resize_to_fit(_buffer, start, total);

- 	return INT2NUM(result);
+ 	return SIZET2NUM(total);
  }

- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
- 		rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
- 	}
-
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ static
+ int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
  	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	assert(sqe);

  	struct iovec iovecs[1];
- 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- 	iovecs[0].iov_len = NUM2SIZET(length);
+ 	iovecs[0].iov_base = buffer;
+ 	iovecs[0].iov_len = length;

  	io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
  	io_uring_sqe_set_data(sqe, (void*)fiber);
  	io_uring_submit(&data->ring);

- 	// fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+ 	return NUM2INT(Event_Backend_transfer(data->loop));
+ }
+
+ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE _buffer, VALUE _offset, VALUE _length) {
+ 	struct Event_Backend_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ 	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));

- 	int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
+ 	size_t offset = NUM2SIZET(_offset);
+ 	size_t length = NUM2SIZET(_length);

- 	if (result < 0) {
- 		rb_syserr_fail(-result, strerror(-result));
+ 	char *buffer = Event_Backend_verify_size(_buffer, offset, length);
+
+ 	size_t total = 0;
+
+ 	while (length > 0) {
+ 		int result = io_write(data, fiber, descriptor, buffer+offset, length);
+
+ 		if (result >= 0) {
+ 			length -= result;
+ 			offset += result;
+ 			total += result;
+ 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ 			Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(WRITABLE));
+ 		} else {
+ 			rb_syserr_fail(-result, strerror(-result));
+ 		}
  	}

- 	return INT2NUM(result);
+ 	return SIZET2NUM(total);
  }

  static
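Note: the rewritten io_read/io_write above submit one readv/writev SQE per iteration and transfer to the event loop, which later resumes the fiber with the CQE result (a byte count, or a negative errno that is retried on EAGAIN/EWOULDBLOCK). A minimal standalone liburing sketch of the same submit-and-complete cycle, waiting synchronously instead of transferring to a fiber (hypothetical example, not part of the gem):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void) {
	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);

	int fd = open("/etc/hostname", O_RDONLY);

	char buffer[256];
	struct iovec iov = {.iov_base = buffer, .iov_len = sizeof(buffer)};

	// Submission: prepare a readv request and push it to the kernel.
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_submit(&ring);

	// Completion: cqe->res is the number of bytes read, or a negative errno.
	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}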
@@ -324,8 +388,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
  struct select_arguments {
  	struct Event_Backend_URing *data;

- 	int count;
- 	struct io_uring_cqe **cqes;
+ 	int result;

  	struct __kernel_timespec storage;
  	struct __kernel_timespec *timeout;
@@ -335,12 +398,10 @@ static
  void * select_internal(void *_arguments) {
  	struct select_arguments * arguments = (struct select_arguments *)_arguments;

- 	arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
+ 	io_uring_submit(&arguments->data->ring);

- 	// If waiting resulted in a timeout, there are 0 events.
- 	if (arguments->count == -ETIME) {
- 		arguments->count = 0;
- 	}
+ 	struct io_uring_cqe *cqe = NULL;
+ 	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);

  	return NULL;
  }
@@ -349,21 +410,52 @@ static
  int select_internal_without_gvl(struct select_arguments *arguments) {
  	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);

- 	if (arguments->count < 0) {
- 		rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+ 	if (arguments->result == -ETIME) {
+ 		arguments->result = 0;
+ 	} else if (arguments->result < 0) {
+ 		rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqes");
+ 	} else {
+ 		// At least 1 event is waiting:
+ 		arguments->result = 1;
+ 	}
+
+ 	return arguments->result;
+ }
+
+ static inline
+ unsigned select_process_completions(struct io_uring *ring) {
+ 	unsigned completed = 0;
+ 	unsigned head;
+ 	struct io_uring_cqe *cqe;
+
+ 	io_uring_for_each_cqe(ring, head, cqe) {
+ 		++completed;
+
+ 		// If the operation was cancelled, or the operation has no user data (fiber):
+ 		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ 			continue;
+ 		}
+
+ 		VALUE fiber = (VALUE)cqe->user_data;
+ 		VALUE result = INT2NUM(cqe->res);
+
+ 		// fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
+ 		Event_Backend_transfer_result(fiber, result);
+ 	}
+
+ 	if (completed) {
+ 		io_uring_cq_advance(ring, completed);
  	}

- 	return arguments->count;
+ 	return completed;
  }

  VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  	struct Event_Backend_URing *data = NULL;
  	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- 	struct io_uring_cqe *cqes[URING_MAX_EVENTS];
-
- 	// This is a non-blocking operation:
- 	int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
+ 	int result = select_process_completions(&data->ring);

  	if (result < 0) {
  		rb_syserr_fail(-result, strerror(-result));
@@ -371,7 +463,6 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  	// We might need to wait for events:
  	struct select_arguments arguments = {
  		.data = data,
- 		.cqes = cqes,
  		.timeout = NULL,
  	};

@@ -379,43 +470,29 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  		if (!timeout_nonblocking(arguments.timeout)) {
  			result = select_internal_without_gvl(&arguments);
+ 		} else {
+ 			io_uring_submit(&data->ring);
  		}
  	}

- 	// fprintf(stderr, "cqes count=%d\n", result);
-
- 	for (int i = 0; i < result; i += 1) {
- 		// If the operation was cancelled, or the operation has no user data (fiber):
- 		if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
- 			continue;
- 		}
-
- 		VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
- 		VALUE result = INT2NUM(cqes[i]->res);
-
- 		// fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
-
- 		io_uring_cqe_seen(&data->ring, cqes[i]);
-
- 		rb_funcall(fiber, id_transfer, 1, result);
- 	}
-
- }
478
+ result = select_process_completions(&data->ring);
402
479
 
403
480
  return INT2NUM(result);
404
481
  }
405
482
 
406
483
  void Init_Event_Backend_URing(VALUE Event_Backend) {
407
484
  id_fileno = rb_intern("fileno");
408
- id_transfer = rb_intern("transfer");
409
485
 
410
486
  Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
411
487
 
412
488
  rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
413
489
  rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
490
+ rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
414
491
  rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
415
492
 
416
493
  rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
417
- rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
418
-
419
494
  rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
420
495
  rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
496
+
497
+ rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
421
498
  }