io-event 1.1.6 → 1.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: c2b1d6d29aedc2b375fa708f5f3518fb39dae6ee0b5c675a4ff571192f3c00ef
-  data.tar.gz: 6e9b9589ef99c97a287d20e4f60f501d87ca163897d21bdd59eb03aa71085ffd
+  metadata.gz: c19fdf43c5e63eb628c4ebb1feedcd048eac2e3ec4b83571f05bcb239eed9d02
+  data.tar.gz: 141749bc238099c65ebcc6db1d0d8b8ce1c3477e6e11a9f62e6c50572b9b6d84
 SHA512:
-  metadata.gz: 6447991710738ba24577100220b4ca322d79179d736ba3101bdd5fd2a2d8a23aa8983d527ebc13cdde5b038601c17d6a860ed21394a2f3d584ddd68772f505b6
-  data.tar.gz: e8a0232a3f36be1bfab1a1ca8a9e0a68e761d89d9f1576449ea23644eddc2643f89c9963186376d1211f25fb96853cadef82c673ca81af7430cdacb45d3f37ea
+  metadata.gz: c3da16be3fd6a0f98106f3e263ff2229e61a03bf5660fee1d3bb1bf6c191f1fd7475aabb85c8b5c29b25f6205816c728b42d68c74e4a123cea6810ac7be44b23
+  data.tar.gz: '026937a0ad18ba0f2783a8e8b8624177e8fbae34ca1a52eb2c5ccd59ae8231d4eea192fc3cc6b54d60e7d5d40d630f97754afa12e115f9ac44bd79caa9e52eee'
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -33,7 +33,7 @@ extension_name = 'IO_Event'
 
 # dir_config(extension_name)
 
-$CFLAGS << " -Wall -std=c99"
+$CFLAGS << " -Wall -Wno-unknown-pragmas -std=c99"
 
 $srcs = ["io/event/event.c", "io/event/selector/selector.c"]
 $VPATH << "$(srcdir)/io/event"
@@ -63,6 +63,7 @@ have_func("rb_io_descriptor")
 have_func("&rb_process_status_wait")
 have_func("rb_fiber_current")
 have_func("&rb_fiber_raise")
+have_func("epoll_pwait2")
 
 have_header('ruby/io/buffer.h')
 
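The two `extconf.rb` changes above are connected to the C changes below: `-Wno-unknown-pragmas` silences GCC's `-Wall` warning about the Clang-style `#pragma mark` section markers added to the uring selector, and `have_func("epoll_pwait2")` defines `HAVE_EPOLL_PWAIT2` whenever the symbol links, which drives the conditional compilation in the epoll selector. As a rough sketch, mkmf's `have_func` builds and links a probe along these lines (the real conftest differs in detail):

```c
/* If this translation unit compiles and links, the symbol exists and mkmf
 * defines HAVE_EPOLL_PWAIT2 for the extension build. The declaration is
 * deliberately loose: the probe only tests for the linker symbol. */
extern void epoll_pwait2();

int main(void) {
	/* Taking the address forces the linker to resolve the symbol. */
	void (*probe)() = epoll_pwait2;
	return probe != 0;
}
```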
data/ext/io/event/selector/epoll.c CHANGED
@@ -569,6 +569,38 @@ VALUE IO_Event_Selector_EPoll_io_write_compatible(int argc, VALUE *argv, VALUE s
 
 #endif
 
+#if defined(HAVE_EPOLL_PWAIT2)
+static
+struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
+	if (duration == Qnil) {
+		return NULL;
+	}
+	
+	if (FIXNUM_P(duration)) {
+		storage->tv_sec = NUM2TIMET(duration);
+		storage->tv_nsec = 0;
+		
+		return storage;
+	}
+	
+	else if (RB_FLOAT_TYPE_P(duration)) {
+		double value = RFLOAT_VALUE(duration);
+		time_t seconds = value;
+		
+		storage->tv_sec = seconds;
+		storage->tv_nsec = (value - seconds) * 1000000000L;
+		
+		return storage;
+	}
+	
+	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+}
+
+static
+int timeout_nonblocking(struct timespec * timespec) {
+	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+}
+#else
 static
 int make_timeout(VALUE duration) {
 	if (duration == Qnil) {
@@ -588,20 +620,35 @@ int make_timeout(VALUE duration) {
 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
 }
 
+static
+int timeout_nonblocking(int timeout) {
+	return timeout == 0;
+}
+#endif
+
 struct select_arguments {
 	struct IO_Event_Selector_EPoll *data;
 	
 	int count;
 	struct epoll_event events[EPOLL_MAX_EVENTS];
-	
+	
+#if defined(HAVE_EPOLL_PWAIT2)
+	struct timespec * timeout;
+	struct timespec storage;
+#else
 	int timeout;
+#endif
 };
 
 static
 void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 	
+#if defined(HAVE_EPOLL_PWAIT2)
+	arguments->count = epoll_pwait2(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout, NULL);
+#else
 	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
+#endif
 	
 	return NULL;
 }
@@ -642,9 +689,20 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 	
 	struct select_arguments arguments = {
 		.data = data,
+#if defined(HAVE_EPOLL_PWAIT2)
+		.storage = {
+			.tv_sec = 0,
+			.tv_nsec = 0
+		}
+#else
 		.timeout = 0
+#endif
 	};
 	
+#if defined(HAVE_EPOLL_PWAIT2)
+	arguments.timeout = &arguments.storage;
+#endif
+	
 	// Process any currently pending events:
 	select_internal_with_gvl(&arguments);
 	
@@ -654,9 +712,13 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
 	if (!ready && !arguments.count && !data->backend.ready) {
+#if defined(HAVE_EPOLL_PWAIT2)
+		arguments.timeout = make_timeout(duration, &arguments.storage);
+#else
 		arguments.timeout = make_timeout(duration);
+#endif
 		
-		if (arguments.timeout != 0) {
+		if (!timeout_nonblocking(arguments.timeout)) {
 			// Wait for events to occur
 			select_internal_without_gvl(&arguments);
 		}
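The motivation for the `epoll_pwait2` path is timeout resolution: `epoll_wait` takes its timeout as an `int` of milliseconds, so a sub-millisecond timeout from the scheduler must either truncate to zero (a busy retry) or round up to a whole millisecond, whereas `epoll_pwait2` (Linux 5.11+) takes a `struct timespec` with nanosecond resolution. A standalone illustration, reusing the float-to-timespec arithmetic from the new `make_timeout` above; `duration` and `milliseconds` are illustrative locals, not names from the gem:

```c
#include <stdio.h>
#include <time.h>

int main(void) {
	double duration = 0.0005; /* a 500 microsecond timeout */
	
	/* epoll_pwait2 path: the same conversion as the new make_timeout(). */
	struct timespec storage;
	time_t seconds = duration;
	storage.tv_sec = seconds;
	storage.tv_nsec = (duration - seconds) * 1000000000L;
	
	/* epoll_wait path: the timeout has to fit into whole milliseconds. */
	int milliseconds = duration * 1000;
	
	printf("epoll_pwait2: {tv_sec=%ld, tv_nsec=%ld}\n", (long)storage.tv_sec, storage.tv_nsec);
	printf("epoll_wait: %d ms (the sub-millisecond component is lost)\n", milliseconds);
	
	return 0;
}
```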
data/ext/io/event/selector/selector.h CHANGED
@@ -43,7 +43,7 @@ enum IO_Event {
 	IO_EVENT_HANGUP = 16
 };
 
-void Init_IO_Event_Selector();
+void Init_IO_Event_Selector(VALUE IO_Event_Selector);
 
 static inline int IO_Event_try_again(int error) {
 	return error == EAGAIN || error == EWOULDBLOCK;
data/ext/io/event/selector/uring.c CHANGED
@@ -39,6 +39,8 @@ static VALUE IO_Event_Selector_URing = Qnil;
 
 enum {URING_ENTRIES = 64};
 
+#pragma mark - Data Type
+
 struct IO_Event_Selector_URing {
 	struct IO_Event_Selector backend;
 	struct io_uring ring;
@@ -98,6 +100,8 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 	return instance;
 }
 
+#pragma mark - Methods
+
 VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_URing *data = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
@@ -179,6 +183,8 @@ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
 	return data->backend.ready ? Qtrue : Qfalse;
 }
 
+#pragma mark - Submission Queue
+
 // Flush the submission queue if pending operations are present.
 static
 int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
@@ -204,6 +210,8 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
 // Immediately flush the submission queue, yielding to the event loop if it was not successful.
 static
 int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
+	if (DEBUG && data->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", data->pending);
+	
 	while (true) {
 		int result = io_uring_submit(&data->ring);
 		
@@ -220,9 +228,12 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
 	}
 }
 
+// Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
 static
 void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
 	data->pending += 1;
+	
+	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &data->ring, data->pending);
 }
 
 struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
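The comment added to `io_uring_submit_pending` describes a batching strategy: instead of one `io_uring_submit` syscall per operation, non-urgent submissions only increment a counter, and a later flush covers all of them at once. A reduced, runnable sketch of that shape; `struct selector` stands in for `struct IO_Event_Selector_URing`, and the `printf` stands in for the real `io_uring_submit` call:

```c
#include <stdio.h>

struct selector {
	long pending; /* SQEs prepared but not yet handed to the kernel */
};

/* Defer: record that an SQE is queued; no syscall happens here. */
static void submit_pending(struct selector *selector) {
	selector->pending += 1;
}

/* Flush: a single submit covers every deferred SQE. If nothing calls this
 * before the loop blocks on completions, the deferred work never starts;
 * that is the deadlock the comment warns about. */
static void submit_flush(struct selector *selector) {
	if (selector->pending) {
		printf("io_uring_submit() covering %ld deferred SQEs\n", selector->pending);
		selector->pending = 0;
	}
}

int main(void) {
	struct selector selector = {0};
	
	submit_pending(&selector); /* e.g. a prepared write */
	submit_pending(&selector); /* another write in the same tick */
	submit_flush(&selector);   /* the event loop flushes once before waiting */
	
	return 0;
}
```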
@@ -238,6 +249,8 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
 	return sqe;
 }
 
+#pragma mark - Process.wait
+
 struct process_wait_arguments {
 	struct IO_Event_Selector_URing *data;
 	pid_t pid;
@@ -286,6 +299,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, V
 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
+#pragma mark - IO#wait
+
 static inline
 short poll_flags_from_events(int events) {
 	short flags = 0;
@@ -381,6 +396,8 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 
 #ifdef HAVE_RUBY_IO_BUFFER_H
 
+#pragma mark - IO#read
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)
 static inline off_t io_seekable(int descriptor) {
 	return -1;
@@ -397,19 +414,67 @@ static inline off_t io_seekable(int descriptor)
 }
 #endif
 
-static int io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+#pragma mark - IO#read
 
-	if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
+struct io_read_arguments {
+	struct IO_Event_Selector_URing *data;
+	VALUE fiber;
+	int descriptor;
+	char *buffer;
+	size_t length;
+};
 
-	io_uring_prep_read(sqe, descriptor, buffer, length, io_seekable(descriptor));
-	io_uring_sqe_set_data(sqe, (void*)fiber);
+static VALUE
+io_read_submit(VALUE _arguments)
+{
+	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+	struct IO_Event_Selector_URing *data = arguments->data;
+	struct io_uring_sqe *sqe = io_get_sqe(data);
+	
+	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	
+	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
+	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
 	io_uring_submit_now(data);
 
-	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
-	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+}
 
-	return RB_NUM2INT(result);
+static VALUE
+io_read_cancel(VALUE _arguments, VALUE exception)
+{
+	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+	struct IO_Event_Selector_URing *data = arguments->data;
+	
+	struct io_uring_sqe *sqe = io_get_sqe(data);
+	
+	if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->fiber);
+	
+	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
+	io_uring_sqe_set_data(sqe, NULL);
+	io_uring_submit_now(data);
+	
+	rb_exc_raise(exception);
+}
+
+static int
+io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+{
+	struct io_read_arguments io_read_arguments = {
+		.data = data,
+		.fiber = fiber,
+		.descriptor = descriptor,
+		.buffer = buffer,
+		.length = length
+	};
+	
+	int result = RB_NUM2INT(
+		rb_rescue(io_read_submit, (VALUE)&io_read_arguments, io_read_cancel, (VALUE)&io_read_arguments)
+	);
+	
+	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", result);
+	
+	return result;
 }
 
 VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
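Structurally, this refactor splits the old inline `io_read` into a `rb_rescue` pair: the submit half prepares the SQE and suspends the fiber, and the cancel half runs only if an exception is raised into the suspended fiber (a timeout or interrupt), submitting a cancellation keyed by the same user data before re-raising. A skeleton of that control flow using the real `rb_rescue` signature; `operation_submit` and `operation_cancel` are illustrative stand-ins for `io_read_submit` and `io_read_cancel`:

```c
#include <ruby.h>

static VALUE operation_submit(VALUE _arguments) {
	/* Prepare and submit the SQE, then suspend this fiber by transferring
	 * to the event loop; execution resumes here with the result. */
	return INT2NUM(0); /* placeholder result */
}

static VALUE operation_cancel(VALUE _arguments, VALUE exception) {
	/* Reached only when an exception lands while the body is suspended:
	 * submit a cancel SQE keyed by the same user data, then propagate. */
	rb_exc_raise(exception);
	return Qnil; /* not reached */
}

static int operation(VALUE arguments) {
	/* rb_rescue(body, body_argument, rescue, rescue_argument): the rescue
	 * function fires for any StandardError raised out of the body. */
	return RB_NUM2INT(
		rb_rescue(operation_submit, arguments, operation_cancel, arguments)
	);
}
```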
@@ -460,19 +525,67 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
 	return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
 }
 
-static
-int io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+#pragma mark - IO#write
+
+struct io_write_arguments {
+	struct IO_Event_Selector_URing *data;
+	VALUE fiber;
+	int descriptor;
+	char *buffer;
+	size_t length;
+};
+
+static VALUE
+io_write_submit(VALUE _argument)
+{
+	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
+	struct IO_Event_Selector_URing *data = arguments->data;
+	
 	struct io_uring_sqe *sqe = io_get_sqe(data);
 	
-	if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
-	
-	io_uring_prep_write(sqe, descriptor, buffer, length, io_seekable(descriptor));
-	io_uring_sqe_set_data(sqe, (void*)fiber);
+	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	
+	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
+	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
 	io_uring_submit_pending(data);
 	
-	int result = RB_NUM2INT(IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
-	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
+	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+}
+
+static VALUE
+io_write_cancel(VALUE _argument, VALUE exception)
+{
+	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
+	struct IO_Event_Selector_URing *data = arguments->data;
+	
+	struct io_uring_sqe *sqe = io_get_sqe(data);
+	
+	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->fiber);
+	
+	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
+	io_uring_sqe_set_data(sqe, NULL);
+	io_uring_submit_now(data);
+	
+	rb_exc_raise(exception);
+}
 
+static int
+io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+{
+	struct io_write_arguments arguments = {
+		.data = data,
+		.fiber = fiber,
+		.descriptor = descriptor,
+		.buffer = buffer,
+		.length = length,
+	};
+	
+	int result = RB_NUM2INT(
+		rb_rescue(io_write_submit, (VALUE)&arguments, io_write_cancel, (VALUE)&arguments)
+	);
+	
+	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
+	
 	return result;
 }
 
@@ -528,6 +641,8 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
 
 #endif
 
+#pragma mark - IO#close
+
 static const int ASYNC_CLOSE = 1;
 
 VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
@@ -550,6 +665,8 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
 	return Qtrue;
 }
 
+#pragma mark - Event Loop
+
 static
 struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
 	if (duration == Qnil) {
@@ -649,7 +766,7 @@ unsigned select_process_completions(struct io_uring *ring) {
 
 	// io_uring_cq_advance(ring, completed);
 
-	if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
+	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
 
 	return completed;
 }
@@ -720,6 +837,8 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 	return Qfalse;
 }
 
+#pragma mark - Native Methods
+
 void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
 	IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
 	rb_gc_register_mark_object(IO_Event_Selector_URing);
data/lib/io/event/selector/select.rb CHANGED
@@ -236,6 +236,10 @@ module IO::Event
 			end
 			
 			return total
+		rescue IOError => error
+			return -Errno::EBADF::Errno
+		rescue SystemCallError => error
+			return -error.errno
 		end
 		
 		def io_write(fiber, _io, buffer, length, offset = 0)
@@ -268,6 +272,10 @@ module IO::Event
 			end
 			
 			return total
+		rescue IOError => error
+			return -Errno::EBADF::Errno
+		rescue SystemCallError => error
+			return -error.errno
 		end
 		
 		def blocking(&block)
data/lib/io/event/version.rb CHANGED
@@ -5,6 +5,6 @@
 
 class IO
 	module Event
-		VERSION = "1.1.6"
+		VERSION = "1.2.0"
 	end
 end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.1.6
+  version: 1.2.0
 platform: ruby
 authors:
 - Samuel Williams
@@ -41,7 +41,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2023-01-10 00:00:00.000000000 Z
+date: 2023-04-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bake
@@ -150,7 +150,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.4.1
+rubygems_version: 3.4.10
 signing_key:
 specification_version: 4
 summary: An event loop.
metadata.gz.sig CHANGED
Binary file