io-event 1.1.5 → 1.1.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 26fea80dce46f28ba71af255facb8ef1daca055ba8da63c6bc585eef3baad7b7
- data.tar.gz: 5b0eeb129fc7203e0ffb9dedd5c0638ba2c05c025c47496b79491e13fc160095
+ metadata.gz: 0202b38104b1b6254a15fc12f687f9102509ba75592b1584d3dd5d0e44c497fb
+ data.tar.gz: 925de8b198ea2a9fca99b7c02c5262dd691a9864601c7db109a79c332804b944
  SHA512:
- metadata.gz: f78ca39439f5c89185f96432907f980944d778c2ceffc1f51355e810caac1b844fbb8e8e83b14a3df25d9e8205a40b5b1673052ba9eb2924a9f00639cdfbee54
- data.tar.gz: 1418caaf2eb40020e1b7a984d9d77b5c63a795ede985eab0854d24ad35004304ed0fc6ea5eebf5f77ec37d13c5edff490ad6111b98d8899b2ded50cdca043cc3
+ metadata.gz: d0bb87512e17c8be8ca0b04390466b0c3de198c6ee4a4a0a074b31d8e2f3bb0f482ece3378d0d9653b9219d36f06d41882f4c78631b27f6c0fb02e4143688d33
+ data.tar.gz: 4403550628403eccfb92d60e44ccf096ecb2dee1f2e78917fbef65b8c49cc71b15db96b3074e5b16f0ce005f826db928e4a95fd1214973cf33509659195a2e9a
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -33,7 +33,7 @@ extension_name = 'IO_Event'

  # dir_config(extension_name)

- $CFLAGS << " -Wall -std=c99"
+ $CFLAGS << " -Wall -Wno-unknown-pragmas -std=c99"

  $srcs = ["io/event/event.c", "io/event/selector/selector.c"]
  $VPATH << "$(srcdir)/io/event"
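
Note: the new -Wno-unknown-pragmas flag goes hand in hand with the `#pragma mark - ...` section markers added throughout the URing selector source below. Clang accepts #pragma mark as an editor section label, but GCC treats it as an unknown pragma and, since -Wall enables -Wunknown-pragmas, would otherwise warn on every marker. A small stand-alone illustration (not taken from the gem):

    /* Clang reads "#pragma mark" as a section label; GCC only sees an
       unknown pragma and warns under -Wall unless -Wno-unknown-pragmas
       is also passed. */
    #include <stdio.h>

    #pragma mark - Helpers

    static int add(int a, int b) {
        return a + b;
    }

    #pragma mark - Entry Point

    int main(void) {
        printf("%d\n", add(2, 3));
        return 0;
    }

Compiling this with gcc -Wall and without the extra flag should print an "ignoring #pragma mark" warning for each marker; with -Wno-unknown-pragmas it compiles quietly.
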
@@ -39,6 +39,8 @@ static VALUE IO_Event_Selector_URing = Qnil;

  enum {URING_ENTRIES = 64};

+ #pragma mark - Data Type
+
  struct IO_Event_Selector_URing {
  struct IO_Event_Selector backend;
  struct io_uring ring;
@@ -98,6 +100,8 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
  return instance;
  }

+ #pragma mark - Methods
+
  VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
  struct IO_Event_Selector_URing *data = NULL;
  TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
@@ -179,6 +183,9 @@ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
  return data->backend.ready ? Qtrue : Qfalse;
  }

+ #pragma mark - Submission Queue
+
+ // Flush the submission queue if pending operations are present.
  static
  int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
  if (data->pending) {
@@ -200,8 +207,11 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
  return 0;
  }

+ // Immediately flush the submission queue, yielding to the event loop if it was not successful.
  static
  int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
+ if (DEBUG && data->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", data->pending);
+
  while (true) {
  int result = io_uring_submit(&data->ring);

@@ -218,9 +228,12 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
  }
  }

+ // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
  static
  void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
  data->pending += 1;
+
+ if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &data->ring, data->pending);
  }

  struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
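
The comments added above describe the selector's deferred-submission strategy: io_uring_submit_pending only records that an SQE is waiting, and the actual io_uring_submit call happens later, in io_uring_submit_flush or io_uring_submit_now. A rough, stand-alone sketch of that batching idea using plain liburing (the deferred_ring struct and the NOP request are invented for this illustration; they are not the gem's code):

    #include <liburing.h>
    #include <stdio.h>

    struct deferred_ring {
        struct io_uring ring;
        size_t pending;
    };

    // Record that an SQE has been prepared but not yet submitted.
    static void submit_pending(struct deferred_ring *data) {
        data->pending += 1;
    }

    // Submit everything that was deferred, if anything, in one syscall.
    static int submit_flush(struct deferred_ring *data) {
        if (!data->pending) return 0;
        int result = io_uring_submit(&data->ring);
        if (result >= 0) data->pending = 0;
        return result;
    }

    int main(void) {
        struct deferred_ring data = {.pending = 0};
        io_uring_queue_init(4, &data.ring, 0);

        struct io_uring_sqe *sqe = io_uring_get_sqe(&data.ring);
        io_uring_prep_nop(sqe);
        submit_pending(&data); // deferred: nothing has been sent yet

        submit_flush(&data); // one io_uring_submit covers all pending SQEs

        struct io_uring_cqe *cqe;
        io_uring_wait_cqe(&data.ring, &cqe);
        printf("completed with result %d\n", cqe->res);
        io_uring_cqe_seen(&data.ring, cqe);

        io_uring_queue_exit(&data.ring);
        return 0;
    }

On a kernel with io_uring support this should print "completed with result 0" for the NOP request.
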
@@ -236,6 +249,8 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
  return sqe;
  }

+ #pragma mark - Process.wait
+
  struct process_wait_arguments {
  struct IO_Event_Selector_URing *data;
  pid_t pid;
@@ -284,6 +299,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, V
  return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
  }

+ #pragma mark - IO#wait
+
  static inline
  short poll_flags_from_events(int events) {
  short flags = 0;
@@ -379,6 +396,8 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e

  #ifdef HAVE_RUBY_IO_BUFFER_H

+ #pragma mark - IO#read
+
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)
  static inline off_t io_seekable(int descriptor) {
  return -1;
@@ -395,19 +414,67 @@ static inline off_t io_seekable(int descriptor)
  }
  #endif

- static int io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
- struct io_uring_sqe *sqe = io_get_sqe(data);
+ #pragma mark - IO#read

- if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
+ struct io_read_arguments {
+ struct IO_Event_Selector_URing *data;
+ VALUE fiber;
+ int descriptor;
+ char *buffer;
+ size_t length;
+ };

- io_uring_prep_read(sqe, descriptor, buffer, length, io_seekable(descriptor));
- io_uring_sqe_set_data(sqe, (void*)fiber);
+ static VALUE
+ io_read_submit(VALUE _arguments)
+ {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+ struct IO_Event_Selector_URing *data = arguments->data;
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+
+ io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
+ io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
  io_uring_submit_now(data);

- VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
- if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+ return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ }
+
+ static VALUE
+ io_read_cancel(VALUE _arguments, VALUE exception)
+ {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+ struct IO_Event_Selector_URing *data = arguments->data;
+
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->fiber);
+
+ io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
+ io_uring_sqe_set_data(sqe, NULL);
+ io_uring_submit_now(data);
+
+ rb_exc_raise(exception);
+ }

- return RB_NUM2INT(result);
+ static int
+ io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+ {
+ struct io_read_arguments io_read_arguments = {
+ .data = data,
+ .fiber = fiber,
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length
+ };
+
+ int result = RB_NUM2INT(
+ rb_rescue(io_read_submit, (VALUE)&io_read_arguments, io_read_cancel, (VALUE)&io_read_arguments)
+ );
+
+ if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", result);
+
+ return result;
  }

  VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
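
This refactor splits the old io_read into io_read_submit and io_read_cancel and runs them under rb_rescue: if an exception (for example a timeout or fiber cancellation) is raised while the fiber is suspended, a cancel SQE targeting the in-flight read is queued and the exception is re-raised. The same shape is applied to io_write below. A minimal, hypothetical sketch of the rb_rescue pairing (the demo_* names are invented for illustration and are not the gem's API):

    #include <ruby.h>
    #include <stdio.h>

    // The first callback performs the operation; the second runs only if a
    // StandardError is raised in the meantime, cleans up, and re-raises.

    static VALUE demo_submit(VALUE callable) {
        // Stands in for "prepare an SQE and suspend the fiber".
        return rb_funcall(callable, rb_intern("call"), 0);
    }

    static VALUE demo_cancel(VALUE callable, VALUE exception) {
        // Stands in for "queue an io_uring cancel for the pending SQE".
        fprintf(stderr, "cancelling pending operation\n");
        rb_exc_raise(exception);
        return Qnil; // not reached
    }

    static VALUE demo_call(VALUE self, VALUE callable) {
        return rb_rescue(demo_submit, callable, demo_cancel, callable);
    }

    void Init_demo(void) {
        rb_define_global_function("rescue_demo", demo_call, 1);
    }

Loaded as an extension, rescue_demo(proc { raise "boom" }) would print the cancellation message and then propagate the error, while rescue_demo(proc { 42 }) returns 42 without touching the cancel path.
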
@@ -458,19 +525,67 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
  return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
  }

- static
- int io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ #pragma mark - IO#write
+
+ struct io_write_arguments {
+ struct IO_Event_Selector_URing *data;
+ VALUE fiber;
+ int descriptor;
+ char *buffer;
+ size_t length;
+ };
+
+ static VALUE
+ io_write_submit(VALUE _argument)
+ {
+ struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
+ struct IO_Event_Selector_URing *data = arguments->data;
+
  struct io_uring_sqe *sqe = io_get_sqe(data);

- if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
-
- io_uring_prep_write(sqe, descriptor, buffer, length, io_seekable(descriptor));
- io_uring_sqe_set_data(sqe, (void*)fiber);
+ if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+
+ io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
+ io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
  io_uring_submit_pending(data);

- int result = RB_NUM2INT(IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
- if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
+ return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ }

+ static VALUE
+ io_write_cancel(VALUE _argument, VALUE exception)
+ {
+ struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
+ struct IO_Event_Selector_URing *data = arguments->data;
+
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->fiber);
+
+ io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
+ io_uring_sqe_set_data(sqe, NULL);
+ io_uring_submit_now(data);
+
+ rb_exc_raise(exception);
+ }
+
+ static int
+ io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+ {
+ struct io_write_arguments arguments = {
+ .data = data,
+ .fiber = fiber,
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length,
+ };
+
+ int result = RB_NUM2INT(
+ rb_rescue(io_write_submit, (VALUE)&arguments, io_write_cancel, (VALUE)&arguments)
+ );
+
+ if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
+
  return result;
  }

@@ -526,6 +641,8 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,

  #endif

+ #pragma mark - IO#close
+
  static const int ASYNC_CLOSE = 1;

  VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
@@ -548,6 +665,8 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
  return Qtrue;
  }

+ #pragma mark - Event Loop
+
  static
  struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
  if (duration == Qnil) {
@@ -647,7 +766,7 @@ unsigned select_process_completions(struct io_uring *ring) {

  // io_uring_cq_advance(ring, completed);

- if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
+ if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);

  return completed;
  }
@@ -680,9 +799,6 @@ VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
  if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
  // This is a blocking operation, we wait for events:
  result = select_internal_without_gvl(&arguments);
- } else {
- // The timeout specified required "nonblocking" behaviour so we just flush the SQ if required:
- io_uring_submit_flush(data);
  }

  // After waiting/flushing the SQ, check if there are any completions:
@@ -721,6 +837,8 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
  return Qfalse;
  }

+ #pragma mark - Native Methods
+
  void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
  IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
  rb_gc_register_mark_object(IO_Event_Selector_URing);
@@ -9,6 +9,9 @@ module IO::Event
  module Selector
  def self.nonblock(io, &block)
  io.nonblock(&block)
+ rescue Errno::EBADF
+ # Windows.
+ yield
  end
  end
  end
@@ -140,14 +140,14 @@ module IO::Event
  end.value
  end

+ EAGAIN = -Errno::EAGAIN::Errno
+ EWOULDBLOCK = -Errno::EWOULDBLOCK::Errno
+
+ def again?(errno)
+ errno == EAGAIN or errno == EWOULDBLOCK
+ end
+
  if Support.fiber_scheduler_v2?
- EAGAIN = -Errno::EAGAIN::Errno
- EWOULDBLOCK = -Errno::EWOULDBLOCK::Errno
-
- def again?(errno)
- errno == EAGAIN or errno == EWOULDBLOCK
- end
-
  def io_read(fiber, io, buffer, length, offset = 0)
  total = 0

@@ -236,6 +236,10 @@ module IO::Event
  end

  return total
+ rescue IOError => error
+ return -Errno::EBADF::Errno
+ rescue SystemCallError => error
+ return -error.errno
  end

  def io_write(fiber, _io, buffer, length, offset = 0)
@@ -268,6 +272,10 @@ module IO::Event
  end

  return total
+ rescue IOError => error
+ return -Errno::EBADF::Errno
+ rescue SystemCallError => error
+ return -error.errno
  end

  def blocking(&block)
@@ -5,6 +5,6 @@

  class IO
  module Event
- VERSION = "1.1.5"
+ VERSION = "1.1.7"
  end
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: io-event
  version: !ruby/object:Gem::Version
- version: 1.1.5
+ version: 1.1.7
  platform: ruby
  authors:
  - Samuel Williams
@@ -41,7 +41,7 @@ cert_chain:
  Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
  voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
  -----END CERTIFICATE-----
- date: 2023-01-06 00:00:00.000000000 Z
+ date: 2023-03-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bake
@@ -150,7 +150,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.1
+ rubygems_version: 3.4.6
  signing_key:
  specification_version: 4
  summary: An event loop.
metadata.gz.sig CHANGED
Binary file