io-event 1.3.0 → 1.3.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 0743271de82b1b4324953fadd2f5da09cb79609c1153ae8637e4f23f31f0615c
- data.tar.gz: e90309751d3bc2716362d596b07a477a0f9b1c6ef27443f06469547bce10cb40
+ metadata.gz: 9e419e583855ca1efed750b1ae61bc6fb7b4524cf30cec819a7461eecdb78d45
+ data.tar.gz: 7c64477599375b9612004acb9e725e7af260a51d404b071e209e866d734a083a
  SHA512:
- metadata.gz: f1fe9c27d9ed25969eaca758565f99346dea63f397e32e8bec027312b0936b61b6677a520cbacb4f0c3e976979ff3ee4cfac93ea766d92cd8b88fa19cab1ad7c
- data.tar.gz: 4c476754268f03564134fc461bdeee26c0989fc6673cd27235ccc6d1a6df4d03b680f677469db531e8b1613622bd356ffa762d35b2b39ab455d522c6d624d45e
+ metadata.gz: c9b63cafbb56b9eac0f01edd180a70a38de6465ff4624b07360df135d0eeeab984c5348b0981d9c41ce943248af2a554528914e0cb9ae4606737d4ece2b96a8b
+ data.tar.gz: 1fecec388e9a5c517c83dc233a107cd86121a7e88f3cf5275e0ac00dd7a43aa826ae616b815b556c73ec13e5fc108f9c7cbe1bca1215fb0eb8bfed9daadb048b
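These digests cover the two members of the published .gem archive (metadata.gz and data.tar.gz), not the .gem file itself. A minimal verification sketch in Ruby, assuming io-event-1.3.2.gem has already been downloaded into the current directory:

    require 'digest'
    require 'rubygems/package'

    # A .gem file is a tar archive; hash its metadata.gz and data.tar.gz members
    # and compare the output against the SHA256 entries in checksums.yaml above.
    File.open('io-event-1.3.2.gem', 'rb') do |file|
      Gem::Package::TarReader.new(file) do |tar|
        tar.each do |entry|
          next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
          puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
        end
      end
    end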
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -3,26 +3,7 @@

  # Released under the MIT License.
  # Copyright, 2021-2023, by Samuel Williams.
-
- # Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
- #
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
- #
- # The above copyright notice and this permission notice shall be included in
- # all copies or substantial portions of the Software.
- #
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- # THE SOFTWARE.
+ # Copyright, 2023, by Math Ieu.

  return if RUBY_DESCRIPTION =~ /jruby/

@@ -34,6 +34,7 @@

  enum {
  DEBUG = 0,
+ DEBUG_COMPLETION = 0,
  };

  static VALUE IO_Event_Selector_URing = Qnil;
@@ -165,6 +166,8 @@ struct IO_Event_Selector_URing_Completion * IO_Event_Selector_URing_Completion_a
  IO_Event_List_clear(&completion->list);
  }

+ if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_acquire(%p, limit=%ld)\n", (void*)completion, selector->completions.limit);
+
  waiting->completion = completion;
  completion->waiting = waiting;

@@ -174,6 +177,8 @@ struct IO_Event_Selector_URing_Completion * IO_Event_Selector_URing_Completion_a
  inline static
  void IO_Event_Selector_URing_Completion_cancel(struct IO_Event_Selector_URing_Completion *completion)
  {
+ if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_cancel(%p)\n", (void*)completion);
+
  if (completion->waiting) {
  completion->waiting->completion = NULL;
  completion->waiting = NULL;
@@ -183,13 +188,17 @@ void IO_Event_Selector_URing_Completion_cancel(struct IO_Event_Selector_URing_Co
  inline static
  void IO_Event_Selector_URing_Completion_release(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Completion *completion)
  {
+ if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_release(%p)\n", (void*)completion);
+
  IO_Event_Selector_URing_Completion_cancel(completion);
  IO_Event_List_prepend(&selector->free_list, &completion->list);
  }

  inline static
- void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Waiting *waiting)
+ void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing_Waiting *waiting)
  {
+ if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Waiting_cancel(%p, %p)\n", (void*)waiting, (void*)waiting->completion);
+
  if (waiting->completion) {
  waiting->completion->waiting = NULL;
  waiting->completion = NULL;
@@ -198,10 +207,13 @@ void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing *sele
  waiting->fiber = 0;
  }

+ struct IO_Event_List_Type IO_Event_Selector_URing_Completion_Type = {};
+
  void IO_Event_Selector_URing_Completion_initialize(void *element)
  {
  struct IO_Event_Selector_URing_Completion *completion = element;
  IO_Event_List_initialize(&completion->list);
+ completion->list.type = &IO_Event_Selector_URing_Completion_Type;
  }

  void IO_Event_Selector_URing_Completion_free(void *element)
@@ -314,6 +326,32 @@ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {

  #pragma mark - Submission Queue

+ static
+ void IO_Event_Selector_URing_dump_completion_queue(struct IO_Event_Selector_URing *selector)
+ {
+ struct io_uring *ring = &selector->ring;
+ unsigned head;
+ struct io_uring_cqe *cqe;
+
+ if (DEBUG) {
+ int first = 1;
+ io_uring_for_each_cqe(ring, head, cqe) {
+ if (!first) {
+ fprintf(stderr, ", ");
+ }
+ else {
+ fprintf(stderr, "CQ: [");
+ first = 0;
+ }
+
+ fprintf(stderr, "%d:%p", (int)cqe->res, (void*)cqe->user_data);
+ }
+ if (!first) {
+ fprintf(stderr, "]\n");
+ }
+ }
+ }
+
  // Flush the submission queue if pending operations are present.
  static
  int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
@@ -333,19 +371,24 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
  return result;
  }

+ if (DEBUG) {
+ IO_Event_Selector_URing_dump_completion_queue(selector);
+ }
+
  return 0;
  }

  // Immediately flush the submission queue, yielding to the event loop if it was not successful.
  static
  int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
- if (DEBUG && selector->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
-
+ if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
+
  while (true) {
  int result = io_uring_submit(&selector->ring);

  if (result >= 0) {
  selector->pending = 0;
+ if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
  return result;
  }

@@ -407,7 +450,7 @@ VALUE process_wait_ensure(VALUE _arguments) {

  close(arguments->descriptor);

- IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+ IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

  return Qnil;
  }
@@ -437,9 +480,8 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  .descriptor = descriptor,
  };

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
  if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
  io_uring_sqe_set_data(sqe, completion);
  io_uring_submit_pending(selector);
@@ -485,12 +527,16 @@ static
  VALUE io_wait_ensure(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;

- // We may want to consider cancellation. Be aware that the order of operations is important here:
- // io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
- // io_uring_sqe_set_data(sqe, NULL);
- // io_uring_submit_now(selector);
+ // If the operation is still in progress, cancel it:
+ if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ struct io_uring_sqe *sqe = io_get_sqe(arguments->selector);
+ io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+ io_uring_sqe_set_data(sqe, NULL);
+ io_uring_submit_now(arguments->selector);
+ }

- IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+ IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

  return Qnil;
  };
@@ -502,6 +548,8 @@ VALUE io_wait_transfer(VALUE _arguments) {

  IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);

+ if (DEBUG) fprintf(stderr, "io_wait_transfer:waiting=%p, result=%d\n", (void*)arguments->waiting, arguments->waiting->result);
+
  if (arguments->waiting->result) {
  // We explicitly filter the resulting events based on the requested events.
  // In some cases, poll will report events we didn't ask for.
@@ -516,22 +564,20 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
  TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  int descriptor = IO_Event_Selector_io_descriptor(io);
- struct io_uring_sqe *sqe = io_get_sqe(selector);

  short flags = poll_flags_from_events(NUM2INT(events));

  if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);

- io_uring_prep_poll_add(sqe, descriptor, flags);
-
  struct IO_Event_Selector_URing_Waiting waiting = {
  .fiber = fiber,
  };

  struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
+ io_uring_prep_poll_add(sqe, descriptor, flags);
  io_uring_sqe_set_data(sqe, completion);
-
  // If we are going to wait, we assume that we are waiting for a while:
  io_uring_submit_pending(selector);

@@ -579,10 +625,10 @@ io_read_submit(VALUE _arguments)
  {
  struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
  struct IO_Event_Selector_URing *selector = arguments->selector;
- struct io_uring_sqe *sqe = io_get_sqe(selector);

- if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, arguments->descriptor, arguments->buffer, arguments->length);
+ if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
  io_uring_submit_now(selector);
@@ -598,18 +644,16 @@ io_read_ensure(VALUE _arguments)
  struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
- if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->waiting);
-
- // If the operation has already completed, we don't need to cancel it:
- if (!arguments->waiting->result) {
- io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
+ // If the operation is still in progress, cancel it:
+ if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
+ io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
  io_uring_sqe_set_data(sqe, NULL);
  io_uring_submit_now(selector);
  }

- IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+ IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

  return Qnil;
  }
@@ -702,10 +746,9 @@ io_write_submit(VALUE _argument)
  struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
- if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, arguments->descriptor, arguments->buffer, arguments->length);
+ if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
  io_uring_submit_pending(selector);
@@ -721,17 +764,16 @@ io_write_ensure(VALUE _argument)
  struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
- if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->waiting);
-
- if (!arguments->waiting->result) {
- io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
+ // If the operation is still in progress, cancel it:
+ if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
+ io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
  io_uring_sqe_set_data(sqe, NULL);
  io_uring_submit_now(selector);
  }

- IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+ IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

  return Qnil;
  }
@@ -826,7 +868,6 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {

  if (ASYNC_CLOSE) {
  struct io_uring_sqe *sqe = io_get_sqe(selector);
-
  io_uring_prep_close(sqe, descriptor);
  io_uring_sqe_set_data(sqe, NULL);
  io_uring_submit_now(selector);
@@ -919,32 +960,41 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
  unsigned head;
  struct io_uring_cqe *cqe;

+ if (DEBUG) {
+ fprintf(stderr, "select_process_completions: selector=%p\n", (void*)selector);
+ IO_Event_Selector_URing_dump_completion_queue(selector);
+ }
+
  io_uring_for_each_cqe(ring, head, cqe) {
+ if (DEBUG) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
  ++completed;

- // If the operation was cancelled, or the operation has no user data (fiber):
+ // If the operation was cancelled, or the operation has no user data:
  if (cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
  io_uring_cq_advance(ring, 1);
  continue;
  }

- if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
-
  struct IO_Event_Selector_URing_Completion *completion = (void*)cqe->user_data;
  struct IO_Event_Selector_URing_Waiting *waiting = completion->waiting;

+ if (DEBUG) fprintf(stderr, "select_process_completions: completion=%p waiting=%p\n", (void*)completion, (void*)waiting);
+
  if (waiting) {
  waiting->result = cqe->res;
  waiting->flags = cqe->flags;
  }

  io_uring_cq_advance(ring, 1);
+ // This marks the waiting operation as "complete":
+ IO_Event_Selector_URing_Completion_release(selector, completion);

  if (waiting && waiting->fiber) {
+ assert(waiting->result != -ECANCELED);
+
  IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
  }
-
- IO_Event_Selector_URing_Completion_release(selector, completion);
  }

  if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
@@ -160,7 +160,66 @@ module IO::Event
  errno == EAGAIN or errno == EWOULDBLOCK
  end

- if Support.fiber_scheduler_v2?
+ if Support.fiber_scheduler_v3?
+ # Ruby 3.3+, full IO::Buffer support.
+
+ # @parameter length [Integer] The minimum number of bytes to read.
+ # @parameter offset [Integer] The offset into the buffer to read to.
+ def io_read(fiber, io, buffer, length, offset = 0)
+ total = 0
+
+ Selector.nonblock(io) do
+ while true
+ result = Fiber.blocking{buffer.read(io, 0, offset)}
+
+ if result < 0
+ if again?(result)
+ self.io_wait(fiber, io, IO::READABLE)
+ else
+ return result
+ end
+ elsif result == 0
+ break
+ else
+ total += result
+ break if total >= length
+ offset += result
+ end
+ end
+ end
+
+ return total
+ end
+
+ # @parameter length [Integer] The minimum number of bytes to write.
+ # @parameter offset [Integer] The offset into the buffer to write from.
+ def io_write(fiber, io, buffer, length, offset = 0)
+ total = 0
+
+ Selector.nonblock(io) do
+ while true
+ result = Fiber.blocking{buffer.write(io, 0, offset)}
+
+ if result < 0
+ if again?(result)
+ self.io_wait(fiber, io, IO::READABLE)
+ else
+ return result
+ end
+ elsif result == 0
+ break result
+ else
+ total += result
+ break if total >= length
+ offset += result
+ end
+ end
+ end
+
+ return total
+ end
+ elsif Support.fiber_scheduler_v2?
+ # Ruby 3.2, most IO::Buffer support, but slightly clunky read/write methods.
  def io_read(fiber, io, buffer, length, offset = 0)
  total = 0

@@ -219,6 +278,7 @@ module IO::Event
  return total
  end
  elsif Support.fiber_scheduler_v1?
+ # Ruby <= 3.1, limited IO::Buffer support.
  def io_read(fiber, _io, buffer, length, offset = 0)
  io = IO.for_fd(_io.fileno, autoclose: false)
  total = 0
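The io_read/io_write loops above depend on IO::Buffer#read and #write returning the number of bytes transferred on success, with a negative errno value (matched by again?) signalling that the descriptor is not yet ready. A standalone sketch of that return convention, not taken from the gem, assuming a Ruby with IO::Buffer (3.1+):

    # Write a few bytes into a pipe, then pull them into an IO::Buffer.
    r, w = IO.pipe
    w.write("hello")

    buffer = IO::Buffer.new(64)

    # Read up to 5 bytes from the pipe into the buffer (offset 0 by default);
    # the return value is the number of bytes actually transferred.
    result = buffer.read(r, 5)
    puts result                        # => 5
    puts buffer.get_string(0, result)  # => "hello"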
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  # Released under the MIT License.
- # Copyright, 2021-2022, by Samuel Williams.
+ # Copyright, 2021-2023, by Samuel Williams.

  require_relative 'selector/select'
  require_relative 'debug/selector'
@@ -17,6 +17,17 @@ class IO
  def self.fiber_scheduler_v2?
  IO.const_defined?(:Buffer) and Fiber.respond_to?(:blocking) and IO::Buffer.instance_method(:read).arity == -1
  end
+
+ def self.fiber_scheduler_v3?
+ if fiber_scheduler_v2?
+ begin
+ IO::Buffer.new.slice(0, 0).write(STDOUT)
+ return true
+ rescue
+ return false
+ end
+ end
+ end
  end
  end
  end
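The new fiber_scheduler_v3? probe detects the richer Ruby 3.3 IO::Buffer interface by attempting a zero-length write and treating any exception as "not supported". A small sketch of querying the probes directly, assuming the gem is installed and that require 'io/event' loads the Support module:

    require 'io/event'

    # The selector uses these probes (see the io_read/io_write hunks above)
    # to choose which implementation to define for the running Ruby.
    puts IO::Event::Support.fiber_scheduler_v1?
    puts IO::Event::Support.fiber_scheduler_v2?
    puts IO::Event::Support.fiber_scheduler_v3?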
@@ -5,6 +5,6 @@

  class IO
  module Event
- VERSION = "1.3.0"
+ VERSION = "1.3.2"
  end
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,12 +1,12 @@
  --- !ruby/object:Gem::Specification
  name: io-event
  version: !ruby/object:Gem::Version
- version: 1.3.0
+ version: 1.3.2
  platform: ruby
  authors:
  - Samuel Williams
- - Bruno Sutic
  - Math Ieu
+ - Bruno Sutic
  - Alex Matchneer
  - Benoit Daloze
  - Delton Ding
@@ -42,7 +42,7 @@ cert_chain:
  Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
  voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
  -----END CERTIFICATE-----
- date: 2023-08-23 00:00:00.000000000 Z
+ date: 2023-08-24 00:00:00.000000000 Z
  dependencies: []
  description:
  email:
@@ -97,7 +97,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.10
+ rubygems_version: 3.5.0.dev
  signing_key:
  specification_version: 4
  summary: An event loop.
metadata.gz.sig CHANGED
Binary file