io-event 1.3.1 → 1.3.2
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/io/event/selector/uring.c +51 -41
- data/lib/io/event/selector/select.rb +61 -1
- data/lib/io/event/support.rb +11 -0
- data/lib/io/event/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +3 -3
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9e419e583855ca1efed750b1ae61bc6fb7b4524cf30cec819a7461eecdb78d45
+  data.tar.gz: 7c64477599375b9612004acb9e725e7af260a51d404b071e209e866d734a083a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c9b63cafbb56b9eac0f01edd180a70a38de6465ff4624b07360df135d0eeeab984c5348b0981d9c41ce943248af2a554528914e0cb9ae4606737d4ece2b96a8b
+  data.tar.gz: 1fecec388e9a5c517c83dc233a107cd86121a7e88f3cf5275e0ac00dd7a43aa826ae616b815b556c73ec13e5fc108f9c7cbe1bca1215fb0eb8bfed9daadb048b
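These digests cover the two archives packed inside the distributed .gem file. For context, a minimal Ruby sketch of how such entries could be verified by hand; the file layout is hypothetical and assumes metadata.gz and data.tar.gz have already been unpacked from io-event-1.3.2.gem next to checksums.yaml:

require "digest"
require "yaml"

# Hypothetical layout: checksums.yaml, metadata.gz and data.tar.gz in the working directory.
checksums = YAML.safe_load(File.read("checksums.yaml"))

["metadata.gz", "data.tar.gz"].each do |name|
  sha256 = Digest::SHA256.file(name).hexdigest
  sha512 = Digest::SHA512.file(name).hexdigest
  ok = sha256 == checksums["SHA256"][name] && sha512 == checksums["SHA512"][name]
  puts "#{name}: #{ok ? 'OK' : 'MISMATCH'}"
end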
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/io/event/selector/uring.c
CHANGED
@@ -326,6 +326,32 @@ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
 
 #pragma mark - Submission Queue
 
+static
+void IO_Event_Selector_URing_dump_completion_queue(struct IO_Event_Selector_URing *selector)
+{
+	struct io_uring *ring = &selector->ring;
+	unsigned head;
+	struct io_uring_cqe *cqe;
+	
+	if (DEBUG) {
+		int first = 1;
+		io_uring_for_each_cqe(ring, head, cqe) {
+			if (!first) {
+				fprintf(stderr, ", ");
+			}
+			else {
+				fprintf(stderr, "CQ: [");
+				first = 0;
+			}
+			
+			fprintf(stderr, "%d:%p", (int)cqe->res, (void*)cqe->user_data);
+		}
+		if (!first) {
+			fprintf(stderr, "]\n");
+		}
+	}
+}
+
 // Flush the submission queue if pending operations are present.
 static
 int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
@@ -345,19 +371,24 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
 		return result;
 	}
 	
+	if (DEBUG) {
+		IO_Event_Selector_URing_dump_completion_queue(selector);
+	}
+	
 	return 0;
 }
 
 // Immediately flush the submission queue, yielding to the event loop if it was not successful.
 static
 int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
-	if (DEBUG
-	
+	if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
+	
 	while (true) {
 		int result = io_uring_submit(&selector->ring);
 		
 		if (result >= 0) {
 			selector->pending = 0;
+			if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
 			return result;
 		}
 		
@@ -369,12 +400,6 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
 	}
 }
 
-static
-void IO_Event_Selector_URing_submit_sqe(struct io_uring_sqe *sqe)
-{
-	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_submit_sqe(%p): user_data=%p opcode=%d\n", sqe, (void*)sqe->user_data, sqe->opcode);
-}
-
 // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
 static
 void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
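The comment above is the heart of the submission strategy: non-urgent operations are queued with io_uring_submit_pending and only flushed in a batch when the selector next submits, which is why every path that parks a fiber must eventually reach io_uring_submit_flush or io_uring_submit_now. A hedged Ruby-level sketch of that flow, modelled on the gem's own test style (Linux with io-event installed; treat the exact control flow as an assumption):

require "io/event"

# The selector transfers control back to the fiber that owns it (the "loop"):
selector = IO::Event::Selector::URing.new(Fiber.current)

input, output = IO.pipe

waiter = Fiber.new do
  # Internally prepares a poll SQE and defers it via io_uring_submit_pending,
  # then transfers back to the loop fiber:
  selector.io_wait(Fiber.current, input, IO::READABLE)
  puts "input is readable"
end

waiter.transfer       # Start waiting; control returns here once the SQE is queued.
output.puts "ready"   # Make the pipe readable.
selector.select(1)    # Flushes pending SQEs, reaps the completion, resumes the waiter.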
@@ -455,12 +480,10 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
 		.descriptor = descriptor,
 	};
 	
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
 	io_uring_sqe_set_data(sqe, completion);
-	IO_Event_Selector_URing_submit_sqe(sqe);
 	io_uring_submit_pending(selector);
 	
 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
@@ -504,14 +527,12 @@ static
 VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 	
-	if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-	
 	// If the operation is still in progress, cancel it:
 	if (arguments->waiting->completion) {
+		if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
 		struct io_uring_sqe *sqe = io_get_sqe(arguments->selector);
 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
 		io_uring_sqe_set_data(sqe, NULL);
-		IO_Event_Selector_URing_submit_sqe(sqe);
 		io_uring_submit_now(arguments->selector);
 	}
 	
@@ -543,23 +564,20 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 	
 	int descriptor = IO_Event_Selector_io_descriptor(io);
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
 	
 	short flags = poll_flags_from_events(NUM2INT(events));
 	
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 	
-	io_uring_prep_poll_add(sqe, descriptor, flags);
-	
 	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
 	};
 	
 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 	
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
+	io_uring_prep_poll_add(sqe, descriptor, flags);
 	io_uring_sqe_set_data(sqe, completion);
-	IO_Event_Selector_URing_submit_sqe(sqe);
-	
 	// If we are going to wait, we assume that we are waiting for a while:
 	io_uring_submit_pending(selector);
 	
@@ -607,13 +625,12 @@ io_read_submit(VALUE _arguments)
 {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 	struct IO_Event_Selector_URing *selector = arguments->selector;
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
 	
 	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
-	IO_Event_Selector_URing_submit_sqe(sqe);
 	io_uring_submit_now(selector);
 	
 	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
@@ -627,15 +644,12 @@ io_read_ensure(VALUE _arguments)
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 	struct IO_Event_Selector_URing *selector = arguments->selector;
 	
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	
-	if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-	
 	// If the operation is still in progress, cancel it:
 	if (arguments->waiting->completion) {
+		if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+		struct io_uring_sqe *sqe = io_get_sqe(selector);
 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
 		io_uring_sqe_set_data(sqe, NULL);
-		IO_Event_Selector_URing_submit_sqe(sqe);
 		io_uring_submit_now(selector);
 	}
 	
@@ -732,13 +746,11 @@ io_write_submit(VALUE _argument)
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
 	struct IO_Event_Selector_URing *selector = arguments->selector;
 	
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	
 	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 	
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
-	IO_Event_Selector_URing_submit_sqe(sqe);
 	io_uring_submit_pending(selector);
 	
 	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
@@ -752,15 +764,12 @@ io_write_ensure(VALUE _argument)
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
 	struct IO_Event_Selector_URing *selector = arguments->selector;
 	
-	struct io_uring_sqe *sqe = io_get_sqe(selector);
-	
-	if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-	
 	// If the operation is still in progress, cancel it:
 	if (arguments->waiting->completion) {
+		if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+		struct io_uring_sqe *sqe = io_get_sqe(selector);
 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
 		io_uring_sqe_set_data(sqe, NULL);
-		IO_Event_Selector_URing_submit_sqe(sqe);
 		io_uring_submit_now(selector);
 	}
 	
@@ -859,10 +868,8 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
 	
 	if (ASYNC_CLOSE) {
 		struct io_uring_sqe *sqe = io_get_sqe(selector);
-		
 		io_uring_prep_close(sqe, descriptor);
 		io_uring_sqe_set_data(sqe, NULL);
-		IO_Event_Selector_URing_submit_sqe(sqe);
 		io_uring_submit_now(selector);
 	} else {
 		close(descriptor);
@@ -953,7 +960,10 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
 	unsigned head;
 	struct io_uring_cqe *cqe;
 	
-	if (DEBUG)
+	if (DEBUG) {
+		fprintf(stderr, "select_process_completions: selector=%p\n", (void*)selector);
+		IO_Event_Selector_URing_dump_completion_queue(selector);
+	}
 	
 	io_uring_for_each_cqe(ring, head, cqe) {
 		if (DEBUG) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
@@ -976,15 +986,15 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
 			waiting->flags = cqe->flags;
 		}
 		
+		io_uring_cq_advance(ring, 1);
+		// This marks the waiting operation as "complete":
+		IO_Event_Selector_URing_Completion_release(selector, completion);
+		
 		if (waiting && waiting->fiber) {
 			assert(waiting->result != -ECANCELED);
 			
 			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
 		}
-		
-		// This marks the waiting operation as "complete":
-		IO_Event_Selector_URing_Completion_release(selector, completion);
-		io_uring_cq_advance(ring, 1);
 	}
 	
 	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
data/lib/io/event/selector/select.rb
CHANGED
@@ -160,7 +160,66 @@ module IO::Event
 			errno == EAGAIN or errno == EWOULDBLOCK
 		end
 		
-		if Support.fiber_scheduler_v2?
+		if Support.fiber_scheduler_v3?
+			# Ruby 3.3+, full IO::Buffer support.
+			
+			# @parameter length [Integer] The minimum number of bytes to read.
+			# @parameter offset [Integer] The offset into the buffer to read to.
+			def io_read(fiber, io, buffer, length, offset = 0)
+				total = 0
+				
+				Selector.nonblock(io) do
+					while true
+						result = Fiber.blocking{buffer.read(io, 0, offset)}
+						
+						if result < 0
+							if again?(result)
+								self.io_wait(fiber, io, IO::READABLE)
+							else
+								return result
+							end
+						elsif result == 0
+							break
+						else
+							total += result
+							break if total >= length
+							offset += result
+						end
+					end
+				end
+				
+				return total
+			end
+			
+			# @parameter length [Integer] The minimum number of bytes to write.
+			# @parameter offset [Integer] The offset into the buffer to write from.
+			def io_write(fiber, io, buffer, length, offset = 0)
+				total = 0
+				
+				Selector.nonblock(io) do
+					while true
+						result = Fiber.blocking{buffer.write(io, 0, offset)}
+						
+						if result < 0
+							if again?(result)
+								self.io_wait(fiber, io, IO::READABLE)
+							else
+								return result
+							end
+						elsif result == 0
+							break result
+						else
+							total += result
+							break if total >= length
+							offset += result
+						end
+					end
+				end
+				
+				return total
+			end
+		elsif Support.fiber_scheduler_v2?
+			# Ruby 3.2, most IO::Buffer support, but slightly clunky read/write methods.
 			def io_read(fiber, io, buffer, length, offset = 0)
 				total = 0
 				
@@ -219,6 +278,7 @@ module IO::Event
 				return total
 			end
 		elsif Support.fiber_scheduler_v1?
+			# Ruby <= 3.1, limited IO::Buffer support.
 			def io_read(fiber, _io, buffer, length, offset = 0)
 				io = IO.for_fd(_io.fileno, autoclose: false)
 				total = 0
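The new fiber_scheduler_v3? branch leans on Ruby 3.3's IO::Buffer#read and IO::Buffer#write accepting an offset directly, instead of the slicing the v2 branch needs, and on negative errno return values being handled by the again? helper above. A small standalone sketch of the same primitive, assuming Ruby 3.3 semantics:

r, w = IO.pipe
w.write("hello world")

buffer = IO::Buffer.new(16)

# Read 5 bytes into the buffer at offset 0:
count = buffer.read(r, 5, 0)   # => 5

# Continue reading at offset 5; the offset argument is what avoids slicing:
count += buffer.read(r, 6, 5)  # => 11 in total

puts buffer.get_string(0, count) # => "hello world"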
data/lib/io/event/support.rb
CHANGED
@@ -17,6 +17,17 @@ class IO
 			def self.fiber_scheduler_v2?
 				IO.const_defined?(:Buffer) and Fiber.respond_to?(:blocking) and IO::Buffer.instance_method(:read).arity == -1
 			end
+			
+			def self.fiber_scheduler_v3?
+				if fiber_scheduler_v2?
+					begin
+						IO::Buffer.new.slice(0, 0).write(STDOUT)
+						return true
+					rescue
+						return false
+					end
+				end
+			end
 		end
 	end
 end
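The probe presumably relies on zero-length IO::Buffer slice writes being rejected before Ruby 3.3, so a successful write implies the fuller 3.3 implementation. A quick way to see which branch select.rb will take on a given Ruby, assuming the gem is installed:

require "io/event"

support = IO::Event::Support

if support.fiber_scheduler_v3?
  puts "v3 branch: Ruby 3.3+ IO::Buffer semantics"
elsif support.fiber_scheduler_v2?
  puts "v2 branch: Ruby 3.2 IO::Buffer semantics"
else
  puts "v1 branch: limited IO::Buffer support"
end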
data/lib/io/event/version.rb
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.3.1
+  version: 1.3.2
 platform: ruby
 authors:
 - Samuel Williams
@@ -42,7 +42,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2023-08-
+date: 2023-08-24 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
@@ -97,7 +97,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.5.0.dev
 signing_key:
 specification_version: 4
 summary: An event loop.
metadata.gz.sig
CHANGED
Binary file