polyphony 0.72 → 0.75
This diff shows the changes between publicly released versions of the package, as published to the supported registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +15 -11
- data/.github/workflows/test_io_uring.yml +32 -0
- data/.gitignore +3 -1
- data/CHANGELOG.md +24 -0
- data/Gemfile.lock +16 -13
- data/bin/pdbg +0 -0
- data/bin/polyphony-debug +0 -0
- data/bin/stress.rb +0 -0
- data/bin/test +0 -0
- data/docs/api-reference/exception.md +5 -1
- data/examples/core/ring.rb +29 -0
- data/ext/polyphony/backend_common.c +90 -12
- data/ext/polyphony/backend_common.h +9 -1
- data/ext/polyphony/backend_io_uring.c +257 -134
- data/ext/polyphony/backend_io_uring_context.c +1 -0
- data/ext/polyphony/backend_io_uring_context.h +2 -1
- data/ext/polyphony/backend_libev.c +33 -29
- data/ext/polyphony/event.c +5 -2
- data/ext/polyphony/extconf.rb +1 -0
- data/ext/polyphony/polyphony.c +11 -1
- data/ext/polyphony/polyphony.h +9 -2
- data/ext/polyphony/queue.c +10 -5
- data/ext/polyphony/runqueue_ring_buffer.c +3 -1
- data/ext/polyphony/socket_extensions.c +5 -2
- data/ext/polyphony/thread.c +1 -1
- data/lib/polyphony/{extensions → core}/debug.rb +0 -0
- data/lib/polyphony/core/global_api.rb +0 -3
- data/lib/polyphony/extensions/exception.rb +45 -0
- data/lib/polyphony/extensions/fiber.rb +85 -4
- data/lib/polyphony/extensions/{core.rb → kernel.rb} +0 -73
- data/lib/polyphony/extensions/openssl.rb +5 -1
- data/lib/polyphony/extensions/process.rb +19 -0
- data/lib/polyphony/extensions/socket.rb +12 -6
- data/lib/polyphony/extensions/thread.rb +9 -3
- data/lib/polyphony/extensions/timeout.rb +10 -0
- data/lib/polyphony/extensions.rb +9 -0
- data/lib/polyphony/version.rb +1 -1
- data/lib/polyphony.rb +4 -4
- data/test/helper.rb +0 -5
- data/test/test_backend.rb +3 -5
- data/test/test_global_api.rb +21 -12
- data/test/test_io.rb +2 -2
- data/test/test_kernel.rb +2 -2
- data/test/test_process_supervision.rb +1 -1
- data/test/test_signal.rb +20 -1
- data/test/test_socket.rb +35 -2
- data/test/test_thread.rb +1 -1
- data/test/test_thread_pool.rb +1 -1
- data/test/test_throttler.rb +3 -3
- data/test/test_timer.rb +1 -1
- data/test/test_trace.rb +7 -1
- metadata +11 -5
data/ext/polyphony/backend_io_uring.c +257 -134 (removed lines whose original text was not captured appear as a bare "-"):

@@ -42,6 +42,7 @@ typedef struct Backend_t {
   unsigned int pending_sqes;
   unsigned int prepared_limit;
   int event_fd;
+  int ring_initialized;
 } Backend_t;
 
 static void Backend_mark(void *ptr) {
@@ -80,20 +81,32 @@ static VALUE Backend_initialize(VALUE self) {
 
   backend_base_initialize(&backend->base);
   backend->pending_sqes = 0;
-  backend->
+  backend->ring_initialized = 0;
+  backend->event_fd = -1;
 
   context_store_initialize(&backend->store);
-  io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
-  backend->event_fd = -1;
 
-  return self;
+  backend->prepared_limit = 1024;
+  while (1) {
+    int ret = io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
+    if (!ret) break;
+
+    // if ENOMEM is returned, use a smaller limit
+    if (ret == -ENOMEM && backend->prepared_limit > 64)
+      backend->prepared_limit = backend->prepared_limit / 2;
+    else
+      rb_syserr_fail(-ret, strerror(-ret));
+  }
+  backend->ring_initialized = 1;
+
+  return self;
 }
 
 VALUE Backend_finalize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  io_uring_queue_exit(&backend->ring);
+  if (backend->ring_initialized) io_uring_queue_exit(&backend->ring);
   if (backend->event_fd != -1) close(backend->event_fd);
   context_store_free(&backend->store);
   return self;
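The new initialization path retries `io_uring_queue_init` with a progressively smaller `prepared_limit` when the kernel reports `-ENOMEM` (typically a too-low locked-memory limit), and only marks the ring usable once setup succeeds. A minimal standalone sketch of the same fallback pattern, assuming only liburing is installed; the function and names below are illustrative, not Polyphony's:

```c
#include <errno.h>
#include <stdio.h>
#include <liburing.h>

// Try to set up an io_uring, halving the requested number of entries
// on ENOMEM until a floor is reached. Returns 0 on success, -errno on failure.
static int init_ring_with_fallback(struct io_uring *ring, unsigned entries, unsigned floor) {
  while (1) {
    int ret = io_uring_queue_init(entries, ring, 0);
    if (!ret) return 0;
    // ENOMEM usually means the locked-memory limit is too small for this ring size
    if (ret == -ENOMEM && entries > floor)
      entries /= 2;
    else
      return ret;
  }
}

int main(void) {
  struct io_uring ring;
  int ret = init_ring_with_fallback(&ring, 1024, 64);
  if (ret) {
    fprintf(stderr, "io_uring_queue_init failed: %d\n", ret);
    return 1;
  }
  io_uring_queue_exit(&ring);
  return 0;
}
```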
@@ -127,7 +140,7 @@ void *io_uring_backend_poll_without_gvl(void *ptr) {
 
 // copied from queue.c
 static inline bool cq_ring_needs_flush(struct io_uring *ring) {
-
+  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
 static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
@@ -145,9 +158,9 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
 // this peeks at cqes and handles each available cqe
 void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
   struct io_uring *ring = &backend->ring;
-
+  bool overflow_checked = false;
   struct io_uring_cqe *cqe;
-
+  unsigned head;
   unsigned cqe_count;
 
 again:
@@ -158,16 +171,16 @@ again:
   }
   io_uring_cq_advance(ring, cqe_count);
 
-
+  if (overflow_checked) goto done;
 
-
-
-
-
-
+  if (cq_ring_needs_flush(ring)) {
+    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    overflow_checked = true;
+    goto again;
+  }
 
 done:
-
+  return;
 }
 
 void io_uring_backend_poll(Backend_t *backend) {
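The reworked `io_uring_backend_handle_ready_cqes` drains all ready CQEs and, if the kernel has flagged a CQ ring overflow, forces a flush via a raw `io_uring_enter(IORING_ENTER_GETEVENTS)` and drains exactly once more. A rough equivalent of that drain loop using public liburing macros plus a raw syscall for the flush (the handler is a stub; this is a sketch, not Polyphony's code):

```c
#include <liburing.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdbool.h>

static void handle_cqe(struct io_uring_cqe *cqe) { (void)cqe; /* dispatch here */ }

// Drain ready CQEs; if the CQ ring overflowed, ask the kernel to flush
// overflowed completions once, then drain again.
static void drain_cqes(struct io_uring *ring) {
  bool overflow_checked = false;
  struct io_uring_cqe *cqe;
  unsigned head;
  unsigned count;

again:
  count = 0;
  io_uring_for_each_cqe(ring, head, cqe) {
    count++;
    handle_cqe(cqe);
  }
  io_uring_cq_advance(ring, count);

  if (overflow_checked) return;
  if (IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW) {
    // raw io_uring_enter(2): nothing to submit, GETEVENTS flushes the overflow
    syscall(__NR_io_uring_enter, ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL, 0);
    overflow_checked = true;
    goto again;
  }
}
```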
@@ -282,9 +295,11 @@ int io_uring_backend_defer_submit_and_await(
   switchpoint_result = backend_await((struct Backend_base *)backend);
 
   if (ctx->ref_count > 1) {
+    struct io_uring_sqe *sqe;
+
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
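Throughout the backend, an op whose awaiting fiber is interrupted is cancelled by submitting `IORING_OP_ASYNC_CANCEL` keyed on the op's context pointer, as above. The bare liburing idiom (using the older `void *user_data` form of `io_uring_prep_cancel` that this file also relies on) looks roughly like:

```c
#include <liburing.h>

// Cancel a previously submitted operation identified by its user_data
// pointer. The cancellation itself completes asynchronously via a CQE.
static void cancel_op(struct io_uring *ring, void *user_data) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  if (!sqe) return; // SQ full; a real backend would flush and retry
  io_uring_prep_cancel(sqe, user_data, 0);
  io_uring_submit(ring);
}
```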
@@ -316,16 +331,20 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   long dynamic_len = length == Qnil;
   long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
   long buf_pos = NUM2INT(pos);
+  int shrinkable;
+  char *buf;
+  long total = 0;
+  int read_to_eof = RTEST(to_eof);
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+
   if (str != Qnil) {
     int current_len = RSTRING_LEN(str);
     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
   }
   else buf_pos = 0;
-  int shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
-  char *buf = RSTRING_PTR(str) + buf_pos;
-  long total = 0;
-  int read_to_eof = RTEST(to_eof);
-  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
+  buf = RSTRING_PTR(str) + buf_pos;
 
   GetBackend(self, backend);
   if (underlying_io != Qnil) io = underlying_io;
@@ -333,16 +352,18 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
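`Backend_read` preps an `IORING_OP_READ` SQE, suspends the fiber until the CQE arrives, then releases the op context. Stripped of fiber scheduling, the same submit-and-await cycle with plain liburing looks like this (a self-contained sketch; the file path is arbitrary):

```c
#include <liburing.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

// Read up to sizeof(buf) bytes from fd at the current offset (-1),
// waiting synchronously for the single completion.
int main(void) {
  struct io_uring ring;
  struct io_uring_sqe *sqe;
  struct io_uring_cqe *cqe;
  char buf[4096];
  int fd = open("/etc/hostname", O_RDONLY);
  if (fd < 0 || io_uring_queue_init(8, &ring, 0)) return 1;

  sqe = io_uring_get_sqe(&ring);
  io_uring_prep_read(sqe, fd, buf, sizeof(buf), -1);
  io_uring_submit(&ring);

  io_uring_wait_cqe(&ring, &cqe);
  if (cqe->res >= 0) printf("read %d bytes\n", cqe->res);
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  close(fd);
  return 0;
}
```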
@@ -403,10 +424,13 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    ssize_t result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
-    ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -453,10 +477,13 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    ssize_t result;
+    int completed;
+
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
-    ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -483,6 +510,9 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io;
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+  long left = len;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -491,18 +521,17 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  long left = len;
-
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -551,10 +580,13 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       free(iov);
       context_attach_buffers(ctx, argc, argv);
@@ -605,15 +637,18 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   long dynamic_len = length == Qnil;
   long len = dynamic_len ? 4096 : NUM2INT(length);
   long buf_pos = NUM2INT(pos);
+  int shrinkable;
+  char *buf;
+  long total = 0;
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);;
+
   if (str != Qnil) {
     int current_len = RSTRING_LEN(str);
     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
   }
   else buf_pos = 0;
-  int shrinkable = io_setstrbuf(&str, buf_pos + len);
-  char *buf = RSTRING_PTR(str) + buf_pos;
-  long total = 0;
-  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  shrinkable = io_setstrbuf(&str, buf_pos + len);
+  buf = RSTRING_PTR(str) + buf_pos;
 
   GetBackend(self, backend);
   if (underlying_io != Qnil) io = underlying_io;
@@ -621,16 +656,18 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -677,10 +714,13 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -726,10 +766,13 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -755,6 +798,10 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io;
+  char *buf;
+  long len;
+  long left;
+  int flags_int;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -763,19 +810,22 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  long left = len;
-  int flags_int = NUM2INT(flags);
+  buf = StringValuePtr(str);
+  len = RSTRING_LEN(str);
+  left = len;
+  flags_int = NUM2INT(flags);
 
   while (left > 0) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     if (!completed) {
       context_attach_buffers(ctx, 1, &str);
       RAISE_IF_EXCEPTION(resume_value);
@@ -809,10 +859,13 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int fd;
+    int completed;
+
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
 
-    int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
     if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
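`io_uring_backend_accept` drives `IORING_OP_ACCEPT` the same way; the accepted fd comes back in the CQE result. A hedged liburing-only skeleton (listening-socket setup omitted; `listen_fd` is assumed):

```c
#include <liburing.h>
#include <sys/socket.h>

// Accept one connection on listen_fd via io_uring; returns the new fd
// or a negative errno taken from the CQE.
static int accept_one(struct io_uring *ring, int listen_fd) {
  struct sockaddr_storage addr;
  socklen_t len = sizeof(addr);
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  struct io_uring_cqe *cqe;
  int fd;

  io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&addr, &len, 0);
  io_uring_submit(ring);
  io_uring_wait_cqe(ring, &cqe);
  fd = cqe->res;
  io_uring_cqe_seen(ring, cqe);
  return fd;
}
```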
@@ -831,7 +884,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     rb_io_synchronized(fp);
 
     // if (rsock_do_not_reverse_lookup) {
-
+    //   fp->mode |= FMODE_NOREVLOOKUP;
     // }
     if (loop) {
       rb_yield(socket);
@@ -863,6 +916,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   rb_io_t *dest_fptr;
   VALUE underlying_io;
   int total = 0;
+  VALUE resume_value = Qnil;
 
   underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
@@ -875,15 +929,16 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   GetOpenFile(dest, dest_fptr);
   io_unset_nonblock(dest_fptr, dest);
 
-  VALUE resume_value = Qnil;
-
   while (1) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    int result;
+    int completed;
+
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
 
-    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    int completed = context_store_release(&backend->store, ctx);
+    result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+    completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
     if (!completed) return resume_value;
 
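`io_uring_backend_splice` loops over `IORING_OP_SPLICE` submissions until the requested amount (or EOF) has been moved. A single iteration in plain liburing, as a sketch (splice(2) still requires a pipe on at least one side):

```c
#include <liburing.h>

// Move up to maxlen bytes from src_fd to dest_fd with a single
// IORING_OP_SPLICE; returns the number of bytes moved, 0 at EOF,
// or a negative errno from the CQE.
static int splice_once(struct io_uring *ring, int src_fd, int dest_fd, unsigned maxlen) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  struct io_uring_cqe *cqe;
  int res;

  io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, maxlen, 0);
  io_uring_submit(ring);
  io_uring_wait_cqe(ring, &cqe);
  res = cqe->res;
  io_uring_cqe_seen(ring, cqe);
  return res;
}
```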
@@ -911,29 +966,31 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE chunksize)
   return io_uring_backend_splice(backend, src, dest, chunksize, 1);
 }
 
-
 VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   Backend_t *backend;
   rb_io_t *fptr;
-  struct
-
+  struct sockaddr *ai_addr;
+  int ai_addrlen;
   VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
+  VALUE resume_value = Qnil;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
+  int result;
+  int completed;
+
+  ai_addrlen = backend_getaddrinfo(host, port, &ai_addr);
+
   if (underlying_sock != Qnil) sock = underlying_sock;
 
   GetBackend(self, backend);
   GetOpenFile(sock, fptr);
   io_unset_nonblock(fptr, sock);
 
-
-
-
-
-
-  op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
-  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-  int completed = context_store_release(&backend->store, ctx);
+  ctx = context_store_acquire(&backend->store, OP_CONNECT);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_connect(sqe, fptr->fd, ai_addr, ai_addrlen);
+  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
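`Backend_connect` now resolves the target through a `backend_getaddrinfo` helper and passes the resulting `sockaddr` and length straight to `io_uring_prep_connect`, instead of hand-building a `sockaddr_in`. A sketch of the same resolve-then-connect flow using the standard `getaddrinfo(3)` (not Polyphony's helper):

```c
#include <liburing.h>
#include <netdb.h>
#include <string.h>

// Resolve host:port and submit an IORING_OP_CONNECT for the first result.
// Returns the CQE result (0 on success, negative errno on failure).
static int connect_via_uring(struct io_uring *ring, int sock_fd, const char *host, const char *port) {
  struct addrinfo hints, *ai;
  struct io_uring_sqe *sqe;
  struct io_uring_cqe *cqe;
  int res;

  memset(&hints, 0, sizeof(hints));
  hints.ai_socktype = SOCK_STREAM;
  res = getaddrinfo(host, port, &hints, &ai);
  if (res) return -res; // EAI_* error, negated for uniformity

  sqe = io_uring_get_sqe(ring);
  io_uring_prep_connect(sqe, sock_fd, ai->ai_addr, ai->ai_addrlen);
  io_uring_submit(ring);
  io_uring_wait_cqe(ring, &cqe);
  res = cqe->res;
  io_uring_cqe_seen(ring, cqe);
  freeaddrinfo(ai);
  return res;
}
```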
@@ -946,23 +1003,60 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  VALUE resume_value;
+
   if (underlying_io != Qnil) io = underlying_io;
   GetBackend(self, backend);
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  VALUE resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
+  resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
+
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return self;
 }
 
+VALUE Backend_close(VALUE self, VALUE io) {
+  Backend_t *backend;
+  rb_io_t *fptr;
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  VALUE resume_value = Qnil;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
+  int result;
+  int completed;
+
+  if (underlying_io != Qnil) io = underlying_io;
+  GetBackend(self, backend);
+  GetOpenFile(io, fptr);
+
+  if (fptr->fd < 0) return Qnil;
+
+  io_unset_nonblock(fptr, io);
+
+  ctx = context_store_acquire(&backend->store, OP_CLOSE);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_close(sqe, fptr->fd);
+  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  completed = context_store_release(&backend->store, ctx);
+  RAISE_IF_EXCEPTION(resume_value);
+  if (!completed) return resume_value;
+  RB_GC_GUARD(resume_value);
+
+  if (result < 0) rb_syserr_fail(-result, strerror(-result));
+
+  fptr_finalize(fptr);
+  // fptr->fd = -1;
+  return io;
+}
+
 inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_integral;
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
   ts.tv_sec = duration_integral;
-  ts.tv_nsec = duration_fraction * 1000000000;
+  ts.tv_nsec = floor(duration_fraction * 1000000000);
   return ts;
 }
 
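The new `Backend_close` (registered as `Backend#close` in `Init_Backend` further down) routes close(2) through the ring as `IORING_OP_CLOSE`, so closing participates in the same completion machinery as every other op. Reduced to liburing, the operation is just:

```c
#include <liburing.h>

// Close fd through the ring; the CQE res is 0 or a negative errno,
// mirroring close(2).
static int close_via_uring(struct io_uring *ring, int fd) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  struct io_uring_cqe *cqe;
  int res;

  io_uring_prep_close(sqe, fd);
  io_uring_submit(ring);
  io_uring_wait_cqe(ring, &cqe);
  res = cqe->res;
  io_uring_cqe_seen(ring, cqe);
  return res;
}
```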
@@ -974,18 +1068,18 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
+
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
 
 VALUE Backend_sleep(VALUE self, VALUE duration) {
+  VALUE resume_value = Qnil;
   Backend_t *backend;
   GetBackend(self, backend);
 
-  VALUE resume_value = Qnil;
   io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -994,29 +1088,34 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
 
 VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
-
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;
+
   GetBackend(self, backend);
-  double next_time = 0.;
 
   while (1) {
-    double
-    if (
-
-
-
-
-
-
-
-
+    double now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      RAISE_IF_EXCEPTION(resume_value);
+      if (!completed) return resume_value;
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }
 
     rb_yield(Qnil);
 
     while (1) {
-
-      if (
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
    }
   }
+  RB_GC_GUARD(resume_value);
 }
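The rewritten `Backend_timer_loop` tracks an absolute `next_time_ns` target and sleeps only for the remaining delta, then advances the target in whole intervals past "now", so a slow block body skips missed ticks instead of letting the timer drift. The scheduling arithmetic in isolation (a hedged sketch; the clock helper below is ours, not the `current_time_ns` from backend_common.c):

```c
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

// Return how long to sleep before the next tick (0 if it is already due),
// then advance the absolute target by whole intervals until it lies in
// the future, skipping any missed ticks rather than accumulating drift.
static uint64_t advance_and_sleep_ns(uint64_t *next_time_ns, uint64_t interval_ns) {
  uint64_t now = now_ns();
  uint64_t sleep_ns;
  if (*next_time_ns == 0) *next_time_ns = now + interval_ns;
  sleep_ns = (*next_time_ns > now) ? (*next_time_ns - now) : 0;
  do { *next_time_ns += interval_ns; } while (*next_time_ns <= now);
  return sleep_ns;
}
```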
@@ -1025,12 +1124,13 @@ struct Backend_timeout_ctx {
 };
 
 VALUE Backend_timeout_ensure(VALUE arg) {
-
-
-
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  if (timeout_ctx->ctx->ref_count) {
+    struct io_uring_sqe *sqe;
 
+    timeout_ctx->ctx->result = -ECANCELED;
     // op was not completed, so we need to cancel it
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+    sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
     io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
     timeout_ctx->backend->pending_sqes = 0;
     io_uring_submit(&timeout_ctx->backend->ring);
@@ -1043,24 +1143,30 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE duration;
   VALUE exception;
   VALUE move_on_value = Qnil;
-
-
-  struct
+  struct Backend_timeout_ctx timeout_ctx;
+  op_context_t *ctx;
+  struct io_uring_sqe *sqe;
   Backend_t *backend;
-
+  struct __kernel_timespec ts;
   VALUE result = Qnil;
-  VALUE timeout
+  VALUE timeout;
 
-
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
 
-
+  ts = duration_to_timespec(duration);
+  GetBackend(self, backend);
+  timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  sqe = io_uring_get_sqe(&backend->ring);
+  ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_sqe_set_data(sqe, ctx);
   io_uring_backend_defer_submit(backend);
   backend->base.op_count++;
 
-
+  timeout_ctx.backend = backend;
+  timeout_ctx.ctx = ctx;
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
 
   if (result == timeout) {
@@ -1077,19 +1183,21 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
 VALUE Backend_waitpid(VALUE self, VALUE pid) {
   int pid_int = NUM2INT(pid);
   int fd = pidfd_open(pid_int, 0);
+  int status;
+  pid_t ret;
 
   if (fd >= 0) {
+    VALUE resume_value;
     Backend_t *backend;
     GetBackend(self, backend);
 
-    VALUE resume_value = io_uring_backend_wait_fd(backend, fd, 0);
+    resume_value = io_uring_backend_wait_fd(backend, fd, 0);
     close(fd);
     RAISE_IF_EXCEPTION(resume_value);
     RB_GC_GUARD(resume_value);
   }
 
-  int status;
-  pid_t ret = waitpid(pid_int, &status, WNOHANG);
+  ret = waitpid(pid_int, &status, WNOHANG);
   if (ret < 0) {
     int e = errno;
     if (e == ECHILD)
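`Backend_waitpid` waits for the child via a pollable pidfd (`pidfd_open(2)`), then reaps with `waitpid(..., WNOHANG)` so the reap itself never blocks. The same two-step pattern with poll(2) standing in for the ring (a sketch; requires Linux 5.3+, and `SYS_pidfd_open` may need recent kernel headers):

```c
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>

// Wait for pid to exit without a blocking waitpid: poll its pidfd for
// readability, then reap with WNOHANG.
static pid_t wait_child(pid_t pid, int *status) {
  int fd = (int)syscall(SYS_pidfd_open, pid, 0);
  if (fd >= 0) {
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    poll(&pfd, 1, -1); // becomes readable once the child has exited
    close(fd);
  }
  return waitpid(pid, status, WNOHANG);
}
```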
@@ -1102,6 +1210,8 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
 
 VALUE Backend_wait_event(VALUE self, VALUE raise) {
   Backend_t *backend;
+  VALUE resume_value;
+
   GetBackend(self, backend);
 
   if (backend->event_fd == -1) {
@@ -1112,7 +1222,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
     }
   }
 
-  VALUE resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
+  resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
   if (RTEST(raise)) RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return resume_value;
@@ -1125,6 +1235,7 @@ VALUE Backend_kind(VALUE self) {
 struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
   rb_io_t *fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -1132,17 +1243,15 @@ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, V
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_write(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), 0);
   return sqe;
 }
 
 struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
   rb_io_t *fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(io, ID_ivar_io);
   if (underlying_io != Qnil) io = underlying_io;
@@ -1150,12 +1259,8 @@ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VA
   GetOpenFile(io, fptr);
   io_unset_nonblock(fptr, io);
 
-  char *buf = StringValuePtr(str);
-  long len = RSTRING_LEN(str);
-  int flags_int = NUM2INT(flags);
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+  sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_send(sqe, fptr->fd, StringValuePtr(str), RSTRING_LEN(str), NUM2INT(flags));
   return sqe;
 }
 
@@ -1163,6 +1268,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
   rb_io_t *src_fptr;
   rb_io_t *dest_fptr;
   VALUE underlying_io;
+  struct io_uring_sqe *sqe;
 
   underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
@@ -1175,7 +1281,7 @@ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src,
   GetOpenFile(dest, dest_fptr);
   io_unset_nonblock(dest_fptr, dest);
 
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
   return sqe;
 }
@@ -1203,14 +1309,19 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   unsigned int sqe_count = 0;
   struct io_uring_sqe *last_sqe = 0;
   Backend_t *backend;
+  int result;
+  int completed;
+  op_context_t *ctx;
+
   GetBackend(self, backend);
   if (argc == 0) return resume_value;
 
-  op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+  ctx = context_store_acquire(&backend->store, OP_CHAIN);
   for (int i = 0; i < argc; i++) {
     VALUE op = argv[i];
     VALUE op_type = RARRAY_AREF(op, 0);
     VALUE op_len = RARRAY_LEN(op);
+    unsigned int flags;
 
     if (op_type == SYM_write && op_len == 3) {
       last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
@@ -1220,13 +1331,16 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     else if (op_type == SYM_splice && op_len == 4)
       last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
     else {
+
       if (sqe_count) {
+        struct io_uring_sqe *sqe;
+
         io_uring_sqe_set_data(last_sqe, ctx);
         io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
 
         ctx->ref_count = sqe_count;
         ctx->result = -ECANCELED;
-        struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+        sqe = io_uring_get_sqe(&backend->ring);
         io_uring_prep_cancel(sqe, ctx, 0);
         backend->pending_sqes = 0;
         io_uring_submit(&backend->ring);
@@ -1239,7 +1353,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     }
 
     io_uring_sqe_set_data(last_sqe, ctx);
-    unsigned int flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
+    flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
     io_uring_sqe_set_flags(last_sqe, flags);
     sqe_count++;
   }
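`Backend_chain` marks every SQE except the last with `IOSQE_IO_LINK`, so the kernel executes the chained operations sequentially and fails the remainder with `-ECANCELED` if any link errors. The flag mechanics in plain liburing (a write-then-close pair, purely illustrative):

```c
#include <liburing.h>

// Queue write-then-close as a linked pair: the close runs only after the
// write completes, and is cancelled (-ECANCELED) if the write fails.
static void chain_write_close(struct io_uring *ring, int fd, const void *buf, unsigned len) {
  struct io_uring_sqe *sqe;

  sqe = io_uring_get_sqe(ring);
  io_uring_prep_write(sqe, fd, buf, len, 0);
  io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK); // link to the next SQE

  sqe = io_uring_get_sqe(ring);
  io_uring_prep_close(sqe, fd);

  io_uring_submit(ring);
}
```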
@@ -1248,14 +1362,16 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
   resume_value = backend_await((struct Backend_base *)backend);
-  int result = ctx->result;
-  int completed = context_store_release(&backend->store, ctx);
+  result = ctx->result;
+  completed = context_store_release(&backend->store, ctx);
   if (!completed) {
+    struct io_uring_sqe *sqe;
+
     Backend_chain_ctx_attach_buffers(ctx, argc, argv);
 
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
@@ -1319,8 +1435,10 @@ static inline void splice_chunks_get_sqe(
 }
 
 static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+  struct io_uring_sqe *sqe;
+
   ctx->result = -ECANCELED;
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_cancel(sqe, ctx, 0);
   backend->pending_sqes = 0;
   io_uring_submit(&backend->ring);
@@ -1333,9 +1451,11 @@ static inline int splice_chunks_await_ops(
   VALUE *switchpoint_result
 )
 {
+  int completed;
   int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+
   if (result) (*result) = res;
-  int completed = context_store_release(&backend->store, *ctx);
+  completed = context_store_release(&backend->store, *ctx);
   if (!completed) {
     splice_chunks_cancel(backend, *ctx);
     if (TEST_EXCEPTION(*switchpoint_result)) return 1;
@@ -1349,17 +1469,22 @@ static inline int splice_chunks_await_ops(
 
 VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   Backend_t *backend;
-  GetBackend(self, backend);
   int total = 0;
   int err = 0;
   VALUE switchpoint_result = Qnil;
   op_context_t *ctx = 0;
   struct io_uring_sqe *sqe = 0;
-
+  int maxlen;
+  VALUE underlying_io;
+  VALUE str = Qnil;
+  VALUE chunk_len_value = Qnil;
   rb_io_t *src_fptr;
   rb_io_t *dest_fptr;
+  int pipefd[2] = { -1, -1 };
+
+  GetBackend(self, backend);
 
-  VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+  underlying_io = rb_ivar_get(src, ID_ivar_io);
   if (underlying_io != Qnil) src = underlying_io;
   GetOpenFile(src, src_fptr);
   io_verify_blocking_mode(src_fptr, src, Qtrue);
@@ -1370,11 +1495,8 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   GetOpenFile(dest, dest_fptr);
   io_verify_blocking_mode(dest_fptr, dest, Qtrue);
 
-  int maxlen = NUM2INT(chunk_size);
-  VALUE str = Qnil;
-  VALUE chunk_len_value = Qnil;
+  maxlen = NUM2INT(chunk_size);
 
-  int pipefd[2] = { -1, -1 };
   if (pipe(pipefd) == -1) {
     err = errno;
     goto syscallerror;
@@ -1518,6 +1640,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "write", Backend_write_m, -1);
+  rb_define_method(cBackend, "close", Backend_close, 1);
 
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
   SYM_send = ID2SYM(rb_intern("send"));