polyphony 1.4 → 1.5
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/TODO.md +5 -10
- data/examples/pipes/http_server.rb +42 -12
- data/examples/pipes/http_server2.rb +45 -0
- data/ext/polyphony/backend_common.h +5 -0
- data/ext/polyphony/backend_io_uring.c +140 -120
- data/ext/polyphony/backend_libev.c +43 -16
- data/ext/polyphony/extconf.rb +6 -2
- data/ext/polyphony/pipe.c +1 -1
- data/ext/polyphony/polyphony.c +0 -20
- data/ext/polyphony/polyphony.h +0 -5
- data/ext/polyphony/win_uio.h +18 -0
- data/lib/polyphony/extensions/socket.rb +0 -14
- data/lib/polyphony/extensions/timeout.rb +5 -1
- data/lib/polyphony/version.rb +1 -1
- data/test/test_backend.rb +4 -2
- data/test/test_ext.rb +14 -0
- data/test/test_global_api.rb +4 -4
- data/test/test_socket.rb +0 -95
- metadata +8 -6
data/ext/polyphony/backend_io_uring.c:

@@ -138,12 +138,28 @@ VALUE Backend_post_fork(VALUE self) {
 typedef struct poll_context {
   struct io_uring *ring;
   struct io_uring_cqe *cqe;
+  int pending_sqes;
   int result;
 } poll_context_t;
 
+// This function combines the functionality of io_uring_wait_cqe() and io_uring_submit_and_wait()
+static inline int io_uring_submit_and_wait_cqe(struct io_uring *ring,
+                                               struct io_uring_cqe **cqe_ptr)
+{
+  if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr) {
+    io_uring_submit(ring);
+    return 0;
+  }
+
+  *cqe_ptr = NULL;
+  return io_uring_submit_and_wait(ring, 1);
+}
+
 void *io_uring_backend_poll_without_gvl(void *ptr) {
   poll_context_t *ctx = (poll_context_t *)ptr;
-  ctx->result = io_uring_wait_cqe(ctx->ring, &ctx->cqe);
+  ctx->result = ctx->pending_sqes ?
+    io_uring_submit_and_wait_cqe(ctx->ring, &ctx->cqe) :
+    io_uring_wait_cqe(ctx->ring, &ctx->cqe);
   return NULL;
 }
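
The combined call saves a syscall on the hot path: if a completion is already available, submission piggybacks on the peek and the function returns immediately; otherwise a single io_uring_enter() both submits the pending SQEs and waits. A minimal sketch of the resulting poll pattern (illustrative only; ring, pending_sqes and handle_completion are stand-ins, not Polyphony API):

    struct io_uring_cqe *cqe = NULL;
    int ret = pending_sqes
      ? io_uring_submit_and_wait_cqe(&ring, &cqe)  // submit + wait in one enter
      : io_uring_wait_cqe(&ring, &cqe);            // nothing to submit, just wait
    if (!ret && cqe) {
      handle_completion(cqe);                      // hypothetical completion handler
      io_uring_cqe_seen(&ring, cqe);               // mark the CQE as consumed
    }
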
@@ -152,6 +168,8 @@ static inline bool cq_ring_needs_flush(struct io_uring *ring) {
   return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
+#define MULTISHOT_ACCEPT_QUEUE(socket) (rb_ivar_get(socket, ID_ivar_multishot_accept_queue))
+
 static void handle_multishot_accept_completion(op_context_t *ctx, struct io_uring_cqe *cqe, Backend_t *backend) {
   // printf("handle_multishot_accept_completion result: %d\n", ctx->result);
   if (ctx->result == -ECANCELED) {
@@ -162,9 +180,9 @@ static void handle_multishot_accept_completion(op_context_t *ctx, struct io_urin
     if (!(cqe->flags & IORING_CQE_F_MORE)) {
       context_store_release(&backend->store, ctx);
     }
-    VALUE queue = rb_ivar_get(ctx->resume_value, ID_ivar_multishot_accept_queue);
+    VALUE queue = MULTISHOT_ACCEPT_QUEUE(ctx->resume_value);
     if (queue != Qnil)
-      Queue_push(queue,
+      Queue_push(queue, INT2FIX(ctx->result));
   }
 }
 
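
A multishot accept SQE is armed once and then produces a stream of CQEs, one per accepted connection; the handler above pushes each result into a Ruby queue stored in an ivar on the server socket. The completion protocol, roughly (a sketch, not Polyphony API):

    int fd = cqe->res;                      // accepted fd, or -errno on failure
    if (!(cqe->flags & IORING_CQE_F_MORE)) {
      // the kernel has disarmed the multishot op: release its context
      // (as context_store_release() does above) or re-arm it
    }
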
@@ -236,7 +254,7 @@ inline void io_uring_backend_defer_submit(Backend_t *backend) {
 void io_uring_backend_poll(Backend_t *backend) {
   poll_context_t poll_ctx;
   poll_ctx.ring = &backend->ring;
-
+  poll_ctx.pending_sqes = backend->pending_sqes;
 
 wait_cqe:
   backend->base.currently_polling = 1;
@@ -247,8 +265,10 @@ wait_cqe:
     return;
   }
 
-  io_uring_backend_handle_completion(poll_ctx.cqe, backend);
-  io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
+  if (poll_ctx.cqe) {
+    io_uring_backend_handle_completion(poll_ctx.cqe, backend);
+    io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
+  }
 }
 
 inline VALUE Backend_poll(VALUE self, VALUE blocking) {
@@ -284,6 +304,7 @@ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
   runqueue_delete(&backend->base.runqueue, fiber);
 }
 
+// This function is deprecated
 inline VALUE Backend_switch_fiber(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
@@ -331,7 +352,22 @@ VALUE Backend_wakeup(VALUE self) {
   return Qnil;
 }
 
+static inline VALUE io_uring_backend_await(VALUE self, struct Backend_t *backend) {
+  backend->base.pending_count++;
+
+  VALUE ret = backend_base_switch_fiber(self, &backend->base);
+
+  // run next fiber
+  COND_TRACE(&backend->base, 4, SYM_unblock, rb_fiber_current(), ret, CALLER());
+
+  backend->base.pending_count--;
+  RB_GC_GUARD(ret);
+  return ret;
+
+}
+
 int io_uring_backend_defer_submit_and_await(
+  VALUE self,
   Backend_t *backend,
   struct io_uring_sqe *sqe,
   op_context_t *ctx,
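
io_uring_backend_await() is the single switchpoint all the operations below funnel through; threading self (the Backend VALUE) down to it is what the long series of signature changes in this file is for. The shared life cycle of each op, condensed from the surrounding call sites (a sketch, not verbatim code; fd, buf, len and buffer stand in for per-op state):

    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ); // per-op context
    struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);        // grab an SQE
    io_uring_prep_read(sqe, fd, buf, len, -1);                           // prep the operation
    result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
    completed = context_store_release(&backend->store, ctx);             // ctx may outlive the fiber
    if (!completed) context_attach_buffers(ctx, 1, &buffer);             // keep buffer alive for the kernel
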
@@ -344,7 +380,7 @@ int io_uring_backend_defer_submit_and_await(
   if (sqe) io_uring_sqe_set_data(sqe, ctx);
   io_uring_backend_defer_submit(backend);
 
-  switchpoint_result =
+  switchpoint_result = io_uring_backend_await(self, backend);
 
   if (ctx->ref_count > 1) {
     struct io_uring_sqe *sqe;
@@ -363,14 +399,14 @@ int io_uring_backend_defer_submit_and_await(
   return ctx->result;
 }
 
-VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
+VALUE io_uring_backend_wait_fd(VALUE self, Backend_t *backend, int fd, int write) {
   op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
   VALUE resumed_value = Qnil;
 
   struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);
 
-  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
+  io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resumed_value);
   context_store_release(&backend->store, ctx);
 
   RB_GC_GUARD(resumed_value);
@@ -378,20 +414,24 @@ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
 }
 
 static inline int fd_from_io(VALUE io, rb_io_t **fptr, int write_mode, int rectify_file_pos) {
+  if (TYPE(io) == T_FIXNUM) {
+    *fptr = NULL;
+    return FIX2INT(io);
+  }
+
   if (rb_obj_class(io) == cPipe) {
     *fptr = NULL;
     return Pipe_get_fd(io, write_mode);
   }
-
-
-
-
-
-
-
-
-
-  }
+
+  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+
+  GetOpenFile(io, *fptr);
+  int fd = rb_io_descriptor(io);
+  io_unset_nonblock(io, fd);
+  if (rectify_file_pos) rectify_io_file_pos(*fptr);
+  return fd;
 }
 
 VALUE Backend_read(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE to_eof, VALUE pos) {
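
fd_from_io() now recognizes three input shapes before touching the Ruby IO machinery. A sketch of the dispatch (hypothetical callers; the descriptor value is an example):

    rb_io_t *fptr;
    int fd;
    fd = fd_from_io(INT2FIX(5), &fptr, 0, 0); // raw fd passed as a Fixnum: fptr stays NULL
    fd = fd_from_io(pipe_obj, &fptr, 1, 0);   // Polyphony::Pipe: fd via Pipe_get_fd()
    fd = fd_from_io(io_obj, &fptr, 0, 1);     // plain IO: GetOpenFile + rb_io_descriptor,
                                              // nonblock flag cleared, file pos rectified
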
@@ -415,7 +455,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE to_eo
 
   io_uring_prep_read(sqe, fd, buffer_spec.ptr, buffer_spec.len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -476,7 +516,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
 
   io_uring_prep_read(sqe, fd, ptr, len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -525,7 +565,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
 
   io_uring_prep_read(sqe, fd, ptr, len, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -569,7 +609,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE buffer) {
 
   io_uring_prep_write(sqe, fd, buffer_spec.ptr, left, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -620,7 +660,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
 
   io_uring_prep_writev(sqe, fd, iov_ptr, iov_count, -1);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     free(iov);
@@ -686,7 +726,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE buffer, VALUE length, VALUE pos)
 
   io_uring_prep_recv(sqe, fd, buffer_spec.ptr, buffer_spec.len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -744,7 +784,7 @@ VALUE Backend_recvmsg(VALUE self, VALUE io, VALUE buffer, VALUE maxlen, VALUE po
 
   io_uring_prep_recvmsg(sqe, fd, &msg, NUM2INT(flags));
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -794,7 +834,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
 
   io_uring_prep_recv(sqe, fd, ptr, len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -842,7 +882,7 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
 
   io_uring_prep_recv(sqe, fd, ptr, len, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -886,7 +926,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE buffer, VALUE flags) {
 
   io_uring_prep_send(sqe, fd, buffer_spec.ptr, left, flags_int);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -947,7 +987,7 @@ VALUE Backend_sendmsg(VALUE self, VALUE io, VALUE buffer, VALUE flags, VALUE des
 
   io_uring_prep_sendmsg(sqe, fd, &msg, flags_int);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     context_attach_buffers(ctx, 1, &buffer);
@@ -968,7 +1008,20 @@ VALUE Backend_sendmsg(VALUE self, VALUE io, VALUE buffer, VALUE flags, VALUE des
   return INT2FIX(buffer_spec.len);
 }
 
-VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE socket_class, int loop) {
+inline VALUE create_socket_from_fd(int fd, VALUE socket_class) {
+  rb_io_t *fp;
+
+  VALUE socket = rb_obj_alloc(socket_class);
+  MakeOpenFile(socket, fp);
+  rb_update_max_fd(fd);
+  fp->fd = fd;
+  fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
+  rb_io_ascii8bit_binmode(socket);
+  rb_io_synchronized(fp);
+  return socket;
+}
+
+VALUE io_uring_backend_accept(VALUE self, Backend_t *backend, VALUE server_socket, VALUE socket_class, int loop) {
   int server_fd;
   rb_io_t *server_fptr;
   struct sockaddr addr;
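
create_socket_from_fd() extracts the boilerplate for wrapping a raw accepted descriptor in a Ruby socket object (allocate, attach an rb_io_t, mark it read/write duplex, binary mode, synchronized). It is shared by the one-shot accept, the queue-based accept and the multishot accept loop below. Roughly (a sketch; accepted_fd and socket_class are stand-ins):

    VALUE socket = create_socket_from_fd(accepted_fd, socket_class); // e.g. socket_class = TCPSocket
    rb_yield(socket); // accept_loop style, or return it as Backend_accept does
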
@@ -986,7 +1039,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
   io_uring_prep_accept(sqe, server_fd, &addr, &len, 0);
 
-  fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  fd = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -995,19 +1048,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
   if (fd < 0)
     rb_syserr_fail(-fd, strerror(-fd));
   else {
-
-
-    socket = rb_obj_alloc(socket_class);
-    MakeOpenFile(socket, fp);
-    rb_update_max_fd(fd);
-    fp->fd = fd;
-    fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-    rb_io_ascii8bit_binmode(socket);
-    rb_io_synchronized(fp);
-
-    // if (rsock_do_not_reverse_lookup) {
-    //   fp->mode |= FMODE_NOREVLOOKUP;
-    // }
+    socket = create_socket_from_fd(fd, socket_class);
     if (loop) {
       rb_yield(socket);
       socket = Qnil;
@@ -1022,30 +1063,20 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
 VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
 #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
+  VALUE accept_queue = MULTISHOT_ACCEPT_QUEUE(server_socket);
   if (accept_queue != Qnil) {
     VALUE next = Queue_shift(0, 0, accept_queue);
     int fd = NUM2INT(next);
     if (fd < 0)
       rb_syserr_fail(-fd, strerror(-fd));
-    else
-
-
-      VALUE socket = rb_obj_alloc(socket_class);
-      MakeOpenFile(socket, fp);
-      rb_update_max_fd(fd);
-      fp->fd = fd;
-      fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-      rb_io_ascii8bit_binmode(socket);
-      rb_io_synchronized(fp);
-      return socket;
-    }
+    else
+      return create_socket_from_fd(fd, socket_class);
   }
 #endif
 
   Backend_t *backend;
   GetBackend(self, backend);
-  return io_uring_backend_accept(backend, server_socket, socket_class, 0);
+  return io_uring_backend_accept(self, backend, server_socket, socket_class, 0);
 }
 
 #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
@@ -1053,9 +1084,25 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
 struct multishot_accept_ctx {
   Backend_t *backend;
   VALUE server_socket;
+  VALUE socket_class;
   op_context_t *op_ctx;
 };
 
+static inline VALUE accept_loop_from_queue(VALUE server_socket, VALUE socket_class) {
+  VALUE accept_queue = MULTISHOT_ACCEPT_QUEUE(server_socket);
+  if (accept_queue == Qnil) return Qnil;
+
+  while (true) {
+    VALUE next = Queue_shift(0, 0, accept_queue);
+    int fd = NUM2INT(next);
+    if (fd < 0)
+      rb_syserr_fail(-fd, strerror(-fd));
+    else
+      rb_yield(create_socket_from_fd(fd, socket_class));
+  }
+  return Qtrue;
+}
+
 VALUE multishot_accept_start(struct multishot_accept_ctx *ctx) {
   int server_fd;
   rb_io_t *server_fptr;
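
accept_loop_from_queue() is the consumer half of a producer/consumer pair: handle_multishot_accept_completion() (earlier in this file) pushes one result per CQE into the queue, and this loop pops them and yields wrapped sockets. The pairing, condensed (not verbatim):

    // producer: the io_uring completion handler, one CQE per accepted connection
    Queue_push(queue, INT2FIX(ctx->result));        // accepted fd, or -errno

    // consumer: the server fiber inside accept_loop_from_queue()
    VALUE next = Queue_shift(0, 0, accept_queue);   // suspends the fiber until a result arrives
    rb_yield(create_socket_from_fd(NUM2INT(next), socket_class));
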
@@ -1071,7 +1118,7 @@ VALUE multishot_accept_start(struct multishot_accept_ctx *ctx) {
   io_uring_sqe_set_data(sqe, ctx->op_ctx);
   io_uring_backend_defer_submit(ctx->backend);
 
-
+  accept_loop_from_queue(ctx->server_socket, ctx->socket_class);
 
   return Qnil;
 }
@@ -1087,55 +1134,30 @@ VALUE multishot_accept_cleanup(struct multishot_accept_ctx *ctx) {
   return Qnil;
 }
 
-VALUE Backend_multishot_accept(VALUE self, VALUE server_socket) {
-
-  GetBackend(self, backend);
-
-  struct multishot_accept_ctx ctx;
-  ctx.backend = backend;
-  ctx.server_socket = server_socket;
+VALUE multishot_accept_loop(Backend_t *backend, VALUE server_socket, VALUE socket_class) {
+  struct multishot_accept_ctx ctx = { backend, server_socket, socket_class };
 
   return rb_ensure(
     SAFE(multishot_accept_start), (VALUE)&ctx,
     SAFE(multishot_accept_cleanup), (VALUE)&ctx
   );
 }
-
 #endif
 
 VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
-#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
-  if (accept_queue != Qnil) {
-    while (true) {
-      VALUE next = Queue_shift(0, 0, accept_queue);
-      int fd = NUM2INT(next);
-      if (fd < 0)
-        rb_syserr_fail(-fd, strerror(-fd));
-      else {
-        rb_io_t *fp;
-
-        VALUE socket = rb_obj_alloc(socket_class);
-        MakeOpenFile(socket, fp);
-        rb_update_max_fd(fd);
-        fp->fd = fd;
-        fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
-        rb_io_ascii8bit_binmode(socket);
-        rb_io_synchronized(fp);
-        rb_yield(socket);
-      }
-    }
-    return self;
-  }
-#endif
-
   Backend_t *backend;
   GetBackend(self, backend);
-
+
+#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+  multishot_accept_loop(backend, server_socket, socket_class);
+#else
+  io_uring_backend_accept(self, backend, server_socket, socket_class, 1);
+#endif
+
   return self;
 }
 
-VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int maxlen) {
+VALUE io_uring_backend_splice(VALUE self, Backend_t *backend, VALUE src, VALUE dest, int maxlen) {
   int src_fd;
   int dest_fd;
   rb_io_t *src_fptr;
@@ -1156,7 +1178,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int max
 
   io_uring_prep_splice(sqe, src_fd, -1, dest_fd, -1, maxlen, 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1174,10 +1196,11 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, int max
 VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   Backend_t *backend;
   GetBackend(self, backend);
-  return io_uring_backend_splice(backend, src, dest, FIX2INT(maxlen));
+  return io_uring_backend_splice(self, backend, src, dest, FIX2INT(maxlen));
 }
 
 struct double_splice_ctx {
+  VALUE self;
   Backend_t *backend;
   VALUE src;
   VALUE dest;
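
Backend_double_splice() moves data src -> pipe -> dest entirely in kernel space; the intermediary pipe is required because splice(2) insists on a pipe at one end. The equivalent raw liburing pattern, in isolation (a sketch with error handling omitted; ring, src_fd and dest_fd are assumed set up elsewhere):

    int pipefd[2];
    pipe(pipefd);
    struct io_uring_sqe *sqe1 = io_uring_get_sqe(&ring);
    io_uring_prep_splice(sqe1, src_fd, -1, pipefd[1], -1, 8192, 0);  // src -> pipe write end
    struct io_uring_sqe *sqe2 = io_uring_get_sqe(&ring);
    io_uring_prep_splice(sqe2, pipefd[0], -1, dest_fd, -1, 8192, 0); // pipe read end -> dest
    io_uring_submit(&ring);
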
@@ -1222,7 +1245,7 @@ VALUE double_splice_safe(struct double_splice_ctx *ctx) {
   io_uring_backend_immediate_submit(ctx->backend);
 
   while (1) {
-    resume_value =
+    resume_value = io_uring_backend_await(ctx->self, ctx->backend);
 
     if ((ctx_src && ctx_src->ref_count == 2 && ctx_dest && ctx_dest->ref_count == 2) || TEST_EXCEPTION(resume_value)) {
       if (ctx_src) {
@@ -1273,7 +1296,7 @@ VALUE double_splice_cleanup(struct double_splice_ctx *ctx) {
 }
 
 VALUE Backend_double_splice(VALUE self, VALUE src, VALUE dest) {
-  struct double_splice_ctx ctx = { NULL, src, dest, {0, 0} };
+  struct double_splice_ctx ctx = { self, NULL, src, dest, {0, 0} };
   GetBackend(self, ctx.backend);
   if (pipe(ctx.pipefd) == -1) rb_syserr_fail(errno, strerror(errno));
 
@@ -1304,7 +1327,7 @@ VALUE Backend_tee(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   io_uring_prep_tee(sqe, src_fd, dest_fd, FIX2INT(maxlen), 0);
 
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1337,7 +1360,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   ctx = context_store_acquire(&backend->store, OP_CONNECT);
   sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_connect(sqe, fd, ai_addr, ai_addrlen);
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1356,7 +1379,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
 
   GetBackend(self, backend);
   fd = fd_from_io(io, &fptr, write_mode, 0);
-  resume_value = io_uring_backend_wait_fd(backend, fd, write_mode);
+  resume_value = io_uring_backend_wait_fd(self, backend, fd, write_mode);
 
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -1378,7 +1401,7 @@ VALUE Backend_close(VALUE self, VALUE io) {
   ctx = context_store_acquire(&backend->store, OP_CLOSE);
   sqe = io_uring_backend_get_sqe(backend);
   io_uring_prep_close(sqe, fd);
-  result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+  result = io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, &resume_value);
   completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
@@ -1386,7 +1409,7 @@ VALUE Backend_close(VALUE self, VALUE io) {
 
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
 
-  fptr_finalize(fptr);
+  if (fptr) fptr_finalize(fptr);
   // fd = -1;
   return io;
 }
@@ -1405,13 +1428,13 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 }
 
 // returns true if completed, 0 otherwise
-int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+int io_uring_backend_submit_timeout_and_await(VALUE self, Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
 
   io_uring_prep_timeout(sqe, &ts, 0, 0);
-  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
+  io_uring_backend_defer_submit_and_await(self, backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
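
Backend_sleep and Backend_timer_loop below both go through this helper, which arms a one-shot io_uring timeout and suspends the calling fiber until the CQE arrives. The underlying liburing call, in isolation (a sketch; the ring is assumed initialized elsewhere):

    struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 }; // 1.5 seconds
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_timeout(sqe, &ts, 0, 0); // count=0, flags=0: complete only on expiry
    io_uring_submit(&ring);                // the CQE carries -ETIME when the timer fires
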
@@ -1420,7 +1443,7 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
+  io_uring_backend_submit_timeout_and_await(self, backend, NUM2DBL(duration), &resume_value);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return resume_value;
@@ -1439,7 +1462,7 @@ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
     if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
     if (next_time_ns > now_ns) {
       double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
-      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      int completed = io_uring_backend_submit_timeout_and_await(self, backend, sleep_duration, &resume_value);
       RAISE_IF_EXCEPTION(resume_value);
       if (!completed) return resume_value;
     }
@@ -1531,7 +1554,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  resume_value = io_uring_backend_wait_fd(backend, fd, 0);
+  resume_value = io_uring_backend_wait_fd(self, backend, fd, 0);
   close(fd);
   RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
@@ -1586,7 +1609,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
   else
     backend->event_fd_ctx->ref_count += 1;
 
-  resume_value =
+  resume_value = io_uring_backend_await(self, backend);
   context_store_release(&backend->store, backend->event_fd_ctx);
 
   if (backend->event_fd_ctx->ref_count == 1) {
@@ -1719,7 +1742,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   backend->base.op_count += sqe_count;
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
-  resume_value =
+  resume_value = io_uring_backend_await(self, backend);
   result = ctx->result;
   completed = context_store_release(&backend->store, ctx);
   if (!completed) {
@@ -1803,6 +1826,7 @@ static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
 }
 
 static inline int splice_chunks_await_ops(
+  VALUE self,
   Backend_t *backend,
   op_context_t **ctx,
   int *result,
@@ -1810,7 +1834,7 @@ static inline int splice_chunks_await_ops(
 )
 {
   int completed;
-  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+  int res = io_uring_backend_defer_submit_and_await(self, backend, 0, *ctx, switchpoint_result);
 
   if (result) (*result) = res;
   completed = context_store_release(&backend->store, *ctx);
@@ -1822,8 +1846,8 @@ static inline int splice_chunks_await_ops(
   return 0;
 }
 
-#define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
-  if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+#define SPLICE_CHUNKS_AWAIT_OPS(self, backend, ctx, result, switchpoint_result) \
+  if (splice_chunks_await_ops(self, backend, ctx, result, switchpoint_result)) goto error;
 
 VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   Backend_t *backend;
@@ -1866,7 +1890,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     splice_chunks_prep_splice(ctx, sqe, src_fd, pipefd[1], maxlen);
     backend->base.op_count++;
 
-    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+    SPLICE_CHUNKS_AWAIT_OPS(self, backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
 
     total += chunk_len;
@@ -1901,7 +1925,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     backend->base.op_count++;
   }
   if (ctx) {
-    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+    SPLICE_CHUNKS_AWAIT_OPS(self, backend, &ctx, 0, &switchpoint_result);
   }
 
   RB_GC_GUARD(chunk_len_value);
@@ -1989,10 +2013,6 @@ void Init_Backend(void) {
   rb_define_method(cBackend, "connect", Backend_connect, 3);
   rb_define_method(cBackend, "feed_loop", Backend_feed_loop, 3);
 
-#ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
-  rb_define_method(cBackend, "multishot_accept", Backend_multishot_accept, 1);
-#endif
-
   rb_define_method(cBackend, "read", Backend_read, 5);
   rb_define_method(cBackend, "read_loop", Backend_read_loop, 2);
   rb_define_method(cBackend, "recv", Backend_recv, 4);