polyphony 0.53.1 → 0.57.0
This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two package versions as published.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +1 -1
- data/.gitignore +3 -1
- data/CHANGELOG.md +50 -24
- data/Gemfile.lock +3 -1
- data/TODO.md +0 -3
- data/examples/core/idle_gc.rb +21 -0
- data/examples/core/queue.rb +19 -0
- data/examples/io/https_server.rb +30 -0
- data/examples/io/pipe.rb +11 -0
- data/examples/io/splice_chunks.rb +29 -0
- data/examples/io/stdio.rb +8 -0
- data/ext/polyphony/backend_common.c +186 -0
- data/ext/polyphony/backend_common.h +25 -130
- data/ext/polyphony/backend_io_uring.c +369 -108
- data/ext/polyphony/backend_io_uring_context.c +14 -2
- data/ext/polyphony/backend_io_uring_context.h +11 -11
- data/ext/polyphony/backend_libev.c +406 -94
- data/ext/polyphony/polyphony.c +17 -15
- data/ext/polyphony/polyphony.h +3 -0
- data/ext/polyphony/runqueue.c +29 -1
- data/ext/polyphony/thread.c +19 -4
- data/lib/polyphony/core/sync.rb +8 -0
- data/lib/polyphony/extensions/openssl.rb +24 -17
- data/lib/polyphony/extensions/socket.rb +6 -20
- data/lib/polyphony/version.rb +1 -1
- data/polyphony.gemspec +1 -0
- data/test/helper.rb +3 -3
- data/test/test_backend.rb +109 -3
- data/test/test_fiber.rb +0 -1
- data/test/test_io.rb +6 -3
- data/test/test_signal.rb +1 -1
- data/test/test_sync.rb +43 -0
- data/test/test_thread_pool.rb +1 -1
- data/test/test_timer.rb +16 -10
- metadata +23 -2
--- data/ext/polyphony/backend_common.h (0.53.1)
+++ data/ext/polyphony/backend_common.h (0.57.0)
@@ -1,17 +1,18 @@
-#
+#ifndef BACKEND_COMMON_H
+#define BACKEND_COMMON_H
 
 #include "ruby.h"
 #include "ruby/io.h"
 
+struct Backend_base {
+  unsigned int currently_polling;
+  unsigned int pending_count;
+  double idle_gc_period;
+  double idle_gc_last_time;
+};
 
 #ifdef POLYPHONY_USE_PIDFD_OPEN
-#ifndef __NR_pidfd_open
-#define __NR_pidfd_open 434 /* System call # on most architectures */
-#endif
-
-static int pidfd_open(pid_t pid, unsigned int flags) {
-  return syscall(__NR_pidfd_open, pid, flags);
-}
+int pidfd_open(pid_t pid, unsigned int flags);
 #endif
 
 //////////////////////////////////////////////////////////////////////
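The new struct Backend_base collects the state shared by the io_uring and libev backends, including the two fields behind the new idle-GC feature (see data/examples/core/idle_gc.rb in the file list above). A minimal usage sketch in Ruby, hypothetical but consistent with the idle_gc_period= backend method registered further down in this diff:

    require 'polyphony'

    # Let the backend trigger GC only when the event loop goes idle,
    # at most once every 60 seconds, instead of pausing busy fibers.
    GC.disable
    Thread.current.backend.idle_gc_period = 60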
@@ -26,75 +27,19 @@ struct io_internal_read_struct {
 
 #define StringValue(v) rb_string_value(&(v))
 
-int io_setstrbuf(VALUE *str, long len) {
-  #ifdef _WIN32
-    len = (len + 1) & ~1L; /* round up for wide char */
-  #endif
-  if (*str == Qnil) {
-    *str = rb_str_new(0, len);
-    return 1;
-  }
-  else {
-    VALUE s = StringValue(*str);
-    long clen = RSTRING_LEN(s);
-    if (clen >= len) {
-      rb_str_modify(s);
-      return 0;
-    }
-    len -= clen;
-  }
-  rb_str_modify_expand(*str, len);
-  return 0;
-}
-
-#define MAX_REALLOC_GAP 4096
-
-inline void io_shrink_read_string(VALUE str, long n) {
-  if (rb_str_capacity(str) - n > MAX_REALLOC_GAP) {
-    rb_str_resize(str, n);
-  }
-}
-
-void io_set_read_length(VALUE str, long n, int shrinkable) {
-  if (RSTRING_LEN(str) != n) {
-    rb_str_modify(str);
-    rb_str_set_len(str, n);
-    if (shrinkable) io_shrink_read_string(str, n);
-  }
-}
-
-inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
-  if (fptr->encs.enc) {
-    return fptr->encs.enc;
-  }
-  return rb_default_external_encoding();
-}
-
-VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-  OBJ_TAINT(str);
-  rb_enc_associate(str, io_read_encoding(fptr));
-  return str;
-}
+int io_setstrbuf(VALUE *str, long len);
+void io_shrink_read_string(VALUE str, long n);
+void io_set_read_length(VALUE str, long n, int shrinkable);
+rb_encoding* io_read_encoding(rb_io_t *fptr);
+VALUE io_enc_str(VALUE str, rb_io_t *fptr);
 
 //////////////////////////////////////////////////////////////////////
 //////////////////////////////////////////////////////////////////////
 
-inline VALUE backend_await(Backend_t *backend) {
-  VALUE ret;
-  backend->pending_count++;
-  ret = Thread_switch_fiber(rb_thread_current());
-  backend->pending_count--;
-  RB_GC_GUARD(ret);
-  return ret;
-}
-
-inline VALUE backend_snooze() {
-  Fiber_make_runnable(rb_fiber_current(), Qnil);
-  return Thread_switch_fiber(rb_thread_current());
-}
+VALUE backend_await(struct Backend_base *backend);
+VALUE backend_snooze();
 
 // macros for doing read loops
-
 #define READ_LOOP_PREPARE_STR() { \
   str = Qnil; \
   shrinkable = io_setstrbuf(&str, len); \
@@ -117,63 +62,13 @@ inline VALUE backend_snooze() {
   READ_LOOP_PREPARE_STR(); \
 }
 
-inline void rectify_io_file_pos(rb_io_t *fptr) {
-
-
-
-
-  if (fptr->rbuf.pos > 0) {
-    lseek(fptr->fd, -fptr->rbuf.pos, SEEK_CUR);
-    fptr->rbuf.pos = 0;
-  }
-}
-
-inline double current_time() {
-  struct timespec ts;
-  clock_gettime(CLOCK_MONOTONIC, &ts);
-  long long ns = ts.tv_sec;
-  ns = ns * 1e9 + ts.tv_nsec;
-  double t = ns;
-  return t / 1e9;
-}
-
-inline VALUE backend_timeout_exception(VALUE exception) {
-  if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
-    return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
-  else if (rb_obj_is_kind_of(exception, rb_cClass) == Qtrue)
-    return rb_funcall(exception, ID_new, 0);
-  else
-    return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
-}
+void rectify_io_file_pos(rb_io_t *fptr);
+double current_time();
+VALUE backend_timeout_exception(VALUE exception);
+VALUE Backend_timeout_ensure_safe(VALUE arg);
+VALUE Backend_timeout_ensure_safe(VALUE arg);
+VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+void backend_run_idle_tasks(struct Backend_base *base);
+void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);
 
-VALUE Backend_timeout_safe(VALUE arg) {
-  return rb_yield(arg);
-}
-
-VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
-  return exception;
-}
-
-VALUE Backend_timeout_ensure_safe(VALUE arg) {
-  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
-}
-
-static VALUE empty_string = Qnil;
-
-VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags) {
-  switch (RARRAY_LEN(ary)) {
-  case 0:
-    return Qnil;
-  case 1:
-    return Backend_send(self, io, RARRAY_AREF(ary, 0), flags);
-  default:
-    if (empty_string == Qnil) {
-      empty_string = rb_str_new_literal("");
-      rb_global_variable(&empty_string);
-    }
-    VALUE joined = rb_ary_join(ary, empty_string);
-    VALUE result = Backend_send(self, io, joined, flags);
-    RB_GC_GUARD(joined);
-    return result;
-  }
-}
+#endif /* BACKEND_COMMON_H */
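The inline helpers removed above now live in the new data/ext/polyphony/backend_common.c (+186 lines in the file list) and are compiled once and shared by both backends, instead of being textually included into each backend source. The behavior of Backend_sendv is visible in the removed code; a hypothetical Ruby model of its dispatch logic:

    # Ruby model of the C Backend_sendv shown above (illustration only):
    # zero buffers -> nil; one buffer -> send it as-is; several -> join them
    # so the data goes out in a single send call.
    def sendv(sock, buffers, flags)
      case buffers.size
      when 0 then nil
      when 1 then sock.send(buffers.first, flags)
      else        sock.send(buffers.join, flags)
      end
    end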
--- data/ext/polyphony/backend_io_uring.c (0.53.1)
+++ data/ext/polyphony/backend_io_uring.c (0.57.0)
@@ -4,7 +4,6 @@
 #include <sys/socket.h>
 #include <sys/uio.h>
 #include <unistd.h>
-#include <fcntl.h>
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <stdnoreturn.h>
@@ -19,39 +18,21 @@
 #include "backend_io_uring_context.h"
 #include "ruby/thread.h"
 #include "ruby/io.h"
+#include "backend_common.h"
 
 VALUE SYM_io_uring;
+VALUE SYM_send;
+VALUE SYM_splice;
+VALUE SYM_write;
 
 #ifdef POLYPHONY_UNSET_NONBLOCK
-ID ID_ivar_is_nonblocking;
-
-// One of the changes introduced in Ruby 3.0 as part of the work on the
-// FiberScheduler interface is that all created sockets are marked as
-// non-blocking. This prevents the io_uring backend from working correctly,
-// since it will return an EAGAIN error just like a normal syscall. So here
-// instead of setting O_NONBLOCK (which is required for the libev backend), we
-// unset it.
-inline void io_unset_nonblock(rb_io_t *fptr, VALUE io) {
-  VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
-  if (is_nonblocking == Qfalse) return;
-
-  rb_ivar_set(io, ID_ivar_is_nonblocking, Qfalse);
-
-  int oflags = fcntl(fptr->fd, F_GETFL);
-  if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
-  oflags &= !O_NONBLOCK;
-  fcntl(fptr->fd, F_SETFL, oflags);
-}
+#define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
 #else
-// NOP
 #define io_unset_nonblock(fptr, io)
 #endif
 
 typedef struct Backend_t {
-  // common fields
-  unsigned int currently_polling;
-  unsigned int pending_count;
-  unsigned int poll_no_wait_count;
+  struct Backend_base base;
 
   // implementation-specific fields
   struct io_uring ring;
@@ -61,8 +42,6 @@ typedef struct Backend_t {
   int event_fd;
 } Backend_t;
 
-#include "backend_common.h"
-
 static size_t Backend_size(const void *ptr) {
   return sizeof(Backend_t);
 }
@@ -86,9 +65,11 @@ static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
-  backend->poll_no_wait_count = 0;
+  backend->base.currently_polling = 0;
+  backend->base.pending_count = 0;
+  backend->base.idle_gc_period = 0;
+  backend->base.idle_gc_last_time = 0;
+
   backend->pending_sqes = 0;
   backend->prepared_limit = 2048;
 
@@ -116,9 +97,8 @@ VALUE Backend_post_fork(VALUE self) {
   io_uring_queue_exit(&backend->ring);
   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
   context_store_free(&backend->store);
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
-  backend->poll_no_wait_count = 0;
+  backend->base.currently_polling = 0;
+  backend->base.pending_count = 0;
   backend->pending_sqes = 0;
 
   return self;
@@ -128,7 +108,7 @@ unsigned int Backend_pending_count(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  return backend->pending_count;
+  return backend->base.pending_count;
 }
 
 typedef struct poll_context {
@@ -155,17 +135,9 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   if (!ctx) return;
 
   ctx->result = cqe->res;
-
-  if (ctx->completed)
-    // already marked as deleted as result of fiber resuming before op
-    // completion, so we can release the context
-    context_store_release(&backend->store, ctx);
-  else {
-    // otherwise, we mark it as completed, schedule the fiber and let it deal
-    // with releasing the context
-    ctx->completed = 1;
-    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
-  }
+  if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+    Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+  context_store_release(&backend->store, ctx);
 }
 
 // adapted from io_uring_peek_batch_cqe in queue.c
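The completion handler above replaces the old completed flag with reference counting: a context now holds one reference for the awaiting fiber and one for the in-flight SQE, and whichever side releases last returns the context to the store. A simplified Ruby model of the lifecycle implied by the context_store_acquire/context_store_release calls (hypothetical; the real code lives in data/ext/polyphony/backend_io_uring_context.c):

    # Hypothetical model of the ref-counted op context.
    class OpContext
      attr_accessor :result, :fiber, :resume_value

      def initialize
        @ref_count = 2 # one ref for the awaiting fiber, one for the in-flight SQE
      end

      # Mirrors context_store_release: true ("completed") only when the
      # last reference is dropped.
      def release
        @ref_count -= 1
        @ref_count.zero?
      end
    end

    ctx = OpContext.new
    ctx.release  # => false: the other side is still pending (op still in flight)
    ctx.release  # => true:  both sides done, context can be recycled

This is why the awaiting code paths below treat a false "completed" result as "the operation is still in flight and must be canceled".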
@@ -205,9 +177,9 @@ void io_uring_backend_poll(Backend_t *backend) {
     io_uring_submit(&backend->ring);
   }
 
-  backend->currently_polling = 1;
+  backend->base.currently_polling = 1;
   rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
-  backend->currently_polling = 0;
+  backend->base.currently_polling = 0;
   if (poll_ctx.result < 0) return;
 
   io_uring_backend_handle_completion(poll_ctx.cqe, backend);
@@ -219,16 +191,6 @@ VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (is_nowait) {
-    backend->poll_no_wait_count++;
-    if (backend->poll_no_wait_count < 10) return self;
-
-    long runnable_count = Runqueue_len(runqueue);
-    if (backend->poll_no_wait_count < runnable_count) return self;
-  }
-
-  backend->poll_no_wait_count = 0;
-
   if (is_nowait && backend->pending_sqes) {
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
@@ -246,7 +208,7 @@ VALUE Backend_wakeup(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (backend->currently_polling) {
+  if (backend->base.currently_polling) {
     // Since we're currently blocking while waiting for a completion, we add a
    // NOP which would cause the io_uring_enter syscall to return
    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -277,16 +239,17 @@ int io_uring_backend_defer_submit_and_await(
 {
   VALUE switchpoint_result = Qnil;
 
-  io_uring_sqe_set_data(sqe, ctx);
-  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  if (sqe) {
+    io_uring_sqe_set_data(sqe, ctx);
+    io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  }
   io_uring_backend_defer_submit(backend);
 
-  switchpoint_result = backend_await(backend);
+  switchpoint_result = backend_await((struct Backend_base *)backend);
 
-  if (!ctx->completed) {
+  if (ctx->ref_count > 1) {
+    // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-
-    // op was not completed, so we need to cancel it
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
@@ -300,7 +263,7 @@ int io_uring_backend_defer_submit_and_await(
 }
 
 VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
   VALUE resumed_value = Qnil;
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -332,14 +295,14 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -393,14 +356,14 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -440,14 +403,14 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -483,14 +446,14 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
 
   while (left > 0) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-    io_uring_prep_write(sqe, fptr->fd, buf, left, -1);
+    io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -532,17 +495,17 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     if (TEST_EXCEPTION(resume_value)) {
       free(iov);
       RAISE_EXCEPTION(resume_value);
     }
-    if (!ctx->completed) {
+    if (!completed) {
       free(iov);
       return resume_value;
     }
@@ -605,14 +568,14 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -652,14 +615,14 @@ VALUE Backend_recv_loop(VALUE self, VALUE io) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -698,14 +661,14 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -741,14 +704,14 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
 
   while (left > 0) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -775,14 +738,14 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
 
     int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (fd < 0)
@@ -846,14 +809,14 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   VALUE resume_value = Qnil;
 
   while (1) {
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SPLICE);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
 
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
@@ -897,13 +860,13 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   addr.sin_port = htons(NUM2INT(port));
 
   VALUE resume_value = Qnil;
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
   int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-  OP_CONTEXT_RELEASE(&backend->store, ctx);
+  int completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
-  if (!ctx->completed) return resume_value;
+  if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
 
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
@@ -943,12 +906,11 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duratio
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
 
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
-  OP_CONTEXT_RELEASE(&backend->store, ctx);
-  return ctx->completed;
+  return context_store_release(&backend->store, ctx);
 }
 
 VALUE Backend_sleep(VALUE self, VALUE duration) {
@@ -996,7 +958,7 @@ struct Backend_timeout_ctx {
 
 VALUE Backend_timeout_ensure(VALUE arg) {
   struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
-  if (!timeout_ctx->ctx->completed) {
+  if (timeout_ctx->ctx->ref_count) {
     timeout_ctx->ctx->result = -ECANCELED;
 
     // op was not completed, so we need to cancel it
@@ -1005,7 +967,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
     timeout_ctx->backend->pending_sqes = 0;
     io_uring_submit(&timeout_ctx->backend->ring);
   }
-  OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+  context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
   return Qnil;
 }
 
@@ -1023,7 +985,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_sqe_set_data(sqe, ctx);
@@ -1091,6 +1053,299 @@ VALUE Backend_kind(VALUE self) {
   return SYM_io_uring;
 }
 
+struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
+  rb_io_t *fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+  io = rb_io_get_write_io(io);
+  GetOpenFile(io, fptr);
+  io_unset_nonblock(fptr, io);
+
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
+  return sqe;
+}
+
+struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
+  rb_io_t *fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+  io = rb_io_get_write_io(io);
+  GetOpenFile(io, fptr);
+  io_unset_nonblock(fptr, io);
+
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+  int flags_int = NUM2INT(flags);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+  return sqe;
+}
+
+struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE maxlen) {
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(src, ID_ivar_io);
+  if (underlying_io != Qnil) src = underlying_io;
+  GetOpenFile(src, src_fptr);
+  io_unset_nonblock(src_fptr, src);
+
+  underlying_io = rb_ivar_get(dest, ID_ivar_io);
+  if (underlying_io != Qnil) dest = underlying_io;
+  dest = rb_io_get_write_io(dest);
+  GetOpenFile(dest, dest_fptr);
+  io_unset_nonblock(dest_fptr, dest);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
+  return sqe;
+}
+
+VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
+  VALUE resume_value = Qnil;
+  unsigned int sqe_count = 0;
+  struct io_uring_sqe *last_sqe = 0;
+  Backend_t *backend;
+  GetBackend(self, backend);
+  if (argc == 0) return resume_value;
+
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+  for (int i = 0; i < argc; i++) {
+    VALUE op = argv[i];
+    VALUE op_type = RARRAY_AREF(op, 0);
+    VALUE op_len = RARRAY_LEN(op);
+
+    if (op_type == SYM_write && op_len == 3) {
+      last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
+    }
+    else if (op_type == SYM_send && op_len == 4)
+      last_sqe = Backend_chain_prepare_send(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+    else if (op_type == SYM_splice && op_len == 4)
+      last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+    else {
+      if (sqe_count) {
+        io_uring_sqe_set_data(last_sqe, ctx);
+        io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
+
+        ctx->ref_count = sqe_count;
+        ctx->result = -ECANCELED;
+        struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+        io_uring_prep_cancel(sqe, ctx, 0);
+        backend->pending_sqes = 0;
+        io_uring_submit(&backend->ring);
+      }
+      else {
+        ctx->ref_count = 1;
+        context_store_release(&backend->store, ctx);
+      }
+      rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
+    }
+
+    io_uring_sqe_set_data(last_sqe, ctx);
+    unsigned int flags = (i == argc - 1) ? IOSQE_ASYNC : IOSQE_ASYNC & IOSQE_IO_LINK;
+    io_uring_sqe_set_flags(last_sqe, flags);
+    sqe_count++;
+  }
+
+  ctx->ref_count = sqe_count + 1;
+  io_uring_backend_defer_submit(backend);
+  resume_value = backend_await((struct Backend_base *)backend);
+  int result = ctx->result;
+  int completed = context_store_release(&backend->store, ctx);
+  if (!completed) {
+    // op was not completed (an exception was raised), so we need to cancel it
+    ctx->result = -ECANCELED;
+    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    io_uring_prep_cancel(sqe, ctx, 0);
+    backend->pending_sqes = 0;
+    io_uring_submit(&backend->ring);
+    RAISE_IF_EXCEPTION(resume_value);
+    return resume_value;
+  }
+
+  RB_GC_GUARD(resume_value);
+  return INT2NUM(result);
+}
+
+VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_gc_period = NUM2DBL(period);
+  backend->base.idle_gc_last_time = current_time();
+  return self;
+}
+
+inline VALUE Backend_run_idle_tasks(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_run_idle_tasks(&backend->base);
+  return self;
+}
+
+static inline void splice_chunks_prep_write(op_context_t *ctx, struct io_uring_sqe *sqe, int fd, VALUE str) {
+  char *buf = RSTRING_PTR(str);
+  int len = RSTRING_LEN(str);
+  io_uring_prep_write(sqe, fd, buf, len, 0);
+  // io_uring_prep_send(sqe, fd, buf, len, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_prep_splice(op_context_t *ctx, struct io_uring_sqe *sqe, int src, int dest, int maxlen) {
+  io_uring_prep_splice(sqe, src, -1, dest, -1, maxlen, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_get_sqe(
+  Backend_t *backend,
+  op_context_t **ctx,
+  struct io_uring_sqe **sqe,
+  enum op_type type
+)
+{
+  if (*ctx) {
+    if (*sqe) (*sqe)->flags |= IOSQE_IO_LINK;
+    (*ctx)->ref_count++;
+  }
+  else
+    *ctx = context_store_acquire(&backend->store, type);
+  (*sqe) = io_uring_get_sqe(&backend->ring);
+}
+
+static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+  ctx->result = -ECANCELED;
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_cancel(sqe, ctx, 0);
+  backend->pending_sqes = 0;
+  io_uring_submit(&backend->ring);
+}
+
+static inline int splice_chunks_await_ops(
+  Backend_t *backend,
+  op_context_t **ctx,
+  int *result,
+  VALUE *switchpoint_result
+)
+{
+  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+  if (result) (*result) = res;
+  int completed = context_store_release(&backend->store, *ctx);
+  if (!completed) {
+    splice_chunks_cancel(backend, *ctx);
+    if (TEST_EXCEPTION(*switchpoint_result)) return 1;
+  }
+  *ctx = 0;
+  return 0;
+}
+
+#define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
+  if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+
+VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  int total = 0;
+  int err = 0;
+  VALUE switchpoint_result = Qnil;
+  op_context_t *ctx = 0;
+  struct io_uring_sqe *sqe = 0;
+
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+
+  VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+  if (underlying_io != Qnil) src = underlying_io;
+  GetOpenFile(src, src_fptr);
+  io_verify_blocking_mode(src_fptr, src, Qtrue);
+
+  underlying_io = rb_ivar_get(dest, ID_ivar_io);
+  if (underlying_io != Qnil) dest = underlying_io;
+  dest = rb_io_get_write_io(dest);
+  GetOpenFile(dest, dest_fptr);
+  io_verify_blocking_mode(dest_fptr, dest, Qtrue);
+
+  int maxlen = NUM2INT(chunk_size);
+  VALUE str = Qnil;
+  VALUE chunk_len_value = Qnil;
+
+  int pipefd[2] = { -1, -1 };
+  if (pipe(pipefd) == -1) {
+    err = errno;
+    goto syscallerror;
+  }
+
+  if (prefix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+  }
+
+  while (1) {
+    int chunk_len;
+    VALUE chunk_prefix_str = Qnil;
+    VALUE chunk_postfix_str = Qnil;
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
+
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+    if (chunk_len == 0) break;
+
+    total += chunk_len;
+    chunk_len_value = INT2NUM(chunk_len);
+
+
+    if (chunk_prefix != Qnil) {
+      chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+    }
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+
+    if (chunk_postfix != Qnil) {
+      chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+    }
+
+    RB_GC_GUARD(chunk_prefix_str);
+    RB_GC_GUARD(chunk_postfix_str);
+  }
+
+  if (postfix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+  }
+  if (ctx) {
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+  }
+
+  RB_GC_GUARD(str);
+  RB_GC_GUARD(chunk_len_value);
+  RB_GC_GUARD(switchpoint_result);
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return INT2NUM(total);
+syscallerror:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  rb_syserr_fail(err, strerror(err));
+error:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return RAISE_EXCEPTION(switchpoint_result);
+}
+
 void Init_Backend() {
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
   rb_define_alloc_func(cBackend, Backend_allocate);
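Backend_splice_chunks pumps data from src to dest through an internal pipe, optionally framing the whole stream (prefix/postfix) and each chunk (chunk_prefix/chunk_postfix, which per the ID_call branch above may be strings or callables receiving the chunk length). A hypothetical usage sketch in the spirit of data/examples/io/splice_chunks.rb from the file list, writing an HTTP/1.1 chunked response:

    Thread.current.backend.splice_chunks(
      file, socket,
      "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n", # prefix
      "0\r\n\r\n",                                             # postfix
      ->(len) { "#{len.to_s(16)}\r\n" },                       # chunk prefix: hex size line
      "\r\n",                                                  # chunk postfix
      16384                                                    # max chunk size
    )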
@@ -1102,6 +1357,9 @@ void Init_Backend() {
   rb_define_method(cBackend, "poll", Backend_poll, 3);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
   rb_define_method(cBackend, "kind", Backend_kind, 0);
+  rb_define_method(cBackend, "chain", Backend_chain, -1);
+  rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+  rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);
 
   rb_define_method(cBackend, "accept", Backend_accept, 2);
   rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
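Backend#chain (registered above with arity -1) takes a variable number of op arrays ([:write, io, str], [:send, io, str, flags] or [:splice, src, dest, maxlen], matching the SYM_write/SYM_send/SYM_splice checks in Backend_chain) and submits them to io_uring as one chained submission, suspending the current fiber only once for the whole sequence. A hypothetical usage sketch:

    # Write a response header, then splice the file straight to the socket,
    # in a single chained submission.
    Thread.current.backend.chain(
      [:write,  socket, "HTTP/1.1 200 OK\r\nContent-Length: #{file.size}\r\n\r\n"],
      [:splice, file, socket, file.size]
    )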
@@ -1130,6 +1388,9 @@ void Init_Backend() {
 #endif
 
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+  SYM_send = ID2SYM(rb_intern("send"));
+  SYM_splice = ID2SYM(rb_intern("splice"));
+  SYM_write = ID2SYM(rb_intern("write"));
 }
 
 #endif // POLYPHONY_BACKEND_LIBURING