polyphony 0.54.0 → 0.59

@@ -1,17 +1,42 @@
-#include <time.h>
+#ifndef BACKEND_COMMON_H
+#define BACKEND_COMMON_H
 
 #include "ruby.h"
 #include "ruby/io.h"
+#include "runqueue.h"
 
+struct backend_stats {
+  int scheduled_fibers;
+  int waiting_fibers;
+  int pending_ops;
+};
 
-#ifdef POLYPHONY_USE_PIDFD_OPEN
-#ifndef __NR_pidfd_open
-#define __NR_pidfd_open 434 /* System call # on most architectures */
-#endif
+struct Backend_base {
+  runqueue_t runqueue;
+  unsigned int currently_polling;
+  unsigned int pending_count;
+  double idle_gc_period;
+  double idle_gc_last_time;
+  VALUE idle_proc;
+  VALUE trace_proc;
+};
 
-static int pidfd_open(pid_t pid, unsigned int flags) {
-  return syscall(__NR_pidfd_open, pid, flags);
-}
+void backend_base_initialize(struct Backend_base *base);
+void backend_base_finalize(struct Backend_base *base);
+void backend_base_mark(struct Backend_base *base);
+VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
+void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
+void backend_trace(struct Backend_base *base, int argc, VALUE *argv);
+
+// tracing
+#define SHOULD_TRACE(base) ((base)->trace_proc != Qnil)
+#define TRACE(base, ...) rb_funcall((base)->trace_proc, ID_call, __VA_ARGS__)
+#define COND_TRACE(base, ...) if (SHOULD_TRACE(base)) { TRACE(base, __VA_ARGS__); }
+
+
+
+#ifdef POLYPHONY_USE_PIDFD_OPEN
+int pidfd_open(pid_t pid, unsigned int flags);
 #endif
 
 //////////////////////////////////////////////////////////////////////
@@ -26,75 +51,19 @@ struct io_internal_read_struct {
 
 #define StringValue(v) rb_string_value(&(v))
 
-int io_setstrbuf(VALUE *str, long len) {
-  #ifdef _WIN32
-    len = (len + 1) & ~1L; /* round up for wide char */
-  #endif
-  if (*str == Qnil) {
-    *str = rb_str_new(0, len);
-    return 1;
-  }
-  else {
-    VALUE s = StringValue(*str);
-    long clen = RSTRING_LEN(s);
-    if (clen >= len) {
-      rb_str_modify(s);
-      return 0;
-    }
-    len -= clen;
-  }
-  rb_str_modify_expand(*str, len);
-  return 0;
-}
-
-#define MAX_REALLOC_GAP 4096
-
-inline void io_shrink_read_string(VALUE str, long n) {
-  if (rb_str_capacity(str) - n > MAX_REALLOC_GAP) {
-    rb_str_resize(str, n);
-  }
-}
-
-void io_set_read_length(VALUE str, long n, int shrinkable) {
-  if (RSTRING_LEN(str) != n) {
-    rb_str_modify(str);
-    rb_str_set_len(str, n);
-    if (shrinkable) io_shrink_read_string(str, n);
-  }
-}
-
-inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
-  if (fptr->encs.enc) {
-    return fptr->encs.enc;
-  }
-  return rb_default_external_encoding();
-}
-
-VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-  OBJ_TAINT(str);
-  rb_enc_associate(str, io_read_encoding(fptr));
-  return str;
-}
+int io_setstrbuf(VALUE *str, long len);
+void io_shrink_read_string(VALUE str, long n);
+void io_set_read_length(VALUE str, long n, int shrinkable);
+rb_encoding* io_read_encoding(rb_io_t *fptr);
+VALUE io_enc_str(VALUE str, rb_io_t *fptr);
 
 //////////////////////////////////////////////////////////////////////
 //////////////////////////////////////////////////////////////////////
 
-inline VALUE backend_await(Backend_t *backend) {
-  VALUE ret;
-  backend->pending_count++;
-  ret = Thread_switch_fiber(rb_thread_current());
-  backend->pending_count--;
-  RB_GC_GUARD(ret);
-  return ret;
-}
-
-inline VALUE backend_snooze() {
-  Fiber_make_runnable(rb_fiber_current(), Qnil);
-  return Thread_switch_fiber(rb_thread_current());
-}
+VALUE backend_await(struct Backend_base *backend);
+VALUE backend_snooze();
 
 // macros for doing read loops
-
 #define READ_LOOP_PREPARE_STR() { \
   str = Qnil; \
   shrinkable = io_setstrbuf(&str, len); \
@@ -117,63 +86,13 @@ inline VALUE backend_snooze() {
   READ_LOOP_PREPARE_STR(); \
 }
 
-inline void rectify_io_file_pos(rb_io_t *fptr) {
-  // Apparently after reopening a closed file, the file position is not reset,
-  // which causes the read to fail. Fortunately we can use fptr->rbuf.len to
-  // find out if that's the case.
-  // See: https://github.com/digital-fabric/polyphony/issues/30
-  if (fptr->rbuf.len > 0) {
-    lseek(fptr->fd, -fptr->rbuf.len, SEEK_CUR);
-    fptr->rbuf.len = 0;
-  }
-}
-
-inline double current_time() {
-  struct timespec ts;
-  clock_gettime(CLOCK_MONOTONIC, &ts);
-  long long ns = ts.tv_sec;
-  ns = ns * 1e9 + ts.tv_nsec;
-  double t = ns;
-  return t / 1e9;
-}
-
-inline VALUE backend_timeout_exception(VALUE exception) {
-  if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
-    return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
-  else if (rb_obj_is_kind_of(exception, rb_cClass) == Qtrue)
-    return rb_funcall(exception, ID_new, 0);
-  else
-    return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
-}
+void rectify_io_file_pos(rb_io_t *fptr);
+double current_time();
+VALUE backend_timeout_exception(VALUE exception);
+VALUE Backend_timeout_ensure_safe(VALUE arg);
+VALUE Backend_timeout_ensure_safe(VALUE arg);
+VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+void backend_run_idle_tasks(struct Backend_base *base);
+void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);
 
-VALUE Backend_timeout_safe(VALUE arg) {
-  return rb_yield(arg);
-}
-
-VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
-  return exception;
-}
-
-VALUE Backend_timeout_ensure_safe(VALUE arg) {
-  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
-}
-
-static VALUE empty_string = Qnil;
-
-VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags) {
-  switch (RARRAY_LEN(ary)) {
-  case 0:
-    return Qnil;
-  case 1:
-    return Backend_send(self, io, RARRAY_AREF(ary, 0), flags);
-  default:
-    if (empty_string == Qnil) {
-      empty_string = rb_str_new_literal("");
-      rb_global_variable(&empty_string);
-    }
-    VALUE joined = rb_ary_join(ary, empty_string);
-    VALUE result = Backend_send(self, io, joined, flags);
-    RB_GC_GUARD(joined);
-    return result;
-  }
-}
+#endif /* BACKEND_COMMON_H */
@@ -4,7 +4,6 @@
 #include <sys/socket.h>
 #include <sys/uio.h>
 #include <unistd.h>
-#include <fcntl.h>
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <stdnoreturn.h>
@@ -19,39 +18,21 @@
 #include "backend_io_uring_context.h"
 #include "ruby/thread.h"
 #include "ruby/io.h"
+#include "backend_common.h"
 
 VALUE SYM_io_uring;
+VALUE SYM_send;
+VALUE SYM_splice;
+VALUE SYM_write;
 
 #ifdef POLYPHONY_UNSET_NONBLOCK
-ID ID_ivar_is_nonblocking;
-
-// One of the changes introduced in Ruby 3.0 as part of the work on the
-// FiberScheduler interface is that all created sockets are marked as
-// non-blocking. This prevents the io_uring backend from working correctly,
-// since it will return an EAGAIN error just like a normal syscall. So here
-// instead of setting O_NONBLOCK (which is required for the libev backend), we
-// unset it.
-inline void io_unset_nonblock(rb_io_t *fptr, VALUE io) {
-  VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
-  if (is_nonblocking == Qfalse) return;
-
-  rb_ivar_set(io, ID_ivar_is_nonblocking, Qfalse);
-
-  int oflags = fcntl(fptr->fd, F_GETFL);
-  if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
-  oflags &= !O_NONBLOCK;
-  fcntl(fptr->fd, F_SETFL, oflags);
-}
+#define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
 #else
-// NOP
 #define io_unset_nonblock(fptr, io)
 #endif
 
 typedef struct Backend_t {
-  // common fields
-  unsigned int currently_polling;
-  unsigned int pending_count;
-  unsigned int poll_no_wait_count;
+  struct Backend_base base;
 
   // implementation-specific fields
   struct io_uring ring;
@@ -61,7 +42,15 @@ typedef struct Backend_t {
   int event_fd;
 } Backend_t;
 
-#include "backend_common.h"
+static void Backend_mark(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_mark(&backend->base);
+}
+
+static void Backend_free(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_finalize(&backend->base);
+}
 
 static size_t Backend_size(const void *ptr) {
   return sizeof(Backend_t);
@@ -69,7 +58,7 @@ static size_t Backend_size(const void *ptr) {
 
 static const rb_data_type_t Backend_type = {
   "IOUringBackend",
-  {0, 0, Backend_size,},
+  {Backend_mark, Backend_free, Backend_size,},
   0, 0, RUBY_TYPED_FREE_IMMEDIATELY
 };
 
@@ -86,9 +75,7 @@ static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
-  backend->poll_no_wait_count = 0;
+  backend_base_initialize(&backend->base);
   backend->pending_sqes = 0;
   backend->prepared_limit = 2048;
 
@@ -116,21 +103,13 @@ VALUE Backend_post_fork(VALUE self) {
   io_uring_queue_exit(&backend->ring);
   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
   context_store_free(&backend->store);
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
-  backend->poll_no_wait_count = 0;
+  backend->base.currently_polling = 0;
+  backend->base.pending_count = 0;
   backend->pending_sqes = 0;
 
   return self;
 }
 
-unsigned int Backend_pending_count(VALUE self) {
-  Backend_t *backend;
-  GetBackend(self, backend);
-
-  return backend->pending_count;
-}
-
 typedef struct poll_context {
   struct io_uring *ring;
   struct io_uring_cqe *cqe;
@@ -154,18 +133,11 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   op_context_t *ctx = io_uring_cqe_get_data(cqe);
   if (!ctx) return;
 
+  // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
   ctx->result = cqe->res;
-
-  if (ctx->completed)
-    // already marked as deleted as result of fiber resuming before op
-    // completion, so we can release the context
-    context_store_release(&backend->store, ctx);
-  else {
-    // otherwise, we mark it as completed, schedule the fiber and let it deal
-    // with releasing the context
-    ctx->completed = 1;
-    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
-  }
+  if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+    Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+  context_store_release(&backend->store, ctx);
 }
 
 // adapted from io_uring_peek_batch_cqe in queue.c
@@ -205,48 +177,70 @@ void io_uring_backend_poll(Backend_t *backend) {
     io_uring_submit(&backend->ring);
   }
 
-  backend->currently_polling = 1;
+  backend->base.currently_polling = 1;
   rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
-  backend->currently_polling = 0;
+  backend->base.currently_polling = 0;
   if (poll_ctx.result < 0) return;
 
   io_uring_backend_handle_completion(poll_ctx.cqe, backend);
   io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
 }
 
-VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
-  int is_nowait = nowait == Qtrue;
+inline VALUE Backend_poll(VALUE self, VALUE blocking) {
+  int is_blocking = blocking == Qtrue;
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (is_nowait) {
-    backend->poll_no_wait_count++;
-    if (backend->poll_no_wait_count < 10) return self;
-
-    long runnable_count = Runqueue_len(runqueue);
-    if (backend->poll_no_wait_count < runnable_count) return self;
-  }
-
-  backend->poll_no_wait_count = 0;
-
-  if (is_nowait && backend->pending_sqes) {
+  if (!is_blocking && backend->pending_sqes) {
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
   }
 
-  COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
-  if (!is_nowait) io_uring_backend_poll(backend);
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+  if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
 
   return self;
 }
 
+inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+}
+
+inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  runqueue_delete(&backend->base.runqueue, fiber);
+}
+
+inline VALUE Backend_switch_fiber(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return backend_base_switch_fiber(self, &backend->base);
+}
+
+inline struct backend_stats Backend_stats(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return (struct backend_stats){
+    .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+    .waiting_fibers = 0,
+    .pending_ops = backend->base.pending_count
+  };
+}
+
 VALUE Backend_wakeup(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (backend->currently_polling) {
+  if (backend->base.currently_polling) {
     // Since we're currently blocking while waiting for a completion, we add a
     // NOP which would cause the io_uring_enter syscall to return
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -277,16 +271,17 @@ int io_uring_backend_defer_submit_and_await(
 {
   VALUE switchpoint_result = Qnil;
 
-  io_uring_sqe_set_data(sqe, ctx);
-  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  if (sqe) {
+    io_uring_sqe_set_data(sqe, ctx);
+    io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  }
   io_uring_backend_defer_submit(backend);
 
-  switchpoint_result = backend_await(backend);
+  switchpoint_result = backend_await((struct Backend_base *)backend);
 
-  if (!ctx->completed) {
+  if (ctx->ref_count > 1) {
+    // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
-
-    // op was not completed, so we need to cancel it
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, ctx, 0);
     backend->pending_sqes = 0;
@@ -300,7 +295,7 @@ int io_uring_backend_defer_submit_and_await(
 }
 
 VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
   VALUE resumed_value = Qnil;
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -332,14 +327,14 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -393,14 +388,14 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -440,14 +435,14 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
 
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -483,14 +478,14 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
 
   while (left > 0) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-    io_uring_prep_write(sqe, fptr->fd, buf, left, -1);
+    io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -532,17 +527,17 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     if (TEST_EXCEPTION(resume_value)) {
       free(iov);
       RAISE_EXCEPTION(resume_value);
     }
-    if (!ctx->completed) {
+    if (!completed) {
       free(iov);
       return resume_value;
     }
@@ -605,14 +600,14 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -652,14 +647,14 @@ VALUE Backend_recv_loop(VALUE self, VALUE io) {
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -698,14 +693,14 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -741,14 +736,14 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
 
   while (left > 0) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (result < 0)
@@ -775,14 +770,14 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
 
   while (1) {
     VALUE resume_value = Qnil;
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
 
     int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
     RB_GC_GUARD(resume_value);
 
     if (fd < 0)
@@ -846,14 +841,14 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
   VALUE resume_value = Qnil;
 
   while (1) {
-    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SPLICE);
+    op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-    OP_CONTEXT_RELEASE(&backend->store, ctx);
+    int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
-    if (!ctx->completed) return resume_value;
+    if (!completed) return resume_value;
 
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
@@ -897,13 +892,13 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   addr.sin_port = htons(NUM2INT(port));
 
   VALUE resume_value = Qnil;
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
   int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-  OP_CONTEXT_RELEASE(&backend->store, ctx);
+  int completed = context_store_release(&backend->store, ctx);
   RAISE_IF_EXCEPTION(resume_value);
-  if (!ctx->completed) return resume_value;
+  if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
 
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
@@ -943,12 +938,10 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duratio
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
-
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
-  OP_CONTEXT_RELEASE(&backend->store, ctx);
-  return ctx->completed;
+  return context_store_release(&backend->store, ctx);
 }
 
 VALUE Backend_sleep(VALUE self, VALUE duration) {
@@ -996,7 +989,7 @@ struct Backend_timeout_ctx {
 
 VALUE Backend_timeout_ensure(VALUE arg) {
   struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
-  if (!timeout_ctx->ctx->completed) {
+  if (timeout_ctx->ctx->ref_count) {
     timeout_ctx->ctx->result = -ECANCELED;
 
     // op was not completed, so we need to cancel it
@@ -1005,7 +998,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
     timeout_ctx->backend->pending_sqes = 0;
     io_uring_submit(&timeout_ctx->backend->ring);
   }
-  OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+  context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
   return Qnil;
 }
 
@@ -1023,7 +1016,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
-  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_sqe_set_data(sqe, ctx);
@@ -1091,6 +1084,321 @@ VALUE Backend_kind(VALUE self) {
   return SYM_io_uring;
 }
 
+struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
+  rb_io_t *fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+  io = rb_io_get_write_io(io);
+  GetOpenFile(io, fptr);
+  io_unset_nonblock(fptr, io);
+
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
+  return sqe;
+}
+
+struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
+  rb_io_t *fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(io, ID_ivar_io);
+  if (underlying_io != Qnil) io = underlying_io;
+  io = rb_io_get_write_io(io);
+  GetOpenFile(io, fptr);
+  io_unset_nonblock(fptr, io);
+
+  char *buf = StringValuePtr(str);
+  long len = RSTRING_LEN(str);
+  int flags_int = NUM2INT(flags);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+  return sqe;
+}
+
+struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE maxlen) {
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+  VALUE underlying_io;
+
+  underlying_io = rb_ivar_get(src, ID_ivar_io);
+  if (underlying_io != Qnil) src = underlying_io;
+  GetOpenFile(src, src_fptr);
+  io_unset_nonblock(src_fptr, src);
+
+  underlying_io = rb_ivar_get(dest, ID_ivar_io);
+  if (underlying_io != Qnil) dest = underlying_io;
+  dest = rb_io_get_write_io(dest);
+  GetOpenFile(dest, dest_fptr);
+  io_unset_nonblock(dest_fptr, dest);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
+  return sqe;
+}
+
+VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
+  VALUE resume_value = Qnil;
+  unsigned int sqe_count = 0;
+  struct io_uring_sqe *last_sqe = 0;
+  Backend_t *backend;
+  GetBackend(self, backend);
+  if (argc == 0) return resume_value;
+
+  op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+  for (int i = 0; i < argc; i++) {
+    VALUE op = argv[i];
+    VALUE op_type = RARRAY_AREF(op, 0);
+    VALUE op_len = RARRAY_LEN(op);
+
+    if (op_type == SYM_write && op_len == 3) {
+      last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
+    }
+    else if (op_type == SYM_send && op_len == 4)
+      last_sqe = Backend_chain_prepare_send(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+    else if (op_type == SYM_splice && op_len == 4)
+      last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+    else {
+      if (sqe_count) {
+        io_uring_sqe_set_data(last_sqe, ctx);
+        io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
+
+        ctx->ref_count = sqe_count;
+        ctx->result = -ECANCELED;
+        struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+        io_uring_prep_cancel(sqe, ctx, 0);
+        backend->pending_sqes = 0;
+        io_uring_submit(&backend->ring);
+      }
+      else {
+        ctx->ref_count = 1;
+        context_store_release(&backend->store, ctx);
+      }
+      rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
+    }
+
+    io_uring_sqe_set_data(last_sqe, ctx);
+    unsigned int flags = (i == argc - 1) ? IOSQE_ASYNC : IOSQE_ASYNC & IOSQE_IO_LINK;
+    io_uring_sqe_set_flags(last_sqe, flags);
+    sqe_count++;
+  }
+
+  ctx->ref_count = sqe_count + 1;
+  io_uring_backend_defer_submit(backend);
+  resume_value = backend_await((struct Backend_base *)backend);
+  int result = ctx->result;
+  int completed = context_store_release(&backend->store, ctx);
+  if (!completed) {
+    // op was not completed (an exception was raised), so we need to cancel it
+    ctx->result = -ECANCELED;
+    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+    io_uring_prep_cancel(sqe, ctx, 0);
+    backend->pending_sqes = 0;
+    io_uring_submit(&backend->ring);
+    RAISE_IF_EXCEPTION(resume_value);
+    return resume_value;
+  }
+
+  RB_GC_GUARD(resume_value);
+  return INT2NUM(result);
+}
+
+VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_gc_period = NUM2DBL(period);
+  backend->base.idle_gc_last_time = current_time();
+  return self;
+}
+
+VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_proc = block;
+  return self;
+}
+
+inline VALUE Backend_run_idle_tasks(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_run_idle_tasks(&backend->base);
+  return self;
+}
+
+static inline void splice_chunks_prep_write(op_context_t *ctx, struct io_uring_sqe *sqe, int fd, VALUE str) {
+  char *buf = RSTRING_PTR(str);
+  int len = RSTRING_LEN(str);
+  io_uring_prep_write(sqe, fd, buf, len, 0);
+  // io_uring_prep_send(sqe, fd, buf, len, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_prep_splice(op_context_t *ctx, struct io_uring_sqe *sqe, int src, int dest, int maxlen) {
+  io_uring_prep_splice(sqe, src, -1, dest, -1, maxlen, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_get_sqe(
+  Backend_t *backend,
+  op_context_t **ctx,
+  struct io_uring_sqe **sqe,
+  enum op_type type
+)
+{
+  if (*ctx) {
+    if (*sqe) (*sqe)->flags |= IOSQE_IO_LINK;
+    (*ctx)->ref_count++;
+  }
+  else
+    *ctx = context_store_acquire(&backend->store, type);
+  (*sqe) = io_uring_get_sqe(&backend->ring);
+}
+
+static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+  ctx->result = -ECANCELED;
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_cancel(sqe, ctx, 0);
+  backend->pending_sqes = 0;
+  io_uring_submit(&backend->ring);
+}
+
+static inline int splice_chunks_await_ops(
+  Backend_t *backend,
+  op_context_t **ctx,
+  int *result,
+  VALUE *switchpoint_result
+)
+{
+  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+  if (result) (*result) = res;
+  int completed = context_store_release(&backend->store, *ctx);
+  if (!completed) {
+    splice_chunks_cancel(backend, *ctx);
+    if (TEST_EXCEPTION(*switchpoint_result)) return 1;
+  }
+  *ctx = 0;
+  return 0;
+}
+
+#define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
+  if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+
+VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  int total = 0;
+  int err = 0;
+  VALUE switchpoint_result = Qnil;
+  op_context_t *ctx = 0;
+  struct io_uring_sqe *sqe = 0;
+
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+
+  VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+  if (underlying_io != Qnil) src = underlying_io;
+  GetOpenFile(src, src_fptr);
+  io_verify_blocking_mode(src_fptr, src, Qtrue);
+
+  underlying_io = rb_ivar_get(dest, ID_ivar_io);
+  if (underlying_io != Qnil) dest = underlying_io;
+  dest = rb_io_get_write_io(dest);
+  GetOpenFile(dest, dest_fptr);
+  io_verify_blocking_mode(dest_fptr, dest, Qtrue);
+
+  int maxlen = NUM2INT(chunk_size);
+  VALUE str = Qnil;
+  VALUE chunk_len_value = Qnil;
+
+  int pipefd[2] = { -1, -1 };
+  if (pipe(pipefd) == -1) {
+    err = errno;
+    goto syscallerror;
+  }
+
+  if (prefix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+  }
+
+  while (1) {
+    int chunk_len;
+    VALUE chunk_prefix_str = Qnil;
+    VALUE chunk_postfix_str = Qnil;
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
+
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+    if (chunk_len == 0) break;
+
+    total += chunk_len;
+    chunk_len_value = INT2NUM(chunk_len);
+
+
+    if (chunk_prefix != Qnil) {
+      chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+    }
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+
+    if (chunk_postfix != Qnil) {
+      chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+    }
+
+    RB_GC_GUARD(chunk_prefix_str);
+    RB_GC_GUARD(chunk_postfix_str);
+  }
+
+  if (postfix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+  }
+  if (ctx) {
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+  }
+
+  RB_GC_GUARD(str);
+  RB_GC_GUARD(chunk_len_value);
+  RB_GC_GUARD(switchpoint_result);
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return INT2NUM(total);
+syscallerror:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  rb_syserr_fail(err, strerror(err));
+error:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return RAISE_EXCEPTION(switchpoint_result);
+}
+
+VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_trace(&backend->base, argc, argv);
+  return self;
+}
+
+VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend->base.trace_proc = block;
+  return self;
+}
+
 void Init_Backend() {
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
   rb_define_alloc_func(cBackend, Backend_allocate);
@@ -1098,10 +1406,16 @@ void Init_Backend() {
   rb_define_method(cBackend, "initialize", Backend_initialize, 0);
   rb_define_method(cBackend, "finalize", Backend_finalize, 0);
   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+  rb_define_method(cBackend, "trace", Backend_trace, -1);
+  rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
 
-  rb_define_method(cBackend, "poll", Backend_poll, 3);
+  rb_define_method(cBackend, "poll", Backend_poll, 1);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
   rb_define_method(cBackend, "kind", Backend_kind, 0);
+  rb_define_method(cBackend, "chain", Backend_chain, -1);
+  rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+  rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
+  rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);
 
   rb_define_method(cBackend, "accept", Backend_accept, 2);
   rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
@@ -1130,6 +1444,9 @@ void Init_Backend() {
 #endif
 
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+  SYM_send = ID2SYM(rb_intern("send"));
+  SYM_splice = ID2SYM(rb_intern("splice"));
+  SYM_write = ID2SYM(rb_intern("write"));
 }
 
 #endif // POLYPHONY_BACKEND_LIBURING
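The change that recurs throughout this diff is the replacement of the per-operation completed flag with a ref_count on the op context: context_store_acquire hands out a context referenced both by the in-flight operation and by the awaiting fiber, and context_store_release now reports whether the operation had fully completed. Below is a minimal, self-contained C sketch of that release-side protocol. It is illustrative only, not Polyphony source: the function names context_acquire and context_release are stand-ins for the gem's context_store_* API, and the real context store recycles contexts from a pool rather than calling malloc/free.

#include <stdio.h>
#include <stdlib.h>

// Illustrative model of a ref-counted op context: one reference is held by
// the submitted operation, one by the fiber awaiting its completion.
typedef struct op_context {
  int ref_count;
  int result;
} op_context_t;

// acquire: a fresh context starts with two outstanding references
static op_context_t *context_acquire(void) {
  op_context_t *ctx = malloc(sizeof(op_context_t));
  ctx->ref_count = 2;
  ctx->result = 0;
  return ctx;
}

// release: drops one reference; returns 1 only on the final release,
// i.e. when both the completion side and the awaiting side are done
static int context_release(op_context_t *ctx) {
  int done = --ctx->ref_count == 0;
  if (done) free(ctx);
  return done;
}

int main(void) {
  op_context_t *ctx = context_acquire();
  int after_completion = context_release(ctx); // completion handler releases first: 2 -> 1
  int after_await = context_release(ctx);      // awaiting fiber releases last: 1 -> 0, frees
  printf("completed after completion handler: %d\n", after_completion); // prints 0
  printf("completed after awaiting fiber: %d\n", after_await);          // prints 1
  return 0;
}

In the hunks above this shows up as int completed = context_store_release(&backend->store, ctx); followed by if (!completed) return resume_value; in the awaiting fiber, and as the ctx->ref_count == 2 check in io_uring_backend_handle_completion on the completion side.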