polyphony 0.53.2 → 0.58

This diff shows the changes between publicly released versions of the package, as published to their public registries, and is provided for informational purposes only.
@@ -1,17 +1,21 @@
- #include <time.h>
+ #ifndef BACKEND_COMMON_H
+ #define BACKEND_COMMON_H

  #include "ruby.h"
  #include "ruby/io.h"

+ struct Backend_base {
+   unsigned int currently_polling;
+   unsigned int pending_count;
+   double idle_gc_period;
+   double idle_gc_last_time;
+   VALUE idle_block;
+ };

- #ifdef POLYPHONY_USE_PIDFD_OPEN
- #ifndef __NR_pidfd_open
- #define __NR_pidfd_open 434 /* System call # on most architectures */
- #endif
+ void initialize_backend_base(struct Backend_base *base);

- static int pidfd_open(pid_t pid, unsigned int flags) {
-   return syscall(__NR_pidfd_open, pid, flags);
- }
+ #ifdef POLYPHONY_USE_PIDFD_OPEN
+ int pidfd_open(pid_t pid, unsigned int flags);
  #endif

  //////////////////////////////////////////////////////////////////////
@@ -26,75 +30,19 @@ struct io_internal_read_struct {

  #define StringValue(v) rb_string_value(&(v))

- int io_setstrbuf(VALUE *str, long len) {
-   #ifdef _WIN32
-     len = (len + 1) & ~1L; /* round up for wide char */
-   #endif
-   if (*str == Qnil) {
-     *str = rb_str_new(0, len);
-     return 1;
-   }
-   else {
-     VALUE s = StringValue(*str);
-     long clen = RSTRING_LEN(s);
-     if (clen >= len) {
-       rb_str_modify(s);
-       return 0;
-     }
-     len -= clen;
-   }
-   rb_str_modify_expand(*str, len);
-   return 0;
- }
-
- #define MAX_REALLOC_GAP 4096
-
- inline void io_shrink_read_string(VALUE str, long n) {
-   if (rb_str_capacity(str) - n > MAX_REALLOC_GAP) {
-     rb_str_resize(str, n);
-   }
- }
-
- void io_set_read_length(VALUE str, long n, int shrinkable) {
-   if (RSTRING_LEN(str) != n) {
-     rb_str_modify(str);
-     rb_str_set_len(str, n);
-     if (shrinkable) io_shrink_read_string(str, n);
-   }
- }
-
- inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
-   if (fptr->encs.enc) {
-     return fptr->encs.enc;
-   }
-   return rb_default_external_encoding();
- }
-
- VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-   OBJ_TAINT(str);
-   rb_enc_associate(str, io_read_encoding(fptr));
-   return str;
- }
+ int io_setstrbuf(VALUE *str, long len);
+ void io_shrink_read_string(VALUE str, long n);
+ void io_set_read_length(VALUE str, long n, int shrinkable);
+ rb_encoding* io_read_encoding(rb_io_t *fptr);
+ VALUE io_enc_str(VALUE str, rb_io_t *fptr);

  //////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////

- inline VALUE backend_await(Backend_t *backend) {
-   VALUE ret;
-   backend->pending_count++;
-   ret = Thread_switch_fiber(rb_thread_current());
-   backend->pending_count--;
-   RB_GC_GUARD(ret);
-   return ret;
- }
-
- inline VALUE backend_snooze() {
-   Fiber_make_runnable(rb_fiber_current(), Qnil);
-   return Thread_switch_fiber(rb_thread_current());
- }
+ VALUE backend_await(struct Backend_base *backend);
+ VALUE backend_snooze();

  // macros for doing read loops
-
  #define READ_LOOP_PREPARE_STR() { \
    str = Qnil; \
    shrinkable = io_setstrbuf(&str, len); \
@@ -117,63 +65,13 @@ inline VALUE backend_snooze() {
    READ_LOOP_PREPARE_STR(); \
  }

- inline void rectify_io_file_pos(rb_io_t *fptr) {
-   // Apparently after reopening a closed file, the file position is not reset,
-   // which causes the read to fail. Fortunately we can use fptr->rbuf.len to
-   // find out if that's the case.
-   // See: https://github.com/digital-fabric/polyphony/issues/30
-   if (fptr->rbuf.len > 0) {
-     lseek(fptr->fd, -fptr->rbuf.len, SEEK_CUR);
-     fptr->rbuf.len = 0;
-   }
- }
-
- inline double current_time() {
-   struct timespec ts;
-   clock_gettime(CLOCK_MONOTONIC, &ts);
-   long long ns = ts.tv_sec;
-   ns = ns * 1e9 + ts.tv_nsec;
-   double t = ns;
-   return t / 1e9;
- }
-
- inline VALUE backend_timeout_exception(VALUE exception) {
-   if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
-     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
-   else if (rb_obj_is_kind_of(exception, rb_cClass) == Qtrue)
-     return rb_funcall(exception, ID_new, 0);
-   else
-     return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
- }
-
- VALUE Backend_timeout_safe(VALUE arg) {
-   return rb_yield(arg);
- }
-
- VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
-   return exception;
- }
-
- VALUE Backend_timeout_ensure_safe(VALUE arg) {
-   return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
- }
-
- static VALUE empty_string = Qnil;
+ void rectify_io_file_pos(rb_io_t *fptr);
+ double current_time();
+ VALUE backend_timeout_exception(VALUE exception);
+ VALUE Backend_timeout_ensure_safe(VALUE arg);
+ VALUE Backend_timeout_ensure_safe(VALUE arg);
+ VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+ void backend_run_idle_tasks(struct Backend_base *base);
+ void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);

- VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags) {
-   switch (RARRAY_LEN(ary)) {
-   case 0:
-     return Qnil;
-   case 1:
-     return Backend_send(self, io, RARRAY_AREF(ary, 0), flags);
-   default:
-     if (empty_string == Qnil) {
-       empty_string = rb_str_new_literal("");
-       rb_global_variable(&empty_string);
-     }
-     VALUE joined = rb_ary_join(ary, empty_string);
-     VALUE result = Backend_send(self, io, joined, flags);
-     RB_GC_GUARD(joined);
-     return result;
-   }
- }
+ #endif /* BACKEND_COMMON_H */
@@ -4,7 +4,6 @@
  #include <sys/socket.h>
  #include <sys/uio.h>
  #include <unistd.h>
- #include <fcntl.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>
  #include <stdnoreturn.h>
@@ -19,39 +18,21 @@
  #include "backend_io_uring_context.h"
  #include "ruby/thread.h"
  #include "ruby/io.h"
+ #include "backend_common.h"

  VALUE SYM_io_uring;
+ VALUE SYM_send;
+ VALUE SYM_splice;
+ VALUE SYM_write;

  #ifdef POLYPHONY_UNSET_NONBLOCK
- ID ID_ivar_is_nonblocking;
-
- // One of the changes introduced in Ruby 3.0 as part of the work on the
- // FiberScheduler interface is that all created sockets are marked as
- // non-blocking. This prevents the io_uring backend from working correctly,
- // since it will return an EAGAIN error just like a normal syscall. So here
- // instead of setting O_NONBLOCK (which is required for the libev backend), we
- // unset it.
- inline void io_unset_nonblock(rb_io_t *fptr, VALUE io) {
-   VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
-   if (is_nonblocking == Qfalse) return;
-
-   rb_ivar_set(io, ID_ivar_is_nonblocking, Qfalse);
-
-   int oflags = fcntl(fptr->fd, F_GETFL);
-   if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
-   oflags &= !O_NONBLOCK;
-   fcntl(fptr->fd, F_SETFL, oflags);
- }
+ #define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
  #else
- // NOP
  #define io_unset_nonblock(fptr, io)
  #endif

  typedef struct Backend_t {
-   // common fields
-   unsigned int currently_polling;
-   unsigned int pending_count;
-   unsigned int poll_no_wait_count;
+   struct Backend_base base;

    // implementation-specific fields
    struct io_uring ring;
@@ -61,7 +42,11 @@ typedef struct Backend_t {
    int event_fd;
  } Backend_t;

- #include "backend_common.h"
+ static void Backend_mark(void *ptr) {
+   Backend_t *backend = ptr;
+   if (backend->base.idle_block != Qnil)
+     rb_gc_mark(backend->base.idle_block);
+ }

  static size_t Backend_size(const void *ptr) {
    return sizeof(Backend_t);
@@ -69,7 +54,7 @@ static size_t Backend_size(const void *ptr) {

  static const rb_data_type_t Backend_type = {
    "IOUringBackend",
-   {0, 0, Backend_size,},
+   {Backend_mark, 0, Backend_size,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
  };

@@ -86,9 +71,7 @@ static VALUE Backend_initialize(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   backend->currently_polling = 0;
-   backend->pending_count = 0;
-   backend->poll_no_wait_count = 0;
+   initialize_backend_base(&backend->base);
    backend->pending_sqes = 0;
    backend->prepared_limit = 2048;

@@ -116,9 +99,8 @@ VALUE Backend_post_fork(VALUE self) {
    io_uring_queue_exit(&backend->ring);
    io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
    context_store_free(&backend->store);
-   backend->currently_polling = 0;
-   backend->pending_count = 0;
-   backend->poll_no_wait_count = 0;
+   backend->base.currently_polling = 0;
+   backend->base.pending_count = 0;
    backend->pending_sqes = 0;

    return self;
@@ -128,7 +110,7 @@ unsigned int Backend_pending_count(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   return backend->pending_count;
+   return backend->base.pending_count;
  }

  typedef struct poll_context {
@@ -154,18 +136,11 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
    op_context_t *ctx = io_uring_cqe_get_data(cqe);
    if (!ctx) return;

+   // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
    ctx->result = cqe->res;
-
-   if (ctx->completed)
-     // already marked as deleted as result of fiber resuming before op
-     // completion, so we can release the context
-     context_store_release(&backend->store, ctx);
-   else {
-     // otherwise, we mark it as completed, schedule the fiber and let it deal
-     // with releasing the context
-     ctx->completed = 1;
-     if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
-   }
+   if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+     Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+   context_store_release(&backend->store, ctx);
  }

  // adapted from io_uring_peek_batch_cqe in queue.c
@@ -205,9 +180,9 @@ void io_uring_backend_poll(Backend_t *backend) {
      io_uring_submit(&backend->ring);
    }

-   backend->currently_polling = 1;
+   backend->base.currently_polling = 1;
    rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
-   backend->currently_polling = 0;
+   backend->base.currently_polling = 0;
    if (poll_ctx.result < 0) return;

    io_uring_backend_handle_completion(poll_ctx.cqe, backend);
@@ -219,16 +194,6 @@ VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue
    Backend_t *backend;
    GetBackend(self, backend);

-   if (is_nowait) {
-     backend->poll_no_wait_count++;
-     if (backend->poll_no_wait_count < 10) return self;
-
-     long runnable_count = Runqueue_len(runqueue);
-     if (backend->poll_no_wait_count < runnable_count) return self;
-   }
-
-   backend->poll_no_wait_count = 0;
-
    if (is_nowait && backend->pending_sqes) {
      backend->pending_sqes = 0;
      io_uring_submit(&backend->ring);
@@ -246,7 +211,7 @@ VALUE Backend_wakeup(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   if (backend->currently_polling) {
+   if (backend->base.currently_polling) {
      // Since we're currently blocking while waiting for a completion, we add a
      // NOP which would cause the io_uring_enter syscall to return
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -277,16 +242,17 @@ int io_uring_backend_defer_submit_and_await(
  {
    VALUE switchpoint_result = Qnil;

-   io_uring_sqe_set_data(sqe, ctx);
-   io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+   if (sqe) {
+     io_uring_sqe_set_data(sqe, ctx);
+     io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+   }
    io_uring_backend_defer_submit(backend);

-   switchpoint_result = backend_await(backend);
+   switchpoint_result = backend_await((struct Backend_base *)backend);

-   if (!ctx->completed) {
+   if (ctx->ref_count > 1) {
+     // op was not completed (an exception was raised), so we need to cancel it
      ctx->result = -ECANCELED;
-
-     // op was not completed, so we need to cancel it
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_cancel(sqe, ctx, 0);
      backend->pending_sqes = 0;
@@ -300,7 +266,7 @@ int io_uring_backend_defer_submit_and_await(
  }

  VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
    VALUE resumed_value = Qnil;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -332,14 +298,14 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -393,14 +359,14 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

      ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -440,14 +406,14 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

      ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -483,14 +449,14 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {

    while (left > 0) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-     io_uring_prep_write(sqe, fptr->fd, buf, left, -1);
+     io_uring_prep_write(sqe, fptr->fd, buf, left, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -532,17 +498,17 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      if (TEST_EXCEPTION(resume_value)) {
        free(iov);
        RAISE_EXCEPTION(resume_value);
      }
-     if (!ctx->completed) {
+     if (!completed) {
        free(iov);
        return resume_value;
      }
@@ -605,14 +571,14 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -652,14 +618,14 @@ VALUE Backend_recv_loop(VALUE self, VALUE io) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -698,14 +664,14 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -741,14 +707,14 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {

    while (left > 0) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -775,14 +741,14 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);

      int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (fd < 0)
@@ -846,14 +812,14 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
    VALUE resume_value = Qnil;

    while (1) {
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SPLICE);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;

      if (result < 0)
        rb_syserr_fail(-result, strerror(-result));
@@ -897,13 +863,13 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
    addr.sin_port = htons(NUM2INT(port));

    VALUE resume_value = Qnil;
-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
    io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-   OP_CONTEXT_RELEASE(&backend->store, ctx);
+   int completed = context_store_release(&backend->store, ctx);
    RAISE_IF_EXCEPTION(resume_value);
-   if (!ctx->completed) return resume_value;
+   if (!completed) return resume_value;
    RB_GC_GUARD(resume_value);

    if (result < 0) rb_syserr_fail(-result, strerror(-result));
@@ -943,12 +909,10 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duratio
    struct __kernel_timespec ts = double_to_timespec(duration);
    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
    io_uring_prep_timeout(sqe, &ts, 0, 0);
-
    io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
-   OP_CONTEXT_RELEASE(&backend->store, ctx);
-   return ctx->completed;
+   return context_store_release(&backend->store, ctx);
  }

  VALUE Backend_sleep(VALUE self, VALUE duration) {
@@ -996,7 +960,7 @@ struct Backend_timeout_ctx {

  VALUE Backend_timeout_ensure(VALUE arg) {
    struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
-   if (!timeout_ctx->ctx->completed) {
+   if (timeout_ctx->ctx->ref_count) {
      timeout_ctx->ctx->result = -ECANCELED;

      // op was not completed, so we need to cancel it
@@ -1005,7 +969,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
      timeout_ctx->backend->pending_sqes = 0;
      io_uring_submit(&timeout_ctx->backend->ring);
    }
-   OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+   context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
    return Qnil;
  }

@@ -1023,7 +987,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {

    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
    ctx->resume_value = timeout;
    io_uring_prep_timeout(sqe, &ts, 0, 0);
    io_uring_sqe_set_data(sqe, ctx);
@@ -1091,6 +1055,306 @@ VALUE Backend_kind(VALUE self) {
    return SYM_io_uring;
  }

+ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+   io_unset_nonblock(fptr, io);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
+   return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+   io_unset_nonblock(fptr, io);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+   int flags_int = NUM2INT(flags);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+   return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE maxlen) {
+   rb_io_t *src_fptr;
+   rb_io_t *dest_fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(src, ID_ivar_io);
+   if (underlying_io != Qnil) src = underlying_io;
+   GetOpenFile(src, src_fptr);
+   io_unset_nonblock(src_fptr, src);
+
+   underlying_io = rb_ivar_get(dest, ID_ivar_io);
+   if (underlying_io != Qnil) dest = underlying_io;
+   dest = rb_io_get_write_io(dest);
+   GetOpenFile(dest, dest_fptr);
+   io_unset_nonblock(dest_fptr, dest);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
+   return sqe;
+ }
+
+ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
+   VALUE resume_value = Qnil;
+   unsigned int sqe_count = 0;
+   struct io_uring_sqe *last_sqe = 0;
+   Backend_t *backend;
+   GetBackend(self, backend);
+   if (argc == 0) return resume_value;
+
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+   for (int i = 0; i < argc; i++) {
+     VALUE op = argv[i];
+     VALUE op_type = RARRAY_AREF(op, 0);
+     VALUE op_len = RARRAY_LEN(op);
+
+     if (op_type == SYM_write && op_len == 3) {
+       last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
+     }
+     else if (op_type == SYM_send && op_len == 4)
+       last_sqe = Backend_chain_prepare_send(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+     else if (op_type == SYM_splice && op_len == 4)
+       last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+     else {
+       if (sqe_count) {
+         io_uring_sqe_set_data(last_sqe, ctx);
+         io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
+
+         ctx->ref_count = sqe_count;
+         ctx->result = -ECANCELED;
+         struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+         io_uring_prep_cancel(sqe, ctx, 0);
+         backend->pending_sqes = 0;
+         io_uring_submit(&backend->ring);
+       }
+       else {
+         ctx->ref_count = 1;
+         context_store_release(&backend->store, ctx);
+       }
+       rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
+     }
+
+     io_uring_sqe_set_data(last_sqe, ctx);
+     unsigned int flags = (i == argc - 1) ? IOSQE_ASYNC : IOSQE_ASYNC & IOSQE_IO_LINK;
+     io_uring_sqe_set_flags(last_sqe, flags);
+     sqe_count++;
+   }
+
+   ctx->ref_count = sqe_count + 1;
+   io_uring_backend_defer_submit(backend);
+   resume_value = backend_await((struct Backend_base *)backend);
+   int result = ctx->result;
+   int completed = context_store_release(&backend->store, ctx);
+   if (!completed) {
+     // op was not completed (an exception was raised), so we need to cancel it
+     ctx->result = -ECANCELED;
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_cancel(sqe, ctx, 0);
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+     RAISE_IF_EXCEPTION(resume_value);
+     return resume_value;
+   }
+
+   RB_GC_GUARD(resume_value);
+   return INT2NUM(result);
+ }
+
+ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   backend->base.idle_gc_period = NUM2DBL(period);
+   backend->base.idle_gc_last_time = current_time();
+   return self;
+ }
+
+ VALUE Backend_idle_block_set(VALUE self, VALUE block) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   backend->base.idle_block = block;
+   return self;
+ }
+
+ inline VALUE Backend_run_idle_tasks(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   backend_run_idle_tasks(&backend->base);
+   return self;
+ }
+
+ static inline void splice_chunks_prep_write(op_context_t *ctx, struct io_uring_sqe *sqe, int fd, VALUE str) {
+   char *buf = RSTRING_PTR(str);
+   int len = RSTRING_LEN(str);
+   io_uring_prep_write(sqe, fd, buf, len, 0);
+   // io_uring_prep_send(sqe, fd, buf, len, 0);
+   io_uring_sqe_set_data(sqe, ctx);
+ }
+
+ static inline void splice_chunks_prep_splice(op_context_t *ctx, struct io_uring_sqe *sqe, int src, int dest, int maxlen) {
+   io_uring_prep_splice(sqe, src, -1, dest, -1, maxlen, 0);
+   io_uring_sqe_set_data(sqe, ctx);
+ }
+
+ static inline void splice_chunks_get_sqe(
+   Backend_t *backend,
+   op_context_t **ctx,
+   struct io_uring_sqe **sqe,
+   enum op_type type
+ )
+ {
+   if (*ctx) {
+     if (*sqe) (*sqe)->flags |= IOSQE_IO_LINK;
+     (*ctx)->ref_count++;
+   }
+   else
+     *ctx = context_store_acquire(&backend->store, type);
+   (*sqe) = io_uring_get_sqe(&backend->ring);
+ }
+
+ static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+   ctx->result = -ECANCELED;
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_cancel(sqe, ctx, 0);
+   backend->pending_sqes = 0;
+   io_uring_submit(&backend->ring);
+ }
+
+ static inline int splice_chunks_await_ops(
+   Backend_t *backend,
+   op_context_t **ctx,
+   int *result,
+   VALUE *switchpoint_result
+ )
+ {
+   int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+   if (result) (*result) = res;
+   int completed = context_store_release(&backend->store, *ctx);
+   if (!completed) {
+     splice_chunks_cancel(backend, *ctx);
+     if (TEST_EXCEPTION(*switchpoint_result)) return 1;
+   }
+   *ctx = 0;
+   return 0;
+ }
+
+ #define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
+   if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+
+ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   int total = 0;
+   int err = 0;
+   VALUE switchpoint_result = Qnil;
+   op_context_t *ctx = 0;
+   struct io_uring_sqe *sqe = 0;
+
+   rb_io_t *src_fptr;
+   rb_io_t *dest_fptr;
+
+   VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+   if (underlying_io != Qnil) src = underlying_io;
+   GetOpenFile(src, src_fptr);
+   io_verify_blocking_mode(src_fptr, src, Qtrue);
+
+   underlying_io = rb_ivar_get(dest, ID_ivar_io);
+   if (underlying_io != Qnil) dest = underlying_io;
+   dest = rb_io_get_write_io(dest);
+   GetOpenFile(dest, dest_fptr);
+   io_verify_blocking_mode(dest_fptr, dest, Qtrue);
+
+   int maxlen = NUM2INT(chunk_size);
+   VALUE str = Qnil;
+   VALUE chunk_len_value = Qnil;
+
+   int pipefd[2] = { -1, -1 };
+   if (pipe(pipefd) == -1) {
+     err = errno;
+     goto syscallerror;
+   }
+
+   if (prefix != Qnil) {
+     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+     splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+   }
+
+   while (1) {
+     int chunk_len;
+     VALUE chunk_prefix_str = Qnil;
+     VALUE chunk_postfix_str = Qnil;
+
+     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+     splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
+
+     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+     if (chunk_len == 0) break;
+
+     total += chunk_len;
+     chunk_len_value = INT2NUM(chunk_len);
+
+
+     if (chunk_prefix != Qnil) {
+       chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
+       splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+       splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+     }
+
+     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+     splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+
+     if (chunk_postfix != Qnil) {
+       chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
+       splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+       splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+     }
+
+     RB_GC_GUARD(chunk_prefix_str);
+     RB_GC_GUARD(chunk_postfix_str);
+   }
+
+   if (postfix != Qnil) {
+     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+     splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+   }
+   if (ctx) {
+     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+   }
+
+   RB_GC_GUARD(str);
+   RB_GC_GUARD(chunk_len_value);
+   RB_GC_GUARD(switchpoint_result);
+   if (pipefd[0] != -1) close(pipefd[0]);
+   if (pipefd[1] != -1) close(pipefd[1]);
+   return INT2NUM(total);
+ syscallerror:
+   if (pipefd[0] != -1) close(pipefd[0]);
+   if (pipefd[1] != -1) close(pipefd[1]);
+   rb_syserr_fail(err, strerror(err));
+ error:
+   if (pipefd[0] != -1) close(pipefd[0]);
+   if (pipefd[1] != -1) close(pipefd[1]);
+   return RAISE_EXCEPTION(switchpoint_result);
+ }
+
  void Init_Backend() {
    VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
    rb_define_alloc_func(cBackend, Backend_allocate);
@@ -1102,6 +1366,10 @@ void Init_Backend() {
    rb_define_method(cBackend, "poll", Backend_poll, 3);
    rb_define_method(cBackend, "break", Backend_wakeup, 0);
    rb_define_method(cBackend, "kind", Backend_kind, 0);
+   rb_define_method(cBackend, "chain", Backend_chain, -1);
+   rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+   rb_define_method(cBackend, "idle_block=", Backend_idle_block_set, 1);
+   rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);

    rb_define_method(cBackend, "accept", Backend_accept, 2);
    rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
@@ -1130,6 +1398,9 @@ void Init_Backend() {
  #endif

    SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+   SYM_send = ID2SYM(rb_intern("send"));
+   SYM_splice = ID2SYM(rb_intern("splice"));
+   SYM_write = ID2SYM(rb_intern("write"));
  }

  #endif // POLYPHONY_BACKEND_LIBURING