polyphony 0.53.0 → 0.56.0

--- a/ext/polyphony/backend_common.h
+++ b/ext/polyphony/backend_common.h
@@ -1,17 +1,18 @@
- #include <time.h>
+ #ifndef BACKEND_COMMON_H
+ #define BACKEND_COMMON_H

  #include "ruby.h"
  #include "ruby/io.h"

+ struct Backend_base {
+   unsigned int currently_polling;
+   unsigned int pending_count;
+   double idle_gc_period;
+   double idle_gc_last_time;
+ };

  #ifdef POLYPHONY_USE_PIDFD_OPEN
- #ifndef __NR_pidfd_open
- #define __NR_pidfd_open 434 /* System call # on most architectures */
- #endif
-
- static int pidfd_open(pid_t pid, unsigned int flags) {
-   return syscall(__NR_pidfd_open, pid, flags);
- }
+ int pidfd_open(pid_t pid, unsigned int flags);
  #endif

  //////////////////////////////////////////////////////////////////////
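
The new struct Backend_base gathers the state shared by all backend implementations. Each concrete backend embeds it as its first member (see the Backend_t change further down), which is why shared helpers can take a struct Backend_base * and call sites can cast, as in backend_await((struct Backend_base *)backend): C guarantees that a pointer to a struct, suitably converted, points to its first member. A minimal, self-contained sketch of the embedding pattern (the demo_ names are illustrative, not part of the gem):

    #include <stdio.h>

    struct Backend_base {
      unsigned int currently_polling;
      unsigned int pending_count;
      double idle_gc_period;
      double idle_gc_last_time;
    };

    // A concrete backend embeds the base as its first member, so a pointer
    // to it may be cast to struct Backend_base *.
    typedef struct demo_backend {
      struct Backend_base base;   // must come first for the cast to be valid
      int implementation_detail;
    } demo_backend_t;

    // Shared helpers operate on the base type and work for any backend.
    static void demo_track_pending(struct Backend_base *base) {
      base->pending_count++;
    }

    int main(void) {
      demo_backend_t backend = { .base = {0}, .implementation_detail = 42 };
      demo_track_pending((struct Backend_base *)&backend);
      printf("pending ops: %u\n", backend.base.pending_count);
      return 0;
    }
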
@@ -26,75 +27,19 @@ struct io_internal_read_struct {

  #define StringValue(v) rb_string_value(&(v))

- int io_setstrbuf(VALUE *str, long len) {
-   #ifdef _WIN32
-     len = (len + 1) & ~1L; /* round up for wide char */
-   #endif
-   if (*str == Qnil) {
-     *str = rb_str_new(0, len);
-     return 1;
-   }
-   else {
-     VALUE s = StringValue(*str);
-     long clen = RSTRING_LEN(s);
-     if (clen >= len) {
-       rb_str_modify(s);
-       return 0;
-     }
-     len -= clen;
-   }
-   rb_str_modify_expand(*str, len);
-   return 0;
- }
-
- #define MAX_REALLOC_GAP 4096
-
- inline void io_shrink_read_string(VALUE str, long n) {
-   if (rb_str_capacity(str) - n > MAX_REALLOC_GAP) {
-     rb_str_resize(str, n);
-   }
- }
-
- void io_set_read_length(VALUE str, long n, int shrinkable) {
-   if (RSTRING_LEN(str) != n) {
-     rb_str_modify(str);
-     rb_str_set_len(str, n);
-     if (shrinkable) io_shrink_read_string(str, n);
-   }
- }
-
- inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
-   if (fptr->encs.enc) {
-     return fptr->encs.enc;
-   }
-   return rb_default_external_encoding();
- }
-
- VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-   OBJ_TAINT(str);
-   rb_enc_associate(str, io_read_encoding(fptr));
-   return str;
- }
+ int io_setstrbuf(VALUE *str, long len);
+ void io_shrink_read_string(VALUE str, long n);
+ void io_set_read_length(VALUE str, long n, int shrinkable);
+ rb_encoding* io_read_encoding(rb_io_t *fptr);
+ VALUE io_enc_str(VALUE str, rb_io_t *fptr);

  //////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////

- inline VALUE backend_await(Backend_t *backend) {
-   VALUE ret;
-   backend->pending_count++;
-   ret = Thread_switch_fiber(rb_thread_current());
-   backend->pending_count--;
-   RB_GC_GUARD(ret);
-   return ret;
- }
-
- inline VALUE backend_snooze() {
-   Fiber_make_runnable(rb_fiber_current(), Qnil);
-   return Thread_switch_fiber(rb_thread_current());
- }
+ VALUE backend_await(struct Backend_base *backend);
+ VALUE backend_snooze();

  // macros for doing read loops
-
  #define READ_LOOP_PREPARE_STR() { \
    str = Qnil; \
    shrinkable = io_setstrbuf(&str, len); \
@@ -117,63 +62,13 @@ inline VALUE backend_snooze() {
    READ_LOOP_PREPARE_STR(); \
  }

- inline void rectify_io_file_pos(rb_io_t *fptr) {
-   // Apparently after reopening a closed file, the file position is not reset,
-   // which causes the read to fail. Fortunately we can use fptr->rbuf.len to
-   // find out if that's the case.
-   // See: https://github.com/digital-fabric/polyphony/issues/30
-   if (fptr->rbuf.len > 0) {
-     lseek(fptr->fd, -fptr->rbuf.len, SEEK_CUR);
-     fptr->rbuf.len = 0;
-   }
- }
-
- inline double current_time() {
-   struct timespec ts;
-   clock_gettime(CLOCK_MONOTONIC, &ts);
-   long long ns = ts.tv_sec;
-   ns = ns * 1e9 + ts.tv_nsec;
-   double t = ns;
-   return t / 1e9;
- }
-
- inline VALUE backend_timeout_exception(VALUE exception) {
-   if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
-     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
-   else if (rb_obj_is_kind_of(exception, rb_cClass) == Qtrue)
-     return rb_funcall(exception, ID_new, 0);
-   else
-     return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
- }
+ void rectify_io_file_pos(rb_io_t *fptr);
+ double current_time();
+ VALUE backend_timeout_exception(VALUE exception);
+ VALUE Backend_timeout_ensure_safe(VALUE arg);
+ VALUE Backend_timeout_ensure_safe(VALUE arg);
+ VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+ void backend_run_idle_tasks(struct Backend_base *base);
+ void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);

- VALUE Backend_timeout_safe(VALUE arg) {
-   return rb_yield(arg);
- }
-
- VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
-   return exception;
- }
-
- VALUE Backend_timeout_ensure_safe(VALUE arg) {
-   return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
- }
-
- static VALUE empty_string = Qnil;
-
- VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags) {
-   switch (RARRAY_LEN(ary)) {
-   case 0:
-     return Qnil;
-   case 1:
-     return Backend_send(self, io, RARRAY_AREF(ary, 0), flags);
-   default:
-     if (empty_string == Qnil) {
-       empty_string = rb_str_new_literal("");
-       rb_global_variable(&empty_string);
-     }
-     VALUE joined = rb_ary_join(ary, empty_string);
-     VALUE result = Backend_send(self, io, joined, flags);
-     RB_GC_GUARD(joined);
-     return result;
-   }
- }
+ #endif /* BACKEND_COMMON_H */
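
Among the new declarations, backend_run_idle_tasks pairs with the idle_gc_period and idle_gc_last_time fields of struct Backend_base: the backend can be told to run the garbage collector once it has been idle for a configurable period (see Backend_idle_gc_period_set in backend_io_uring.c below). A plausible sketch of such a helper, assuming it simply triggers a GC cycle once the period has elapsed; this illustrates the mechanism and is not the gem's exact implementation:

    #include <time.h>
    #include "ruby.h"

    struct Backend_base {           // as declared in backend_common.h above
      unsigned int currently_polling;
      unsigned int pending_count;
      double idle_gc_period;
      double idle_gc_last_time;
    };

    // Monotonic clock in seconds, mirroring the current_time() declared above.
    static double demo_now(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    // Illustrative idle-GC helper: a period of 0 (the default) disables it;
    // otherwise run a GC cycle once the period has elapsed and reset the timer.
    void demo_run_idle_tasks(struct Backend_base *base) {
      if (base->idle_gc_period == 0) return;

      double now = demo_now();
      if (now - base->idle_gc_last_time < base->idle_gc_period) return;

      base->idle_gc_last_time = now;
      rb_gc_start();
    }
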
--- a/ext/polyphony/backend_io_uring.c
+++ b/ext/polyphony/backend_io_uring.c
@@ -4,7 +4,6 @@
  #include <sys/socket.h>
  #include <sys/uio.h>
  #include <unistd.h>
- #include <fcntl.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>
  #include <stdnoreturn.h>
@@ -19,39 +18,21 @@
  #include "backend_io_uring_context.h"
  #include "ruby/thread.h"
  #include "ruby/io.h"
+ #include "backend_common.h"

  VALUE SYM_io_uring;
+ VALUE SYM_send;
+ VALUE SYM_splice;
+ VALUE SYM_write;

  #ifdef POLYPHONY_UNSET_NONBLOCK
- ID ID_ivar_is_nonblocking;
-
- // One of the changes introduced in Ruby 3.0 as part of the work on the
- // FiberScheduler interface is that all created sockets are marked as
- // non-blocking. This prevents the io_uring backend from working correctly,
- // since it will return an EAGAIN error just like a normal syscall. So here
- // instead of setting O_NONBLOCK (which is required for the libev backend), we
- // unset it.
- inline void io_unset_nonblock(rb_io_t *fptr, VALUE io) {
-   VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
-   if (is_nonblocking == Qfalse) return;
-
-   rb_ivar_set(io, ID_ivar_is_nonblocking, Qfalse);
-
-   int oflags = fcntl(fptr->fd, F_GETFL);
-   if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
-   oflags &= !O_NONBLOCK;
-   fcntl(fptr->fd, F_SETFL, oflags);
- }
+ #define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
  #else
- // NOP
  #define io_unset_nonblock(fptr, io)
  #endif

  typedef struct Backend_t {
-   // common fields
-   unsigned int currently_polling;
-   unsigned int pending_count;
-   unsigned int poll_no_wait_count;
+   struct Backend_base base;

    // implementation-specific fields
    struct io_uring ring;
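
io_unset_nonblock is now a thin macro over the shared io_verify_blocking_mode helper declared in backend_common.h, which reconciles an fd's O_NONBLOCK flag with the mode the backend needs (the io_uring backend wants blocking fds, since io_uring itself supplies the asynchrony). The removed open-coded version contained a subtle bug: oflags &= !O_NONBLOCK uses logical rather than bitwise negation, so it zeroed all flags instead of clearing just O_NONBLOCK. A sketch of what such a helper can look like, modeled on the removed code rather than copied from the gem (it omits the Ruby IO object that the real signature also receives, presumably used for caching the known mode):

    #include <fcntl.h>
    #include "ruby.h"
    #include "ruby/io.h"

    // Ensure the fd's O_NONBLOCK flag matches the requested mode:
    // blocking == Qtrue clears O_NONBLOCK, blocking == Qfalse sets it.
    void demo_verify_blocking_mode(rb_io_t *fptr, VALUE blocking) {
      int flags = fcntl(fptr->fd, F_GETFL);
      if (flags == -1) return;

      int nonblocking = flags & O_NONBLOCK;
      if (blocking == Qtrue && nonblocking)
        fcntl(fptr->fd, F_SETFL, flags & ~O_NONBLOCK);   // bitwise ~, not logical !
      else if (blocking == Qfalse && !nonblocking)
        fcntl(fptr->fd, F_SETFL, flags | O_NONBLOCK);
    }
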
@@ -61,8 +42,6 @@ typedef struct Backend_t {
    int event_fd;
  } Backend_t;

- #include "backend_common.h"
-
  static size_t Backend_size(const void *ptr) {
    return sizeof(Backend_t);
  }
@@ -86,9 +65,11 @@ static VALUE Backend_initialize(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   backend->currently_polling = 0;
-   backend->pending_count = 0;
-   backend->poll_no_wait_count = 0;
+   backend->base.currently_polling = 0;
+   backend->base.pending_count = 0;
+   backend->base.idle_gc_period = 0;
+   backend->base.idle_gc_last_time = 0;
+
    backend->pending_sqes = 0;
    backend->prepared_limit = 2048;

@@ -116,9 +97,8 @@ VALUE Backend_post_fork(VALUE self) {
    io_uring_queue_exit(&backend->ring);
    io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
    context_store_free(&backend->store);
-   backend->currently_polling = 0;
-   backend->pending_count = 0;
-   backend->poll_no_wait_count = 0;
+   backend->base.currently_polling = 0;
+   backend->base.pending_count = 0;
    backend->pending_sqes = 0;

    return self;
@@ -128,7 +108,7 @@ unsigned int Backend_pending_count(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   return backend->pending_count;
+   return backend->base.pending_count;
  }

  typedef struct poll_context {
@@ -150,26 +130,18 @@ static inline bool cq_ring_needs_flush(struct io_uring *ring) {
    return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
  }

- void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
+ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
    op_context_t *ctx = io_uring_cqe_get_data(cqe);
    if (!ctx) return;

    ctx->result = cqe->res;
-
-   if (ctx->completed)
-     // already marked as deleted as result of fiber resuming before op
-     // completion, so we can release the context
-     context_store_release(&backend->store, ctx);
-   else {
-     // otherwise, we mark it as completed, schedule the fiber and let it deal
-     // with releasing the context
-     ctx->completed = 1;
-     if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
-   }
+   if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+     Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+   context_store_release(&backend->store, ctx);
  }

  // adapted from io_uring_peek_batch_cqe in queue.c
- // this peeks at cqes and for each one
+ // this peeks at cqes and handles each available cqe
  void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
    struct io_uring *ring = &backend->ring;
    bool overflow_checked = false;
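
This hunk is the heart of the release's refactoring: op contexts move from a boolean completed flag to a ref_count, and the OP_CONTEXT_ACQUIRE/OP_CONTEXT_RELEASE macros give way to plain context_store_acquire/context_store_release calls. Judging by the call sites, an acquired context carries one reference for the awaiting fiber and one for the kernel completion, and release returns non-zero only when the last reference is dropped, i.e. when the operation actually completed. The new shape also removes a latent use-after-release in the old call sites, which read ctx->completed after OP_CONTEXT_RELEASE had already returned the context to the store; the call sites below now capture the return value of context_store_release instead. A self-contained sketch of the inferred counting scheme (demo_ names are illustrative):

    #include <assert.h>
    #include <stdio.h>

    typedef struct demo_op_context {
      unsigned int ref_count;
      int result;
    } demo_op_context_t;

    // Acquire: one reference for the awaiting fiber, one for the completion.
    static void demo_acquire(demo_op_context_t *ctx) {
      ctx->ref_count = 2;
      ctx->result = 0;
    }

    // Release: drop one reference. Returns 1 when the count hits zero, which
    // at a fiber's release site means the completion side released first,
    // i.e. the operation ran to completion.
    static int demo_release(demo_op_context_t *ctx) {
      assert(ctx->ref_count > 0);
      ctx->ref_count--;
      if (ctx->ref_count) return 0;
      // ...here the real store would recycle ctx into its free list...
      return 1;
    }

    int main(void) {
      demo_op_context_t ctx;

      // Normal flow: the completion handler releases, then the fiber does.
      demo_acquire(&ctx);
      demo_release(&ctx);                              // completion side
      printf("completed: %d\n", demo_release(&ctx));   // fiber side -> 1

      // Interrupted flow: the fiber is resumed early (e.g. by an exception).
      demo_acquire(&ctx);
      printf("completed: %d\n", demo_release(&ctx));   // fiber side -> 0
      demo_release(&ctx);   // the eventual (cancelled) CQE frees the context
      return 0;
    }
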
@@ -205,9 +177,9 @@ void io_uring_backend_poll(Backend_t *backend) {
      io_uring_submit(&backend->ring);
    }

-   backend->currently_polling = 1;
+   backend->base.currently_polling = 1;
    rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
-   backend->currently_polling = 0;
+   backend->base.currently_polling = 0;
    if (poll_ctx.result < 0) return;

    io_uring_backend_handle_completion(poll_ctx.cqe, backend);
@@ -219,16 +191,6 @@ VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue
    Backend_t *backend;
    GetBackend(self, backend);

-   if (is_nowait) {
-     backend->poll_no_wait_count++;
-     if (backend->poll_no_wait_count < 10) return self;
-
-     long runnable_count = Runqueue_len(runqueue);
-     if (backend->poll_no_wait_count < runnable_count) return self;
-   }
-
-   backend->poll_no_wait_count = 0;
-
    if (is_nowait && backend->pending_sqes) {
      backend->pending_sqes = 0;
      io_uring_submit(&backend->ring);
@@ -246,7 +208,7 @@ VALUE Backend_wakeup(VALUE self) {
    Backend_t *backend;
    GetBackend(self, backend);

-   if (backend->currently_polling) {
+   if (backend->base.currently_polling) {
      // Since we're currently blocking while waiting for a completion, we add a
      // NOP which would cause the io_uring_enter syscall to return
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -281,12 +243,11 @@ int io_uring_backend_defer_submit_and_await(
    io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
    io_uring_backend_defer_submit(backend);

-   switchpoint_result = backend_await(backend);
+   switchpoint_result = backend_await((struct Backend_base *)backend);

-   if (!ctx->completed) {
+   if (ctx->ref_count > 1) {
+     // op was not completed (an exception was raised), so we need to cancel it
      ctx->result = -ECANCELED;
-
-     // op was not completed, so we need to cancel it
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_cancel(sqe, ctx, 0);
      backend->pending_sqes = 0;
@@ -300,7 +261,7 @@ int io_uring_backend_defer_submit_and_await(
  }

  VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
    VALUE resumed_value = Qnil;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -315,8 +276,8 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
    Backend_t *backend;
    rb_io_t *fptr;
    long dynamic_len = length == Qnil;
-   long len = dynamic_len ? 4096 : NUM2INT(length);
-   int shrinkable = io_setstrbuf(&str, len);
+   long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
+   int shrinkable = io_setstrbuf(&str, buffer_size);
    char *buf = RSTRING_PTR(str);
    long total = 0;
    int read_to_eof = RTEST(to_eof);
@@ -332,14 +293,14 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-     io_uring_prep_read(sqe, fptr->fd, buf, len - total, -1);
+     io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -350,14 +311,15 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
      total += result;
      if (!read_to_eof) break;

-     if (total == len) {
+     if (total == buffer_size) {
        if (!dynamic_len) break;

+       // resize buffer
        rb_str_resize(str, total);
-       rb_str_modify_expand(str, len);
+       rb_str_modify_expand(str, buffer_size);
        buf = RSTRING_PTR(str) + total;
        shrinkable = 0;
-       len += len;
+       buffer_size += buffer_size;
      }
      else buf += result;
    }
@@ -392,14 +354,14 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

      ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -439,14 +401,14 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

      ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -482,14 +444,14 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {

    while (left > 0) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_write(sqe, fptr->fd, buf, left, -1);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -531,17 +493,17 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      if (TEST_EXCEPTION(resume_value)) {
        free(iov);
        RAISE_EXCEPTION(resume_value);
      }
-     if (!ctx->completed) {
+     if (!completed) {
        free(iov);
        return resume_value;
      }
@@ -604,14 +566,14 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -651,14 +613,14 @@ VALUE Backend_recv_loop(VALUE self, VALUE io) {

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -697,14 +659,14 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -740,14 +702,14 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {

    while (left > 0) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (result < 0)
@@ -774,14 +736,14 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc

    while (1) {
      VALUE resume_value = Qnil;
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);

      int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;
      RB_GC_GUARD(resume_value);

      if (fd < 0)
@@ -845,20 +807,20 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
    VALUE resume_value = Qnil;

    while (1) {
-     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SPLICE);
+     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
      struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
      io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);

      int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     int completed = context_store_release(&backend->store, ctx);
      RAISE_IF_EXCEPTION(resume_value);
-     if (!ctx->completed) return resume_value;
+     if (!completed) return resume_value;

      if (result < 0)
        rb_syserr_fail(-result, strerror(-result));

-     if (result == 0 || !loop) return INT2NUM(total);
      total += result;
+     if (result == 0 || !loop) return INT2NUM(total);
    }

    RB_GC_GUARD(resume_value);
@@ -896,13 +858,13 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
    addr.sin_port = htons(NUM2INT(port));

    VALUE resume_value = Qnil;
-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
    io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
    int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
-   OP_CONTEXT_RELEASE(&backend->store, ctx);
+   int completed = context_store_release(&backend->store, ctx);
    RAISE_IF_EXCEPTION(resume_value);
-   if (!ctx->completed) return resume_value;
+   if (!completed) return resume_value;
    RB_GC_GUARD(resume_value);

    if (result < 0) rb_syserr_fail(-result, strerror(-result));
@@ -942,12 +904,11 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duratio
    struct __kernel_timespec ts = double_to_timespec(duration);
    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
    io_uring_prep_timeout(sqe, &ts, 0, 0);

    io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
-   OP_CONTEXT_RELEASE(&backend->store, ctx);
-   return ctx->completed;
+   return context_store_release(&backend->store, ctx);
  }

  VALUE Backend_sleep(VALUE self, VALUE duration) {
@@ -995,7 +956,7 @@ struct Backend_timeout_ctx {

  VALUE Backend_timeout_ensure(VALUE arg) {
    struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
-   if (!timeout_ctx->ctx->completed) {
+   if (timeout_ctx->ctx->ref_count) {
      timeout_ctx->ctx->result = -ECANCELED;

      // op was not completed, so we need to cancel it
@@ -1004,7 +965,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
      timeout_ctx->backend->pending_sqes = 0;
      io_uring_submit(&timeout_ctx->backend->ring);
    }
-   OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+   context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
    return Qnil;
  }

@@ -1022,7 +983,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {

    struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

-   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
    ctx->resume_value = timeout;
    io_uring_prep_timeout(sqe, &ts, 0, 0);
    io_uring_sqe_set_data(sqe, ctx);
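
For context on the timeout hunks: Backend_timeout_ensure reads as the ensure half of an rb_ensure pair inside Backend_timeout, so the timeout's SQE is cancelled and its context released however the timed block exits; the diff itself does not show the rb_ensure call, so take the pairing as an educated reading of the surrounding code. A minimal sketch of that pattern, with stub fields standing in for the gem's Backend_t and op_context_t:

    #include "ruby.h"

    struct demo_timeout_ctx {
      void *backend;   // stands in for Backend_t *
      void *ctx;       // stands in for op_context_t *
    };

    static VALUE demo_timeout_body(VALUE arg) {
      return rb_yield(arg);   // run the user-supplied block
    }

    static VALUE demo_timeout_ensure(VALUE arg) {
      struct demo_timeout_ctx *tctx = (struct demo_timeout_ctx *)arg;
      // The real ensure proc cancels the timeout SQE if its context is still
      // pending (ref_count non-zero), then releases the context.
      (void)tctx;
      return Qnil;
    }

    static VALUE demo_timeout(VALUE self, VALUE arg) {
      struct demo_timeout_ctx tctx = { NULL, NULL };
      // rb_ensure guarantees the ensure proc runs even if the body raises.
      return rb_ensure(demo_timeout_body, arg, demo_timeout_ensure, (VALUE)&tctx);
    }
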
@@ -1090,6 +1051,145 @@ VALUE Backend_kind(VALUE self) {
    return SYM_io_uring;
  }

+ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+   io_unset_nonblock(fptr, io);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_write(sqe, fptr->fd, buf, len, -1);
+   return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+   io_unset_nonblock(fptr, io);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+   int flags_int = NUM2INT(flags);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+   return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE maxlen) {
+   rb_io_t *src_fptr;
+   rb_io_t *dest_fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(src, ID_ivar_io);
+   if (underlying_io != Qnil) src = underlying_io;
+   GetOpenFile(src, src_fptr);
+   io_unset_nonblock(src_fptr, src);
+
+   underlying_io = rb_ivar_get(dest, ID_ivar_io);
+   if (underlying_io != Qnil) dest = underlying_io;
+   dest = rb_io_get_write_io(dest);
+   GetOpenFile(dest, dest_fptr);
+   io_unset_nonblock(dest_fptr, dest);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
+   return sqe;
+ }
+
+ VALUE Backend_chain(int argc, VALUE *argv, VALUE self) {
+   VALUE resume_value = Qnil;
+   unsigned int sqe_count = 0;
+   struct io_uring_sqe *last_sqe = 0;
+   Backend_t *backend;
+   GetBackend(self, backend);
+   if (argc == 0) return resume_value;
+
+   op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+   for (int i = 0; i < argc; i++) {
+     VALUE op = argv[i];
+     VALUE op_type = RARRAY_AREF(op, 0);
+     VALUE op_len = RARRAY_LEN(op);
+
+     if (op_type == SYM_write && op_len == 3) {
+       last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
+     }
+     else if (op_type == SYM_send && op_len == 4)
+       last_sqe = Backend_chain_prepare_send(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+     else if (op_type == SYM_splice && op_len == 4)
+       last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+     else {
+       if (sqe_count) {
+         io_uring_sqe_set_data(last_sqe, ctx);
+         io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
+
+         ctx->ref_count = sqe_count;
+         ctx->result = -ECANCELED;
+         struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+         io_uring_prep_cancel(sqe, ctx, 0);
+         backend->pending_sqes = 0;
+         io_uring_submit(&backend->ring);
+       }
+       else {
+         ctx->ref_count = 1;
+         context_store_release(&backend->store, ctx);
+       }
+       rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
+     }
+
+     io_uring_sqe_set_data(last_sqe, ctx);
+     unsigned int flags = (i == argc - 1) ? IOSQE_ASYNC : IOSQE_ASYNC & IOSQE_IO_LINK;
+     io_uring_sqe_set_flags(last_sqe, flags);
+     sqe_count++;
+   }
+
+   ctx->ref_count = sqe_count + 1;
+   io_uring_backend_defer_submit(backend);
+   resume_value = backend_await((struct Backend_base *)backend);
+   int result = ctx->result;
+   int completed = context_store_release(&backend->store, ctx);
+   if (!completed) {
+     // op was not completed (an exception was raised), so we need to cancel it
+     ctx->result = -ECANCELED;
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_cancel(sqe, ctx, 0);
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+     RAISE_IF_EXCEPTION(resume_value);
+     return resume_value;
+   }
+
+   RB_GC_GUARD(resume_value);
+   return INT2NUM(result);
+ }
+
+ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   backend->base.idle_gc_period = NUM2DBL(period);
+   backend->base.idle_gc_last_time = current_time();
+   return self;
+ }
+
+ inline VALUE Backend_run_idle_tasks(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   backend_run_idle_tasks(&backend->base);
+   return self;
+ }
+
  void Init_Backend() {
    VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
    rb_define_alloc_func(cBackend, Backend_allocate);
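
The big addition of this release: Backend#chain submits several operations ([:write, io, str], [:send, io, str, flags] or [:splice, src, dest, maxlen], matching the symbols registered in Init_Backend below) as a single io_uring chain and resumes the fiber once the chain settles. Chaining relies on IOSQE_IO_LINK, which tells the kernel not to start an SQE until the previous linked one succeeds. (One observation: the non-final flags above are computed as IOSQE_ASYNC & IOSQE_IO_LINK, a bitwise AND that evaluates to 0, so as written the SQEs do not appear to be linked; a bitwise OR looks intended.) A standalone liburing example of the underlying linking mechanism, independent of the gem; build with cc demo.c -luring:

    #include <liburing.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void) {
      struct io_uring ring;
      int pipe_a[2], pipe_b[2];
      char buf[] = "hello";

      if (io_uring_queue_init(8, &ring, 0)) return 1;
      if (pipe(pipe_a) || pipe(pipe_b)) return 1;

      // First op: write the buffer into pipe_a. IOSQE_IO_LINK defers the
      // next SQE until this one completes successfully.
      struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
      io_uring_prep_write(sqe, pipe_a[1], buf, strlen(buf), 0);
      io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

      // Second op: splice those bytes from pipe_a into pipe_b.
      sqe = io_uring_get_sqe(&ring);
      io_uring_prep_splice(sqe, pipe_a[0], -1, pipe_b[1], -1, strlen(buf), 0);

      io_uring_submit(&ring);

      // Reap both completions; the kernel executed them in order.
      for (int i = 0; i < 2; i++) {
        struct io_uring_cqe *cqe;
        io_uring_wait_cqe(&ring, &cqe);
        printf("op %d result: %d\n", i, cqe->res);
        io_uring_cqe_seen(&ring, cqe);
      }
      io_uring_queue_exit(&ring);
      return 0;
    }
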
@@ -1101,6 +1201,8 @@ void Init_Backend() {
    rb_define_method(cBackend, "poll", Backend_poll, 3);
    rb_define_method(cBackend, "break", Backend_wakeup, 0);
    rb_define_method(cBackend, "kind", Backend_kind, 0);
+   rb_define_method(cBackend, "chain", Backend_chain, -1);
+   rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);

    rb_define_method(cBackend, "accept", Backend_accept, 2);
    rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
@@ -1129,6 +1231,9 @@ void Init_Backend() {
  #endif

    SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+   SYM_send = ID2SYM(rb_intern("send"));
+   SYM_splice = ID2SYM(rb_intern("splice"));
+   SYM_write = ID2SYM(rb_intern("write"));
  }

  #endif // POLYPHONY_BACKEND_LIBURING