polyphony 0.45.5 → 0.47.2

Files changed (68)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +2 -0
  3. data/.gitmodules +0 -0
  4. data/CHANGELOG.md +23 -0
  5. data/Gemfile.lock +1 -1
  6. data/README.md +3 -3
  7. data/Rakefile +1 -1
  8. data/TODO.md +21 -22
  9. data/bin/test +4 -0
  10. data/examples/core/enumerable.rb +64 -0
  11. data/examples/performance/fiber_resume.rb +43 -0
  12. data/examples/performance/fiber_transfer.rb +13 -4
  13. data/examples/performance/thread-vs-fiber/compare.rb +59 -0
  14. data/examples/performance/thread-vs-fiber/em_server.rb +33 -0
  15. data/examples/performance/thread-vs-fiber/polyphony_server.rb +10 -21
  16. data/examples/performance/thread-vs-fiber/threaded_server.rb +22 -15
  17. data/examples/performance/thread_switch.rb +44 -0
  18. data/ext/liburing/liburing.h +585 -0
  19. data/ext/liburing/liburing/README.md +4 -0
  20. data/ext/liburing/liburing/barrier.h +73 -0
  21. data/ext/liburing/liburing/compat.h +15 -0
  22. data/ext/liburing/liburing/io_uring.h +343 -0
  23. data/ext/liburing/queue.c +333 -0
  24. data/ext/liburing/register.c +187 -0
  25. data/ext/liburing/setup.c +210 -0
  26. data/ext/liburing/syscall.c +54 -0
  27. data/ext/liburing/syscall.h +18 -0
  28. data/ext/polyphony/backend.h +0 -14
  29. data/ext/polyphony/backend_common.h +129 -0
  30. data/ext/polyphony/backend_io_uring.c +995 -0
  31. data/ext/polyphony/backend_io_uring_context.c +74 -0
  32. data/ext/polyphony/backend_io_uring_context.h +53 -0
  33. data/ext/polyphony/{libev_backend.c → backend_libev.c} +304 -294
  34. data/ext/polyphony/event.c +1 -1
  35. data/ext/polyphony/extconf.rb +31 -13
  36. data/ext/polyphony/fiber.c +35 -24
  37. data/ext/polyphony/libev.c +4 -0
  38. data/ext/polyphony/libev.h +8 -2
  39. data/ext/polyphony/liburing.c +8 -0
  40. data/ext/polyphony/playground.c +51 -0
  41. data/ext/polyphony/polyphony.c +8 -5
  42. data/ext/polyphony/polyphony.h +23 -19
  43. data/ext/polyphony/polyphony_ext.c +10 -4
  44. data/ext/polyphony/queue.c +100 -35
  45. data/ext/polyphony/thread.c +10 -10
  46. data/lib/polyphony/adapters/trace.rb +2 -2
  47. data/lib/polyphony/core/exceptions.rb +0 -4
  48. data/lib/polyphony/core/global_api.rb +45 -21
  49. data/lib/polyphony/core/resource_pool.rb +12 -1
  50. data/lib/polyphony/extensions/core.rb +9 -15
  51. data/lib/polyphony/extensions/debug.rb +13 -0
  52. data/lib/polyphony/extensions/fiber.rb +8 -4
  53. data/lib/polyphony/extensions/openssl.rb +6 -0
  54. data/lib/polyphony/extensions/socket.rb +73 -10
  55. data/lib/polyphony/version.rb +1 -1
  56. data/test/helper.rb +36 -4
  57. data/test/io_uring_test.rb +55 -0
  58. data/test/stress.rb +4 -1
  59. data/test/test_backend.rb +63 -6
  60. data/test/test_ext.rb +1 -2
  61. data/test/test_fiber.rb +55 -20
  62. data/test/test_global_api.rb +107 -35
  63. data/test/test_queue.rb +117 -0
  64. data/test/test_resource_pool.rb +21 -0
  65. data/test/test_socket.rb +2 -2
  66. data/test/test_throttler.rb +3 -6
  67. data/test/test_trace.rb +7 -5
  68. metadata +28 -3
data/ext/liburing/syscall.h
@@ -0,0 +1,18 @@
+ /* SPDX-License-Identifier: MIT */
+ #ifndef LIBURING_SYSCALL_H
+ #define LIBURING_SYSCALL_H
+
+ #include <signal.h>
+
+ struct io_uring_params;
+
+ /*
+  * System calls
+  */
+ extern int __sys_io_uring_setup(unsigned entries, struct io_uring_params *p);
+ extern int __sys_io_uring_enter(int fd, unsigned to_submit,
+   unsigned min_complete, unsigned flags, sigset_t *sig);
+ extern int __sys_io_uring_register(int fd, unsigned int opcode, const void *arg,
+   unsigned int nr_args);
+
+ #endif
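Note: these declarations are implemented in data/ext/liburing/syscall.c (+54 in this release, not reproduced in this excerpt). A minimal sketch of how such wrappers are typically written, assuming the __NR_io_uring_* numbers are available from <sys/syscall.h> — illustrative, not verbatim from the vendored file:

/* illustrative wrapper: forwards directly to the raw syscall */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

int __sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
                         unsigned flags, sigset_t *sig) {
  /* _NSIG / 8 is the sigset size the kernel expects */
  return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
                 flags, sig, _NSIG / 8);
}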
data/ext/polyphony/backend.h
@@ -3,20 +3,6 @@
 
   #include "ruby.h"
 
- // backend interface function signatures
-
- // VALUE LibevBackend_accept(VALUE self, VALUE sock);
- // VALUE LibevBackend_accept_loop(VALUE self, VALUE sock);
- // VALUE LibevBackend_connect(VALUE self, VALUE sock, VALUE host, VALUE port);
- // VALUE LibevBackend_finalize(VALUE self);
- // VALUE LibevBackend_post_fork(VALUE self);
- // VALUE LibevBackend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof);
- // VALUE LibevBackend_read_loop(VALUE self, VALUE io);
- // VALUE LibevBackend_sleep(VALUE self, VALUE duration);
- // VALUE LibevBackend_wait_io(VALUE self, VALUE io, VALUE write);
- // VALUE LibevBackend_wait_pid(VALUE self, VALUE pid);
- // VALUE LibevBackend_write(int argc, VALUE *argv, VALUE self);
-
  typedef VALUE (* backend_pending_count_t)(VALUE self);
  typedef VALUE (*backend_poll_t)(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
  typedef VALUE (* backend_ref_t)(VALUE self);
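These typedefs back a function-pointer table that lets both backends (libev and the new io_uring one) plug into the same runtime; the __BACKEND__ assignments at the bottom of backend_io_uring.c below populate it. A sketch of the shape such a table takes — the field names mirror those assignments, but the actual struct definition is not part of this excerpt, so treat this as hypothetical:

typedef struct backend_interface {
  backend_pending_count_t pending_count;
  backend_poll_t          poll;
  backend_ref_t           ref;
  /* ...one pointer per operation: ref_count, reset_ref_count,
     unref, wait_event, wakeup, and so on... */
} backend_interface_t;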
data/ext/polyphony/backend_common.h
@@ -0,0 +1,129 @@
+ #include <time.h>
+
+ #include "ruby.h"
+ #include "ruby/io.h"
+
+ //////////////////////////////////////////////////////////////////////
+ //////////////////////////////////////////////////////////////////////
+ // the following is copied verbatim from the Ruby source code (io.c)
+ struct io_internal_read_struct {
+   int fd;
+   int nonblock;
+   void *buf;
+   size_t capa;
+ };
+
+ #define StringValue(v) rb_string_value(&(v))
+
+ inline int io_setstrbuf(VALUE *str, long len) {
+ #ifdef _WIN32
+   len = (len + 1) & ~1L;  /* round up for wide char */
+ #endif
+   if (NIL_P(*str)) {
+     *str = rb_str_new(0, len);
+     return 1;
+   }
+   else {
+     VALUE s = StringValue(*str);
+     long clen = RSTRING_LEN(s);
+     if (clen >= len) {
+       rb_str_modify(s);
+       return 0;
+     }
+     len -= clen;
+   }
+   rb_str_modify_expand(*str, len);
+   return 0;
+ }
+
+ #define MAX_REALLOC_GAP 4096
+
+ inline void io_shrink_read_string(VALUE str, long n) {
+   if (rb_str_capacity(str) - n > MAX_REALLOC_GAP) {
+     rb_str_resize(str, n);
+   }
+ }
+
+ inline void io_set_read_length(VALUE str, long n, int shrinkable) {
+   if (RSTRING_LEN(str) != n) {
+     rb_str_modify(str);
+     rb_str_set_len(str, n);
+     if (shrinkable) io_shrink_read_string(str, n);
+   }
+ }
+
+ inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
+   if (fptr->encs.enc) {
+     return fptr->encs.enc;
+   }
+   return rb_default_external_encoding();
+ }
+
+ inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
+   OBJ_TAINT(str);
+   rb_enc_associate(str, io_read_encoding(fptr));
+   return str;
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //////////////////////////////////////////////////////////////////////
+
+ inline VALUE backend_await(Backend_t *backend) {
+   VALUE ret;
+   backend->ref_count++;
+   ret = Thread_switch_fiber(rb_thread_current());
+   backend->ref_count--;
+   RB_GC_GUARD(ret);
+   return ret;
+ }
+
+ inline VALUE backend_snooze() {
+   Fiber_make_runnable(rb_fiber_current(), Qnil);
+   return Thread_switch_fiber(rb_thread_current());
+ }
+
+ // macros for doing read loops
+
+ #define READ_LOOP_PREPARE_STR() { \
+   str = Qnil; \
+   shrinkable = io_setstrbuf(&str, len); \
+   buf = RSTRING_PTR(str); \
+   total = 0; \
+   OBJ_TAINT(str); \
+ }
+
+ #define READ_LOOP_YIELD_STR() { \
+   io_set_read_length(str, total, shrinkable); \
+   io_enc_str(str, fptr); \
+   rb_yield(str); \
+   READ_LOOP_PREPARE_STR(); \
+ }
+
+ inline void rectify_io_file_pos(rb_io_t *fptr) {
+   // Apparently after reopening a closed file, the file position is not reset,
+   // which causes the read to fail. Fortunately we can use fptr->rbuf.len to
+   // find out if that's the case.
+   // See: https://github.com/digital-fabric/polyphony/issues/30
+   if (fptr->rbuf.len > 0) {
+     lseek(fptr->fd, -fptr->rbuf.len, SEEK_CUR);
+     fptr->rbuf.len = 0;
+   }
+ }
+
+ inline double current_time() {
+   struct timespec ts;
+   clock_gettime(CLOCK_MONOTONIC, &ts);
+   long long ns = ts.tv_sec;
+   ns = ns * 1000000000 + ts.tv_nsec;
+   double t = ns;
+   return t / 1e9;
+ }
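Note: current_time() collapses a CLOCK_MONOTONIC timespec into a double number of seconds, which is what the timer-loop drift arithmetic in backend_io_uring.c below operates on. The same technique in a self-contained sketch, runnable outside the extension:

#include <stdio.h>
#include <time.h>

/* monotonic clock folded into a double number of seconds */
static double monotonic_seconds(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  long long ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
  return ns / 1e9;
}

int main(void) {
  double t0 = monotonic_seconds();
  /* ...do some work... */
  printf("elapsed: %.6f s\n", monotonic_seconds() - t0);
  return 0;
}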
+
+ inline VALUE backend_timeout_exception(VALUE exception) {
+   if (RTEST(rb_obj_is_kind_of(exception, rb_cArray)))
+     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
+   else if (RTEST(rb_obj_is_kind_of(exception, rb_cClass)))
+     return rb_funcall(exception, ID_new, 0);
+   else
+     return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
+ }
data/ext/polyphony/backend_io_uring.c
@@ -0,0 +1,995 @@
+ #ifdef POLYPHONY_BACKEND_LIBURING
+
+ #include <netdb.h>
+ #include <sys/socket.h>
+ #include <sys/uio.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include <netinet/in.h>
+ #include <arpa/inet.h>
+ #include <stdnoreturn.h>
+ #include <poll.h>
+ #include <sys/types.h>
+ #include <sys/eventfd.h>
+ #include <sys/wait.h>
+ #include <errno.h>
+
+ #include "polyphony.h"
+ #include "../liburing/liburing.h"
+ #include "ruby/thread.h"
+ #include "backend_io_uring_context.h"
+
+ #ifndef __NR_pidfd_open
+ #define __NR_pidfd_open 434   /* System call # on most architectures */
+ #endif
+
+ static int pidfd_open(pid_t pid, unsigned int flags) {
+   return syscall(__NR_pidfd_open, pid, flags);
+ }
+
+ VALUE cTCPSocket;
+ VALUE SYM_io_uring;
+
+ typedef struct Backend_t {
+   struct io_uring ring;
+   op_context_store_t store;
+   int waiting_for_cqe;
+   unsigned int ref_count;
+   unsigned int run_no_wait_count;
+   unsigned int pending_sqes;
+   unsigned int prepared_limit;
+   int event_fd;
+ } Backend_t;
+
+ #include "backend_common.h"
+
+ static size_t Backend_size(const void *ptr) {
+   return sizeof(Backend_t);
+ }
+
+ static const rb_data_type_t Backend_type = {
+   "IOUringBackend",
+   {0, 0, Backend_size,},
+   0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+ };
+
+ static VALUE Backend_allocate(VALUE klass) {
+   Backend_t *backend = ALLOC(Backend_t);
+
+   return TypedData_Wrap_Struct(klass, &Backend_type, backend);
+ }
+
+ #define GetBackend(obj, backend) \
+   TypedData_Get_Struct((obj), Backend_t, &Backend_type, (backend))
+
+ static VALUE Backend_initialize(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   backend->waiting_for_cqe = 0;
+   backend->ref_count = 0;
+   backend->run_no_wait_count = 0;
+   backend->pending_sqes = 0;
+   backend->prepared_limit = 1024;
+
+   context_store_initialize(&backend->store);
+   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
+   backend->event_fd = -1;
+
+   return Qnil;
+ }
+
+ VALUE Backend_finalize(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   io_uring_queue_exit(&backend->ring);
+   if (backend->event_fd != -1) close(backend->event_fd);
+   context_store_free(&backend->store);
+   return self;
+ }
+
+ VALUE Backend_post_fork(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   io_uring_queue_exit(&backend->ring);
+   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
+   context_store_free(&backend->store);
+   backend->waiting_for_cqe = 0;
+   backend->ref_count = 0;
+   backend->run_no_wait_count = 0;
+   backend->pending_sqes = 0;
+
+   return self;
+ }
+
+ VALUE Backend_ref(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   backend->ref_count++;
+   return self;
+ }
+
+ VALUE Backend_unref(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   backend->ref_count--;
+   return self;
+ }
+
+ int Backend_ref_count(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   return backend->ref_count;
+ }
+
+ void Backend_reset_ref_count(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   backend->ref_count = 0;
+ }
+
+ VALUE Backend_pending_count(VALUE self) {
+   return INT2NUM(0);
+ }
+
+ typedef struct poll_context {
+   struct io_uring     *ring;
+   struct io_uring_cqe *cqe;
+   int                 result;
+ } poll_context_t;
+
+ extern int __sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete, unsigned flags, sigset_t *sig);
+
+ void *io_uring_backend_poll_without_gvl(void *ptr) {
+   poll_context_t *ctx = (poll_context_t *)ptr;
+   ctx->result = io_uring_wait_cqe(ctx->ring, &ctx->cqe);
+   return NULL;
+ }
+
+ // copied from queue.c
+ static inline bool cq_ring_needs_flush(struct io_uring *ring) {
+   return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
+ }
+
+ void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
+   op_context_t *ctx = io_uring_cqe_get_data(cqe);
+   if (ctx == 0) return;
+
+   ctx->result = cqe->res;
+
+   if (ctx->completed)
+     // already marked as deleted as result of fiber resuming before op
+     // completion, so we can release the context
+     context_store_release(&backend->store, ctx);
+   else {
+     // otherwise, we mark it as completed, schedule the fiber and let it deal
+     // with releasing the context
+     ctx->completed = 1;
+     if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+   }
+ }
+
+ // adapted from io_uring_peek_batch_cqe in queue.c
+ // this peeks at available CQEs and handles each one
+ void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
+   struct io_uring *ring = &backend->ring;
+   bool overflow_checked = false;
+   struct io_uring_cqe *cqe;
+   unsigned head;
+   unsigned cqe_count;
+
+ again:
+   cqe_count = 0;
+   io_uring_for_each_cqe(ring, head, cqe) {
+     ++cqe_count;
+     io_uring_backend_handle_completion(cqe, backend);
+   }
+   io_uring_cq_advance(ring, cqe_count);
+
+   if (overflow_checked) goto done;
+
+   if (cq_ring_needs_flush(ring)) {
+     __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+     overflow_checked = true;
+     goto again;
+   }
+
+ done:
+   return;
+ }
+
+ void io_uring_backend_poll(Backend_t *backend) {
+   poll_context_t poll_ctx;
+   poll_ctx.ring = &backend->ring;
+   if (backend->pending_sqes) {
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+   }
+
+   backend->waiting_for_cqe = 1;
+   rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
+   backend->waiting_for_cqe = 0;
+   if (poll_ctx.result < 0) return;
+
+   io_uring_backend_handle_completion(poll_ctx.cqe, backend);
+   io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
+ }
+
+ VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
+   int is_nowait = nowait == Qtrue;
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   if (is_nowait) {
+     backend->run_no_wait_count++;
+     if (backend->run_no_wait_count < 10) return self;
+
+     long runnable_count = Runqueue_len(runqueue);
+     if (backend->run_no_wait_count < runnable_count) return self;
+   }
+
+   backend->run_no_wait_count = 0;
+
+   if (is_nowait && backend->pending_sqes) {
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+   }
+
+   COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
+   if (!is_nowait) io_uring_backend_poll(backend);
+   io_uring_backend_handle_ready_cqes(backend);
+   COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+
+   return self;
+ }
+
+ VALUE Backend_wakeup(VALUE self) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   if (backend->waiting_for_cqe) {
+     // Since we're currently blocking while waiting for a completion, we add a
+     // NOP which will cause the io_uring_enter syscall to return
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_nop(sqe);
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+
+     return Qtrue;
+   }
+
+   return Qnil;
+ }
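Note: the NOP trick above is what lets another thread break a blocked io_uring_enter call. The same submit-a-NOP-then-reap flow in a self-contained sketch (assumes liburing is installed; compile with -luring):

#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  io_uring_queue_init(8, &ring, 0);

  /* queue a NOP; completion of any SQE makes a blocked wait return */
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_nop(sqe);
  io_uring_submit(&ring);

  struct io_uring_cqe *cqe;
  io_uring_wait_cqe(&ring, &cqe);   /* returns as soon as the NOP completes */
  printf("NOP completed with res = %d\n", cqe->res);
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  return 0;
}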
+
+ inline void io_uring_backend_defer_submit(Backend_t *backend) {
+   backend->pending_sqes += 1;
+   if (backend->pending_sqes >= backend->prepared_limit) {
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+   }
+ }
+
+ int io_uring_backend_defer_submit_and_await(
+   Backend_t *backend,
+   struct io_uring_sqe *sqe,
+   op_context_t *ctx,
+   VALUE *value_ptr
+ )
+ {
+   VALUE switchpoint_result = Qnil;
+
+   io_uring_sqe_set_data(sqe, ctx);
+   io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+   io_uring_backend_defer_submit(backend);
+
+   backend->ref_count++;
+   switchpoint_result = backend_await(backend);
+   backend->ref_count--;
+
+   if (!ctx->completed) {
+     ctx->result = -ECANCELED;
+
+     // op was not completed, so we need to cancel it
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_cancel(sqe, ctx, 0);
+     backend->pending_sqes = 0;
+     io_uring_submit(&backend->ring);
+   }
+
+   if (value_ptr) (*value_ptr) = switchpoint_result;
+   RB_GC_GUARD(switchpoint_result);
+   RB_GC_GUARD(ctx->fiber);
+   return ctx->result;
+ }
+
+ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
+   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+   VALUE resumed_value = Qnil;
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);
+
+   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
+   RB_GC_GUARD(resumed_value);
+   return resumed_value;
+ }
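Note: io_uring_backend_wait_fd turns fd readiness into an awaitable operation via IORING_OP_POLL_ADD. A standalone sketch of that primitive, here polling a pipe for readability (assumes liburing):

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  int fds[2];
  pipe(fds);
  write(fds[1], "x", 1);            /* make the read end readable */

  struct io_uring ring;
  io_uring_queue_init(8, &ring, 0);

  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_poll_add(sqe, fds[0], POLLIN);  /* one-shot readiness poll */
  io_uring_submit(&ring);

  struct io_uring_cqe *cqe;
  io_uring_wait_cqe(&ring, &cqe);
  printf("poll result (revents mask): %d\n", cqe->res);
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  close(fds[0]); close(fds[1]);
  return 0;
}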
+
+ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   long dynamic_len = length == Qnil;
+   long len = dynamic_len ? 4096 : NUM2INT(length);
+   int shrinkable = io_setstrbuf(&str, len);
+   char *buf = RSTRING_PTR(str);
+   long total = 0;
+   int read_to_eof = RTEST(to_eof);
+   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+   GetBackend(self, backend);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetOpenFile(io, fptr);
+   rb_io_check_byte_readable(fptr);
+   rectify_io_file_pos(fptr);
+   OBJ_TAINT(str);
+
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_read(sqe, fptr->fd, buf, len - total, -1);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else if (result == 0)
+       break; // EOF
+     else {
+       total += result;
+       if (!read_to_eof) break;
+
+       if (total == len) {
+         if (!dynamic_len) break;
+
+         rb_str_resize(str, total);
+         rb_str_modify_expand(str, len);
+         buf = RSTRING_PTR(str) + total;
+         shrinkable = 0;
+         len += len;
+       }
+       else buf += result;
+     }
+   }
+
+   io_set_read_length(str, total, shrinkable);
+   io_enc_str(str, fptr);
+
+   if (total == 0) return Qnil;
+
+   return str;
+ }
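Note: when length is nil and to_eof is set, Backend_read doubles its buffer each time it fills (the len += len step). The same exponential-growth read loop, sketched with plain read(2) and malloc so it can run standalone:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* read fd to EOF, doubling the buffer whenever it fills; returns the
   buffer and sets *out_len to the number of bytes read */
static char *read_to_eof(int fd, size_t *out_len) {
  size_t cap = 4096, total = 0;
  char *buf = malloc(cap);
  while (1) {
    ssize_t n = read(fd, buf + total, cap - total);
    if (n <= 0) break;              /* EOF or error */
    total += n;
    if (total == cap) {
      cap *= 2;                     /* the len += len step */
      buf = realloc(buf, cap);
    }
  }
  *out_len = total;
  return buf;
}

int main(void) {
  size_t len;
  char *data = read_to_eof(0, &len);   /* read stdin to EOF */
  printf("read %zu bytes\n", len);
  free(data);
  return 0;
}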
+
+ VALUE Backend_read_loop(VALUE self, VALUE io) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE str;
+   long total;
+   long len = 8192;
+   int shrinkable;
+   char *buf;
+   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+   READ_LOOP_PREPARE_STR();
+
+   GetBackend(self, backend);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetOpenFile(io, fptr);
+   rb_io_check_byte_readable(fptr);
+   rectify_io_file_pos(fptr);
+
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
+
+     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else if (result == 0)
+       break; // EOF
+     else {
+       total = result;
+       READ_LOOP_YIELD_STR();
+     }
+   }
+
+   RB_GC_GUARD(str);
+
+   return io;
+ }
+
+ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetBackend(self, backend);
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+   long left = len;
+
+   while (left > 0) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_write(sqe, fptr->fd, buf, left, -1);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else {
+       buf += result;
+       left -= result;
+     }
+   }
+
+   return INT2NUM(len);
+ }
+
+ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE underlying_io;
+   long total_length = 0;
+   long total_written = 0;
+   struct iovec *iov = 0;
+   struct iovec *iov_ptr = 0;
+   int iov_count = argc;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetBackend(self, backend);
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+
+   iov = malloc(iov_count * sizeof(struct iovec));
+   for (int i = 0; i < argc; i++) {
+     VALUE str = argv[i];
+     iov[i].iov_base = StringValuePtr(str);
+     iov[i].iov_len = RSTRING_LEN(str);
+     total_length += iov[i].iov_len;
+   }
+   iov_ptr = iov;
+
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     if (TEST_EXCEPTION(resume_value)) {
+       free(iov);
+       RAISE_EXCEPTION(resume_value);
+     }
+     if (!ctx->completed) {
+       free(iov);
+       return resume_value;
+     }
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0) {
+       free(iov);
+       rb_syserr_fail(-result, strerror(-result));
+     }
+     else {
+       total_written += result;
+       if (total_written >= total_length) break;
+
+       while (result > 0) {
+         if ((size_t) result < iov_ptr[0].iov_len) {
+           iov_ptr[0].iov_base = (char *) iov_ptr[0].iov_base + result;
+           iov_ptr[0].iov_len -= result;
+           result = 0;
+         }
+         else {
+           result -= iov_ptr[0].iov_len;
+           iov_ptr += 1;
+           iov_count -= 1;
+         }
+       }
+     }
+   }
+
+   free(iov);
+   return INT2NUM(total_written);
+ }
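Note: on a partial writev, Backend_writev advances the iovec array in place — fully-written entries are dropped, and a partially-written head entry has its base pointer and length adjusted. The same bookkeeping in a standalone writev(2) loop:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void) {
  struct iovec iov[3] = {
    { (void *)"hello ", 6 },
    { (void *)"io_uring ", 9 },
    { (void *)"world\n", 6 },
  };
  struct iovec *p = iov;
  int count = 3;

  while (count > 0) {
    ssize_t n = writev(1, p, count);   /* may write fewer bytes than asked */
    if (n < 0) { perror("writev"); return 1; }
    /* advance past fully-written entries, trim a partially-written one */
    while (n > 0 && count > 0) {
      if ((size_t)n < p[0].iov_len) {
        p[0].iov_base = (char *)p[0].iov_base + n;
        p[0].iov_len -= n;
        n = 0;
      } else {
        n -= p[0].iov_len;
        p += 1;
        count -= 1;
      }
    }
  }
  return 0;
}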
+
+ VALUE Backend_write_m(int argc, VALUE *argv, VALUE self) {
+   if (argc < 2)
+     // TODO: raise ArgumentError
+     rb_raise(rb_eRuntimeError, "(wrong number of arguments (expected 2 or more))");
+
+   return (argc == 2) ?
+     Backend_write(self, argv[0], argv[1]) :
+     Backend_writev(self, argv[0], argc - 1, argv + 1);
+ }
+
+ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   long dynamic_len = length == Qnil;
+   long len = dynamic_len ? 4096 : NUM2INT(length);
+   int shrinkable = io_setstrbuf(&str, len);
+   char *buf = RSTRING_PTR(str);
+   long total = 0;
+   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+   GetBackend(self, backend);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetOpenFile(io, fptr);
+   rb_io_check_byte_readable(fptr);
+   rectify_io_file_pos(fptr);
+   OBJ_TAINT(str);
+
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else {
+       total += result;
+       break;
+     }
+   }
+
+   io_set_read_length(str, total, shrinkable);
+   io_enc_str(str, fptr);
+
+   if (total == 0) return Qnil;
+
+   return str;
+ }
+
+ VALUE Backend_recv_loop(VALUE self, VALUE io) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE str;
+   long total;
+   long len = 8192;
+   int shrinkable;
+   char *buf;
+   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+
+   READ_LOOP_PREPARE_STR();
+
+   GetBackend(self, backend);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetOpenFile(io, fptr);
+   rb_io_check_byte_readable(fptr);
+   rectify_io_file_pos(fptr);
+
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else if (result == 0)
+       break; // EOF
+     else {
+       total = result;
+       READ_LOOP_YIELD_STR();
+     }
+   }
+
+   RB_GC_GUARD(str);
+   return io;
+ }
+
+ VALUE Backend_send(VALUE self, VALUE io, VALUE str) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE underlying_io;
+
+   underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetBackend(self, backend);
+   io = rb_io_get_write_io(io);
+   GetOpenFile(io, fptr);
+
+   char *buf = StringValuePtr(str);
+   long len = RSTRING_LEN(str);
+   long left = len;
+
+   while (left > 0) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_send(sqe, fptr->fd, buf, left, 0);
+
+     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (result < 0)
+       rb_syserr_fail(-result, strerror(-result));
+     else {
+       buf += result;
+       left -= result;
+     }
+   }
+
+   return INT2NUM(len);
+ }
+
+ VALUE io_uring_backend_accept(Backend_t *backend, VALUE sock, int loop) {
+   rb_io_t *fptr;
+   struct sockaddr addr;
+   socklen_t len = (socklen_t)sizeof addr;
+   VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
+   VALUE socket = Qnil;
+   if (underlying_sock != Qnil) sock = underlying_sock;
+
+   GetOpenFile(sock, fptr);
+   while (1) {
+     VALUE resume_value = Qnil;
+     op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
+
+     int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+     OP_CONTEXT_RELEASE(&backend->store, ctx);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!ctx->completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     if (fd < 0)
+       rb_syserr_fail(-fd, strerror(-fd));
+     else {
+       rb_io_t *fp;
+
+       socket = rb_obj_alloc(cTCPSocket);
+       MakeOpenFile(socket, fp);
+       rb_update_max_fd(fd);
+       fp->fd = fd;
+       fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
+       rb_io_ascii8bit_binmode(socket);
+       rb_io_synchronized(fp);
+
+       // if (rsock_do_not_reverse_lookup) {
+       //   fp->mode |= FMODE_NOREVLOOKUP;
+       // }
+       if (loop) {
+         rb_yield(socket);
+         socket = Qnil;
+       }
+       else
+         return socket;
+     }
+   }
+   RB_GC_GUARD(socket);
+   return Qnil;
+ }
+
+ VALUE Backend_accept(VALUE self, VALUE sock) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   return io_uring_backend_accept(backend, sock, 0);
+ }
+
+ VALUE Backend_accept_loop(VALUE self, VALUE sock) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+   io_uring_backend_accept(backend, sock, 1);
+   return self;
+ }
+
+ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   struct sockaddr_in addr;
+   char *host_buf = StringValueCStr(host);
+   VALUE underlying_sock = rb_ivar_get(sock, ID_ivar_io);
+   if (underlying_sock != Qnil) sock = underlying_sock;
+
+   GetBackend(self, backend);
+   GetOpenFile(sock, fptr);
+
+   addr.sin_family = AF_INET;
+   addr.sin_addr.s_addr = inet_addr(host_buf);
+   addr.sin_port = htons(NUM2INT(port));
+
+   VALUE resume_value = Qnil;
+   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+   io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
+   int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+   OP_CONTEXT_RELEASE(&backend->store, ctx);
+   RAISE_IF_EXCEPTION(resume_value);
+   if (!ctx->completed) return resume_value;
+   RB_GC_GUARD(resume_value);
+
+   if (result < 0) rb_syserr_fail(-result, strerror(-result));
+   return sock;
+ }
+
+ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
+   Backend_t *backend;
+   rb_io_t *fptr;
+   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
+   if (underlying_io != Qnil) io = underlying_io;
+   GetBackend(self, backend);
+   GetOpenFile(io, fptr);
+
+   VALUE resume_value = io_uring_backend_wait_fd(backend, fptr->fd, RTEST(write));
+   RAISE_IF_EXCEPTION(resume_value);
+   RB_GC_GUARD(resume_value);
+   return self;
+ }
+
+ inline struct __kernel_timespec double_to_timespec(double duration) {
+   double duration_integral;
+   double duration_fraction = modf(duration, &duration_integral);
+   struct __kernel_timespec ts;
+   ts.tv_sec = duration_integral;
+   ts.tv_nsec = floor(duration_fraction * 1000000000);
+   return ts;
+ }
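Note: double_to_timespec splits a floating-point duration into whole seconds and nanoseconds via modf. The same conversion, runnable standalone (using plain struct timespec in place of __kernel_timespec; compile with -lm):

#include <math.h>
#include <stdio.h>
#include <time.h>

static struct timespec seconds_to_timespec(double duration) {
  double integral;
  double fraction = modf(duration, &integral);  /* splits 1.25 -> 1.0 + 0.25 */
  struct timespec ts;
  ts.tv_sec = (time_t)integral;
  ts.tv_nsec = (long)floor(fraction * 1e9);
  return ts;
}

int main(void) {
  struct timespec ts = seconds_to_timespec(1.25);
  printf("%ld s + %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);  /* 1 s + 250000000 ns */
  return 0;
}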
+
+ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
+   return double_to_timespec(NUM2DBL(duration));
+ }
+
+ // returns 1 if the timeout completed, 0 otherwise
+ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+   struct __kernel_timespec ts = double_to_timespec(duration);
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+
+   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   io_uring_prep_timeout(sqe, &ts, 0, 0);
+
+   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
+   OP_CONTEXT_RELEASE(&backend->store, ctx);
+   return ctx->completed;
+ }
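Note: sleeping is just a timeout SQE that completes with -ETIME. A standalone sketch of the IORING_OP_TIMEOUT flow used above (assumes liburing):

#include <errno.h>
#include <liburing.h>
#include <stdio.h>

int main(void) {
  struct io_uring ring;
  io_uring_queue_init(8, &ring, 0);

  struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 250000000 };
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_timeout(sqe, &ts, 0, 0);   /* fire after 250 ms */
  io_uring_submit(&ring);

  struct io_uring_cqe *cqe;
  io_uring_wait_cqe(&ring, &cqe);
  /* a timeout that expires normally completes with -ETIME */
  printf("timeout cqe res: %d (%s)\n", cqe->res,
         cqe->res == -ETIME ? "expired" : "other");
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  return 0;
}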
+
+ VALUE Backend_sleep(VALUE self, VALUE duration) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   VALUE resume_value = Qnil;
+   io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
+   RAISE_IF_EXCEPTION(resume_value);
+   RB_GC_GUARD(resume_value);
+   return resume_value;
+ }
+
+ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
+   Backend_t *backend;
+   double interval_d = NUM2DBL(interval);
+   GetBackend(self, backend);
+   double next_time = 0.;
+
+   while (1) {
+     double now = current_time();
+     if (next_time == 0.) next_time = current_time() + interval_d;
+     double sleep_duration = next_time - now;
+     if (sleep_duration < 0) sleep_duration = 0;
+
+     VALUE resume_value = Qnil;
+     int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     rb_yield(Qnil);
+
+     while (1) {
+       next_time += interval_d;
+       if (next_time > now) break;
+     }
+   }
+ }
+
+ VALUE Backend_timeout_safe(VALUE arg) {
+   return rb_yield(arg);
+ }
+
+ VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+   return exception;
+ }
+
+ VALUE Backend_timeout_ensure_safe(VALUE arg) {
+   return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+ }
+
+ struct Backend_timeout_ctx {
+   Backend_t *backend;
+   op_context_t *ctx;
+ };
+
+ VALUE Backend_timeout_ensure(VALUE arg) {
+   struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+   if (!timeout_ctx->ctx->completed) {
+     timeout_ctx->ctx->result = -ECANCELED;
+
+     // op was not completed, so we need to cancel it
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+     io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
+     timeout_ctx->backend->pending_sqes = 0;
+     io_uring_submit(&timeout_ctx->backend->ring);
+   }
+   OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+   return Qnil;
+ }
+
+ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+   VALUE duration;
+   VALUE exception;
+   VALUE move_on_value = Qnil;
+   rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+   struct __kernel_timespec ts = duration_to_timespec(duration);
+   Backend_t *backend;
+   GetBackend(self, backend);
+   VALUE result = Qnil;
+   VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+
+   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   ctx->resume_value = timeout;
+   io_uring_prep_timeout(sqe, &ts, 0, 0);
+   io_uring_sqe_set_data(sqe, ctx);
+   io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+   io_uring_backend_defer_submit(backend);
+
+   struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
+   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+   if (result == timeout) {
+     if (exception == Qnil) return move_on_value;
+     RAISE_EXCEPTION(backend_timeout_exception(exception));
+   }
+
+   RAISE_IF_EXCEPTION(result);
+   RB_GC_GUARD(result);
+   RB_GC_GUARD(timeout);
+   return result;
+ }
+
+ VALUE Backend_waitpid(VALUE self, VALUE pid) {
+   Backend_t *backend;
+   int pid_int = NUM2INT(pid);
+   int fd = pidfd_open(pid_int, 0);
+   GetBackend(self, backend);
+
+   VALUE resume_value = io_uring_backend_wait_fd(backend, fd, 0);
+   close(fd);
+
+   RAISE_IF_EXCEPTION(resume_value);
+   RB_GC_GUARD(resume_value);
+
+   int status;
+   pid_t ret = waitpid(pid_int, &status, WNOHANG);
+   return rb_ary_new_from_args(2, INT2NUM(ret), INT2NUM(WEXITSTATUS(status)));
+ }
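Note: Backend_waitpid avoids a blocking waitpid by opening a pidfd (pidfd_open, Linux 5.3+), waiting for it to become readable, and only then reaping with WNOHANG. The same sequence standalone — if SYS_pidfd_open is missing from your headers, use the number 434 as the vendored code above does:

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  pid_t pid = fork();
  if (pid == 0) { usleep(100000); _exit(42); }   /* child exits with 42 */

  int fd = syscall(SYS_pidfd_open, pid, 0);      /* pollable handle on pid */
  struct pollfd pfd = { .fd = fd, .events = POLLIN };
  poll(&pfd, 1, -1);                             /* readable on child exit */

  int status;
  pid_t ret = waitpid(pid, &status, WNOHANG);    /* reap without blocking */
  printf("reaped %d, exit status %d\n", (int)ret, WEXITSTATUS(status));
  close(fd);
  return 0;
}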
+
+ VALUE Backend_wait_event(VALUE self, VALUE raise) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   if (backend->event_fd == -1) {
+     backend->event_fd = eventfd(0, 0);
+     if (backend->event_fd == -1) {
+       int n = errno;
+       rb_syserr_fail(n, strerror(n));
+     }
+   }
+
+   VALUE resume_value = io_uring_backend_wait_fd(backend, backend->event_fd, 0);
+   if (RTEST(raise)) RAISE_IF_EXCEPTION(resume_value);
+   RB_GC_GUARD(resume_value);
+   return resume_value;
+ }
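Note: the lazily-created eventfd gives other threads a plain file descriptor they can write to in order to wake a fiber blocked in Backend_wait_event. A minimal standalone demonstration of the eventfd signal/wait handshake:

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void) {
  int efd = eventfd(0, 0);

  uint64_t one = 1;
  write(efd, &one, sizeof(one));   /* signal: normally done by another thread */

  struct pollfd pfd = { .fd = efd, .events = POLLIN };
  poll(&pfd, 1, -1);               /* wait side: fd is now readable */

  uint64_t value;
  read(efd, &value, sizeof(value));  /* drain the counter (resets it to 0) */
  printf("eventfd counter was %llu\n", (unsigned long long)value);
  close(efd);
  return 0;
}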
+
+ VALUE Backend_kind(VALUE self) {
+   return SYM_io_uring;
+ }
+
+ void Init_Backend() {
+   rb_require("socket");
+   cTCPSocket = rb_const_get(rb_cObject, rb_intern("TCPSocket"));
+
+   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cData);
+   rb_define_alloc_func(cBackend, Backend_allocate);
+
+   rb_define_method(cBackend, "initialize", Backend_initialize, 0);
+   rb_define_method(cBackend, "finalize", Backend_finalize, 0);
+   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+   rb_define_method(cBackend, "pending_count", Backend_pending_count, 0);
+
+   rb_define_method(cBackend, "ref", Backend_ref, 0);
+   rb_define_method(cBackend, "unref", Backend_unref, 0);
+
+   rb_define_method(cBackend, "poll", Backend_poll, 3);
+   rb_define_method(cBackend, "break", Backend_wakeup, 0);
+
+   rb_define_method(cBackend, "read", Backend_read, 4);
+   rb_define_method(cBackend, "read_loop", Backend_read_loop, 1);
+   rb_define_method(cBackend, "write", Backend_write_m, -1);
+   rb_define_method(cBackend, "recv", Backend_recv, 3);
+   rb_define_method(cBackend, "recv_loop", Backend_recv_loop, 1);
+   rb_define_method(cBackend, "send", Backend_send, 2);
+   rb_define_method(cBackend, "accept", Backend_accept, 1);
+   rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 1);
+   rb_define_method(cBackend, "connect", Backend_connect, 3);
+   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
+   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
+   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+   rb_define_method(cBackend, "timeout", Backend_timeout, -1);
+   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
+   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
+
+   rb_define_method(cBackend, "kind", Backend_kind, 0);
+
+   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+
+   __BACKEND__.pending_count = Backend_pending_count;
+   __BACKEND__.poll = Backend_poll;
+   __BACKEND__.ref = Backend_ref;
+   __BACKEND__.ref_count = Backend_ref_count;
+   __BACKEND__.reset_ref_count = Backend_reset_ref_count;
+   __BACKEND__.unref = Backend_unref;
+   __BACKEND__.wait_event = Backend_wait_event;
+   __BACKEND__.wakeup = Backend_wakeup;
+ }
+
+ #endif // POLYPHONY_BACKEND_LIBURING