uringmachine 0.1 → 0.3

data/ext/um/ring.c DELETED
@@ -1,755 +0,0 @@
- #include "iou.h"
- #include "ruby/thread.h"
- #include <sys/mman.h>
-
- VALUE mIOU;
- VALUE cRing;
-
- VALUE SYM_accept;
- VALUE SYM_block;
- VALUE SYM_buffer;
- VALUE SYM_buffer_group;
- VALUE SYM_buffer_offset;
- VALUE SYM_close;
- VALUE SYM_count;
- VALUE SYM_emit;
- VALUE SYM_fd;
- VALUE SYM_id;
- VALUE SYM_interval;
- VALUE SYM_len;
- VALUE SYM_link;
- VALUE SYM_multishot;
- VALUE SYM_op;
- VALUE SYM_read;
- VALUE SYM_result;
- VALUE SYM_signal;
- VALUE SYM_size;
- VALUE SYM_spec_data;
- VALUE SYM_stop;
- VALUE SYM_timeout;
- VALUE SYM_utf8;
- VALUE SYM_write;
-
- static void IOURing_mark(void *ptr) {
-   IOURing_t *iour = ptr;
-   rb_gc_mark_movable(iour->pending_ops);
- }
-
- static void IOURing_compact(void *ptr) {
-   IOURing_t *iour = ptr;
-   iour->pending_ops = rb_gc_location(iour->pending_ops);
- }
-
- void cleanup_iour(IOURing_t *iour) {
-   if (!iour->ring_initialized) return;
-
-   for (unsigned i = 0; i < iour->br_counter; i++) {
-     struct buf_ring_descriptor *desc = iour->brs + i;
-     io_uring_free_buf_ring(&iour->ring, desc->br, desc->buf_count, i);
-     free(desc->buf_base);
-   }
-   iour->br_counter = 0;
-   io_uring_queue_exit(&iour->ring);
-   iour->ring_initialized = 0;
- }
-
- static void IOU_free(void *ptr) {
-   cleanup_iour((IOURing_t *)ptr);
- }
-
- static size_t IOURing_size(const void *ptr) {
-   return sizeof(IOURing_t);
- }
-
- static const rb_data_type_t IOURing_type = {
-   "IOURing",
-   {IOURing_mark, IOU_free, IOURing_size, IOURing_compact},
-   0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
- };
-
- static VALUE IOURing_allocate(VALUE klass) {
-   IOURing_t *iour = ALLOC(IOURing_t);
-
-   return TypedData_Wrap_Struct(klass, &IOURing_type, iour);
- }
-
- VALUE IOURing_initialize(VALUE self) {
-   IOURing_t *iour = RTYPEDDATA_DATA(self);
-
-   iour->ring_initialized = 0;
-   iour->op_counter = 0;
-   iour->unsubmitted_sqes = 0;
-   iour->br_counter = 0;
-
-   RB_OBJ_WRITE(self, &iour->pending_ops, rb_hash_new());
-
-   unsigned prepared_limit = 1024;
-   int flags = 0;
- #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
-   flags |= IORING_SETUP_SUBMIT_ALL;
- #endif
- #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
-   flags |= IORING_SETUP_COOP_TASKRUN;
- #endif
-
-   while (1) {
-     int ret = io_uring_queue_init(prepared_limit, &iour->ring, flags);
-     if (likely(!ret)) break;
-
-     // if ENOMEM is returned, try with half as many entries
-     if (unlikely(ret == -ENOMEM && prepared_limit > 64))
-       prepared_limit = prepared_limit / 2;
-     else
-       rb_syserr_fail(-ret, strerror(-ret));
-   }
-   iour->ring_initialized = 1;
-
-   return self;
- }
-
- VALUE IOURing_close(VALUE self) {
-   IOURing_t *iour = RTYPEDDATA_DATA(self);
-   cleanup_iour(iour);
-   return self;
- }
-
- VALUE IOURing_closed_p(VALUE self) {
-   IOURing_t *iour = RTYPEDDATA_DATA(self);
-   return iour->ring_initialized ? Qfalse : Qtrue;
- }
-
- VALUE IOURing_pending_ops(VALUE self) {
-   IOURing_t *iour = RTYPEDDATA_DATA(self);
-   return iour->pending_ops;
- }
-
- inline IOURing_t *get_iou(VALUE self) {
-   IOURing_t *iour = RTYPEDDATA_DATA(self);
-   if (!iour->ring_initialized)
-     rb_raise(rb_eRuntimeError, "IOU ring was not initialized");
-   return iour;
- }
-
- static inline struct io_uring_sqe *get_sqe(IOURing_t *iour) {
-   struct io_uring_sqe *sqe;
-   sqe = io_uring_get_sqe(&iour->ring);
-   if (likely(sqe)) goto done;
-
-   rb_raise(rb_eRuntimeError, "Failed to get SQE");
-
-   // TODO: retry getting SQE?
-
-   // if (likely(backend->pending_sqes))
-   //   io_uring_backend_immediate_submit(backend);
-   // else {
-   //   VALUE resume_value = backend_snooze(&backend->base);
-   //   RAISE_IF_EXCEPTION(resume_value);
-   // }
- done:
-   return sqe;
- }
-
- static inline void get_required_kwargs(VALUE spec, VALUE *values, int argc, ...) {
-   if (TYPE(spec) != T_HASH)
-     rb_raise(rb_eArgError, "Expected keyword arguments");
-
-   va_list ptr;
-   va_start(ptr, argc);
-   for (int i = 0; i < argc; i++) {
-     VALUE k = va_arg(ptr, VALUE);
-     VALUE v = rb_hash_aref(spec, k);
-     if (NIL_P(v))
-       rb_raise(rb_eArgError, "Missing %"PRIsVALUE" value", k);
-     values[i] = v;
-   }
-   va_end(ptr);
- }
-
- VALUE IOURing_setup_buffer_ring(VALUE self, VALUE opts) {
-   IOURing_t *iour = get_iou(self);
-
-   if (iour->br_counter == BUFFER_RING_MAX_COUNT)
-     rb_raise(rb_eRuntimeError, "Cannot setup more than BUFFER_RING_MAX_COUNT buffer rings");
-
-   VALUE values[2];
-   get_required_kwargs(opts, values, 2, SYM_count, SYM_size);
-   VALUE count = values[0];
-   VALUE size = values[1];
-
-   struct buf_ring_descriptor *desc = iour->brs + iour->br_counter;
-   desc->buf_count = NUM2UINT(count);
-   desc->buf_size = NUM2UINT(size);
-
-   desc->br_size = sizeof(struct io_uring_buf) * desc->buf_count;
-   void *mapped = mmap(
-     NULL, desc->br_size, PROT_READ | PROT_WRITE,
-     MAP_ANONYMOUS | MAP_PRIVATE, 0, 0
-   );
-   if (mapped == MAP_FAILED)
-     rb_raise(rb_eRuntimeError, "Failed to allocate buffer ring");
-
-   desc->br = (struct io_uring_buf_ring *)mapped;
-   io_uring_buf_ring_init(desc->br);
-
-   unsigned bg_id = iour->br_counter;
-   struct io_uring_buf_reg reg = {
-     .ring_addr = (unsigned long)desc->br,
-     .ring_entries = desc->buf_count,
-     .bgid = bg_id
-   };
-   int ret = io_uring_register_buf_ring(&iour->ring, &reg, 0);
-   if (ret) {
-     munmap(desc->br, desc->br_size);
-     rb_syserr_fail(-ret, strerror(-ret));
-   }
-
-   desc->buf_base = malloc(desc->buf_count * desc->buf_size);
-   if (!desc->buf_base) {
-     io_uring_free_buf_ring(&iour->ring, desc->br, desc->buf_count, bg_id);
-     rb_raise(rb_eRuntimeError, "Failed to allocate buffers");
-   }
-
-   int mask = io_uring_buf_ring_mask(desc->buf_count);
-   for (unsigned i = 0; i < desc->buf_count; i++) {
-     io_uring_buf_ring_add(
-       desc->br, desc->buf_base + i * desc->buf_size, desc->buf_size,
-       i, mask, i);
-   }
-   io_uring_buf_ring_advance(desc->br, desc->buf_count);
-   iour->br_counter++;
-   return UINT2NUM(bg_id);
- }
-
- static inline VALUE setup_op_ctx(IOURing_t *iour, enum op_type type, VALUE op, VALUE id, VALUE spec) {
-   rb_hash_aset(spec, SYM_id, id);
-   rb_hash_aset(spec, SYM_op, op);
-   VALUE block_proc = rb_block_given_p() ? rb_block_proc() : Qnil;
-   if (block_proc != Qnil)
-     rb_hash_aset(spec, SYM_block, block_proc);
-   VALUE ctx = rb_funcall(cOpCtx, rb_intern("new"), 2, spec, block_proc);
-   OpCtx_type_set(ctx, type);
-   rb_hash_aset(iour->pending_ops, id, ctx);
-   return ctx;
- }
-
- static inline void setup_sqe(struct io_uring_sqe *sqe, int id, VALUE spec) {
-   sqe->user_data = id;
-   sqe->flags = 0;
-   if (spec != Qnil && RTEST(rb_hash_aref(spec, SYM_link)))
-     sqe->flags |= IOSQE_IO_LINK;
- }
-
- VALUE IOURing_emit(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   sqe->user_data = id_i;
-   VALUE ctx = setup_op_ctx(iour, OP_emit, SYM_emit, id, spec);
-   if (rb_hash_aref(spec, SYM_signal) == SYM_stop)
-     OpCtx_stop_signal_set(ctx);
-
-   io_uring_prep_nop(sqe);
-
-   // immediately submit
-   io_uring_submit(&iour->ring);
-   iour->unsubmitted_sqes = 0;
-
-   return id;
- }
-
- VALUE IOURing_prep_accept(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[1];
-   get_required_kwargs(spec, values, 1, SYM_fd);
-   VALUE fd = values[0];
-   VALUE multishot = rb_hash_aref(spec, SYM_multishot);
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   VALUE ctx = setup_op_ctx(iour, OP_accept, SYM_accept, id, spec);
-   struct sa_data *sa = OpCtx_sa_get(ctx);
-   if (RTEST(multishot))
-     io_uring_prep_multishot_accept(sqe, NUM2INT(fd), &sa->addr, &sa->len, 0);
-   else
-     io_uring_prep_accept(sqe, NUM2INT(fd), &sa->addr, &sa->len, 0);
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE prep_cancel_id(IOURing_t *iour, unsigned op_id_i) {
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   io_uring_prep_cancel64(sqe, op_id_i, 0);
-   sqe->user_data = id_i;
-   iour->unsubmitted_sqes++;
-
-   return id;
- }
-
- VALUE IOURing_prep_cancel(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-
-   if (TYPE(spec) == T_FIXNUM)
-     return prep_cancel_id(iour, NUM2UINT(spec));
-
-   if (TYPE(spec) != T_HASH)
-     rb_raise(rb_eArgError, "Expected operation id or keyword arguments");
-
-   VALUE id = rb_hash_aref(spec, SYM_id);
-   if (!NIL_P(id))
-     return prep_cancel_id(iour, NUM2UINT(id));
-
-   rb_raise(rb_eArgError, "Missing operation id");
- }
-
- VALUE IOURing_prep_close(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[1];
-   get_required_kwargs(spec, values, 1, SYM_fd);
-   VALUE fd = values[0];
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   setup_op_ctx(iour, OP_close, SYM_close, id, spec);
-
-   io_uring_prep_close(sqe, NUM2INT(fd));
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE IOURing_prep_nop(VALUE self) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   io_uring_prep_nop(sqe);
-   sqe->user_data = id_i;
-   iour->unsubmitted_sqes++;
-
-   return id;
- }
-
- static inline void *prepare_read_buffer(VALUE buffer, unsigned len, int ofs) {
-   unsigned current_len = RSTRING_LEN(buffer);
-   if (ofs < 0) ofs = current_len + ofs + 1;
-   unsigned new_len = len + (unsigned)ofs;
-
-   if (current_len < new_len)
-     rb_str_modify_expand(buffer, new_len);
-   else
-     rb_str_modify(buffer);
-   return RSTRING_PTR(buffer) + ofs;
- }
-
- static inline void adjust_read_buffer_len(VALUE buffer, int result, int ofs) {
-   rb_str_modify(buffer);
-   unsigned len = result > 0 ? (unsigned)result : 0;
-   unsigned current_len = RSTRING_LEN(buffer);
-   if (ofs < 0) ofs = current_len + ofs + 1;
-   rb_str_set_len(buffer, len + (unsigned)ofs);
- }
-
- VALUE prep_read_multishot(IOURing_t *iour, VALUE spec) {
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[2];
-   get_required_kwargs(spec, values, 2, SYM_fd, SYM_buffer_group);
-   int fd = NUM2INT(values[0]);
-   unsigned bg_id = NUM2UINT(values[1]);
-   int utf8 = RTEST(rb_hash_aref(spec, SYM_utf8));
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   VALUE ctx = setup_op_ctx(iour, OP_read, SYM_read, id, spec);
-   OpCtx_rd_set(ctx, Qnil, 0, bg_id, utf8);
-
-   io_uring_prep_read_multishot(sqe, fd, 0, -1, bg_id);
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE IOURing_prep_read(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-
-   if (RTEST(rb_hash_aref(spec, SYM_multishot)))
-     return prep_read_multishot(iour, spec);
-
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[3];
-   get_required_kwargs(spec, values, 3, SYM_fd, SYM_buffer, SYM_len);
-
-   VALUE fd = values[0];
-   VALUE buffer = values[1];
-   VALUE len = values[2];
-   unsigned len_i = NUM2UINT(len);
-
-   VALUE buffer_offset = rb_hash_aref(spec, SYM_buffer_offset);
-   int buffer_offset_i = NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset);
-   int utf8 = RTEST(rb_hash_aref(spec, SYM_utf8));
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   VALUE ctx = setup_op_ctx(iour, OP_read, SYM_read, id, spec);
-   OpCtx_rd_set(ctx, buffer, buffer_offset_i, 0, utf8);
-
-   void *ptr = prepare_read_buffer(buffer, len_i, buffer_offset_i);
-   io_uring_prep_read(sqe, NUM2INT(fd), ptr, len_i, -1);
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE IOURing_prep_timeout(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[1];
-   get_required_kwargs(spec, values, 1, SYM_interval);
-   VALUE interval = values[0];
-   VALUE multishot = rb_hash_aref(spec, SYM_multishot);
-   unsigned flags = RTEST(multishot) ? IORING_TIMEOUT_MULTISHOT : 0;
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   VALUE ctx = setup_op_ctx(iour, OP_timeout, SYM_timeout, id, spec);
-   OpCtx_ts_set(ctx, interval);
-
-   io_uring_prep_timeout(sqe, OpCtx_ts_get(ctx), 0, flags);
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE IOURing_prep_write(VALUE self, VALUE spec) {
-   IOURing_t *iour = get_iou(self);
-   unsigned id_i = ++iour->op_counter;
-   VALUE id = UINT2NUM(id_i);
-
-   VALUE values[2];
-   get_required_kwargs(spec, values, 2, SYM_fd, SYM_buffer);
-   VALUE fd = values[0];
-   VALUE buffer = values[1];
-   VALUE len = rb_hash_aref(spec, SYM_len);
-   unsigned nbytes = NIL_P(len) ? RSTRING_LEN(buffer) : NUM2UINT(len);
-
-   struct io_uring_sqe *sqe = get_sqe(iour);
-   setup_sqe(sqe, id_i, spec);
-
-   setup_op_ctx(iour, OP_write, SYM_write, id, spec);
-
-   io_uring_prep_write(sqe, NUM2INT(fd), RSTRING_PTR(buffer), nbytes, -1);
-   iour->unsubmitted_sqes++;
-   return id;
- }
-
- VALUE IOURing_submit(VALUE self) {
-   IOURing_t *iour = get_iou(self);
-   if (!iour->unsubmitted_sqes)
-     return INT2NUM(0);
-
-   iour->unsubmitted_sqes = 0;
-   int ret = io_uring_submit(&iour->ring);
-   if (ret < 0)
-     rb_syserr_fail(-ret, strerror(-ret));
-
-   return INT2NUM(ret);
- }
-
- inline VALUE make_empty_op_with_result(VALUE id, VALUE result) {
-   VALUE hash = rb_hash_new();
-   rb_hash_aset(hash, SYM_id, id);
-   rb_hash_aset(hash, SYM_result, result);
-   RB_GC_GUARD(hash);
-   return hash;
- }
-
- typedef struct {
-   IOURing_t *iour;
-   struct io_uring_cqe *cqe;
-   int ret;
- } wait_for_completion_ctx_t;
-
- void *wait_for_completion_without_gvl(void *ptr) {
-   wait_for_completion_ctx_t *ctx = (wait_for_completion_ctx_t *)ptr;
-   ctx->ret = io_uring_wait_cqe(&ctx->iour->ring, &ctx->cqe);
-   return NULL;
- }
-
- static inline void update_read_buffer_from_buffer_ring(IOURing_t *iour, VALUE ctx, struct io_uring_cqe *cqe) {
-   VALUE buf = Qnil;
-   if (cqe->res == 0) {
-     buf = rb_str_new_literal("");
-     goto done;
-   }
-
-   struct read_data *rd = OpCtx_rd_get(ctx);
-   unsigned buf_idx = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
-
-   struct buf_ring_descriptor *desc = iour->brs + rd->bg_id;
-   char *src = desc->buf_base + desc->buf_size * buf_idx;
-   buf = rd->utf8_encoding ? rb_utf8_str_new(src, cqe->res) : rb_str_new(src, cqe->res);
-
-   // add buffer back to buffer ring
-   io_uring_buf_ring_add(
-     desc->br, src, desc->buf_size, buf_idx,
-     io_uring_buf_ring_mask(desc->buf_count), 0
-   );
-   io_uring_buf_ring_advance(desc->br, 1);
- done:
-   rb_hash_aset(OpCtx_spec_get(ctx), SYM_buffer, buf);
-   RB_GC_GUARD(buf);
-   return;
- }
-
- static inline void update_read_buffer(IOURing_t *iour, VALUE ctx, struct io_uring_cqe *cqe) {
-   if (cqe->res < 0) return;
-
-   if (cqe->flags & IORING_CQE_F_BUFFER) {
-     update_read_buffer_from_buffer_ring(iour, ctx, cqe);
-     return;
-   }
-
-   if (cqe->res == 0) return;
-
-   struct read_data *rd = OpCtx_rd_get(ctx);
-   adjust_read_buffer_len(rd->buffer, cqe->res, rd->buffer_offset);
- }
-
- static inline VALUE get_cqe_ctx(IOURing_t *iour, struct io_uring_cqe *cqe, int *stop_flag, VALUE *spec) {
-   VALUE id = UINT2NUM(cqe->user_data);
-   VALUE ctx = rb_hash_aref(iour->pending_ops, id);
-   VALUE result = INT2NUM(cqe->res);
-   if (NIL_P(ctx)) {
-     *spec = make_empty_op_with_result(id, result);
-     return Qnil;
-   }
-
-   // post completion work
-   switch (OpCtx_type_get(ctx)) {
-     case OP_read:
-       update_read_buffer(iour, ctx, cqe);
-       break;
-     case OP_emit:
-       if (stop_flag && OpCtx_stop_signal_p(ctx))
-         *stop_flag = 1;
-       break;
-     default:
-       break;
-   }
556
-
557
- // for multishot ops, the IORING_CQE_F_MORE flag indicates more completions
558
- // will be coming, so we need to keep the spec. Otherwise, we remove it.
559
- if (!(cqe->flags & IORING_CQE_F_MORE))
560
- rb_hash_delete(iour->pending_ops, id);
561
-
562
- *spec = OpCtx_spec_get(ctx);
563
- rb_hash_aset(*spec, SYM_result, result);
564
- RB_GC_GUARD(ctx);
565
- return ctx;
566
- }
567
-
568
- VALUE IOURing_wait_for_completion(VALUE self) {
569
- IOURing_t *iour = get_iou(self);
570
-
571
- wait_for_completion_ctx_t cqe_ctx = {
572
- .iour = iour
573
- };
574
-
575
- rb_thread_call_without_gvl(wait_for_completion_without_gvl, (void *)&cqe_ctx, RUBY_UBF_IO, 0);
576
-
577
- if (unlikely(cqe_ctx.ret < 0)) {
578
- rb_syserr_fail(-cqe_ctx.ret, strerror(-cqe_ctx.ret));
579
- }
580
- io_uring_cqe_seen(&iour->ring, cqe_ctx.cqe);
581
-
582
- VALUE spec = Qnil;
583
- get_cqe_ctx(iour, cqe_ctx.cqe, 0, &spec);
584
- return spec;
585
- }
586
-
587
- static inline void process_cqe(IOURing_t *iour, struct io_uring_cqe *cqe, int block_given, int *stop_flag) {
588
- if (stop_flag) *stop_flag = 0;
589
- VALUE spec;
590
- VALUE ctx = get_cqe_ctx(iour, cqe, stop_flag, &spec);
591
- if (stop_flag && *stop_flag) return;
592
-
593
- if (block_given)
594
- rb_yield(spec);
595
- else if (ctx != Qnil) {
596
- VALUE proc = OpCtx_proc_get(ctx);
597
- if (RTEST(proc))
598
- rb_proc_call_with_block_kw(proc, 1, &spec, Qnil, Qnil);
599
- }
600
-
601
- RB_GC_GUARD(ctx);
602
- }
603
-
604
- // copied from liburing/queue.c
605
- static inline bool cq_ring_needs_flush(struct io_uring *ring) {
606
- return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
607
- }
608
-
609
- // adapted from io_uring_peek_batch_cqe in liburing/queue.c
610
- // this peeks at cqes and handles each available cqe
611
- static inline int process_ready_cqes(IOURing_t *iour, int block_given, int *stop_flag) {
612
-   unsigned total_count = 0;
-   // declared before the label: a label cannot precede a declaration in
-   // standard C, and the flag must survive the goto back to iterate
-   bool overflow_checked = false;
-
- iterate:
-   struct io_uring_cqe *cqe;
-   unsigned head;
-   unsigned count = 0;
-   io_uring_for_each_cqe(&iour->ring, head, cqe) {
-     ++count;
-     if (stop_flag) *stop_flag = 0;
-     process_cqe(iour, cqe, block_given, stop_flag);
-     if (stop_flag && *stop_flag)
-       break;
-   }
-   io_uring_cq_advance(&iour->ring, count);
-   total_count += count;
-
-   if (overflow_checked) goto done;
-   if (stop_flag && *stop_flag) goto done;
-
-   if (cq_ring_needs_flush(&iour->ring)) {
-     io_uring_enter(iour->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
-     overflow_checked = true;
-     goto iterate;
-   }
-
- done:
-   return total_count;
- }
-
- VALUE IOURing_process_completions(int argc, VALUE *argv, VALUE self) {
-   IOURing_t *iour = get_iou(self);
-   int block_given = rb_block_given_p();
-   VALUE wait;
-
-   rb_scan_args(argc, argv, "01", &wait);
-   int wait_i = RTEST(wait);
-   unsigned count = 0;
-
-   // automatically submit any unsubmitted SQEs
-   if (iour->unsubmitted_sqes) {
-     io_uring_submit(&iour->ring);
-     iour->unsubmitted_sqes = 0;
-   }
-
-   if (wait_i) {
-     wait_for_completion_ctx_t ctx = { .iour = iour };
-
-     rb_thread_call_without_gvl(wait_for_completion_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
-     if (unlikely(ctx.ret < 0)) {
-       rb_syserr_fail(-ctx.ret, strerror(-ctx.ret));
-     }
-     ++count;
-     io_uring_cqe_seen(&iour->ring, ctx.cqe);
-     process_cqe(iour, ctx.cqe, block_given, 0);
-   }
-
-   count += process_ready_cqes(iour, block_given, 0);
-   return UINT2NUM(count);
- }
-
- VALUE IOURing_process_completions_loop(VALUE self) {
-   IOURing_t *iour = get_iou(self);
-   int block_given = rb_block_given_p();
-   int stop_flag = 0;
-   wait_for_completion_ctx_t ctx = { .iour = iour };
-
-   while (1) {
-     // automatically submit any unsubmitted SQEs
-     if (iour->unsubmitted_sqes) {
-       io_uring_submit(&iour->ring);
-       iour->unsubmitted_sqes = 0;
-     }
-
-     rb_thread_call_without_gvl(wait_for_completion_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
-     if (unlikely(ctx.ret < 0)) {
-       rb_syserr_fail(-ctx.ret, strerror(-ctx.ret));
-     }
-     io_uring_cqe_seen(&iour->ring, ctx.cqe);
-     process_cqe(iour, ctx.cqe, block_given, &stop_flag);
-     if (stop_flag) goto done;
-
-     process_ready_cqes(iour, block_given, &stop_flag);
-     if (stop_flag) goto done;
-   }
- done:
-   return self;
- }
-
- #define MAKE_SYM(sym) ID2SYM(rb_intern(sym))
-
- void Init_IOURing(void) {
-   rb_ext_ractor_safe(true);
-
-   mIOU = rb_define_module("IOU");
-   cRing = rb_define_class_under(mIOU, "Ring", rb_cObject);
-   rb_define_alloc_func(cRing, IOURing_allocate);
-
-   rb_define_method(cRing, "initialize", IOURing_initialize, 0);
-   rb_define_method(cRing, "close", IOURing_close, 0);
-   rb_define_method(cRing, "closed?", IOURing_closed_p, 0);
-   rb_define_method(cRing, "pending_ops", IOURing_pending_ops, 0);
-   rb_define_method(cRing, "setup_buffer_ring", IOURing_setup_buffer_ring, 1);
-
-   rb_define_method(cRing, "emit", IOURing_emit, 1);
-
-   rb_define_method(cRing, "prep_accept", IOURing_prep_accept, 1);
-   rb_define_method(cRing, "prep_cancel", IOURing_prep_cancel, 1);
-   rb_define_method(cRing, "prep_close", IOURing_prep_close, 1);
-   rb_define_method(cRing, "prep_nop", IOURing_prep_nop, 0);
-   rb_define_method(cRing, "prep_read", IOURing_prep_read, 1);
-   rb_define_method(cRing, "prep_timeout", IOURing_prep_timeout, 1);
-   rb_define_method(cRing, "prep_write", IOURing_prep_write, 1);
-
-   rb_define_method(cRing, "submit", IOURing_submit, 0);
-   rb_define_method(cRing, "wait_for_completion", IOURing_wait_for_completion, 0);
-   rb_define_method(cRing, "process_completions", IOURing_process_completions, -1);
-   rb_define_method(cRing, "process_completions_loop", IOURing_process_completions_loop, 0);
-
-   SYM_accept = MAKE_SYM("accept");
-   SYM_block = MAKE_SYM("block");
-   SYM_buffer = MAKE_SYM("buffer");
-   SYM_buffer_group = MAKE_SYM("buffer_group");
-   SYM_buffer_offset = MAKE_SYM("buffer_offset");
-   SYM_close = MAKE_SYM("close");
-   SYM_count = MAKE_SYM("count");
-   SYM_emit = MAKE_SYM("emit");
-   SYM_fd = MAKE_SYM("fd");
-   SYM_id = MAKE_SYM("id");
-   SYM_interval = MAKE_SYM("interval");
-   SYM_len = MAKE_SYM("len");
-   SYM_link = MAKE_SYM("link");
-   SYM_multishot = MAKE_SYM("multishot");
-   SYM_op = MAKE_SYM("op");
-   SYM_read = MAKE_SYM("read");
-   SYM_result = MAKE_SYM("result");
-   SYM_signal = MAKE_SYM("signal");
-   SYM_size = MAKE_SYM("size");
-   SYM_spec_data = MAKE_SYM("spec_data");
-   SYM_stop = MAKE_SYM("stop");
-   SYM_timeout = MAKE_SYM("timeout");
-   SYM_utf8 = MAKE_SYM("utf8");
-   SYM_write = MAKE_SYM("write");
- }
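
The bindings registered in Init_IOURing above expose a small Ruby-facing API on IOU::Ring. For reference, here is a minimal sketch of the single-shot lifecycle (prep, submit, wait) of the removed code. The method names and keyword arguments (fd:, buffer:, len:) come straight from the C bindings; the require path and the io object are assumptions for illustration only.

    require 'iou'                    # assumed entry point for this extension

    ring = IOU::Ring.new
    buffer = +''                     # mutable String; the C layer resizes it in place
    ring.prep_read(fd: io.fileno, buffer: buffer, len: 4096)   # io is hypothetical
    ring.submit                      # flushes all unsubmitted SQEs to the kernel

    spec = ring.wait_for_completion  # blocks in io_uring_wait_cqe with the GVL released
    puts "op #{spec[:id]} finished with result #{spec[:result]}"
    ring.close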
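The multishot paths can be sketched the same way, under the same assumptions: setup_buffer_ring(count:, size:) returns a buffer group id, a multishot prep_read takes buffer_group: in place of buffer:/len: and yields each chunk through its stored block, and emit(signal: :stop) queues a NOP whose completion breaks process_completions_loop. The sock and handle_chunk names are hypothetical.

    bg_id = ring.setup_buffer_ring(count: 16, size: 4096)

    ring.prep_read(fd: sock.fileno, multishot: true, buffer_group: bg_id) do |spec|
      handle_chunk(spec[:buffer])    # chunk contents copied out of the buffer ring
    end

    ticks = 0
    ring.prep_timeout(interval: 1, multishot: true) do |spec|
      ticks += 1
      ring.emit(signal: :stop) if ticks == 3   # stops the loop from inside a handler
    end

    ring.process_completions_loop    # auto-submits pending SQEs, runs until stopped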