uringmachine 0.2 → 0.4

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -0
  3. data/README.md +85 -0
  4. data/TODO.md +5 -0
  5. data/examples/echo_server.rb +18 -40
  6. data/examples/inout.rb +19 -0
  7. data/examples/nc.rb +36 -0
  8. data/ext/um/extconf.rb +6 -15
  9. data/ext/um/um.c +340 -53
  10. data/ext/um/um.h +33 -11
  11. data/ext/um/um_class.c +101 -119
  12. data/ext/um/um_const.c +184 -0
  13. data/ext/um/um_op.c +39 -18
  14. data/ext/um/um_utils.c +48 -3
  15. data/lib/uringmachine/version.rb +1 -1
  16. data/lib/uringmachine.rb +12 -0
  17. data/test/helper.rb +13 -12
  18. data/test/test_um.rb +301 -3
  19. data/vendor/liburing/.github/workflows/build.yml +29 -1
  20. data/vendor/liburing/.gitignore +1 -0
  21. data/vendor/liburing/CHANGELOG +15 -0
  22. data/vendor/liburing/CONTRIBUTING.md +165 -0
  23. data/vendor/liburing/configure +32 -0
  24. data/vendor/liburing/examples/Makefile +8 -1
  25. data/vendor/liburing/examples/kdigest.c +405 -0
  26. data/vendor/liburing/examples/proxy.c +75 -8
  27. data/vendor/liburing/liburing.pc.in +1 -1
  28. data/vendor/liburing/src/Makefile +16 -2
  29. data/vendor/liburing/src/include/liburing/io_uring.h +31 -0
  30. data/vendor/liburing/src/include/liburing/sanitize.h +39 -0
  31. data/vendor/liburing/src/include/liburing.h +31 -4
  32. data/vendor/liburing/src/liburing-ffi.map +5 -0
  33. data/vendor/liburing/src/liburing.map +1 -0
  34. data/vendor/liburing/src/queue.c +3 -0
  35. data/vendor/liburing/src/register.c +36 -0
  36. data/vendor/liburing/src/sanitize.c +176 -0
  37. data/vendor/liburing/src/setup.c +1 -1
  38. data/vendor/liburing/test/35fa71a030ca.c +7 -0
  39. data/vendor/liburing/test/500f9fbadef8.c +2 -0
  40. data/vendor/liburing/test/7ad0e4b2f83c.c +0 -25
  41. data/vendor/liburing/test/917257daa0fe.c +7 -0
  42. data/vendor/liburing/test/Makefile +31 -4
  43. data/vendor/liburing/test/a0908ae19763.c +7 -0
  44. data/vendor/liburing/test/a4c0b3decb33.c +7 -0
  45. data/vendor/liburing/test/accept.c +14 -4
  46. data/vendor/liburing/test/b19062a56726.c +7 -0
  47. data/vendor/liburing/test/bind-listen.c +2 -2
  48. data/vendor/liburing/test/buf-ring-nommap.c +10 -3
  49. data/vendor/liburing/test/buf-ring.c +2 -0
  50. data/vendor/liburing/test/coredump.c +7 -0
  51. data/vendor/liburing/test/cq-overflow.c +13 -1
  52. data/vendor/liburing/test/d4ae271dfaae.c +11 -3
  53. data/vendor/liburing/test/defer-taskrun.c +2 -2
  54. data/vendor/liburing/test/defer-tw-timeout.c +4 -1
  55. data/vendor/liburing/test/defer.c +2 -2
  56. data/vendor/liburing/test/double-poll-crash.c +1 -1
  57. data/vendor/liburing/test/eeed8b54e0df.c +2 -0
  58. data/vendor/liburing/test/eventfd.c +0 -1
  59. data/vendor/liburing/test/exit-no-cleanup.c +11 -0
  60. data/vendor/liburing/test/fadvise.c +9 -26
  61. data/vendor/liburing/test/fdinfo.c +9 -1
  62. data/vendor/liburing/test/file-register.c +14 -2
  63. data/vendor/liburing/test/file-update.c +1 -1
  64. data/vendor/liburing/test/file-verify.c +27 -16
  65. data/vendor/liburing/test/files-exit-hang-timeout.c +1 -2
  66. data/vendor/liburing/test/fixed-buf-iter.c +3 -1
  67. data/vendor/liburing/test/fixed-hugepage.c +12 -1
  68. data/vendor/liburing/test/fsnotify.c +1 -0
  69. data/vendor/liburing/test/futex.c +16 -4
  70. data/vendor/liburing/test/helpers.c +47 -0
  71. data/vendor/liburing/test/helpers.h +6 -0
  72. data/vendor/liburing/test/init-mem.c +5 -3
  73. data/vendor/liburing/test/io-cancel.c +0 -24
  74. data/vendor/liburing/test/io_uring_passthrough.c +2 -0
  75. data/vendor/liburing/test/io_uring_register.c +25 -6
  76. data/vendor/liburing/test/iopoll-leak.c +4 -0
  77. data/vendor/liburing/test/iopoll-overflow.c +1 -1
  78. data/vendor/liburing/test/iopoll.c +3 -3
  79. data/vendor/liburing/test/kallsyms.c +203 -0
  80. data/vendor/liburing/test/link-timeout.c +159 -0
  81. data/vendor/liburing/test/linked-defer-close.c +224 -0
  82. data/vendor/liburing/test/madvise.c +12 -25
  83. data/vendor/liburing/test/min-timeout-wait.c +0 -25
  84. data/vendor/liburing/test/min-timeout.c +0 -25
  85. data/vendor/liburing/test/mkdir.c +6 -0
  86. data/vendor/liburing/test/msg-ring.c +8 -2
  87. data/vendor/liburing/test/napi-test.c +15 -2
  88. data/vendor/liburing/test/no-mmap-inval.c +2 -0
  89. data/vendor/liburing/test/nop.c +44 -0
  90. data/vendor/liburing/test/ooo-file-unreg.c +1 -1
  91. data/vendor/liburing/test/open-close.c +40 -0
  92. data/vendor/liburing/test/openat2.c +37 -14
  93. data/vendor/liburing/test/poll-many.c +13 -7
  94. data/vendor/liburing/test/poll-mshot-update.c +17 -10
  95. data/vendor/liburing/test/poll-v-poll.c +6 -3
  96. data/vendor/liburing/test/pollfree.c +148 -0
  97. data/vendor/liburing/test/read-mshot-empty.c +156 -153
  98. data/vendor/liburing/test/read-mshot.c +276 -27
  99. data/vendor/liburing/test/read-write.c +78 -13
  100. data/vendor/liburing/test/recv-msgall-stream.c +3 -0
  101. data/vendor/liburing/test/recv-msgall.c +5 -0
  102. data/vendor/liburing/test/recvsend_bundle-inc.c +680 -0
  103. data/vendor/liburing/test/recvsend_bundle.c +92 -29
  104. data/vendor/liburing/test/reg-fd-only.c +14 -4
  105. data/vendor/liburing/test/regbuf-clone.c +187 -0
  106. data/vendor/liburing/test/regbuf-merge.c +7 -0
  107. data/vendor/liburing/test/register-restrictions.c +86 -85
  108. data/vendor/liburing/test/rename.c +59 -1
  109. data/vendor/liburing/test/ringbuf-read.c +5 -0
  110. data/vendor/liburing/test/ringbuf-status.c +5 -1
  111. data/vendor/liburing/test/runtests.sh +16 -1
  112. data/vendor/liburing/test/send-zerocopy.c +59 -0
  113. data/vendor/liburing/test/short-read.c +1 -0
  114. data/vendor/liburing/test/socket.c +43 -0
  115. data/vendor/liburing/test/splice.c +3 -1
  116. data/vendor/liburing/test/sq-poll-dup.c +1 -1
  117. data/vendor/liburing/test/sq-poll-share.c +2 -0
  118. data/vendor/liburing/test/sqpoll-disable-exit.c +8 -0
  119. data/vendor/liburing/test/sqpoll-exit-hang.c +1 -25
  120. data/vendor/liburing/test/sqpoll-sleep.c +1 -25
  121. data/vendor/liburing/test/statx.c +89 -0
  122. data/vendor/liburing/test/stdout.c +2 -0
  123. data/vendor/liburing/test/submit-and-wait.c +1 -25
  124. data/vendor/liburing/test/submit-reuse.c +4 -26
  125. data/vendor/liburing/test/symlink.c +12 -1
  126. data/vendor/liburing/test/sync-cancel.c +48 -21
  127. data/vendor/liburing/test/thread-exit.c +5 -0
  128. data/vendor/liburing/test/timeout-new.c +1 -26
  129. data/vendor/liburing/test/timeout.c +12 -26
  130. data/vendor/liburing/test/unlink.c +94 -1
  131. data/vendor/liburing/test/uring_cmd_ublk.c +1252 -0
  132. data/vendor/liburing/test/waitid.c +62 -8
  133. data/vendor/liburing/test/wq-aff.c +35 -0
  134. data/vendor/liburing/test/xfail_prep_link_timeout_out_of_scope.c +46 -0
  135. data/vendor/liburing/test/xfail_register_buffers_out_of_scope.c +51 -0
  136. metadata +17 -4
  137. data/examples/event_loop.rb +0 -69
  138. data/examples/fibers.rb +0 -105
data/ext/um/um.c CHANGED
@@ -1,6 +1,53 @@
 #include "um.h"
 #include "ruby/thread.h"
-#include <sys/mman.h>
+
+void um_setup(struct um *machine) {
+  machine->ring_initialized = 0;
+  machine->unsubmitted_count = 0;
+  machine->buffer_ring_count = 0;
+  machine->pending_count = 0;
+  machine->runqueue_head = NULL;
+  machine->runqueue_tail = NULL;
+  machine->op_freelist = NULL;
+  machine->result_freelist = NULL;
+
+  unsigned prepared_limit = 4096;
+  unsigned flags = 0;
+  #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
+  flags |= IORING_SETUP_SUBMIT_ALL;
+  #endif
+  #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
+  flags |= IORING_SETUP_COOP_TASKRUN;
+  #endif
+
+  while (1) {
+    int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
+    if (likely(!ret)) break;
+
+    // if ENOMEM is returned, try with half as much entries
+    if (unlikely(ret == -ENOMEM && prepared_limit > 64))
+      prepared_limit = prepared_limit / 2;
+    else
+      rb_syserr_fail(-ret, strerror(-ret));
+  }
+  machine->ring_initialized = 1;
+}
+
+inline void um_teardown(struct um *machine) {
+  if (!machine->ring_initialized) return;
+
+  for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
+    struct buf_ring_descriptor *desc = machine->buffer_rings + i;
+    io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
+    free(desc->buf_base);
+  }
+  machine->buffer_ring_count = 0;
+  io_uring_queue_exit(&machine->ring);
+  machine->ring_initialized = 0;
+
+  um_free_op_linked_list(machine, machine->op_freelist);
+  um_free_op_linked_list(machine, machine->runqueue_head);
+}
 
 static inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe;
@@ -24,22 +71,6 @@ done:
   return sqe;
 }
 
-inline void um_cleanup(struct um *machine) {
-  if (!machine->ring_initialized) return;
-
-  for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
-    struct buf_ring_descriptor *desc = machine->buffer_rings + i;
-    io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
-    free(desc->buf_base);
-  }
-  machine->buffer_ring_count = 0;
-  io_uring_queue_exit(&machine->ring);
-  machine->ring_initialized = 0;
-
-  um_free_linked_list(machine, machine->freelist_head);
-  um_free_linked_list(machine, machine->runqueue_head);
-}
-
 
 struct wait_for_cqe_ctx {
   struct um *machine;
@@ -66,6 +97,7 @@ inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *
 
 inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
   if (!op->results_head) {
+    // if no results are ready yet, schedule the corresponding fiber
     struct um_op *op2 = um_op_checkout(machine);
     op2->state = OP_schedule;
     op2->fiber = op->fiber;
@@ -82,21 +114,25 @@ inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
   struct um_op *op = (struct um_op *)cqe->user_data;
   if (unlikely(!op)) return;
 
+  // if (op->is_multishot)
+  //   printf("process_cqe %p state: %d result: %d flags: %d (%d)\n", op, op->state, cqe->res, cqe->flags, (cqe->flags & IORING_CQE_F_MORE));
+
   switch (op->state) {
     case OP_submitted:
       if (unlikely(cqe->res == -ECANCELED)) {
         um_op_checkin(machine, op);
         break;
       }
-      if (!op->is_multishot)
-        um_handle_submitted_op_cqe_single(machine, op, cqe);
-      else
+      if (op->is_multishot)
        um_handle_submitted_op_cqe_multi(machine, op, cqe);
+      else
+        um_handle_submitted_op_cqe_single(machine, op, cqe);
      break;
    case OP_abandonned:
      // op has been abandonned by the I/O method, so we need to cleanup (check
      // the op in to the free list).
      um_op_checkin(machine, op);
+      break;
    default:
      // TODO: invalid state, should raise!
  }
@@ -163,7 +199,7 @@ loop:
   // we need to submit events and check completions without blocking
   if (
     unlikely(
-      first_iteration && machine->unsubmitted_count && 
+      first_iteration && machine->unsubmitted_count &&
       machine->runqueue_head &&
       machine->runqueue_head->fiber == rb_fiber_current()
     )
@@ -176,9 +212,9 @@ loop:
   op = um_runqueue_shift(machine);
   if (op) {
     VALUE resume_value = op->resume_value;
-    if (op->state == OP_schedule) {
+    if (op->state == OP_schedule)
       um_op_checkin(machine, op);
-    }
+
     // the resume value is disregarded, we pass the fiber itself
     VALUE v = rb_fiber_transfer(op->fiber, 1, &resume_value);
     return v;
@@ -193,7 +229,7 @@ static inline void um_cancel_op(struct um *machine, struct um_op *op) {
   io_uring_prep_cancel64(sqe, (long long)op, 0);
 }
 
-static inline VALUE um_await_op(struct um *machine, struct um_op *op, int *result, int *flags) {
+static inline VALUE um_await_op(struct um *machine, struct um_op *op, __s32 *result, __u32 *flags) {
   op->fiber = rb_fiber_current();
   VALUE v = um_fiber_switch(machine);
   int is_exception = um_value_is_exception_p(v);
@@ -207,8 +243,6 @@ static inline VALUE um_await_op(struct um *machine, struct um_op *op, int *resul
     // checked in.
     if (result) *result = op->cqe_result;
     if (flags) *flags = op->cqe_flags;
-    if (!op->is_multishot)
-      um_op_checkin(machine, op);
   }
 
   if (unlikely(is_exception)) um_raise_exception(v);
@@ -246,7 +280,11 @@ inline void um_interrupt(struct um *machine, VALUE fiber, VALUE value) {
 struct op_ensure_ctx {
   struct um *machine;
   struct um_op *op;
+  int fd;
   int bgid;
+
+  void *read_buf;
+  int read_maxlen;
 };
 
 VALUE um_timeout_ensure(VALUE arg) {
@@ -258,10 +296,10 @@ VALUE um_timeout_ensure(VALUE arg) {
     um_cancel_op(ctx->machine, ctx->op);
     ctx->op->state == OP_abandonned;
   }
-  else {
+  else
     // completed, so can be checked in
     um_op_checkin(ctx->machine, ctx->op);
-  }
+
   return Qnil;
 }
 
@@ -270,10 +308,10 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   if (!ID_new) ID_new = rb_intern("new");
 
   struct um_op *op = um_op_checkout(machine);
-  struct __kernel_timespec ts = um_double_to_timespec(NUM2DBL(interval));
+  op->ts = um_double_to_timespec(NUM2DBL(interval));
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  io_uring_prep_timeout(sqe, &ts, 0, 0);
+  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
   op->state = OP_submitted;
   op->fiber = rb_fiber_current();
   op->resume_value = rb_funcall(class, ID_new, 0);
@@ -282,23 +320,31 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
 }
 
+inline void discard_op_if_completed(struct um *machine, struct um_op *op) {
+  if (op->state == OP_completed) um_op_checkin(machine, op);
+}
+
 inline VALUE um_sleep(struct um *machine, double duration) {
   struct um_op *op = um_op_checkout(machine);
-  struct __kernel_timespec ts = um_double_to_timespec(duration);
+  op->ts = um_double_to_timespec(duration);
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  int result = 0;
+  __s32 result = 0;
 
-  io_uring_prep_timeout(sqe, &ts, 0, 0);
+  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
   op->state = OP_submitted;
 
-  return um_await_op(machine, op, &result, NULL);
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  if (result != -ETIME) um_raise_on_system_error(result);
+  return Qnil;
 }
 
 inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
   struct um_op *op = um_op_checkout(machine);
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  int result = 0;
-  int flags = 0;
+  __s32 result = 0;
+  __u32 flags = 0;
 
   void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
   io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
@@ -306,13 +352,15 @@ inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int b
 
   um_await_op(machine, op, &result, &flags);
 
+  discard_op_if_completed(machine, op);
   um_raise_on_system_error(result);
   um_update_read_buffer(machine, buffer, buffer_offset, result, flags);
   return INT2FIX(result);
 }
 
-VALUE um_read_each_ensure(VALUE arg) {
+VALUE um_multishot_ensure(VALUE arg) {
   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+
   switch (ctx->op->state) {
     case OP_submitted:
       um_cancel_op(ctx->machine, ctx->op);
@@ -320,43 +368,282 @@ VALUE um_read_each_ensure(VALUE arg) {
     case OP_completed:
       um_op_checkin(ctx->machine, ctx->op);
       break;
-    default:
+    default:
   }
+
+  if (ctx->read_buf) free(ctx->read_buf);
   return Qnil;
 }
 
+static inline void um_read_each_prepare_op(struct op_ensure_ctx *ctx, int singleshot_mode) {
+  struct um_op *op = um_op_checkout(ctx->machine);
+  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, op);
+
+  if (singleshot_mode)
+    io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
+  else {
+    io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
+    op->is_multishot = 1;
+  }
+
+  op->state = OP_submitted;
+  ctx->op = op;
+}
+
+int um_read_each_safe_loop_singleshot(struct op_ensure_ctx *ctx, int total) {
+  struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
+  __s32 result = 0;
+  ctx->read_maxlen = desc->buf_size;
+  ctx->read_buf = malloc(desc->buf_size);
+
+  while (1) {
+    um_read_each_prepare_op(ctx, 1);
+    um_await_op(ctx->machine, ctx->op, &result, NULL);
+    um_raise_on_system_error(result);
+    if (!result) return total;
+
+    total += result;
+    VALUE buf = rb_str_new(ctx->read_buf, result);
+    rb_yield(buf);
+    um_op_checkin(ctx->machine, ctx->op);
+  }
+}
+
+
+
+int um_read_each_multishot_process_results(struct op_ensure_ctx *ctx, int *total) {
+  __s32 result = 0;
+  __u32 flags = 0;
+  __s32 bad_result = 0;
+  int eof = 0;
+
+  while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
+    if (result < 0) {
+      bad_result = result;
+      break;
+    }
+    if (result == 0) {
+      eof = 1;
+      break;
+    }
+
+    *total += result;
+    VALUE buf = um_get_string_from_buffer_ring(ctx->machine, ctx->bgid, result, flags);
+    rb_yield(buf);
+  }
+
+  if (ctx->op->state == OP_completed) {
+    um_op_checkin(ctx->machine, ctx->op);
+
+    // TTY devices might not support multishot reads:
+    // https://github.com/axboe/liburing/issues/1185. A workaround is to
+    // fallback to singleshot mode, using the first buffer in the buffer
+    // group.
+    if (!(flags & IORING_CQE_F_BUFFER)) {
+      *total = um_read_each_safe_loop_singleshot(ctx, *total);
+      return 0;
+    }
+    else
+      um_read_each_prepare_op(ctx, 0);
+  }
+  if (bad_result)
+    um_raise_on_system_error(bad_result);
+
+  return eof ? 0 : 1;
+}
+
 VALUE um_read_each_safe_loop(VALUE arg) {
   struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
-  int result = 0;
-  int flags = 0;
   int total = 0;
 
+  um_read_each_prepare_op(ctx, 0);
+
   while (1) {
     um_await_op(ctx->machine, ctx->op, NULL, NULL);
+    if (!ctx->op->results_head)
+      rb_raise(rb_eRuntimeError, "no result found!\n");
+
+    if (!um_read_each_multishot_process_results(ctx, &total))
+      return INT2NUM(total);
+  }
+}
+
+VALUE um_read_each(struct um *machine, int fd, int bgid) {
+  struct op_ensure_ctx ctx = { .machine = machine, .fd = fd, .bgid = bgid, .read_buf = NULL };
+  return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+}
+
+VALUE um_write(struct um *machine, int fd, VALUE buffer, int len) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  __s32 result = 0;
+  __u32 flags = 0;
+
+  io_uring_prep_write(sqe, fd, RSTRING_PTR(buffer), len, -1);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, &flags);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_close(struct um *machine, int fd) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  __s32 result = 0;
+  __u32 flags = 0;
+
+  io_uring_prep_close(sqe, fd);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, &flags);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(fd);
+}
+
+VALUE um_accept(struct um *machine, int fd) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  struct sockaddr addr;
+  socklen_t len;
+  __s32 result = 0;
+  __u32 flags = 0;
+
+  io_uring_prep_accept(sqe, fd, &addr, &len, 0);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, &flags);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_accept_each_safe_loop(VALUE arg) {
+  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+  __s32 result = 0;
+  __u32 flags = 0;
+
+  while (1) {
+    um_await_op(ctx->machine, ctx->op, &result, &flags);
     if (!ctx->op->results_head) {
-      // TODO: raise, this shouldn't happen
-      printf("no result found!\n");
+      // this shouldn't happen!
+      rb_raise(rb_eRuntimeError, "no result found for accept_each loop");
     }
+
     while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
-      if (likely(result > 0)) {
-        total += result;
-        VALUE buf = get_string_from_buffer_ring(ctx->machine, ctx->bgid, result, flags);
-        rb_yield(buf);
-      }
+      um_raise_on_system_error(result);
+      if (likely(result > 0))
+        rb_yield(INT2FIX(result));
       else
-        return INT2FIX(total);
+        return Qnil;
     }
   }
 }
 
-VALUE um_read_each(struct um *machine, int fd, int bgid) {
+VALUE um_accept_each(struct um *machine, int fd) {
   struct um_op *op = um_op_checkout(machine);
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-
+  io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);
+  op->state = OP_submitted;
   op->is_multishot = 1;
-  io_uring_prep_read_multishot(sqe, fd, 0, -1, bgid);
+
+  struct op_ensure_ctx ctx = { .machine = machine, .op = op, .read_buf = NULL };
+  return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+}
+
+VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_socket(sqe, domain, type, protocol, flags);
   op->state = OP_submitted;
 
-  struct op_ensure_ctx ctx = { .machine = machine, .op = op, .bgid = bgid };
-  return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_read_each_ensure, (VALUE)&ctx);
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
 }
+
+VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_connect(sqe, fd, addr, addrlen);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
+  io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  um_update_read_buffer(machine, buffer, 0, result, flags);
+  return INT2FIX(result);
+}
+
+VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_bind(sqe, fd, addr, addrlen);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_listen(struct um *machine, int fd, int backlog) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_listen(sqe, fd, backlog);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, NULL);
+
+  discard_op_if_completed(machine, op);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
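
A note on the TTY workaround referenced in um_read_each_multishot_process_results above: on some file types (notably TTYs) a multishot read can complete without IORING_CQE_F_BUFFER set, meaning no ring buffer was consumed, and the reader has to fall back to plain single-shot reads. The sketch below is not part of the gem; it is a minimal standalone liburing program (assuming liburing 2.6+ for io_uring_prep_read_multishot) that shows the same detection logic against stdin. All names in it (BGID, BUF_COUNT, bufs, ...) are illustrative only.

// Hypothetical standalone sketch: detect whether a multishot read actually
// used a provided buffer (IORING_CQE_F_BUFFER) and fall back to a plain read
// when it did not. Not taken from uringmachine.
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BGID      0
#define BUF_COUNT 8
#define BUF_SIZE  4096

int main(void) {
  struct io_uring ring;
  struct io_uring_buf_ring *br;
  char *bufs;
  int ret;

  if (io_uring_queue_init(8, &ring, 0)) return 1;

  // Register a buffer ring for buffer group BGID and fill it with buffers.
  br = io_uring_setup_buf_ring(&ring, BUF_COUNT, BGID, 0, &ret);
  if (!br) return 1;
  bufs = malloc(BUF_COUNT * BUF_SIZE);
  for (int i = 0; i < BUF_COUNT; i++)
    io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
                          io_uring_buf_ring_mask(BUF_COUNT), i);
  io_uring_buf_ring_advance(br, BUF_COUNT);

  // Issue a multishot read on stdin, served from the buffer group.
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_read_multishot(sqe, STDIN_FILENO, 0, -1, BGID);
  io_uring_submit(&ring);

  struct io_uring_cqe *cqe;
  if (io_uring_wait_cqe(&ring, &cqe)) return 1;

  if (cqe->res < 0) {
    fprintf(stderr, "read failed: %s\n", strerror(-cqe->res));
  } else if (cqe->flags & IORING_CQE_F_BUFFER) {
    int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
    printf("multishot read: %d bytes in buffer %d\n", cqe->res, bid);
  } else {
    // No buffer was selected (seen with TTYs): fall back to a plain read.
    char tmp[BUF_SIZE];
    io_uring_cqe_seen(&ring, cqe);
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, STDIN_FILENO, tmp, sizeof(tmp), -1);
    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("singleshot fallback read: %d bytes\n", cqe->res);
  }
  io_uring_cqe_seen(&ring, cqe);

  io_uring_free_buf_ring(&ring, br, BUF_COUNT, BGID);
  free(bufs);
  io_uring_queue_exit(&ring);
  return 0;
}

In the gem this fallback stays transparent to the caller: the read_each loop keeps yielding strings whether they came from the buffer ring or from the single-shot path.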
data/ext/um/um.h CHANGED
@@ -32,8 +32,8 @@ enum op_state {
 struct um_result_entry {
   struct um_result_entry *next;
 
-  int result;
-  int flags;
+  __s32 result;
+  __u32 flags;
 };
 
 struct um_op {
@@ -44,10 +44,12 @@ struct um_op {
   // linked list for multishot results
   struct um_result_entry *results_head;
   struct um_result_entry *results_tail;
-  
+
   VALUE fiber;
   VALUE resume_value;
   int is_multishot;
+  struct __kernel_timespec ts;
+
   int cqe_result;
   int cqe_flags;
 };
@@ -57,13 +59,16 @@ struct buf_ring_descriptor {
   size_t br_size;
   unsigned buf_count;
   unsigned buf_size;
-  char *buf_base;
+  unsigned buf_mask;
+  void *buf_base;
 };
 
 #define BUFFER_RING_MAX_COUNT 10
 
 struct um {
-  struct um_op *freelist_head;
+  struct um_op *op_freelist;
+  struct um_result_entry *result_freelist;
+
   struct um_op *runqueue_head;
   struct um_op *runqueue_tail;
 
@@ -79,25 +84,29 @@ struct um {
 
 extern VALUE cUM;
 
+void um_setup(struct um *machine);
+void um_teardown(struct um *machine);
+void um_free_op_linked_list(struct um *machine, struct um_op *op);
+void um_free_result_linked_list(struct um *machine, struct um_result_entry *entry);
+
 struct __kernel_timespec um_double_to_timespec(double value);
 int um_value_is_exception_p(VALUE v);
 VALUE um_raise_exception(VALUE v);
 void um_raise_on_system_error(int result);
 
 void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs);
-void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, int result, int flags);
-VALUE get_string_from_buffer_ring(struct um *machine, int bgid, int result, int flags);
+void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, __s32 result, __u32 flags);
 
-void um_cleanup(struct um *machine);
+int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count);
+VALUE um_get_string_from_buffer_ring(struct um *machine, int bgid, __s32 result, __u32 flags);
 
-void um_free_linked_list(struct um *machine, struct um_op *op);
 VALUE um_fiber_switch(struct um *machine);
 VALUE um_await(struct um *machine);
 
 void um_op_checkin(struct um *machine, struct um_op *op);
 struct um_op* um_op_checkout(struct um *machine);
-void um_op_result_push(struct um *machine, struct um_op *op, int result, int flags);
-int um_op_result_shift(struct um *machine, struct um_op *op, int *result, int *flags);
+void um_op_result_push(struct um *machine, struct um_op *op, __s32 result, __u32 flags);
+int um_op_result_shift(struct um *machine, struct um_op *op, __s32 *result, __u32 *flags);
 
 struct um_op *um_runqueue_find_by_fiber(struct um *machine, VALUE fiber);
 void um_runqueue_push(struct um *machine, struct um_op *op);
@@ -111,5 +120,18 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);
 VALUE um_sleep(struct um *machine, double duration);
 VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
 VALUE um_read_each(struct um *machine, int fd, int bgid);
+VALUE um_write(struct um *machine, int fd, VALUE buffer, int len);
+VALUE um_close(struct um *machine, int fd);
+
+VALUE um_accept(struct um *machine, int fd);
+VALUE um_accept_each(struct um *machine, int fd);
+VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags);
+VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen);
+VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags);
+VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags);
+VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen);
+VALUE um_listen(struct um *machine, int fd, int backlog);
+
+void um_define_net_constants(VALUE mod);
 
 #endif // UM_H
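
The int → __s32/__u32 changes in the declarations above follow the kernel's CQE layout: cqe->res is a signed 32-bit value carrying either a byte count or a negative errno, while cqe->flags is an unsigned bitfield (IORING_CQE_F_MORE, IORING_CQE_F_BUFFER, and friends). The following standalone sketch is not taken from the gem; it submits a single NOP and shows the usual way such a result/flags pair is interpreted (check_cqe is a hypothetical helper name).

// Illustrative only: how a CQE result/flags pair is typically interpreted.
#include <liburing.h>
#include <stdio.h>
#include <string.h>

// check_cqe is a hypothetical helper, not part of uringmachine or liburing.
static int check_cqe(struct io_uring_cqe *cqe) {
  __s32 res = cqe->res;     // signed: negative values are -errno
  __u32 flags = cqe->flags; // unsigned: completion flag bits
  if (res < 0) {
    fprintf(stderr, "op failed: %s\n", strerror(-res));
    return -1;
  }
  if (flags & IORING_CQE_F_MORE)
    printf("multishot op will post more completions\n");
  return res;
}

int main(void) {
  struct io_uring ring;
  struct io_uring_cqe *cqe;

  if (io_uring_queue_init(8, &ring, 0)) return 1;

  // Submit a NOP and inspect its completion.
  struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
  io_uring_prep_nop(sqe);
  io_uring_submit(&ring);
  io_uring_wait_cqe(&ring, &cqe);
  printf("nop result: %d\n", check_cqe(cqe));
  io_uring_cqe_seen(&ring, cqe);

  io_uring_queue_exit(&ring);
  return 0;
}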