uringmachine 0.19.1 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +3 -4
  3. data/CHANGELOG.md +32 -1
  4. data/TODO.md +0 -39
  5. data/examples/bm_fileno.rb +33 -0
  6. data/examples/bm_mutex.rb +85 -0
  7. data/examples/bm_mutex_single.rb +33 -0
  8. data/examples/bm_queue.rb +29 -29
  9. data/examples/bm_send.rb +2 -5
  10. data/examples/bm_snooze.rb +20 -42
  11. data/examples/bm_write.rb +4 -1
  12. data/examples/fiber_scheduler_demo.rb +15 -51
  13. data/examples/fiber_scheduler_fork.rb +24 -0
  14. data/examples/nc_ssl.rb +71 -0
  15. data/ext/um/extconf.rb +5 -15
  16. data/ext/um/um.c +310 -74
  17. data/ext/um/um.h +66 -29
  18. data/ext/um/um_async_op.c +1 -1
  19. data/ext/um/um_async_op_class.c +2 -2
  20. data/ext/um/um_buffer.c +1 -1
  21. data/ext/um/um_class.c +178 -31
  22. data/ext/um/um_const.c +51 -3
  23. data/ext/um/um_mutex_class.c +1 -1
  24. data/ext/um/um_op.c +37 -0
  25. data/ext/um/um_queue_class.c +1 -1
  26. data/ext/um/um_stream.c +5 -5
  27. data/ext/um/um_stream_class.c +3 -0
  28. data/ext/um/um_sync.c +28 -39
  29. data/ext/um/um_utils.c +59 -19
  30. data/grant-2025/journal.md +353 -0
  31. data/grant-2025/tasks.md +135 -0
  32. data/lib/uringmachine/fiber_scheduler.rb +316 -57
  33. data/lib/uringmachine/version.rb +1 -1
  34. data/lib/uringmachine.rb +6 -0
  35. data/test/test_fiber_scheduler.rb +640 -0
  36. data/test/test_stream.rb +2 -2
  37. data/test/test_um.rb +722 -54
  38. data/uringmachine.gemspec +5 -5
  39. data/vendor/liburing/.github/workflows/ci.yml +94 -1
  40. data/vendor/liburing/.github/workflows/test_build.c +9 -0
  41. data/vendor/liburing/configure +27 -0
  42. data/vendor/liburing/examples/Makefile +6 -0
  43. data/vendor/liburing/examples/helpers.c +8 -0
  44. data/vendor/liburing/examples/helpers.h +5 -0
  45. data/vendor/liburing/liburing.spec +1 -1
  46. data/vendor/liburing/src/Makefile +9 -3
  47. data/vendor/liburing/src/include/liburing/barrier.h +11 -5
  48. data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
  49. data/vendor/liburing/src/include/liburing/io_uring.h +51 -0
  50. data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
  51. data/vendor/liburing/src/include/liburing.h +458 -121
  52. data/vendor/liburing/src/liburing-ffi.map +16 -0
  53. data/vendor/liburing/src/liburing.map +8 -0
  54. data/vendor/liburing/src/sanitize.c +4 -1
  55. data/vendor/liburing/src/setup.c +7 -4
  56. data/vendor/liburing/test/232c93d07b74.c +4 -16
  57. data/vendor/liburing/test/Makefile +15 -1
  58. data/vendor/liburing/test/accept.c +2 -13
  59. data/vendor/liburing/test/bind-listen.c +175 -13
  60. data/vendor/liburing/test/conn-unreach.c +132 -0
  61. data/vendor/liburing/test/fd-pass.c +32 -7
  62. data/vendor/liburing/test/fdinfo.c +39 -12
  63. data/vendor/liburing/test/fifo-futex-poll.c +114 -0
  64. data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
  65. data/vendor/liburing/test/futex.c +1 -1
  66. data/vendor/liburing/test/helpers.c +99 -2
  67. data/vendor/liburing/test/helpers.h +9 -0
  68. data/vendor/liburing/test/io_uring_passthrough.c +6 -12
  69. data/vendor/liburing/test/mock_file.c +379 -0
  70. data/vendor/liburing/test/mock_file.h +47 -0
  71. data/vendor/liburing/test/nop.c +2 -2
  72. data/vendor/liburing/test/nop32-overflow.c +150 -0
  73. data/vendor/liburing/test/nop32.c +126 -0
  74. data/vendor/liburing/test/pipe.c +166 -0
  75. data/vendor/liburing/test/poll-race-mshot.c +13 -1
  76. data/vendor/liburing/test/read-write.c +4 -4
  77. data/vendor/liburing/test/recv-mshot-fair.c +81 -34
  78. data/vendor/liburing/test/recvsend_bundle.c +1 -1
  79. data/vendor/liburing/test/resize-rings.c +2 -0
  80. data/vendor/liburing/test/ring-query.c +322 -0
  81. data/vendor/liburing/test/ringbuf-loop.c +87 -0
  82. data/vendor/liburing/test/ringbuf-read.c +4 -4
  83. data/vendor/liburing/test/runtests.sh +2 -2
  84. data/vendor/liburing/test/send-zerocopy.c +43 -5
  85. data/vendor/liburing/test/send_recv.c +103 -32
  86. data/vendor/liburing/test/shutdown.c +2 -12
  87. data/vendor/liburing/test/socket-nb.c +3 -14
  88. data/vendor/liburing/test/socket-rw-eagain.c +2 -12
  89. data/vendor/liburing/test/socket-rw-offset.c +2 -12
  90. data/vendor/liburing/test/socket-rw.c +2 -12
  91. data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
  92. data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
  93. data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
  94. data/vendor/liburing/test/timestamp.c +56 -19
  95. data/vendor/liburing/test/vec-regbuf.c +2 -4
  96. data/vendor/liburing/test/wq-aff.c +7 -0
  97. metadata +37 -15
data/ext/um/um_op.c CHANGED
@@ -1,5 +1,42 @@
1
1
  #include "um.h"
2
2
 
3
+ const char * um_op_kind_name(enum um_op_kind kind) {
4
+ switch (kind) {
5
+ case OP_TIMEOUT: return "OP_TIMEOUT";
6
+ case OP_SCHEDULE: return "OP_SCHEDULE";
7
+ case OP_SLEEP: return "OP_SLEEP";
8
+ case OP_OPEN: return "OP_OPEN";
9
+ case OP_READ: return "OP_READ";
10
+ case OP_WRITE: return "OP_WRITE";
11
+ case OP_WRITE_ASYNC: return "OP_WRITE_ASYNC";
12
+ case OP_CLOSE: return "OP_CLOSE";
13
+ case OP_CLOSE_ASYNC: return "OP_CLOSE_ASYNC";
14
+ case OP_STATX: return "OP_STATX";
15
+ case OP_ACCEPT: return "OP_ACCEPT";
16
+ case OP_RECV: return "OP_RECV";
17
+ case OP_SEND: return "OP_SEND";
18
+ case OP_SEND_BUNDLE: return "OP_SEND_BUNDLE";
19
+ case OP_SOCKET: return "OP_SOCKET";
20
+ case OP_CONNECT: return "OP_CONNECT";
21
+ case OP_BIND: return "OP_BIND";
22
+ case OP_LISTEN: return "OP_LISTEN";
23
+ case OP_GETSOCKOPT: return "OP_GETSOCKOPT";
24
+ case OP_SETSOCKOPT: return "OP_SETSOCKOPT";
25
+ case OP_SHUTDOWN: return "OP_SHUTDOWN";
26
+ case OP_SHUTDOWN_ASYNC: return "OP_SHUTDOWN_ASYNC";
27
+ case OP_POLL: return "OP_POLL";
28
+ case OP_WAITID: return "OP_WAITID";
29
+ case OP_FUTEX_WAIT: return "OP_FUTEX_WAIT";
30
+ case OP_FUTEX_WAKE: return "OP_FUTEX_WAKE";
31
+ case OP_ACCEPT_MULTISHOT: return "OP_ACCEPT_MULTISHOT";
32
+ case OP_READ_MULTISHOT: return "OP_READ_MULTISHOT";
33
+ case OP_RECV_MULTISHOT: return "OP_RECV_MULTISHOT";
34
+ case OP_TIMEOUT_MULTISHOT: return "OP_TIMEOUT_MULTISHOT";
35
+ case OP_SLEEP_MULTISHOT: return "OP_SLEEP_MULTISHOT";
36
+ default: return "UNKNOWN_OP_KIND";
37
+ }
38
+ }
39
+
3
40
  inline void um_op_clear(struct um *machine, struct um_op *op) {
4
41
  memset(op, 0, sizeof(struct um_op));
5
42
  op->fiber = Qnil;
@@ -26,7 +26,7 @@ static const rb_data_type_t Queue_type = {
26
26
  .dsize = NULL,
27
27
  .dcompact = Queue_compact
28
28
  },
29
- .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
29
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
30
30
  };
31
31
 
32
32
  static VALUE Queue_allocate(VALUE klass) {
data/ext/um/um_stream.c CHANGED
@@ -211,7 +211,7 @@ static inline VALUE resp_decode_simple_error(char *ptr, ulong len) {
211
211
  if (!ID_new) ID_new = rb_intern("new");
212
212
 
213
213
  VALUE msg = rb_str_new(ptr + 1, len - 1);
214
- VALUE err = rb_funcall(rb_eRuntimeError, ID_new, 1, msg);
214
+ VALUE err = rb_funcall(eStreamRESPError, ID_new, 1, msg);
215
215
  RB_GC_GUARD(msg);
216
216
  return err;
217
217
  }
@@ -221,7 +221,7 @@ static inline VALUE resp_decode_error(struct um_stream *stream, VALUE out_buffer
221
221
  if (!ID_new) ID_new = rb_intern("new");
222
222
 
223
223
  VALUE msg = resp_decode_string(stream, out_buffer, len);
224
- VALUE err = rb_funcall(rb_eRuntimeError, ID_new, 1, msg);
224
+ VALUE err = rb_funcall(eStreamRESPError, ID_new, 1, msg);
225
225
  RB_GC_GUARD(msg);
226
226
  return err;
227
227
  }
@@ -264,7 +264,7 @@ VALUE resp_decode(struct um_stream *stream, VALUE out_buffer) {
264
264
  case ':': // integer
265
265
  return resp_decode_integer(ptr);
266
266
  case '(': // big integer
267
- rb_raise(rb_eRuntimeError, "Big integers are not supported");
267
+ um_raise_internal_error("Big integers are not supported");
268
268
  case ',': // float
269
269
  return resp_decode_float(ptr);
270
270
 
@@ -274,7 +274,7 @@ VALUE resp_decode(struct um_stream *stream, VALUE out_buffer) {
274
274
  data_len = resp_parse_length_field(ptr, len);
275
275
  return resp_decode_error(stream, out_buffer, data_len);
276
276
  default:
277
- rb_raise(rb_eRuntimeError, "Invalid character encountered");
277
+ um_raise_internal_error("Invalid character encountered");
278
278
  }
279
279
 
280
280
  RB_GC_GUARD(msg);
@@ -390,6 +390,6 @@ void resp_encode(struct um_write_buffer *buf, VALUE obj) {
390
390
  return;
391
391
  }
392
392
  default:
393
- rb_raise(rb_eRuntimeError, "Can't encode object");
393
+ um_raise_internal_error("Can't encode object");
394
394
  }
395
395
  }
@@ -1,6 +1,7 @@
1
1
  #include "um.h"
2
2
 
3
3
  VALUE cStream;
4
+ VALUE eStreamRESPError;
4
5
 
5
6
  static void Stream_mark(void *ptr) {
6
7
  struct um_stream *stream = ptr;
@@ -94,4 +95,6 @@ void Init_Stream(void) {
94
95
 
95
96
  rb_define_method(cStream, "resp_decode", Stream_resp_decode, 0);
96
97
  rb_define_singleton_method(cStream, "resp_encode", Stream_resp_encode, 2);
98
+
99
+ eStreamRESPError = rb_define_class_under(cStream, "RESPError", rb_eStandardError);
97
100
  }
data/ext/um/um_sync.c CHANGED
@@ -4,13 +4,13 @@
4
4
 
5
5
  #define FUTEX2_SIZE_U32 0x02
6
6
 
7
- void um_futex_wait(struct um *machine, uint32_t *futex, uint32_t expect) {
7
+ // The value argument is the current (known) futex value.
8
+ void um_futex_wait(struct um *machine, uint32_t *futex, uint32_t value) {
8
9
  struct um_op op;
9
10
  um_prep_op(machine, &op, OP_FUTEX_WAIT, 0);
10
11
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
11
12
  io_uring_prep_futex_wait(
12
- sqe, (uint32_t *)futex, expect, FUTEX_BITSET_MATCH_ANY,
13
- FUTEX2_SIZE_U32, 0
13
+ sqe, (uint32_t *)futex, value, FUTEX_BITSET_MATCH_ANY, FUTEX2_SIZE_U32, 0
14
14
  );
15
15
 
16
16
  VALUE ret = um_fiber_switch(machine);
@@ -29,10 +29,8 @@ void um_futex_wake(struct um *machine, uint32_t *futex, uint32_t num_waiters) {
29
29
  struct um_op op;
30
30
  um_prep_op(machine, &op, OP_FUTEX_WAKE, 0);
31
31
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
32
- // submit futex_wait
33
32
  io_uring_prep_futex_wake(
34
- sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY,
35
- FUTEX2_SIZE_U32, 0
33
+ sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY, FUTEX2_SIZE_U32, 0
36
34
  );
37
35
 
38
36
  VALUE ret = um_fiber_switch(machine);
@@ -45,55 +43,55 @@ void um_futex_wake(struct um *machine, uint32_t *futex, uint32_t num_waiters) {
45
43
  void um_futex_wake_transient(struct um *machine, uint32_t *futex, uint32_t num_waiters) {
46
44
  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
47
45
  io_uring_prep_futex_wake(
48
- sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY,
49
- FUTEX2_SIZE_U32, 0
46
+ sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY, FUTEX2_SIZE_U32, 0
50
47
  );
48
+ um_submit(machine);
51
49
  }
52
50
 
53
-
54
51
  #define MUTEX_LOCKED 1
55
52
  #define MUTEX_UNLOCKED 0
56
53
 
57
54
  void um_mutex_init(struct um_mutex *mutex) {
58
55
  mutex->state = MUTEX_UNLOCKED;
56
+ mutex->num_waiters = 0;
59
57
  }
60
58
 
61
- inline void um_mutex_lock(struct um *machine, uint32_t *state) {
62
- while (*state == MUTEX_LOCKED) {
63
- um_futex_wait(machine, state, MUTEX_LOCKED);
59
+ inline void um_mutex_lock(struct um *machine, struct um_mutex *mutex) {
60
+ mutex->num_waiters++;
61
+ while (mutex->state == MUTEX_LOCKED) {
62
+ um_futex_wait(machine, &mutex->state, MUTEX_LOCKED);
64
63
  }
65
- *state = MUTEX_LOCKED;
64
+ mutex->num_waiters--;
65
+ mutex->state = MUTEX_LOCKED;
66
66
  }
67
67
 
68
- inline void um_mutex_unlock(struct um *machine, uint32_t *state) {
69
- *state = MUTEX_UNLOCKED;
70
- // Wake up 1 waiting fiber
71
- um_futex_wake(machine, state, 1);
68
+ inline void um_mutex_unlock(struct um *machine, struct um_mutex *mutex) {
69
+ mutex->state = MUTEX_UNLOCKED;
70
+
71
+ if (mutex->num_waiters)
72
+ // Wake up 1 waiting fiber
73
+ um_futex_wake(machine, &mutex->state, 1);
72
74
  }
73
75
 
74
76
  struct sync_ctx {
75
77
  struct um *machine;
76
- VALUE mutex;
77
- uint32_t *state;
78
+ struct um_mutex *mutex;
78
79
  };
79
80
 
80
81
  VALUE synchronize_start(VALUE arg) {
81
82
  struct sync_ctx *ctx = (struct sync_ctx *)arg;
82
- um_mutex_lock(ctx->machine, ctx->state);
83
+ um_mutex_lock(ctx->machine, ctx->mutex);
83
84
  return rb_yield(Qnil);
84
85
  }
85
86
 
86
87
  VALUE synchronize_complete(VALUE arg) {
87
88
  struct sync_ctx *ctx = (struct sync_ctx *)arg;
88
- // Mutex is an embedded data class, so it might have moved while the operation
89
- // was ongoing. We need to update the pointer to the embedded state variable.
90
- ctx->state = &Mutex_data(ctx->mutex)->state;
91
- um_mutex_unlock(ctx->machine, ctx->state);
89
+ um_mutex_unlock(ctx->machine, ctx->mutex);
92
90
  return Qnil;
93
91
  }
94
92
 
95
- inline VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state) {
96
- struct sync_ctx ctx = { .machine = machine, .mutex = mutex, .state = state };
93
+ inline VALUE um_mutex_synchronize(struct um *machine, struct um_mutex *mutex) {
94
+ struct sync_ctx ctx = { .machine = machine, .mutex = mutex };
97
95
  return rb_ensure(synchronize_start, (VALUE)&ctx, synchronize_complete, (VALUE)&ctx);
98
96
  }
99
97
 
@@ -209,7 +207,6 @@ static inline VALUE um_queue_add(struct um *machine, struct um_queue *queue, VAL
209
207
  else queue_add_tail(queue, value);
210
208
 
211
209
  queue->count++;
212
-
213
210
  queue->state = QUEUE_READY;
214
211
  if (queue->num_waiters)
215
212
  um_futex_wake_transient(machine, &queue->state, 1);
@@ -228,7 +225,6 @@ enum queue_op { QUEUE_POP, QUEUE_SHIFT };
228
225
 
229
226
  struct queue_wait_ctx {
230
227
  struct um *machine;
231
- VALUE queue_obj;
232
228
  struct um_queue *queue;
233
229
  enum queue_op op;
234
230
  };
@@ -241,10 +237,8 @@ VALUE um_queue_remove_start(VALUE arg) {
241
237
  um_futex_wait(ctx->machine, &ctx->queue->state, QUEUE_EMPTY);
242
238
  }
243
239
 
244
- if (ctx->queue->state != QUEUE_READY)
245
- rb_raise(rb_eRuntimeError, "Internal error: queue should be in ready state!");
246
- if (!ctx->queue->tail)
247
- rb_raise(rb_eRuntimeError, "Internal error: queue should be in ready state!");
240
+ assert(ctx->queue->state == QUEUE_READY);
241
+ assert(ctx->queue->tail);
248
242
 
249
243
  ctx->queue->count--;
250
244
  return (ctx->op == QUEUE_POP ? queue_remove_tail : queue_remove_head)(ctx->queue);
@@ -252,11 +246,6 @@ VALUE um_queue_remove_start(VALUE arg) {
252
246
 
253
247
  VALUE um_queue_remove_complete(VALUE arg) {
254
248
  struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
255
-
256
- // the um_queue struct is embedded, so it might have been moved while the op
257
- // was ongoing, so we need to get it again on op completion
258
- ctx->queue = Queue_data(ctx->queue_obj);
259
-
260
249
  ctx->queue->num_waiters--;
261
250
 
262
251
  if (ctx->queue->num_waiters && ctx->queue->tail) {
@@ -270,11 +259,11 @@ VALUE um_queue_remove_complete(VALUE arg) {
270
259
  }
271
260
 
272
261
  VALUE um_queue_pop(struct um *machine, struct um_queue *queue) {
273
- struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_POP };
262
+ struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_POP };
274
263
  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
275
264
  }
276
265
 
277
266
  VALUE um_queue_shift(struct um *machine, struct um_queue *queue) {
278
- struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_SHIFT };
267
+ struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_SHIFT };
279
268
  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
280
269
  }
data/ext/um/um_utils.c CHANGED
@@ -1,6 +1,7 @@
1
1
  #include "um.h"
2
2
  #include <sys/mman.h>
3
3
  #include <stdlib.h>
4
+ #include <ruby/io/buffer.h>
4
5
 
5
6
  inline struct __kernel_timespec um_double_to_timespec(double value) {
6
7
  double integral;
@@ -32,35 +33,70 @@ inline void um_raise_on_error_result(int result) {
32
33
  if (unlikely(result < 0)) rb_syserr_fail(-result, strerror(-result));
33
34
  }
34
35
 
35
- inline void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs) {
36
- unsigned current_len = RSTRING_LEN(buffer);
37
- if (ofs < 0) ofs = current_len + ofs + 1;
38
- unsigned new_len = len + (unsigned)ofs;
39
-
40
- if (current_len < new_len)
41
- rb_str_modify_expand(buffer, new_len);
36
+ inline void * um_prepare_read_buffer(VALUE buffer, ssize_t len, ssize_t ofs) {
37
+ if (TYPE(buffer) == T_STRING) {
38
+ size_t current_len = RSTRING_LEN(buffer);
39
+ if (len == -1) len = current_len;
40
+ if (ofs < 0) ofs = current_len + ofs + 1;
41
+ size_t new_len = len + (size_t)ofs;
42
+
43
+ if (current_len < new_len)
44
+ rb_str_modify_expand(buffer, new_len);
45
+ else
46
+ rb_str_modify(buffer);
47
+ return RSTRING_PTR(buffer) + ofs;
48
+ }
49
+ else if (IO_BUFFER_P(buffer)) {
50
+ void *base;
51
+ size_t size;
52
+ rb_io_buffer_get_bytes_for_writing(buffer, &base, &size); // writing *to* buffer
53
+ if (len == -1) len = size;
54
+ if (ofs < 0) ofs = size + ofs + 1;
55
+ size_t new_size = len + (size_t)ofs;
56
+
57
+ if (size < new_size) {
58
+ rb_io_buffer_resize(buffer, new_size);
59
+ rb_io_buffer_get_bytes_for_writing(buffer, &base, &size);
60
+ }
61
+ return base + ofs;
62
+ }
42
63
  else
43
- rb_str_modify(buffer);
44
- return RSTRING_PTR(buffer) + ofs;
64
+ um_raise_internal_error("Invalid buffer provided");
45
65
  }
46
66
 
47
- static inline void adjust_read_buffer_len(VALUE buffer, int result, int ofs) {
48
- rb_str_modify(buffer);
49
- unsigned len = result > 0 ? (unsigned)result : 0;
50
- unsigned current_len = RSTRING_LEN(buffer);
51
- if (ofs < 0) ofs = current_len + ofs + 1;
52
- rb_str_set_len(buffer, len + (unsigned)ofs);
67
+ static inline void adjust_read_buffer_len(VALUE buffer, int result, ssize_t ofs) {
68
+ if (TYPE(buffer) == T_STRING) {
69
+ rb_str_modify(buffer);
70
+ unsigned len = result > 0 ? (unsigned)result : 0;
71
+ unsigned current_len = RSTRING_LEN(buffer);
72
+ if (ofs < 0) ofs = current_len + ofs + 1;
73
+ rb_str_set_len(buffer, len + (unsigned)ofs);
74
+ }
75
+ else if (IO_BUFFER_P(buffer)) {
76
+ // do nothing?
77
+ }
53
78
  }
54
79
 
55
- inline void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, __s32 result, __u32 flags) {
80
+ inline void um_update_read_buffer(struct um *machine, VALUE buffer, ssize_t buffer_offset, __s32 result, __u32 flags) {
56
81
  if (!result) return;
57
82
 
58
83
  adjust_read_buffer_len(buffer, result, buffer_offset);
59
84
  }
60
85
 
86
+ inline void um_get_buffer_bytes_for_writing(VALUE buffer, const void **base, size_t *size) {
87
+ if (TYPE(buffer) == T_STRING) {
88
+ *base = RSTRING_PTR(buffer);
89
+ *size = RSTRING_LEN(buffer);
90
+ }
91
+ else if (IO_BUFFER_P(buffer))
92
+ rb_io_buffer_get_bytes_for_reading(buffer, base, size); // reading *from* buffer
93
+ else
94
+ um_raise_internal_error("Invalid buffer provided");
95
+ }
96
+
61
97
  int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count) {
62
98
  if (machine->buffer_ring_count == BUFFER_RING_MAX_COUNT)
63
- rb_raise(rb_eRuntimeError, "Cannot setup more than BUFFER_RING_MAX_COUNT buffer rings");
99
+ um_raise_internal_error("Cannot setup more than BUFFER_RING_MAX_COUNT buffer rings");
64
100
 
65
101
  struct buf_ring_descriptor *desc = machine->buffer_rings + machine->buffer_ring_count;
66
102
  desc->buf_count = count;
@@ -72,7 +108,7 @@ int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count) {
72
108
  NULL, desc->br_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0
73
109
  );
74
110
  if (mapped == MAP_FAILED)
75
- rb_raise(rb_eRuntimeError, "Failed to allocate buffer ring");
111
+ um_raise_internal_error("Failed to allocate buffer ring");
76
112
 
77
113
  desc->br = (struct io_uring_buf_ring *)mapped;
78
114
  io_uring_buf_ring_init(desc->br);
@@ -88,7 +124,7 @@ int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count) {
88
124
  if (size > 0) {
89
125
  if (posix_memalign(&desc->buf_base, 4096, desc->buf_count * desc->buf_size)) {
90
126
  io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, bg_id);
91
- rb_raise(rb_eRuntimeError, "Failed to allocate buffers");
127
+ um_raise_internal_error("Failed to allocate buffers");
92
128
  }
93
129
 
94
130
  void *ptr = desc->buf_base;
@@ -143,3 +179,7 @@ inline void um_add_strings_to_buffer_ring(struct um *machine, int bgid, VALUE st
143
179
  RB_GC_GUARD(converted);
144
180
  io_uring_buf_ring_advance(desc->br, count);
145
181
  }
182
+
183
+ inline void um_raise_internal_error(const char *msg) {
184
+ rb_raise(eUMError, "UringMachine error: %s", msg);
185
+ }