uringmachine 0.22.1 → 0.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/ext/um/um.c CHANGED
@@ -3,6 +3,7 @@
  #include <ruby/thread.h>
  #include <assert.h>
  #include <poll.h>
+ #include <liburing/io_uring.h>
 
  #define DEFAULT_SIZE 4096
 
@@ -21,7 +22,7 @@ void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_mse
  memset(machine, 0, sizeof(struct um));
 
  RB_OBJ_WRITE(self, &machine->self, self);
- RB_OBJ_WRITE(self, &machine->pending_fibers, rb_hash_new());
+ RB_OBJ_WRITE(self, &machine->pending_fibers, rb_set_new());
 
  machine->size = (size > 0) ? size : DEFAULT_SIZE;
  machine->sqpoll_mode = !!sqpoll_timeout_msec;
@@ -369,9 +370,9 @@ inline VALUE um_switch(struct um *machine) {
 
  inline VALUE um_yield(struct um *machine) {
  VALUE fiber = rb_fiber_current();
- rb_hash_aset(machine->pending_fibers, fiber, Qtrue);
+ rb_set_add(machine->pending_fibers, fiber);
  VALUE ret = um_switch(machine);
- rb_hash_delete(machine->pending_fibers, fiber);
+ rb_set_delete(machine->pending_fibers, fiber);
  return ret;
  }
 
@@ -384,11 +385,11 @@ inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
  um_cancel_op(machine, op);
 
  VALUE fiber = rb_fiber_current();
- rb_hash_aset(machine->pending_fibers, fiber, Qtrue);
+ rb_set_add(machine->pending_fibers, fiber);
  while (!um_op_completed_p(op)) {
  um_switch(machine);
  }
- rb_hash_delete(machine->pending_fibers, fiber);
+ rb_set_delete(machine->pending_fibers, fiber);
  }
 
  inline int um_check_completion(struct um *machine, struct um_op *op) {
@@ -436,6 +437,7 @@ struct op_ctx {
  int fd;
  int bgid;
 
+ struct um_queue *queue;
  void *read_buf;
  int read_maxlen;
  struct __kernel_timespec ts;
@@ -561,6 +563,56 @@ VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_
  return ret;
  }
 
+ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
+ __u64 file_offset = -1;
+ if (TYPE(argv[argc - 1]) == T_FIXNUM) {
+ file_offset = NUM2UINT(argv[argc - 1]);
+ argc--;
+ }
+
+ size_t total_len, len;
+ struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, &total_len);
+ struct iovec *iovecs_ptr = iovecs;
+ int iovecs_len = argc;
+ struct um_op op;
+ VALUE ret = Qnil;
+ int writev_res = 0;
+
+ if (unlikely(!total_len)) {
+ free(iovecs);
+ return INT2NUM(0);
+ }
+ len = total_len;
+ while (len) {
+ um_prep_op(machine, &op, OP_WRITEV, 0);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+ io_uring_prep_writev(sqe, fd, iovecs_ptr, iovecs_len, file_offset);
+
+ ret = um_yield(machine);
+
+ int completed = um_op_completed_p(&op);
+ if (unlikely(!completed)) goto cancelled;
+
+ writev_res = op.result.res;
+ if (unlikely(writev_res < 0)) goto done;
+
+ len -= writev_res;
+ if (len) {
+ um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
+ if (file_offset != (__u64)-1) file_offset += writev_res;
+ }
+ }
+
+ cancelled:
+ um_cancel_and_wait(machine, &op);
+ done:
+ free(iovecs);
+ RAISE_IF_EXCEPTION(ret);
+ RB_GC_GUARD(ret);
+ um_raise_on_error_result(writev_res);
+ return INT2NUM(total_len);
+ }
+
  VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
  const void *base;
  size_t size;
@@ -670,6 +722,35 @@ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
  if (um_check_completion(machine, &op))
  ret = INT2NUM(op.result.res);
 
+ RAISE_IF_EXCEPTION(ret);
+ RB_GC_GUARD(ret);
+ return ret;
+ // int ret = write(fd, base, len);
+ // return UINT2NUM(ret);
+ }
+
+ // for some reason we don't get this define from liburing/io_uring.h
+ #define IORING_SEND_VECTORIZED (1U << 5)
+
+ VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
+ struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, NULL);
+ struct um_op op;
+ um_prep_op(machine, &op, OP_SEND, 0);
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+
+ io_uring_prep_send(sqe, fd, iovecs, argc, MSG_WAITALL);
+ sqe->ioprio |= IORING_SEND_VECTORIZED;
+
+ VALUE ret = um_yield(machine);
+
+ int completed = um_op_completed_p(&op);
+ if (unlikely(!completed)) um_cancel_and_wait(machine, &op);
+ free(iovecs);
+ if (likely(completed)) {
+ um_raise_on_error_result(op.result.res);
+ ret = INT2NUM(op.result.res);
+ }
+
  RAISE_IF_EXCEPTION(ret);
  RB_GC_GUARD(ret);
  return ret;
@@ -1066,7 +1147,7 @@ VALUE accept_each_start(VALUE arg) {
  more = (result->flags & IORING_CQE_F_MORE);
  if (result->res < 0) {
  um_op_multishot_results_clear(ctx->machine, ctx->op);
- return Qnil;
+ rb_syserr_fail(-result->res, strerror(-result->res));
  }
  rb_yield(INT2NUM(result->res));
  result = result->next;
@@ -1081,6 +1162,40 @@ VALUE accept_each_start(VALUE arg) {
  return Qnil;
  }
 
+ VALUE accept_into_queue_start(VALUE arg) {
+ struct op_ctx *ctx = (struct op_ctx *)arg;
+ struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+ io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
+
+ while (true) {
+ VALUE ret = um_yield(ctx->machine);
+ if (!um_op_completed_p(ctx->op)) {
+ RAISE_IF_EXCEPTION(ret);
+ return ret;
+ }
+ RB_GC_GUARD(ret);
+
+ int more = false;
+ struct um_op_result *result = &ctx->op->result;
+ while (result) {
+ more = (result->flags & IORING_CQE_F_MORE);
+ if (result->res < 0) {
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ rb_syserr_fail(-result->res, strerror(-result->res));
+ }
+ um_queue_push(ctx->machine, ctx->queue, INT2NUM(result->res));
+ result = result->next;
+ }
+ um_op_multishot_results_clear(ctx->machine, ctx->op);
+ if (more)
+ ctx->op->flags &= ~OP_F_COMPLETED;
+ else
+ break;
+ }
+
+ return Qnil;
+ }
+
  VALUE multishot_complete(VALUE arg) {
  struct op_ctx *ctx = (struct op_ctx *)arg;
  if (ctx->op->multishot_result_count) {
@@ -1095,8 +1210,6 @@ VALUE multishot_complete(VALUE arg) {
  if (ctx->read_buf)
  free(ctx->read_buf);
 
- rb_hash_delete(ctx->machine->pending_fibers, ctx->op->fiber);
-
  return Qnil;
  }
 
@@ -1108,6 +1221,16 @@ VALUE um_accept_each(struct um *machine, int fd) {
  return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
  }
 
+ VALUE um_accept_into_queue(struct um *machine, int fd, VALUE queue) {
+ struct um_op op;
+ um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT, OP_F_MULTISHOT);
+
+ struct op_ctx ctx = {
+ .machine = machine, .op = &op, .fd = fd, .queue = Queue_data(queue), .read_buf = NULL
+ };
+ return rb_ensure(accept_into_queue_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
+ }
+
  int um_read_each_singleshot_loop(struct op_ctx *ctx) {
  struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
  ctx->read_maxlen = desc->buf_size;
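
The new `um_writev` backs a `#writev` method on the machine (registered in um_class.c below): it takes an fd, one or more buffers, and an optional trailing integer file offset, and keeps resubmitting `IORING_OP_WRITEV` until the whole vector has been written. A minimal usage sketch, assuming `UringMachine.new` with default arguments; the path and strings are illustrative only:

```ruby
require 'uringmachine'

machine = UringMachine.new
file = File.open('/tmp/um_writev_demo.txt', 'w')

# Each argument after the fd becomes one iovec entry; um_writev loops until
# the whole vector has been written and returns the total byte count.
written = machine.writev(file.fileno, "hello ", "vectored ", "world\n")
puts written  # => 21

# A trailing Integer is taken as an absolute file offset (pwritev-style).
machine.writev(file.fileno, "HELLO", 0)

file.close
```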
data/ext/um/um.h CHANGED
@@ -43,6 +43,7 @@ enum um_op_kind {
  OP_OPEN,
  OP_READ,
  OP_WRITE,
+ OP_WRITEV,
  OP_WRITE_ASYNC,
  OP_CLOSE,
  OP_CLOSE_ASYNC,
@@ -51,6 +52,7 @@ enum um_op_kind {
  OP_ACCEPT,
  OP_RECV,
  OP_SEND,
+ OP_SENDV,
  OP_SEND_BUNDLE,
  OP_SOCKET,
  OP_CONNECT,
@@ -266,6 +268,9 @@ void um_update_read_buffer(struct um *machine, VALUE buffer, ssize_t buffer_offs
  int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count);
  VALUE um_get_string_from_buffer_ring(struct um *machine, int bgid, __s32 result, __u32 flags);
  void um_add_strings_to_buffer_ring(struct um *machine, int bgid, VALUE strings);
+ struct iovec *um_alloc_iovecs_for_writing(int argc, VALUE *argv, size_t *total_len);
+ void um_advance_iovecs_for_writing(struct iovec **ptr, int *len, size_t adv);
+
 
  struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op);
 
@@ -288,6 +293,7 @@ VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t b
  size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen);
  VALUE um_read_each(struct um *machine, int fd, int bgid);
  VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset);
+ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv);
  VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset);
  VALUE um_close(struct um *machine, int fd);
  VALUE um_close_async(struct um *machine, int fd);
@@ -304,9 +310,11 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
 
  VALUE um_accept(struct um *machine, int fd);
  VALUE um_accept_each(struct um *machine, int fd);
+ VALUE um_accept_into_queue(struct um *machine, int fd, VALUE queue);
  VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags);
  VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen);
  VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags);
+ VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv);
  VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings);
  VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags);
  VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags);
data/ext/um/um_class.c CHANGED
@@ -235,6 +235,16 @@ VALUE UM_write(int argc, VALUE *argv, VALUE self) {
  return um_write(machine, NUM2INT(fd), buffer, len_i, file_offset_i);
  }
 
+ VALUE UM_writev(int argc, VALUE *argv, VALUE self) {
+ struct um *machine = um_get_machine(self);
+ if (argc < 1)
+ rb_raise(rb_eArgError, "wrong number of arguments (given 0, expected 1+)");
+ int fd = NUM2INT(argv[0]);
+ if (argc < 2) return INT2NUM(0);
+
+ return um_writev(machine, fd, argc - 1, argv + 1);
+ }
+
  VALUE UM_write_async(int argc, VALUE *argv, VALUE self) {
  struct um *machine = um_get_machine(self);
  VALUE fd;
@@ -274,6 +284,11 @@ VALUE UM_accept_each(VALUE self, VALUE fd) {
  return um_accept_each(machine, NUM2INT(fd));
  }
 
+ VALUE UM_accept_into_queue(VALUE self, VALUE fd, VALUE queue) {
+ struct um *machine = um_get_machine(self);
+ return um_accept_into_queue(machine, NUM2INT(fd), queue);
+ }
+
  VALUE UM_socket(VALUE self, VALUE domain, VALUE type, VALUE protocol, VALUE flags) {
  struct um *machine = um_get_machine(self);
  return um_socket(machine, NUM2INT(domain), NUM2INT(type), NUM2INT(protocol), NUM2UINT(flags));
@@ -306,6 +321,18 @@ VALUE UM_send(VALUE self, VALUE fd, VALUE buffer, VALUE len, VALUE flags) {
  return um_send(machine, NUM2INT(fd), buffer, NUM2INT(len), NUM2INT(flags));
  }
 
+ #ifdef HAVE_IO_URING_SEND_VECTORIZED
+ VALUE UM_sendv(int argc, VALUE *argv, VALUE self) {
+ struct um *machine = um_get_machine(self);
+ if (argc < 1)
+ rb_raise(rb_eArgError, "wrong number of arguments (given 0, expected 1+)");
+ int fd = NUM2INT(argv[0]);
+ if (argc < 2) return INT2NUM(0);
+
+ return um_sendv(machine, fd, argc - 1, argv + 1);
+ }
+ #endif
+
  VALUE UM_send_bundle(int argc, VALUE *argv, VALUE self) {
  struct um *machine = um_get_machine(self);
  VALUE fd;
@@ -586,6 +613,7 @@ void Init_UM(void) {
  rb_define_method(cUM, "sleep", UM_sleep, 1);
  rb_define_method(cUM, "periodically", UM_periodically, 1);
  rb_define_method(cUM, "write", UM_write, -1);
+ rb_define_method(cUM, "writev", UM_writev, -1);
  rb_define_method(cUM, "write_async", UM_write_async, -1);
  rb_define_method(cUM, "statx", UM_statx, 4);
 
@@ -599,6 +627,7 @@ void Init_UM(void) {
 
  rb_define_method(cUM, "accept", UM_accept, 1);
  rb_define_method(cUM, "accept_each", UM_accept_each, 1);
+ rb_define_method(cUM, "accept_into_queue", UM_accept_into_queue, 2);
  rb_define_method(cUM, "bind", UM_bind, 3);
  rb_define_method(cUM, "connect", UM_connect, 3);
  rb_define_method(cUM, "getsockopt", UM_getsockopt, 3);
@@ -606,6 +635,11 @@ void Init_UM(void) {
  rb_define_method(cUM, "recv", UM_recv, 4);
  rb_define_method(cUM, "recv_each", UM_recv_each, 3);
  rb_define_method(cUM, "send", UM_send, 4);
+
+ #ifdef HAVE_IO_URING_SEND_VECTORIZED
+ rb_define_method(cUM, "sendv", UM_sendv, -1);
+ #endif
+
  rb_define_method(cUM, "send_bundle", UM_send_bundle, -1);
  rb_define_method(cUM, "setsockopt", UM_setsockopt, 4);
  rb_define_method(cUM, "socket", UM_socket, 4);
data/ext/um/um_const.c CHANGED
@@ -425,7 +425,6 @@ void um_define_net_constants(VALUE mod) {
  DEF_CONST_INT(mod, SIGTSTP);
  DEF_CONST_INT(mod, SIGCONT);
  DEF_CONST_INT(mod, SIGCHLD);
- // DEF_CONST_INT(mod, SIGCLD);
  DEF_CONST_INT(mod, SIGTTIN);
  DEF_CONST_INT(mod, SIGTTOU);
  DEF_CONST_INT(mod, SIGIO);
@@ -438,5 +437,4 @@ void um_define_net_constants(VALUE mod) {
  DEF_CONST_INT(mod, SIGUSR2);
  DEF_CONST_INT(mod, SIGPWR);
  DEF_CONST_INT(mod, SIGPOLL);
-
  }
data/ext/um/um_op.c CHANGED
@@ -1,5 +1,8 @@
  #include "um.h"
 
+ #define UM_OP_ALLOC_BATCH_SIZE 256
+ #define UM_OP_RESULT_ALLOC_BATCH_SIZE 256
+
  const char * um_op_kind_name(enum um_op_kind kind) {
  switch (kind) {
  case OP_TIMEOUT: return "OP_TIMEOUT";
@@ -8,6 +11,7 @@ const char * um_op_kind_name(enum um_op_kind kind) {
  case OP_OPEN: return "OP_OPEN";
  case OP_READ: return "OP_READ";
  case OP_WRITE: return "OP_WRITE";
+ case OP_WRITEV: return "OP_WRITEV";
  case OP_WRITE_ASYNC: return "OP_WRITE_ASYNC";
  case OP_CLOSE: return "OP_CLOSE";
  case OP_CLOSE_ASYNC: return "OP_CLOSE_ASYNC";
@@ -15,6 +19,7 @@ const char * um_op_kind_name(enum um_op_kind kind) {
  case OP_ACCEPT: return "OP_ACCEPT";
  case OP_RECV: return "OP_RECV";
  case OP_SEND: return "OP_SEND";
+ case OP_SENDV: return "OP_SENDV";
  case OP_SEND_BUNDLE: return "OP_SEND_BUNDLE";
  case OP_SOCKET: return "OP_SOCKET";
  case OP_CONNECT: return "OP_CONNECT";
@@ -113,7 +118,13 @@ inline struct um_op_result *multishot_result_alloc(struct um *machine) {
  machine->result_freelist = result->next;
  return result;
  }
- return malloc(sizeof(struct um_op_result));
+
+ struct um_op_result *batch = malloc(sizeof(struct um_op_result) * UM_OP_RESULT_ALLOC_BATCH_SIZE);
+ for (int i = 1; i < (UM_OP_RESULT_ALLOC_BATCH_SIZE - 1); i++) {
+ batch[i].next = &batch[i + 1];
+ }
+ machine->result_freelist = batch + 1;
+ return batch;
  }
 
  inline void multishot_result_free(struct um *machine, struct um_op_result *result) {
@@ -159,7 +170,14 @@ inline struct um_op *um_op_alloc(struct um *machine) {
  machine->metrics.ops_free--;
  return op;
  }
- return malloc(sizeof(struct um_op));
+
+ struct um_op *batch = malloc(sizeof(struct um_op) * UM_OP_ALLOC_BATCH_SIZE);
+ for (int i = 1; i < (UM_OP_ALLOC_BATCH_SIZE - 1); i++) {
+ batch[i].next = &batch[i + 1];
+ }
+ machine->op_freelist = batch + 1;
+ machine->metrics.ops_free += (UM_OP_ALLOC_BATCH_SIZE - 1);
+ return batch;
  }
 
  inline void um_op_free(struct um *machine, struct um_op *op) {
data/ext/um/um_utils.c CHANGED
@@ -199,3 +199,30 @@ inline void um_add_strings_to_buffer_ring(struct um *machine, int bgid, VALUE st
  inline void um_raise_internal_error(const char *msg) {
  rb_raise(eUMError, "UringMachine error: %s", msg);
  }
+
+ inline struct iovec *um_alloc_iovecs_for_writing(int argc, VALUE *argv, size_t *total_len) {
+ struct iovec *iovecs = malloc(sizeof(struct iovec) * argc);
+ size_t len = 0;
+
+ for (int i = 0; i < argc; i++) {
+ um_get_buffer_bytes_for_writing(argv[i], (const void **)&iovecs[i].iov_base, &iovecs[i].iov_len);
+ len += iovecs[i].iov_len;
+ }
+ if (total_len) *total_len = len;
+ return iovecs;
+ }
+
+ inline void um_advance_iovecs_for_writing(struct iovec **ptr, int *len, size_t adv) {
+ while (adv) {
+ if (adv < (*ptr)->iov_len) {
+ (*ptr)->iov_base += adv;
+ (*ptr)->iov_len -= adv;
+ return;
+ }
+ else {
+ adv -= (*ptr)->iov_len;
+ (*ptr)++;
+ (*len)--;
+ }
+ }
+ }
@@ -55,7 +55,7 @@ Pidfds eliminate race conditions, improve cross-thread safety, and make process
  management reliably asynchronous. This enables safer job-runners, supervisors,
  and async orchestration patterns in Ruby.
 
- 4. Proper fork support for Fiber Scheduler (Fiber::Scheduler#process_fork)
+ 4. [v] Proper fork support for Fiber Scheduler (Fiber::Scheduler#process_fork)
 
  Summary:
 
@@ -68,7 +68,7 @@ fork + async currently work inconsistently. This project makes forking
  predictable, allowing libraries and apps to do post-fork setup (e.g., reconnect
  I/O, restart loops) correctly and safely.
 
- 5. Async-aware IO#close via io_uring prep_close + scheduler hook
+ 5. [v] Async-aware IO#close via io_uring prep_close + scheduler hook
 
  Summary:
 
data/grant-2025/tasks.md CHANGED
@@ -24,8 +24,13 @@
  - [ ] max fiber switches before submitting unsubmitted SQEs
  - [ ] measure switches since last submitting / last CQE processing
 
- - [ ] Add support for using IO::Buffer in association with io_uring registered
- buffers / buffer rings
+ - [ ] Better buffer management buffer rings
+ - [v] Add `UM#sendv` method (see below)
+ - [v] Benchmark `#sendv` vs `#send_bundle` (in concurrent situation)
+ - [ ] Benchmark `#read_each` vs `#read` (in concurrent situation)
+ - [ ] Support for `IO::Buffer`? How's the API gonna look like?
+ - [ ] Some higher-level abstraction for managing a *pool* of buffer rings
+
  - [ ] Add some way to measure fiber CPU time.
  https://github.com/socketry/async/issues/428
 
@@ -114,16 +119,6 @@
 
  - [v] Postgres test
 
- - [ ] Measure CPU (thread) time usage for above examples
-
- - run each version 1M times
- - measure total real time, total CPU time
-
- ```ruby
- real_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- cpu_time = Process.clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID)
- ```
-
  - [ ] Ruby Fiber::Scheduler interface
  - [v] Make a PR for resetting the scheduler and resetting the fiber non-blocking flag.
  - [v] hook for close
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  class UringMachine
- VERSION = '0.22.1'
+ VERSION = '0.23.0'
  end
data/test/helper.rb CHANGED
@@ -65,10 +65,11 @@ class UMBaseTest < Minitest::Test
  end
 
  def teardown
- pending_fibers = @machine.pending_fibers.keys
- if pending_fibers.size > 0
- raise "leaked fibers: #{pending_fibers}"
- end
+ return if !@machine
+
+ pending_fibers = @machine.pending_fibers
+ raise "leaked fibers: #{pending_fibers}" if pending_fibers.size > 0
+
  GC.start
  end
 
@@ -772,22 +772,6 @@ class FiberSchedulerIOClassMethodsTest < UMBaseTest
  }, @scheduler.calls.map { it[:sym] }.tally)
  end
 
- def test_IO_s_foreach
- buf = []
- Fiber.schedule do
- IO.foreach(@fn) { buf << it }
- end
- @scheduler.join
- assert_equal ['==='], buf
- assert_equal({
- fiber: 1,
- io_read: 3,
- blocking_operation_wait: 1,
- io_close: 1,
- join: 1
- }, @scheduler.calls.map { it[:sym] }.tally)
- end
-
  def test_IO_s_popen
  ret = nil
  Fiber.schedule do
@@ -1374,7 +1358,7 @@ class FiberSchedulerNetHTTPTest < UMBaseTest
  assert_equal C, calls[:fiber]
  assert_equal C, calls[:io_close]
  assert_in_range (C * 2)..(C * 4), calls[:io_wait]
- assert_in_range (C * 7)..(C * 17), calls[:blocking_operation_wait]
+ assert_in_range (C * 7)..(C * 20), calls[:blocking_operation_wait]
  end
  end
 