uringmachine 0.32.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 299bd9cc7810b3d67352cf1c35e1bad8228d26dc1495d2e03e8283299347836a
4
- data.tar.gz: 1241fd5922a26d0223a6d345984e7548bb4486160a5f7f70cb25f4356cde4736
3
+ metadata.gz: 8704dd734601efec57a8bb095c43dc44f752cd01b0cbe04c550fbc321fb2fbfa
4
+ data.tar.gz: 86d5a99e31296951b2fb565bd12a60b64f6ef43ba331c16bb1307d5a27b27971
5
5
  SHA512:
6
- metadata.gz: 0075bd142ba474e475eb53fca3eb8e88bbb5fdba8214d7832b34582f3089e2720e99e069f49d7244b2c2cd9f5536ca5113589b9a5658d3f3679e3d6352043e1d
7
- data.tar.gz: fe68ab2c66601a0aa5786e86b86173b5c6b94a1f58d0a7151546c4c596b1d625912ebd32670c8230ab301886f3a6a0a35edff1cd7a472f5253cb181e0156a1f7
6
+ metadata.gz: e83aece1507a0be85ccbcc7bd229dbe2976fab489a59bb07814e147f51de5a9e526a4a4fbc08ddc5b3b01d96bfd132ad79de40acb58bcfca31eaab2b1516df01
7
+ data.tar.gz: 0d5572ccc1822cf50d9c0d3e1ad0fccb39f80640b753a9962a737a58ca6ea0024cb3342fe3c91d097f4f785186ded75f3cde587463409a2148ddb81bfef8de8f
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
1
+ # 1.0.0 2026-04-27
2
+
3
+ - Remove custom -Wxxx CFLAGS
4
+
5
+ # 0.33.0 2026-04-12
6
+
7
+ - Use buffer pool for `#read_each`, `#recv_each` methods
8
+ - Remove `#setup_buffer_group`, `#send_bundle`
9
+
1
10
  # 0.32.0 2026-04-03
2
11
 
3
12
  - Rename `UM::Connection` to `UM::IO`
data/README.md CHANGED
@@ -37,7 +37,7 @@ implementation that allows integration with the entire Ruby ecosystem.
37
37
  - Excellent performance characteristics for concurrent I/O-bound applications.
38
38
  - `Fiber::Scheduler` implementation to automatically integrate with the Ruby
39
39
  ecosystem in a transparent fashion.
40
- - [IO](#io-api) class with automatic buffer management for reading.
40
+ - [UM::IO](#io-higher-level-api) class with automatic buffer management for reading.
41
41
  - Optimized I/O for encrypted SSL connections.
42
42
 
43
43
  ## Design
@@ -286,7 +286,7 @@ fiber = Fiber.schedule do
286
286
  end
287
287
  ```
288
288
 
289
- ## IO API
289
+ ## IO Higher-level API
290
290
 
291
291
  `UringMachine::IO` is a class designed for efficiently reading from and writing to a
292
292
  socket or other file descriptor. The IO class is ideal for implementing
data/TODO.md CHANGED
@@ -1,28 +1,13 @@
1
- - Rename Connection to IO
2
-
3
- ```ruby
4
- io = machine.io(fd)
5
- l = io.read_line(4)
6
- io.write('foo')
7
- ```
8
-
9
- - Add `IO#fd`/`IO#target` method
10
-
11
- - Add `UM#inspect` (show size, modes)
12
- - Add `UM::IO#inspect` (show target, mode, pending bytes)
13
-
14
- ## Reimplement multishot read/recv using buffer pool
15
-
16
- - remove `#setup_buffer_ring` method
17
- - use buffer pool, just like UM::Connection
18
-
19
1
  ## immediate
20
2
 
3
+ - Add `IO#http_xxx` methods
4
+ - `#http_read_request_headers()`
5
+ - `#http_read_body(content_length)` (-1 means chunked TE)
6
+
21
7
  - Add tests for support for Set in `machine#await`
22
8
  - Add tests for support for Set, Array in `machine#join`
23
9
  - Add `UM#read_file` for reading entire file
24
10
  - Add `UM#write_file` for writing entire file
25
- - Rename stream methods: `:fd`, `:socket`, `:ssl`
26
11
 
27
12
  ## Balancing I/O with the runqueue
28
13
 
@@ -48,7 +33,7 @@
48
33
  - debounce
49
34
 
50
35
  ```ruby
51
- debouncer = machine.debounce { }
36
+ debouncer = machine.debounce { ... }
52
37
  ```
53
38
 
54
39
  - happy eyeballs algo for TCP connect
@@ -104,8 +89,6 @@
104
89
  When doing a `call`, we need to provide a mailbox for the response. can this be
105
90
  automatic?
106
91
 
107
- ##
108
-
109
92
  ## Syntax / pattern for launching/supervising multiple operations
110
93
 
111
94
  Select (see above):
@@ -125,3 +108,36 @@ machine.shift_select(*queues) #=> [result, queue]
125
108
  # ['1.1.1.1:80', '2.2.2.2:80']
126
109
  tcp_connect_he(*addrs)
127
110
  ```
111
+
112
+ ## Character scanning in UM::IO
113
+
114
+ ```c
115
+ // bitmaps for character types can be generated with a bit of Ruby:
116
+ //
117
+ // def t(r); (0..255).map { [it].pack('c') =~ r ? 1 : 0 }; end
118
+ // def tn(r); (0..255).map { [it].pack('c') =~ r ? 0 : 1 }; end
119
+ // def u64(bits); bits.reverse.join.to_i(2); end
120
+ // def p(a); a.each_slice(64).map { u64(it) }; end
121
+
122
+ // usage:
123
+ //
124
+ // p(t(/[a-zA-Z0-9]/)).map { format('%016X', it) }
125
+
126
+
127
+ // /[a-zA-Z0-9]/
128
+ uint64_t alpha_numeric[] = [
129
+ 0x000000000000FFC0,
130
+ 0x7FFFFFE07FFFFFE0,
131
+ 0x0000000000000000,
132
+ 0x0000000000000000
133
+ ];
134
+
135
+ // HTTP method: /[a-zA-Z]/ (3-12 characters)
136
+ // header-key: /[a-zA-Z\-]/ ()
137
+ // path: /^($/
138
+
139
+ // check if character is in bitmap
140
+ inline int test_char(char c, uint64 *bitmap) {
141
+ return bitmap[c / 64] & (1UL << (c % 64));
142
+ }
143
+ ```
@@ -4,7 +4,7 @@ require_relative './common'
4
4
  require 'securerandom'
5
5
 
6
6
  C = ENV['C']&.to_i || 50
7
- I = 10
7
+ I = 100
8
8
  puts "C=#{C}"
9
9
 
10
10
  class UMBenchmark
@@ -12,6 +12,7 @@ class UMBenchmark
12
12
 
13
13
  def start_redis_server
14
14
  `docker run --name #{CONTAINER_NAME} -d -p 6379:6379 redis:latest`
15
+ create_redis_conn
15
16
  end
16
17
 
17
18
  def stop_redis_server
@@ -22,7 +23,7 @@ class UMBenchmark
22
23
  Redis.new
23
24
  rescue
24
25
  if retries < 3
25
- sleep 0.5
26
+ sleep 0.2
26
27
  create_redis_conn(retries + 1)
27
28
  else
28
29
  raise
@@ -31,7 +32,7 @@ class UMBenchmark
31
32
 
32
33
  def query_redis(conn)
33
34
  conn.set('abc', 'def')
34
- p conn.get('abc')
35
+ conn.get('abc')
35
36
  end
36
37
 
37
38
  def with_container
@@ -45,25 +46,33 @@ class UMBenchmark
45
46
  stop_redis_server
46
47
  end
47
48
 
49
+ def create_redis_conn(retries = 0)
50
+ Redis.new
51
+ rescue
52
+ raise if retries >= 3
53
+
54
+ sleep 0.5
55
+ create_redis_conn(retries + 1)
56
+ end
57
+
48
58
  def benchmark
49
59
  with_container {
50
60
  Benchmark.bm { run_benchmarks(it) }
51
61
  }
52
62
  end
53
63
 
54
- # def do_threads(threads, ios)
55
- # C.times.map do
56
- # threads << Thread.new do
57
- # conn = create_redis_conn
58
- # I.times { query_redis(conn) }
59
- # ensure
60
- # conn.close
61
- # end
62
- # end
63
- # end
64
+ def do_threads(threads, ios)
65
+ C.times.map do
66
+ threads << Thread.new do
67
+ conn = create_redis_conn
68
+ I.times { query_redis(conn) }
69
+ ensure
70
+ conn.close
71
+ end
72
+ end
73
+ end
64
74
 
65
75
  def do_scheduler(scheduler, ios)
66
- return if !scheduler.is_a?(UM::FiberScheduler)
67
76
  C.times do
68
77
  Fiber.schedule do
69
78
  conn = create_redis_conn
data/benchmark/common.rb CHANGED
@@ -62,7 +62,7 @@ class UMBenchmark
62
62
  # baseline_um: [:baseline_um, "UM no concurrency"],
63
63
  # thread_pool: [:thread_pool, "ThreadPool"],
64
64
 
65
- # threads: [:threads, "Threads"],
65
+ threads: [:threads, "Threads"],
66
66
 
67
67
  # async_uring: [:scheduler, "Async uring"],
68
68
  # async_uring_x2: [:scheduler_x, "Async uring x2"],
@@ -70,10 +70,10 @@ class UMBenchmark
70
70
  # async_epoll: [:scheduler, "Async epoll"],
71
71
  # async_epoll_x2: [:scheduler_x, "Async epoll x2"],
72
72
 
73
- # um_fs: [:scheduler, "UM FS"],
74
- # um_fs_x2: [:scheduler_x, "UM FS x2"],
73
+ um_fs: [:scheduler, "UM FS"],
74
+ um_fs_x2: [:scheduler_x, "UM FS x2"],
75
75
 
76
- # um: [:um, "UM"],
76
+ um: [:um, "UM"],
77
77
  # um_sidecar: [:um, "UM sidecar"],
78
78
  # um_sqpoll: [:um, "UM sqpoll"],
79
79
  um_x2: [:um_x, "UM x2"],
data/docs/um_api.md CHANGED
@@ -43,26 +43,21 @@
43
43
  - `pop(queue)` - removes and returns a value off the end of the given queue.
44
44
  - `prep_timeout(interval)` - returns a timeout AsyncOp with the given interval.
45
45
  - `push(queue, value)` - adds the given value to the end of the given queue.
46
- - `read_each(fd, bgid) { |data| ... }` - reads repeatedly from the given fd
47
- using the given buffer group id, yielding each chunk of data to the given
48
- block.
46
+ - `read_each(fd) { |data| ... }` - reads repeatedly from the given fd, yielding
47
+ each chunk of data to the given block.
49
48
  - `read(fd, buffer[, maxlen[, buffer_offset[, file_offset]]])` - reads from the
50
49
  given fd into the given buffer (String or IO::Buffer).
51
- - `recv_each(fd, bgid, flags)` - receives from the given fd using the given
52
- buffer group id, with the given flags.
50
+ - `recv_each(fd, flags)` - receives repeatedly from the given fd with the given
51
+ flags.
53
52
  - `recv(fd, buffer, maxlen, flags)` - receives from the given fd into the given
54
53
  buffer.
55
54
  - `schedule(fiber, value)` - adds the given fiber to the runqueue with the given
56
55
  resume value.
57
56
  - `select(rfds, wfds, efds)` - selects ready fds from the given readable,
58
57
  writable and exceptional fds.
59
- - `send_bundle(fd, bgid, *strings)` - sends a bundle of buffers to the given fd
60
- using the given buffer group id.
61
58
  - `send(fd, buffer, len, flags)` - sends to the given fd from the given buffer.
62
59
  - `sendv(fd, *buffers)` - sends multiple buffers to the given fd.
63
60
  - `setsockopt(fd, level, opt, value)` - sets a socket option.
64
- - `setup_buffer_ring(size, count)` - sets up a buffer ring and returns the
65
- buffer group id.
66
61
  - `shift(queue)` - removes and returns a value from the head of given queue.
67
62
  - `shutdown_async(fd, how)` - shuts down the given socket fd without blocking.
68
63
  - `shutdown(fd, how)` - shuts down the given socket fd.
data/ext/um/extconf.rb CHANGED
@@ -74,11 +74,4 @@ $defs << '-DHAVE_IO_URING_PREP_BIND' if config[:prep_bind]
74
74
  $defs << '-DHAVE_IO_URING_PREP_LISTEN' if config[:prep_listen]
75
75
  $defs << '-DHAVE_IO_URING_SEND_VECTORIZED' if config[:send_vectoized]
76
76
 
77
- $CFLAGS << ' -Werror -Wall -Wextra'
78
-
79
- if ENV['SANITIZE']
80
- $CFLAGS << ' -fsanitize=undefined,address -lasan'
81
- $LDFLAGS << ' -fsanitize=undefined,address -lasan'
82
- end
83
-
84
77
  create_makefile 'um_ext'
data/ext/um/um.c CHANGED
@@ -49,12 +49,6 @@ inline void um_teardown(struct um *machine) {
49
49
  if (machine->sidecar_mode) um_sidecar_teardown(machine);
50
50
  if (machine->sidecar_signal) free(machine->sidecar_signal);
51
51
 
52
- for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
53
- struct buf_ring_descriptor *desc = machine->buffer_rings + i;
54
- io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
55
- free(desc->buf_base);
56
- }
57
- machine->buffer_ring_count = 0;
58
52
  io_uring_queue_exit(&machine->ring);
59
53
  machine->ring_initialized = 0;
60
54
 
@@ -68,8 +62,7 @@ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
68
62
  machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
69
63
  );
70
64
 
71
- struct io_uring_sqe *sqe;
72
- sqe = io_uring_get_sqe(&machine->ring);
65
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&machine->ring);
73
66
  if (likely(sqe)) goto done;
74
67
 
75
68
  um_raise_internal_error("Submission queue full. Consider raising the machine size.");
@@ -288,22 +281,22 @@ void *um_wait_for_cqe_without_gvl(void *ptr) {
288
281
  return NULL;
289
282
  }
290
283
 
291
- inline void um_profile_wait_cqe_pre(struct um *machine, double *time_monotonic0, VALUE *fiber) {
292
- // *fiber = rb_fiber_current();
293
- *time_monotonic0 = um_get_time_monotonic();
294
- // double time_cpu = um_get_time_cpu();
295
- // double elapsed = time_cpu - machine->metrics.time_last_cpu;
296
- // um_update_fiber_time_run(fiber, time_monotonic0, elapsed);
297
- // machine->metrics.time_last_cpu = time_cpu;
298
- }
284
+ // inline void um_profile_wait_cqe_pre(struct um *machine, double *time_monotonic0, VALUE *fiber) {
285
+ // VALUE fiber = rb_fiber_current();
286
+ // *time_monotonic0 = um_get_time_monotonic();
287
+ // double time_cpu = um_get_time_cpu();
288
+ // double elapsed = time_cpu - machine->metrics.time_last_cpu;
289
+ // um_update_fiber_time_run(fiber, time_monotonic0, elapsed);
290
+ // machine->metrics.time_last_cpu = time_cpu;
291
+ // }
299
292
 
300
- inline void um_profile_wait_cqe_post(struct um *machine, double time_monotonic0, VALUE fiber) {
301
- // double time_cpu = um_get_time_cpu();
302
- double elapsed = um_get_time_monotonic() - time_monotonic0;
303
- // um_update_fiber_last_time(fiber, cpu_time1);
304
- machine->metrics.time_total_wait += elapsed;
305
- // machine->metrics.time_last_cpu = time_cpu;
306
- }
293
+ // inline void um_profile_wait_cqe_post(struct um *machine, double time_monotonic0, VALUE fiber) {
294
+ // // double time_cpu = um_get_time_cpu();
295
+ // double elapsed = um_get_time_monotonic() - time_monotonic0;
296
+ // // um_update_fiber_last_time(fiber, cpu_time1);
297
+ // machine->metrics.time_total_wait += elapsed;
298
+ // // machine->metrics.time_last_cpu = time_cpu;
299
+ // }
307
300
 
308
301
  inline void *um_wait_for_sidecar_signal(void *ptr) {
309
302
  struct um *machine = ptr;
@@ -335,11 +328,11 @@ static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wa
335
328
  // fprintf(stderr, "<< sidecar wait cqes\n");
336
329
  }
337
330
  else {
338
- double time_monotonic0 = 0.0;
339
- VALUE fiber;
340
- if (machine->profile_mode) um_profile_wait_cqe_pre(machine, &time_monotonic0, &fiber);
331
+ // double time_monotonic0 = 0.0;
332
+ // VALUE fiber;
333
+ // if (machine->profile_mode) um_profile_wait_cqe_pre(machine, &time_monotonic0, &fiber);
341
334
  rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
342
- if (machine->profile_mode) um_profile_wait_cqe_post(machine, time_monotonic0, fiber);
335
+ // if (machine->profile_mode) um_profile_wait_cqe_post(machine, time_monotonic0, fiber);
343
336
 
344
337
  if (unlikely(ctx.result < 0)) {
345
338
  // the internal calls to (maybe submit) and wait for cqes may fail with:
@@ -363,14 +356,14 @@ static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wa
363
356
  }
364
357
  }
365
358
 
366
- inline void um_profile_switch(struct um *machine, VALUE next_fiber) {
367
- // *current_fiber = rb_fiber_current();
368
- // double time_cpu = um_get_time_cpu();
369
- // double elapsed = time_cpu - machine->metrics.time_last_cpu;
370
- // um_update_fiber_time_run(cur_fiber, time_cpu, elapsed);
371
- // um_update_fiber_time_wait(next_fiber, time_cpu);
372
- // machine->metrics.time_last_cpu = time_cpu;
373
- }
359
+ // inline void um_profile_switch(struct um *machine, VALUE next_fiber) {
360
+ // VALUE current_fiber = rb_fiber_current();
361
+ // double time_cpu = um_get_time_cpu();
362
+ // double elapsed = time_cpu - machine->metrics.time_last_cpu;
363
+ // um_update_fiber_time_run(cur_fiber, time_cpu, elapsed);
364
+ // um_update_fiber_time_wait(next_fiber, time_cpu);
365
+ // machine->metrics.time_last_cpu = time_cpu;
366
+ // }
374
367
 
375
368
  inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
376
369
  DEBUG_PRINTF("* process_runqueue_op: op=%p kind=%s ref_count=%d flags=%x\n",
@@ -384,7 +377,7 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
384
377
  op->flags &= ~OP_F_SCHEDULED;
385
378
  um_op_release(machine, op);
386
379
 
387
- if (machine->profile_mode) um_profile_switch(machine, fiber);
380
+ // if (machine->profile_mode) um_profile_switch(machine, fiber);
388
381
  VALUE ret = rb_fiber_transfer(fiber, 1, &value);
389
382
  RB_GC_GUARD(value);
390
383
  RB_GC_GUARD(ret);
@@ -523,7 +516,6 @@ struct op_ctx {
523
516
  struct um *machine;
524
517
  struct um_op *op;
525
518
  int fd;
526
- int bgid;
527
519
 
528
520
  struct um_queue *queue;
529
521
  void *read_buf;
@@ -870,25 +862,6 @@ VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
870
862
  return ret;
871
863
  }
872
864
 
873
- VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
874
- um_add_strings_to_buffer_ring(machine, bgid, strings);
875
- struct um_op *op = um_op_acquire(machine);
876
- um_prep_op(machine, op, OP_SEND_BUNDLE, 2, 0);
877
- struct io_uring_sqe *sqe = um_get_sqe(machine, op);
878
- io_uring_prep_send_bundle(sqe, fd, 0, MSG_NOSIGNAL | MSG_WAITALL);
879
- sqe->flags |= IOSQE_BUFFER_SELECT;
880
- sqe->buf_group = bgid;
881
-
882
- VALUE ret = um_yield(machine);
883
-
884
- if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
885
- um_op_release(machine, op);
886
-
887
- RAISE_IF_EXCEPTION(ret);
888
- RB_GC_GUARD(ret);
889
- return ret;
890
- }
891
-
892
865
  VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
893
866
  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
894
867
  struct um_op *op = um_op_acquire(machine);
@@ -1461,72 +1434,44 @@ VALUE um_accept_into_queue(struct um *machine, int fd, VALUE queue) {
1461
1434
  return rb_ensure(accept_into_queue_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1462
1435
  }
1463
1436
 
1464
- int um_read_each_singleshot_loop(struct op_ctx *ctx) {
1465
- struct buf_ring_descriptor *desc = ctx->machine->buffer_rings + ctx->bgid;
1466
- ctx->read_maxlen = desc->buf_size;
1467
- ctx->read_buf = malloc(desc->buf_size);
1468
- int total = 0;
1469
-
1470
- while (1) {
1471
- um_prep_op(ctx->machine, ctx->op, OP_READ, 2, 0);
1472
- struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
1473
- io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
1474
-
1475
- VALUE ret = um_yield(ctx->machine);
1476
-
1477
- if (likely(um_verify_op_completion(ctx->machine, ctx->op, true))) {
1478
- VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
1479
- total += ctx->op->result.res;
1480
- rb_yield(buf);
1481
- RB_GC_GUARD(buf);
1482
- }
1483
- else {
1484
- RAISE_IF_EXCEPTION(ret);
1485
- return 0;
1486
- }
1487
- RB_GC_GUARD(ret);
1488
- }
1489
- return 0;
1490
- }
1491
-
1492
1437
  // // returns true if more results are expected
1493
- int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
1438
+ inline int read_recv_each_multishot_process_result(struct op_ctx *ctx, struct um_op_result *result, int *total) {
1494
1439
  if (result->res == 0)
1495
1440
  return false;
1496
1441
 
1497
1442
  *total += result->res;
1498
- VALUE buf = um_read_from_buffer_ring(ctx->machine, ctx->bgid, result->res, result->flags);
1499
- rb_yield(buf);
1500
- RB_GC_GUARD(buf);
1501
-
1502
- // TTY devices might not support multishot reads:
1503
- // https://github.com/axboe/liburing/issues/1185. We detect this by checking
1504
- // if the F_MORE flag is absent, then switch to single shot mode.
1505
- if (unlikely(!(result->flags & IORING_CQE_F_MORE))) {
1506
- *total += um_read_each_singleshot_loop(ctx);
1507
- return false;
1443
+ if (likely(result->segment)) {
1444
+ VALUE buf = rb_str_new(result->segment->ptr, result->segment->len);
1445
+ um_segment_checkin(ctx->machine, result->segment);
1446
+ result->segment = NULL;
1447
+ rb_yield(buf);
1448
+ RB_GC_GUARD(buf);
1508
1449
  }
1509
1450
 
1510
1451
  return true;
1511
1452
  }
1512
1453
 
1513
- void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
1454
+ static inline void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
1455
+ bp_ensure_commit_level(ctx->machine);
1456
+ ctx->op->bp_commit_level = ctx->machine->bp_commit_level;
1457
+
1514
1458
  switch (ctx->op->kind) {
1515
1459
  case OP_READ_MULTISHOT:
1516
- io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, ctx->bgid);
1460
+ io_uring_prep_read_multishot(sqe, ctx->fd, 0, -1, BP_BGID);
1517
1461
  return;
1518
1462
  case OP_RECV_MULTISHOT:
1519
1463
  io_uring_prep_recv_multishot(sqe, ctx->fd, NULL, 0, 0);
1520
- sqe->buf_group = ctx->bgid;
1464
+ sqe->buf_group = BP_BGID;
1521
1465
  sqe->flags |= IOSQE_BUFFER_SELECT;
1522
1466
  return;
1523
1467
  default:
1524
- return;
1468
+ um_raise_internal_error("Invalid multishot op");
1525
1469
  }
1526
1470
  }
1527
1471
 
1528
1472
  VALUE read_recv_each_start(VALUE arg) {
1529
1473
  struct op_ctx *ctx = (struct op_ctx *)arg;
1474
+
1530
1475
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
1531
1476
  read_recv_each_prep(sqe, ctx);
1532
1477
  int total = 0;
@@ -1558,19 +1503,19 @@ VALUE read_recv_each_start(VALUE arg) {
1558
1503
  return Qnil;
1559
1504
  }
1560
1505
 
1561
- VALUE um_read_each(struct um *machine, int fd, int bgid) {
1506
+ VALUE um_read_each(struct um *machine, int fd) {
1562
1507
  struct um_op *op = um_op_acquire(machine);
1563
- um_prep_op(machine, op, OP_READ_MULTISHOT, 2, OP_F_MULTISHOT);
1508
+ um_prep_op(machine, op, OP_READ_MULTISHOT, 2, OP_F_MULTISHOT | OP_F_BUFFER_POOL);
1564
1509
 
1565
- struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL };
1510
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .read_buf = NULL };
1566
1511
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1567
1512
  }
1568
1513
 
1569
- VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
1514
+ VALUE um_recv_each(struct um *machine, int fd, int flags) {
1570
1515
  struct um_op *op = um_op_acquire(machine);
1571
- um_prep_op(machine, op, OP_RECV_MULTISHOT, 2, OP_F_MULTISHOT);
1516
+ um_prep_op(machine, op, OP_RECV_MULTISHOT, 2, OP_F_MULTISHOT | OP_F_BUFFER_POOL);
1572
1517
 
1573
- struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
1518
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .read_buf = NULL, .flags = flags };
1574
1519
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1575
1520
  }
1576
1521
 
@@ -1623,7 +1568,6 @@ extern VALUE SYM_ops_free;
1623
1568
  extern VALUE SYM_ops_transient;
1624
1569
  extern VALUE SYM_time_total_cpu;
1625
1570
  extern VALUE SYM_time_total_wait;
1626
- extern VALUE SYM_buffer_groups;
1627
1571
  extern VALUE SYM_buffers_allocated;
1628
1572
  extern VALUE SYM_buffers_free;
1629
1573
  extern VALUE SYM_segments_free;
data/ext/um/um.h CHANGED
@@ -56,7 +56,6 @@ enum um_op_kind {
56
56
  OP_RECV,
57
57
  OP_RECVMSG,
58
58
  OP_SEND,
59
- OP_SEND_BUNDLE,
60
59
  OP_SENDMSG,
61
60
  OP_SENDV,
62
61
  OP_SOCKET,
@@ -168,15 +167,6 @@ struct um_op {
168
167
  };
169
168
  };
170
169
 
171
- struct buf_ring_descriptor {
172
- struct io_uring_buf_ring *br;
173
- size_t br_size;
174
- unsigned buf_count;
175
- unsigned buf_size;
176
- unsigned buf_mask;
177
- void *buf_base;
178
- };
179
-
180
170
  struct um_metrics {
181
171
  ulong total_ops; // total ops submitted
182
172
  ulong total_switches; // total fiber switches
@@ -199,8 +189,6 @@ struct um_metrics {
199
189
  double time_first_cpu; // last seen time stamp
200
190
  };
201
191
 
202
- #define BUFFER_RING_MAX_COUNT 10
203
-
204
192
  struct um {
205
193
  VALUE self;
206
194
 
@@ -216,13 +204,9 @@ struct um {
216
204
  pthread_t sidecar_thread;
217
205
  uint32_t *sidecar_signal;
218
206
 
219
- uint buffer_ring_count; // number of registered buffer rings
220
-
221
207
  uint size; // size of SQ
222
208
  uint sqpoll_mode; // SQPOLL mode enabled
223
209
 
224
- struct buf_ring_descriptor buffer_rings[BUFFER_RING_MAX_COUNT];
225
-
226
210
  struct um_op *transient_head; // list of pending transient ops
227
211
  VALUE pending_fibers; // set containing pending fibers
228
212
 
@@ -351,9 +335,7 @@ void um_raise_on_error_result(int result);
351
335
  int um_get_buffer_bytes_for_writing(VALUE buffer, const void **base, size_t *size, int raise_on_bad_buffer);
352
336
  void * um_prepare_read_buffer(VALUE buffer, ssize_t len, ssize_t ofs);
353
337
  void um_update_read_buffer(VALUE buffer, ssize_t buffer_offset, __s32 result);
354
- int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count);
355
- VALUE um_read_from_buffer_ring(struct um *machine, int bgid, __s32 result, __u32 flags);
356
- void um_add_strings_to_buffer_ring(struct um *machine, int bgid, VALUE strings);
338
+
357
339
  struct iovec *um_alloc_iovecs_for_writing(int argc, VALUE *argv, size_t *total_len);
358
340
  void um_advance_iovecs_for_writing(struct iovec **ptr, int *len, size_t adv);
359
341
 
@@ -376,7 +358,7 @@ VALUE um_sleep(struct um *machine, double duration);
376
358
  VALUE um_periodically(struct um *machine, double interval);
377
359
  VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset, __u64 file_offset);
378
360
  size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen);
379
- VALUE um_read_each(struct um *machine, int fd, int bgid);
361
+ VALUE um_read_each(struct um *machine, int fd);
380
362
  VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset);
381
363
  size_t um_write_raw(struct um *machine, int fd, const char *buffer, size_t len);
382
364
  VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv);
@@ -405,9 +387,8 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, sockle
405
387
  VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags);
406
388
  size_t um_send_raw(struct um *machine, int fd, const char *buffer, size_t len, int flags);
407
389
  VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv);
408
- VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings);
409
390
  VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags);
410
- VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags);
391
+ VALUE um_recv_each(struct um *machine, int fd, int flags);
411
392
  VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen);
412
393
  VALUE um_listen(struct um *machine, int fd, int backlog);
413
394
  VALUE um_getsockopt(struct um *machine, int fd, int level, int opt);