uringmachine 0.19 → 0.19.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 2e4b4b986a75268a9de9dbafacd7d2a5c31cc08058b86a9681757be698fe697e
4
- data.tar.gz: 99007eb49069d9cfebc70ce6da9c45cbbd5e7c4d518892869ddcb55c401b1cdb
3
+ metadata.gz: ff89f34d541e086f8016156d4b5290a6a2fdc94292f04e7a3104ef9e7cda256d
4
+ data.tar.gz: 01dbe57b83effbab744fbd07461dcd803246a88bbf3ce1aeaf1e4eace32fd0ad
5
5
  SHA512:
6
- metadata.gz: f7c55065ac4aef687f91236bf5cb55be4756dee002e61e627775a93d74d069a8ca45372c43cc71d31efce4b33cfb623443a7ef53b929563b48ec8a0e59cbdf92
7
- data.tar.gz: 066a85ff0f7218e410abdc89e63cc20ae1ff858ecbc857a84f83ab812286fb1f6dd099d44e254452f466d66569046de3cffe0ba9d7d4f52bb1717670a34c5398
6
+ metadata.gz: ca0e44f1121384634b4234b2cd49643f87e8255e77eb250f64fb62be8bcdcb18e6a82480e5da003322c941c9ae2def119a048797d53bc064c4e1f77796b82c4d
7
+ data.tar.gz: 322090fd12498525d303b63f66ec1c559ed9933e89e74d3b01c9cc5c0f3f1dcee951c767625690c288478d5e460248418bbd693fbd7c31146c7aff76cf545a5d
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
1
+ # 0.19.1 2025-01-03
2
+
3
+ - Add `RB_GC_GUARD` in `process_runqueue_op`
4
+
1
5
  # 0.19 2025-10-27
2
6
 
3
7
  - Fix usage of `RAISE_IF_EXCEPTION` after `RB_GC_GUARD`
data/TODO.md CHANGED
@@ -1,3 +1,44 @@
1
+ ## immediate
2
+
3
+ - make a reproducer for segfault on timeout, spin lots of fibers where a timeout
4
+ wraps a #shift call (from an empty queue).
5
+ - see also: https://mensfeld.pl/2025/11/ruby-ffi-gc-bug-hash-becomes-string/
6
+
7
+ Analysis:
8
+
9
+ - The segfault is related to timeouts
10
+ - Looking at process_runqueue_op (um.c):
11
+
12
+ ```c
13
+ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
14
+ VALUE fiber = op->fiber;
15
+ VALUE value = op->value;
16
+
17
+ // on timeout, the op flags are changed to turn on OP_F_TRANSIENT
18
+ if (unlikely(op->flags & OP_F_TRANSIENT))
19
+ // here the op is freed, so the value is not visible to the GC anymore
20
+ um_op_free(machine, op);
21
+
22
+ // if a GC occurs here, we risk a segfault
23
+
24
+ // value is used
25
+ return rb_fiber_transfer(fiber, 1, &value);
26
+ }
27
+ ```
28
+
29
+ - So, a possible solution is to put a `RB_GC_GUARD` just before the `return`.
30
+ - But first, I want to be able to reproduce it. We can start by setting
31
+ `GC.stress = true` on tests and see if we segfault.
32
+
33
+ ## FiberScheduler implementation
34
+
35
+ Some resources:
36
+
37
+ - https://github.com/socketry/async/blob/main/context/getting-started.md
38
+ - https://github.com/socketry/async/blob/main/context/scheduler.md
39
+ - https://github.com/socketry/async/blob/main/lib/async/scheduler.rb#L28
40
+ -
41
+
1
42
  ## useful concurrency tools
2
43
 
3
44
  - debounce
data/ext/um/um.c CHANGED
@@ -194,7 +194,10 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
194
194
  if (unlikely(op->flags & OP_F_TRANSIENT))
195
195
  um_op_free(machine, op);
196
196
 
197
- return rb_fiber_transfer(fiber, 1, &value);
197
+ VALUE ret = rb_fiber_transfer(fiber, 1, &value);
198
+ RB_GC_GUARD(value);
199
+ RB_GC_GUARD(ret);
200
+ return ret;
198
201
  }
199
202
 
200
203
  inline VALUE um_fiber_switch(struct um *machine) {
@@ -266,6 +269,7 @@ inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind,
266
269
  VALUE fiber = (flags & OP_F_FREE_ON_COMPLETE) ? Qnil : rb_fiber_current();
267
270
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
268
271
  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
272
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
269
273
  }
270
274
 
271
275
  inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
@@ -275,6 +279,7 @@ inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
275
279
  op->flags = OP_F_TRANSIENT;
276
280
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
277
281
  RB_OBJ_WRITE(machine->self, &op->value, value);
282
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
278
283
  um_runqueue_push(machine, op);
279
284
  }
280
285
 
@@ -311,6 +316,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
311
316
  op->ts = um_double_to_timespec(NUM2DBL(interval));
312
317
  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
313
318
  RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
319
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
314
320
 
315
321
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
316
322
  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
@@ -407,7 +413,9 @@ VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
407
413
  VALUE um_write_async(struct um *machine, int fd, VALUE str) {
408
414
  struct um_op *op = um_op_alloc(machine);
409
415
  um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
416
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
410
417
  RB_OBJ_WRITE(machine->self, &op->value, str);
418
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
411
419
 
412
420
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
413
421
  io_uring_prep_write(sqe, fd, RSTRING_PTR(str), RSTRING_LEN(str), -1);
@@ -434,6 +442,9 @@ VALUE um_close(struct um *machine, int fd) {
434
442
  VALUE um_close_async(struct um *machine, int fd) {
435
443
  struct um_op *op = um_op_alloc(machine);
436
444
  um_prep_op(machine, op, OP_CLOSE_ASYNC, OP_F_FREE_ON_COMPLETE);
445
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
446
+ RB_OBJ_WRITE(machine->self, &op->value, Qnil);
447
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
437
448
 
438
449
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
439
450
  io_uring_prep_close(sqe, fd);
@@ -643,6 +654,10 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
643
654
  VALUE um_shutdown_async(struct um *machine, int fd, int how) {
644
655
  struct um_op *op = um_op_alloc(machine);
645
656
  um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, OP_F_FREE_ON_COMPLETE);
657
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
658
+ RB_OBJ_WRITE(machine->self, &op->value, Qnil);
659
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
660
+
646
661
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
647
662
  io_uring_prep_shutdown(sqe, fd, how);
648
663
 
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  class UringMachine
4
- VERSION = '0.19'
4
+ VERSION = '0.19.1'
5
5
  end
data/test/test_um.rb CHANGED
@@ -142,6 +142,31 @@ class ScheduleTest < UMBaseTest
142
142
  assert_kind_of TOError, e
143
143
  end
144
144
 
145
+ def test_timeout_stress
146
+ skip
147
+ # GC.stress = true
148
+ c = 0
149
+ fs = 100.times.map {
150
+ machine.spin {
151
+ q = UM::Queue.new
152
+ 1000.times {
153
+ machine.sleep rand(0.001..0.005)
154
+ begin
155
+ machine.timeout(rand(0.001..0.06), TOError) do
156
+ machine.shift(q)
157
+ end
158
+ rescue => _e
159
+ c += 1
160
+ STDOUT << '*' if c % 1000 == 0
161
+ end
162
+ }
163
+ }
164
+ }
165
+ machine.join(*fs)
166
+ ensure
167
+ GC.stress = false
168
+ end
169
+
145
170
  def test_timeout_with_raising_block
146
171
  e = nil
147
172
  begin
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: uringmachine
3
3
  version: !ruby/object:Gem::Version
4
- version: '0.19'
4
+ version: 0.19.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Sharon Rosner