uringmachine 0.12 → 0.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 1f8224da7817436bd74a52e650362c9b77d5dbc8877c0fa1cefd1c52e57ceb03
-  data.tar.gz: c7bb88c42bf59a9d1f105e2356c4f71f0fec49c7cbbe662744465ee3b34d5141
+  metadata.gz: 540e2e4df0b1953f58e36ab4dea2024a793b1bea4fd2358f64733f003b192510
+  data.tar.gz: 463cf782db4604358e1f54270e24e4bb97ee92176cafdb36079fd9d4a5132fda
 SHA512:
-  metadata.gz: 39d930e9f4d8a17676c28e4bf24f38b585dbd0cd124316272778abb285391be4bb9ecf14452d19568ffa8f6a0144c6a98b2e3ee1f50d58ec7c5101333f5de5e7
-  data.tar.gz: 95468f71d402a2a8abf675dbbae6e94b4e1919c5552f0b831b7387016d3fb24271756585cf66283130ed3440818ccc94f504d73a46d2ab8f9faa4607b0cc822b
+  metadata.gz: 9e54c0f81a6a6a0523a5210a00e5bd7d3a7c0598d8e15c4434080429ae10328079a06d1075fab88894eb2c6ac827e8c929b4fbc18a2bdc7cf20d7efda0d1bd81
+  data.tar.gz: 9de7ecd460344ec496fc061edb15a9fadd8b9a5b81db0ca8844afd6cc81bb79b7881af54a008b3c43565725f9234ea66728e69c7eaa6a09d090a0977128e0326
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+# 2025-06-07 Version 0.12.1
+
+- Improve portability of `UM` constants
+
 # 2025-06-03 Version 0.12
 
 - Add buffer, maxlen params to `Stream#get_line`
data/ext/um/um.c CHANGED
@@ -285,7 +285,7 @@ struct op_ctx {
   int flags;
 };
 
-VALUE um_timeout_ensure(VALUE arg) {
+VALUE um_timeout_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
 
   if (!um_op_completed_p(ctx->op)) {
@@ -311,7 +311,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   io_uring_prep_timeout(sqe, &op->ts, 0, 0);
 
   struct op_ctx ctx = { .machine = machine, .op = op };
-  return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
+  return rb_ensure(rb_yield, Qnil, um_timeout_complete, (VALUE)&ctx);
 }
 
 /*******************************************************************************
@@ -653,7 +653,7 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
   multishot ops
 *******************************************************************************/
 
-VALUE accept_each_begin(VALUE arg) {
+VALUE accept_each_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
@@ -684,7 +684,7 @@ VALUE accept_each_begin(VALUE arg) {
   return Qnil;
 }
 
-VALUE multishot_ensure(VALUE arg) {
+VALUE multishot_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   if (ctx->op->multishot_result_count) {
     int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
@@ -706,7 +706,7 @@ VALUE um_accept_each(struct um *machine, int fd) {
   um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
-  return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
 int um_read_each_singleshot_loop(struct op_ctx *ctx) {
@@ -771,7 +771,7 @@ void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
   }
 }
 
-VALUE read_recv_each_begin(VALUE arg) {
+VALUE read_recv_each_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   read_recv_each_prep(sqe, ctx);
@@ -809,7 +809,7 @@ VALUE um_read_each(struct um *machine, int fd, int bgid) {
   um_prep_op(machine, &op, OP_READ_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
-  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
 VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
@@ -817,10 +817,10 @@ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
   um_prep_op(machine, &op, OP_RECV_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
-  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
-VALUE periodically_begin(VALUE arg) {
+VALUE periodically_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
@@ -857,6 +857,6 @@ VALUE um_periodically(struct um *machine, double interval) {
   op.ts = um_double_to_timespec(interval);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
-  return rb_ensure(periodically_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
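The renames in this file are mechanical: every `*_begin` function becomes `*_start` and every `*_ensure` becomes `*_complete`, with no change in behavior. Each op still pairs a start function that prepares and submits the SQE (and yields to the Ruby block) with a completion function that `rb_ensure` guarantees to run even when the block raises. A minimal self-contained sketch of that pairing (all names here are illustrative, not part of the gem's API):

```c
#include <ruby.h>

struct demo_ctx { int cleaned_up; };

// Runs first: a real op would prepare and submit an SQE here,
// then yield to the caller's block.
static VALUE demo_start(VALUE arg) {
  struct demo_ctx *ctx = (struct demo_ctx *)arg;
  (void)ctx;
  return rb_yield(Qnil);
}

// Runs last, like a Ruby `ensure` clause: executes even if the
// block raises, so the pending op can be cancelled and released.
static VALUE demo_complete(VALUE arg) {
  struct demo_ctx *ctx = (struct demo_ctx *)arg;
  ctx->cleaned_up = 1;
  return Qnil;
}

static VALUE demo_op(VALUE self) {
  struct demo_ctx ctx = { .cleaned_up = 0 };
  return rb_ensure(demo_start, (VALUE)&ctx, demo_complete, (VALUE)&ctx);
}
```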
data/ext/um/um.h CHANGED
@@ -124,7 +124,6 @@ struct um {
 };
 
 struct um_mutex {
-  VALUE self;
   uint32_t state;
 };
 
@@ -147,15 +146,11 @@ struct um_queue {
 };
 
 struct um_async_op {
-  VALUE self;
-
   struct um *machine;
   struct um_op *op;
 };
 
 struct um_stream {
-  VALUE self;
-
   struct um *machine;
   int fd;
   VALUE buffer;
@@ -259,7 +254,7 @@ struct um_mutex *Mutex_data(VALUE self);
 struct um_queue *Queue_data(VALUE self);
 
 void um_mutex_init(struct um_mutex *mutex);
-VALUE um_mutex_synchronize(struct um *machine, uint32_t *state);
+VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state);
 
 void um_queue_init(struct um_queue *queue);
 void um_queue_free(struct um_queue *queue);
data/ext/um/um_async_op_class.c CHANGED
@@ -7,44 +7,40 @@ VALUE SYM_timeout;
 
 static void AsyncOp_mark(void *ptr) {
   struct um_async_op *async_op = ptr;
-  rb_gc_mark_movable(async_op->self);
   rb_gc_mark_movable(async_op->machine->self);
 }
 
-static void AsyncOp_compact(void *ptr) {
-  struct um_async_op *async_op = ptr;
-  async_op->self = rb_gc_location(async_op->self);
-}
-
-static size_t AsyncOp_size(const void *ptr) {
-  return sizeof(struct um_async_op);
-}
-
 static void AsyncOp_free(void *ptr) {
   struct um_async_op *async_op = ptr;
-  um_op_free(async_op->machine, async_op->op);
-  free(ptr);
+  if (async_op->op)
+    um_op_free(async_op->machine, async_op->op);
 }
 
 static const rb_data_type_t AsyncOp_type = {
-  "UringMachine::AsyncOp",
-  {AsyncOp_mark, AsyncOp_free, AsyncOp_size, AsyncOp_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::AsyncOp",
+  .function = {
+    .dmark = AsyncOp_mark,
+    .dfree = AsyncOp_free,
+    .dsize = NULL,
+    .dcompact = NULL
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE AsyncOp_allocate(VALUE klass) {
-  struct um_async_op *async_op = malloc(sizeof(struct um_async_op));
-  return TypedData_Wrap_Struct(klass, &AsyncOp_type, async_op);
+  struct um_async_op *async_op;
+  return TypedData_Make_Struct(klass, struct um_async_op, &AsyncOp_type, async_op);
 }
 
-inline struct um_async_op *AsyncOp_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+static inline struct um_async_op *AsyncOp_data(VALUE self) {
+  struct um_async_op *async_op;
+  TypedData_Get_Struct(self, struct um_async_op, &AsyncOp_type, async_op);
+  return async_op;
 }
 
 VALUE AsyncOp_initialize(VALUE self) {
   struct um_async_op *async_op = AsyncOp_data(self);
   memset(async_op, 0, sizeof(struct um_async_op));
-  async_op->self = self;
   return self;
 }
 
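This hunk establishes the pattern repeated for every wrapper class in this release: drop the cached `self` field (and the mark/compact code that maintained it), declare the `rb_data_type_t` with designated initializers, add `RUBY_TYPED_EMBEDDABLE` (available since Ruby 3.3) to the flags, allocate with `TypedData_Make_Struct` (which zero-allocates the struct inside the object slot, making the separate `malloc`/`free` and `dsize` callback unnecessary), and fetch data with the type-checked `TypedData_Get_Struct` instead of raw `RTYPEDDATA_DATA`. A condensed standalone version of the same pattern, using a hypothetical `Thing` class:

```c
#include <ruby.h>

struct thing { long counter; };

static const rb_data_type_t Thing_type = {
  .wrap_struct_name = "Thing",
  .function = {
    .dmark = NULL,
    .dfree = RUBY_TYPED_DEFAULT_FREE,
    .dsize = NULL,
    .dcompact = NULL
  },
  // EMBEDDABLE lets the GC place the struct inside the object slot;
  // the trade-off is that compaction may move it (see um_sync.c below).
  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};

static VALUE Thing_allocate(VALUE klass) {
  struct thing *thing;
  // Allocates zero-filled storage embedded in the object.
  return TypedData_Make_Struct(klass, struct thing, &Thing_type, thing);
}

static inline struct thing *Thing_data(VALUE self) {
  struct thing *thing;
  // Raises TypeError if self does not actually wrap Thing_type.
  TypedData_Get_Struct(self, struct thing, &Thing_type, thing);
  return thing;
}
```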
data/ext/um/um_class.c CHANGED
@@ -24,27 +24,28 @@ static void UM_free(void *ptr) {
   free(ptr);
 }
 
-static size_t UM_size(const void *ptr) {
-  return sizeof(struct um);
-}
-
-static const rb_data_type_t UM_type = {
-  "UringMachine",
-  {UM_mark, UM_free, UM_size, UM_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+static const rb_data_type_t UringMachine_type = {
+  .wrap_struct_name = "UringMachine",
+  .function = {
+    .dmark = UM_mark,
+    .dfree = UM_free,
+    .dsize = NULL,
+    .dcompact = UM_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
 };
 
 static VALUE UM_allocate(VALUE klass) {
-  struct um *machine = ALLOC(struct um);
-
-  return TypedData_Wrap_Struct(klass, &UM_type, machine);
+  struct um *um;
+  return TypedData_Make_Struct(klass, struct um, &UringMachine_type, um);
 }
 
 inline struct um *um_get_machine(VALUE self) {
-  struct um *machine = RTYPEDDATA_DATA(self);
-  if (!machine->ring_initialized)
-    rb_raise(rb_eRuntimeError, "Machine not initialized");
-  return machine;
+  struct um *um;
+  TypedData_Get_Struct(self, struct um, &UringMachine_type, um);
+  if (!um->ring_initialized) rb_raise(rb_eRuntimeError, "Machine not initialized");
+
+  return um;
 }
 
 VALUE UM_initialize(VALUE self) {
@@ -244,7 +245,7 @@ VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
 VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
   struct um *machine = um_get_machine(self);
   struct um_mutex *mutex_data = Mutex_data(mutex);
-  return um_mutex_synchronize(machine, &mutex_data->state);
+  return um_mutex_synchronize(machine, mutex, &mutex_data->state);
 }
 
 VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
@@ -278,7 +279,7 @@ struct um_open_ctx {
   VALUE fd;
 };
 
-VALUE UM_open_ensure(VALUE arg) {
+VALUE UM_open_complete(VALUE arg) {
   struct um_open_ctx *ctx = (struct um_open_ctx *)arg;
   UM_close(ctx->self, ctx->fd);
   return ctx->self;
@@ -290,7 +291,7 @@ VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
   VALUE fd = um_open(machine, pathname, NUM2INT(flags), 0666);
   if (rb_block_given_p()) {
     struct um_open_ctx ctx = { self, fd };
-    return rb_ensure(rb_yield, fd, UM_open_ensure, (VALUE)&ctx);
+    return rb_ensure(rb_yield, fd, UM_open_complete, (VALUE)&ctx);
   }
   else
     return fd;
data/ext/um/um_const.c CHANGED
@@ -39,18 +39,24 @@ void um_define_net_constants(VALUE mod) {
   DEF_CONST_INT(mod, STATX_BASIC_STATS);
   DEF_CONST_INT(mod, STATX_BTIME);
   DEF_CONST_INT(mod, STATX_ALL);
+#ifdef STATX_MNT_ID
   DEF_CONST_INT(mod, STATX_MNT_ID);
+#endif
+#ifdef STATX_DIOALIGN
   DEF_CONST_INT(mod, STATX_DIOALIGN);
+#endif
+#ifdef STATX_MNT_ID_UNIQUE
   DEF_CONST_INT(mod, STATX_MNT_ID_UNIQUE);
-#ifdef STATX_SUBVOL
+#endif
+#ifdef STATX_SUBVOL
   DEF_CONST_INT(mod, STATX_SUBVOL);
-#endif
-#ifdef STATX_WRITE_ATOMIC
+#endif
+#ifdef STATX_WRITE_ATOMIC
   DEF_CONST_INT(mod, STATX_WRITE_ATOMIC);
-#endif
-#ifdef STATX_DIO_READ_ALIGN
+#endif
+#ifdef STATX_DIO_READ_ALIGN
   DEF_CONST_INT(mod, STATX_DIO_READ_ALIGN);
-#endif
+#endif
 
   DEF_CONST_INT(mod, MSG_CONFIRM);
   DEF_CONST_INT(mod, MSG_DONTROUTE);
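This is the change behind the changelog's "Improve portability of `UM` constants" entry: `STATX_MNT_ID`, `STATX_DIOALIGN` and `STATX_MNT_ID_UNIQUE` only exist in sufficiently recent kernel/libc headers (added around Linux 5.8, 6.1 and 6.8 respectively), so registering them unconditionally broke compilation against older headers. Guarding each constant individually keeps the gem building everywhere while still exposing whatever the build host supports. A minimal sketch of the pattern, with an assumed stand-in for the gem's `DEF_CONST_INT` macro:

```c
#include <ruby.h>
#include <sys/stat.h> // brings in whatever STATX_* macros this libc defines

// Assumed stand-in for the gem's DEF_CONST_INT helper.
#define DEF_CONST_INT(mod, v) rb_define_const((mod), #v, INT2NUM(v))

static void define_statx_constants(VALUE mod) {
  DEF_CONST_INT(mod, STATX_BASIC_STATS);   // present everywhere
#ifdef STATX_MNT_ID
  DEF_CONST_INT(mod, STATX_MNT_ID);        // compiled out on pre-5.8 headers
#endif
#ifdef STATX_DIOALIGN
  DEF_CONST_INT(mod, STATX_DIOALIGN);      // compiled out on pre-6.1 headers
#endif
}
```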
data/ext/um/um_mutex_class.c CHANGED
@@ -3,38 +3,30 @@
 
 VALUE cMutex;
 
-static void Mutex_mark(void *ptr) {
-  struct um_mutex *mutex = ptr;
-  rb_gc_mark_movable(mutex->self);
-}
-
-static void Mutex_compact(void *ptr) {
-  struct um_mutex *mutex = ptr;
-  mutex->self = rb_gc_location(mutex->self);
-}
-
-static size_t Mutex_size(const void *ptr) {
-  return sizeof(struct um_mutex);
-}
-
 static const rb_data_type_t Mutex_type = {
-  "UringMachineMutex",
-  {Mutex_mark, free, Mutex_size, Mutex_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Mutex",
+  .function = {
+    .dmark = NULL,
+    .dfree = RUBY_TYPED_DEFAULT_FREE,
+    .dsize = NULL,
+    .dcompact = NULL
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Mutex_allocate(VALUE klass) {
-  struct um_mutex *mutex = malloc(sizeof(struct um_mutex));
-  return TypedData_Wrap_Struct(klass, &Mutex_type, mutex);
+  struct um_mutex *mutex;
+  return TypedData_Make_Struct(klass, struct um_mutex, &Mutex_type, mutex);
 }
 
 inline struct um_mutex *Mutex_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+  struct um_mutex *mutex;
+  TypedData_Get_Struct(self, struct um_mutex, &Mutex_type, mutex);
+  return mutex;
 }
 
 VALUE Mutex_initialize(VALUE self) {
   struct um_mutex *mutex = Mutex_data(self);
-  mutex->self = self;
   um_mutex_init(mutex);
   return self;
 }
data/ext/um/um_queue_class.c CHANGED
@@ -18,23 +18,26 @@ static void Queue_free(void *ptr) {
   um_queue_free(queue);
 }
 
-static size_t Queue_size(const void *ptr) {
-  return sizeof(struct um_queue);
-}
-
 static const rb_data_type_t Queue_type = {
-  "UringMachineQueue",
-  {Queue_mark, Queue_free, Queue_size, Queue_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Queue",
+  .function = {
+    .dmark = Queue_mark,
+    .dfree = Queue_free,
+    .dsize = NULL,
+    .dcompact = Queue_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Queue_allocate(VALUE klass) {
-  struct um_queue *queue = malloc(sizeof(struct um_queue));
-  return TypedData_Wrap_Struct(klass, &Queue_type, queue);
+  struct um_queue *queue;
+  return TypedData_Make_Struct(klass, struct um_queue, &Queue_type, queue);
 }
 
 inline struct um_queue *Queue_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+  struct um_queue *queue;
+  TypedData_Get_Struct(self, struct um_queue, &Queue_type, queue);
+  return queue;
 }
 
 VALUE Queue_initialize(VALUE self) {
data/ext/um/um_stream_class.c CHANGED
@@ -4,42 +4,40 @@ VALUE cStream;
 
 static void Stream_mark(void *ptr) {
   struct um_stream *stream = ptr;
-  rb_gc_mark_movable(stream->self);
   rb_gc_mark_movable(stream->buffer);
 }
 
 static void Stream_compact(void *ptr) {
   struct um_stream *stream = ptr;
-  stream->self = rb_gc_location(stream->self);
   stream->buffer = rb_gc_location(stream->buffer);
 }
 
-static void Stream_free(void *ptr) {
-  free(ptr);
-}
-
-static size_t Stream_size(const void *ptr) {
-  return sizeof(struct um_stream);
-}
-
 static const rb_data_type_t Stream_type = {
-  "UringMachine::Stream",
-  {Stream_mark, Stream_free, Stream_size, Stream_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Stream",
+  .function = {
+    .dmark = Stream_mark,
+    .dfree = RUBY_TYPED_DEFAULT_FREE,
+    .dsize = NULL,
+    .dcompact = Stream_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Stream_allocate(VALUE klass) {
-  struct um_stream *stream = ALLOC(struct um_stream);
+  struct um_stream *stream;
+  return TypedData_Make_Struct(klass, struct um_stream, &Stream_type, stream);
+}
 
-  return TypedData_Wrap_Struct(klass, &Stream_type, stream);
+static inline struct um_stream *Stream_data(VALUE self) {
+  struct um_stream *stream;
+  TypedData_Get_Struct(self, struct um_stream, &Stream_type, stream);
+  return stream;
 }
 
 VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
 
-  stream->self = self;
-
-  stream->machine = RTYPEDDATA_DATA(machine);
+  stream->machine = um_get_machine(machine);
   stream->fd = NUM2ULONG(fd);
   stream->buffer = rb_utf8_str_new_literal("");
   rb_str_resize(stream->buffer, 1 << 16); // 64KB
@@ -53,21 +51,21 @@ VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
 }
 
 VALUE Stream_get_line(VALUE self, VALUE buf, VALUE limit) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   return stream_get_line(stream, buf, NUM2LONG(limit));
 }
 
 VALUE Stream_get_string(VALUE self, VALUE buf, VALUE len) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   return stream_get_string(stream, buf, NUM2LONG(len));
 }
 
 VALUE Stream_resp_decode(VALUE self) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   VALUE out_buffer = rb_utf8_str_new_literal("");
data/ext/um/um_sync.c CHANGED
@@ -73,24 +73,28 @@ inline void um_mutex_unlock(struct um *machine, uint32_t *state) {
 
 struct sync_ctx {
   struct um *machine;
+  VALUE mutex;
   uint32_t *state;
 };
 
-VALUE synchronize_begin(VALUE arg) {
+VALUE synchronize_start(VALUE arg) {
   struct sync_ctx *ctx = (struct sync_ctx *)arg;
   um_mutex_lock(ctx->machine, ctx->state);
   return rb_yield(Qnil);
 }
 
-VALUE synchronize_ensure(VALUE arg) {
+VALUE synchronize_complete(VALUE arg) {
   struct sync_ctx *ctx = (struct sync_ctx *)arg;
+  // Mutex is an embedded data class, so it might have moved while the operation
+  // was ongoing. We need to update the pointer to the embedded state variable.
+  ctx->state = &Mutex_data(ctx->mutex)->state;
   um_mutex_unlock(ctx->machine, ctx->state);
   return Qnil;
 }
 
-inline VALUE um_mutex_synchronize(struct um *machine, uint32_t *state) {
-  struct sync_ctx ctx = { .machine = machine, .state = state };
-  return rb_ensure(synchronize_begin, (VALUE)&ctx, synchronize_ensure, (VALUE)&ctx);
+inline VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state) {
+  struct sync_ctx ctx = { .machine = machine, .mutex = mutex, .state = state };
+  return rb_ensure(synchronize_start, (VALUE)&ctx, synchronize_complete, (VALUE)&ctx);
 }
 
 #define QUEUE_EMPTY 0
@@ -116,8 +120,6 @@ inline void um_queue_free(struct um_queue *queue) {
     free(entry);
     entry = next;
   }
-
-  free(queue);
 }
 
 inline void um_queue_mark(struct um_queue *queue) {
@@ -226,11 +228,12 @@ enum queue_op { QUEUE_POP, QUEUE_SHIFT };
 
 struct queue_wait_ctx {
   struct um *machine;
+  VALUE queue_obj;
   struct um_queue *queue;
   enum queue_op op;
 };
 
-VALUE um_queue_remove_begin(VALUE arg) {
+VALUE um_queue_remove_start(VALUE arg) {
   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
 
   ctx->queue->num_waiters++;
@@ -247,9 +250,13 @@ VALUE um_queue_remove_begin(VALUE arg) {
   return (ctx->op == QUEUE_POP ? queue_remove_tail : queue_remove_head)(ctx->queue);
 }
 
-VALUE um_queue_remove_ensure(VALUE arg) {
+VALUE um_queue_remove_complete(VALUE arg) {
   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
 
+  // the um_queue struct is embedded, so it might have been moved while the op
+  // was ongoing, so we need to get it again on op completion
+  ctx->queue = Queue_data(ctx->queue_obj);
+
   ctx->queue->num_waiters--;
 
   if (ctx->queue->num_waiters && ctx->queue->tail) {
@@ -263,11 +270,11 @@ VALUE um_queue_remove_ensure(VALUE arg) {
 }
 
 VALUE um_queue_pop(struct um *machine, struct um_queue *queue) {
-  struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_POP };
-  return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+  struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_POP };
+  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
 }
 
 VALUE um_queue_shift(struct um *machine, struct um_queue *queue) {
-  struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_SHIFT };
-  return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+  struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_SHIFT };
+  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
 }
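The two new comments in this file spell out the cost of `RUBY_TYPED_EMBEDDABLE`: while a fiber is parked inside one of these ops, GC compaction may relocate the embedded struct, so a raw pointer captured before the wait can go stale. The `VALUE` is the stable handle, and the completion callback re-derives the interior pointer from it. A generic sketch of that rule, using a hypothetical embeddable `Counter` class:

```c
#include <ruby.h>

struct counter { uint32_t value; };

static const rb_data_type_t Counter_type = {
  .wrap_struct_name = "Counter",
  .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};

static uint32_t *counter_state(VALUE obj) {
  struct counter *c;
  TypedData_Get_Struct(obj, struct counter, &Counter_type, c);
  return &c->value;
}

static VALUE demo(VALUE obj) {
  uint32_t *state = counter_state(obj);
  *state += 1;
  rb_yield(Qnil);             // GC may run and move the embedded struct here
  state = counter_state(obj); // so re-derive the pointer before reusing it
  *state -= 1;
  return Qnil;
}
```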
data/lib/uringmachine/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 class UringMachine
-  VERSION = '0.12'
+  VERSION = '0.12.1'
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: uringmachine
 version: !ruby/object:Gem::Version
-  version: '0.12'
+  version: 0.12.1
 platform: ruby
 authors:
 - Sharon Rosner