uringmachine 0.12 → 0.13

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 1f8224da7817436bd74a52e650362c9b77d5dbc8877c0fa1cefd1c52e57ceb03
-  data.tar.gz: c7bb88c42bf59a9d1f105e2356c4f71f0fec49c7cbbe662744465ee3b34d5141
+  metadata.gz: bce8525371c9f0aa2b279b815d04456573319b3eddaae14b79ddbf20ece1189f
+  data.tar.gz: dc1e70ca4101d607f2c04df639dabc917c379e3765a12c5440682a72cbb24316
 SHA512:
-  metadata.gz: 39d930e9f4d8a17676c28e4bf24f38b585dbd0cd124316272778abb285391be4bb9ecf14452d19568ffa8f6a0144c6a98b2e3ee1f50d58ec7c5101333f5de5e7
-  data.tar.gz: 95468f71d402a2a8abf675dbbae6e94b4e1919c5552f0b831b7387016d3fb24271756585cf66283130ed3440818ccc94f504d73a46d2ab8f9faa4607b0cc822b
+  metadata.gz: 258605dfe54482f5330a7795e18efa3397692b0a306cf7fd1ad6a2408fe8c7e098abca78d66baaddd78faac74aae6beeda9fe6a3d2e39ab4e196b585810bc212
+  data.tar.gz: 6110908bf2c3d8a404f015429835dfb585ae711f14457b785d7e298f6caf2449341ca62ab3fa560054f51c9de6031195e8ead20afabace47cfe24cdf9fd6ae48
data/CHANGELOG.md CHANGED
@@ -1,3 +1,11 @@
+# 2025-06-07 Version 0.13
+
+- Add `#write_async`
+
+# 2025-06-07 Version 0.12.1
+
+- Improve portability of `UM` constants
+
 # 2025-06-03 Version 0.12
 
 - Add buffer, maxlen params to `Stream#get_line`
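The headline change is `#write_async`, a fire-and-forget write: as the um.c diff below shows, it queues a write SQE and returns immediately, and the op carries `OP_F_FREE_ON_COMPLETE`, so it is freed as soon as its CQE arrives, without waking any fiber. A minimal Ruby usage sketch, modeled on the test added in this version (`write_async`, `snooze`, and `pending_count` all appear in this diff; the pipe setup is illustrative, and the bad-fd test below suggests completion errors are silently discarded along with the op):

machine = UringMachine.new
r, w = IO.pipe

# Queue the write and return immediately; no fiber waits on the result.
machine.write_async(w.fileno, 'hello')

# Yield to the machine so it can reap the completion (which frees the op).
machine.snooze

r.readpartial(5) #=> "hello"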
data/ext/um/um.c CHANGED
@@ -72,6 +72,11 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
   // op, op->kind, op->flags, cqe->res, cqe->flags, machine->pending_count
   // );
 
+  if (op->flags & OP_F_FREE_ON_COMPLETE) {
+    um_op_free(machine, op);
+    return;
+  }
+
   if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
 
   op->flags |= OP_F_COMPLETED;
@@ -285,7 +290,7 @@ struct op_ctx {
   int flags;
 };
 
-VALUE um_timeout_ensure(VALUE arg) {
+VALUE um_timeout_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
 
   if (!um_op_completed_p(ctx->op)) {
@@ -311,7 +316,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   io_uring_prep_timeout(sqe, &op->ts, 0, 0);
 
   struct op_ctx ctx = { .machine = machine, .op = op };
-  return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
+  return rb_ensure(rb_yield, Qnil, um_timeout_complete, (VALUE)&ctx);
 }
 
 /*******************************************************************************
@@ -390,6 +395,19 @@ VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
   return raise_if_exception(ret);
 }
 
+VALUE um_write_async(struct um *machine, int fd, VALUE str) {
+  struct um_op *op = um_op_alloc(machine);
+  memset(op, 0, sizeof(struct um_op));
+  op->kind = OP_WRITE_ASYNC;
+  op->flags = OP_F_FREE_ON_COMPLETE;
+  op->fiber = Qnil;
+  RB_OBJ_WRITE(machine->self, &op->value, str);
+
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  io_uring_prep_write(sqe, fd, RSTRING_PTR(str), RSTRING_LEN(str), -1);
+  return str;
+}
+
 VALUE um_close(struct um *machine, int fd) {
   struct um_op op;
   um_prep_op(machine, &op, OP_CLOSE);
@@ -653,7 +671,7 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
   multishot ops
 *******************************************************************************/
 
-VALUE accept_each_begin(VALUE arg) {
+VALUE accept_each_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
@@ -684,7 +702,7 @@ VALUE accept_each_begin(VALUE arg) {
   return Qnil;
 }
 
-VALUE multishot_ensure(VALUE arg) {
+VALUE multishot_complete(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   if (ctx->op->multishot_result_count) {
     int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
@@ -706,7 +724,7 @@ VALUE um_accept_each(struct um *machine, int fd) {
   um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
-  return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
 int um_read_each_singleshot_loop(struct op_ctx *ctx) {
@@ -771,7 +789,7 @@ void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
   }
 }
 
-VALUE read_recv_each_begin(VALUE arg) {
+VALUE read_recv_each_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   read_recv_each_prep(sqe, ctx);
@@ -809,7 +827,7 @@ VALUE um_read_each(struct um *machine, int fd, int bgid) {
   um_prep_op(machine, &op, OP_READ_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
-  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
 VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
@@ -817,10 +835,10 @@ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
   um_prep_op(machine, &op, OP_RECV_MULTISHOT);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
-  return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
-VALUE periodically_begin(VALUE arg) {
+VALUE periodically_start(VALUE arg) {
   struct op_ctx *ctx = (struct op_ctx *)arg;
   struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
   io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
@@ -857,6 +875,6 @@ VALUE um_periodically(struct um *machine, double interval) {
   op.ts = um_double_to_timespec(interval);
 
   struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
-  return rb_ensure(periodically_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+  return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
 }
 
data/ext/um/um.h CHANGED
@@ -28,6 +28,7 @@ enum op_kind {
   OP_OPEN,
   OP_READ,
   OP_WRITE,
+  OP_WRITE_ASYNC,
   OP_CLOSE,
   OP_STATX,
 
@@ -59,6 +60,7 @@ enum op_kind {
 #define OP_F_ASYNC (1U << 2) // op belongs to an AsyncOp
 #define OP_F_IGNORE_CANCELED (1U << 3) // CQE with -ECANCEL should be ignored
 #define OP_F_MULTISHOT (1U << 4) // op is multishot
+#define OP_F_FREE_ON_COMPLETE (1U << 5) // op should be freed on receiving CQE
 
 struct um_op_result {
   __s32 res;
@@ -124,7 +126,6 @@ struct um {
 };
 
 struct um_mutex {
-  VALUE self;
   uint32_t state;
 };
 
@@ -147,15 +148,11 @@ struct um_queue {
 };
 
 struct um_async_op {
-  VALUE self;
-
   struct um *machine;
   struct um_op *op;
 };
 
 struct um_stream {
-  VALUE self;
-
   struct um *machine;
   int fd;
   VALUE buffer;
@@ -235,6 +232,7 @@ VALUE um_close(struct um *machine, int fd);
 VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode);
 VALUE um_waitpid(struct um *machine, int pid, int options);
 VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned int mask);
+VALUE um_write_async(struct um *machine, int fd, VALUE str);
 
 VALUE um_accept(struct um *machine, int fd);
 VALUE um_accept_each(struct um *machine, int fd);
@@ -259,7 +257,7 @@ struct um_mutex *Mutex_data(VALUE self);
 struct um_queue *Queue_data(VALUE self);
 
 void um_mutex_init(struct um_mutex *mutex);
-VALUE um_mutex_synchronize(struct um *machine, uint32_t *state);
+VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state);
 
 void um_queue_init(struct um_queue *queue);
 void um_queue_free(struct um_queue *queue);
data/ext/um/um_async_op_class.c CHANGED
@@ -7,44 +7,40 @@ VALUE SYM_timeout;
 
 static void AsyncOp_mark(void *ptr) {
   struct um_async_op *async_op = ptr;
-  rb_gc_mark_movable(async_op->self);
   rb_gc_mark_movable(async_op->machine->self);
 }
 
-static void AsyncOp_compact(void *ptr) {
-  struct um_async_op *async_op = ptr;
-  async_op->self = rb_gc_location(async_op->self);
-}
-
-static size_t AsyncOp_size(const void *ptr) {
-  return sizeof(struct um_async_op);
-}
-
 static void AsyncOp_free(void *ptr) {
   struct um_async_op *async_op = ptr;
-  um_op_free(async_op->machine, async_op->op);
-  free(ptr);
+  if (async_op->op)
+    um_op_free(async_op->machine, async_op->op);
 }
 
 static const rb_data_type_t AsyncOp_type = {
-  "UringMachine::AsyncOp",
-  {AsyncOp_mark, AsyncOp_free, AsyncOp_size, AsyncOp_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::AsyncOp",
+  .function = {
+    .dmark = AsyncOp_mark,
+    .dfree = AsyncOp_free,
+    .dsize = NULL,
+    .dcompact = NULL
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE AsyncOp_allocate(VALUE klass) {
-  struct um_async_op *async_op = malloc(sizeof(struct um_async_op));
-  return TypedData_Wrap_Struct(klass, &AsyncOp_type, async_op);
+  struct um_async_op *async_op;
+  return TypedData_Make_Struct(klass, struct um_async_op, &AsyncOp_type, async_op);
 }
 
-inline struct um_async_op *AsyncOp_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+static inline struct um_async_op *AsyncOp_data(VALUE self) {
+  struct um_async_op *async_op;
+  TypedData_Get_Struct(self, struct um_async_op, &AsyncOp_type, async_op);
+  return async_op;
 }
 
 VALUE AsyncOp_initialize(VALUE self) {
   struct um_async_op *async_op = AsyncOp_data(self);
   memset(async_op, 0, sizeof(struct um_async_op));
-  async_op->self = self;
   return self;
 }
 
data/ext/um/um_class.c CHANGED
@@ -24,27 +24,28 @@ static void UM_free(void *ptr) {
   free(ptr);
 }
 
-static size_t UM_size(const void *ptr) {
-  return sizeof(struct um);
-}
-
-static const rb_data_type_t UM_type = {
-  "UringMachine",
-  {UM_mark, UM_free, UM_size, UM_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+static const rb_data_type_t UringMachine_type = {
+  .wrap_struct_name = "UringMachine",
+  .function = {
+    .dmark = UM_mark,
+    .dfree = UM_free,
+    .dsize = NULL,
+    .dcompact = UM_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
 };
 
 static VALUE UM_allocate(VALUE klass) {
-  struct um *machine = ALLOC(struct um);
-
-  return TypedData_Wrap_Struct(klass, &UM_type, machine);
+  struct um *um;
+  return TypedData_Make_Struct(klass, struct um, &UringMachine_type, um);
 }
 
 inline struct um *um_get_machine(VALUE self) {
-  struct um *machine = RTYPEDDATA_DATA(self);
-  if (!machine->ring_initialized)
-    rb_raise(rb_eRuntimeError, "Machine not initialized");
-  return machine;
+  struct um *um;
+  TypedData_Get_Struct(self, struct um, &UringMachine_type, um);
+  if (!um->ring_initialized) rb_raise(rb_eRuntimeError, "Machine not initialized");
+
+  return um;
 }
 
 VALUE UM_initialize(VALUE self) {
@@ -130,6 +131,11 @@ VALUE UM_write(int argc, VALUE *argv, VALUE self) {
   return um_write(machine, NUM2INT(fd), buffer, bytes);
 }
 
+VALUE UM_write_async(VALUE self, VALUE fd, VALUE str) {
+  struct um *machine = um_get_machine(self);
+  return um_write_async(machine, NUM2INT(fd), str);
+}
+
 VALUE UM_statx(VALUE self, VALUE dirfd, VALUE path, VALUE flags, VALUE mask) {
   struct um *machine = um_get_machine(self);
   return um_statx(machine, NUM2INT(dirfd), path, NUM2INT(flags), NUM2UINT(mask));
@@ -244,7 +250,7 @@ VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
 VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
   struct um *machine = um_get_machine(self);
   struct um_mutex *mutex_data = Mutex_data(mutex);
-  return um_mutex_synchronize(machine, &mutex_data->state);
+  return um_mutex_synchronize(machine, mutex, &mutex_data->state);
 }
 
 VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
@@ -278,7 +284,7 @@ struct um_open_ctx {
   VALUE fd;
 };
 
-VALUE UM_open_ensure(VALUE arg) {
+VALUE UM_open_complete(VALUE arg) {
   struct um_open_ctx *ctx = (struct um_open_ctx *)arg;
   UM_close(ctx->self, ctx->fd);
   return ctx->self;
@@ -290,7 +296,7 @@ VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
   VALUE fd = um_open(machine, pathname, NUM2INT(flags), 0666);
   if (rb_block_given_p()) {
     struct um_open_ctx ctx = { self, fd };
-    return rb_ensure(rb_yield, fd, UM_open_ensure, (VALUE)&ctx);
+    return rb_ensure(rb_yield, fd, UM_open_complete, (VALUE)&ctx);
   }
   else
     return fd;
@@ -347,6 +353,7 @@ void Init_UM(void) {
   rb_define_method(cUM, "sleep", UM_sleep, 1);
   rb_define_method(cUM, "periodically", UM_periodically, 1);
   rb_define_method(cUM, "write", UM_write, -1);
+  rb_define_method(cUM, "write_async", UM_write_async, 2);
   rb_define_method(cUM, "statx", UM_statx, 4);
 
   rb_define_method(cUM, "waitpid", UM_waitpid, 2);
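Besides registering `write_async`, the notable change here is that `UM_mutex_synchronize` now passes the mutex object itself down to `um_mutex_synchronize`, so the unlock path can re-resolve the pointer to the mutex state after the machine has run (see um_sync.c below). Ruby-level usage is unchanged; a sketch, where the `UringMachine::Mutex` class name matches the wrap_struct_name shown below, but the `#synchronize` method name is an assumption, as it does not appear in this diff:

machine = UringMachine.new
mutex = UringMachine::Mutex.new

# The embedded mutex struct may move during GC compaction while a fiber is
# blocked on the lock; the C ensure handler re-fetches the state pointer.
machine.synchronize(mutex) do
  # critical section
end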
data/ext/um/um_const.c CHANGED
@@ -39,18 +39,24 @@ void um_define_net_constants(VALUE mod) {
   DEF_CONST_INT(mod, STATX_BASIC_STATS);
   DEF_CONST_INT(mod, STATX_BTIME);
   DEF_CONST_INT(mod, STATX_ALL);
+#ifdef STATX_MNT_ID
   DEF_CONST_INT(mod, STATX_MNT_ID);
+#endif
+#ifdef STATX_DIOALIGN
   DEF_CONST_INT(mod, STATX_DIOALIGN);
+#endif
+#ifdef STATX_MNT_ID_UNIQUE
   DEF_CONST_INT(mod, STATX_MNT_ID_UNIQUE);
-#ifdef STATX_SUBVOL
+#endif
+#ifdef STATX_SUBVOL
   DEF_CONST_INT(mod, STATX_SUBVOL);
-#endif
-#ifdef STATX_WRITE_ATOMIC
+#endif
+#ifdef STATX_WRITE_ATOMIC
   DEF_CONST_INT(mod, STATX_WRITE_ATOMIC);
-#endif
-#ifdef STATX_DIO_READ_ALIGN
+#endif
+#ifdef STATX_DIO_READ_ALIGN
   DEF_CONST_INT(mod, STATX_DIO_READ_ALIGN);
-#endif
+#endif
 
   DEF_CONST_INT(mod, MSG_CONFIRM);
   DEF_CONST_INT(mod, MSG_DONTROUTE);
data/ext/um/um_mutex_class.c CHANGED
@@ -3,38 +3,30 @@
 
 VALUE cMutex;
 
-static void Mutex_mark(void *ptr) {
-  struct um_mutex *mutex = ptr;
-  rb_gc_mark_movable(mutex->self);
-}
-
-static void Mutex_compact(void *ptr) {
-  struct um_mutex *mutex = ptr;
-  mutex->self = rb_gc_location(mutex->self);
-}
-
-static size_t Mutex_size(const void *ptr) {
-  return sizeof(struct um_mutex);
-}
-
 static const rb_data_type_t Mutex_type = {
-  "UringMachineMutex",
-  {Mutex_mark, free, Mutex_size, Mutex_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Mutex",
+  .function = {
+    .dmark = NULL,
+    .dfree = RUBY_TYPED_DEFAULT_FREE,
+    .dsize = NULL,
+    .dcompact = NULL
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Mutex_allocate(VALUE klass) {
-  struct um_mutex *mutex = malloc(sizeof(struct um_mutex));
-  return TypedData_Wrap_Struct(klass, &Mutex_type, mutex);
+  struct um_mutex *mutex;
+  return TypedData_Make_Struct(klass, struct um_mutex, &Mutex_type, mutex);
}
 
 inline struct um_mutex *Mutex_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+  struct um_mutex *mutex;
+  TypedData_Get_Struct(self, struct um_mutex, &Mutex_type, mutex);
+  return mutex;
 }
 
 VALUE Mutex_initialize(VALUE self) {
   struct um_mutex *mutex = Mutex_data(self);
-  mutex->self = self;
   um_mutex_init(mutex);
   return self;
 }
data/ext/um/um_queue_class.c CHANGED
@@ -18,23 +18,26 @@ static void Queue_free(void *ptr) {
   um_queue_free(queue);
 }
 
-static size_t Queue_size(const void *ptr) {
-  return sizeof(struct um_queue);
-}
-
 static const rb_data_type_t Queue_type = {
-  "UringMachineQueue",
-  {Queue_mark, Queue_free, Queue_size, Queue_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Queue",
+  .function = {
+    .dmark = Queue_mark,
+    .dfree = Queue_free,
+    .dsize = NULL,
+    .dcompact = Queue_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Queue_allocate(VALUE klass) {
-  struct um_queue *queue = malloc(sizeof(struct um_queue));
-  return TypedData_Wrap_Struct(klass, &Queue_type, queue);
+  struct um_queue *queue;
+  return TypedData_Make_Struct(klass, struct um_queue, &Queue_type, queue);
 }
 
 inline struct um_queue *Queue_data(VALUE self) {
-  return RTYPEDDATA_DATA(self);
+  struct um_queue *queue;
+  TypedData_Get_Struct(self, struct um_queue, &Queue_type, queue);
+  return queue;
 }
 
 VALUE Queue_initialize(VALUE self) {
data/ext/um/um_stream_class.c CHANGED
@@ -4,42 +4,40 @@ VALUE cStream;
 
 static void Stream_mark(void *ptr) {
   struct um_stream *stream = ptr;
-  rb_gc_mark_movable(stream->self);
   rb_gc_mark_movable(stream->buffer);
 }
 
 static void Stream_compact(void *ptr) {
   struct um_stream *stream = ptr;
-  stream->self = rb_gc_location(stream->self);
   stream->buffer = rb_gc_location(stream->buffer);
 }
 
-static void Stream_free(void *ptr) {
-  free(ptr);
-}
-
-static size_t Stream_size(const void *ptr) {
-  return sizeof(struct um_stream);
-}
-
 static const rb_data_type_t Stream_type = {
-  "UringMachine::Stream",
-  {Stream_mark, Stream_free, Stream_size, Stream_compact},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+  .wrap_struct_name = "UringMachine::Stream",
+  .function = {
+    .dmark = Stream_mark,
+    .dfree = RUBY_TYPED_DEFAULT_FREE,
+    .dsize = NULL,
+    .dcompact = Stream_compact
+  },
+  .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
 
 static VALUE Stream_allocate(VALUE klass) {
-  struct um_stream *stream = ALLOC(struct um_stream);
+  struct um_stream *stream;
+  return TypedData_Make_Struct(klass, struct um_stream, &Stream_type, stream);
+}
 
-  return TypedData_Wrap_Struct(klass, &Stream_type, stream);
+static inline struct um_stream *Stream_data(VALUE self) {
+  struct um_stream *stream;
+  TypedData_Get_Struct(self, struct um_stream, &Stream_type, stream);
+  return stream;
 }
 
 VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
 
-  stream->self = self;
-
-  stream->machine = RTYPEDDATA_DATA(machine);
+  stream->machine = um_get_machine(machine);
   stream->fd = NUM2ULONG(fd);
   stream->buffer = rb_utf8_str_new_literal("");
   rb_str_resize(stream->buffer, 1 << 16); // 64KB
@@ -53,21 +51,21 @@ VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
 }
 
 VALUE Stream_get_line(VALUE self, VALUE buf, VALUE limit) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   return stream_get_line(stream, buf, NUM2LONG(limit));
 }
 
 VALUE Stream_get_string(VALUE self, VALUE buf, VALUE len) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   return stream_get_string(stream, buf, NUM2LONG(len));
 }
 
 VALUE Stream_resp_decode(VALUE self) {
-  struct um_stream *stream = RTYPEDDATA_DATA(self);
+  struct um_stream *stream = Stream_data(self);
   if (unlikely(stream->eof)) return Qnil;
 
   VALUE out_buffer = rb_utf8_str_new_literal("");
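With the conversion to embeddable typed data, `Stream_initialize` now obtains its machine through `um_get_machine` (which raises if the machine is not initialized), and every method re-fetches the struct via the type-checked `Stream_data`. A usage sketch of the reader API defined above, assuming a `UringMachine::Stream` constant matching the wrap_struct_name; the fd setup and `handle` method are illustrative, and the exact fill semantics of the `buf, limit` parameters are inferred from their names:

stream = UringMachine::Stream.new(machine, fd)
buf = +'' # reusable, mutable buffer

# get_line reads a line into buf (up to limit bytes) and returns nil at EOF.
while (line = stream.get_line(buf, 8192))
  handle(line)
end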
data/ext/um/um_sync.c CHANGED
@@ -73,24 +73,28 @@ inline void um_mutex_unlock(struct um *machine, uint32_t *state) {
 
 struct sync_ctx {
   struct um *machine;
+  VALUE mutex;
   uint32_t *state;
 };
 
-VALUE synchronize_begin(VALUE arg) {
+VALUE synchronize_start(VALUE arg) {
   struct sync_ctx *ctx = (struct sync_ctx *)arg;
   um_mutex_lock(ctx->machine, ctx->state);
   return rb_yield(Qnil);
 }
 
-VALUE synchronize_ensure(VALUE arg) {
+VALUE synchronize_complete(VALUE arg) {
   struct sync_ctx *ctx = (struct sync_ctx *)arg;
+  // Mutex is an embedded data class, so it might have moved while the operation
+  // was ongoing. We need to update the pointer to the embedded state variable.
+  ctx->state = &Mutex_data(ctx->mutex)->state;
   um_mutex_unlock(ctx->machine, ctx->state);
   return Qnil;
 }
 
-inline VALUE um_mutex_synchronize(struct um *machine, uint32_t *state) {
-  struct sync_ctx ctx = { .machine = machine, .state = state };
-  return rb_ensure(synchronize_begin, (VALUE)&ctx, synchronize_ensure, (VALUE)&ctx);
+inline VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state) {
+  struct sync_ctx ctx = { .machine = machine, .mutex = mutex, .state = state };
+  return rb_ensure(synchronize_start, (VALUE)&ctx, synchronize_complete, (VALUE)&ctx);
 }
 
 #define QUEUE_EMPTY 0
@@ -116,8 +120,6 @@ inline void um_queue_free(struct um_queue *queue) {
     free(entry);
     entry = next;
   }
-
-  free(queue);
 }
 
 inline void um_queue_mark(struct um_queue *queue) {
@@ -226,11 +228,12 @@ enum queue_op { QUEUE_POP, QUEUE_SHIFT };
 
 struct queue_wait_ctx {
   struct um *machine;
+  VALUE queue_obj;
   struct um_queue *queue;
   enum queue_op op;
 };
 
-VALUE um_queue_remove_begin(VALUE arg) {
+VALUE um_queue_remove_start(VALUE arg) {
   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
 
   ctx->queue->num_waiters++;
@@ -247,9 +250,13 @@ VALUE um_queue_remove_begin(VALUE arg) {
   return (ctx->op == QUEUE_POP ? queue_remove_tail : queue_remove_head)(ctx->queue);
 }
 
-VALUE um_queue_remove_ensure(VALUE arg) {
+VALUE um_queue_remove_complete(VALUE arg) {
   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
 
+  // the um_queue struct is embedded, so it might have been moved while the op
+  // was ongoing, so we need to get it again on op completion
+  ctx->queue = Queue_data(ctx->queue_obj);
+
   ctx->queue->num_waiters--;
 
   if (ctx->queue->num_waiters && ctx->queue->tail) {
@@ -263,11 +270,11 @@ VALUE um_queue_remove_ensure(VALUE arg) {
 }
 
 VALUE um_queue_pop(struct um *machine, struct um_queue *queue) {
-  struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_POP };
-  return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+  struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_POP };
+  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
 }
 
 VALUE um_queue_shift(struct um *machine, struct um_queue *queue) {
-  struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_SHIFT };
-  return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+  struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_SHIFT };
+  return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
 }
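The queue-side changes mirror the mutex fix: the wait context now carries the queue's Ruby object (`queue_obj = queue->self`), so the completion handler can re-fetch the possibly-moved `um_queue` struct before touching `num_waiters`. None of this is visible from Ruby; a usage sketch of the blocking queue ops, assuming machine-level `#push`/`#shift` bindings (only the C entry points such as `UM_queue_push` appear in this diff, so the Ruby method names are an assumption):

machine = UringMachine.new
queue = UringMachine::Queue.new

machine.push(queue, :job)  # enqueue a value
machine.shift(queue)       #=> :job (blocks the calling fiber while the queue is empty)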
data/lib/uringmachine/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 class UringMachine
-  VERSION = '0.12'
+  VERSION = '0.13'
 end
data/test/test_um.rb CHANGED
@@ -532,6 +532,30 @@ class WriteTest < UMBaseTest
   end
 end
 
+class WriteAsyncTest < UMBaseTest
+  def test_write_async
+    r, w = IO.pipe
+
+    assert_equal 0, machine.pending_count
+    machine.write_async(w.fileno, 'foo')
+    assert_equal 1, machine.pending_count
+
+    machine.snooze
+    assert_equal 0, machine.pending_count
+    assert_equal 'foo', r.readpartial(3)
+  end
+
+  def test_write_async_bad_fd
+    r, _w = IO.pipe
+
+    assert_equal 0, machine.pending_count
+    machine.write_async(r.fileno, 'foo')
+    assert_equal 1, machine.pending_count
+    machine.snooze
+    assert_equal 0, machine.pending_count
+  end
+end
+
 class CloseTest < UMBaseTest
   def test_close
     r, w = IO.pipe
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: uringmachine
 version: !ruby/object:Gem::Version
-  version: '0.12'
+  version: '0.13'
 platform: ruby
 authors:
 - Sharon Rosner