uringmachine 0.11.1 → 0.12.1

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: ccb8488c3f313b7a3890ae110fc5cebc7ac0062bbaa9ef362fb7ff5b02c680b9
4
- data.tar.gz: 41f2c23a9427609c4d01a6eef319cbd40d029cf85918fd6c8d00e5b502f83ef6
3
+ metadata.gz: 540e2e4df0b1953f58e36ab4dea2024a793b1bea4fd2358f64733f003b192510
4
+ data.tar.gz: 463cf782db4604358e1f54270e24e4bb97ee92176cafdb36079fd9d4a5132fda
5
5
  SHA512:
6
- metadata.gz: 9bd08ebae21a7a10407ff04102d9cbf491bfab65ebe601b6ab3a6b7790632c946873415af4f45321e959bd39e875ff386e8c85f74329743204a9c0c047ce5da0
7
- data.tar.gz: 3cd63222c0b990ef83f2b324aa46b36bd142a7b43a1a273428a9b70a95cfe25e0a1b06a2985a7463fe51a995c78e82838e34ee20d3c332cc46278ffd738652dd
6
+ metadata.gz: 9e54c0f81a6a6a0523a5210a00e5bd7d3a7c0598d8e15c4434080429ae10328079a06d1075fab88894eb2c6ac827e8c929b4fbc18a2bdc7cf20d7efda0d1bd81
7
+ data.tar.gz: 9de7ecd460344ec496fc061edb15a9fadd8b9a5b81db0ca8844afd6cc81bb79b7881af54a008b3c43565725f9234ea66728e69c7eaa6a09d090a0977128e0326
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
1
+ # 2025-06-07 Version 0.12.1
2
+
3
+ - Improve portability of `UM` constants
4
+
5
+ # 2025-06-03 Version 0.12
6
+
7
+ - Add buffer, maxlen params to `Stream#get_line`
8
+ - Add buffer param to `Stream#get_string`
9
+ - Remove `Stream#resp_get_line`, `Stream#resp_get_string` methods
10
+
1
11
  # 2025-06-02 Version 0.11.1
2
12
 
3
13
  - Fix `UM::Stream` behaviour on GC
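For orientation, here is a minimal sketch of the 0.12 calling convention implied by these entries. The `machine`/`fd` setup is assumed for illustration; the method signatures come from the changelog and from the test changes further down in this diff.

```ruby
machine = UringMachine.new
stream  = UM::Stream.new(machine, fd)  # fd: an already-open file descriptor (assumed)

stream.get_line(nil, 0)    # allocate a fresh string, no line-length limit
stream.get_string(nil, 6)  # read exactly 6 bytes into a fresh string

buf = String.new(capacity: 4096)  # reusable buffer to avoid per-call allocations
stream.get_line(buf, 0)    # the line is copied into buf, and buf is returned
stream.get_string(buf, 6)  # likewise for fixed-length reads
```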
data/TODO.md CHANGED
@@ -22,3 +22,27 @@
22
22
  When doing a `call`, we need to provide a mailbox for the response. can this be
23
23
  automatic?
24
24
 
25
+ # streams
26
+
27
+ We're still missing:
28
+
29
+ - limit on line length in `get_line`
30
+ - ability to supply buffer to `get_line` and `get_string`
31
+ - allow read to eof, maybe with `read_to_eof`
32
+
33
+ For the sake of performance, simplicity and explicitness, we change the API as follows:
34
+
35
+ ```ruby
36
+ stream.get_line(buf, limit)
37
+ # the defaults:
38
+ stream.get_line(nil, -1)
39
+
40
+ stream.get_string(len, buf)
41
+ # defaults:
42
+ stream.get_string(len, nil)
43
+
44
+ # and
45
+ stream.read_to_eof(buf)
46
+ # defaults:
47
+ stream.read_to_eof(nil)
48
+ ```
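A usage sketch of the proposed `maxlen` limit (assuming `stream` is an open `UM::Stream`, and that the limit behaves as in the tests added later in this diff: `nil` is returned and the stream position is left unchanged when no newline is found within the limit):

```ruby
buf  = String.new(capacity: 8192)
line = stream.get_line(buf, 8192)
if line.nil?
  # Either EOF was reached, or no "\n" was found within 8192 bytes;
  # in the too-long case the stream position is left unchanged.
  handle_overlong_line_or_closed_connection  # hypothetical handler
end
```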
@@ -132,11 +132,12 @@ end
132
132
  def stream_parse_headers(fd)
133
133
  stream = UM::Stream.new($machine, fd)
134
134
 
135
- headers = stream_get_request_line(stream)
135
+ buf = String.new(capacity: 65536)
136
+ headers = stream_get_request_line(stream, buf)
136
137
  return nil if !headers
137
138
 
138
139
  while true
139
- line = stream.get_line()
140
+ line = stream.get_line(buf, 0)
140
141
  break if line.empty?
141
142
 
142
143
  m = line.match(RE_HEADER_LINE)
@@ -148,8 +149,8 @@ def stream_parse_headers(fd)
148
149
  headers
149
150
  end
150
151
 
151
- def stream_get_request_line(stream)
152
- line = stream.get_line()
152
+ def stream_get_request_line(stream, buf)
153
+ line = stream.get_line(buf, 0)
153
154
 
154
155
  m = line.match(RE_REQUEST_LINE)
155
156
  return nil if !m
@@ -181,42 +182,43 @@ ensure
181
182
  ($machine.close(wfd) rescue nil) if wfd
182
183
  end
183
184
 
184
- # 10000.times { parse_http_parser }
185
- # 10000.times { parse_http_stringio }
186
- # 10000.times { parse_http_stream }
187
- # exit
188
-
189
- # GC.disable
190
-
191
- # OS = ObjectSpace
192
-
193
- # def object_count
194
- # counts = ObjectSpace.count_objects
195
- # counts[:TOTAL] - counts[:FREE]
196
- # end
185
+ def compare_allocs
186
+ GC.disable
187
+ x = 1000
188
+ p(
189
+ alloc_http_parser: alloc_count { x.times { parse_http_parser } },
190
+ alloc_stringio: alloc_count { x.times { parse_http_stringio } },
191
+ alloc_stream: alloc_count { x.times { parse_http_stream } }
192
+ )
193
+ ensure
194
+ GC.enable
195
+ end
197
196
 
198
- # def alloc_count
199
- # GC.start
200
- # count0 = object_count
201
- # yield
202
- # count1 = object_count
203
- # count1 - count0
204
- # end
197
+ def object_count
198
+ counts = ObjectSpace.count_objects
199
+ counts[:TOTAL] - counts[:FREE]
200
+ end
205
201
 
206
- # X = 100
207
- # p(
208
- # alloc_http_parser: alloc_count { X.times { parse_http_parser } },
209
- # alloc_stringio: alloc_count { X.times { parse_http_stringio } },
210
- # alloc_stream: alloc_count { X.times { parse_http_stream } }
211
- # )
212
- # exit
202
+ def alloc_count
203
+ GC.start
204
+ count0 = object_count
205
+ yield
206
+ # GC.start
207
+ count1 = object_count
208
+ count1 - count0
209
+ end
213
210
 
214
- Benchmark.ips do |x|
215
- x.config(:time => 5, :warmup => 3)
211
+ def benchmark
212
+ Benchmark.ips do |x|
213
+ x.config(:time => 5, :warmup => 3)
216
214
 
217
- x.report("http_parser") { parse_http_parser }
218
- x.report("stringio") { parse_http_stringio }
219
- x.report("stream") { parse_http_stream }
215
+ x.report("http_parser") { parse_http_parser }
216
+ x.report("stringio") { parse_http_stringio }
217
+ x.report("stream") { parse_http_stream }
220
218
 
221
- x.compare!
219
+ x.compare!
220
+ end
222
221
  end
222
+
223
+ compare_allocs
224
+ benchmark
data/ext/um/um.c CHANGED
@@ -285,7 +285,7 @@ struct op_ctx {
285
285
  int flags;
286
286
  };
287
287
 
288
- VALUE um_timeout_ensure(VALUE arg) {
288
+ VALUE um_timeout_complete(VALUE arg) {
289
289
  struct op_ctx *ctx = (struct op_ctx *)arg;
290
290
 
291
291
  if (!um_op_completed_p(ctx->op)) {
@@ -311,7 +311,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
311
311
  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
312
312
 
313
313
  struct op_ctx ctx = { .machine = machine, .op = op };
314
- return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
314
+ return rb_ensure(rb_yield, Qnil, um_timeout_complete, (VALUE)&ctx);
315
315
  }
316
316
 
317
317
  /*******************************************************************************
@@ -653,7 +653,7 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
653
653
  multishot ops
654
654
  *******************************************************************************/
655
655
 
656
- VALUE accept_each_begin(VALUE arg) {
656
+ VALUE accept_each_start(VALUE arg) {
657
657
  struct op_ctx *ctx = (struct op_ctx *)arg;
658
658
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
659
659
  io_uring_prep_multishot_accept(sqe, ctx->fd, NULL, NULL, 0);
@@ -684,7 +684,7 @@ VALUE accept_each_begin(VALUE arg) {
684
684
  return Qnil;
685
685
  }
686
686
 
687
- VALUE multishot_ensure(VALUE arg) {
687
+ VALUE multishot_complete(VALUE arg) {
688
688
  struct op_ctx *ctx = (struct op_ctx *)arg;
689
689
  if (ctx->op->multishot_result_count) {
690
690
  int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
@@ -706,7 +706,7 @@ VALUE um_accept_each(struct um *machine, int fd) {
706
706
  um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT);
707
707
 
708
708
  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
709
- return rb_ensure(accept_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
709
+ return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
710
710
  }
711
711
 
712
712
  int um_read_each_singleshot_loop(struct op_ctx *ctx) {
@@ -771,7 +771,7 @@ void read_recv_each_prep(struct io_uring_sqe *sqe, struct op_ctx *ctx) {
771
771
  }
772
772
  }
773
773
 
774
- VALUE read_recv_each_begin(VALUE arg) {
774
+ VALUE read_recv_each_start(VALUE arg) {
775
775
  struct op_ctx *ctx = (struct op_ctx *)arg;
776
776
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
777
777
  read_recv_each_prep(sqe, ctx);
@@ -809,7 +809,7 @@ VALUE um_read_each(struct um *machine, int fd, int bgid) {
809
809
  um_prep_op(machine, &op, OP_READ_MULTISHOT);
810
810
 
811
811
  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
812
- return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
812
+ return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
813
813
  }
814
814
 
815
815
  VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
@@ -817,10 +817,10 @@ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
817
817
  um_prep_op(machine, &op, OP_RECV_MULTISHOT);
818
818
 
819
819
  struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
820
- return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
820
+ return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
821
821
  }
822
822
 
823
- VALUE periodically_begin(VALUE arg) {
823
+ VALUE periodically_start(VALUE arg) {
824
824
  struct op_ctx *ctx = (struct op_ctx *)arg;
825
825
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
826
826
  io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
@@ -857,6 +857,6 @@ VALUE um_periodically(struct um *machine, double interval) {
857
857
  op.ts = um_double_to_timespec(interval);
858
858
 
859
859
  struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
860
- return rb_ensure(periodically_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
860
+ return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
861
861
  }
862
862
 
data/ext/um/um.h CHANGED
@@ -124,7 +124,6 @@ struct um {
124
124
  };
125
125
 
126
126
  struct um_mutex {
127
- VALUE self;
128
127
  uint32_t state;
129
128
  };
130
129
 
@@ -147,15 +146,11 @@ struct um_queue {
147
146
  };
148
147
 
149
148
  struct um_async_op {
150
- VALUE self;
151
-
152
149
  struct um *machine;
153
150
  struct um_op *op;
154
151
  };
155
152
 
156
153
  struct um_stream {
157
- VALUE self;
158
-
159
154
  struct um *machine;
160
155
  int fd;
161
156
  VALUE buffer;
@@ -259,7 +254,7 @@ struct um_mutex *Mutex_data(VALUE self);
259
254
  struct um_queue *Queue_data(VALUE self);
260
255
 
261
256
  void um_mutex_init(struct um_mutex *mutex);
262
- VALUE um_mutex_synchronize(struct um *machine, uint32_t *state);
257
+ VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state);
263
258
 
264
259
  void um_queue_init(struct um_queue *queue);
265
260
  void um_queue_free(struct um_queue *queue);
@@ -270,11 +265,8 @@ VALUE um_queue_pop(struct um *machine, struct um_queue *queue);
270
265
  VALUE um_queue_unshift(struct um *machine, struct um_queue *queue, VALUE value);
271
266
  VALUE um_queue_shift(struct um *machine, struct um_queue *queue);
272
267
 
273
- int stream_read_more(struct um_stream *stream);
274
- VALUE stream_get_line(struct um_stream *stream);
275
- VALUE stream_get_string(struct um_stream *stream, ulong len);
276
- VALUE resp_get_line(struct um_stream *stream, VALUE out_buffer);
277
- VALUE resp_get_string(struct um_stream *stream, ulong len, VALUE out_buffer);
268
+ VALUE stream_get_line(struct um_stream *stream, VALUE buf, ssize_t maxlen);
269
+ VALUE stream_get_string(struct um_stream *stream, VALUE buf, ssize_t len);
278
270
  VALUE resp_decode(struct um_stream *stream, VALUE out_buffer);
279
271
  void resp_encode(struct um_write_buffer *buf, VALUE obj);
280
272
 
@@ -7,44 +7,40 @@ VALUE SYM_timeout;
7
7
 
8
8
  static void AsyncOp_mark(void *ptr) {
9
9
  struct um_async_op *async_op = ptr;
10
- rb_gc_mark_movable(async_op->self);
11
10
  rb_gc_mark_movable(async_op->machine->self);
12
11
  }
13
12
 
14
- static void AsyncOp_compact(void *ptr) {
15
- struct um_async_op *async_op = ptr;
16
- async_op->self = rb_gc_location(async_op->self);
17
- }
18
-
19
- static size_t AsyncOp_size(const void *ptr) {
20
- return sizeof(struct um_async_op);
21
- }
22
-
23
13
  static void AsyncOp_free(void *ptr) {
24
14
  struct um_async_op *async_op = ptr;
25
- um_op_free(async_op->machine, async_op->op);
26
- free(ptr);
15
+ if (async_op->op)
16
+ um_op_free(async_op->machine, async_op->op);
27
17
  }
28
18
 
29
19
  static const rb_data_type_t AsyncOp_type = {
30
- "UringMachine::AsyncOp",
31
- {AsyncOp_mark, AsyncOp_free, AsyncOp_size, AsyncOp_compact},
32
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
20
+ .wrap_struct_name = "UringMachine::AsyncOp",
21
+ .function = {
22
+ .dmark = AsyncOp_mark,
23
+ .dfree = AsyncOp_free,
24
+ .dsize = NULL,
25
+ .dcompact = NULL
26
+ },
27
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
33
28
  };
34
29
 
35
30
  static VALUE AsyncOp_allocate(VALUE klass) {
36
- struct um_async_op *async_op = malloc(sizeof(struct um_async_op));
37
- return TypedData_Wrap_Struct(klass, &AsyncOp_type, async_op);
31
+ struct um_async_op *async_op;
32
+ return TypedData_Make_Struct(klass, struct um_async_op, &AsyncOp_type, async_op);
38
33
  }
39
34
 
40
- inline struct um_async_op *AsyncOp_data(VALUE self) {
41
- return RTYPEDDATA_DATA(self);
35
+ static inline struct um_async_op *AsyncOp_data(VALUE self) {
36
+ struct um_async_op *async_op;
37
+ TypedData_Get_Struct(self, struct um_async_op, &AsyncOp_type, async_op);
38
+ return async_op;
42
39
  }
43
40
 
44
41
  VALUE AsyncOp_initialize(VALUE self) {
45
42
  struct um_async_op *async_op = AsyncOp_data(self);
46
43
  memset(async_op, 0, sizeof(struct um_async_op));
47
- async_op->self = self;
48
44
  return self;
49
45
  }
50
46
 
data/ext/um/um_class.c CHANGED
@@ -24,27 +24,28 @@ static void UM_free(void *ptr) {
24
24
  free(ptr);
25
25
  }
26
26
 
27
- static size_t UM_size(const void *ptr) {
28
- return sizeof(struct um);
29
- }
30
-
31
- static const rb_data_type_t UM_type = {
32
- "UringMachine",
33
- {UM_mark, UM_free, UM_size, UM_compact},
34
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
27
+ static const rb_data_type_t UringMachine_type = {
28
+ .wrap_struct_name = "UringMachine",
29
+ .function = {
30
+ .dmark = UM_mark,
31
+ .dfree = UM_free,
32
+ .dsize = NULL,
33
+ .dcompact = UM_compact
34
+ },
35
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
35
36
  };
36
37
 
37
38
  static VALUE UM_allocate(VALUE klass) {
38
- struct um *machine = ALLOC(struct um);
39
-
40
- return TypedData_Wrap_Struct(klass, &UM_type, machine);
39
+ struct um *um;
40
+ return TypedData_Make_Struct(klass, struct um, &UringMachine_type, um);
41
41
  }
42
42
 
43
43
  inline struct um *um_get_machine(VALUE self) {
44
- struct um *machine = RTYPEDDATA_DATA(self);
45
- if (!machine->ring_initialized)
46
- rb_raise(rb_eRuntimeError, "Machine not initialized");
47
- return machine;
44
+ struct um *um;
45
+ TypedData_Get_Struct(self, struct um, &UringMachine_type, um);
46
+ if (!um->ring_initialized) rb_raise(rb_eRuntimeError, "Machine not initialized");
47
+
48
+ return um;
48
49
  }
49
50
 
50
51
  VALUE UM_initialize(VALUE self) {
@@ -244,7 +245,7 @@ VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
244
245
  VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
245
246
  struct um *machine = um_get_machine(self);
246
247
  struct um_mutex *mutex_data = Mutex_data(mutex);
247
- return um_mutex_synchronize(machine, &mutex_data->state);
248
+ return um_mutex_synchronize(machine, mutex, &mutex_data->state);
248
249
  }
249
250
 
250
251
  VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
@@ -278,7 +279,7 @@ struct um_open_ctx {
278
279
  VALUE fd;
279
280
  };
280
281
 
281
- VALUE UM_open_ensure(VALUE arg) {
282
+ VALUE UM_open_complete(VALUE arg) {
282
283
  struct um_open_ctx *ctx = (struct um_open_ctx *)arg;
283
284
  UM_close(ctx->self, ctx->fd);
284
285
  return ctx->self;
@@ -290,7 +291,7 @@ VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
290
291
  VALUE fd = um_open(machine, pathname, NUM2INT(flags), 0666);
291
292
  if (rb_block_given_p()) {
292
293
  struct um_open_ctx ctx = { self, fd };
293
- return rb_ensure(rb_yield, fd, UM_open_ensure, (VALUE)&ctx);
294
+ return rb_ensure(rb_yield, fd, UM_open_complete, (VALUE)&ctx);
294
295
  }
295
296
  else
296
297
  return fd;
data/ext/um/um_const.c CHANGED
@@ -39,18 +39,24 @@ void um_define_net_constants(VALUE mod) {
39
39
  DEF_CONST_INT(mod, STATX_BASIC_STATS);
40
40
  DEF_CONST_INT(mod, STATX_BTIME);
41
41
  DEF_CONST_INT(mod, STATX_ALL);
42
+ #ifdef STATX_MNT_ID
42
43
  DEF_CONST_INT(mod, STATX_MNT_ID);
44
+ #endif
45
+ #ifdef STATX_DIOALIGN
43
46
  DEF_CONST_INT(mod, STATX_DIOALIGN);
47
+ #endif
48
+ #ifdef STATX_MNT_ID_UNIQUE
44
49
  DEF_CONST_INT(mod, STATX_MNT_ID_UNIQUE);
45
- #ifdef STATX_SUBVOL
50
+ #endif
51
+ #ifdef STATX_SUBVOL
46
52
  DEF_CONST_INT(mod, STATX_SUBVOL);
47
- #endif
48
- #ifdef STATX_WRITE_ATOMIC
53
+ #endif
54
+ #ifdef STATX_WRITE_ATOMIC
49
55
  DEF_CONST_INT(mod, STATX_WRITE_ATOMIC);
50
- #endif
51
- #ifdef STATX_DIO_READ_ALIGN
56
+ #endif
57
+ #ifdef STATX_DIO_READ_ALIGN
52
58
  DEF_CONST_INT(mod, STATX_DIO_READ_ALIGN);
53
- #endif
59
+ #endif
54
60
 
55
61
  DEF_CONST_INT(mod, MSG_CONFIRM);
56
62
  DEF_CONST_INT(mod, MSG_DONTROUTE);
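Since several `STATX_*` constants are now defined only when the platform headers provide them, Ruby code that uses them may want to feature-test first. A hedged sketch, with constant names taken from the guards above:

```ruby
# STATX_MNT_ID and related constants exist only when the libc/kernel
# headers define them, so guard their use:
mask  = UM::STATX_BASIC_STATS
mask |= UM::STATX_MNT_ID if UM.const_defined?(:STATX_MNT_ID)
```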
@@ -3,38 +3,30 @@
3
3
 
4
4
  VALUE cMutex;
5
5
 
6
- static void Mutex_mark(void *ptr) {
7
- struct um_mutex *mutex = ptr;
8
- rb_gc_mark_movable(mutex->self);
9
- }
10
-
11
- static void Mutex_compact(void *ptr) {
12
- struct um_mutex *mutex = ptr;
13
- mutex->self = rb_gc_location(mutex->self);
14
- }
15
-
16
- static size_t Mutex_size(const void *ptr) {
17
- return sizeof(struct um_mutex);
18
- }
19
-
20
6
  static const rb_data_type_t Mutex_type = {
21
- "UringMachineMutex",
22
- {Mutex_mark, free, Mutex_size, Mutex_compact},
23
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
7
+ .wrap_struct_name = "UringMachine::Mutex",
8
+ .function = {
9
+ .dmark = NULL,
10
+ .dfree = RUBY_TYPED_DEFAULT_FREE,
11
+ .dsize = NULL,
12
+ .dcompact = NULL
13
+ },
14
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
24
15
  };
25
16
 
26
17
  static VALUE Mutex_allocate(VALUE klass) {
27
- struct um_mutex *mutex = malloc(sizeof(struct um_mutex));
28
- return TypedData_Wrap_Struct(klass, &Mutex_type, mutex);
18
+ struct um_mutex *mutex;
19
+ return TypedData_Make_Struct(klass, struct um_mutex, &Mutex_type, mutex);
29
20
  }
30
21
 
31
22
  inline struct um_mutex *Mutex_data(VALUE self) {
32
- return RTYPEDDATA_DATA(self);
23
+ struct um_mutex *mutex;
24
+ TypedData_Get_Struct(self, struct um_mutex, &Mutex_type, mutex);
25
+ return mutex;
33
26
  }
34
27
 
35
28
  VALUE Mutex_initialize(VALUE self) {
36
29
  struct um_mutex *mutex = Mutex_data(self);
37
- mutex->self = self;
38
30
  um_mutex_init(mutex);
39
31
  return self;
40
32
  }
@@ -18,23 +18,26 @@ static void Queue_free(void *ptr) {
18
18
  um_queue_free(queue);
19
19
  }
20
20
 
21
- static size_t Queue_size(const void *ptr) {
22
- return sizeof(struct um_queue);
23
- }
24
-
25
21
  static const rb_data_type_t Queue_type = {
26
- "UringMachineQueue",
27
- {Queue_mark, Queue_free, Queue_size, Queue_compact},
28
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
22
+ .wrap_struct_name = "UringMachine::Queue",
23
+ .function = {
24
+ .dmark = Queue_mark,
25
+ .dfree = Queue_free,
26
+ .dsize = NULL,
27
+ .dcompact = Queue_compact
28
+ },
29
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
29
30
  };
30
31
 
31
32
  static VALUE Queue_allocate(VALUE klass) {
32
- struct um_queue *queue = malloc(sizeof(struct um_queue));
33
- return TypedData_Wrap_Struct(klass, &Queue_type, queue);
33
+ struct um_queue *queue;
34
+ return TypedData_Make_Struct(klass, struct um_queue, &Queue_type, queue);
34
35
  }
35
36
 
36
37
  inline struct um_queue *Queue_data(VALUE self) {
37
- return RTYPEDDATA_DATA(self);
38
+ struct um_queue *queue;
39
+ TypedData_Get_Struct(self, struct um_queue, &Queue_type, queue);
40
+ return queue;
38
41
  }
39
42
 
40
43
  VALUE Queue_initialize(VALUE self) {
data/ext/um/um_stream.c CHANGED
@@ -1,31 +1,5 @@
1
1
  #include "um.h"
2
-
3
- VALUE stream_get_line(struct um_stream *stream) {
4
- char *start = RSTRING_PTR(stream->buffer) + stream->pos;
5
- while (true) {
6
- char * lf_ptr = memchr(start, '\n', stream->len - stream->pos);
7
- if (lf_ptr) {
8
- ulong len = lf_ptr - start;
9
- if (len && (start[len - 1] == '\r')) len -= 1;
10
-
11
- VALUE str = rb_str_new(start, len);
12
- stream->pos += lf_ptr - start + 1;
13
- return str;
14
- }
15
-
16
- if (!stream_read_more(stream)) return Qnil;
17
- }
18
- }
19
-
20
- VALUE stream_get_string(struct um_stream *stream, ulong len) {
21
- while (stream->len - stream->pos < len)
22
- if (!stream_read_more(stream)) return Qnil;
23
-
24
- char *start = RSTRING_PTR(stream->buffer) + stream->pos;
25
- VALUE str = rb_utf8_str_new(start, len);
26
- stream->pos += len;
27
- return str;
28
- }
2
+ #include <stdlib.h>
29
3
 
30
4
  static inline void stream_check_truncate_buffer(struct um_stream *stream) {
31
5
  if ((stream->pos == stream->len) && (stream->len >= 1 << 12)) {
@@ -67,10 +41,67 @@ int stream_read_more(struct um_stream *stream) {
67
41
  return 1;
68
42
  }
69
43
 
70
- // ensure string can hold at least len bytes
44
+ // ensures given string can hold at least given len bytes (+trailing null)
71
45
  static inline void str_expand(VALUE str, size_t len) {
72
- size_t capa = rb_str_capacity(str);
73
- if (capa < len + 1) rb_str_modify_expand(str, len + 1 - capa);
46
+ rb_str_resize(str, len);
47
+ }
48
+
49
+ static inline void str_copy_bytes(VALUE dest, const char *src, ssize_t len) {
50
+ str_expand(dest, len + 1);
51
+ char *dest_ptr = RSTRING_PTR(dest);
52
+ memcpy(dest_ptr, src, len);
53
+ dest_ptr[len] = 0;
54
+ rb_str_set_len(dest, len);
55
+ }
56
+
57
+ VALUE stream_get_line(struct um_stream *stream, VALUE buf, ssize_t maxlen) {
58
+ char *start = RSTRING_PTR(stream->buffer) + stream->pos;
59
+ while (true) {
60
+ ssize_t pending_len = stream->len - stream->pos;
61
+ ssize_t search_len = pending_len;
62
+ ssize_t absmax_len = labs(maxlen);
63
+ int should_limit_len = (absmax_len > 0) && (search_len > maxlen);
64
+ if (should_limit_len) search_len = absmax_len;
65
+
66
+ char * lf_ptr = memchr(start, '\n', search_len);
67
+ if (lf_ptr) {
68
+ ssize_t len = lf_ptr - start;
69
+ if (len && (start[len - 1] == '\r')) len -= 1;
70
+
71
+ stream->pos += lf_ptr - start + 1;
72
+ if (NIL_P(buf)) return rb_utf8_str_new(start, len);
73
+
74
+ str_copy_bytes(buf, start, len);
75
+ return buf;
76
+ }
77
+ else if (should_limit_len && pending_len > search_len)
78
+ // maxlen
79
+ return Qnil;
80
+
81
+ if (!stream_read_more(stream))
82
+ return Qnil;
83
+ else
84
+ // update start ptr (it might have changed after reading)
85
+ start = RSTRING_PTR(stream->buffer) + stream->pos;
86
+ }
87
+ }
88
+
89
+ VALUE stream_get_string(struct um_stream *stream, VALUE buf, ssize_t len) {
90
+ size_t abslen = labs(len);
91
+ while (stream->len - stream->pos < abslen)
92
+ if (!stream_read_more(stream)) {
93
+ if (len > 0) return Qnil;
94
+
95
+ abslen = stream->len - stream->pos;
96
+ }
97
+
98
+ char *start = RSTRING_PTR(stream->buffer) + stream->pos;
99
+ stream->pos += abslen;
100
+
101
+ if (NIL_P(buf)) return rb_utf8_str_new(start, abslen);
102
+
103
+ str_copy_bytes(buf, start, len);
104
+ return buf;
74
105
  }
75
106
 
76
107
  VALUE resp_get_line(struct um_stream *stream, VALUE out_buffer) {
@@ -89,11 +120,7 @@ VALUE resp_get_line(struct um_stream *stream, VALUE out_buffer) {
89
120
  return str;
90
121
  }
91
122
 
92
- str_expand(out_buffer, len + 1);
93
- char *dest_ptr = RSTRING_PTR(out_buffer);
94
- memcpy(dest_ptr, start, len);
95
- dest_ptr[len] = 0; // add null at end
96
- rb_str_set_len(out_buffer, len);
123
+ str_copy_bytes(out_buffer, start, len);
97
124
  return out_buffer;
98
125
  }
99
126
 
@@ -116,11 +143,7 @@ VALUE resp_get_string(struct um_stream *stream, ulong len, VALUE out_buffer) {
116
143
 
117
144
  if (NIL_P(out_buffer)) return rb_utf8_str_new(start, len);
118
145
 
119
- str_expand(out_buffer, len + 1);
120
- char *dest_ptr = RSTRING_PTR(out_buffer);
121
- memcpy(dest_ptr, start, len);
122
- dest_ptr[len] = 0; // add null at end
123
- rb_str_set_len(out_buffer, len);
146
+ str_copy_bytes(out_buffer, start, len);
124
147
  return out_buffer;
125
148
  }
126
149
 
@@ -4,42 +4,40 @@ VALUE cStream;
4
4
 
5
5
  static void Stream_mark(void *ptr) {
6
6
  struct um_stream *stream = ptr;
7
- rb_gc_mark_movable(stream->self);
8
7
  rb_gc_mark_movable(stream->buffer);
9
8
  }
10
9
 
11
10
  static void Stream_compact(void *ptr) {
12
11
  struct um_stream *stream = ptr;
13
- stream->self = rb_gc_location(stream->self);
14
12
  stream->buffer = rb_gc_location(stream->buffer);
15
13
  }
16
14
 
17
- static void Stream_free(void *ptr) {
18
- free(ptr);
19
- }
20
-
21
- static size_t Stream_size(const void *ptr) {
22
- return sizeof(struct um_stream);
23
- }
24
-
25
15
  static const rb_data_type_t Stream_type = {
26
- "UringMachine::Stream",
27
- {Stream_mark, Stream_free, Stream_size, Stream_compact},
28
- 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
16
+ .wrap_struct_name = "UringMachine::Stream",
17
+ .function = {
18
+ .dmark = Stream_mark,
19
+ .dfree = RUBY_TYPED_DEFAULT_FREE,
20
+ .dsize = NULL,
21
+ .dcompact = Stream_compact
22
+ },
23
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
29
24
  };
30
25
 
31
26
  static VALUE Stream_allocate(VALUE klass) {
32
- struct um_stream *stream = ALLOC(struct um_stream);
27
+ struct um_stream *stream;
28
+ return TypedData_Make_Struct(klass, struct um_stream, &Stream_type, stream);
29
+ }
33
30
 
34
- return TypedData_Wrap_Struct(klass, &Stream_type, stream);
31
+ static inline struct um_stream *Stream_data(VALUE self) {
32
+ struct um_stream *stream;
33
+ TypedData_Get_Struct(self, struct um_stream, &Stream_type, stream);
34
+ return stream;
35
35
  }
36
36
 
37
37
  VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
38
- struct um_stream *stream = RTYPEDDATA_DATA(self);
38
+ struct um_stream *stream = Stream_data(self);
39
39
 
40
- stream->self = self;
41
-
42
- stream->machine = RTYPEDDATA_DATA(machine);
40
+ stream->machine = um_get_machine(machine);
43
41
  stream->fd = NUM2ULONG(fd);
44
42
  stream->buffer = rb_utf8_str_new_literal("");
45
43
  rb_str_resize(stream->buffer, 1 << 16); // 64KB
@@ -52,40 +50,22 @@ VALUE Stream_initialize(VALUE self, VALUE machine, VALUE fd) {
52
50
  return self;
53
51
  }
54
52
 
55
- VALUE Stream_get_line(VALUE self) {
56
- struct um_stream *stream = RTYPEDDATA_DATA(self);
57
- if (unlikely(stream->eof)) return Qnil;
58
-
59
- return stream_get_line(stream);
60
- }
61
-
62
- VALUE Stream_get_string(VALUE self, VALUE len) {
63
- struct um_stream *stream = RTYPEDDATA_DATA(self);
64
- if (unlikely(stream->eof)) return Qnil;
65
-
66
- return stream_get_string(stream, NUM2ULONG(len));
67
- }
68
-
69
- VALUE Stream_resp_get_line(VALUE self) {
70
- struct um_stream *stream = RTYPEDDATA_DATA(self);
53
+ VALUE Stream_get_line(VALUE self, VALUE buf, VALUE limit) {
54
+ struct um_stream *stream = Stream_data(self);
71
55
  if (unlikely(stream->eof)) return Qnil;
72
56
 
73
- VALUE line = resp_get_line(stream, Qnil);
74
- RB_GC_GUARD(line);
75
- return line;
57
+ return stream_get_line(stream, buf, NUM2LONG(limit));
76
58
  }
77
59
 
78
- VALUE Stream_resp_get_string(VALUE self, VALUE len) {
79
- struct um_stream *stream = RTYPEDDATA_DATA(self);
60
+ VALUE Stream_get_string(VALUE self, VALUE buf, VALUE len) {
61
+ struct um_stream *stream = Stream_data(self);
80
62
  if (unlikely(stream->eof)) return Qnil;
81
63
 
82
- VALUE str = resp_get_string(stream, NUM2ULONG(len), Qnil);
83
- RB_GC_GUARD(str);
84
- return str;
64
+ return stream_get_string(stream, buf, NUM2LONG(len));
85
65
  }
86
66
 
87
67
  VALUE Stream_resp_decode(VALUE self) {
88
- struct um_stream *stream = RTYPEDDATA_DATA(self);
68
+ struct um_stream *stream = Stream_data(self);
89
69
  if (unlikely(stream->eof)) return Qnil;
90
70
 
91
71
  VALUE out_buffer = rb_utf8_str_new_literal("");
@@ -109,11 +89,8 @@ void Init_Stream(void) {
109
89
 
110
90
  rb_define_method(cStream, "initialize", Stream_initialize, 2);
111
91
 
112
- rb_define_method(cStream, "get_line", Stream_get_line, 0);
113
- rb_define_method(cStream, "get_string", Stream_get_string, 1);
114
-
115
- rb_define_method(cStream, "resp_get_line", Stream_resp_get_line, 0);
116
- rb_define_method(cStream, "resp_get_string", Stream_resp_get_string, 1);
92
+ rb_define_method(cStream, "get_line", Stream_get_line, 2);
93
+ rb_define_method(cStream, "get_string", Stream_get_string, 2);
117
94
 
118
95
  rb_define_method(cStream, "resp_decode", Stream_resp_decode, 0);
119
96
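Restating the semantics implemented above in Ruby terms (a hedged summary inferred from the C code and the tests that follow, assuming `stream` is an open `UM::Stream`; this is not additional documented API):

```ruby
buf = String.new(capacity: 65536)

stream.get_line(nil, 0)     # no limit; returns a new string, or nil at EOF
stream.get_line(buf, 0)     # copies the line into buf and returns buf
stream.get_line(buf, 100)   # nil (position unchanged) if no "\n" within 100 bytes
stream.get_line(buf, -100)  # negative limit accepted; the tests treat it like the positive form

stream.get_string(nil, 6)   # exactly 6 bytes, or nil if EOF is reached first
stream.get_string(nil, -6)  # up to 6 bytes; at EOF returns whatever is still buffered
```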
 
data/ext/um/um_sync.c CHANGED
@@ -73,24 +73,28 @@ inline void um_mutex_unlock(struct um *machine, uint32_t *state) {
73
73
 
74
74
  struct sync_ctx {
75
75
  struct um *machine;
76
+ VALUE mutex;
76
77
  uint32_t *state;
77
78
  };
78
79
 
79
- VALUE synchronize_begin(VALUE arg) {
80
+ VALUE synchronize_start(VALUE arg) {
80
81
  struct sync_ctx *ctx = (struct sync_ctx *)arg;
81
82
  um_mutex_lock(ctx->machine, ctx->state);
82
83
  return rb_yield(Qnil);
83
84
  }
84
85
 
85
- VALUE synchronize_ensure(VALUE arg) {
86
+ VALUE synchronize_complete(VALUE arg) {
86
87
  struct sync_ctx *ctx = (struct sync_ctx *)arg;
88
+ // Mutex is an embedded data class, so it might have moved while the operation
89
+ // was ongoing. We need to update the pointer to the embedded state variable.
90
+ ctx->state = &Mutex_data(ctx->mutex)->state;
87
91
  um_mutex_unlock(ctx->machine, ctx->state);
88
92
  return Qnil;
89
93
  }
90
94
 
91
- inline VALUE um_mutex_synchronize(struct um *machine, uint32_t *state) {
92
- struct sync_ctx ctx = { .machine = machine, .state = state };
93
- return rb_ensure(synchronize_begin, (VALUE)&ctx, synchronize_ensure, (VALUE)&ctx);
95
+ inline VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state) {
96
+ struct sync_ctx ctx = { .machine = machine, .mutex = mutex, .state = state };
97
+ return rb_ensure(synchronize_start, (VALUE)&ctx, synchronize_complete, (VALUE)&ctx);
94
98
  }
95
99
 
96
100
  #define QUEUE_EMPTY 0
@@ -116,8 +120,6 @@ inline void um_queue_free(struct um_queue *queue) {
116
120
  free(entry);
117
121
  entry = next;
118
122
  }
119
-
120
- free(queue);
121
123
  }
122
124
 
123
125
  inline void um_queue_mark(struct um_queue *queue) {
@@ -226,11 +228,12 @@ enum queue_op { QUEUE_POP, QUEUE_SHIFT };
226
228
 
227
229
  struct queue_wait_ctx {
228
230
  struct um *machine;
231
+ VALUE queue_obj;
229
232
  struct um_queue *queue;
230
233
  enum queue_op op;
231
234
  };
232
235
 
233
- VALUE um_queue_remove_begin(VALUE arg) {
236
+ VALUE um_queue_remove_start(VALUE arg) {
234
237
  struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
235
238
 
236
239
  ctx->queue->num_waiters++;
@@ -247,9 +250,13 @@ VALUE um_queue_remove_begin(VALUE arg) {
247
250
  return (ctx->op == QUEUE_POP ? queue_remove_tail : queue_remove_head)(ctx->queue);
248
251
  }
249
252
 
250
- VALUE um_queue_remove_ensure(VALUE arg) {
253
+ VALUE um_queue_remove_complete(VALUE arg) {
251
254
  struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
252
255
 
256
+ // the um_queue struct is embedded, so it might have been moved while the op
257
+ // was ongoing, so we need to get it again on op completion
258
+ ctx->queue = Queue_data(ctx->queue_obj);
259
+
253
260
  ctx->queue->num_waiters--;
254
261
 
255
262
  if (ctx->queue->num_waiters && ctx->queue->tail) {
@@ -263,11 +270,11 @@ VALUE um_queue_remove_ensure(VALUE arg) {
263
270
  }
264
271
 
265
272
  VALUE um_queue_pop(struct um *machine, struct um_queue *queue) {
266
- struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_POP };
267
- return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
273
+ struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_POP };
274
+ return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
268
275
  }
269
276
 
270
277
  VALUE um_queue_shift(struct um *machine, struct um_queue *queue) {
271
- struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_SHIFT };
272
- return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
278
+ struct queue_wait_ctx ctx = { .machine = machine, .queue_obj = queue->self, .queue = queue, .op = QUEUE_SHIFT };
279
+ return rb_ensure(um_queue_remove_start, (VALUE)&ctx, um_queue_remove_complete, (VALUE)&ctx);
273
280
  }
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  class UringMachine
4
- VERSION = '0.11.1'
4
+ VERSION = '0.12.1'
5
5
  end
data/test/test_stream.rb CHANGED
@@ -15,41 +15,92 @@ class StreamTest < StreamBaseTest
15
15
  machine.write(@wfd, "foo\nbar\r\nbaz")
16
16
  machine.close(@wfd)
17
17
 
18
- assert_equal 'foo', @stream.get_line
19
- assert_equal 'bar', @stream.get_line
20
- assert_nil @stream.get_line
18
+ assert_equal 'foo', @stream.get_line(nil, 0)
19
+ assert_equal 'bar', @stream.get_line(nil, 0)
20
+ assert_nil @stream.get_line(nil, 0)
21
+ end
22
+
23
+ def test_get_line_with_buf
24
+ machine.write(@wfd, "foo\nbar\r\nbaz")
25
+ machine.close(@wfd)
26
+
27
+ buf = +''
28
+ ret = @stream.get_line(buf, 0)
29
+ assert_equal 'foo', buf
30
+ assert_equal ret, buf
31
+
32
+ ret = @stream.get_line(buf, 0)
33
+ assert_equal 'bar', buf
34
+ assert_equal ret, buf
35
+ end
36
+
37
+ def test_get_line_with_positive_maxlen
38
+ machine.write(@wfd, "foobar\r\n")
39
+ machine.close(@wfd)
40
+
41
+ buf = +''
42
+ ret = @stream.get_line(buf, 3)
43
+ assert_nil ret
44
+ assert_equal '', buf
45
+
46
+ # verify that stream pos has not changed
47
+ ret = @stream.get_line(buf, 0)
48
+ assert_equal 'foobar', buf
49
+ assert_equal ret, buf
50
+ end
51
+
52
+ def test_get_line_with_negative_maxlen
53
+ machine.write(@wfd, "foobar\r\n")
54
+ machine.close(@wfd)
55
+
56
+ buf = +''
57
+ ret = @stream.get_line(buf, -3)
58
+ assert_nil ret
59
+ assert_equal '', buf
60
+
61
+ # verify that stream pos has not changed
62
+ ret = @stream.get_line(buf, 0)
63
+ assert_equal 'foobar', buf
64
+ assert_equal ret, buf
21
65
  end
22
66
 
23
67
  def test_get_string
24
68
  machine.write(@wfd, "foobarbazblahzzz")
25
69
  machine.close(@wfd)
26
70
 
27
- assert_equal 'foobar', @stream.get_string(6)
28
- assert_equal 'baz', @stream.get_string(3)
29
- assert_equal 'blah', @stream.get_string(4)
30
- assert_nil @stream.get_string(4)
71
+ assert_equal 'foobar', @stream.get_string(nil, 6)
72
+ assert_equal 'baz', @stream.get_string(nil, 3)
73
+ assert_equal 'blah', @stream.get_string(nil, 4)
74
+ assert_nil @stream.get_string(nil, 4)
31
75
  end
32
- end
33
76
 
34
- class StreamRespTest < StreamBaseTest
35
- def test_trdp_get_line
36
- machine.write(@wfd, "foo\r\nbarbar\r\nbaz\n")
77
+ def test_get_string_with_buf
78
+ machine.write(@wfd, "foobarbazblahzzz")
37
79
  machine.close(@wfd)
38
-
39
- assert_equal 'foo', @stream.resp_get_line
40
- assert_equal 'barbar', @stream.resp_get_line
41
- assert_nil @stream.resp_get_line
80
+
81
+ buf = +''
82
+ ret = @stream.get_string(buf, 6)
83
+ assert_equal 'foobar', buf
84
+ assert_equal ret, buf
85
+
86
+ ret = @stream.get_string(buf, 3)
87
+ assert_equal 'baz', buf
88
+ assert_equal ret, buf
42
89
  end
43
90
 
44
- def test_resp_get_string
45
- machine.write(@wfd, "foo\r\nbarbar\r\nbaz\n")
91
+ def test_get_string_with_negative_len
92
+ machine.write(@wfd, "foobar")
46
93
  machine.close(@wfd)
47
94
 
48
- assert_equal 'foo', @stream.resp_get_string(3)
49
- assert_equal 'barbar', @stream.resp_get_string(6)
50
- assert_nil @stream.resp_get_string(3)
95
+ ret = @stream.get_string(nil, -12)
96
+ assert_equal 'foobar', ret
97
+
98
+ ret = @stream.get_string(nil, -4)
99
+ assert_nil ret
51
100
  end
101
+ end
52
102
 
103
+ class StreamRespTest < StreamBaseTest
53
104
  def test_resp_decode
54
105
  machine.write(@wfd, "+foo bar\r\n")
55
106
  assert_equal "foo bar", @stream.resp_decode
data/test/test_um.rb CHANGED
@@ -271,7 +271,7 @@ class PeriodicallyTest < UMBaseTest
271
271
  rescue Cancel
272
272
  cancel = 1
273
273
  end
274
- machine.snooze
274
+ 2.times { machine.snooze }
275
275
  assert_equal 0, machine.pending_count
276
276
  t1 = monotonic_clock
277
277
  assert_in_range 0.05..0.08, t1 - t0
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: uringmachine
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.11.1
4
+ version: 0.12.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Sharon Rosner