uringmachine 0.23.1 → 0.24.0

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+
+ require 'uringmachine'
+ require 'uringmachine/fiber_scheduler'
+ require 'securerandom'
+
+ $machine = UringMachine.new
+ scheduler = UM::FiberScheduler.new($machine)
+ Fiber.set_scheduler scheduler
+
+ fn = "/tmp/file_io_#{SecureRandom.hex}"
+
+ r, w = IO.pipe
+
+ f1 = Fiber.schedule do
+   File.open(fn, 'w') {
+     it.sync = true
+     UM.debug "writing..."
+     it << 'foobar'
+     # w.close
+   }
+
+   File.open(fn, 'r') {
+     UM.debug "reading..."
+     buf = it.read
+     UM.debug "read: #{buf}"
+   }
+ rescue => e
+   p e
+   p e.backtrace
+ end
+ scheduler.join
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ require 'bundler/inline'
+
+ gemfile do
+   source 'https://rubygems.org'
+   gem 'io-event'
+   gem 'async'
+ end
+
+
+ require 'async'
+ require 'securerandom'
+
+ selector ||= IO::Event::Selector::URing.new(Fiber.current)
+ scheduler = Async::Scheduler.new(selector:)
+ Fiber.set_scheduler scheduler
+
+ fn = "/tmp/file_io_#{SecureRandom.hex}"
+
+ scheduler.run do
+   Fiber.schedule do
+     File.open(fn, 'w') {
+       it << 'foo'
+       p pre_flush: IO.read(fn)
+       it.flush
+       it << 'bar'
+       p post_flush: IO.read(fn)
+
+     }
+     p post_close: IO.read(fn)
+   end
+ end
data/ext/um/um.c CHANGED
@@ -18,7 +18,7 @@ inline void prepare_io_uring_params(struct io_uring_params *params, uint sqpoll_
  params->flags |= IORING_SETUP_COOP_TASKRUN;
  }

- void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_msec) {
+ void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_msec, int sidecar_mode) {
  memset(machine, 0, sizeof(struct um));

  RB_OBJ_WRITE(self, &machine->self, self);
@@ -27,16 +27,26 @@ void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_mse
  machine->size = (size > 0) ? size : DEFAULT_SIZE;
  machine->sqpoll_mode = !!sqpoll_timeout_msec;

+ // sidecar handling
+ machine->sidecar_mode = sidecar_mode;
+ machine->sidecar_signal = aligned_alloc(4, sizeof(uint32_t));
+ memset(machine->sidecar_signal, 0, sizeof(uint32_t));
+
  struct io_uring_params params;
  prepare_io_uring_params(&params, sqpoll_timeout_msec);
  int ret = io_uring_queue_init_params(machine->size, &machine->ring, &params);
  if (ret) rb_syserr_fail(-ret, strerror(-ret));
  machine->ring_initialized = 1;
+
+ if (machine->sidecar_mode) um_sidecar_setup(machine);
  }

  inline void um_teardown(struct um *machine) {
  if (!machine->ring_initialized) return;

+ if (machine->sidecar_mode) um_sidecar_teardown(machine);
+ if (machine->sidecar_signal) free(machine->sidecar_signal);
+
  for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
  struct buf_ring_descriptor *desc = machine->buffer_rings + i;
  io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
@@ -59,8 +69,7 @@ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
  sqe = io_uring_get_sqe(&machine->ring);
  if (likely(sqe)) goto done;

- fprintf(stderr, "!!!Failed to get SQE\n");
- um_raise_internal_error("Failed to get SQE");
+ um_raise_internal_error("Submission queue full. Consider raising the machine size.");

  // TODO: retry getting SQE?

@@ -271,37 +280,60 @@ inline void um_profile_wait_cqe_post(struct um *machine, double time_monotonic0,
  // machine->metrics.time_last_cpu = time_cpu;
  }

+ inline void *um_wait_for_sidecar_signal(void *ptr) {
+ struct um *machine = ptr;
+ um_sidecar_signal_wait(machine);
+ return NULL;
+ }
+
  // Waits for the given minimum number of completion entries. The wait_nr is
  // either 1 - where we wait for at least one CQE to be ready, or 0, where we
  // don't wait, and just process any CQEs that already ready.
  static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wait_nr) {
  struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL, .wait_nr = wait_nr };
  machine->metrics.total_waits++;
- double time_monotonic0 = 0.0;
- VALUE fiber;
- if (machine->profile_mode) um_profile_wait_cqe_pre(machine, &time_monotonic0, &fiber);
- rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
- if (machine->profile_mode) um_profile_wait_cqe_post(machine, time_monotonic0, fiber);
-
- if (unlikely(ctx.result < 0)) {
- // the internal calls to (maybe submit) and wait for cqes may fail with:
- // -EINTR (interrupted by signal)
- // -EAGAIN (apparently can be returned when wait_nr = 0)
- // both should not raise an exception.
- switch (ctx.result) {
- case -EINTR:
- case -EAGAIN:
- // do nothing
- break;
- default:
- rb_syserr_fail(-ctx.result, strerror(-ctx.result));
- }
- }

- if (ctx.cqe) {
- um_process_cqe(machine, ctx.cqe);
- io_uring_cq_advance(&machine->ring, 1);
+ if (machine->sidecar_mode) {
+ // fprintf(stderr, ">> sidecar wait cqes (unsubmitted: %d)\n", machine->metrics.ops_unsubmitted);
+ if (machine->metrics.ops_unsubmitted) {
+ io_uring_submit(&machine->ring);
+ machine->metrics.ops_unsubmitted = 0;
+ }
+ if (wait_nr) {
+ // fprintf(stderr, ">> um_wait_for_sidecar_signal\n");
+ // rb_thread_call_without_gvl(um_wait_for_sidecar_signal, (void *)machine, RUBY_UBF_PROCESS, 0);
+ // fprintf(stderr, "<< um_wait_for_sidecar_signal\n");
+ um_sidecar_signal_wait(machine);
+ }
  um_process_ready_cqes(machine);
+ // fprintf(stderr, "<< sidecar wait cqes\n");
+ }
+ else {
+ double time_monotonic0 = 0.0;
+ VALUE fiber;
+ if (machine->profile_mode) um_profile_wait_cqe_pre(machine, &time_monotonic0, &fiber);
+ rb_thread_call_without_gvl(um_wait_for_cqe_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+ if (machine->profile_mode) um_profile_wait_cqe_post(machine, time_monotonic0, fiber);
+
+ if (unlikely(ctx.result < 0)) {
+ // the internal calls to (maybe submit) and wait for cqes may fail with:
+ // -EINTR (interrupted by signal)
+ // -EAGAIN (apparently can be returned when wait_nr = 0)
+ // both should not raise an exception.
+ switch (ctx.result) {
+ case -EINTR:
+ case -EAGAIN:
+ // do nothing
+ break;
+ default:
+ rb_syserr_fail(-ctx.result, strerror(-ctx.result));
+ }
+ }
+ if (ctx.cqe) {
+ um_process_cqe(machine, ctx.cqe);
+ io_uring_cq_advance(&machine->ring, 1);
+ um_process_ready_cqes(machine);
+ }
  }
  }

@@ -336,30 +368,15 @@ inline VALUE um_switch(struct um *machine) {
  &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
  machine->metrics.total_ops
  );
+
  while (true) {
  struct um_op *op = um_runqueue_shift(machine);
  if (op) {
  if (unlikely(op->flags & OP_F_RUNQUEUE_SKIP)) continue;

- // in case of a snooze, we need to prevent a situation where completions
- // are not processed because the runqueue is never empty. Theoretically,
- // we can still have a situation where multiple fibers are all doing a
- // snooze repeatedly, which can prevent completions from being processed.
-
- // is the op a snooze op and is this the same fiber as the current one?
- if (unlikely(op->kind == OP_SCHEDULE && op->fiber == rb_fiber_current())) {
- // are there any pending ops (i.e. waiting for completion)?
- if (machine->metrics.ops_pending > 0) {
- // if yes, process completions, get runqueue head, put original op
- // back on runqueue.
- // um_process_ready_cqes(machine);
- um_wait_for_and_process_ready_cqes(machine, 0);
- struct um_op *op2 = um_runqueue_shift(machine);
- if (likely(op2 && op2 != op)) {
- um_runqueue_push(machine, op);
- op = op2;
- }
- }
+ // in test mode we want to process I/O on each snooze
+ if (unlikely(machine->test_mode && (op->kind == OP_SCHEDULE))) {
+ um_wait_for_and_process_ready_cqes(machine, 0);
  }
  return process_runqueue_op(machine, op);
  }
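The special-case snooze handling above is replaced by an explicit test mode flag. Going by the test_mode= setter registered later in this diff (in um_class.c), a test might opt in roughly like this; a sketch only, not taken from the gem's own test suite:

# Sketch: with test mode enabled, every snooze also drains ready CQEs.
machine = UringMachine.new
machine.test_mode = true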
@@ -592,10 +609,10 @@ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {

  int completed = um_op_completed_p(&op);
  if (unlikely(!completed)) goto cancelled;
-
+
  writev_res = op.result.res;
  if (unlikely(writev_res < 0)) goto done;
-
+
  len -= writev_res;
  if (len) {
  um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
@@ -1016,7 +1033,7 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
  }

  while (pending) {
- um_wait_for_and_process_ready_cqes(machine, 0);
+ um_wait_for_and_process_ready_cqes(machine, 1);

  for (uint i = 0; i < total_len; i++) {
  struct um_op *op = ops + i;
data/ext/um/um.h CHANGED
@@ -156,7 +156,11 @@ struct um {
  uint mark; // used to mark instances for debugging

  struct um_metrics metrics;
+ int test_mode;
  int profile_mode;
+ int sidecar_mode;
+ pthread_t sidecar_thread;
+ uint32_t *sidecar_signal;

  uint buffer_ring_count; // number of registered buffer rings

@@ -227,7 +231,7 @@ extern VALUE cAsyncOp;
  extern VALUE eStreamRESPError;

  struct um *um_get_machine(VALUE self);
- void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_msec);
+ void um_setup(VALUE self, struct um *machine, uint size, uint sqpoll_timeout_msec, int sidecar_mode);
  void um_teardown(struct um *machine);

  VALUE um_metrics(struct um *machine, struct um_metrics *metrics);
@@ -350,6 +354,7 @@ VALUE stream_get_line(struct um_stream *stream, VALUE buf, ssize_t maxlen);
  VALUE stream_get_string(struct um_stream *stream, VALUE buf, ssize_t len);
  VALUE resp_decode(struct um_stream *stream, VALUE out_buffer);
  void resp_encode(struct um_write_buffer *buf, VALUE obj);
+ void resp_encode_cmd(struct um_write_buffer *buf, int argc, VALUE *argv);

  __attribute__((noreturn)) void um_raise_internal_error(const char *msg);

@@ -358,4 +363,9 @@ void write_buffer_update_len(struct um_write_buffer *buf);

  void um_define_net_constants(VALUE mod);

+ void um_sidecar_setup(struct um *machine);
+ void um_sidecar_teardown(struct um *machine);
+ void um_sidecar_signal_wait(struct um *machine);
+ void um_sidecar_signal_wake(struct um *machine);
+
  #endif // UM_H
data/ext/um/um_class.c CHANGED
@@ -72,6 +72,7 @@ inline struct um *um_get_machine(VALUE self) {
  static inline uint get_sqpoll_timeout_msec(VALUE sqpoll_timeout) {
  switch (TYPE(sqpoll_timeout)) {
  case T_NIL:
+ case T_UNDEF:
  case T_FALSE:
  return 0;
  case T_FLOAT:
@@ -86,16 +87,24 @@ static inline uint get_sqpoll_timeout_msec(VALUE sqpoll_timeout) {
  }

  VALUE UM_initialize(int argc, VALUE *argv, VALUE self) {
+ static ID kwargs_ids[3];
  struct um *machine = RTYPEDDATA_DATA(self);
- VALUE entries;
- VALUE sqpoll_timeout;
- rb_scan_args(argc, argv, "02", &entries, &sqpoll_timeout);
+ VALUE opts, kwargs[3] = {Qnil, Qnil, Qnil};

- uint entries_i = NIL_P(entries) ? 0 : NUM2UINT(entries);
- uint sqpoll_timeout_msec = get_sqpoll_timeout_msec(sqpoll_timeout);
+ if (!kwargs_ids[0]) {
+ kwargs_ids[0] = rb_intern_const("size");
+ kwargs_ids[1] = rb_intern_const("sqpoll");
+ kwargs_ids[2] = rb_intern_const("sidecar");
+ }
+ rb_scan_args(argc, argv, "0:", &opts);
+ if (!NIL_P(opts)) {
+ rb_get_kwargs(opts, kwargs_ids, 0, 3, kwargs);
+ }

+ uint entries_i = TYPE(kwargs[0]) == T_FIXNUM ? NUM2UINT(kwargs[0]) : 0;
+ uint sqpoll_timeout_msec = get_sqpoll_timeout_msec(kwargs[1]);
+ um_setup(self, machine, entries_i, sqpoll_timeout_msec, RTEST(kwargs[2]));

- um_setup(self, machine, entries_i, sqpoll_timeout_msec);
  return self;
  }

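UringMachine.new now takes the keyword arguments size, sqpoll and sidecar in place of the former positional entries/sqpoll_timeout pair. A minimal sketch of the new call, with illustrative values only:

# Sketch: keyword-based construction implied by the kwargs parsed above.
machine = UringMachine.new(size: 4096, sqpoll: false, sidecar: true)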
@@ -121,12 +130,17 @@ VALUE UM_metrics(VALUE self) {
  return um_metrics(machine, &machine->metrics);
  }

- VALUE UM_profile_p(VALUE self) {
+ VALUE UM_profile_mode_p(VALUE self) {
  struct um *machine = um_get_machine(self);
  return machine->profile_mode ? Qtrue : Qfalse;
  }

- VALUE UM_profile_set(VALUE self, VALUE value) {
+ VALUE UM_sqpoll_mode_p(VALUE self) {
+ struct um *machine = um_get_machine(self);
+ return machine->sqpoll_mode ? Qtrue : Qfalse;
+ }
+
+ VALUE UM_profile_mode_set(VALUE self, VALUE value) {
  struct um *machine = um_get_machine(self);
  machine->profile_mode = RTEST(value);
  if (machine->profile_mode) {
@@ -136,6 +150,29 @@ VALUE UM_profile_set(VALUE self, VALUE value) {
  return value;
  }

+ VALUE UM_test_mode_set(VALUE self, VALUE value) {
+ struct um *machine = um_get_machine(self);
+ machine->test_mode = RTEST(value);
+ return value;
+ }
+
+ VALUE UM_sidecar_mode_p(VALUE self) {
+ struct um *machine = um_get_machine(self);
+ return machine->sidecar_mode ? Qtrue : Qfalse;
+ }
+
+ VALUE UM_sidecar_start(VALUE self) {
+ struct um *machine = um_get_machine(self);
+ um_sidecar_setup(machine);
+ return self;
+ }
+
+ VALUE UM_sidecar_stop(VALUE self) {
+ struct um *machine = um_get_machine(self);
+ um_sidecar_teardown(machine);
+ return self;
+ }
+
  VALUE UM_snooze(VALUE self) {
  struct um *machine = um_get_machine(self);
  um_schedule(machine, rb_fiber_current(), Qnil);
@@ -581,8 +618,14 @@ void Init_UM(void) {
  rb_define_method(cUM, "size", UM_size, 0);
  rb_define_method(cUM, "mark", UM_mark_m, 1);
  rb_define_method(cUM, "metrics", UM_metrics, 0);
- rb_define_method(cUM, "profile?", UM_profile_p, 0);
- rb_define_method(cUM, "profile", UM_profile_set, 1);
+ rb_define_method(cUM, "sqpoll_mode?", UM_sqpoll_mode_p, 0);
+ rb_define_method(cUM, "profile_mode?", UM_profile_mode_p, 0);
+ rb_define_method(cUM, "profile_mode=", UM_profile_mode_set, 1);
+ rb_define_method(cUM, "test_mode=", UM_test_mode_set, 1);
+
+ rb_define_method(cUM, "sidecar_mode?", UM_sidecar_mode_p, 0);
+ rb_define_method(cUM, "sidecar_start", UM_sidecar_start, 0);
+ rb_define_method(cUM, "sidecar_stop", UM_sidecar_stop, 0);

  rb_define_method(cUM, "setup_buffer_ring", UM_setup_buffer_ring, 2);

@@ -639,7 +682,7 @@ void Init_UM(void) {
  #ifdef HAVE_IO_URING_SEND_VECTORIZED
  rb_define_method(cUM, "sendv", UM_sendv, -1);
  #endif
-
+
  rb_define_method(cUM, "send_bundle", UM_send_bundle, -1);
  rb_define_method(cUM, "setsockopt", UM_setsockopt, 4);
  rb_define_method(cUM, "socket", UM_socket, 4);
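Taken together, the methods registered in Init_UM above rename the old profile/profile? accessors and add sqpoll, test and sidecar controls. A rough sketch of the resulting Ruby-level surface; illustrative only, with return values inferred from the C getters above rather than captured from a run:

machine = UringMachine.new
machine.profile_mode = true
machine.profile_mode?   # => true
machine.sqpoll_mode?    # => false unless SQPOLL was requested at construction
machine.sidecar_start   # start the sidecar thread manually
machine.sidecar_stop    # stop it again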
@@ -0,0 +1,106 @@
+ #include "um.h"
+ #include <stdatomic.h>
+ #include <linux/futex.h>
+ #include <sys/syscall.h>
+ #include <pthread.h>
+ #include <unistd.h>
+ #include <ruby/thread.h>
+
+ #define FUTEX2_SIZE_U32 0x02
+ #define SIDECAR_THREAD_STACK_SIZE PTHREAD_STACK_MIN
+
+ #define RAISE_ON_ERR(ret) if (ret) rb_syserr_fail(errno, strerror(errno))
+
+ static inline int futex(uint32_t *uaddr, int op, uint32_t val, const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3) {
+   return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
+ }
+
+ struct futex_wait_ctx {
+   uint32_t *futexp;
+   uint32_t oldval;
+ };
+
+ void *futex_wait_without_gvl(void *ptr) {
+   struct futex_wait_ctx *ctx = ptr;
+   futex(ctx->futexp, FUTEX_WAIT, ctx->oldval, NULL, NULL, 0);
+   return NULL;
+ }
+
+ static inline void xchg_futex_wait(uint32_t *futexp, uint32_t oldval, uint32_t newval) {
+   struct futex_wait_ctx ctx = { futexp, oldval };
+   while (1) {
+     if (atomic_compare_exchange_strong(futexp, &newval, oldval))
+       break;
+
+     rb_thread_call_without_gvl(futex_wait_without_gvl, (void *)&ctx, RUBY_UBF_IO, 0);
+     // int ret = futex(futexp, FUTEX_WAIT, oldval, NULL, NULL, 0);
+
+   }
+ }
+
+ static inline void xchg_futex_wake(uint32_t *futexp, uint32_t oldval, uint32_t newval) {
+   while (1) {
+     if (atomic_compare_exchange_strong(futexp, &newval, oldval))
+       break;
+
+     usleep(1);
+   }
+
+   futex(futexp, FUTEX_WAKE, 1, NULL, NULL, 0);
+ }
+
+ inline void um_sidecar_signal_wait(struct um *machine) {
+   // wait for machine->sidecar_signal to equal 1, then reset it to 0
+   xchg_futex_wait(machine->sidecar_signal, 0, 1);
+ }
+
+ inline void um_sidecar_signal_wake(struct um *machine) {
+   // busy-wait for machine->sidecar_signal to equal 0, then set it to 1 and wakeup futex waiter
+   xchg_futex_wake(machine->sidecar_signal, 1, 0);
+ }
+
+ static void *sidecar_start(void *arg) {
+   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+   struct um *machine = arg;
+   while (1) {
+     int ret = io_uring_enter2(machine->ring.enter_ring_fd, 0, 1, IORING_ENTER_GETEVENTS, NULL, 0);
+     if (!ret) {
+       um_sidecar_signal_wake(machine);
+     }
+   }
+   return NULL;
+ }
+
+ void um_sidecar_setup(struct um *machine) {
+   if (machine->sidecar_thread) return;
+
+   int ret;
+   pthread_attr_t attr;
+
+   ret = pthread_attr_init(&attr);
+   RAISE_ON_ERR(ret);
+
+   ret = pthread_attr_setstacksize(&attr, SIDECAR_THREAD_STACK_SIZE);
+   RAISE_ON_ERR(ret);
+
+   sigset_t sigmask;
+   sigemptyset(&sigmask);
+   ret = pthread_attr_setsigmask_np(&attr, &sigmask);
+   RAISE_ON_ERR(ret);
+
+   ret = pthread_create(&machine->sidecar_thread, &attr, sidecar_start, machine);
+   RAISE_ON_ERR(ret);
+
+   ret = pthread_attr_destroy(&attr);
+   RAISE_ON_ERR(ret);
+ }
+
+
+ void um_sidecar_teardown(struct um *machine) {
+   if (machine->sidecar_thread) {
+     pthread_cancel(machine->sidecar_thread);
+     pthread_join(machine->sidecar_thread, NULL);
+
+     machine->sidecar_thread = 0;
+   }
+ }
data/ext/um/um_stream.c CHANGED
@@ -393,3 +393,34 @@ void resp_encode(struct um_write_buffer *buf, VALUE obj) {
  um_raise_internal_error("Can't encode object");
  }
  }
+
+ void resp_encode_cmd(struct um_write_buffer *buf, int argc, VALUE *argv) {
+ char tmp1[48];
+ char tmp2[60];
+
+ sprintf(tmp1, "*%d\r\n", argc);
+ write_buffer_append_cstr(buf, tmp1);
+ for (int i = 0; i < argc; i++) {
+ switch (TYPE(argv[i])) {
+ case T_FIXNUM:
+ sprintf(tmp1, "%ld", NUM2LONG(argv[i]));
+ sprintf(tmp2, "$%ld\r\n%s\r\n", strlen(tmp1), (char *)tmp1);
+ write_buffer_append_cstr(buf, tmp2);
+ break;
+ case T_FLOAT:
+ sprintf(tmp1, "%lg", NUM2DBL(argv[i]));
+ sprintf(tmp2, "$%ld\r\n%s\r\n", strlen(tmp1), (char *)tmp1);
+ write_buffer_append_cstr(buf, tmp2);
+ break;
+ case T_STRING:
+ write_buffer_append_resp_bulk_string(buf, argv[i]);
+ break;
+ case T_SYMBOL:
+ write_buffer_append_resp_bulk_string(buf, rb_sym_to_s(argv[i]));
+ break;
+ default:
+ um_raise_internal_error("Can't encode object");
+ }
+ }
+ return;
+ }
@@ -84,6 +84,18 @@ VALUE Stream_resp_encode(VALUE self, VALUE str, VALUE obj) {
  return str;
  }

+ VALUE Stream_resp_encode_cmd(int argc, VALUE *argv, VALUE self) {
+ struct um_write_buffer buf;
+ VALUE str;
+ rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
+ str = argv[0];
+ write_buffer_init(&buf, str);
+ rb_str_modify(str);
+ resp_encode_cmd(&buf, argc - 1, argv + 1);
+ write_buffer_update_len(&buf);
+ return str;
+ }
+
  void Init_Stream(void) {
  VALUE cStream = rb_define_class_under(cUM, "Stream", rb_cObject);
  rb_define_alloc_func(cStream, Stream_allocate);
@@ -94,7 +106,9 @@ void Init_Stream(void) {
  rb_define_method(cStream, "get_string", Stream_get_string, 2);

  rb_define_method(cStream, "resp_decode", Stream_resp_decode, 0);
+
  rb_define_singleton_method(cStream, "resp_encode", Stream_resp_encode, 2);
+ rb_define_singleton_method(cStream, "resp_encode_cmd", Stream_resp_encode_cmd, -1);

  eStreamRESPError = rb_define_class_under(cStream, "RESPError", rb_eStandardError);
  }
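The new resp_encode_cmd singleton method takes a destination string followed by one or more command elements and appends them as a RESP array of bulk strings. A rough usage sketch, assuming the UM::Stream constant path used elsewhere in the gem; the output shown is inferred from the C encoder above, not captured from a run:

# Sketch: encode a command into an unfrozen string buffer.
buf = UM::Stream.resp_encode_cmd(+"", "SET", :foo, 42)
# expected to produce something like "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$2\r\n42\r\n"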