uringmachine 0.5 → 0.6

data/ext/um/um.c CHANGED
@@ -5,7 +5,6 @@ void um_setup(VALUE self, struct um *machine) {
   memset(machine, 0, sizeof(struct um));
 
   RB_OBJ_WRITE(self, &machine->self, self);
-  RB_OBJ_WRITE(self, &machine->poll_fiber, Qnil);
 
   unsigned prepared_limit = 4096;
   unsigned flags = IORING_SETUP_SUBMIT_ALL | IORING_SETUP_COOP_TASKRUN;
@@ -76,7 +75,7 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
   if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
 
   op->flags |= OP_F_COMPLETED;
-  if (unlikely(op->flags & OP_F_TRANSIENT))
+  if (op->flags & OP_F_TRANSIENT)
     um_op_transient_remove(machine, op);
 
   if (op->flags & OP_F_MULTISHOT) {
@@ -89,6 +88,8 @@ static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe)
     op->result.flags = cqe->flags;
   }
 
+  if (op->flags & OP_F_ASYNC) return;
+
   um_runqueue_push(machine, op);
 }
 
@@ -181,7 +182,7 @@ inline VALUE um_fiber_switch(struct um *machine) {
   }
 }
 
-static inline void um_submit_cancel_op(struct um *machine, struct um_op *op) {
+void um_submit_cancel_op(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
   io_uring_prep_cancel64(sqe, (long long)op, 0);
 }
@@ -216,6 +217,8 @@ inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind)
   case OP_ACCEPT_MULTISHOT:
   case OP_READ_MULTISHOT:
   case OP_RECV_MULTISHOT:
+  case OP_TIMEOUT_MULTISHOT:
+  case OP_SLEEP_MULTISHOT:
     op->flags |= OP_F_MULTISHOT;
   default:
   }
@@ -261,7 +264,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   static ID ID_new = 0;
   if (!ID_new) ID_new = rb_intern("new");
 
-  struct um_op *op = malloc(sizeof(struct um_op));
+  struct um_op *op = um_op_alloc(machine);
   um_prep_op(machine, op, OP_TIMEOUT);
   op->ts = um_double_to_timespec(NUM2DBL(interval));
   RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
@@ -297,6 +300,33 @@ VALUE um_sleep(struct um *machine, double duration) {
   return raise_if_exception(ret);
 }
 
+// VALUE um_periodically(struct um *machine, double interval) {
+//   struct um_op op;
+//   VALUE ret = Qnil;
+//   um_prep_op(machine, &op, OP_SLEEP_MULTISHOT);
+//   op.ts = um_double_to_timespec(interval);
+//   op.flags |= OP_F_MULTISHOT;
+//   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+//   io_uring_prep_timeout(sqe, &op.ts, 0, IORING_TIMEOUT_MULTISHOT);
+
+//   while (true) {
+//     ret = um_fiber_switch(machine);
+
+//     if (!um_op_completed_p(&op)) {
+//       um_cancel_and_wait(machine, &op);
+//       break;
+//     }
+//     else {
+//       if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
+//       ret = DBL2NUM(interval);
+//     }
+//   }

+//   RB_GC_GUARD(ret);
+//   return raise_if_exception(ret);
+
+// }
+
 inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
   struct um_op op;
   um_prep_op(machine, &op, OP_READ);
@@ -701,3 +731,44 @@ VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
   struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
   return rb_ensure(read_recv_each_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
 }
+
+VALUE periodically_begin(VALUE arg) {
+  struct op_ctx *ctx = (struct op_ctx *)arg;
+  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
+  io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
+
+  while (true) {
+    VALUE ret = um_fiber_switch(ctx->machine);
+    if (!um_op_completed_p(ctx->op))
+      return raise_if_exception(ret);
+
+    int more = false;
+    struct um_op_result *result = &ctx->op->result;
+    while (result) {
+      more = (result->flags & IORING_CQE_F_MORE);
+      if (result->res < 0 && result->res != -ETIME) {
+        um_op_multishot_results_clear(ctx->machine, ctx->op);
+        return Qnil;
+      }
+      rb_yield(Qnil);
+      result = result->next;
+    }
+    um_op_multishot_results_clear(ctx->machine, ctx->op);
+    if (more)
+      ctx->op->flags &= ~OP_F_COMPLETED;
+    else
+      break;
+  }
+
+  return Qnil;
+}
+
+VALUE um_periodically(struct um *machine, double interval) {
+  struct um_op op;
+  um_prep_op(machine, &op, OP_SLEEP_MULTISHOT);
+  op.ts = um_double_to_timespec(interval);
+
+  struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
+  return rb_ensure(periodically_begin, (VALUE)&ctx, multishot_ensure, (VALUE)&ctx);
+}
+
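Note: `um_periodically` above drives a multishot timeout SQE and calls `rb_yield(Qnil)` once per timer CQE, so the Ruby-facing `#periodically` method (registered on the machine class in um_class.c below) is a block iterator. A minimal usage sketch; the `UringMachine` class name is inferred from the "UringMachine::AsyncOp" type string later in this diff, not shown directly here:

  machine = UringMachine.new
  ticks = 0
  machine.periodically(0.5) do   # yields once per elapsed 0.5s interval
    ticks += 1
    break if ticks == 3          # break exits via rb_ensure, so multishot_ensure
  end                            # (not shown in this diff) can cancel the pending timeout
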
data/ext/um/um.h CHANGED
@@ -1,7 +1,7 @@
 #ifndef UM_H
 #define UM_H
 
-#include "ruby.h"
+#include <ruby.h>
 #include <liburing.h>
 
 // debugging
@@ -43,13 +43,16 @@ enum op_kind {
 
   OP_ACCEPT_MULTISHOT,
   OP_READ_MULTISHOT,
-  OP_RECV_MULTISHOT
+  OP_RECV_MULTISHOT,
+  OP_TIMEOUT_MULTISHOT,
+  OP_SLEEP_MULTISHOT
 };
 
-#define OP_F_COMPLETED (1U << 0)
-#define OP_F_TRANSIENT (1U << 1)
-#define OP_F_IGNORE_CANCELED (1U << 2)
-#define OP_F_MULTISHOT (1U << 3)
+#define OP_F_COMPLETED        (1U << 0) // op is completed (set on each CQE for multishot ops)
+#define OP_F_TRANSIENT        (1U << 1) // op is heap allocated
+#define OP_F_ASYNC            (1U << 2) // op belongs to an AsyncOp
+#define OP_F_IGNORE_CANCELED  (1U << 3) // CQE with -ECANCELED should be ignored
+#define OP_F_MULTISHOT        (1U << 4) // op is multishot
 
 struct um_op_result {
   __s32 res;
@@ -66,6 +69,7 @@ struct um_op {
 
   VALUE fiber;
   VALUE value;
+  VALUE async_op;
 
   struct um_op_result result;
   struct um_op_result *multishot_result_tail;
@@ -93,7 +97,6 @@ struct buf_ring_descriptor {
 
 struct um {
   VALUE self;
-  VALUE poll_fiber;
 
   struct um_buffer *buffer_freelist;
 
@@ -137,10 +140,19 @@ struct um_queue {
   uint32_t count;
 };
 
+struct um_async_op {
+  VALUE self;
+
+  struct um *machine;
+  struct um_op *op;
+};
+
 extern VALUE cUM;
 extern VALUE cMutex;
 extern VALUE cQueue;
+extern VALUE cAsyncOp;
 
+struct um *um_get_machine(VALUE self);
 void um_setup(VALUE self, struct um *machine);
 void um_teardown(struct um *machine);
 
@@ -179,6 +191,7 @@ struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op);
 
 VALUE um_fiber_switch(struct um *machine);
 VALUE um_await(struct um *machine);
+void um_submit_cancel_op(struct um *machine, struct um_op *op);
 void um_cancel_and_wait(struct um *machine, struct um_op *op);
 int um_check_completion(struct um *machine, struct um_op *op);
 
@@ -188,6 +201,7 @@ void um_schedule(struct um *machine, VALUE fiber, VALUE value);
 VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);
 
 VALUE um_sleep(struct um *machine, double duration);
+VALUE um_periodically(struct um *machine, double interval);
 VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
 VALUE um_read_each(struct um *machine, int fd, int bgid);
 VALUE um_write(struct um *machine, int fd, VALUE str, int len);
@@ -207,6 +221,12 @@ VALUE um_listen(struct um *machine, int fd, int backlog);
 VALUE um_getsockopt(struct um *machine, int fd, int level, int opt);
 VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value);
 
+void um_async_op_set(VALUE self, struct um *machine, struct um_op *op);
+VALUE um_async_op_await(struct um_async_op *async_op);
+void um_async_op_cancel(struct um_async_op *async_op);
+
+VALUE um_prep_timeout(struct um *machine, double interval);
+
 struct um_mutex *Mutex_data(VALUE self);
 struct um_queue *Queue_data(VALUE self);
 
@@ -224,4 +244,6 @@ VALUE um_queue_shift(struct um *machine, struct um_queue *queue);
 
 void um_define_net_constants(VALUE mod);
 
+// void Init_micro_ssl(VALUE mod);
+
 #endif // UM_H
@@ -0,0 +1,40 @@
+#include "um.h"
+#include <stdlib.h>
+
+VALUE um_prep_timeout(struct um *machine, double interval) {
+  static ID ID_new = 0;
+  if (!ID_new) ID_new = rb_intern("new");
+
+  struct um_op *op = malloc(sizeof(struct um_op));
+  um_prep_op(machine, op, OP_TIMEOUT);
+  op->ts = um_double_to_timespec(interval);
+  op->flags = OP_F_TRANSIENT | OP_F_ASYNC;
+
+  VALUE obj = rb_funcall(cAsyncOp, rb_intern_const("new"), 0);
+  um_async_op_set(obj, machine, op);
+
+  RB_OBJ_WRITE(machine->self, &op->async_op, obj);
+
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
+
+  um_op_transient_add(machine, op);
+
+  return obj;
+}
+
+VALUE um_async_op_await(struct um_async_op *async_op) {
+  RB_OBJ_WRITE(async_op->machine->self, &async_op->op->fiber, rb_fiber_current());
+  async_op->op->flags &= ~OP_F_ASYNC;
+
+  VALUE ret = um_fiber_switch(async_op->machine);
+  if (!um_op_completed_p(async_op->op))
+    um_cancel_and_wait(async_op->machine, async_op->op);
+
+  raise_if_exception(ret);
+  return INT2NUM(async_op->op->result.res);
+}
+
+void um_async_op_cancel(struct um_async_op *async_op) {
+  um_submit_cancel_op(async_op->machine, async_op->op);
+}
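These three functions (in a new source file) implement the machine side of the AsyncOp feature introduced in this release: `um_prep_timeout` heap-allocates a transient op, flags it `OP_F_TRANSIENT | OP_F_ASYNC`, submits the timeout SQE immediately and returns the wrapping Ruby object. Because of the new `OP_F_ASYNC` early return in `um_process_cqe` (um.c above), the completion is recorded on the op without scheduling any fiber; `um_async_op_await` later clears the flag and parks the calling fiber. A hedged sketch of the resulting fire-and-forget usage, via the `UringMachine::AsyncOp` class defined in the next file:

  machine = UringMachine.new
  op = machine.prep_timeout(1)  # SQE submitted now; returns a UringMachine::AsyncOp
  machine.sleep(2)              # machine processes the timeout CQE while this fiber sleeps
  op.done?                      # => true: completion was recorded without waking any fiber
  op.result                     # => raw CQE result (-ETIME for normal expiry); nil while pending
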
@@ -0,0 +1,136 @@
+#include "um.h"
+#include <stdlib.h>
+
+VALUE cAsyncOp;
+
+VALUE SYM_timeout;
+
+static void AsyncOp_mark(void *ptr) {
+  struct um_async_op *async_op = ptr;
+  rb_gc_mark_movable(async_op->self);
+  rb_gc_mark_movable(async_op->machine->self);
+}
+
+static void AsyncOp_compact(void *ptr) {
+  struct um_async_op *async_op = ptr;
+  async_op->self = rb_gc_location(async_op->self);
+}
+
+static size_t AsyncOp_size(const void *ptr) {
+  return sizeof(struct um_async_op);
+}
+
+static void AsyncOp_free(void *ptr) {
+  struct um_async_op *async_op = ptr;
+  um_op_free(async_op->machine, async_op->op);
+  free(ptr);
+}
+
+static const rb_data_type_t AsyncOp_type = {
+  "UringMachine::AsyncOp",
+  {AsyncOp_mark, AsyncOp_free, AsyncOp_size, AsyncOp_compact},
+  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+};
+
+static VALUE AsyncOp_allocate(VALUE klass) {
+  struct um_async_op *async_op = malloc(sizeof(struct um_async_op));
+  return TypedData_Wrap_Struct(klass, &AsyncOp_type, async_op);
+}
+
+inline struct um_async_op *AsyncOp_data(VALUE self) {
+  return RTYPEDDATA_DATA(self);
+}
+
+VALUE AsyncOp_initialize(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  memset(async_op, 0, sizeof(struct um_async_op));
+  async_op->self = self;
+  return self;
+}
+
+void um_async_op_set(VALUE self, struct um *machine, struct um_op *op) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  async_op->machine = machine;
+  async_op->op = op;
+}
+
+inline void raise_on_missing_op(struct um_async_op *async_op) {
+  if (!async_op->op)
+    rb_raise(rb_eRuntimeError, "Missing op");
+}
+
+inline int async_op_is_done(struct um_async_op *async_op) {
+  return (async_op->op->flags & OP_F_COMPLETED);
+}
+
+VALUE AsyncOp_kind(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  switch(async_op->op->kind) {
+    case OP_TIMEOUT:
+      return SYM_timeout;
+    default:
+      rb_raise(rb_eRuntimeError, "Invalid op kind");
+  }
+}
+
+VALUE AsyncOp_done_p(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  return async_op_is_done(async_op) ? Qtrue : Qfalse;
+}
+
+VALUE AsyncOp_result(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  return async_op_is_done(async_op) ? INT2NUM(async_op->op->result.res) : Qnil;
+}
+
+VALUE AsyncOp_cancelled_p(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  if (!async_op_is_done(async_op)) return Qnil;
+
+  return (async_op->op->result.res == -ECANCELED) ? Qtrue : Qfalse;
+}
+
+VALUE AsyncOp_await(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  if (async_op_is_done(async_op))
+    return INT2NUM(async_op->op->result.res);
+
+  return um_async_op_await(async_op);
+}
+
+VALUE AsyncOp_cancel(VALUE self) {
+  struct um_async_op *async_op = AsyncOp_data(self);
+  raise_on_missing_op(async_op);
+
+  if (!async_op_is_done(async_op))
+    um_async_op_cancel(async_op);
+
+  return self;
+}
+
+void Init_AsyncOp(void) {
+  cAsyncOp = rb_define_class_under(cUM, "AsyncOp", rb_cObject);
+  rb_define_alloc_func(cAsyncOp, AsyncOp_allocate);
+
+  rb_define_method(cAsyncOp, "initialize", AsyncOp_initialize, 0);
+  rb_define_method(cAsyncOp, "kind", AsyncOp_kind, 0);
+  rb_define_method(cAsyncOp, "done?", AsyncOp_done_p, 0);
+  rb_define_method(cAsyncOp, "result", AsyncOp_result, 0);
+  rb_define_method(cAsyncOp, "cancelled?", AsyncOp_cancelled_p, 0);
+
+  rb_define_method(cAsyncOp, "await", AsyncOp_await, 0);
+  rb_define_method(cAsyncOp, "join", AsyncOp_await, 0);
+  rb_define_method(cAsyncOp, "cancel", AsyncOp_cancel, 0);
+
+  SYM_timeout = ID2SYM(rb_intern("timeout"));
+}
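The class above wires the async op into Ruby: `#await` (aliased as `#join`) resumes the machine loop until the op completes, and `#cancel` submits an async cancel for a pending op. A short usage sketch based on the methods registered in `Init_AsyncOp`; result codes follow the io_uring convention, so an expired timeout reports -ETIME and a cancelled one -ECANCELED:

  machine = UringMachine.new
  op = machine.prep_timeout(5)
  res = op.await          # parks the current fiber; returns the raw result code (-ETIME here)
  op.cancelled?           # => false; true only when the recorded result is -ECANCELED

  slow = machine.prep_timeout(60)
  slow.cancel             # submits an async cancel SQE; a later #await would return -ECANCELED
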
data/ext/um/um_class.c CHANGED
@@ -14,7 +14,6 @@ static void UM_mark(void *ptr) {
 static void UM_compact(void *ptr) {
   struct um *machine = ptr;
   machine->self = rb_gc_location(machine->self);
-  machine->poll_fiber = rb_gc_location(machine->poll_fiber);
 
   um_op_list_compact(machine, machine->transient_head);
   um_op_list_compact(machine, machine->runqueue_head);
@@ -41,7 +40,7 @@ static VALUE UM_allocate(VALUE klass) {
   return TypedData_Wrap_Struct(klass, &UM_type, machine);
 }
 
-inline struct um *get_machine(VALUE self) {
+inline struct um *um_get_machine(VALUE self) {
   struct um *machine = RTYPEDDATA_DATA(self);
   if (!machine->ring_initialized)
     rb_raise(rb_eRuntimeError, "Machine not initialized");
@@ -55,45 +54,50 @@ VALUE UM_initialize(VALUE self) {
 }
 
 VALUE UM_setup_buffer_ring(VALUE self, VALUE size, VALUE count) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   int bgid = um_setup_buffer_ring(machine, NUM2UINT(size), NUM2UINT(count));
   return INT2NUM(bgid);
 }
 
 VALUE UM_pending_count(VALUE self) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return INT2NUM(machine->pending_count);
 }
 
 VALUE UM_snooze(VALUE self) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   um_schedule(machine, rb_fiber_current(), Qnil);
   return um_await(machine);
 }
 
 VALUE UM_yield(VALUE self) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_await(machine);
 }
 
 VALUE UM_schedule(VALUE self, VALUE fiber, VALUE value) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   um_schedule(machine, fiber, value);
   return self;
 }
 
 VALUE UM_timeout(VALUE self, VALUE interval, VALUE class) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_timeout(machine, interval, class);
 }
 
 VALUE UM_sleep(VALUE self, VALUE duration) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_sleep(machine, NUM2DBL(duration));
 }
 
+VALUE UM_periodically(VALUE self, VALUE interval) {
+  struct um *machine = um_get_machine(self);
+  return um_periodically(machine, NUM2DBL(interval));
+}
+
 VALUE UM_read(int argc, VALUE *argv, VALUE self) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   VALUE fd;
   VALUE buffer;
   VALUE maxlen;
@@ -108,7 +112,7 @@ VALUE UM_read(int argc, VALUE *argv, VALUE self) {
 
 VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
 #ifdef HAVE_IO_URING_PREP_READ_MULTISHOT
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_read_each(machine, NUM2INT(fd), NUM2INT(bgid));
 #else
   rb_raise(rb_eRuntimeError, "Not supported by kernel");
@@ -116,7 +120,7 @@ VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
 }
 
 VALUE UM_write(int argc, VALUE *argv, VALUE self) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   VALUE fd;
   VALUE buffer;
   VALUE len;
@@ -127,27 +131,27 @@ VALUE UM_write(int argc, VALUE *argv, VALUE self) {
 }
 
 VALUE UM_close(VALUE self, VALUE fd) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_close(machine, NUM2INT(fd));
 }
 
 VALUE UM_accept(VALUE self, VALUE fd) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_accept(machine, NUM2INT(fd));
 }
 
 VALUE UM_accept_each(VALUE self, VALUE fd) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_accept_each(machine, NUM2INT(fd));
 }
 
 VALUE UM_socket(VALUE self, VALUE domain, VALUE type, VALUE protocol, VALUE flags) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_socket(machine, NUM2INT(domain), NUM2INT(type), NUM2INT(protocol), NUM2UINT(flags));
 }
 
 VALUE UM_connect(VALUE self, VALUE fd, VALUE host, VALUE port) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
 
   struct sockaddr_in addr;
   memset(&addr, 0, sizeof(addr));
@@ -159,17 +163,17 @@ VALUE UM_connect(VALUE self, VALUE fd, VALUE host, VALUE port) {
 }
 
 VALUE UM_send(VALUE self, VALUE fd, VALUE buffer, VALUE len, VALUE flags) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_send(machine, NUM2INT(fd), buffer, NUM2INT(len), NUM2INT(flags));
 }
 
 VALUE UM_recv(VALUE self, VALUE fd, VALUE buffer, VALUE maxlen, VALUE flags) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_recv(machine, NUM2INT(fd), buffer, NUM2INT(maxlen), NUM2INT(flags));
 }
 
 VALUE UM_recv_each(VALUE self, VALUE fd, VALUE bgid, VALUE flags) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_recv_each(machine, NUM2INT(fd), NUM2INT(bgid), NUM2INT(flags));
 }
 
@@ -181,7 +185,7 @@ VALUE UM_bind(VALUE self, VALUE fd, VALUE host, VALUE port) {
   addr.sin_port = htons(NUM2INT(port));
 
 #ifdef HAVE_IO_URING_PREP_BIND
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_bind(machine, NUM2INT(fd), (struct sockaddr *)&addr, sizeof(addr));
 #else
   int res = bind(NUM2INT(fd), (struct sockaddr *)&addr, sizeof(addr));
@@ -193,7 +197,7 @@ VALUE UM_bind(VALUE self, VALUE fd, VALUE host, VALUE port) {
 
 VALUE UM_listen(VALUE self, VALUE fd, VALUE backlog) {
 #ifdef HAVE_IO_URING_PREP_LISTEN
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_listen(machine, NUM2INT(fd), NUM2INT(backlog));
 #else
   int res = listen(NUM2INT(fd), NUM2INT(backlog));
@@ -215,43 +219,43 @@ static inline int numeric_value(VALUE value) {
 }
 
 VALUE UM_getsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_getsockopt(machine, NUM2INT(fd), NUM2INT(level), NUM2INT(opt));
 }
 
 VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_setsockopt(machine, NUM2INT(fd), NUM2INT(level), NUM2INT(opt), numeric_value(value));
 }
 
 #ifdef HAVE_IO_URING_PREP_FUTEX
 
 VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   struct um_mutex *mutex_data = Mutex_data(mutex);
   return um_mutex_synchronize(machine, &mutex_data->state);
 }
 
 VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   struct um_queue *que = Queue_data(queue);
   return um_queue_push(machine, que, value);
 }
 
 VALUE UM_queue_pop(VALUE self, VALUE queue) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   struct um_queue *que = Queue_data(queue);
   return um_queue_pop(machine, que);
 }
 
 VALUE UM_queue_unshift(VALUE self, VALUE queue, VALUE value) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   struct um_queue *que = Queue_data(queue);
   return um_queue_unshift(machine, que, value);
 }
 
 VALUE UM_queue_shift(VALUE self, VALUE queue) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   struct um_queue *que = Queue_data(queue);
   return um_queue_shift(machine, que);
 }
@@ -270,7 +274,7 @@ VALUE UM_open_ensure(VALUE arg) {
 }
 
 VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   // TODO: take optional perm (mode) arg
   VALUE fd = um_open(machine, pathname, NUM2INT(flags), 0666);
   if (rb_block_given_p()) {
@@ -282,10 +286,15 @@ VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
 }
 
 VALUE UM_waitpid(VALUE self, VALUE pid, VALUE options) {
-  struct um *machine = get_machine(self);
+  struct um *machine = um_get_machine(self);
   return um_waitpid(machine, NUM2INT(pid), NUM2INT(options));
 }
 
+VALUE UM_prep_timeout(VALUE self, VALUE interval) {
+  struct um *machine = um_get_machine(self);
+  return um_prep_timeout(machine, NUM2DBL(interval));
+}
+
 VALUE UM_pipe(VALUE self) {
   int fds[2];
   int ret = pipe(fds);
@@ -325,6 +334,7 @@ void Init_UM(void) {
   rb_define_method(cUM, "read", UM_read, -1);
   rb_define_method(cUM, "read_each", UM_read_each, 2);
   rb_define_method(cUM, "sleep", UM_sleep, 1);
+  rb_define_method(cUM, "periodically", UM_periodically, 1);
   rb_define_method(cUM, "write", UM_write, -1);
 
   rb_define_method(cUM, "waitpid", UM_waitpid, 2);
@@ -341,6 +351,8 @@ void Init_UM(void) {
   rb_define_method(cUM, "setsockopt", UM_setsockopt, 4);
   rb_define_method(cUM, "socket", UM_socket, 4);
 
+  rb_define_method(cUM, "prep_timeout", UM_prep_timeout, 1);
+
 #ifdef HAVE_IO_URING_PREP_FUTEX
   rb_define_method(cUM, "pop", UM_queue_pop, 1);
   rb_define_method(cUM, "push", UM_queue_push, 2);
@@ -349,5 +361,7 @@ void Init_UM(void) {
   rb_define_method(cUM, "unshift", UM_queue_unshift, 2);
 #endif
 
+  // Init_micro_ssl(cUM);
+
   um_define_net_constants(cUM);
 }
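
Taken together, the registrations above give the machine two new timer entry points in 0.6: `#periodically`, a blocking block iterator, and `#prep_timeout`, which returns an awaitable `UringMachine::AsyncOp`. A compact contrast of the two call styles (a sketch, not from the package's own docs):

  machine.periodically(1) { puts "tick" }  # blocks this fiber, yielding once per second

  op = machine.prep_timeout(1)             # returns immediately with an AsyncOp
  op.await                                 # blocks only when explicitly awaited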