uringmachine 0.4 → 0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +2 -1
  3. data/CHANGELOG.md +14 -0
  4. data/README.md +44 -1
  5. data/TODO.md +12 -3
  6. data/examples/bm_snooze.rb +89 -0
  7. data/examples/bm_write.rb +56 -0
  8. data/examples/dns_client.rb +12 -0
  9. data/examples/http_server.rb +42 -43
  10. data/examples/server_client.rb +64 -0
  11. data/examples/snooze.rb +44 -0
  12. data/examples/write_dev_null.rb +16 -0
  13. data/ext/um/extconf.rb +24 -14
  14. data/ext/um/um.c +468 -414
  15. data/ext/um/um.h +129 -39
  16. data/ext/um/um_buffer.c +49 -0
  17. data/ext/um/um_class.c +148 -24
  18. data/ext/um/um_const.c +30 -1
  19. data/ext/um/um_ext.c +4 -0
  20. data/ext/um/um_mutex_class.c +47 -0
  21. data/ext/um/um_op.c +86 -111
  22. data/ext/um/um_queue_class.c +58 -0
  23. data/ext/um/um_sync.c +273 -0
  24. data/ext/um/um_utils.c +1 -1
  25. data/lib/uringmachine/dns_resolver.rb +84 -0
  26. data/lib/uringmachine/version.rb +1 -1
  27. data/lib/uringmachine.rb +19 -3
  28. data/supressions/ruby.supp +71 -0
  29. data/test/test_um.rb +466 -47
  30. data/vendor/liburing/.gitignore +5 -0
  31. data/vendor/liburing/CHANGELOG +1 -0
  32. data/vendor/liburing/configure +32 -0
  33. data/vendor/liburing/examples/Makefile +1 -0
  34. data/vendor/liburing/examples/reg-wait.c +159 -0
  35. data/vendor/liburing/liburing.spec +1 -1
  36. data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
  37. data/vendor/liburing/src/include/liburing.h +28 -2
  38. data/vendor/liburing/src/int_flags.h +10 -3
  39. data/vendor/liburing/src/liburing-ffi.map +13 -2
  40. data/vendor/liburing/src/liburing.map +9 -0
  41. data/vendor/liburing/src/queue.c +25 -16
  42. data/vendor/liburing/src/register.c +73 -4
  43. data/vendor/liburing/src/setup.c +46 -18
  44. data/vendor/liburing/src/setup.h +6 -0
  45. data/vendor/liburing/test/Makefile +7 -0
  46. data/vendor/liburing/test/cmd-discard.c +427 -0
  47. data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
  48. data/vendor/liburing/test/file-exit-unreg.c +48 -0
  49. data/vendor/liburing/test/io_uring_passthrough.c +2 -0
  50. data/vendor/liburing/test/io_uring_register.c +13 -2
  51. data/vendor/liburing/test/napi-test.c +1 -1
  52. data/vendor/liburing/test/no-mmap-inval.c +1 -1
  53. data/vendor/liburing/test/read-mshot-empty.c +2 -0
  54. data/vendor/liburing/test/read-mshot-stdin.c +121 -0
  55. data/vendor/liburing/test/read-mshot.c +6 -0
  56. data/vendor/liburing/test/recvsend_bundle.c +2 -2
  57. data/vendor/liburing/test/reg-fd-only.c +1 -1
  58. data/vendor/liburing/test/reg-wait.c +251 -0
  59. data/vendor/liburing/test/regbuf-clone.c +458 -0
  60. data/vendor/liburing/test/resize-rings.c +643 -0
  61. data/vendor/liburing/test/rsrc_tags.c +1 -1
  62. data/vendor/liburing/test/sqpoll-sleep.c +39 -8
  63. data/vendor/liburing/test/sqwait.c +136 -0
  64. data/vendor/liburing/test/sync-cancel.c +8 -1
  65. data/vendor/liburing/test/timeout.c +13 -8
  66. metadata +22 -4
  67. data/examples/http_server_multishot.rb +0 -57
  68. data/examples/http_server_simpler.rb +0 -34
data/ext/um/um_op.c CHANGED
@@ -1,148 +1,123 @@
  #include "um.h"

- inline struct um_result_entry *um_result_checkout(struct um *machine) {
-   if (machine->result_freelist) {
-     struct um_result_entry *entry = machine->result_freelist;
-     machine->result_freelist = entry->next;
-     return entry;
-   }
-
-   struct um_result_entry *entry = malloc(sizeof(struct um_result_entry));
-   return entry;
- }
-
- inline void um_result_checkin(struct um *machine, struct um_result_entry *entry) {
-   entry->next = machine->result_freelist;
-   machine->result_freelist = entry;
+ inline void um_op_clear(struct um *machine, struct um_op *op) {
+   memset(op, 0, sizeof(struct um_op));
+   RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
+   RB_OBJ_WRITE(machine->self, &op->value, Qnil);
  }

- inline void um_op_result_cleanup(struct um *machine, struct um_op *op) {
-   struct um_result_entry *entry = op->results_head;
-   while (entry) {
-     struct um_result_entry *next = entry->next;
-     um_result_checkin(machine, entry);
-     entry = next;
+ inline void um_op_transient_add(struct um *machine, struct um_op *op) {
+   if (machine->transient_head) {
+     op->next = machine->transient_head;
+     machine->transient_head->prev = op;
    }
-   op->results_head = op->results_tail = NULL;
+   machine->transient_head = op;
  }

- inline void um_op_result_push(struct um *machine, struct um_op *op, __s32 result, __u32 flags) {
-   struct um_result_entry *entry = um_result_checkout(machine);
-   entry->next = 0;
-   entry->result = result;
-   entry->flags = flags;
-   if (op->results_tail) {
-     op->results_tail->next = entry;
-     op->results_tail = entry;
-   }
-   else {
-     op->results_head = op->results_tail = entry;
-   }
- }
+ inline void um_op_transient_remove(struct um *machine, struct um_op *op) {
+   if (op->prev)
+     op->prev->next = op->next;
+   if (op->next)
+     op->next->prev = op->prev;

- inline int um_op_result_shift(struct um *machine, struct um_op *op, __s32 *result, __u32 *flags) {
-   if (!op->results_head) return 0;
-
-   struct um_result_entry *entry = op->results_head;
-   *result = entry->result;
-   *flags = entry->flags;
-   op->results_head = entry->next;
-   if (!op->results_head)
-     op->results_tail = NULL;
-   um_result_checkin(machine, entry);
-   return 1;
+   if (machine->transient_head == op)
+     machine->transient_head = op->next;
  }

- inline void um_op_clear(struct um_op *op) {
-   memset(op, 0, sizeof(struct um_op));
-   op->fiber = op->resume_value = Qnil;
+ inline void um_runqueue_push(struct um *machine, struct um_op *op) {
+   if (machine->runqueue_tail) {
+     op->prev = machine->runqueue_tail;
+     machine->runqueue_tail->next = op;
+     machine->runqueue_tail = op;
+   }
+   else
+     machine->runqueue_head = machine->runqueue_tail = op;
+   op->next = NULL;
  }

- inline struct um_op *um_op_checkout(struct um *machine) {
-   machine->pending_count++;
-
-   struct um_op *op = machine->op_freelist;
-   if (op)
-     machine->op_freelist = op->next;
-   else
-     op = malloc(sizeof(struct um_op));
+ inline struct um_op *um_runqueue_shift(struct um *machine) {
+   struct um_op *op = machine->runqueue_head;
+   if (!op) return NULL;

-   um_op_clear(op);
+   machine->runqueue_head = op->next;
+   if (!machine->runqueue_head)
+     machine->runqueue_tail = NULL;
    return op;
  }

- inline void um_op_checkin(struct um *machine, struct um_op *op) {
-   machine->pending_count--;
-
-   um_op_result_cleanup(machine, op);
-   op->next = machine->op_freelist;
-   machine->op_freelist = op;
+ inline void um_op_list_mark(struct um *machine, struct um_op *head) {
+   while (head) {
+     struct um_op *next = head->next;
+     rb_gc_mark_movable(head->fiber);
+     rb_gc_mark_movable(head->value);
+     head = next;
+   }
  }

- inline struct um_op *um_runqueue_find_by_fiber(struct um *machine, VALUE fiber) {
-   struct um_op *op = machine->runqueue_head;
-   while (op) {
-     if (op->fiber == fiber) return op;
-
-     op = op->next;
+ inline void um_op_list_compact(struct um *machine, struct um_op *head) {
+   while (head) {
+     struct um_op *next = head->next;
+     head->fiber = rb_gc_location(head->fiber);
+     head->value = rb_gc_location(head->value);
+     head = next;
    }
-   return NULL;
  }

- inline void um_runqueue_push(struct um *machine, struct um_op *op) {
-   if (machine->runqueue_tail) {
-     op->prev = machine->runqueue_tail;
-     machine->runqueue_tail->next = op;
-     machine->runqueue_tail = op;
-   }
-   else {
-     op->prev = NULL;
-     machine->runqueue_head = machine->runqueue_tail = op;
+ inline struct um_op_result *multishot_result_alloc(struct um *machine) {
+   if (machine->result_freelist) {
+     struct um_op_result *result = machine->result_freelist;
+     machine->result_freelist = result->next;
+     return result;
    }
-   op->next = NULL;
+   return malloc(sizeof(struct um_op_result));
  }

- inline void um_runqueue_unshift(struct um *machine, struct um_op *op) {
-   if (machine->runqueue_head) {
-     op->next = machine->runqueue_head;
-     machine->runqueue_head->prev = op;
-     machine->runqueue_head = op;
+ inline void multishot_result_free(struct um *machine, struct um_op_result *result) {
+   result->next = machine->result_freelist;
+   machine->result_freelist = result;
+ }
+
+ inline void um_op_multishot_results_push(struct um *machine, struct um_op *op, __s32 res, __u32 flags) {
+   if (!op->multishot_result_count) {
+     op->result.res = res;
+     op->result.flags = flags;
+     op->result.next = NULL;
+     op->multishot_result_tail = &op->result;
    }
    else {
-     op->next = NULL;
-     machine->runqueue_head = machine->runqueue_tail = op;
+     struct um_op_result *result = multishot_result_alloc(machine);
+     result->res = res;
+     result->flags = flags;
+     result->next = NULL;
+     op->multishot_result_tail->next = result;
+     op->multishot_result_tail = result;
    }
-   op->prev = NULL;
+   op->multishot_result_count++;
  }

- inline struct um_op *um_runqueue_shift(struct um *machine) {
-   struct um_op *op = machine->runqueue_head;
-   if (!op) return NULL;
+ inline void um_op_multishot_results_clear(struct um *machine, struct um_op *op) {
+   if (op->multishot_result_count < 1) return;

-   op->prev = NULL;
-   if (!op->next) {
-     machine->runqueue_head = machine->runqueue_tail = NULL;
+   struct um_op_result *result = op->result.next;
+   while (result) {
+     struct um_op_result *next = result->next;
+     multishot_result_free(machine, result);
+     result = next;
    }
-   else {
-     machine->runqueue_head = op->next;
-     op->next = NULL;
-   }
-   return op;
+   op->multishot_result_tail = NULL;
+   op->multishot_result_count = 0;
  }

- inline void um_free_op_linked_list(struct um *machine, struct um_op *op) {
-   while (op) {
-     struct um_op *next = op->next;
-     um_op_result_cleanup(machine, op);
-     free(op);
-     op = next;
+ inline struct um_op *um_op_alloc(struct um *machine) {
+   if (machine->op_freelist) {
+     struct um_op *op = machine->op_freelist;
+     machine->op_freelist = op->next;
+     return op;
    }
+   return malloc(sizeof(struct um_op));
  }

- inline void um_free_result_linked_list(struct um *machine, struct um_result_entry *entry) {
-   while (entry) {
-     struct um_result_entry *next = entry->next;
-     free(entry);
-     entry = next;
-   }
+ inline void um_op_free(struct um *machine, struct um_op *op) {
+   op->next = machine->op_freelist;
+   machine->op_freelist = op;
  }
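
The runqueue functions above drive fiber scheduling: a snoozed or scheduled fiber is pushed onto the runqueue tail and resumed when it reaches the head. A minimal Ruby-level sketch of this behavior (assuming #spin and #snooze work as suggested by the new examples/snooze.rb; output interleaving is illustrative):

  require 'uringmachine'

  machine = UringMachine.new

  # Each snooze pushes the current fiber's op through um_runqueue_push and
  # um_runqueue_shift, yielding control to the next runnable fiber.
  machine.spin { 3.times { puts 'one'; machine.snooze } }
  machine.spin { 3.times { puts 'two'; machine.snooze } }

  # Snooze the main fiber so the spun fibers get to run.
  8.times { machine.snooze }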
data/ext/um/um_queue_class.c ADDED
@@ -0,0 +1,58 @@
+ #include "um.h"
+ #include <stdlib.h>
+
+ VALUE cQueue;
+
+ static void Queue_mark(void *ptr) {
+   struct um_queue *queue = ptr;
+   um_queue_mark(queue);
+ }
+
+ static void Queue_compact(void *ptr) {
+   struct um_queue *queue = ptr;
+   um_queue_compact(queue);
+ }
+
+ static void Queue_free(void *ptr) {
+   struct um_queue *queue = ptr;
+   um_queue_free(queue);
+ }
+
+ static size_t Queue_size(const void *ptr) {
+   return sizeof(struct um_queue);
+ }
+
+ static const rb_data_type_t Queue_type = {
+   "UringMachineQueue",
+   {Queue_mark, Queue_free, Queue_size, Queue_compact},
+   0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+ };
+
+ static VALUE Queue_allocate(VALUE klass) {
+   struct um_queue *queue = malloc(sizeof(struct um_queue));
+   return TypedData_Wrap_Struct(klass, &Queue_type, queue);
+ }
+
+ inline struct um_queue *Queue_data(VALUE self) {
+   return RTYPEDDATA_DATA(self);
+ }
+
+ VALUE Queue_initialize(VALUE self) {
+   struct um_queue *queue = Queue_data(self);
+   RB_OBJ_WRITE(self, &queue->self, self);
+   um_queue_init(queue);
+   return self;
+ }
+
+ VALUE Queue_count(VALUE self) {
+   struct um_queue *queue = Queue_data(self);
+   return UINT2NUM(queue->count);
+ }
+
+ void Init_Queue(void) {
+   cQueue = rb_define_class_under(cUM, "Queue", rb_cObject);
+   rb_define_alloc_func(cQueue, Queue_allocate);
+
+   rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+   rb_define_method(cQueue, "count", Queue_count, 0);
+ }
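
At the Ruby level, UM::Queue pairs with the machine's queue operations. A minimal sketch (method names taken from their use in dns_resolver.rb below; push appends an item, shift blocks the calling fiber until an item is available):

  require 'uringmachine'

  machine = UringMachine.new
  queue = UM::Queue.new

  machine.spin do
    2.times { puts "got #{machine.shift(queue)}" }
  end

  # push marks the queue ready and wakes a waiting fiber with a transient
  # futex_wake SQE (see um_queue_add in um_sync.c below).
  machine.push(queue, :job1)
  machine.push(queue, :job2)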
data/ext/um/um_sync.c ADDED
@@ -0,0 +1,273 @@
+ #include "um.h"
+ #include <stdatomic.h>
+ #include <linux/futex.h>
+
+ #define FUTEX2_SIZE_U32 0x02
+
+ void um_futex_wait(struct um *machine, uint32_t *futex, uint32_t expect) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_FUTEX_WAIT);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   io_uring_prep_futex_wait(
+     sqe, (uint32_t *)futex, expect, FUTEX_BITSET_MATCH_ANY,
+     FUTEX2_SIZE_U32, 0
+   );
+
+   VALUE ret = um_fiber_switch(machine);
+   if (!um_op_completed_p(&op))
+     um_cancel_and_wait(machine, &op);
+   else {
+     if (op.result.res != -EAGAIN)
+       um_raise_on_error_result(op.result.res);
+   }
+
+   RB_GC_GUARD(ret);
+   raise_if_exception(ret);
+ }
+
+ void um_futex_wake(struct um *machine, uint32_t *futex, uint32_t num_waiters) {
+   struct um_op op;
+   um_prep_op(machine, &op, OP_FUTEX_WAKE);
+   struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
+   // submit futex_wake
+   io_uring_prep_futex_wake(
+     sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY,
+     FUTEX2_SIZE_U32, 0
+   );
+
+   VALUE ret = um_fiber_switch(machine);
+   um_check_completion(machine, &op);
+
+   RB_GC_GUARD(ret);
+   raise_if_exception(ret);
+ }
+
+ void um_futex_wake_transient(struct um *machine, uint32_t *futex, uint32_t num_waiters) {
+   struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
+   io_uring_prep_futex_wake(
+     sqe, (uint32_t *)futex, num_waiters, FUTEX_BITSET_MATCH_ANY,
+     FUTEX2_SIZE_U32, 0
+   );
+ }
+
+
+ #define MUTEX_LOCKED 1
+ #define MUTEX_UNLOCKED 0
+
+ void um_mutex_init(struct um_mutex *mutex) {
+   mutex->state = MUTEX_UNLOCKED;
+ }
+
+ inline void um_mutex_lock(struct um *machine, uint32_t *state) {
+   while (*state == MUTEX_LOCKED) {
+     um_futex_wait(machine, state, MUTEX_LOCKED);
+   }
+   *state = MUTEX_LOCKED;
+ }
+
+ inline void um_mutex_unlock(struct um *machine, uint32_t *state) {
+   *state = MUTEX_UNLOCKED;
+   // Wake up 1 waiting fiber
+   um_futex_wake(machine, state, 1);
+ }
+
+ struct sync_ctx {
+   struct um *machine;
+   uint32_t *state;
+ };
+
+ VALUE synchronize_begin(VALUE arg) {
+   struct sync_ctx *ctx = (struct sync_ctx *)arg;
+   um_mutex_lock(ctx->machine, ctx->state);
+   return rb_yield(Qnil);
+ }
+
+ VALUE synchronize_ensure(VALUE arg) {
+   struct sync_ctx *ctx = (struct sync_ctx *)arg;
+   um_mutex_unlock(ctx->machine, ctx->state);
+   return Qnil;
+ }
+
+ inline VALUE um_mutex_synchronize(struct um *machine, uint32_t *state) {
+   struct sync_ctx ctx = { .machine = machine, .state = state };
+   return rb_ensure(synchronize_begin, (VALUE)&ctx, synchronize_ensure, (VALUE)&ctx);
+ }
+
+ #define QUEUE_EMPTY 0
+ #define QUEUE_READY 1
+
+ inline void um_queue_init(struct um_queue *queue) {
+   queue->head = queue->tail = queue->free_head = NULL;
+   queue->state = QUEUE_EMPTY;
+   queue->count = 0;
+ }
+
+ inline void um_queue_free(struct um_queue *queue) {
+   struct um_queue_entry *entry = queue->head;
+   while (entry) {
+     struct um_queue_entry *next = entry->next;
+     free(entry);
+     entry = next;
+   }
+
+   entry = queue->free_head;
+   while (entry) {
+     struct um_queue_entry *next = entry->next;
+     free(entry);
+     entry = next;
+   }
+
+   free(queue);
+ }
+
+ inline void um_queue_mark(struct um_queue *queue) {
+   rb_gc_mark_movable(queue->self);
+   struct um_queue_entry *entry = queue->head;
+   while (entry) {
+     rb_gc_mark_movable(entry->value);
+     entry = entry->next;
+   }
+ }
+
+ inline void um_queue_compact(struct um_queue *queue) {
+   queue->self = rb_gc_location(queue->self);
+   struct um_queue_entry *entry = queue->head;
+   while (entry) {
+     entry->value = rb_gc_location(entry->value);
+     entry = entry->next;
+   }
+ }
+
+ inline struct um_queue_entry *um_queue_entry_checkout(struct um_queue *queue) {
+   struct um_queue_entry *entry = queue->free_head;
+   if (entry) {
+     queue->free_head = entry->next;
+   }
+   else
+     entry = malloc(sizeof(struct um_queue_entry));
+   return entry;
+ }
+
+ inline void um_queue_entry_checkin(struct um_queue *queue, struct um_queue_entry *entry) {
+   entry->next = queue->free_head;
+   queue->free_head = entry;
+ }
+
+ static inline void queue_add_head(struct um_queue *queue, VALUE value) {
+   struct um_queue_entry *entry = um_queue_entry_checkout(queue);
+
+   entry->next = queue->head;
+   if (queue->head) {
+     queue->head->prev = entry;
+     queue->head = entry;
+   }
+   else
+     queue->head = queue->tail = entry;
+   entry->prev = NULL;
+   RB_OBJ_WRITE(queue->self, &entry->value, value);
+ }
+
+ static inline void queue_add_tail(struct um_queue *queue, VALUE value) {
+   struct um_queue_entry *entry = um_queue_entry_checkout(queue);
+
+   entry->prev = queue->tail;
+   if (queue->tail) {
+     queue->tail->next = entry;
+     queue->tail = entry;
+   }
+   else
+     queue->head = queue->tail = entry;
+   entry->next = NULL;
+   RB_OBJ_WRITE(queue->self, &entry->value, value);
+ }
+
+ VALUE queue_remove_head(struct um_queue *queue) {
+   struct um_queue_entry *entry = queue->head;
+   queue->head = entry->next;
+   if (!queue->head) queue->tail = NULL;
+
+   VALUE v = entry->value;
+   um_queue_entry_checkin(queue, entry);
+   return v;
+ }
+
+ VALUE queue_remove_tail(struct um_queue *queue) {
+   struct um_queue_entry *entry = queue->tail;
+   queue->tail = entry->prev;
+   if (!queue->tail) queue->head = NULL;
+
+   VALUE v = entry->value;
+   um_queue_entry_checkin(queue, entry);
+   return v;
+ }
+
+ static inline VALUE um_queue_add(struct um *machine, struct um_queue *queue, VALUE value, int add_head) {
+   if (add_head) queue_add_head(queue, value);
+   else queue_add_tail(queue, value);
+
+   queue->count++;
+
+   queue->state = QUEUE_READY;
+   if (queue->num_waiters)
+     um_futex_wake_transient(machine, &queue->state, 1);
+   return queue->self;
+ }
+
+ VALUE um_queue_push(struct um *machine, struct um_queue *queue, VALUE value) {
+   return um_queue_add(machine, queue, value, false);
+ }
+
+ VALUE um_queue_unshift(struct um *machine, struct um_queue *queue, VALUE value) {
+   return um_queue_add(machine, queue, value, true);
+ }
+
+ enum queue_op { QUEUE_POP, QUEUE_SHIFT };
+
+ struct queue_wait_ctx {
+   struct um *machine;
+   struct um_queue *queue;
+   enum queue_op op;
+ };
+
+ VALUE um_queue_remove_begin(VALUE arg) {
+   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
+
+   ctx->queue->num_waiters++;
+   while (ctx->queue->state == QUEUE_EMPTY) {
+     um_futex_wait(ctx->machine, &ctx->queue->state, QUEUE_EMPTY);
+   }
+
+   if (ctx->queue->state != QUEUE_READY)
+     rb_raise(rb_eRuntimeError, "Internal error: queue should be in ready state!");
+   if (!ctx->queue->tail)
+     rb_raise(rb_eRuntimeError, "Internal error: queue should not be empty!");
+
+   ctx->queue->count--;
+   return (ctx->op == QUEUE_POP ? queue_remove_tail : queue_remove_head)(ctx->queue);
+ }
+
+ VALUE um_queue_remove_ensure(VALUE arg) {
+   struct queue_wait_ctx *ctx = (struct queue_wait_ctx *)arg;
+
+   ctx->queue->num_waiters--;
+
+   if (ctx->queue->num_waiters && ctx->queue->tail) {
+     um_futex_wake_transient(ctx->machine, &ctx->queue->state, 1);
+   }
+   else if (!ctx->queue->tail) {
+     ctx->queue->state = QUEUE_EMPTY;
+   }
+
+   return Qnil;
+ }
+
+ VALUE um_queue_pop(struct um *machine, struct um_queue *queue) {
+   struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_POP };
+   return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+ }
+
+ VALUE um_queue_shift(struct um *machine, struct um_queue *queue) {
+   struct queue_wait_ctx ctx = { .machine = machine, .queue = queue, .op = QUEUE_SHIFT };
+   return rb_ensure(um_queue_remove_begin, (VALUE)&ctx, um_queue_remove_ensure, (VALUE)&ctx);
+ }
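
um_mutex_lock/um_mutex_unlock implement a futex-backed mutex: a contended fiber parks on a FUTEX_WAIT SQE instead of spinning, and unlock wakes a single waiter. A hypothetical Ruby-level sketch (UM::Mutex and a machine-level #synchronize are assumed here from um_mutex_class.c and um_mutex_synchronize; the exact method names may differ):

  require 'uringmachine'

  machine = UringMachine.new
  mutex = UM::Mutex.new

  results = []
  5.times do |i|
    machine.spin do
      # rb_ensure in um_mutex_synchronize guarantees the unlock runs even
      # if the block raises.
      machine.synchronize(mutex) { results << i }
    end
  end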
data/ext/um/um_utils.c CHANGED
@@ -24,7 +24,7 @@ inline VALUE um_raise_exception(VALUE e) {
    return rb_funcall(rb_mKernel, ID_raise, 1, e);
  }

- inline void um_raise_on_system_error(int result) {
+ inline void um_raise_on_error_result(int result) {
    if (unlikely(result < 0)) rb_syserr_fail(-result, strerror(-result));
  }

data/lib/uringmachine/dns_resolver.rb ADDED
@@ -0,0 +1,84 @@
+ # frozen_string_literal: true
+
+ require 'resolv'
+
+ class UringMachine
+   class DNSResolver
+     def initialize(machine)
+       @machine = machine
+       @requests = UM::Queue.new
+       @nameservers = get_nameservers
+       @fiber = @machine.spin { handle_requests_loop }
+       @last_id = 0
+       @cache = {}
+     end
+
+     def resolve(hostname, type)
+       @machine.push(@requests, [hostname, type, Fiber.current])
+       @machine.yield
+     end
+
+     def handle_requests_loop
+       while true
+         hostname, type, fiber = @machine.shift(@requests)
+         res = do_resolve(hostname, type)
+         @machine.schedule(fiber, res)
+       end
+     end
+
+     def get_nameservers
+       nameservers = []
+       IO.readlines('/etc/resolv.conf').each do |line|
+         if line =~ /^nameserver (.+)$/
+           nameservers << $1.split(/\s+/).first
+         end
+       end
+       nameservers
+     end
+
+     def socket_fd
+       @socket_fd ||= prepare_socket
+     end
+
+     def prepare_socket
+       fd = @machine.socket(UM::AF_INET, UM::SOCK_DGRAM, 0, 0)
+       @machine.bind(fd, '0.0.0.0', 0)
+       @machine.connect(fd, @nameservers.sample, 53)
+       fd
+     end
+
+     def do_resolve(hostname, type, try_count = 0)
+       fd = socket_fd
+       req = prepare_request_packet(hostname, type)
+       msg = req.encode
+       @machine.send(fd, msg, msg.bytesize, 0)
+
+       buf = +''
+       @machine.recv(fd, buf, 16384, 0)
+
+       msg = Resolv::DNS::Message.decode buf
+       addrs = []
+       msg.each_answer do |name, ttl, data|
+         p [name, ttl, data]
+         if data.kind_of?(Resolv::DNS::Resource::IN::A) ||
+            data.kind_of?(Resolv::DNS::Resource::IN::AAAA)
+           addrs << data.address.to_s
+         end
+       end
+       addrs
+     end
+
+     def prepare_request_packet(hostname, type)
+       msg = Resolv::DNS::Message.new
+       msg.id = (@last_id += 1)
+       msg.rd = 1
+       msg.add_question hostname, msg_type(type)
+       msg
+     end
+
+     def msg_type(type)
+       # TODO: add support for other types
+       Resolv::DNS::Resource::IN::A
+     end
+   end
+ end
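
A minimal sketch of driving the resolver (constructor and #resolve signature as shown above; #resolve parks the calling fiber on the requests queue until the resolver fiber schedules it back with the result):

  require 'uringmachine'
  require 'uringmachine/dns_resolver'

  machine = UringMachine.new
  resolver = UringMachine::DNSResolver.new(machine)

  # resolve may be called from any fiber; it yields until the answer arrives.
  addrs = resolver.resolve('example.com', :A)
  p addrs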
data/lib/uringmachine/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  class UringMachine
-   VERSION = '0.4'
+   VERSION = '0.5'
  end