uringmachine 0.4 → 0.5
This diff shows the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -1
- data/CHANGELOG.md +14 -0
- data/README.md +44 -1
- data/TODO.md +12 -3
- data/examples/bm_snooze.rb +89 -0
- data/examples/bm_write.rb +56 -0
- data/examples/dns_client.rb +12 -0
- data/examples/http_server.rb +42 -43
- data/examples/server_client.rb +64 -0
- data/examples/snooze.rb +44 -0
- data/examples/write_dev_null.rb +16 -0
- data/ext/um/extconf.rb +24 -14
- data/ext/um/um.c +468 -414
- data/ext/um/um.h +129 -39
- data/ext/um/um_buffer.c +49 -0
- data/ext/um/um_class.c +148 -24
- data/ext/um/um_const.c +30 -1
- data/ext/um/um_ext.c +4 -0
- data/ext/um/um_mutex_class.c +47 -0
- data/ext/um/um_op.c +86 -111
- data/ext/um/um_queue_class.c +58 -0
- data/ext/um/um_sync.c +273 -0
- data/ext/um/um_utils.c +1 -1
- data/lib/uringmachine/dns_resolver.rb +84 -0
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +19 -3
- data/supressions/ruby.supp +71 -0
- data/test/test_um.rb +466 -47
- data/vendor/liburing/.gitignore +5 -0
- data/vendor/liburing/CHANGELOG +1 -0
- data/vendor/liburing/configure +32 -0
- data/vendor/liburing/examples/Makefile +1 -0
- data/vendor/liburing/examples/reg-wait.c +159 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
- data/vendor/liburing/src/include/liburing.h +28 -2
- data/vendor/liburing/src/int_flags.h +10 -3
- data/vendor/liburing/src/liburing-ffi.map +13 -2
- data/vendor/liburing/src/liburing.map +9 -0
- data/vendor/liburing/src/queue.c +25 -16
- data/vendor/liburing/src/register.c +73 -4
- data/vendor/liburing/src/setup.c +46 -18
- data/vendor/liburing/src/setup.h +6 -0
- data/vendor/liburing/test/Makefile +7 -0
- data/vendor/liburing/test/cmd-discard.c +427 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
- data/vendor/liburing/test/file-exit-unreg.c +48 -0
- data/vendor/liburing/test/io_uring_passthrough.c +2 -0
- data/vendor/liburing/test/io_uring_register.c +13 -2
- data/vendor/liburing/test/napi-test.c +1 -1
- data/vendor/liburing/test/no-mmap-inval.c +1 -1
- data/vendor/liburing/test/read-mshot-empty.c +2 -0
- data/vendor/liburing/test/read-mshot-stdin.c +121 -0
- data/vendor/liburing/test/read-mshot.c +6 -0
- data/vendor/liburing/test/recvsend_bundle.c +2 -2
- data/vendor/liburing/test/reg-fd-only.c +1 -1
- data/vendor/liburing/test/reg-wait.c +251 -0
- data/vendor/liburing/test/regbuf-clone.c +458 -0
- data/vendor/liburing/test/resize-rings.c +643 -0
- data/vendor/liburing/test/rsrc_tags.c +1 -1
- data/vendor/liburing/test/sqpoll-sleep.c +39 -8
- data/vendor/liburing/test/sqwait.c +136 -0
- data/vendor/liburing/test/sync-cancel.c +8 -1
- data/vendor/liburing/test/timeout.c +13 -8
- metadata +22 -4
- data/examples/http_server_multishot.rb +0 -57
- data/examples/http_server_simpler.rb +0 -34
data/ext/um/um.h CHANGED

@@ -20,38 +20,64 @@
 #define likely(cond) __builtin_expect(!!(cond), 1)
 #endif

-enum op_state {
-
-
-
-
-
-
+enum op_kind {
+  OP_TIMEOUT,
+  OP_SCHEDULE,
+
+  OP_SLEEP,
+  OP_READ,
+  OP_WRITE,
+  OP_CLOSE,
+  OP_ACCEPT,
+  OP_RECV,
+  OP_SEND,
+  OP_SOCKET,
+  OP_CONNECT,
+  OP_BIND,
+  OP_LISTEN,
+  OP_GETSOCKOPT,
+  OP_SETSOCKOPT,
+
+  OP_FUTEX_WAIT,
+  OP_FUTEX_WAKE,
+
+  OP_ACCEPT_MULTISHOT,
+  OP_READ_MULTISHOT,
+  OP_RECV_MULTISHOT
 };

-
-
+#define OP_F_COMPLETED       (1U << 0)
+#define OP_F_TRANSIENT       (1U << 1)
+#define OP_F_IGNORE_CANCELED (1U << 2)
+#define OP_F_MULTISHOT       (1U << 3)

-struct um_result_entry {
+struct um_op_result {
+  __s32 res;
   __u32 flags;
+  struct um_op_result *next;
 };

 struct um_op {
-  enum op_state state;
   struct um_op *prev;
   struct um_op *next;

-
-
-  struct um_result_entry *results_tail;
+  enum op_kind kind;
+  unsigned flags;

   VALUE fiber;
-  VALUE
-  int is_multishot;
-  struct __kernel_timespec ts;
+  VALUE value;

-
-
+  struct um_op_result result;
+  struct um_op_result *multishot_result_tail;
+  unsigned multishot_result_count;
+
+  struct __kernel_timespec ts; // used for timeout operation
+};
+
+struct um_buffer {
+  struct um_buffer *next;
+  void *ptr;
+  long len;
 };

 struct buf_ring_descriptor {

@@ -66,11 +92,10 @@ struct buf_ring_descriptor {
 #define BUFFER_RING_MAX_COUNT 10

 struct um {
-
-
+  VALUE self;
+  VALUE poll_fiber;

-  struct um_op *runqueue_head;
-  struct um_op *runqueue_tail;
+  struct um_buffer *buffer_freelist;

   struct io_uring ring;

@@ -80,48 +105,95 @@ struct um {

   struct buf_ring_descriptor buffer_rings[BUFFER_RING_MAX_COUNT];
   unsigned int buffer_ring_count;
+
+  struct um_op *transient_head;
+  struct um_op *runqueue_head;
+  struct um_op *runqueue_tail;
+
+  struct um_op *op_freelist;
+  struct um_op_result *result_freelist;
+};
+
+struct um_mutex {
+  VALUE self;
+  uint32_t state;
+};
+
+struct um_queue_entry {
+  struct um_queue_entry *prev;
+  struct um_queue_entry *next;
+  VALUE value;
+};
+
+struct um_queue {
+  VALUE self;
+
+  struct um_queue_entry *head;
+  struct um_queue_entry *tail;
+  struct um_queue_entry *free_head;
+
+  uint32_t num_waiters;
+  uint32_t state;
+  uint32_t count;
 };

 extern VALUE cUM;
+extern VALUE cMutex;
+extern VALUE cQueue;

-void um_setup(struct um *machine);
+void um_setup(VALUE self, struct um *machine);
 void um_teardown(struct um *machine);
-
-
+
+struct um_op *um_op_alloc(struct um *machine);
+void um_op_free(struct um *machine, struct um_op *op);
+void um_op_clear(struct um *machine, struct um_op *op);
+void um_op_transient_add(struct um *machine, struct um_op *op);
+void um_op_transient_remove(struct um *machine, struct um_op *op);
+void um_op_list_mark(struct um *machine, struct um_op *head);
+void um_op_list_compact(struct um *machine, struct um_op *head);
+
+void um_op_multishot_results_push(struct um *machine, struct um_op *op, __s32 res, __u32 flags);
+void um_op_multishot_results_clear(struct um *machine, struct um_op *op);
+
+void um_runqueue_push(struct um *machine, struct um_op *op);
+struct um_op *um_runqueue_shift(struct um *machine);
+
+struct um_buffer *um_buffer_checkout(struct um *machine, int len);
+void um_buffer_checkin(struct um *machine, struct um_buffer *buffer);
+void um_free_buffer_linked_list(struct um *machine);

 struct __kernel_timespec um_double_to_timespec(double value);
 int um_value_is_exception_p(VALUE v);
 VALUE um_raise_exception(VALUE v);
-void um_raise_on_system_error(int result);

+#define raise_if_exception(v) (um_value_is_exception_p(v) ? um_raise_exception(v) : v)
+
+void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind);
+void um_raise_on_error_result(int result);
 void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs);
 void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, __s32 result, __u32 flags);
-
 int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count);
 VALUE um_get_string_from_buffer_ring(struct um *machine, int bgid, __s32 result, __u32 flags);

+struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op);
+
 VALUE um_fiber_switch(struct um *machine);
 VALUE um_await(struct um *machine);
+void um_cancel_and_wait(struct um *machine, struct um_op *op);
+int um_check_completion(struct um *machine, struct um_op *op);

-
-struct um_op* um_op_checkout(struct um *machine);
-void um_op_result_push(struct um *machine, struct um_op *op, __s32 result, __u32 flags);
-int um_op_result_shift(struct um *machine, struct um_op *op, __s32 *result, __u32 *flags);
-
-struct um_op *um_runqueue_find_by_fiber(struct um *machine, VALUE fiber);
-void um_runqueue_push(struct um *machine, struct um_op *op);
-struct um_op *um_runqueue_shift(struct um *machine);
-void um_runqueue_unshift(struct um *machine, struct um_op *op);
+#define um_op_completed_p(op) ((op)->flags & OP_F_COMPLETED)

 void um_schedule(struct um *machine, VALUE fiber, VALUE value);
-void um_interrupt(struct um *machine, VALUE fiber, VALUE value);
 VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);

 VALUE um_sleep(struct um *machine, double duration);
 VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
 VALUE um_read_each(struct um *machine, int fd, int bgid);
-VALUE um_write(struct um *machine, int fd, VALUE
+VALUE um_write(struct um *machine, int fd, VALUE str, int len);
 VALUE um_close(struct um *machine, int fd);
+VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode);
+VALUE um_waitpid(struct um *machine, int pid, int options);

 VALUE um_accept(struct um *machine, int fd);
 VALUE um_accept_each(struct um *machine, int fd);

@@ -129,8 +201,26 @@ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags);
 VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen);
 VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags);
 VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags);
+VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags);
 VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen);
 VALUE um_listen(struct um *machine, int fd, int backlog);
+VALUE um_getsockopt(struct um *machine, int fd, int level, int opt);
+VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value);
+
+struct um_mutex *Mutex_data(VALUE self);
+struct um_queue *Queue_data(VALUE self);
+
+void um_mutex_init(struct um_mutex *mutex);
+VALUE um_mutex_synchronize(struct um *machine, uint32_t *state);
+
+void um_queue_init(struct um_queue *queue);
+void um_queue_free(struct um_queue *queue);
+void um_queue_mark(struct um_queue *queue);
+void um_queue_compact(struct um_queue *queue);
+VALUE um_queue_push(struct um *machine, struct um_queue *queue, VALUE value);
+VALUE um_queue_pop(struct um *machine, struct um_queue *queue);
+VALUE um_queue_unshift(struct um *machine, struct um_queue *queue, VALUE value);
+VALUE um_queue_shift(struct um *machine, struct um_queue *queue);

 void um_define_net_constants(VALUE mod);

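The header rework above replaces the old per-op state machine with an explicit op kind enum plus OP_F_* flag bits, and threads multishot completions through a chain of um_op_result nodes recycled via result_freelist. On the Ruby side the multishot kinds surface as the *_each methods. A rough usage sketch, not a definitive example: it assumes the UringMachine class, that setup_buffer_ring returns the buffer group id, and a #spin fiber helper as seen in the gem's examples; read_each also needs a kernel with multishot read support.

    require 'uringmachine'

    machine = UringMachine.new
    bgid = machine.setup_buffer_ring(4096, 1024) # buffer size, buffer count

    rfd, wfd = UringMachine.pipe
    machine.spin do
      machine.write(wfd, 'hello')
      machine.close(wfd)
    end

    # A single multishot read SQE; each CQE is pushed onto the op's
    # result chain and yielded here as a string from the buffer ring.
    machine.read_each(rfd, bgid) do |chunk|
      puts "got #{chunk.bytesize} bytes"
    end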
data/ext/um/um_buffer.c ADDED

@@ -0,0 +1,49 @@
+#include "um.h"
+
+inline long buffer_size(long len) {
+  len--;
+  len |= len >> 1;
+  len |= len >> 2;
+  len |= len >> 4;
+  len |= len >> 8;
+  len |= len >> 16;
+  len++;
+  return (len > 4096) ? len : 4096;
+}
+
+inline struct um_buffer *um_buffer_checkout(struct um *machine, int len) {
+  struct um_buffer *buffer = machine->buffer_freelist;
+  if (buffer)
+    machine->buffer_freelist = buffer->next;
+  else {
+    buffer = malloc(sizeof(struct um_buffer));
+    memset(buffer, 0, sizeof(struct um_buffer));
+  }
+
+  if (buffer->len < len) {
+    if (buffer->ptr) {
+      free(buffer->ptr);
+      buffer->ptr = NULL;
+    }
+
+    buffer->len = buffer_size(len);
+    if (posix_memalign(&buffer->ptr, 4096, buffer->len))
+      rb_raise(rb_eRuntimeError, "Failed to allocate buffer");
+  }
+  return buffer;
+}
+
+inline void um_buffer_checkin(struct um *machine, struct um_buffer *buffer) {
+  buffer->next = machine->buffer_freelist;
+  machine->buffer_freelist = buffer;
+}
+
+inline void um_free_buffer_linked_list(struct um *machine) {
+  struct um_buffer *buffer = machine->buffer_freelist;
+  while (buffer) {
+    struct um_buffer *next = buffer->next;
+    if (buffer->ptr) free(buffer->ptr);
+    free(buffer);
+    buffer = next;
+  }
+}
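um_buffer_checkout recycles buffers through the machine's buffer_freelist and only reallocates when the cached buffer is too small; buffer_size rounds the requested length up to the next power of two with a 4096-byte floor, and posix_memalign keeps allocations page-aligned. The rounding is the classic bit-smearing trick; here is a hypothetical Ruby rendition of the same arithmetic, for illustration only:

    def buffer_size(len)
      len -= 1
      # Smear the highest set bit into every lower bit position,
      # then add one to land on the next power of two.
      [1, 2, 4, 8, 16].each { |shift| len |= len >> shift }
      len += 1
      len > 4096 ? len : 4096
    end

    buffer_size(1000) #=> 4096 (minimum)
    buffer_size(7000) #=> 8192 (next power of two)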
data/ext/um/um_class.c CHANGED

@@ -4,13 +4,20 @@
 VALUE cUM;

 static void UM_mark(void *ptr) {
-
-
+  struct um *machine = ptr;
+  rb_gc_mark_movable(machine->self);
+
+  um_op_list_mark(machine, machine->transient_head);
+  um_op_list_mark(machine, machine->runqueue_head);
 }

 static void UM_compact(void *ptr) {
-
-
+  struct um *machine = ptr;
+  machine->self = rb_gc_location(machine->self);
+  machine->poll_fiber = rb_gc_location(machine->poll_fiber);
+
+  um_op_list_compact(machine, machine->transient_head);
+  um_op_list_compact(machine, machine->runqueue_head);
 }

 static void UM_free(void *ptr) {

@@ -43,7 +50,7 @@ inline struct um *get_machine(VALUE self) {

 VALUE UM_initialize(VALUE self) {
   struct um *machine = RTYPEDDATA_DATA(self);
-  um_setup(machine);
+  um_setup(self, machine);
   return self;
 }

@@ -55,7 +62,7 @@ VALUE UM_setup_buffer_ring(VALUE self, VALUE size, VALUE count) {

 VALUE UM_pending_count(VALUE self) {
   struct um *machine = get_machine(self);
-  return
+  return INT2NUM(machine->pending_count);
 }

 VALUE UM_snooze(VALUE self) {

@@ -75,12 +82,6 @@ VALUE UM_schedule(VALUE self, VALUE fiber, VALUE value) {
   return self;
 }

-VALUE UM_interrupt(VALUE self, VALUE fiber, VALUE value) {
-  struct um *machine = get_machine(self);
-  um_interrupt(machine, fiber, value);
-  return self;
-}
-
 VALUE UM_timeout(VALUE self, VALUE interval, VALUE class) {
   struct um *machine = get_machine(self);
   return um_timeout(machine, interval, class);

@@ -88,8 +89,7 @@ VALUE UM_timeout(VALUE self, VALUE interval, VALUE class) {

 VALUE UM_sleep(VALUE self, VALUE duration) {
   struct um *machine = get_machine(self);
-  um_sleep(machine, NUM2DBL(duration));
-  return duration;
+  return um_sleep(machine, NUM2DBL(duration));
 }

 VALUE UM_read(int argc, VALUE *argv, VALUE self) {

@@ -107,8 +107,12 @@ VALUE UM_read(int argc, VALUE *argv, VALUE self) {
 }

 VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
+#ifdef HAVE_IO_URING_PREP_READ_MULTISHOT
   struct um *machine = get_machine(self);
   return um_read_each(machine, NUM2INT(fd), NUM2INT(bgid));
+#else
+  rb_raise(rb_eRuntimeError, "Not supported by kernel");
+#endif
 }

 VALUE UM_write(int argc, VALUE *argv, VALUE self) {

@@ -164,6 +168,11 @@ VALUE UM_recv(VALUE self, VALUE fd, VALUE buffer, VALUE maxlen, VALUE flags) {
   return um_recv(machine, NUM2INT(fd), buffer, NUM2INT(maxlen), NUM2INT(flags));
 }

+VALUE UM_recv_each(VALUE self, VALUE fd, VALUE bgid, VALUE flags) {
+  struct um *machine = get_machine(self);
+  return um_recv_each(machine, NUM2INT(fd), NUM2INT(bgid), NUM2INT(flags));
+}
+
 VALUE UM_bind(VALUE self, VALUE fd, VALUE host, VALUE port) {
   struct sockaddr_in addr;
   memset(&addr, 0, sizeof(addr));

@@ -194,6 +203,104 @@ VALUE UM_listen(VALUE self, VALUE fd, VALUE backlog) {
 #endif
 }

+static inline int numeric_value(VALUE value) {
+  switch (TYPE(value)) {
+  case T_TRUE:
+    return 1;
+  case T_FALSE:
+    return 0;
+  default:
+    return NUM2INT(value);
+  }
+}
+
+VALUE UM_getsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt) {
+  struct um *machine = get_machine(self);
+  return um_getsockopt(machine, NUM2INT(fd), NUM2INT(level), NUM2INT(opt));
+}
+
+VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
+  struct um *machine = get_machine(self);
+  return um_setsockopt(machine, NUM2INT(fd), NUM2INT(level), NUM2INT(opt), numeric_value(value));
+}
+
+#ifdef HAVE_IO_URING_PREP_FUTEX
+
+VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
+  struct um *machine = get_machine(self);
+  struct um_mutex *mutex_data = Mutex_data(mutex);
+  return um_mutex_synchronize(machine, &mutex_data->state);
+}
+
+VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
+  struct um *machine = get_machine(self);
+  struct um_queue *que = Queue_data(queue);
+  return um_queue_push(machine, que, value);
+}
+
+VALUE UM_queue_pop(VALUE self, VALUE queue) {
+  struct um *machine = get_machine(self);
+  struct um_queue *que = Queue_data(queue);
+  return um_queue_pop(machine, que);
+}
+
+VALUE UM_queue_unshift(VALUE self, VALUE queue, VALUE value) {
+  struct um *machine = get_machine(self);
+  struct um_queue *que = Queue_data(queue);
+  return um_queue_unshift(machine, que, value);
+}
+
+VALUE UM_queue_shift(VALUE self, VALUE queue) {
+  struct um *machine = get_machine(self);
+  struct um_queue *que = Queue_data(queue);
+  return um_queue_shift(machine, que);
+}
+
+#endif
+
+struct um_open_ctx {
+  VALUE self;
+  VALUE fd;
+};
+
+VALUE UM_open_ensure(VALUE arg) {
+  struct um_open_ctx *ctx = (struct um_open_ctx *)arg;
+  UM_close(ctx->self, ctx->fd);
+  return ctx->self;
+}
+
+VALUE UM_open(VALUE self, VALUE pathname, VALUE flags) {
+  struct um *machine = get_machine(self);
+  // TODO: take optional perm (mode) arg
+  VALUE fd = um_open(machine, pathname, NUM2INT(flags), 0666);
+  if (rb_block_given_p()) {
+    struct um_open_ctx ctx = { self, fd };
+    return rb_ensure(rb_yield, fd, UM_open_ensure, (VALUE)&ctx);
+  }
+  else
+    return fd;
+}
+
+VALUE UM_waitpid(VALUE self, VALUE pid, VALUE options) {
+  struct um *machine = get_machine(self);
+  return um_waitpid(machine, NUM2INT(pid), NUM2INT(options));
+}
+
+VALUE UM_pipe(VALUE self) {
+  int fds[2];
+  int ret = pipe(fds);
+  if (ret) {
+    int e = errno;
+    rb_syserr_fail(e, strerror(e));
+  }
+
+  return rb_ary_new_from_args(2, INT2NUM(fds[0]), INT2NUM(fds[1]));
+}
+
+VALUE UM_kernel_version(VALUE self) {
+  return INT2NUM(UM_KERNEL_VERSION);
+}
+
 void Init_UM(void) {
   rb_ext_ractor_safe(true);

@@ -201,29 +308,46 @@ void Init_UM(void) {
   rb_define_alloc_func(cUM, UM_allocate);

   rb_define_method(cUM, "initialize", UM_initialize, 0);
-  rb_define_method(cUM, "setup_buffer_ring", UM_setup_buffer_ring, 2);
   rb_define_method(cUM, "pending_count", UM_pending_count, 0);
+  rb_define_method(cUM, "setup_buffer_ring", UM_setup_buffer_ring, 2);
+
+  rb_define_singleton_method(cUM, "pipe", UM_pipe, 0);
+  rb_define_singleton_method(cUM, "kernel_version", UM_kernel_version, 0);
+

-  rb_define_method(cUM, "snooze", UM_snooze, 0);
-  rb_define_method(cUM, "yield", UM_yield, 0);
   rb_define_method(cUM, "schedule", UM_schedule, 2);
-  rb_define_method(cUM, "
+  rb_define_method(cUM, "snooze", UM_snooze, 0);
   rb_define_method(cUM, "timeout", UM_timeout, 2);
+  rb_define_method(cUM, "yield", UM_yield, 0);

-  rb_define_method(cUM, "
+  rb_define_method(cUM, "close", UM_close, 1);
+  rb_define_method(cUM, "open", UM_open, 2);
   rb_define_method(cUM, "read", UM_read, -1);
   rb_define_method(cUM, "read_each", UM_read_each, 2);
+  rb_define_method(cUM, "sleep", UM_sleep, 1);
   rb_define_method(cUM, "write", UM_write, -1);
-
+
+  rb_define_method(cUM, "waitpid", UM_waitpid, 2);

   rb_define_method(cUM, "accept", UM_accept, 1);
   rb_define_method(cUM, "accept_each", UM_accept_each, 1);
-  rb_define_method(cUM, "socket", UM_socket, 4);
-  rb_define_method(cUM, "connect", UM_connect, 3);
-  rb_define_method(cUM, "send", UM_send, 4);
-  rb_define_method(cUM, "recv", UM_recv, 4);
   rb_define_method(cUM, "bind", UM_bind, 3);
+  rb_define_method(cUM, "connect", UM_connect, 3);
+  rb_define_method(cUM, "getsockopt", UM_getsockopt, 3);
   rb_define_method(cUM, "listen", UM_listen, 2);
+  rb_define_method(cUM, "recv", UM_recv, 4);
+  rb_define_method(cUM, "recv_each", UM_recv_each, 3);
+  rb_define_method(cUM, "send", UM_send, 4);
+  rb_define_method(cUM, "setsockopt", UM_setsockopt, 4);
+  rb_define_method(cUM, "socket", UM_socket, 4);
+
+#ifdef HAVE_IO_URING_PREP_FUTEX
+  rb_define_method(cUM, "pop", UM_queue_pop, 1);
+  rb_define_method(cUM, "push", UM_queue_push, 2);
+  rb_define_method(cUM, "shift", UM_queue_shift, 1);
+  rb_define_method(cUM, "synchronize", UM_mutex_synchronize, 1);
+  rb_define_method(cUM, "unshift", UM_queue_unshift, 2);
+#endif

   um_define_net_constants(cUM);
 }
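Note how UM_open implements the block form: the yield is wrapped in rb_ensure with UM_open_ensure as the cleanup, so the descriptor is closed even if the block raises, mirroring File.open's contract. A minimal Ruby sketch of that usage, assuming the O_* constants defined in um_const.c below:

    machine = UringMachine.new

    machine.open('/tmp/out.log', UringMachine::O_CREAT | UringMachine::O_WRONLY) do |fd|
      machine.write(fd, "hello\n")
    end
    # fd is closed here, whether or not the block raised

    rfd, wfd = UringMachine.pipe # plain pipe(2); returns [read_fd, write_fd]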
data/ext/um/um_const.c CHANGED

@@ -1,4 +1,8 @@
 #include "ruby.h"
+
+#include <fcntl.h>
+#include <sys/wait.h>
+
 #include <arpa/inet.h>
 #include <sys/types.h>
 #include <sys/socket.h>

@@ -12,6 +16,31 @@
 #define DEF_CONST_INT(mod, v) rb_define_const(mod, #v, INT2NUM(v))

 void um_define_net_constants(VALUE mod) {
+  DEF_CONST_INT(mod, O_APPEND);
+  DEF_CONST_INT(mod, O_CLOEXEC);
+  DEF_CONST_INT(mod, O_CREAT);
+  DEF_CONST_INT(mod, O_DIRECT);
+  DEF_CONST_INT(mod, O_DIRECTORY);
+  DEF_CONST_INT(mod, O_DSYNC);
+  DEF_CONST_INT(mod, O_EXCL);
+  DEF_CONST_INT(mod, O_NOCTTY);
+  DEF_CONST_INT(mod, O_NOFOLLOW);
+  DEF_CONST_INT(mod, O_PATH);
+  DEF_CONST_INT(mod, O_RDONLY);
+  DEF_CONST_INT(mod, O_RDWR);
+  DEF_CONST_INT(mod, O_SYNC);
+  DEF_CONST_INT(mod, O_TMPFILE);
+  DEF_CONST_INT(mod, O_TRUNC);
+  DEF_CONST_INT(mod, O_WRONLY);
+
+  DEF_CONST_INT(mod, WNOHANG);
+  DEF_CONST_INT(mod, WUNTRACED);
+  DEF_CONST_INT(mod, WCONTINUED);
+  DEF_CONST_INT(mod, WEXITED);
+  DEF_CONST_INT(mod, WSTOPPED);
+  DEF_CONST_INT(mod, WCONTINUED);
+  DEF_CONST_INT(mod, WNOWAIT);
+
   DEF_CONST_INT(mod, SOCK_STREAM);
   DEF_CONST_INT(mod, SOCK_DGRAM);
   DEF_CONST_INT(mod, SOCK_RAW);

@@ -181,4 +210,4 @@ void um_define_net_constants(VALUE mod) {
   DEF_CONST_INT(mod, IF_NAMESIZE);

   DEF_CONST_INT(mod, SOMAXCONN);
-}
+}
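The new fcntl.h and sys/wait.h constants back the open and waitpid methods added in um_class.c. A minimal sketch of reaping a child from a fiber, assuming waitpid blocks only the calling fiber rather than the thread; the shape of its return value is not shown in this diff, so treat it as an assumption:

    machine = UringMachine.new
    pid = spawn('sleep 1')
    # Wait for the child to exit without blocking other fibers.
    machine.waitpid(pid, UringMachine::WEXITED)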
data/ext/um/um_mutex_class.c ADDED

@@ -0,0 +1,47 @@
+#include "um.h"
+#include <stdlib.h>
+
+VALUE cMutex;
+
+static void Mutex_mark(void *ptr) {
+  struct um_mutex *mutex = ptr;
+  rb_gc_mark_movable(mutex->self);
+}
+
+static void Mutex_compact(void *ptr) {
+  struct um_mutex *mutex = ptr;
+  mutex->self = rb_gc_location(mutex->self);
+}
+
+static size_t Mutex_size(const void *ptr) {
+  return sizeof(struct um_mutex);
+}
+
+static const rb_data_type_t Mutex_type = {
+  "UringMachineMutex",
+  {Mutex_mark, free, Mutex_size, Mutex_compact},
+  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+};
+
+static VALUE Mutex_allocate(VALUE klass) {
+  struct um_mutex *mutex = malloc(sizeof(struct um_mutex));
+  return TypedData_Wrap_Struct(klass, &Mutex_type, mutex);
+}
+
+inline struct um_mutex *Mutex_data(VALUE self) {
+  return RTYPEDDATA_DATA(self);
+}
+
+VALUE Mutex_initialize(VALUE self) {
+  struct um_mutex *mutex = Mutex_data(self);
+  mutex->self = self;
+  um_mutex_init(mutex);
+  return self;
+}
+
+void Init_Mutex(void) {
+  cMutex = rb_define_class_under(cUM, "Mutex", rb_cObject);
+  rb_define_alloc_func(cMutex, Mutex_allocate);
+
+  rb_define_method(cMutex, "initialize", Mutex_initialize, 0);
+}