uringmachine 0.1 → 0.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/Rakefile +1 -1
- data/ext/um/um.c +228 -38
- data/ext/um/um.h +36 -8
- data/ext/um/um_class.c +98 -31
- data/ext/um/um_ext.c +0 -6
- data/ext/um/um_op.c +70 -6
- data/ext/um/um_utils.c +51 -1
- data/lib/uringmachine/version.rb +3 -1
- data/lib/uringmachine.rb +0 -3
- data/test/helper.rb +5 -12
- data/test/test_um.rb +259 -13
- data/uringmachine.gemspec +1 -1
- metadata +2 -6
- data/ext/um/iou.h +0 -101
- data/ext/um/op_ctx.c +0 -138
- data/ext/um/ring.c +0 -755
- data/test/test_iou.rb +0 -876
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7129b3c8605d5734f7152bddb4fa1b6f034fd1de26262eabfbfc2190846967bd
+  data.tar.gz: 902e6663bac65d56e45d67383e2cd3eb0601534ed2b451f4f76b2812ce6125b1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 683db63642ddb5d98c9eb137f8121a8cb8cb0fe44456928fca78cf8e322cebc01078efa71379abcceae1a1889140bf5b39f6ac16348db0b77d9c8a5bb9cf5cd0
+  data.tar.gz: 850dce780030b6102371861805f831299d3ea113f8c02141515afe167b0e49f43a51812a6159a91b6335085b41fcb226713eeb963ae69d208818cb1f3cb303b3
data/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+# 2024-10-04 Version 0.3
+
+- Fix race condition affecting `#timeout` and `#sleep`.
+- Add `#accept_each`
+- Add `#accept`
+
+# 2024-10-03 Version 0.2
+
+- Remove old IOU code.
+- Add `#read_each`
+- Add `#read`
+
 # 2024-10-03 Version 0.1
 
 The basic fiber scheduling stuff
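The entries above correspond to new instance methods on `UringMachine`, registered in `um_class.c` further down. A minimal usage sketch of the single-shot calls, assuming a local pipe for I/O; the method signatures are taken from the diff, everything else is illustrative:

```ruby
require 'uringmachine'

machine = UringMachine.new
rd, wr = IO.pipe

machine.write(wr.fileno, "hello")         # single-shot write, returns bytes written
buf = +''                                 # mutable string used as the read buffer
len = machine.read(rd.fileno, buf, 4096)  # 0.2: single-shot read, returns bytes read
machine.sleep(0.1)                        # 0.3: suspends the calling fiber via a timeout SQE
# 0.3 also adds #accept and #accept_each for listening sockets (see um_accept below).
```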
data/Rakefile CHANGED
@@ -26,7 +26,7 @@ CLEAN.include "**/*.o", "**/*.so", "**/*.so.*", "**/*.a", "**/*.bundle", "**/*.j
 
 task :release do
   require_relative './lib/uringmachine/version'
-  version =
+  version = UringMachine::VERSION
 
   puts 'Building uringmachine...'
   `gem build uringmachine.gemspec`
data/ext/um/um.c CHANGED
@@ -2,6 +2,54 @@
 #include "ruby/thread.h"
 #include <sys/mman.h>
 
+void um_setup(struct um *machine) {
+  machine->ring_initialized = 0;
+  machine->unsubmitted_count = 0;
+  machine->buffer_ring_count = 0;
+  machine->pending_count = 0;
+  machine->runqueue_head = NULL;
+  machine->runqueue_tail = NULL;
+  machine->op_freelist = NULL;
+  machine->result_freelist = NULL;
+
+  unsigned prepared_limit = 4096;
+  int flags = 0;
+#ifdef HAVE_IORING_SETUP_SUBMIT_ALL
+  flags |= IORING_SETUP_SUBMIT_ALL;
+#endif
+#ifdef HAVE_IORING_SETUP_COOP_TASKRUN
+  flags |= IORING_SETUP_COOP_TASKRUN;
+#endif
+
+  while (1) {
+    int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
+    if (likely(!ret)) break;
+
+    // if ENOMEM is returned, try with half as much entries
+    if (unlikely(ret == -ENOMEM && prepared_limit > 64))
+      prepared_limit = prepared_limit / 2;
+    else
+      rb_syserr_fail(-ret, strerror(-ret));
+  }
+  machine->ring_initialized = 1;
+}
+
+inline void um_teardown(struct um *machine) {
+  if (!machine->ring_initialized) return;
+
+  for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
+    struct buf_ring_descriptor *desc = machine->buffer_rings + i;
+    io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
+    free(desc->buf_base);
+  }
+  machine->buffer_ring_count = 0;
+  io_uring_queue_exit(&machine->ring);
+  machine->ring_initialized = 0;
+
+  um_free_op_linked_list(machine, machine->op_freelist);
+  um_free_op_linked_list(machine, machine->runqueue_head);
+}
+
 static inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
   struct io_uring_sqe *sqe;
   sqe = io_uring_get_sqe(&machine->ring);
|
|
24
72
|
return sqe;
|
25
73
|
}
|
26
74
|
|
27
|
-
inline void um_cleanup(struct um *machine) {
|
28
|
-
if (!machine->ring_initialized) return;
|
29
|
-
|
30
|
-
for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
|
31
|
-
struct buf_ring_descriptor *desc = machine->buffer_rings + i;
|
32
|
-
io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
|
33
|
-
free(desc->buf_base);
|
34
|
-
}
|
35
|
-
machine->buffer_ring_count = 0;
|
36
|
-
io_uring_queue_exit(&machine->ring);
|
37
|
-
machine->ring_initialized = 0;
|
38
|
-
|
39
|
-
um_free_linked_list(machine->runqueue_head);
|
40
|
-
um_free_linked_list(machine->freelist_head);
|
41
|
-
}
|
42
|
-
|
43
75
|
struct wait_for_cqe_ctx {
|
44
76
|
struct um *machine;
|
45
77
|
struct io_uring_cqe *cqe;
|
@@ -57,26 +89,47 @@ void *um_wait_for_cqe_without_gvl(void *ptr) {
   return NULL;
 }
 
+inline void um_handle_submitted_op_cqe_single(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
+  op->cqe_result = cqe->res;
+  op->cqe_flags = cqe->flags;
+  op->state = OP_completed;
+  um_runqueue_push(machine, op);
+}
+
+inline void um_handle_submitted_op_cqe_multi(struct um *machine, struct um_op *op, struct io_uring_cqe *cqe) {
+  if (!op->results_head) {
+    struct um_op *op2 = um_op_checkout(machine);
+    op2->state = OP_schedule;
+    op2->fiber = op->fiber;
+    op2->resume_value = Qnil;
+    um_runqueue_push(machine, op2);
+  }
+  um_op_result_push(machine, op, cqe->res, cqe->flags);
+
+  if (!(cqe->flags & IORING_CQE_F_MORE))
+    op->state = OP_completed;
+}
+
 inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
   struct um_op *op = (struct um_op *)cqe->user_data;
-  if (!op) return;
+  if (unlikely(!op)) return;
 
   switch (op->state) {
     case OP_submitted:
-      if (cqe->res == -ECANCELED) {
+      if (unlikely(cqe->res == -ECANCELED)) {
         um_op_checkin(machine, op);
+        break;
       }
-
-      op
-
-      op
-      um_runqueue_push(machine, op);
-    }
+      if (!op->is_multishot)
+        um_handle_submitted_op_cqe_single(machine, op, cqe);
+      else
+        um_handle_submitted_op_cqe_multi(machine, op, cqe);
       break;
     case OP_abandonned:
       // op has been abandonned by the I/O method, so we need to cleanup (check
       // the op in to the free list).
       um_op_checkin(machine, op);
+      break;
    default:
      // TODO: invalid state, should raise!
  }
|
|
173
226
|
io_uring_prep_cancel64(sqe, (long long)op, 0);
|
174
227
|
}
|
175
228
|
|
176
|
-
static inline VALUE um_await_op(struct um *machine, struct um_op *op) {
|
229
|
+
static inline VALUE um_await_op(struct um *machine, struct um_op *op, int *result, int *flags) {
|
177
230
|
op->fiber = rb_fiber_current();
|
178
231
|
VALUE v = um_fiber_switch(machine);
|
179
|
-
|
180
232
|
int is_exception = um_value_is_exception_p(v);
|
181
233
|
|
182
|
-
if (is_exception && op->state == OP_submitted) {
|
234
|
+
if (unlikely(is_exception && op->state == OP_submitted)) {
|
183
235
|
um_cancel_op(machine, op);
|
184
236
|
op->state = OP_abandonned;
|
185
237
|
}
|
186
|
-
else
|
187
|
-
|
188
|
-
|
238
|
+
else {
|
239
|
+
// We copy over the CQE result and flags, since the op is immediately
|
240
|
+
// checked in.
|
241
|
+
if (result) *result = op->cqe_result;
|
242
|
+
if (flags) *flags = op->cqe_flags;
|
243
|
+
if (!op->is_multishot)
|
244
|
+
um_op_checkin(machine, op);
|
245
|
+
}
|
246
|
+
|
247
|
+
if (unlikely(is_exception)) um_raise_exception(v);
|
248
|
+
return v;
|
189
249
|
}
|
190
250
|
|
191
251
|
inline VALUE um_await(struct um *machine) {
|
@@ -216,13 +276,14 @@ inline void um_interrupt(struct um *machine, VALUE fiber, VALUE value) {
   }
 }
 
-struct
+struct op_ensure_ctx {
   struct um *machine;
   struct um_op *op;
+  int bgid;
 };
 
 VALUE um_timeout_ensure(VALUE arg) {
-  struct
+  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
 
   if (ctx->op->state == OP_submitted) {
     // A CQE has not yet been received, we cancel the timeout and abandon the op
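`um_timeout` (in the next hunk) wraps the yielded block with `rb_ensure`, so `um_timeout_ensure` cancels the timeout SQE when the block finishes first; if the timeout fires, the waiting fiber is resumed with a new instance of the class passed in, which is presumably raised when it is an exception class. A hedged Ruby-side sketch, with `TooSlow` as an illustrative exception class:

```ruby
require 'uringmachine'

class TooSlow < RuntimeError; end   # illustrative exception class

machine = UringMachine.new

begin
  # The block is wrapped with rb_ensure (um_timeout_ensure): if it returns
  # before the interval elapses, the pending timeout op is cancelled.
  machine.timeout(0.1, TooSlow) do
    machine.sleep(1)                # sleeps longer than the 0.1s timeout
  end
rescue TooSlow
  puts 'timed out'
end
```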
@@ -242,26 +303,155 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
   if (!ID_new) ID_new = rb_intern("new");
 
   struct um_op *op = um_op_checkout(machine);
-
+  op->ts = um_double_to_timespec(NUM2DBL(interval));
 
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-  io_uring_prep_timeout(sqe, &ts, 0, 0);
+  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
   op->state = OP_submitted;
   op->fiber = rb_fiber_current();
   op->resume_value = rb_funcall(class, ID_new, 0);
 
-  struct
+  struct op_ensure_ctx ctx = { .machine = machine, .op = op };
   return rb_ensure(rb_yield, Qnil, um_timeout_ensure, (VALUE)&ctx);
 }
 
 inline VALUE um_sleep(struct um *machine, double duration) {
   struct um_op *op = um_op_checkout(machine);
-
+  op->ts = um_double_to_timespec(duration);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+
+  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
+  op->state = OP_submitted;
+
+  return um_await_op(machine, op, &result, NULL);
+}
+
+inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+  int flags = 0;
+
+  void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
+  io_uring_prep_read(sqe, fd, ptr, maxlen, -1);
+  op->state = OP_submitted;
 
+  um_await_op(machine, op, &result, &flags);
+
+  um_raise_on_system_error(result);
+  um_update_read_buffer(machine, buffer, buffer_offset, result, flags);
+  return INT2FIX(result);
+}
+
+VALUE um_multishot_ensure(VALUE arg) {
+  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+  switch (ctx->op->state) {
+    case OP_submitted:
+      um_cancel_op(ctx->machine, ctx->op);
+      break;
+    case OP_completed:
+      um_op_checkin(ctx->machine, ctx->op);
+      break;
+    default:
+  }
+  return Qnil;
+}
+
+VALUE um_read_each_safe_loop(VALUE arg) {
+  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+  int result = 0;
+  int flags = 0;
+  int total = 0;
+
+  while (1) {
+    um_await_op(ctx->machine, ctx->op, NULL, NULL);
+    if (!ctx->op->results_head) {
+      // TODO: raise, this shouldn't happen
+      printf("no result found!\n");
+    }
+    while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
+      if (likely(result > 0)) {
+        total += result;
+        VALUE buf = get_string_from_buffer_ring(ctx->machine, ctx->bgid, result, flags);
+        rb_yield(buf);
+      }
+      else
+        return INT2FIX(total);
+    }
+  }
+}
+
+VALUE um_read_each(struct um *machine, int fd, int bgid) {
+  struct um_op *op = um_op_checkout(machine);
   struct io_uring_sqe *sqe = um_get_sqe(machine, op);
-
+
+  op->is_multishot = 1;
+  io_uring_prep_read_multishot(sqe, fd, 0, -1, bgid);
   op->state = OP_submitted;
 
-
+  struct op_ensure_ctx ctx = { .machine = machine, .op = op, .bgid = bgid };
+  return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
 }
 
+VALUE um_write(struct um *machine, int fd, VALUE buffer, int len) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  int result = 0;
+  int flags = 0;
+
+  io_uring_prep_write(sqe, fd, RSTRING_PTR(buffer), len, -1);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, &flags);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_accept(struct um *machine, int fd) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  struct sockaddr addr;
+  socklen_t len;
+  int result = 0;
+  int flags = 0;
+  io_uring_prep_accept(sqe, fd, &addr, &len, 0);
+  op->state = OP_submitted;
+
+  um_await_op(machine, op, &result, &flags);
+  um_raise_on_system_error(result);
+  return INT2FIX(result);
+}
+
+VALUE um_accept_each_safe_loop(VALUE arg) {
+  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
+  int result = 0;
+  int flags = 0;
+
+  while (1) {
+    um_await_op(ctx->machine, ctx->op, NULL, NULL);
+    if (!ctx->op->results_head) {
+      // TODO: raise, this shouldn't happen
+      printf("no result found!\n");
+    }
+    while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
+      if (likely(result > 0))
+        rb_yield(INT2FIX(result));
+      else
+        return Qnil;
+    }
+  }
+}
+
+VALUE um_accept_each(struct um *machine, int fd) {
+  struct um_op *op = um_op_checkout(machine);
+  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
+  struct sockaddr addr;
+  socklen_t len;
+  io_uring_prep_multishot_accept(sqe, fd, &addr, &len, 0);
+  op->state = OP_submitted;
+  op->is_multishot = 1;
+
+  struct op_ensure_ctx ctx = { .machine = machine, .op = op };
+  return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
+}
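`um_accept_each` arms a single multishot accept and yields each accepted file descriptor from a block wrapped by `rb_ensure`, so leaving the block (a `break` or an exception) runs `um_multishot_ensure` and cancels the still-armed op. A sketch of a one-connection-at-a-time echo loop built on it; the server socket and echo logic are illustrative:

```ruby
require 'socket'
require 'uringmachine'

machine = UringMachine.new
server  = TCPServer.new('127.0.0.1', 9090)

# Each accepted connection's fd is yielded from a single multishot accept
# SQE; breaking out of the block cancels the accept via um_multishot_ensure.
machine.accept_each(server.fileno) do |fd|
  buf = +''
  len = machine.read(fd, buf, 4096)      # single-shot read from the connection
  machine.write(fd, buf, len) if len > 0
end
```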
data/ext/um/um.h CHANGED
@@ -29,13 +29,27 @@ enum op_state {
   OP_schedule, // the corresponding fiber is scheduled
 };
 
+struct um_result_entry {
+  struct um_result_entry *next;
+
+  int result;
+  int flags;
+};
+
 struct um_op {
   enum op_state state;
   struct um_op *prev;
   struct um_op *next;
+
+  // linked list for multishot results
+  struct um_result_entry *results_head;
+  struct um_result_entry *results_tail;
 
   VALUE fiber;
   VALUE resume_value;
+  int is_multishot;
+  struct __kernel_timespec ts;
+
   int cqe_result;
   int cqe_flags;
 };
@@ -51,7 +65,9 @@ struct buf_ring_descriptor {
 #define BUFFER_RING_MAX_COUNT 10
 
 struct um {
-  struct um_op *
+  struct um_op *op_freelist;
+  struct um_result_entry *result_freelist;
+
   struct um_op *runqueue_head;
   struct um_op *runqueue_tail;
 
@@ -67,31 +83,43 @@ struct um {
 
 extern VALUE cUM;
 
+void um_setup(struct um *machine);
+void um_teardown(struct um *machine);
+void um_free_op_linked_list(struct um *machine, struct um_op *op);
+void um_free_result_linked_list(struct um *machine, struct um_result_entry *entry);
+
 struct __kernel_timespec um_double_to_timespec(double value);
+int um_value_is_exception_p(VALUE v);
+VALUE um_raise_exception(VALUE v);
+void um_raise_on_system_error(int result);
 
-void
+void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs);
+void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, int result, int flags);
+VALUE get_string_from_buffer_ring(struct um *machine, int bgid, int result, int flags);
 
-void um_free_linked_list(struct um_op *op);
 VALUE um_fiber_switch(struct um *machine);
 VALUE um_await(struct um *machine);
 
 void um_op_checkin(struct um *machine, struct um_op *op);
 struct um_op* um_op_checkout(struct um *machine);
-
-
+void um_op_result_push(struct um *machine, struct um_op *op, int result, int flags);
+int um_op_result_shift(struct um *machine, struct um_op *op, int *result, int *flags);
 
 struct um_op *um_runqueue_find_by_fiber(struct um *machine, VALUE fiber);
 void um_runqueue_push(struct um *machine, struct um_op *op);
 struct um_op *um_runqueue_shift(struct um *machine);
 void um_runqueue_unshift(struct um *machine, struct um_op *op);
 
-int um_value_is_exception_p(VALUE v);
-
-
 void um_schedule(struct um *machine, VALUE fiber, VALUE value);
 void um_interrupt(struct um *machine, VALUE fiber, VALUE value);
 VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);
 
 VALUE um_sleep(struct um *machine, double duration);
+VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
+VALUE um_read_each(struct um *machine, int fd, int bgid);
+VALUE um_write(struct um *machine, int fd, VALUE buffer, int len);
+
+VALUE um_accept(struct um *machine, int fd);
+VALUE um_accept_each(struct um *machine, int fd);
 
 #endif // UM_H
data/ext/um/um_class.c CHANGED
@@ -1,4 +1,5 @@
 #include "um.h"
+#include <sys/mman.h>
 
 VALUE cUM;
 
@@ -13,7 +14,7 @@ static void UM_compact(void *ptr) {
 }
 
 static void UM_free(void *ptr) {
-
+  um_teardown((struct um *)ptr);
   free(ptr);
 }
 
@@ -42,37 +43,58 @@ inline struct um *get_machine(VALUE self) {
 
 VALUE UM_initialize(VALUE self) {
   struct um *machine = RTYPEDDATA_DATA(self);
+  um_setup(machine);
+  return self;
+}
+
+VALUE UM_setup_buffer_ring(VALUE self, VALUE size, VALUE count) {
+  struct um *machine = get_machine(self);
 
-  machine->
-
-
-  machine->
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  if (machine->buffer_ring_count == BUFFER_RING_MAX_COUNT)
+    rb_raise(rb_eRuntimeError, "Cannot setup more than BUFFER_RING_MAX_COUNT buffer rings");
+
+  struct buf_ring_descriptor *desc = machine->buffer_rings + machine->buffer_ring_count;
+  desc->buf_count = NUM2UINT(count);
+  desc->buf_size = NUM2UINT(size);
+
+  desc->br_size = sizeof(struct io_uring_buf) * desc->buf_count;
+  void *mapped = mmap(
+    NULL, desc->br_size, PROT_READ | PROT_WRITE,
+    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0
+  );
+  if (mapped == MAP_FAILED)
+    rb_raise(rb_eRuntimeError, "Failed to allocate buffer ring");
+
+  desc->br = (struct io_uring_buf_ring *)mapped;
+  io_uring_buf_ring_init(desc->br);
+
+  unsigned bg_id = machine->buffer_ring_count;
+  struct io_uring_buf_reg reg = {
+    .ring_addr = (unsigned long)desc->br,
+    .ring_entries = desc->buf_count,
+    .bgid = bg_id
+  };
+  int ret = io_uring_register_buf_ring(&machine->ring, &reg, 0);
+  if (ret) {
+    munmap(desc->br, desc->br_size);
+    rb_syserr_fail(-ret, strerror(-ret));
+  }
+
+  desc->buf_base = malloc(desc->buf_count * desc->buf_size);
+  if (!desc->buf_base) {
+    io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, bg_id);
+    rb_raise(rb_eRuntimeError, "Failed to allocate buffers");
   }
-  machine->ring_initialized = 1;
 
-
+  int mask = io_uring_buf_ring_mask(desc->buf_count);
+  for (unsigned i = 0; i < desc->buf_count; i++) {
+    io_uring_buf_ring_add(
+      desc->br, desc->buf_base + i * desc->buf_size, desc->buf_size,
+      i, mask, i);
+  }
+  io_uring_buf_ring_advance(desc->br, desc->buf_count);
+  machine->buffer_ring_count++;
+  return UINT2NUM(bg_id);
 }
 
 VALUE UM_pending_count(VALUE self) {
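`UM_setup_buffer_ring` mmaps and registers a provided-buffer ring and returns its buffer group id; that id is what `#read_each` passes down to `io_uring_prep_read_multishot`. At most `BUFFER_RING_MAX_COUNT` (10) rings can be registered per machine. A sketch, assuming a pipe as the data source:

```ruby
require 'uringmachine'

machine = UringMachine.new

# setup_buffer_ring(size, count): `count` buffers of `size` bytes each,
# registered with the kernel; returns the buffer group id (bgid).
bgid = machine.setup_buffer_ring(4096, 1024)

rd, wr = IO.pipe
wr << "hello"

# read_each uses the buffer ring identified by bgid and yields one string
# per completion; breaking out cancels the multishot read.
machine.read_each(rd.fileno, bgid) do |chunk|
  puts chunk
  break
end
```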
@@ -114,6 +136,46 @@ VALUE UM_sleep(VALUE self, VALUE duration) {
   return duration;
 }
 
+VALUE UM_read(int argc, VALUE *argv, VALUE self) {
+  struct um *machine = get_machine(self);
+  VALUE fd;
+  VALUE buffer;
+  VALUE maxlen;
+  VALUE buffer_offset;
+  rb_scan_args(argc, argv, "31", &fd, &buffer, &maxlen, &buffer_offset);
+
+  return um_read(
+    machine, NUM2INT(fd), buffer, NUM2INT(maxlen),
+    NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset)
+  );
+}
+
+VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
+  struct um *machine = get_machine(self);
+  return um_read_each(machine, NUM2INT(fd), NUM2INT(bgid));
+}
+
+VALUE UM_write(int argc, VALUE *argv, VALUE self) {
+  struct um *machine = get_machine(self);
+  VALUE fd;
+  VALUE buffer;
+  VALUE len;
+  rb_scan_args(argc, argv, "21", &fd, &buffer, &len);
+
+  int bytes = NIL_P(len) ? RSTRING_LEN(buffer) : NUM2INT(len);
+  return um_write(machine, NUM2INT(fd), buffer, bytes);
+}
+
+VALUE UM_accept(VALUE self, VALUE fd) {
+  struct um *machine = get_machine(self);
+  return um_accept(machine, NUM2INT(fd));
+}
+
+VALUE UM_accept_each(VALUE self, VALUE fd) {
+  struct um *machine = get_machine(self);
+  return um_accept_each(machine, NUM2INT(fd));
+}
+
 void Init_UM(void) {
   rb_ext_ractor_safe(true);
 
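`UM_read` and `UM_write` use `rb_scan_args` with one optional trailing argument ("31" and "21"), so the buffer offset and the write length can be omitted. A short sketch of both call forms; the exact semantics of a non-zero buffer offset are assumed from `um_prepare_read_buffer`:

```ruby
require 'uringmachine'

machine = UringMachine.new
rd, wr = IO.pipe

# write: the length argument is optional and defaults to the full byte size
# of the string (RSTRING_LEN).
machine.write(wr.fileno, "hello world")      # writes all 11 bytes
machine.write(wr.fileno, "hello world", 5)   # writes only the first 5 bytes

buf = +''
# read: the buffer offset is optional and defaults to 0; passing an offset
# places the incoming bytes at that position in buf (assumed semantics).
machine.read(rd.fileno, buf, 11)
machine.read(rd.fileno, buf, 5, buf.bytesize)
```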
@@ -121,8 +183,7 @@ void Init_UM(void) {
   rb_define_alloc_func(cUM, UM_allocate);
 
   rb_define_method(cUM, "initialize", UM_initialize, 0);
-
-
+  rb_define_method(cUM, "setup_buffer_ring", UM_setup_buffer_ring, 2);
   rb_define_method(cUM, "pending_count", UM_pending_count, 0);
 
   rb_define_method(cUM, "snooze", UM_snooze, 0);
@@ -132,6 +193,12 @@ void Init_UM(void) {
   rb_define_method(cUM, "timeout", UM_timeout, 2);
 
   rb_define_method(cUM, "sleep", UM_sleep, 1);
+  rb_define_method(cUM, "read", UM_read, -1);
+  rb_define_method(cUM, "read_each", UM_read_each, 2);
+  rb_define_method(cUM, "write", UM_write, -1);
+
+  rb_define_method(cUM, "accept", UM_accept, 1);
+  rb_define_method(cUM, "accept_each", UM_accept_each, 1);
 
   // rb_define_method(cUM, "emit", UM_emit, 1);
 