iou 0.1 → 0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +35 -0
- data/CHANGELOG.md +10 -0
- data/README.md +4 -4
- data/TODO.md +27 -1
- data/examples/event_loop.rb +1 -1
- data/examples/fibers.rb +105 -0
- data/examples/http_server_multishot.rb +1 -3
- data/examples/http_server_simpler.rb +34 -0
- data/ext/iou/iou.h +41 -6
- data/ext/iou/iou_ext.c +2 -2
- data/ext/iou/op_ctx.c +125 -0
- data/ext/iou/{iou.c → ring.c} +87 -69
- data/lib/iou/version.rb +1 -1
- data/test/test_iou.rb +68 -32
- metadata +8 -4
- data/ext/iou/op_spec_data.c +0 -61
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f240c18e2449f95a1a41d0af6ddea012c5ade81125c50149930f084499d8721f
+  data.tar.gz: 49c12b7fa7876af3666d93a891d6edac4f7b50ca034c683bc667de3b4bd6410f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 38194dea0e61f9a2801d74255e5a1db6cdae3739c45667a6dada382a9362698c4eaf46120e04ec2fdb51f6dea995fc30493c3957e3027dc2c3d767d2663b3f36
+  data.tar.gz: e4cfcb063713986a788a344f13492eacc76ae5f0db5111367fe4ef91bc8effdb333e850f3f043fb222fc6516e52533de10ed6bc17c99b20221f57f32134974b2
data/.github/workflows/test.yml
ADDED
@@ -0,0 +1,35 @@
+name: Tests
+
+on: [push, pull_request]
+
+concurrency:
+  group: tests-${{ format('{0}-{1}', github.head_ref || github.run_number, github.job) }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        # macos-latest uses arm64, macos-13 uses x86
+        os: [ubuntu-latest]
+        ruby: ['3.3', 'head']
+
+    name: ${{matrix.os}}, ${{matrix.ruby}}
+
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
+
+    runs-on: ${{matrix.os}}
+    steps:
+    - uses: actions/checkout@v4
+      with:
+        submodules: recursive
+
+    - uses: ruby/setup-ruby@v1
+      with:
+        ruby-version: ${{matrix.ruby}}
+        bundler-cache: true # 'bundle install' and cache
+    - name: Compile C-extension
+      run: bundle exec rake compile
+    - name: Run tests
+      run: bundle exec rake test
data/CHANGELOG.md
ADDED
@@ -0,0 +1,10 @@
+# 2024-09-09 Version 0.2
+
+- Add UTF8 encoding option for multishot read.
+- Implement `OpCtx` as wrapper for op specs. This removes the need to call
+  `rb_hash_aref` upon completion (except for pulling the ctx from the
+  `pending_ops` hash), leading to a significant performance improvement.
+
+# 2024-09-08 Version 0.1
+
+- First working version.
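Not part of the published package, just an illustrative sketch: based on the `SYM_utf8` handling added in ring.c and the multishot-read tests later in this diff, the new UTF-8 option might be used roughly like this. The pipe setup is only for demonstration, and the tests skip multishot reads on kernels that return `-EINVAL`.

```ruby
require 'iou'

r, w = IO.pipe
ring = IOU::Ring.new
bgid = ring.setup_buffer_ring(count: 1024, size: 4096)

# With utf8: true, the buffer passed to the completion block is a UTF-8
# encoded string (rb_utf8_str_new) instead of a binary string.
ring.prep_read(fd: r.fileno, multishot: true, buffer_group: bgid, utf8: true) do |c|
  puts c[:buffer].encoding if c[:result] > 0
end

w << 'héllo'
ring.submit
ring.process_completions(true)
```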
data/README.md
CHANGED
@@ -100,7 +100,7 @@ ring.wait_for_completion
 
 Examples for using IOU can be found in the examples directory:
 
-- 
-- 
-- 
-- 
+- Echo server
+- HTTP server
+- Event loop (in the style of EventMachine)
+- Fiber-based concurrency
data/TODO.md
CHANGED
@@ -1,4 +1,30 @@
-
+## io_uring ops
 
 - [ ] recv
 - [ ] send
+- [ ] recvmsg
+- [ ] sendmsg
+- [ ] multishot recv
+- [ ] multishot recvmsg
+- [ ] poll
+- [ ] multishot poll
+- [ ] shutdown
+- [ ] connect
+- [ ] socket
+- [ ] openat
+- [ ] splice
+- [ ] wait
+
+- [ ] support for linking requests
+
+```ruby
+ring.prep_write(fd: fd, buffer: 'foo', link: true)
+ring.prep_slice(fd: fd, src: src_fd, len: 4096)
+```
+
+- [ ] link timeout
+
+```ruby
+# read or timeout in 3 seconds
+ring.prep_read(fd: fd, buffer: +'', len: 4096, timeout: 3)
+```
data/examples/event_loop.rb
CHANGED
data/examples/fibers.rb
ADDED
@@ -0,0 +1,105 @@
+require_relative '../lib/iou'
+require 'socket'
+require 'fiber'
+
+class ::Fiber
+  attr_accessor :__op_id
+end
+
+class Scheduler
+  class Cancel < Exception
+  end
+
+  attr_reader :ring
+
+  def initialize
+    @ring = IOU::Ring.new
+    @runqueue = []
+  end
+
+  def switchpoint
+    while true
+      f, v = @runqueue.shift
+      if f
+        return f.transfer(v)
+      end
+
+      @ring.process_completions
+    end
+  end
+
+  def fiber_wait(op_id)
+    Fiber.current.__op_id = op_id
+    v = switchpoint
+    Fiber.current.__op_id = nil
+    raise v if v.is_a?(Exception)
+
+    v
+  end
+
+  def read(**args)
+    f = Fiber.current
+    id = ring.prep_read(**args) do |c|
+      if c[:result] < 0
+        @runqueue << [f, RuntimeError.new('error')]
+      else
+        @runqueue << [f, c[:buffer]]
+      end
+    end
+    fiber_wait(id)
+  end
+
+  def write(**args)
+    f = Fiber.current
+    id = ring.prep_write(**args) do |c|
+      if c[:result] < 0
+        @runqueue << [f, RuntimeError.new('error')]
+      else
+        @runqueue << [f, c[:result]]
+      end
+    end
+    fiber_wait(id)
+  end
+
+  def sleep(interval)
+    f = Fiber.current
+    id = ring.prep_timeout(interval: interval) do |c|
+      if c[:result] == Errno::ECANCELED::Errno
+        @runqueue << [f, c[:result]]
+      else
+        @runqueue << [f, c[:result]]
+      end
+    end
+    fiber_wait(id)
+  end
+
+  def cancel_fiber_op(f)
+    op_id = f.__op_id
+    if op_id
+      ring.prep_cancel(op_id)
+    end
+  end
+
+  def move_on_after(interval)
+    f = Fiber.current
+    cancel_id = ring.prep_timeout(interval: interval) do |c|
+      if c[:result] != Errno::ECANCELED::Errno
+        cancel_fiber_op(f)
+      end
+    end
+    v = yield
+    ring.prep_cancel(cancel_id)
+    v
+  end
+end
+
+s = Scheduler.new
+
+puts "Going to sleep..."
+s.sleep 3
+puts "Woke up"
+
+s.move_on_after(1) do
+  puts "Going to sleep (move on after 1 second)"
+  s.sleep 3
+end
data/examples/http_server_multishot.rb
CHANGED
@@ -22,9 +22,7 @@ def setup_connection(fd)
 
   parser = Http::Parser.new
   parser.on_message_complete = -> {
-    http_send_response(fd, "Hello, world!\n")
-    @ring.prep_close(fd: fd)
-  end
+    http_send_response(fd, "Hello, world!\n")
   }
 
   http_prep_read(fd, parser)
data/examples/http_server_simpler.rb
ADDED
@@ -0,0 +1,34 @@
+require_relative '../lib/iou'
+require 'socket'
+require 'http/parser'
+
+socket = TCPServer.open('127.0.0.1', 1234)
+puts 'Listening on port 1234... (multishot read)'
+
+@ring = IOU::Ring.new
+@buffer_group = @ring.setup_buffer_ring(count: 1024, size: 4096)
+
+@ring.prep_accept(fd: socket.fileno, multishot: true) do |c|
+  http_handle_connection(c[:result]) if c[:result] > 0
+end
+
+def http_handle_connection(fd)
+  parser = Http::Parser.new
+  parser.on_message_complete = -> { http_send_response(fd, "Hello, world!\n") }
+
+  @ring.prep_read(fd: fd, multishot: true, buffer_group: @buffer_group) do |c|
+    if c[:result] > 0
+      parser << c[:buffer]
+    else
+      puts "Connection closed on fd #{fd}"
+    end
+  end
+end
+
+def http_send_response(fd, body)
+  msg = "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nConnection: keep-alive\r\nContent-Length: #{body.bytesize}\r\n\r\n#{body}"
+  @ring.prep_write(fd: fd, buffer: msg)
+end
+
+trap('SIGINT') { exit! }
+@ring.process_completions_loop
data/ext/iou/iou.h
CHANGED
@@ -48,19 +48,54 @@ struct sa_data {
   socklen_t len;
 };
 
-
+struct read_data {
+  VALUE buffer;
+  int buffer_offset;
+  unsigned bg_id;
+  int utf8_encoding;
+};
+
+enum op_type {
+  OP_accept,
+  OP_cancel,
+  OP_close,
+  OP_emit,
+  OP_nop,
+  OP_read,
+  OP_timeout,
+  OP_write
+};
+
+typedef struct OpCtx_t {
+  enum op_type type;
+  VALUE spec;
+  VALUE proc;
   union {
     struct __kernel_timespec ts;
     struct sa_data sa;
+    struct read_data rd;
   } data;
-
+  int stop_signal;
+} OpCtx_t;
 
 extern VALUE mIOU;
-extern VALUE 
+extern VALUE cOpCtx;
+
+enum op_type OpCtx_type_get(VALUE self);
+void OpCtx_type_set(VALUE self, enum op_type type);
+
+VALUE OpCtx_spec_get(VALUE self);
+VALUE OpCtx_proc_get(VALUE self);
+
+struct __kernel_timespec *OpCtx_ts_get(VALUE self);
+void OpCtx_ts_set(VALUE self, VALUE value);
+
+struct sa_data *OpCtx_sa_get(VALUE self);
 
-struct 
-void 
+struct read_data *OpCtx_rd_get(VALUE self);
+void OpCtx_rd_set(VALUE self, VALUE buffer, int buffer_offset, unsigned bg_id, int utf8_encoding);
 
-
+int OpCtx_stop_signal_p(VALUE self);
+void OpCtx_stop_signal_set(VALUE self);
 
 #endif // IOU_H
data/ext/iou/iou_ext.c
CHANGED
data/ext/iou/op_ctx.c
ADDED
@@ -0,0 +1,125 @@
+#include "iou.h"
+
+VALUE cOpCtx;
+
+static void OpCtx_mark(void *ptr) {
+  OpCtx_t *ctx = ptr;
+  rb_gc_mark_movable(ctx->spec);
+  rb_gc_mark_movable(ctx->proc);
+}
+
+static void OpCtx_compact(void *ptr) {
+  OpCtx_t *ctx = ptr;
+  ctx->spec = rb_gc_location(ctx->spec);
+  ctx->proc = rb_gc_location(ctx->proc);
+}
+
+static size_t OpCtx_size(const void *ptr) {
+  return sizeof(OpCtx_t);
+}
+
+static const rb_data_type_t OpCtx_type = {
+  "OpCtx",
+  {OpCtx_mark, 0, OpCtx_size, OpCtx_compact},
+  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+};
+
+static VALUE OpCtx_allocate(VALUE klass) {
+  OpCtx_t *osd = ALLOC(OpCtx_t);
+
+  return TypedData_Wrap_Struct(klass, &OpCtx_type, osd);
+}
+
+VALUE OpCtx_initialize(VALUE self, VALUE spec, VALUE proc) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  RB_OBJ_WRITE(self, &osd->spec, spec);
+  RB_OBJ_WRITE(self, &osd->proc, proc);
+  memset(&osd->data, 0, sizeof(osd->data));
+  osd->stop_signal = 0;
+  return self;
+}
+
+VALUE OpCtx_spec(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return osd->spec;
+}
+
+inline enum op_type OpCtx_type_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return osd->type;
+}
+
+inline void OpCtx_type_set(VALUE self, enum op_type type) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  osd->type = type;
+}
+
+inline VALUE OpCtx_spec_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return osd->spec;
+}
+
+inline VALUE OpCtx_proc_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return osd->proc;
+}
+
+struct __kernel_timespec *OpCtx_ts_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return &osd->data.ts;
+}
+
+inline struct __kernel_timespec double_to_timespec(double value) {
+  double integral;
+  double fraction = modf(value, &integral);
+  struct __kernel_timespec ts;
+  ts.tv_sec = integral;
+  ts.tv_nsec = floor(fraction * 1000000000);
+  return ts;
+}
+
+inline struct __kernel_timespec value_to_timespec(VALUE value) {
+  return double_to_timespec(NUM2DBL(value));
+}
+
+inline void OpCtx_ts_set(VALUE self, VALUE value) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  osd->data.ts = value_to_timespec(value);
+}
+
+inline struct sa_data *OpCtx_sa_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return &osd->data.sa;
+}
+
+inline struct read_data *OpCtx_rd_get(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return &osd->data.rd;
+}
+
+inline void OpCtx_rd_set(VALUE self, VALUE buffer, int buffer_offset, unsigned bg_id, int utf8_encoding) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  osd->data.rd.buffer = buffer;
+  osd->data.rd.buffer_offset = buffer_offset;
+  osd->data.rd.bg_id = bg_id;
+  osd->data.rd.utf8_encoding = utf8_encoding;
+}
+
+inline int OpCtx_stop_signal_p(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  return osd->stop_signal;
+}
+
+inline void OpCtx_stop_signal_set(VALUE self) {
+  OpCtx_t *osd = RTYPEDDATA_DATA(self);
+  osd->stop_signal = 1;
+}
+
+void Init_OpCtx(void) {
+  mIOU = rb_define_module("IOU");
+  cOpCtx = rb_define_class_under(mIOU, "OpCtx", rb_cObject);
+  rb_define_alloc_func(cOpCtx, OpCtx_allocate);
+
+  rb_define_method(cOpCtx, "initialize", OpCtx_initialize, 2);
+  rb_define_method(cOpCtx, "spec", OpCtx_spec, 0);
+}
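The Ruby-visible surface of the new class is small: `Ring#pending_ops` now maps op ids to `IOU::OpCtx` instances, and `OpCtx#spec` returns the spec hash that was previously stored directly in the hash. A short sketch of what that looks like, following the `OpCtxTest` cases added to the test suite in this diff:

```ruby
require 'iou'

ring = IOU::Ring.new
id = ring.prep_timeout(interval: 1)

ctx = ring.pending_ops[id]  # an IOU::OpCtx wrapping the op spec
ctx.spec[:op]               # => :timeout
ctx.spec[:id] == id         # => true
ctx.spec[:interval]         # => 1
```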
data/ext/iou/{iou.c → ring.c}
RENAMED
@@ -220,22 +220,28 @@ VALUE IOU_setup_buffer_ring(VALUE self, VALUE opts) {
   return UINT2NUM(bg_id);
 }
 
-inline 
+static inline VALUE setup_op_ctx(IOU_t *iou, enum op_type type, VALUE op, VALUE id, VALUE spec) {
   rb_hash_aset(spec, SYM_id, id);
   rb_hash_aset(spec, SYM_op, op);
-
-
-
+  VALUE block_proc = rb_block_given_p() ? rb_block_proc() : Qnil;
+  if (block_proc != Qnil)
+    rb_hash_aset(spec, SYM_block, block_proc);
+  VALUE ctx = rb_funcall(cOpCtx, rb_intern("new"), 2, spec, block_proc);
+  OpCtx_type_set(ctx, type);
+  rb_hash_aset(iou->pending_ops, id, ctx);
+  return ctx;
 }
 
-VALUE IOU_emit(VALUE self, VALUE 
+VALUE IOU_emit(VALUE self, VALUE spec) {
   IOU_t *iou = get_iou(self);
   unsigned id_i = ++iou->op_counter;
   VALUE id = UINT2NUM(id_i);
 
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-
+  VALUE ctx = setup_op_ctx(iou, OP_emit, SYM_emit, id, spec);
+  if (rb_hash_aref(spec, SYM_signal) == SYM_stop)
+    OpCtx_stop_signal_set(ctx);
 
   io_uring_prep_nop(sqe);
 
@@ -256,13 +262,11 @@ VALUE IOU_prep_accept(VALUE self, VALUE spec) {
   VALUE fd = values[0];
   VALUE multishot = rb_hash_aref(spec, SYM_multishot);
 
-  VALUE spec_data = rb_funcall(cOpSpecData, rb_intern("new"), 0);
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-  rb_hash_aset(spec, SYM_spec_data, spec_data);
-  store_spec(iou, spec, id, SYM_accept);
 
-
+  VALUE ctx = setup_op_ctx(iou, OP_accept, SYM_accept, id, spec);
+  struct sa_data *sa = OpCtx_sa_get(ctx);
   if (RTEST(multishot))
     io_uring_prep_multishot_accept(sqe, NUM2INT(fd), &sa->addr, &sa->len, 0);
   else
@@ -310,7 +314,8 @@ VALUE IOU_prep_close(VALUE self, VALUE spec) {
 
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-
+
+  setup_op_ctx(iou, OP_close, SYM_close, id, spec);
 
   io_uring_prep_close(sqe, NUM2INT(fd));
   iou->unsubmitted_sqes++;
@@ -358,10 +363,13 @@ VALUE prep_read_multishot(IOU_t *iou, VALUE spec) {
   get_required_kwargs(spec, values, 2, SYM_fd, SYM_buffer_group);
   int fd = NUM2INT(values[0]);
   unsigned bg_id = NUM2UINT(values[1]);
+  int utf8 = RTEST(rb_hash_aref(spec, SYM_utf8));
 
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-
+
+  VALUE ctx = setup_op_ctx(iou, OP_read, SYM_read, id, spec);
+  OpCtx_rd_set(ctx, Qnil, 0, bg_id, utf8);
 
   io_uring_prep_read_multishot(sqe, fd, 0, -1, bg_id);
   iou->unsubmitted_sqes++;
@@ -387,10 +395,13 @@ VALUE IOU_prep_read(VALUE self, VALUE spec) {
 
   VALUE buffer_offset = rb_hash_aref(spec, SYM_buffer_offset);
   int buffer_offset_i = NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset);
+  int utf8 = RTEST(rb_hash_aref(spec, SYM_utf8));
 
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-
+
+  VALUE ctx = setup_op_ctx(iou, OP_read, SYM_read, id, spec);
+  OpCtx_rd_set(ctx, buffer, buffer_offset_i, 0, utf8);
 
   void *ptr = prepare_read_buffer(buffer, len_i, buffer_offset_i);
   io_uring_prep_read(sqe, NUM2INT(fd), ptr, len_i, -1);
@@ -409,15 +420,13 @@ VALUE IOU_prep_timeout(VALUE self, VALUE spec) {
   VALUE multishot = rb_hash_aref(spec, SYM_multishot);
   unsigned flags = RTEST(multishot) ? IORING_TIMEOUT_MULTISHOT : 0;
 
-  VALUE spec_data = rb_funcall(cOpSpecData, rb_intern("new"), 0);
-  OpSpecData_ts_set(spec_data, interval);
-
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-  rb_hash_aset(spec, SYM_spec_data, spec_data);
-  store_spec(iou, spec, id, SYM_timeout);
 
-
+  VALUE ctx = setup_op_ctx(iou, OP_timeout, SYM_timeout, id, spec);
+  OpCtx_ts_set(ctx, interval);
+
+  io_uring_prep_timeout(sqe, OpCtx_ts_get(ctx), 0, flags);
   iou->unsubmitted_sqes++;
   return id;
 }
@@ -436,7 +445,8 @@ VALUE IOU_prep_write(VALUE self, VALUE spec) {
 
   struct io_uring_sqe *sqe = get_sqe(iou);
   sqe->user_data = id_i;
-
+
+  setup_op_ctx(iou, OP_write, SYM_write, id, spec);
 
   io_uring_prep_write(sqe, NUM2INT(fd), RSTRING_PTR(buffer), nbytes, -1);
   iou->unsubmitted_sqes++;
@@ -476,106 +486,112 @@ void *wait_for_completion_without_gvl(void *ptr) {
   return NULL;
 }
 
-static inline void update_read_buffer_from_buffer_ring(IOU_t *iou, VALUE 
+static inline void update_read_buffer_from_buffer_ring(IOU_t *iou, VALUE ctx, struct io_uring_cqe *cqe) {
   VALUE buf = Qnil;
   if (cqe->res == 0) {
     buf = rb_str_new_literal("");
     goto done;
   }
 
-
+  struct read_data *rd = OpCtx_rd_get(ctx);
   unsigned buf_idx = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 
-  struct buf_ring_descriptor *desc = iou->brs + bg_id;
+  struct buf_ring_descriptor *desc = iou->brs + rd->bg_id;
   char *src = desc->buf_base + desc->buf_size * buf_idx;
-  buf = rb_str_new(src, cqe->res);
+  buf = rd->utf8_encoding ? rb_utf8_str_new(src, cqe->res) : rb_str_new(src, cqe->res);
 
-  // 
+  // add buffer back to buffer ring
   io_uring_buf_ring_add(
     desc->br, src, desc->buf_size, buf_idx,
     io_uring_buf_ring_mask(desc->buf_count), 0
   );
   io_uring_buf_ring_advance(desc->br, 1);
 done:
-  rb_hash_aset(
+  rb_hash_aset(OpCtx_spec_get(ctx), SYM_buffer, buf);
   RB_GC_GUARD(buf);
   return;
 }
 
-static inline void update_read_buffer(IOU_t *iou, VALUE 
+static inline void update_read_buffer(IOU_t *iou, VALUE ctx, struct io_uring_cqe *cqe) {
   if (cqe->res < 0) return;
 
   if (cqe->flags & IORING_CQE_F_BUFFER) {
-    update_read_buffer_from_buffer_ring(iou, 
+    update_read_buffer_from_buffer_ring(iou, ctx, cqe);
     return;
   }
 
   if (cqe->res == 0) return;
 
-
-
-  int buffer_offset_i = NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset);
-  adjust_read_buffer_len(buffer, cqe->res, buffer_offset_i);
-}
-
-inline int is_stop_signal(VALUE op, VALUE spec) {
-  return (op == SYM_emit) && (rb_hash_aref(spec, SYM_signal) == SYM_stop);
+  struct read_data *rd = OpCtx_rd_get(ctx);
+  adjust_read_buffer_len(rd->buffer, cqe->res, rd->buffer_offset);
 }
 
-static inline VALUE 
+static inline VALUE get_cqe_ctx(IOU_t *iou, struct io_uring_cqe *cqe, int *stop_flag, VALUE *spec) {
   VALUE id = UINT2NUM(cqe->user_data);
-  VALUE 
+  VALUE ctx = rb_hash_aref(iou->pending_ops, id);
   VALUE result = INT2NUM(cqe->res);
-  if (NIL_P(
-
+  if (NIL_P(ctx)) {
+    *spec = make_empty_op_with_result(id, result);
+    return Qnil;
+  }
 
   // post completion work
-
-
-
-
-
+  switch (OpCtx_type_get(ctx)) {
+    case OP_read:
+      update_read_buffer(iou, ctx, cqe);
+      break;
+    case OP_emit:
+      if (stop_flag && OpCtx_stop_signal_p(ctx))
+        *stop_flag = 1;
+      break;
+    default:
+  }
 
   // for multishot ops, the IORING_CQE_F_MORE flag indicates more completions
   // will be coming, so we need to keep the spec. Otherwise, we remove it.
   if (!(cqe->flags & IORING_CQE_F_MORE))
     rb_hash_delete(iou->pending_ops, id);
 
-
-
-
+  *spec = OpCtx_spec_get(ctx);
+  rb_hash_aset(*spec, SYM_result, result);
+  RB_GC_GUARD(ctx);
+  return ctx;
 }
 
 VALUE IOU_wait_for_completion(VALUE self) {
   IOU_t *iou = get_iou(self);
 
-  wait_for_completion_ctx_t 
+  wait_for_completion_ctx_t cqe_ctx = {
     .iou = iou
   };
 
-  rb_thread_call_without_gvl(wait_for_completion_without_gvl, (void *)&
+  rb_thread_call_without_gvl(wait_for_completion_without_gvl, (void *)&cqe_ctx, RUBY_UBF_IO, 0);
 
-  if (unlikely(
-    rb_syserr_fail(-
+  if (unlikely(cqe_ctx.ret < 0)) {
+    rb_syserr_fail(-cqe_ctx.ret, strerror(-cqe_ctx.ret));
   }
-  io_uring_cqe_seen(&iou->ring, 
-
+  io_uring_cqe_seen(&iou->ring, cqe_ctx.cqe);
+
+  VALUE spec = Qnil;
+  get_cqe_ctx(iou, cqe_ctx.cqe, 0, &spec);
+  return spec;
 }
 
-static inline void process_cqe(IOU_t *iou, struct io_uring_cqe *cqe, int *stop_flag) {
+static inline void process_cqe(IOU_t *iou, struct io_uring_cqe *cqe, int block_given, int *stop_flag) {
   if (stop_flag) *stop_flag = 0;
-  VALUE spec
+  VALUE spec;
+  VALUE ctx = get_cqe_ctx(iou, cqe, stop_flag, &spec);
   if (stop_flag && *stop_flag) return;
 
-  if (
+  if (block_given)
     rb_yield(spec);
-  else {
-    VALUE 
-    if (RTEST(
-      rb_proc_call_with_block_kw(
+  else if (ctx != Qnil) {
+    VALUE proc = OpCtx_proc_get(ctx);
+    if (RTEST(proc))
+      rb_proc_call_with_block_kw(proc, 1, &spec, Qnil, Qnil);
   }
 
-  RB_GC_GUARD(
+  RB_GC_GUARD(ctx);
 }
 
 // copied from liburing/queue.c
@@ -585,7 +601,7 @@ static inline bool cq_ring_needs_flush(struct io_uring *ring) {
 
 // adapted from io_uring_peek_batch_cqe in liburing/queue.c
 // this peeks at cqes and handles each available cqe
-static inline int process_ready_cqes(IOU_t *iou, int *stop_flag) {
+static inline int process_ready_cqes(IOU_t *iou, int block_given, int *stop_flag) {
   unsigned total_count = 0;
 
 iterate:
@@ -596,7 +612,7 @@ iterate:
   io_uring_for_each_cqe(&iou->ring, head, cqe) {
     ++count;
     if (stop_flag) *stop_flag = 0;
-    process_cqe(iou, cqe, stop_flag);
+    process_cqe(iou, cqe, block_given, stop_flag);
     if (stop_flag && *stop_flag)
       break;
   }
@@ -618,6 +634,7 @@ done:
 
 VALUE IOU_process_completions(int argc, VALUE *argv, VALUE self) {
   IOU_t *iou = get_iou(self);
+  int block_given = rb_block_given_p();
  VALUE wait;
 
   rb_scan_args(argc, argv, "01", &wait);
@@ -639,15 +656,16 @@ VALUE IOU_process_completions(int argc, VALUE *argv, VALUE self) {
    }
     ++count;
     io_uring_cqe_seen(&iou->ring, ctx.cqe);
-    process_cqe(iou, ctx.cqe, 0);
+    process_cqe(iou, ctx.cqe, block_given, 0);
   }
 
-  count += process_ready_cqes(iou, 0);
+  count += process_ready_cqes(iou, block_given, 0);
   return UINT2NUM(count);
 }
 
 VALUE IOU_process_completions_loop(VALUE self) {
   IOU_t *iou = get_iou(self);
+  int block_given = rb_block_given_p();
   int stop_flag = 0;
   wait_for_completion_ctx_t ctx = { .iou = iou };
 
@@ -663,14 +681,14 @@ VALUE IOU_process_completions_loop(VALUE self) {
      rb_syserr_fail(-ctx.ret, strerror(-ctx.ret));
    }
    io_uring_cqe_seen(&iou->ring, ctx.cqe);
-    process_cqe(iou, ctx.cqe, &stop_flag);
+    process_cqe(iou, ctx.cqe, block_given, &stop_flag);
    if (stop_flag) goto done;
 
-    process_ready_cqes(iou, &stop_flag);
+    process_ready_cqes(iou, block_given, &stop_flag);
    if (stop_flag) goto done;
   }
 done:
-  return self;
+  return self;
 }
 
 #define MAKE_SYM(sym) ID2SYM(rb_intern(sym))
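The `block_given` flag threaded through `process_cqe` above means that calling `process_completions` (or `process_completions_loop`) with a block yields each completion spec to that block instead of dispatching to the per-op callbacks. A hedged sketch mirroring `test_process_completions_with_block` further down; the pipe is only for illustration:

```ruby
require 'iou'

ring = IOU::Ring.new
r, w = IO.pipe

ring.prep_write(fd: w.fileno, buffer: 'foo')
ring.prep_write(fd: w.fileno, buffer: 'bar')
ring.submit

ops = []
# With a block, completions are yielded to it rather than to op callbacks.
ring.process_completions(true) { |c| ops << [c[:op], c[:result]] }
```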
data/lib/iou/version.rb
CHANGED
data/test/test_iou.rb
CHANGED
@@ -16,7 +16,7 @@ class IOURingTest < IOURingBaseTest
     assert_equal({}, ring.pending_ops)
 
     id = ring.prep_timeout(interval: 1)
-    spec = ring.pending_ops[id]
+    spec = ring.pending_ops[id].spec
     assert_equal id, spec[:id]
     assert_equal :timeout, spec[:op]
     assert_equal 1, spec[:interval]
@@ -46,7 +46,7 @@ class PrepTimeoutTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :timeout, c[:op]
     assert_equal interval, c[:interval]
-    assert_equal -Errno::ETIME::Errno, c[:result]
+    assert_equal (-Errno::ETIME::Errno), c[:result]
   end
 
   def test_prep_timeout_invalid_args
@@ -74,7 +74,7 @@ class PrepCancelTest < IOURingBaseTest
     assert_equal timeout_id, c[:id]
     assert_equal :timeout, c[:op]
     assert_equal interval, c[:interval]
-    assert_equal -Errno::ECANCELED::Errno, c[:result]
+    assert_equal (-Errno::ECANCELED::Errno), c[:result]
   end
 
   def test_prep_cancel_kw
@@ -94,7 +94,7 @@ class PrepCancelTest < IOURingBaseTest
     assert_equal timeout_id, c[:id]
     assert_equal :timeout, c[:op]
     assert_equal interval, c[:interval]
-    assert_equal -Errno::ECANCELED::Errno, c[:result]
+    assert_equal (-Errno::ECANCELED::Errno), c[:result]
   end
 
   def test_prep_cancel_invalid_args
@@ -111,7 +111,7 @@ class PrepCancelTest < IOURingBaseTest
     ring.submit
     c = ring.wait_for_completion
     assert_equal cancel_id, c[:id]
-    assert_equal -Errno::ENOENT::Errno, c[:result]
+    assert_equal (-Errno::ENOENT::Errno), c[:result]
   end
 end
 
@@ -124,9 +124,9 @@ class PrepTimeoutMultishotTest < IOURingBaseTest
     t0 = monotonic_clock
     id = ring.prep_timeout(interval: interval, multishot: true) do |c|
       case c[:result]
-      when -Errno::ETIME::Errno
+      when (-Errno::ETIME::Errno)
         count += 1
-      when -Errno::ECANCELED::Errno
+      when (-Errno::ECANCELED::Errno)
         cancelled = true
       end
     end
@@ -151,7 +151,7 @@ class PrepTimeoutMultishotTest < IOURingBaseTest
 
     ring.prep_cancel(id)
     ring.submit
-
+    ring.process_completions(true)
     assert_equal true, cancelled
     assert_equal 3, count
     assert_nil ring.pending_ops[id]
@@ -207,7 +207,7 @@ class PrepWriteTest < IOURingBaseTest
   end
 
   def test_prep_write_invalid_fd
-    r, 
+    r, _w = IO.pipe
     s = 'foobar'
 
     id = ring.prep_write(fd: r.fileno, buffer: s)
@@ -220,7 +220,7 @@ class PrepWriteTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :write, c[:op]
     assert_equal r.fileno, c[:fd]
-    assert_equal -Errno::EBADF::Errno, c[:result]
+    assert_equal (-Errno::EBADF::Errno), c[:result]
   end
 end
 
@@ -266,7 +266,8 @@ class PrepNopTest < IOURingBaseTest
     assert_nil c[:op]
     assert_equal 0, c[:result]
   ensure
-    signaller
+    signaller&.kill rescue nil
+    waiter&.kill rescue nil
   end
 end
 
@@ -301,9 +302,9 @@ class ProcessCompletionsTest < IOURingBaseTest
   def test_process_completions_with_block
     r, w = IO.pipe
 
-
-
-
+    ring.prep_write(fd: w.fileno, buffer: 'foo')
+    ring.prep_write(fd: w.fileno, buffer: 'bar')
+    ring.prep_write(fd: w.fileno, buffer: 'baz')
     ring.submit
     sleep 0.01
 
@@ -326,8 +327,8 @@ class ProcessCompletionsTest < IOURingBaseTest
   def test_process_completions_op_with_block
     cc = []
 
-
-
+    ring.prep_timeout(interval: 0.01) { cc << 1 }
+    ring.prep_timeout(interval: 0.02) { cc << 2 }
     ring.submit
 
     ret = ring.process_completions
@@ -344,8 +345,8 @@ class ProcessCompletionsTest < IOURingBaseTest
   def test_process_completions_op_with_block_no_submit
     cc = []
 
-
-
+    ring.prep_timeout(interval: 0.01) { cc << 1 }
+    ring.prep_timeout(interval: 0.02) { cc << 2 }
 
     ret = ring.process_completions
     assert_equal 0, ret
@@ -406,7 +407,7 @@ class PrepReadTest < IOURingBaseTest
   end
 
   def test_prep_read_bad_fd
-
+    _r, w = IO.pipe
 
     id = ring.prep_read(fd: w.fileno, buffer: +'', len: 8192)
     assert_equal 1, id
@@ -418,7 +419,7 @@ class PrepReadTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :read, c[:op]
     assert_equal w.fileno, c[:fd]
-    assert_equal -Errno::EBADF::Errno, c[:result]
+    assert_equal (-Errno::EBADF::Errno), c[:result]
   end
 
   def test_prep_read_with_block
@@ -513,7 +514,7 @@ end
 
 class PrepCloseTest < IOURingBaseTest
   def test_prep_close
-
+    _r, w = IO.pipe
     fd = w.fileno
 
     id = ring.prep_close(fd: fd)
@@ -538,7 +539,7 @@ class PrepCloseTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :close, c[:op]
     assert_equal fd, c[:fd]
-    assert_equal -Errno::EBADF::Errno, c[:result]
+    assert_equal (-Errno::EBADF::Errno), c[:result]
 
   end
 
@@ -560,7 +561,7 @@ class PrepCloseTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :close, c[:op]
     assert_equal 9999, c[:fd]
-    assert_equal -Errno::EBADF::Errno, c[:result]
+    assert_equal (-Errno::EBADF::Errno), c[:result]
   end
 end
 
@@ -581,7 +582,7 @@ class PrepAcceptTest < IOURingBaseTest
     ring.submit
 
     t = Thread.new do
-
+      TCPSocket.new('127.0.0.1', @port)
     end
 
     c = ring.wait_for_completion
@@ -611,7 +612,7 @@ class PrepAcceptTest < IOURingBaseTest
     assert_equal id, c[:id]
     assert_equal :accept, c[:op]
     assert_equal STDIN.fileno, c[:fd]
-    assert_equal -Errno::ENOTSOCK::Errno, c[:result]
+    assert_equal (-Errno::ENOTSOCK::Errno), c[:result]
   end
 
   def test_prep_accept_multishot
@@ -622,7 +623,7 @@ class PrepAcceptTest < IOURingBaseTest
 
     connect = -> {
       tt << Thread.new do
-
+        TCPSocket.new('127.0.0.1', @port)
       end
     }
 
@@ -707,7 +708,6 @@ class PrepReadMultishotTest < IOURingBaseTest
   def test_prep_read_multishot
     r, w = IO.pipe
 
-    bb = []
     bgid = ring.setup_buffer_ring(size: 4096, count: 1024)
     assert_equal 0, bgid
 
@@ -717,6 +717,10 @@
 
     w << 'foo'
     c = ring.wait_for_completion
+
+    # make sure the OS supports this op (the liburing docs are not clear)
+    skip if c[:result] == (-Errno::EINVAL::Errno)
+
     assert_kind_of Hash, c
     assert_equal id, c[:id]
     assert_equal :read, c[:op]
@@ -747,13 +751,8 @@
   end
 
   def test_prep_read_multishot_utf8
-    # checking for UTF-8 incurs a serious performance degradation. We'll leave
-    # it for later...
-    skip
-
     r, w = IO.pipe
 
-    bb = []
     bgid = ring.setup_buffer_ring(size: 4096, count: 1024)
     assert_equal 0, bgid
 
@@ -763,6 +762,10 @@
 
     w << 'foo'
     c = ring.wait_for_completion
+
+    # make sure the OS supports this op (the liburing docs are not clear)
+    skip if c[:result] == (-Errno::EINVAL::Errno)
+
     assert_kind_of Hash, c
     assert_equal id, c[:id]
     assert_equal :read, c[:op]
@@ -792,3 +795,36 @@
     assert_nil ring.pending_ops[id]
   end
 end
+
+class OpCtxTest < IOURingBaseTest
+  def test_ctx_spec
+    id = ring.emit(foo: :bar)
+    assert_equal({ foo: :bar, id: 1, op: :emit }, ring.pending_ops[id].spec)
+  end
+
+  def test_ctx_type
+    id = ring.emit(v: 1)
+    assert_equal 1, id
+    assert_equal :emit, ring.pending_ops[id].spec[:op]
+
+    id = ring.prep_timeout(interval: 1)
+    assert_equal 2, id
+    assert_equal :timeout, ring.pending_ops[id].spec[:op]
+
+    id = ring.prep_read(fd: STDIN.fileno, buffer: +'', len: 42)
+    assert_equal 3, id
+    assert_equal :read, ring.pending_ops[id].spec[:op]
+
+    id = ring.prep_write(fd: STDOUT.fileno, buffer: '')
+    assert_equal 4, id
+    assert_equal :write, ring.pending_ops[id].spec[:op]
+
+    id = ring.prep_accept(fd: STDIN.fileno)
+    assert_equal 5, id
+    assert_equal :accept, ring.pending_ops[id].spec[:op]
+
+    id = ring.prep_close(fd: STDIN.fileno)
+    assert_equal 6, id
+    assert_equal :close, ring.pending_ops[id].spec[:op]
+  end
+end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: iou
 version: !ruby/object:Gem::Version
-  version: '0.1'
+  version: '0.2'
 platform: ruby
 authors:
 - Sharon Rosner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-09-
+date: 2024-09-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake-compiler
@@ -75,8 +75,10 @@ extra_rdoc_files:
 - README.md
 files:
 - ".github/dependabot.yml"
+- ".github/workflows/test.yml"
 - ".gitignore"
 - ".gitmodules"
+- CHANGELOG.md
 - Gemfile
 - LICENSE
 - README.md
@@ -84,13 +86,15 @@ files:
 - TODO.md
 - examples/echo_server.rb
 - examples/event_loop.rb
+- examples/fibers.rb
 - examples/http_server.rb
 - examples/http_server_multishot.rb
+- examples/http_server_simpler.rb
 - ext/iou/extconf.rb
-- ext/iou/iou.c
 - ext/iou/iou.h
 - ext/iou/iou_ext.c
-- ext/iou/op_spec_data.c
+- ext/iou/op_ctx.c
+- ext/iou/ring.c
 - iou.gemspec
 - lib/iou.rb
 - lib/iou/version.rb
data/ext/iou/op_spec_data.c
DELETED
@@ -1,61 +0,0 @@
-#include "iou.h"
-
-VALUE cOpSpecData;
-
-static size_t OpSpecData_size(const void *ptr) {
-  return sizeof(OpSpecData_t);
-}
-
-static const rb_data_type_t OpSpecData_type = {
-  "OpSpecData",
-  {0, 0, OpSpecData_size, 0},
-  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
-};
-
-static VALUE OpSpecData_allocate(VALUE klass) {
-  OpSpecData_t *osd = ALLOC(OpSpecData_t);
-
-  return TypedData_Wrap_Struct(klass, &OpSpecData_type, osd);
-}
-
-VALUE OpSpecData_initialize(VALUE self) {
-  OpSpecData_t *osd = RTYPEDDATA_DATA(self);
-  memset(&osd->data, 0, sizeof(osd->data));
-  return self;
-}
-
-struct __kernel_timespec *OpSpecData_ts_get(VALUE self) {
-  OpSpecData_t *osd = RTYPEDDATA_DATA(self);
-  return &osd->data.ts;
-}
-
-inline struct __kernel_timespec double_to_timespec(double value) {
-  double integral;
-  double fraction = modf(value, &integral);
-  struct __kernel_timespec ts;
-  ts.tv_sec = integral;
-  ts.tv_nsec = floor(fraction * 1000000000);
-  return ts;
-}
-
-inline struct __kernel_timespec value_to_timespec(VALUE value) {
-  return double_to_timespec(NUM2DBL(value));
-}
-
-void OpSpecData_ts_set(VALUE self, VALUE value) {
-  OpSpecData_t *osd = RTYPEDDATA_DATA(self);
-  osd->data.ts = value_to_timespec(value);
-}
-
-struct sa_data *OpSpecData_sa_get(VALUE self) {
-  OpSpecData_t *osd = RTYPEDDATA_DATA(self);
-  return &osd->data.sa;
-}
-
-void Init_OpSpecData(void) {
-  mIOU = rb_define_module("IOU");
-  cOpSpecData = rb_define_class_under(mIOU, "OpSpecData", rb_cObject);
-  rb_define_alloc_func(cOpSpecData, OpSpecData_allocate);
-
-  rb_define_method(cOpSpecData, "initialize", OpSpecData_initialize, 0);
-}