evt 0.3.1 → 0.3.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/README.md +12 -14
- data/evt.gemspec +1 -0
- data/ext/evt/epoll.h +2 -8
- data/ext/evt/evt.c +0 -1
- data/ext/evt/evt.h +0 -3
- data/ext/evt/uring.h +16 -26
- data/lib/evt/backends/bundled.rb +12 -8
- data/lib/evt/backends/epoll.rb +0 -4
- data/lib/evt/backends/iocp.rb +0 -4
- data/lib/evt/backends/kqueue.rb +0 -4
- data/lib/evt/backends/select.rb +3 -4
- data/lib/evt/backends/uring.rb +1 -5
- data/lib/evt/version.rb +1 -1
- metadata +16 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 945c1a802be8b4e03b88c44138a139367288409b27c83a6f2904d0ecc94d2a40
|
4
|
+
data.tar.gz: 7b88e16f4a082f536c5b0eb9a7f68e67e6e1f5ab4586dbe771f7c57b9004447a
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: f042da98ff5f85de3b4fce59a2726f2f4c46a1da8817abb524d700cd0413fc4118cb6c0a766a69abdc7e4ae5f0acdd35bc653b0c33116239c1e776f4c3218f23
|
7
|
+
data.tar.gz: 1824a4935d712e62268b9cad01f52c445ccecfcaa859b11fd93749f28ef3f259370b011af395ae269c9c03544b7ab5c1bb6ad3afd18cc8eea4a6d576e151ff76
|
data/README.md
CHANGED
@@ -15,13 +15,13 @@ The Event Library designed for the Ruby 3.0 Fiber Scheduler.
|
|
15
15
|
|
16
16
|
| | Linux | Windows | macOS | FreeBSD |
|
17
17
|
| --------------- | ----------- | ------------| ----------- | ----------- |
|
18
|
-
| io_uring |
|
18
|
+
| io_uring | ⚠️ (See 1) | ❌ | ❌ | ❌ |
|
19
19
|
| epoll | ✅ (See 2) | ❌ | ❌ | ❌ |
|
20
20
|
| kqueue | ❌ | ❌ | ✅ (⚠️ See 5) | ✅ |
|
21
21
|
| IOCP | ❌ | ❌ (⚠️See 3) | ❌ | ❌ |
|
22
22
|
| Ruby (`IO.select`) | ✅ Fallback | ✅ (⚠️See 4) | ✅ Fallback | ✅ Fallback |
|
23
23
|
|
24
|
-
1. when liburing is installed
|
24
|
+
1. when liburing is installed. (Currently fixing)
|
25
25
|
2. when kernel version >= 2.6.9
|
26
26
|
3. WOULD NOT WORK until `FILE_FLAG_OVERLAPPED` is included in I/O initialization process.
|
27
27
|
4. Some I/Os are not able to be nonblock under Windows. See [Scheduler Docs](https://docs.ruby-lang.org/en/master/doc/scheduler_md.html#label-IO).
|
@@ -29,22 +29,20 @@ The Event Library designed for the Ruby 3.0 Fiber Scheduler.
|
|
29
29
|
|
30
30
|
### Benchmark
|
31
31
|
|
32
|
-
The benchmark is running under `v0.
|
32
|
+
The benchmark is running under `v0.3.1` version. See `example.rb` in [midori](https://github.com/midori-rb/midori.rb) for test code, the test is running under a single-thread server.
|
33
33
|
|
34
34
|
The test command is `wrk -t4 -c8192 -d30s http://localhost:3001`.
|
35
35
|
|
36
36
|
All of the systems have set their file descriptor limit to maximum.
|
37
|
-
|
38
|
-
|
39
|
-
|
|
40
|
-
|
|
41
|
-
| Linux | Ryzen 2700x | 64GB |
|
42
|
-
| Linux | Ryzen 2700x | 64GB |
|
43
|
-
|
|
44
|
-
| macOS | i7-6820HQ | 16GB |
|
45
|
-
|
46
|
-
The benchmark uses an invalid parser, and `wrk` is very error-sensitive. The benchmark can't close the connection properly.
|
47
|
-
Use a valid parser, recent updates to my [midori](https://github.com/midori-rb/midori.rb) is able to use Ruby scheduler, which could achieve 247k+ req/s on a single thread with `kqueue` and 647k+ req/s with `epoll`.
|
37
|
+
On systems raising "Fiber unable to allocate memory", `sudo sysctl -w vm.max_map_count=1000000` is set.
|
38
|
+
|
39
|
+
| OS | CPU | Memory | Backend | req/s |
|
40
|
+
| ----- | ----------- | ------ | ---------------------- | --------------|
|
41
|
+
| Linux | Ryzen 2700x | 64GB | epoll | 1853259.47 |
|
42
|
+
| Linux | Ryzen 2700x | 64GB | io_uring | require fixes |
|
43
|
+
| Linux | Ryzen 2700x | 64GB | IO.select (using poll) | 1636849.15 |
|
44
|
+
| macOS | i7-6820HQ | 16GB | kqueue | 247370.37 |
|
45
|
+
| macOS | i7-6820HQ | 16GB | IO.select (using poll) | 323391.38 |
|
48
46
|
|
49
47
|
## Install
|
50
48
|
|
data/evt.gemspec
CHANGED
data/ext/evt/epoll.h
CHANGED
@@ -25,20 +25,14 @@ VALUE method_scheduler_epoll_register(VALUE self, VALUE io, VALUE interest) {
|
|
25
25
|
event.events |= EPOLLOUT;
|
26
26
|
}
|
27
27
|
|
28
|
+
event.events |=EPOLLONESHOT;
|
29
|
+
|
28
30
|
event.data.ptr = (void*) io;
|
29
31
|
|
30
32
|
epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event);
|
31
33
|
return Qnil;
|
32
34
|
}
|
33
35
|
|
34
|
-
VALUE method_scheduler_epoll_deregister(VALUE self, VALUE io) {
|
35
|
-
ID id_fileno = rb_intern("fileno");
|
36
|
-
int epfd = NUM2INT(rb_iv_get(self, "@epfd"));
|
37
|
-
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
38
|
-
epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL); // Require Linux 2.6.9 for NULL event.
|
39
|
-
return Qnil;
|
40
|
-
}
|
41
|
-
|
42
36
|
VALUE method_scheduler_epoll_wait(VALUE self) {
|
43
37
|
int n, epfd, i, event_flag, timeout;
|
44
38
|
VALUE next_timeout, obj_io, readables, writables, result;
|
data/ext/evt/evt.c
CHANGED
@@ -24,7 +24,6 @@ void Init_evt_ext()
|
|
24
24
|
rb_define_singleton_method(Bundled, "epoll_backend", method_scheduler_epoll_backend, 0);
|
25
25
|
rb_define_method(Bundled, "epoll_init_selector", method_scheduler_epoll_init, 0);
|
26
26
|
rb_define_method(Bundled, "epoll_register", method_scheduler_epoll_register, 2);
|
27
|
-
rb_define_method(Bundled, "epoll_deregister", method_scheduler_epoll_deregister, 1);
|
28
27
|
rb_define_method(Bundled, "epoll_wait", method_scheduler_epoll_wait, 0);
|
29
28
|
#endif
|
30
29
|
#if HAVE_SYS_EVENT_H
|
data/ext/evt/evt.h
CHANGED
@@ -13,7 +13,6 @@ void Init_evt_ext();
|
|
13
13
|
#if HAVE_LIBURING_H
|
14
14
|
VALUE method_scheduler_uring_init(VALUE self);
|
15
15
|
VALUE method_scheduler_uring_register(VALUE self, VALUE io, VALUE interest);
|
16
|
-
VALUE method_scheduler_uring_deregister(VALUE self, VALUE io);
|
17
16
|
VALUE method_scheduler_uring_wait(VALUE self);
|
18
17
|
VALUE method_scheduler_uring_backend(VALUE klass);
|
19
18
|
VALUE method_scheduler_uring_io_read(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length);
|
@@ -46,7 +45,6 @@ void Init_evt_ext();
|
|
46
45
|
#if HAVE_SYS_EPOLL_H
|
47
46
|
VALUE method_scheduler_epoll_init(VALUE self);
|
48
47
|
VALUE method_scheduler_epoll_register(VALUE self, VALUE io, VALUE interest);
|
49
|
-
VALUE method_scheduler_epoll_deregister(VALUE self, VALUE io);
|
50
48
|
VALUE method_scheduler_epoll_wait(VALUE self);
|
51
49
|
VALUE method_scheduler_epoll_backend(VALUE klass);
|
52
50
|
#include <sys/epoll.h>
|
@@ -55,7 +53,6 @@ void Init_evt_ext();
|
|
55
53
|
#if HAVE_SYS_EVENT_H
|
56
54
|
VALUE method_scheduler_kqueue_init(VALUE self);
|
57
55
|
VALUE method_scheduler_kqueue_register(VALUE self, VALUE io, VALUE interest);
|
58
|
-
VALUE method_scheduler_kqueue_deregister(VALUE self, VALUE io);
|
59
56
|
VALUE method_scheduler_kqueue_wait(VALUE self);
|
60
57
|
VALUE method_scheduler_kqueue_backend(VALUE klass);
|
61
58
|
#include <sys/event.h>
|
data/ext/evt/uring.h
CHANGED
@@ -79,6 +79,12 @@ VALUE method_scheduler_uring_wait(VALUE self) {
|
|
79
79
|
iovs = rb_ary_new();
|
80
80
|
|
81
81
|
TypedData_Get_Struct(rb_iv_get(self, "@ring"), struct io_uring, &type_uring_payload, ring);
|
82
|
+
|
83
|
+
struct __kernel_timespec ts;
|
84
|
+
ts.tv_sec = NUM2INT(next_timeout);
|
85
|
+
ts.tv_nsec = 0;
|
86
|
+
|
87
|
+
io_uring_wait_cqe_timeout(ring, cqes, &ts);
|
82
88
|
ret = io_uring_peek_batch_cqe(ring, cqes, URING_MAX_EVENTS);
|
83
89
|
|
84
90
|
for (i = 0; i < ret; i++) {
|
@@ -94,21 +100,14 @@ VALUE method_scheduler_uring_wait(VALUE self) {
|
|
94
100
|
rb_funcall(writables, id_push, 1, obj_io);
|
95
101
|
}
|
96
102
|
} else {
|
97
|
-
|
103
|
+
VALUE v = rb_ary_new2(2);
|
104
|
+
rb_ary_store(v, 0, obj_io);
|
105
|
+
rb_ary_store(v, 1, obj_io);
|
106
|
+
rb_funcall(iovs, id_push, 1, SIZET2NUM(cqes[i]->res));
|
98
107
|
}
|
99
108
|
io_uring_cqe_seen(ring, cqes[i]);
|
100
109
|
}
|
101
110
|
|
102
|
-
if (ret == 0) {
|
103
|
-
if (next_timeout != Qnil && NUM2INT(next_timeout) != -1) {
|
104
|
-
// sleep
|
105
|
-
time = next_timeout / 1000;
|
106
|
-
rb_funcall(rb_mKernel, id_sleep, 1, rb_float_new(time));
|
107
|
-
} else {
|
108
|
-
rb_funcall(rb_mKernel, id_sleep, 1, rb_float_new(0.001)); // To avoid infinite loop
|
109
|
-
}
|
110
|
-
}
|
111
|
-
|
112
111
|
result = rb_ary_new2(3);
|
113
112
|
rb_ary_store(result, 0, readables);
|
114
113
|
rb_ary_store(result, 1, writables);
|
@@ -132,27 +131,23 @@ VALUE method_scheduler_uring_io_read(VALUE self, VALUE io, VALUE buffer, VALUE o
|
|
132
131
|
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
133
132
|
|
134
133
|
read_buffer = (char*) xmalloc(NUM2SIZET(length));
|
135
|
-
struct iovec iov = {
|
136
|
-
.iov_base = read_buffer,
|
137
|
-
.iov_len = NUM2SIZET(length),
|
138
|
-
};
|
139
134
|
|
140
135
|
data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
|
141
136
|
data->is_poll = false;
|
142
137
|
data->io = io;
|
143
138
|
data->poll_mask = 0;
|
144
139
|
|
145
|
-
|
140
|
+
io_uring_prep_read(sqe, fd, read_buffer, 1, NUM2SIZET(length), NUM2SIZET(offset));
|
146
141
|
io_uring_sqe_set_data(sqe, data);
|
147
142
|
io_uring_submit(ring);
|
148
143
|
|
144
|
+
VALUE ret = rb_funcall(Fiber, rb_intern("yield"), 0); // Fiber.yield
|
145
|
+
|
149
146
|
VALUE result = rb_str_new(read_buffer, strlen(read_buffer));
|
150
147
|
if (buffer != Qnil) {
|
151
148
|
rb_str_append(buffer, result);
|
152
149
|
}
|
153
|
-
|
154
|
-
rb_funcall(Fiber, rb_intern("yield"), 0); // Fiber.yield
|
155
|
-
return result;
|
150
|
+
return ret;
|
156
151
|
}
|
157
152
|
|
158
153
|
VALUE method_scheduler_uring_io_write(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
|
@@ -170,21 +165,16 @@ VALUE method_scheduler_uring_io_write(VALUE self, VALUE io, VALUE buffer, VALUE
|
|
170
165
|
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
171
166
|
|
172
167
|
write_buffer = StringValueCStr(buffer);
|
173
|
-
struct iovec iov = {
|
174
|
-
.iov_base = write_buffer,
|
175
|
-
.iov_len = NUM2SIZET(length),
|
176
|
-
};
|
177
168
|
|
178
169
|
data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
|
179
170
|
data->is_poll = false;
|
180
171
|
data->io = io;
|
181
172
|
data->poll_mask = 0;
|
182
173
|
|
183
|
-
|
174
|
+
io_uring_prep_write(sqe, fd, write_buffer, NUM2SIZET(length), NUM2SIZET(offset));
|
184
175
|
io_uring_sqe_set_data(sqe, data);
|
185
176
|
io_uring_submit(ring);
|
186
|
-
rb_funcall(Fiber, rb_intern("yield"), 0);
|
187
|
-
return length;
|
177
|
+
return rb_funcall(Fiber, rb_intern("yield"), 0);
|
188
178
|
}
|
189
179
|
|
190
180
|
VALUE method_scheduler_uring_backend(VALUE klass) {
|
data/lib/evt/backends/bundled.rb
CHANGED
@@ -23,7 +23,7 @@ class Evt::Bundled
|
|
23
23
|
attr_reader :waiting
|
24
24
|
|
25
25
|
def next_timeout
|
26
|
-
_fiber, timeout = @waiting.min_by{|key, value| value}
|
26
|
+
_fiber, timeout = @waiting.min_by{ |key, value| value }
|
27
27
|
|
28
28
|
if timeout
|
29
29
|
offset = timeout - current_time
|
@@ -49,9 +49,10 @@ class Evt::Bundled
|
|
49
49
|
end
|
50
50
|
|
51
51
|
unless iovs.nil?
|
52
|
-
iovs&.each do |
|
52
|
+
iovs&.each do |v|
|
53
|
+
io, ret = v
|
53
54
|
fiber = @iovs.delete(io)
|
54
|
-
fiber&.resume
|
55
|
+
fiber&.resume(ret)
|
55
56
|
end
|
56
57
|
end
|
57
58
|
|
@@ -63,7 +64,7 @@ class Evt::Bundled
|
|
63
64
|
|
64
65
|
waiting.each do |fiber, timeout|
|
65
66
|
if timeout <= time
|
66
|
-
fiber.resume
|
67
|
+
fiber.resume if fiber.is_a? Fiber and fiber.alive?
|
67
68
|
else
|
68
69
|
@waiting[fiber] = timeout
|
69
70
|
end
|
@@ -78,7 +79,7 @@ class Evt::Bundled
|
|
78
79
|
end
|
79
80
|
|
80
81
|
ready.each do |fiber|
|
81
|
-
fiber.resume
|
82
|
+
fiber.resume if fiber.is_a? Fiber and fiber.alive?
|
82
83
|
end
|
83
84
|
end
|
84
85
|
end
|
@@ -100,7 +101,6 @@ class Evt::Bundled
|
|
100
101
|
@writable[io] = Fiber.current unless (events & IO::WRITABLE).zero?
|
101
102
|
self.register(io, events)
|
102
103
|
Fiber.yield
|
103
|
-
self.deregister(io)
|
104
104
|
true
|
105
105
|
end
|
106
106
|
|
@@ -150,8 +150,8 @@ class Evt::Bundled
|
|
150
150
|
end
|
151
151
|
|
152
152
|
# Collect closed streams in readables and writables
|
153
|
-
def collect
|
154
|
-
if @collect_counter < COLLECT_COUNTER_MAX
|
153
|
+
def collect(force=false)
|
154
|
+
if @collect_counter < COLLECT_COUNTER_MAX and !force
|
155
155
|
@collect_counter += 1
|
156
156
|
return
|
157
157
|
end
|
@@ -165,6 +165,10 @@ class Evt::Bundled
|
|
165
165
|
@writable.keys.each do |io|
|
166
166
|
@writable.delete(io) if io.closed?
|
167
167
|
end
|
168
|
+
|
169
|
+
@iovs.keys.each do |io|
|
170
|
+
@iovs.delete(io) if io.closed?
|
171
|
+
end
|
168
172
|
end
|
169
173
|
|
170
174
|
# Intercept the creation of a non-blocking fiber.
|
data/lib/evt/backends/epoll.rb
CHANGED
data/lib/evt/backends/iocp.rb
CHANGED
data/lib/evt/backends/kqueue.rb
CHANGED
data/lib/evt/backends/select.rb
CHANGED
data/lib/evt/backends/uring.rb
CHANGED
@@ -14,11 +14,7 @@ class Evt::Uring < Evt::Bundled
|
|
14
14
|
end
|
15
15
|
|
16
16
|
def register(io, interest)
|
17
|
-
uring_register(io,
|
18
|
-
end
|
19
|
-
|
20
|
-
def deregister(io)
|
21
|
-
# io_uring running under one-shot mode, no need to deregister
|
17
|
+
uring_register(io, interest)
|
22
18
|
end
|
23
19
|
|
24
20
|
def io_read(io, buffer, offset, length)
|
data/lib/evt/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: evt
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.3.
|
4
|
+
version: 0.3.2
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Delton Ding
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2020-12-
|
11
|
+
date: 2020-12-23 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: rake-compiler
|
@@ -38,6 +38,20 @@ dependencies:
|
|
38
38
|
- - "~>"
|
39
39
|
- !ruby/object:Gem::Version
|
40
40
|
version: 0.20.0
|
41
|
+
- !ruby/object:Gem::Dependency
|
42
|
+
name: minitest-reporters
|
43
|
+
requirement: !ruby/object:Gem::Requirement
|
44
|
+
requirements:
|
45
|
+
- - "~>"
|
46
|
+
- !ruby/object:Gem::Version
|
47
|
+
version: '1.4'
|
48
|
+
type: :development
|
49
|
+
prerelease: false
|
50
|
+
version_requirements: !ruby/object:Gem::Requirement
|
51
|
+
requirements:
|
52
|
+
- - "~>"
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: '1.4'
|
41
55
|
description: A low-level Event Handler designed for Ruby 3 Scheduler for better performance
|
42
56
|
email:
|
43
57
|
- dsh0416@gmail.com
|