evt 0.1.0 → 0.2.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +51 -0
- data/.gitignore +4 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +7 -0
- data/README.md +51 -20
- data/Rakefile +16 -0
- data/evt.gemspec +27 -0
- data/ext/evt/epoll.h +91 -0
- data/ext/evt/evt.c +34 -0
- data/ext/evt/evt.h +82 -0
- data/ext/evt/extconf.rb +12 -0
- data/ext/evt/iocp.h +126 -0
- data/ext/evt/kqueue.h +97 -0
- data/ext/evt/select.h +36 -0
- data/ext/evt/uring.h +201 -0
- data/lib/evt.rb +8 -0
- data/lib/evt/scheduler.rb +121 -0
- data/lib/evt/version.rb +5 -0
- metadata +40 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 792dc614f6216687c600f101b8fac515a71fc0249c4ca6af3f880cb71ee17eff
|
4
|
+
data.tar.gz: c64b75645bd2adb22a3947c7b239c9cde05a8fb1ac42a6e08f2cfe653bc20a84
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: '016090b4ba576eb10896099d3879445d3c65d3b8f6d9475731786639282c5e7f37ba1029e6d1201b165f9bfbf20193fb13017b5ea6af90792b60989f33691899'
|
7
|
+
data.tar.gz: 83d15ffca0e28b5c96f241197bbd97eabe1a935e01f71c6e4bb94a2eb9ad413148b2318d1b55a4df4b11444e989508b32cd7da9f1b19b839512d13b92b7034b9
|
@@ -0,0 +1,51 @@
|
|
1
|
+
name: CI Tests
|
2
|
+
on:
|
3
|
+
pull_request:
|
4
|
+
push:
|
5
|
+
branches:
|
6
|
+
- master
|
7
|
+
schedule:
|
8
|
+
- cron: '0 7 * * SUN'
|
9
|
+
jobs:
|
10
|
+
test:
|
11
|
+
strategy:
|
12
|
+
fail-fast: false
|
13
|
+
matrix:
|
14
|
+
include:
|
15
|
+
- { os: ubuntu-20.04, ruby: '3.0' }
|
16
|
+
- { os: ubuntu-20.04, ruby: ruby-head }
|
17
|
+
- { os: macos-11.0, ruby: '3.0' }
|
18
|
+
- { os: macos-11.0, ruby: ruby-head }
|
19
|
+
- { os: windows-2019, ruby: mingw }
|
20
|
+
- { os: windows-2019, ruby: mswin }
|
21
|
+
name: ${{ matrix.os }} ${{ matrix.ruby }}
|
22
|
+
runs-on: ${{ matrix.os }}
|
23
|
+
timeout-minutes: 5
|
24
|
+
steps:
|
25
|
+
- uses: actions/checkout@v2
|
26
|
+
- uses: ruby/setup-ruby@master
|
27
|
+
with:
|
28
|
+
ruby-version: ${{ matrix.ruby }}
|
29
|
+
bundler-cache: false
|
30
|
+
- name: Install Dependencies
|
31
|
+
run: |
|
32
|
+
gem install bundler
|
33
|
+
bundle install --jobs 4 --retry 3
|
34
|
+
- name: Compile
|
35
|
+
run: rake compile
|
36
|
+
- name: Test
|
37
|
+
run: rake
|
38
|
+
build:
|
39
|
+
runs-on: ubuntu-20.04
|
40
|
+
steps:
|
41
|
+
- uses: actions/checkout@v2
|
42
|
+
- uses: ruby/setup-ruby@master
|
43
|
+
with:
|
44
|
+
ruby-version: '3.0'
|
45
|
+
bundler-cache: false
|
46
|
+
- name: Install Dependencies
|
47
|
+
run: |
|
48
|
+
gem install bundler
|
49
|
+
bundle install --jobs 4 --retry 3
|
50
|
+
- name: Build
|
51
|
+
run: gem build evt.gemspec
|
data/.gitignore
CHANGED
data/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,74 @@
|
|
1
|
+
# Contributor Covenant Code of Conduct
|
2
|
+
|
3
|
+
## Our Pledge
|
4
|
+
|
5
|
+
In the interest of fostering an open and welcoming environment, we as
|
6
|
+
contributors and maintainers pledge to making participation in our project and
|
7
|
+
our community a harassment-free experience for everyone, regardless of age, body
|
8
|
+
size, disability, ethnicity, gender identity and expression, level of experience,
|
9
|
+
nationality, personal appearance, race, religion, or sexual identity and
|
10
|
+
orientation.
|
11
|
+
|
12
|
+
## Our Standards
|
13
|
+
|
14
|
+
Examples of behavior that contributes to creating a positive environment
|
15
|
+
include:
|
16
|
+
|
17
|
+
* Using welcoming and inclusive language
|
18
|
+
* Being respectful of differing viewpoints and experiences
|
19
|
+
* Gracefully accepting constructive criticism
|
20
|
+
* Focusing on what is best for the community
|
21
|
+
* Showing empathy towards other community members
|
22
|
+
|
23
|
+
Examples of unacceptable behavior by participants include:
|
24
|
+
|
25
|
+
* The use of sexualized language or imagery and unwelcome sexual attention or
|
26
|
+
advances
|
27
|
+
* Trolling, insulting/derogatory comments, and personal or political attacks
|
28
|
+
* Public or private harassment
|
29
|
+
* Publishing others' private information, such as a physical or electronic
|
30
|
+
address, without explicit permission
|
31
|
+
* Other conduct which could reasonably be considered inappropriate in a
|
32
|
+
professional setting
|
33
|
+
|
34
|
+
## Our Responsibilities
|
35
|
+
|
36
|
+
Project maintainers are responsible for clarifying the standards of acceptable
|
37
|
+
behavior and are expected to take appropriate and fair corrective action in
|
38
|
+
response to any instances of unacceptable behavior.
|
39
|
+
|
40
|
+
Project maintainers have the right and responsibility to remove, edit, or
|
41
|
+
reject comments, commits, code, wiki edits, issues, and other contributions
|
42
|
+
that are not aligned to this Code of Conduct, or to ban temporarily or
|
43
|
+
permanently any contributor for other behaviors that they deem inappropriate,
|
44
|
+
threatening, offensive, or harmful.
|
45
|
+
|
46
|
+
## Scope
|
47
|
+
|
48
|
+
This Code of Conduct applies both within project spaces and in public spaces
|
49
|
+
when an individual is representing the project or its community. Examples of
|
50
|
+
representing a project or community include using an official project e-mail
|
51
|
+
address, posting via an official social media account, or acting as an appointed
|
52
|
+
representative at an online or offline event. Representation of a project may be
|
53
|
+
further defined and clarified by project maintainers.
|
54
|
+
|
55
|
+
## Enforcement
|
56
|
+
|
57
|
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
58
|
+
reported by contacting the project team at dsh0416@gmail.com. All
|
59
|
+
complaints will be reviewed and investigated and will result in a response that
|
60
|
+
is deemed necessary and appropriate to the circumstances. The project team is
|
61
|
+
obligated to maintain confidentiality with regard to the reporter of an incident.
|
62
|
+
Further details of specific enforcement policies may be posted separately.
|
63
|
+
|
64
|
+
Project maintainers who do not follow or enforce the Code of Conduct in good
|
65
|
+
faith may face temporary or permanent repercussions as determined by other
|
66
|
+
members of the project's leadership.
|
67
|
+
|
68
|
+
## Attribution
|
69
|
+
|
70
|
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
71
|
+
available at [https://contributor-covenant.org/version/1/4][version]
|
72
|
+
|
73
|
+
[homepage]: https://contributor-covenant.org
|
74
|
+
[version]: https://contributor-covenant.org/version/1/4/
|
data/Gemfile
ADDED
data/README.md
CHANGED
@@ -1,40 +1,71 @@
|
|
1
1
|
# Evt
|
2
2
|
|
3
|
-
|
3
|
+
The Event Library that designed for Ruby 3.0.
|
4
4
|
|
5
|
-
|
5
|
+
**This gem is still under development, APIs and features are not stable. Advices and PRs are highly welcome.**
|
6
6
|
|
7
|
-
|
7
|
+
![CI Tests](https://github.com/dsh0416/evt/workflows/CI%20Tests/badge.svg)
|
8
8
|
|
9
|
-
|
9
|
+
## Features
|
10
10
|
|
11
|
-
```ruby
|
12
|
-
gem 'evt'
|
13
|
-
```
|
14
11
|
|
15
|
-
And then execute:
|
16
12
|
|
17
|
-
|
13
|
+
### IO Backend Support
|
18
14
|
|
19
|
-
|
15
|
+
| | Linux | Windows | macOS | FreeBSD |
|
16
|
+
| --------------- | ----------- | ------------| ----------- | ----------- |
|
17
|
+
| io_uring | ✅ (See 1) | ❌ | ❌ | ❌ |
|
18
|
+
| epoll | ✅ (See 2) | ❌ | ❌ | ❌ |
|
19
|
+
| kqueue | ❌ | ❌ | ✅ (⚠️ See 5) | ✅ |
|
20
|
+
| IOCP | ❌ | ❌ (⚠️See 3) | ❌ | ❌ |
|
21
|
+
| Ruby (`select`) | ✅ Fallback | ✅ (⚠️See 4) | ✅ Fallback | ✅ Fallback |
|
20
22
|
|
21
|
-
|
23
|
+
1. when liburing is installed
|
24
|
+
2. when kernel version >= 2.6.8
|
25
|
+
3. WOULD NOT WORK until `FILE_FLAG_OVERLAPPED` is included in I/O initialization process.
|
26
|
+
4. Some I/Os are not able to be nonblock under Windows. See [Scheduler Docs](https://docs.ruby-lang.org/en/master/doc/scheduler_md.html#label-IO).
|
27
|
+
5. `kqueue` performance in Darwin is very poor. **MAY BE DISABLED IN THE FUTURE.**
|
22
28
|
|
23
|
-
##
|
29
|
+
## Install
|
30
|
+
|
31
|
+
```bash
|
32
|
+
gem install evt
|
33
|
+
```
|
24
34
|
|
25
|
-
|
35
|
+
## Usage
|
26
36
|
|
27
|
-
|
37
|
+
```ruby
|
38
|
+
require 'evt'
|
28
39
|
|
29
|
-
|
40
|
+
rd, wr = IO.pipe
|
41
|
+
scheduler = Evt::Scheduler.new
|
30
42
|
|
31
|
-
|
43
|
+
Fiber.set_scheduler scheduler
|
32
44
|
|
33
|
-
|
45
|
+
Fiber.schedule do
|
46
|
+
message = rd.read(20)
|
47
|
+
puts message
|
48
|
+
rd.close
|
49
|
+
end
|
34
50
|
|
35
|
-
|
51
|
+
Fiber.schedule do
|
52
|
+
wr.write("Hello World")
|
53
|
+
wr.close
|
54
|
+
end
|
36
55
|
|
56
|
+
scheduler.run
|
37
57
|
|
38
|
-
|
58
|
+
# "Hello World"
|
59
|
+
```
|
39
60
|
|
40
|
-
|
61
|
+
## Roadmap
|
62
|
+
|
63
|
+
- [x] Support epoll/kqueue/select
|
64
|
+
- [x] Upgrade to the latest Scheduler API
|
65
|
+
- [x] Support io_uring
|
66
|
+
- [x] Support iov features of io_uring
|
67
|
+
- [x] Support IOCP (**NOT ENABLED YET**)
|
68
|
+
- [x] Setup tests with Ruby 3
|
69
|
+
- [ ] Support IOCP with iov features
|
70
|
+
- [ ] Setup more tests for production purpose
|
71
|
+
- [ ] Documentation for usages
|
data/Rakefile
ADDED
@@ -0,0 +1,16 @@
|
|
1
|
+
require "bundler/gem_tasks"
|
2
|
+
require "rake/testtask"
|
3
|
+
require 'rake/extensiontask'
|
4
|
+
|
5
|
+
spec = Gem::Specification.load('evt.gemspec')
|
6
|
+
Rake::ExtensionTask.new('evt_ext', spec) do |ext|
|
7
|
+
ext.ext_dir = "ext/evt"
|
8
|
+
end
|
9
|
+
|
10
|
+
Rake::TestTask.new(:test) do |t|
|
11
|
+
t.libs << "test"
|
12
|
+
t.libs << "lib"
|
13
|
+
t.test_files = FileList["test/**/*_test.rb"]
|
14
|
+
end
|
15
|
+
|
16
|
+
task :default => :test
|
data/evt.gemspec
ADDED
@@ -0,0 +1,27 @@
|
|
1
|
+
require_relative 'lib/evt/version'
|
2
|
+
|
3
|
+
Gem::Specification.new do |spec|
|
4
|
+
spec.name = "evt"
|
5
|
+
spec.version = Evt::VERSION
|
6
|
+
spec.authors = ["Delton Ding"]
|
7
|
+
spec.email = ["dsh0416@gmail.com"]
|
8
|
+
|
9
|
+
spec.summary = "A low-level Event Handler designed for Ruby 3 Scheduler"
|
10
|
+
spec.description = "A low-level Event Handler designed for Ruby 3 Scheduler for better performance"
|
11
|
+
spec.homepage = "https://github.com/dsh0416/evt"
|
12
|
+
spec.license = 'BSD-3-Clause'
|
13
|
+
spec.required_ruby_version = '>= 2.8.0.dev'
|
14
|
+
|
15
|
+
spec.metadata["homepage_uri"] = spec.homepage
|
16
|
+
spec.metadata["source_code_uri"] = "https://github.com/dsh0416/evt"
|
17
|
+
|
18
|
+
# Specify which files should be added to the gem when it is released.
|
19
|
+
# The `git ls-files -z` loads the files in the RubyGem that have been added into git.
|
20
|
+
spec.files = Dir.chdir(File.expand_path('..', __FILE__)) do
|
21
|
+
`git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features|.vscode)/}) }
|
22
|
+
end
|
23
|
+
spec.require_paths = ["lib"]
|
24
|
+
spec.extensions = ['ext/evt/extconf.rb']
|
25
|
+
|
26
|
+
spec.add_development_dependency 'rake-compiler', '~> 1.0'
|
27
|
+
end
|
data/ext/evt/epoll.h
ADDED
@@ -0,0 +1,91 @@
|
|
1
|
+
#ifndef EPOLL_H
|
2
|
+
#define EPOLL_G
|
3
|
+
#include "evt.h"
|
4
|
+
|
5
|
+
#if HAVE_SYS_EPOLL_H
|
6
|
+
VALUE method_scheduler_init(VALUE self) {
|
7
|
+
rb_iv_set(self, "@epfd", INT2NUM(epoll_create(1))); // Size of epoll is ignored after Linux 2.6.8.
|
8
|
+
return Qnil;
|
9
|
+
}
|
10
|
+
|
11
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
|
12
|
+
struct epoll_event event;
|
13
|
+
ID id_fileno = rb_intern("fileno");
|
14
|
+
int epfd = NUM2INT(rb_iv_get(self, "@epfd"));
|
15
|
+
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
16
|
+
int ruby_interest = NUM2INT(interest);
|
17
|
+
int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
|
18
|
+
int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
|
19
|
+
|
20
|
+
if (ruby_interest & readable) {
|
21
|
+
event.events |= EPOLLIN;
|
22
|
+
}
|
23
|
+
|
24
|
+
if (ruby_interest & writable) {
|
25
|
+
event.events |= EPOLLOUT;
|
26
|
+
}
|
27
|
+
|
28
|
+
event.data.ptr = (void*) io;
|
29
|
+
|
30
|
+
epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event);
|
31
|
+
return Qnil;
|
32
|
+
}
|
33
|
+
|
34
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io) {
|
35
|
+
ID id_fileno = rb_intern("fileno");
|
36
|
+
int epfd = NUM2INT(rb_iv_get(self, "@epfd"));
|
37
|
+
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
38
|
+
epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL); // Require Linux 2.6.9 for NULL event.
|
39
|
+
return Qnil;
|
40
|
+
}
|
41
|
+
|
42
|
+
VALUE method_scheduler_wait(VALUE self) {
|
43
|
+
int n, epfd, i, event_flag, timeout;
|
44
|
+
VALUE next_timeout, obj_io, readables, writables, result;
|
45
|
+
ID id_next_timeout = rb_intern("next_timeout");
|
46
|
+
ID id_push = rb_intern("push");
|
47
|
+
|
48
|
+
epfd = NUM2INT(rb_iv_get(self, "@epfd"));
|
49
|
+
next_timeout = rb_funcall(self, id_next_timeout, 0);
|
50
|
+
readables = rb_ary_new();
|
51
|
+
writables = rb_ary_new();
|
52
|
+
|
53
|
+
if (next_timeout == Qnil) {
|
54
|
+
timeout = -1;
|
55
|
+
} else {
|
56
|
+
timeout = NUM2INT(next_timeout);
|
57
|
+
}
|
58
|
+
|
59
|
+
struct epoll_event* events = (struct epoll_event*) xmalloc(sizeof(struct epoll_event) * EPOLL_MAX_EVENTS);
|
60
|
+
|
61
|
+
n = epoll_wait(epfd, events, EPOLL_MAX_EVENTS, timeout);
|
62
|
+
if (n < 0) {
|
63
|
+
rb_raise(rb_eIOError, "unable to call epoll_wait");
|
64
|
+
}
|
65
|
+
|
66
|
+
for (i = 0; i < n; i++) {
|
67
|
+
event_flag = events[i].events;
|
68
|
+
if (event_flag & EPOLLIN) {
|
69
|
+
obj_io = (VALUE) events[i].data.ptr;
|
70
|
+
rb_funcall(readables, id_push, 1, obj_io);
|
71
|
+
}
|
72
|
+
|
73
|
+
if (event_flag & EPOLLOUT) {
|
74
|
+
obj_io = (VALUE) events[i].data.ptr;
|
75
|
+
rb_funcall(writables, id_push, 1, obj_io);
|
76
|
+
}
|
77
|
+
}
|
78
|
+
|
79
|
+
result = rb_ary_new2(2);
|
80
|
+
rb_ary_store(result, 0, readables);
|
81
|
+
rb_ary_store(result, 1, writables);
|
82
|
+
|
83
|
+
xfree(events);
|
84
|
+
return result;
|
85
|
+
}
|
86
|
+
|
87
|
+
VALUE method_scheduler_backend(VALUE klass) {
|
88
|
+
return rb_str_new_cstr("epoll");
|
89
|
+
}
|
90
|
+
#endif
|
91
|
+
#endif
|
data/ext/evt/evt.c
ADDED
@@ -0,0 +1,34 @@
|
|
1
|
+
#ifndef EVT_C
|
2
|
+
#define EVT_C
|
3
|
+
|
4
|
+
#include "evt.h"
|
5
|
+
|
6
|
+
void Init_evt_ext()
|
7
|
+
{
|
8
|
+
Evt = rb_define_module("Evt");
|
9
|
+
Scheduler = rb_define_class_under(Evt, "Scheduler", rb_cObject);
|
10
|
+
Payload = rb_define_class_under(Scheduler, "Payload", rb_cObject);
|
11
|
+
Fiber = rb_define_class("Fiber", rb_cObject);
|
12
|
+
rb_define_singleton_method(Scheduler, "backend", method_scheduler_backend, 0);
|
13
|
+
rb_define_method(Scheduler, "init_selector", method_scheduler_init, 0);
|
14
|
+
rb_define_method(Scheduler, "register", method_scheduler_register, 2);
|
15
|
+
rb_define_method(Scheduler, "deregister", method_scheduler_deregister, 1);
|
16
|
+
rb_define_method(Scheduler, "wait", method_scheduler_wait, 0);
|
17
|
+
|
18
|
+
#if HAVELIBURING_H
|
19
|
+
rb_define_method(Scheduler, "io_read", method_scheduler_io_read, 4);
|
20
|
+
rb_define_method(Scheduler, "io_write", method_scheduler_io_read, 4);
|
21
|
+
#endif
|
22
|
+
}
|
23
|
+
|
24
|
+
#if HAVE_LIBURING_H
|
25
|
+
#include "uring.h"
|
26
|
+
#elif HAVE_SYS_EPOLL_H
|
27
|
+
#include "epoll.h"
|
28
|
+
#elif HAVE_SYS_EVENT_H
|
29
|
+
#include "kqueue.h"
|
30
|
+
#elif HAVE_WINDOWS_H
|
31
|
+
#include "select.h"
|
32
|
+
// #include "iocp.h"
|
33
|
+
#endif
|
34
|
+
#endif
|
data/ext/evt/evt.h
ADDED
@@ -0,0 +1,82 @@
|
|
1
|
+
#ifndef EVT_H
|
2
|
+
#define EVT_H
|
3
|
+
|
4
|
+
#include <ruby.h>
|
5
|
+
|
6
|
+
VALUE Evt = Qnil;
|
7
|
+
VALUE Scheduler = Qnil;
|
8
|
+
VALUE Payload = Qnil;
|
9
|
+
VALUE Fiber = Qnil;
|
10
|
+
|
11
|
+
void Init_evt_ext();
|
12
|
+
VALUE method_scheduler_init(VALUE self);
|
13
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest);
|
14
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io);
|
15
|
+
VALUE method_scheduler_wait(VALUE self);
|
16
|
+
VALUE method_scheduler_backend(VALUE klass);
|
17
|
+
#if HAVE_LIBURING_H
|
18
|
+
VALUE method_scheduler_io_read(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length);
|
19
|
+
VALUE method_scheduler_io_write(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length);
|
20
|
+
#endif
|
21
|
+
|
22
|
+
#if HAV_WINDOWS_H
|
23
|
+
VALUE method_scheduler_io_read(VALUE io, VALUE buffer, VALUE offset, VALUE length);
|
24
|
+
VALUE method_scheduler_io_write(VALUE io, VALUE buffer, VALUE offset, VALUE length);
|
25
|
+
#endif
|
26
|
+
|
27
|
+
#if HAVE_LIBURING_H
|
28
|
+
#include <liburing.h>
|
29
|
+
|
30
|
+
#define URING_ENTRIES 64
|
31
|
+
#define URING_MAX_EVENTS 64
|
32
|
+
|
33
|
+
struct uring_data {
|
34
|
+
bool is_poll;
|
35
|
+
short poll_mask;
|
36
|
+
VALUE io;
|
37
|
+
};
|
38
|
+
|
39
|
+
void uring_payload_free(void* data);
|
40
|
+
size_t uring_payload_size(const void* data);
|
41
|
+
|
42
|
+
static const rb_data_type_t type_uring_payload = {
|
43
|
+
.wrap_struct_name = "uring_payload",
|
44
|
+
.function = {
|
45
|
+
.dmark = NULL,
|
46
|
+
.dfree = uring_payload_free,
|
47
|
+
.dsize = uring_payload_size,
|
48
|
+
},
|
49
|
+
.data = NULL,
|
50
|
+
.flags = RUBY_TYPED_FREE_IMMEDIATELY,
|
51
|
+
};
|
52
|
+
#elif HAVE_SYS_EPOLL_H
|
53
|
+
#include <sys/epoll.h>
|
54
|
+
#define EPOLL_MAX_EVENTS 64
|
55
|
+
#elif HAVE_SYS_EVENT_H
|
56
|
+
#include <sys/event.h>
|
57
|
+
#define KQUEUE_MAX_EVENTS 64
|
58
|
+
#elif HAVE_WINDOWS_H
|
59
|
+
// #include <Windows.h>
|
60
|
+
// #define IOCP_MAX_EVENTS 64
|
61
|
+
|
62
|
+
// struct iocp_data {
|
63
|
+
// VALUE io;
|
64
|
+
// bool is_poll;
|
65
|
+
// int interest;
|
66
|
+
// };
|
67
|
+
|
68
|
+
// void iocp_payload_free(void* data);
|
69
|
+
// size_t iocp_payload_size(const void* data);
|
70
|
+
|
71
|
+
// static const rb_data_type_t type_iocp_payload = {
|
72
|
+
// .wrap_struct_name = "iocp_payload",
|
73
|
+
// .function = {
|
74
|
+
// .dmark = NULL,
|
75
|
+
// .dfree = iocp_payload_free,
|
76
|
+
// .dsize = iocp_payload_size,
|
77
|
+
// },
|
78
|
+
// .data = NULL,
|
79
|
+
// .flags = RUBY_TYPED_FREE_IMMEDIATELY,
|
80
|
+
// };
|
81
|
+
#endif
|
82
|
+
#endif
|
data/ext/evt/extconf.rb
ADDED
@@ -0,0 +1,12 @@
|
|
1
|
+
require 'mkmf'
|
2
|
+
extension_name = 'evt_ext'
|
3
|
+
dir_config(extension_name)
|
4
|
+
|
5
|
+
have_library('uring')
|
6
|
+
have_header('liburing.h')
|
7
|
+
have_header('sys/epoll.h')
|
8
|
+
have_header('sys/event.h')
|
9
|
+
have_header('Windows.h')
|
10
|
+
|
11
|
+
create_header
|
12
|
+
create_makefile(extension_name)
|
data/ext/evt/iocp.h
ADDED
@@ -0,0 +1,126 @@
|
|
1
|
+
#ifndef IOCP_H
|
2
|
+
#define IOCP_H
|
3
|
+
#include "evt.h"
|
4
|
+
|
5
|
+
#if HAVE_WINDOWS_H
|
6
|
+
void iocp_payload_free(void* data) {
|
7
|
+
CloseHandle((HANDLE) data);
|
8
|
+
}
|
9
|
+
|
10
|
+
size_t iocp_payload_size(const void* data) {
|
11
|
+
return sizeof(HANDLE);
|
12
|
+
}
|
13
|
+
|
14
|
+
VALUE method_scheduler_init(VALUE self) {
|
15
|
+
HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
|
16
|
+
rb_iv_set(self, "@iocp", TypedData_Wrap_Struct(Payload, &type_iocp_payload, iocp));
|
17
|
+
return Qnil;
|
18
|
+
}
|
19
|
+
|
20
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
|
21
|
+
HANDLE iocp;
|
22
|
+
VALUE iocp_obj = rb_iv_get(self, "@iocp");
|
23
|
+
struct iocp_data* data;
|
24
|
+
TypedData_Get_Struct(iocp_obj, HANDLE, &type_iocp_payload, iocp);
|
25
|
+
int fd = NUM2INT(rb_funcallv(io, rb_intern("fileno"), 0, 0));
|
26
|
+
HANDLE io_handler = (HANDLE)rb_w32_get_osfhandle(fd);
|
27
|
+
|
28
|
+
int ruby_interest = NUM2INT(interest);
|
29
|
+
int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
|
30
|
+
int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
|
31
|
+
data = (struct iocp_data*) xmalloc(sizeof(struct iocp_data));
|
32
|
+
data->io = io;
|
33
|
+
data->is_poll = true;
|
34
|
+
data->interest = 0;
|
35
|
+
|
36
|
+
if (ruby_interest & readable) {
|
37
|
+
interest |= readable;
|
38
|
+
}
|
39
|
+
|
40
|
+
if (ruby_interest & writable) {
|
41
|
+
interest |= writable;
|
42
|
+
}
|
43
|
+
|
44
|
+
HANDLE res = CreateIoCompletionPort(io_handler, iocp, (ULONG_PTR) data, 0);
|
45
|
+
printf("IO at address: 0x%08x\n", (void *)data);
|
46
|
+
|
47
|
+
return Qnil;
|
48
|
+
}
|
49
|
+
|
50
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io) {
|
51
|
+
return Qnil;
|
52
|
+
}
|
53
|
+
|
54
|
+
VALUE method_scheduler_wait(VALUE self) {
|
55
|
+
ID id_next_timeout = rb_intern("next_timeout");
|
56
|
+
ID id_push = rb_intern("push");
|
57
|
+
VALUE iocp_obj = rb_iv_get(self, "@iocp");
|
58
|
+
VALUE next_timeout = rb_funcall(self, id_next_timeout, 0);
|
59
|
+
|
60
|
+
int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
|
61
|
+
int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
|
62
|
+
|
63
|
+
HANDLE iocp;
|
64
|
+
OVERLAPPED_ENTRY lpCompletionPortEntries[IOCP_MAX_EVENTS];
|
65
|
+
ULONG ulNumEntriesRemoved;
|
66
|
+
TypedData_Get_Struct(iocp_obj, HANDLE, &type_iocp_payload, iocp);
|
67
|
+
|
68
|
+
DWORD timeout;
|
69
|
+
if (next_timeout == Qnil) {
|
70
|
+
timeout = 0x5000;
|
71
|
+
} else {
|
72
|
+
timeout = NUM2INT(next_timeout) * 1000; // seconds to milliseconds
|
73
|
+
}
|
74
|
+
|
75
|
+
DWORD NumberOfBytesTransferred;
|
76
|
+
LPOVERLAPPED pOverlapped;
|
77
|
+
ULONG_PTR CompletionKey;
|
78
|
+
|
79
|
+
BOOL res = GetQueuedCompletionStatus(iocp, &NumberOfBytesTransferred, &CompletionKey, &pOverlapped, timeout);
|
80
|
+
// BOOL res = GetQueuedCompletionStatusEx(
|
81
|
+
// iocp, lpCompletionPortEntries, IOCP_MAX_EVENTS, &ulNumEntriesRemoved, timeout, TRUE);
|
82
|
+
|
83
|
+
VALUE result = rb_ary_new2(2);
|
84
|
+
|
85
|
+
VALUE readables = rb_ary_new();
|
86
|
+
VALUE writables = rb_ary_new();
|
87
|
+
|
88
|
+
rb_ary_store(result, 0, readables);
|
89
|
+
rb_ary_store(result, 1, writables);
|
90
|
+
|
91
|
+
if (!result) {
|
92
|
+
return result;
|
93
|
+
}
|
94
|
+
|
95
|
+
printf("--------- Received! ---------\n");
|
96
|
+
printf("Received IO at address: 0x%08x\n", (void *)CompletionKey);
|
97
|
+
printf("dwNumberOfBytesTransferred: %lld\n", NumberOfBytesTransferred);
|
98
|
+
|
99
|
+
// if (ulNumEntriesRemoved > 0) {
|
100
|
+
// printf("Entries: %ld\n", ulNumEntriesRemoved);
|
101
|
+
// }
|
102
|
+
|
103
|
+
// for (ULONG i = 0; i < ulNumEntriesRemoved; i++) {
|
104
|
+
// OVERLAPPED_ENTRY entry = lpCompletionPortEntries[i];
|
105
|
+
|
106
|
+
// struct iocp_data *data = (struct iocp_data*) entry.lpCompletionKey;
|
107
|
+
|
108
|
+
// int interest = data->interest;
|
109
|
+
// VALUE obj_io = data->io;
|
110
|
+
// if (interest & readable) {
|
111
|
+
// rb_funcall(readables, id_push, 1, obj_io);
|
112
|
+
// } else if (interest & writable) {
|
113
|
+
// rb_funcall(writables, id_push, 1, obj_io);
|
114
|
+
// }
|
115
|
+
|
116
|
+
// xfree(data);
|
117
|
+
// }
|
118
|
+
|
119
|
+
return result;
|
120
|
+
}
|
121
|
+
|
122
|
+
VALUE method_scheduler_backend(VALUE klass) {
|
123
|
+
return rb_str_new_cstr("iocp");
|
124
|
+
}
|
125
|
+
#endif
|
126
|
+
#endif
|
data/ext/evt/kqueue.h
ADDED
@@ -0,0 +1,97 @@
|
|
1
|
+
#ifndef KQUEUE_H
|
2
|
+
#define KQUEUE_H
|
3
|
+
#include "evt.h"
|
4
|
+
|
5
|
+
#if HAVE_SYS_EVENT_H
|
6
|
+
|
7
|
+
VALUE method_scheduler_init(VALUE self) {
|
8
|
+
rb_iv_set(self, "@kq", INT2NUM(kqueue()));
|
9
|
+
return Qnil;
|
10
|
+
}
|
11
|
+
|
12
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
|
13
|
+
struct kevent event;
|
14
|
+
u_short event_flags = 0;
|
15
|
+
ID id_fileno = rb_intern("fileno");
|
16
|
+
int kq = NUM2INT(rb_iv_get(self, "@kq"));
|
17
|
+
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
18
|
+
int ruby_interest = NUM2INT(interest);
|
19
|
+
int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
|
20
|
+
int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
|
21
|
+
|
22
|
+
if (ruby_interest & readable) {
|
23
|
+
event_flags |= EVFILT_READ;
|
24
|
+
}
|
25
|
+
|
26
|
+
if (ruby_interest & writable) {
|
27
|
+
event_flags |= EVFILT_WRITE;
|
28
|
+
}
|
29
|
+
|
30
|
+
EV_SET(&event, fd, event_flags, EV_ADD|EV_ENABLE, 0, 0, (void*) io);
|
31
|
+
kevent(kq, &event, 1, NULL, 0, NULL); // TODO: Check the return value
|
32
|
+
return Qnil;
|
33
|
+
}
|
34
|
+
|
35
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io) {
|
36
|
+
struct kevent event;
|
37
|
+
ID id_fileno = rb_intern("fileno");
|
38
|
+
int kq = NUM2INT(rb_iv_get(self, "@kq"));
|
39
|
+
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
40
|
+
EV_SET(&event, fd, 0, EV_DELETE, 0, 0, (void*) io);
|
41
|
+
kevent(kq, &event, 1, NULL, 0, (void*) io); // TODO: Check the return value
|
42
|
+
return Qnil;
|
43
|
+
}
|
44
|
+
|
45
|
+
VALUE method_scheduler_wait(VALUE self) {
|
46
|
+
int n, kq, i;
|
47
|
+
u_short event_flags = 0;
|
48
|
+
|
49
|
+
struct kevent* events; // Event Triggered
|
50
|
+
struct timespec timeout;
|
51
|
+
VALUE next_timeout, obj_io, readables, writables, result;
|
52
|
+
ID id_next_timeout = rb_intern("next_timeout");
|
53
|
+
ID id_push = rb_intern("push");
|
54
|
+
|
55
|
+
kq = NUM2INT(rb_iv_get(self, "@kq"));
|
56
|
+
next_timeout = rb_funcall(self, id_next_timeout, 0);
|
57
|
+
readables = rb_ary_new();
|
58
|
+
writables = rb_ary_new();
|
59
|
+
|
60
|
+
events = (struct kevent*) xmalloc(sizeof(struct kevent) * KQUEUE_MAX_EVENTS);
|
61
|
+
|
62
|
+
if (next_timeout == Qnil || NUM2INT(next_timeout) == -1) {
|
63
|
+
n = kevent(kq, NULL, 0, events, KQUEUE_MAX_EVENTS, NULL);
|
64
|
+
} else {
|
65
|
+
timeout.tv_sec = next_timeout / 1000;
|
66
|
+
timeout.tv_nsec = next_timeout % 1000 * 1000 * 1000;
|
67
|
+
n = kevent(kq, NULL, 0, events, KQUEUE_MAX_EVENTS, &timeout);
|
68
|
+
}
|
69
|
+
|
70
|
+
// TODO: Check if n >= 0
|
71
|
+
for (i = 0; i < n; i++) {
|
72
|
+
event_flags = events[i].filter;
|
73
|
+
printf("event flags: %d\n", event_flags);
|
74
|
+
if (event_flags & EVFILT_READ) {
|
75
|
+
obj_io = (VALUE) events[i].udata;
|
76
|
+
rb_funcall(readables, id_push, 1, obj_io);
|
77
|
+
}
|
78
|
+
|
79
|
+
if (event_flags & EVFILT_WRITE) {
|
80
|
+
obj_io = (VALUE) events[i].udata;
|
81
|
+
rb_funcall(writables, id_push, 1, obj_io);
|
82
|
+
}
|
83
|
+
}
|
84
|
+
|
85
|
+
result = rb_ary_new2(2);
|
86
|
+
rb_ary_store(result, 0, readables);
|
87
|
+
rb_ary_store(result, 1, writables);
|
88
|
+
|
89
|
+
xfree(events);
|
90
|
+
return result;
|
91
|
+
}
|
92
|
+
|
93
|
+
VALUE method_scheduler_backend(VALUE klass) {
|
94
|
+
return rb_str_new_cstr("kqueue");
|
95
|
+
}
|
96
|
+
#endif
|
97
|
+
#endif
|
data/ext/evt/select.h
ADDED
@@ -0,0 +1,36 @@
|
|
1
|
+
#ifndef SELECT_H
|
2
|
+
#define SELECT_H
|
3
|
+
#include "evt.h"
|
4
|
+
|
5
|
+
VALUE method_scheduler_init(VALUE self) {
|
6
|
+
return Qnil;
|
7
|
+
}
|
8
|
+
|
9
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
|
10
|
+
return Qnil;
|
11
|
+
}
|
12
|
+
|
13
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io) {
|
14
|
+
return Qnil;
|
15
|
+
}
|
16
|
+
|
17
|
+
VALUE method_scheduler_wait(VALUE self) {
|
18
|
+
// return IO.select(@readable.keys, @writable.keys, [], next_timeout)
|
19
|
+
VALUE readable, writable, readable_keys, writable_keys, next_timeout;
|
20
|
+
ID id_select = rb_intern("select");
|
21
|
+
ID id_next_timeout = rb_intern("next_timeout");
|
22
|
+
|
23
|
+
readable = rb_iv_get(self, "@readable");
|
24
|
+
writable = rb_iv_get(self, "@writable");
|
25
|
+
|
26
|
+
readable_keys = rb_funcall(readable, rb_intern("keys"), 0);
|
27
|
+
writable_keys = rb_funcall(writable, rb_intern("keys"), 0);
|
28
|
+
next_timeout = rb_funcall(self, id_next_timeout, 0);
|
29
|
+
|
30
|
+
return rb_funcall(rb_cIO, id_select, 4, readable_keys, writable_keys, rb_ary_new(), next_timeout);
|
31
|
+
}
|
32
|
+
|
33
|
+
VALUE method_scheduler_backend(VALUE klass) {
|
34
|
+
return rb_str_new_cstr("ruby");
|
35
|
+
}
|
36
|
+
#endif
|
data/ext/evt/uring.h
ADDED
@@ -0,0 +1,201 @@
|
|
1
|
+
#ifndef URING_H
|
2
|
+
#define URING_H
|
3
|
+
#include "evt.h"
|
4
|
+
#if HAVE_LIBURING_H
|
5
|
+
void uring_payload_free(void* data) {
|
6
|
+
// TODO: free the uring_data structs if the payload is freed before all IO responds
|
7
|
+
io_uring_queue_exit((struct io_uring*) data);
|
8
|
+
xfree(data);
|
9
|
+
}
|
10
|
+
|
11
|
+
size_t uring_payload_size(const void* data) {
|
12
|
+
return sizeof(struct io_uring);
|
13
|
+
}
|
14
|
+
|
15
|
+
VALUE method_scheduler_init(VALUE self) {
|
16
|
+
int ret;
|
17
|
+
struct io_uring* ring;
|
18
|
+
ring = xmalloc(sizeof(struct io_uring));
|
19
|
+
ret = io_uring_queue_init(URING_ENTRIES, ring, 0);
|
20
|
+
if (ret < 0) {
|
21
|
+
rb_raise(rb_eIOError, "unable to initalize io_uring");
|
22
|
+
}
|
23
|
+
rb_iv_set(self, "@ring", TypedData_Wrap_Struct(Payload, &type_uring_payload, ring));
|
24
|
+
return Qnil;
|
25
|
+
}
|
26
|
+
|
27
|
+
VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
|
28
|
+
VALUE ring_obj;
|
29
|
+
struct io_uring* ring;
|
30
|
+
struct io_uring_sqe *sqe;
|
31
|
+
struct uring_data *data;
|
32
|
+
short poll_mask = 0;
|
33
|
+
ID id_fileno = rb_intern("fileno");
|
34
|
+
|
35
|
+
ring_obj = rb_iv_get(self, "@ring");
|
36
|
+
TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
|
37
|
+
sqe = io_uring_get_sqe(ring);
|
38
|
+
int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
|
39
|
+
|
40
|
+
int ruby_interest = NUM2INT(interest);
|
41
|
+
int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
|
42
|
+
int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
|
43
|
+
|
44
|
+
if (ruby_interest & readable) {
|
45
|
+
poll_mask |= POLL_IN;
|
46
|
+
}
|
47
|
+
|
48
|
+
if (ruby_interest & writable) {
|
49
|
+
poll_mask |= POLL_OUT;
|
50
|
+
}
|
51
|
+
|
52
|
+
data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
|
53
|
+
data->is_poll = true;
|
54
|
+
data->io = io;
|
55
|
+
data->poll_mask = poll_mask;
|
56
|
+
|
57
|
+
io_uring_prep_poll_add(sqe, fd, poll_mask);
|
58
|
+
io_uring_sqe_set_data(sqe, data);
|
59
|
+
io_uring_submit(ring);
|
60
|
+
return Qnil;
|
61
|
+
}
|
62
|
+
|
63
|
+
VALUE method_scheduler_deregister(VALUE self, VALUE io) {
|
64
|
+
// io_uring runs under oneshot mode. No need to deregister.
|
65
|
+
return Qnil;
|
66
|
+
}
|
67
|
+
|
68
|
+
// Evt::Scheduler#wait (io_uring backend): harvest completed events.
//
// Drains up to URING_MAX_EVENTS completion-queue entries without blocking,
// sorts the associated IO objects into readable / writable / iov buckets,
// and returns a 3-element Ruby Array: [readables, writables, iovs].
// When nothing has completed, sleeps until the scheduler's next timer
// (1ms floor) so the caller's run loop does not spin.
VALUE method_scheduler_wait(VALUE self) {
    struct io_uring* ring;
    struct io_uring_cqe *cqes[URING_MAX_EVENTS];
    struct uring_data *data;
    VALUE next_timeout, obj_io, readables, writables, iovs, result;
    unsigned ret, i;
    double time = 0.0;
    short poll_events;

    ID id_next_timeout = rb_intern("next_timeout");
    ID id_push = rb_intern("push");
    ID id_sleep = rb_intern("sleep");

    next_timeout = rb_funcall(self, id_next_timeout, 0);
    readables = rb_ary_new();
    writables = rb_ary_new();
    iovs = rb_ary_new();

    TypedData_Get_Struct(rb_iv_get(self, "@ring"), struct io_uring, &type_uring_payload, ring);
    ret = io_uring_peek_batch_cqe(ring, cqes, URING_MAX_EVENTS);

    for (i = 0; i < ret; i++) {
        data = (struct uring_data*) io_uring_cqe_get_data(cqes[i]);
        poll_events = data->poll_mask;
        obj_io = data->io;

        // readv/writev completions carry no poll mask; wake the fiber
        // parked in @iovs instead.
        if (!data->is_poll) {
            rb_funcall(iovs, id_push, 1, obj_io);
        }

        if (poll_events & POLL_IN) {
            rb_funcall(readables, id_push, 1, obj_io);
        }

        if (poll_events & POLL_OUT) {
            rb_funcall(writables, id_push, 1, obj_io);
        }

        xfree(data);
    }

    // BUGFIX: io_uring_peek_batch_cqe does not consume entries. Without
    // advancing the CQ ring, the same CQEs (whose payloads were just freed
    // above) are returned again on the next call — a use-after-free.
    io_uring_cq_advance(ring, ret);

    if (ret == 0) {
        if (next_timeout != Qnil && NUM2INT(next_timeout) != -1) {
            // BUGFIX: next_timeout is a Ruby VALUE; the old code divided the
            // tagged word by 1000 and then passed raw C doubles through
            // RFLOAT_VALUE (which *reads* a double out of a Float VALUE).
            // Convert properly in both directions.
            time = NUM2DBL(next_timeout) / 1000.0;
            rb_funcall(rb_mKernel, id_sleep, 1, DBL2NUM(time));
        } else {
            rb_funcall(rb_mKernel, id_sleep, 1, DBL2NUM(0.001)); // avoid a busy loop
        }
    }

    result = rb_ary_new2(3);
    rb_ary_store(result, 0, readables);
    rb_ary_store(result, 1, writables);
    rb_ary_store(result, 2, iovs);

    return result;
}
|
124
|
+
|
125
|
+
// Evt::Scheduler#io_read (io_uring backend): submit an async readv for up to
// `length` bytes from `io` at `offset`, append the bytes to `buffer` when it
// is non-nil, and return them as a new String.
//
// NOTE(review): several lifetime problems look latent here — confirm against
// the io_uring completion model before relying on this path:
//   * `result` is built from `read_buffer` immediately after io_uring_submit,
//     but submission does not imply completion: the buffer is typically still
//     uninitialized, so strlen() on it is undefined behavior.
//   * `read_buffer` is xfree'd while the kernel may still be writing into it.
//   * `iov` lives on this C stack frame, yet the kernel consumes it
//     asynchronously after this function returns (dangling pointer).
VALUE method_scheduler_io_read(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
    struct io_uring* ring;
    struct uring_data *data;
    char* read_buffer;
    ID id_fileno = rb_intern("fileno");
    // @iovs[io] = Fiber.current — remember which fiber to resume on completion
    VALUE iovs = rb_iv_get(self, "@iovs");
    rb_hash_aset(iovs, io, rb_funcall(Fiber, rb_intern("current"), 0));
    // Fetch the native ring and prepare a submission entry for this fd.
    VALUE ring_obj = rb_iv_get(self, "@ring");
    TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    int fd = NUM2INT(rb_funcall(io, id_fileno, 0));

    read_buffer = (char*) xmalloc(NUM2SIZET(length));
    struct iovec iov = {
        .iov_base = read_buffer,
        .iov_len = NUM2SIZET(length),
    };

    // Completion payload; freed by method_scheduler_wait after the CQE fires.
    data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
    data->is_poll = false;   // readv completion, not a poll event
    data->io = io;
    data->poll_mask = 0;

    io_uring_prep_readv(sqe, fd, &iov, 1, NUM2SIZET(offset));
    io_uring_sqe_set_data(sqe, data);
    io_uring_submit(ring);

    // NOTE(review): read happens asynchronously — see header comment; this
    // reads the buffer before the kernel has (necessarily) filled it.
    VALUE result = rb_str_new(read_buffer, strlen(read_buffer));
    xfree(read_buffer);
    if (buffer != Qnil) {
        rb_str_append(buffer, result);
    }

    rb_funcall(Fiber, rb_intern("yield"), 0); // park until #wait resumes us
    return result;
}
|
163
|
+
|
164
|
+
// Evt::Scheduler#io_write (io_uring backend): submit an async writev of
// `length` bytes from `buffer` to `io` at `offset`; returns `length`.
//
// NOTE(review): lifetime concerns to confirm before relying on this path:
//   * `iov` lives on this C stack frame, yet the kernel consumes it
//     asynchronously after this function returns (dangling pointer).
//   * StringValueCStr raises on embedded NULs and yields a pointer into the
//     Ruby string; if `buffer` is mutated or collected before the kernel
//     completes the write, the data pointer is stale.
//   * `length` is returned unconditionally, before completion, so short
//     writes are not reported.
VALUE method_scheduler_io_write(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
    struct io_uring* ring;
    struct uring_data *data;
    char* write_buffer;
    ID id_fileno = rb_intern("fileno");
    // @iovs[io] = Fiber.current — remember which fiber to resume on completion
    VALUE iovs = rb_iv_get(self, "@iovs");
    rb_hash_aset(iovs, io, rb_funcall(Fiber, rb_intern("current"), 0));
    // Fetch the native ring and prepare a submission entry for this fd.
    VALUE ring_obj = rb_iv_get(self, "@ring");
    TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    int fd = NUM2INT(rb_funcall(io, id_fileno, 0));

    write_buffer = StringValueCStr(buffer);
    struct iovec iov = {
        .iov_base = write_buffer,
        .iov_len = NUM2SIZET(length),
    };

    // Completion payload; freed by method_scheduler_wait after the CQE fires.
    data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
    data->is_poll = false;   // writev completion, not a poll event
    data->io = io;
    data->poll_mask = 0;

    io_uring_prep_writev(sqe, fd, &iov, 1, NUM2SIZET(offset));
    io_uring_sqe_set_data(sqe, data);
    io_uring_submit(ring);
    rb_funcall(Fiber, rb_intern("yield"), 0); // park until #wait resumes us
    return length;
}
|
195
|
+
|
196
|
+
// Evt::Scheduler.backend — reports which selector backend was compiled in.
VALUE method_scheduler_backend(VALUE klass) {
    static const char backend_name[] = "liburing";
    return rb_str_new_cstr(backend_name);
}
|
199
|
+
|
200
|
+
#endif
|
201
|
+
#endif
|
data/lib/evt.rb
ADDED
@@ -0,0 +1,121 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require 'fiber'
|
4
|
+
require 'socket'
|
5
|
+
require 'io/nonblock'
|
6
|
+
|
7
|
+
# Ruby 3 fiber-scheduler implementation backed by a native selector
# (epoll / kqueue / IOCP / io_uring / select, chosen at build time).
# The C extension supplies #init_selector, #register, #deregister and #wait.
class Evt::Scheduler
  def initialize
    @readable = {}   # IO => Fiber blocked waiting for readability
    @writable = {}   # IO => Fiber blocked waiting for writability
    @iovs = {}       # IO => Fiber blocked on a native readv/writev (io_uring)
    @waiting = {}    # Fiber => absolute monotonic deadline (seconds)

    @lock = Mutex.new
    @locking = 0     # count of fibers currently parked in #mutex_lock
    @ready = []      # fibers released by #mutex_unlock, resumed next tick

    @ios = ObjectSpace::WeakMap.new
    init_selector    # provided by the C extension
  end

  attr_reader :readable
  attr_reader :writable
  attr_reader :waiting

  # Seconds until the earliest sleeping fiber is due (clamped to 0),
  # or nil when no fiber is sleeping.
  def next_timeout
    _fiber, timeout = @waiting.min_by { |_key, value| value }

    if timeout
      offset = timeout - current_time
      offset < 0 ? 0 : offset
    end
  end

  # Main event loop: keeps polling the native selector while any fiber is
  # blocked on I/O, a timer, a native iov, or a mutex.
  def run
    while @readable.any? or @writable.any? or @waiting.any? or @iovs.any? or @locking.positive?
      readable, writable, iovs = self.wait

      readable&.each do |io|
        fiber = @readable.delete(io)
        fiber&.resume
      end

      writable&.each do |io|
        fiber = @writable.delete(io)
        fiber&.resume
      end

      iovs&.each do |io|
        fiber = @iovs.delete(io)
        fiber&.resume
      end

      if @waiting.any?
        time = current_time
        waiting = @waiting
        @waiting = {}

        # Resume expired timers; re-queue the rest.
        waiting.each do |fiber, timeout|
          if timeout <= time
            fiber.resume
          else
            @waiting[fiber] = timeout
          end
        end
      end

      if @ready.any?
        ready = nil

        # Snapshot under the lock: #mutex_unlock may push from another thread.
        @lock.synchronize do
          ready, @ready = @ready, []
        end

        ready.each do |fiber|
          fiber.resume
        end
      end
    end
  end

  # Monotonic clock in seconds; immune to wall-clock adjustments.
  def current_time
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end

  # Fiber::SchedulerInterface#io_wait hook: park the current fiber until
  # `io` is ready for the requested `events`. `duration` is accepted for
  # interface compatibility but not yet honored as a timeout.
  def io_wait(io, events, duration)
    @readable[io] = Fiber.current unless (events & IO::READABLE).zero?
    @writable[io] = Fiber.current unless (events & IO::WRITABLE).zero?
    self.register(io, events)
    Fiber.yield
    self.deregister(io)
    true
  end

  # Fiber::SchedulerInterface#kernel_sleep hook.
  # BUGFIX: the guard was inverted (`if duration.nil?`), which raised
  # TypeError (current_time + nil) for a plain `sleep` and never registered
  # real deadlines. Register a wake-up only when a duration is given;
  # a nil duration means "sleep until explicitly resumed".
  def kernel_sleep(duration = nil)
    @waiting[Fiber.current] = current_time + duration unless duration.nil?
    Fiber.yield
    true
  end

  # Fiber::SchedulerInterface#block-style mutex hook: park this fiber while
  # it waits for `mutex`; the counter keeps #run looping meanwhile.
  def mutex_lock(mutex)
    @locking += 1
    Fiber.yield
  ensure
    @locking -= 1
  end

  # Queue `fiber` (blocked in #mutex_lock) to be resumed on the next tick.
  # May be called from another thread, hence the lock.
  def mutex_unlock(mutex, fiber)
    @lock.synchronize do
      @ready << fiber
    end
  end

  # Fiber::SchedulerInterface#fiber hook: create a non-blocking fiber and
  # start it immediately.
  def fiber(&block)
    fiber = Fiber.new(blocking: false, &block)
    fiber.resume
    fiber
  end
end
|
data/lib/evt/version.rb
ADDED
metadata
CHANGED
@@ -1,27 +1,59 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: evt
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 0.2.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Delton Ding
|
8
8
|
autorequire:
|
9
|
-
bindir:
|
9
|
+
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2020-
|
12
|
-
dependencies:
|
11
|
+
date: 2020-12-21 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: rake-compiler
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - "~>"
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '1.0'
|
20
|
+
type: :development
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - "~>"
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '1.0'
|
13
27
|
description: A low-level Event Handler designed for Ruby 3 Scheduler for better performance
|
14
28
|
email:
|
15
29
|
- dsh0416@gmail.com
|
16
30
|
executables: []
|
17
|
-
extensions:
|
31
|
+
extensions:
|
32
|
+
- ext/evt/extconf.rb
|
18
33
|
extra_rdoc_files: []
|
19
34
|
files:
|
35
|
+
- ".github/workflows/test.yml"
|
20
36
|
- ".gitignore"
|
37
|
+
- CODE_OF_CONDUCT.md
|
38
|
+
- Gemfile
|
21
39
|
- LICENSE
|
22
40
|
- README.md
|
41
|
+
- Rakefile
|
42
|
+
- evt.gemspec
|
43
|
+
- ext/evt/epoll.h
|
44
|
+
- ext/evt/evt.c
|
45
|
+
- ext/evt/evt.h
|
46
|
+
- ext/evt/extconf.rb
|
47
|
+
- ext/evt/iocp.h
|
48
|
+
- ext/evt/kqueue.h
|
49
|
+
- ext/evt/select.h
|
50
|
+
- ext/evt/uring.h
|
51
|
+
- lib/evt.rb
|
52
|
+
- lib/evt/scheduler.rb
|
53
|
+
- lib/evt/version.rb
|
23
54
|
homepage: https://github.com/dsh0416/evt
|
24
|
-
licenses:
|
55
|
+
licenses:
|
56
|
+
- BSD-3-Clause
|
25
57
|
metadata:
|
26
58
|
homepage_uri: https://github.com/dsh0416/evt
|
27
59
|
source_code_uri: https://github.com/dsh0416/evt
|
@@ -33,14 +65,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
|
|
33
65
|
requirements:
|
34
66
|
- - ">="
|
35
67
|
- !ruby/object:Gem::Version
|
36
|
-
version: 2.
|
68
|
+
version: 2.8.0.dev
|
37
69
|
required_rubygems_version: !ruby/object:Gem::Requirement
|
38
70
|
requirements:
|
39
71
|
- - ">="
|
40
72
|
- !ruby/object:Gem::Version
|
41
73
|
version: '0'
|
42
74
|
requirements: []
|
43
|
-
rubygems_version: 3.
|
75
|
+
rubygems_version: 3.2.2
|
44
76
|
signing_key:
|
45
77
|
specification_version: 4
|
46
78
|
summary: A low-level Event Handler designed for Ruby 3 Scheduler
|