evt 0.1.4 → 0.2.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
evt.h
@@ -1,11 +1,82 @@
+ #ifndef EVT_H
+ #define EVT_H
+
  #include <ruby.h>

  VALUE Evt = Qnil;
  VALUE Scheduler = Qnil;
+ VALUE Payload = Qnil;
+ VALUE Fiber = Qnil;

  void Init_evt_ext();
  VALUE method_scheduler_init(VALUE self);
  VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest);
  VALUE method_scheduler_deregister(VALUE self, VALUE io);
  VALUE method_scheduler_wait(VALUE self);
- VALUE method_scheduler_backend();
+ VALUE method_scheduler_backend(VALUE klass);
+ #if HAVE_LIBURING_H
+ VALUE method_scheduler_io_read(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length);
+ VALUE method_scheduler_io_write(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length);
+ #endif
+
+ #if HAVE_WINDOWS_H
+ VALUE method_scheduler_io_read(VALUE io, VALUE buffer, VALUE offset, VALUE length);
+ VALUE method_scheduler_io_write(VALUE io, VALUE buffer, VALUE offset, VALUE length);
+ #endif
+
+ #if HAVE_LIBURING_H
+ #include <liburing.h>
+
+ #define URING_ENTRIES 64
+ #define URING_MAX_EVENTS 64
+
+ struct uring_data {
+     bool is_poll;
+     short poll_mask;
+     VALUE io;
+ };
+
+ void uring_payload_free(void* data);
+ size_t uring_payload_size(const void* data);
+
+ static const rb_data_type_t type_uring_payload = {
+     .wrap_struct_name = "uring_payload",
+     .function = {
+         .dmark = NULL,
+         .dfree = uring_payload_free,
+         .dsize = uring_payload_size,
+     },
+     .data = NULL,
+     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+ #elif HAVE_SYS_EPOLL_H
+ #include <sys/epoll.h>
+ #define EPOLL_MAX_EVENTS 64
+ #elif HAVE_SYS_EVENT_H
+ #include <sys/event.h>
+ #define KQUEUE_MAX_EVENTS 64
+ #elif HAVE_WINDOWS_H
+ // #include <Windows.h>
+ // #define IOCP_MAX_EVENTS 64
+
+ // struct iocp_data {
+ //     VALUE io;
+ //     bool is_poll;
+ //     int interest;
+ // };
+
+ // void iocp_payload_free(void* data);
+ // size_t iocp_payload_size(const void* data);
+
+ // static const rb_data_type_t type_iocp_payload = {
+ //     .wrap_struct_name = "iocp_payload",
+ //     .function = {
+ //         .dmark = NULL,
+ //         .dfree = iocp_payload_free,
+ //         .dsize = iocp_payload_size,
+ //     },
+ //     .data = NULL,
+ //     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ // };
+ #endif
+ #endif
extconf.rb
@@ -1,5 +1,12 @@
  require 'mkmf'
  extension_name = 'evt_ext'
- create_header
  dir_config(extension_name)
- create_makefile(extension_name)
+
+ have_library('uring')
+ have_header('liburing.h')
+ have_header('sys/epoll.h')
+ have_header('sys/event.h')
+ have_header('Windows.h')
+
+ create_header
+ create_makefile(extension_name)
iocp.h
@@ -0,0 +1,126 @@
+ #ifndef IOCP_H
+ #define IOCP_H
+ #include "evt.h"
+
+ #if HAVE_WINDOWS_H
+ void iocp_payload_free(void* data) {
+     CloseHandle((HANDLE) data);
+ }
+
+ size_t iocp_payload_size(const void* data) {
+     return sizeof(HANDLE);
+ }
+
+ VALUE method_scheduler_init(VALUE self) {
+     HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
+     rb_iv_set(self, "@iocp", TypedData_Wrap_Struct(Payload, &type_iocp_payload, iocp));
+     return Qnil;
+ }
+
+ VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
+     HANDLE iocp;
+     VALUE iocp_obj = rb_iv_get(self, "@iocp");
+     struct iocp_data* data;
+     TypedData_Get_Struct(iocp_obj, HANDLE, &type_iocp_payload, iocp);
+     int fd = NUM2INT(rb_funcallv(io, rb_intern("fileno"), 0, 0));
+     HANDLE io_handler = (HANDLE)rb_w32_get_osfhandle(fd);
+
+     int ruby_interest = NUM2INT(interest);
+     int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
+     int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
+     data = (struct iocp_data*) xmalloc(sizeof(struct iocp_data));
+     data->io = io;
+     data->is_poll = true;
+     data->interest = 0;
+
+     if (ruby_interest & readable) {
+         data->interest |= readable;
+     }
+
+     if (ruby_interest & writable) {
+         data->interest |= writable;
+     }
+
+     HANDLE res = CreateIoCompletionPort(io_handler, iocp, (ULONG_PTR) data, 0);
+     printf("IO at address: 0x%08x\n", (void *)data);
+
+     return Qnil;
+ }
+
+ VALUE method_scheduler_deregister(VALUE self, VALUE io) {
+     return Qnil;
+ }
+
+ VALUE method_scheduler_wait(VALUE self) {
+     ID id_next_timeout = rb_intern("next_timeout");
+     ID id_push = rb_intern("push");
+     VALUE iocp_obj = rb_iv_get(self, "@iocp");
+     VALUE next_timeout = rb_funcall(self, id_next_timeout, 0);
+
+     int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
+     int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
+
+     HANDLE iocp;
+     OVERLAPPED_ENTRY lpCompletionPortEntries[IOCP_MAX_EVENTS];
+     ULONG ulNumEntriesRemoved;
+     TypedData_Get_Struct(iocp_obj, HANDLE, &type_iocp_payload, iocp);
+
+     DWORD timeout;
+     if (next_timeout == Qnil) {
+         timeout = 0x5000;
+     } else {
+         timeout = NUM2INT(next_timeout) * 1000; // seconds to milliseconds
+     }
+
+     DWORD NumberOfBytesTransferred;
+     LPOVERLAPPED pOverlapped;
+     ULONG_PTR CompletionKey;
+
+     BOOL res = GetQueuedCompletionStatus(iocp, &NumberOfBytesTransferred, &CompletionKey, &pOverlapped, timeout);
+     // BOOL res = GetQueuedCompletionStatusEx(
+     //     iocp, lpCompletionPortEntries, IOCP_MAX_EVENTS, &ulNumEntriesRemoved, timeout, TRUE);
+
+     VALUE result = rb_ary_new2(2);
+
+     VALUE readables = rb_ary_new();
+     VALUE writables = rb_ary_new();
+
+     rb_ary_store(result, 0, readables);
+     rb_ary_store(result, 1, writables);
+
+     if (!res) {
+         return result;
+     }
+
+     printf("--------- Received! ---------\n");
+     printf("Received IO at address: 0x%08x\n", (void *)CompletionKey);
+     printf("dwNumberOfBytesTransferred: %lld\n", NumberOfBytesTransferred);
+
+     // if (ulNumEntriesRemoved > 0) {
+     //     printf("Entries: %ld\n", ulNumEntriesRemoved);
+     // }
+
+     // for (ULONG i = 0; i < ulNumEntriesRemoved; i++) {
+     //     OVERLAPPED_ENTRY entry = lpCompletionPortEntries[i];
+
+     //     struct iocp_data *data = (struct iocp_data*) entry.lpCompletionKey;
+
+     //     int interest = data->interest;
+     //     VALUE obj_io = data->io;
+     //     if (interest & readable) {
+     //         rb_funcall(readables, id_push, 1, obj_io);
+     //     } else if (interest & writable) {
+     //         rb_funcall(writables, id_push, 1, obj_io);
+     //     }
+
+     //     xfree(data);
+     // }
+
+     return result;
+ }
+
+ VALUE method_scheduler_backend(VALUE klass) {
+     return rb_str_new_cstr("iocp");
+ }
+ #endif
+ #endif
kqueue.h
@@ -0,0 +1,97 @@
+ #ifndef KQUEUE_H
+ #define KQUEUE_H
+ #include "evt.h"
+
+ #if HAVE_SYS_EVENT_H
+
+ VALUE method_scheduler_init(VALUE self) {
+     rb_iv_set(self, "@kq", INT2NUM(kqueue()));
+     return Qnil;
+ }
+
+ VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
+     struct kevent event;
+     u_short event_flags = 0;
+     ID id_fileno = rb_intern("fileno");
+     int kq = NUM2INT(rb_iv_get(self, "@kq"));
+     int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
+     int ruby_interest = NUM2INT(interest);
+     int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
+     int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
+
+     if (ruby_interest & readable) {
+         event_flags |= EVFILT_READ;
+     }
+
+     if (ruby_interest & writable) {
+         event_flags |= EVFILT_WRITE;
+     }
+
+     EV_SET(&event, fd, event_flags, EV_ADD|EV_ENABLE, 0, 0, (void*) io);
+     kevent(kq, &event, 1, NULL, 0, NULL); // TODO: Check the return value
+     return Qnil;
+ }
+
+ VALUE method_scheduler_deregister(VALUE self, VALUE io) {
+     struct kevent event;
+     ID id_fileno = rb_intern("fileno");
+     int kq = NUM2INT(rb_iv_get(self, "@kq"));
+     int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
+     EV_SET(&event, fd, 0, EV_DELETE, 0, 0, (void*) io);
+     kevent(kq, &event, 1, NULL, 0, NULL); // TODO: Check the return value
+     return Qnil;
+ }
+
+ VALUE method_scheduler_wait(VALUE self) {
+     int n, kq, i;
+     u_short event_flags = 0;
+
+     struct kevent* events; // Event Triggered
+     struct timespec timeout;
+     VALUE next_timeout, obj_io, readables, writables, result;
+     ID id_next_timeout = rb_intern("next_timeout");
+     ID id_push = rb_intern("push");
+
+     kq = NUM2INT(rb_iv_get(self, "@kq"));
+     next_timeout = rb_funcall(self, id_next_timeout, 0);
+     readables = rb_ary_new();
+     writables = rb_ary_new();
+
+     events = (struct kevent*) xmalloc(sizeof(struct kevent) * KQUEUE_MAX_EVENTS);
+
+     if (next_timeout == Qnil || NUM2INT(next_timeout) == -1) {
+         n = kevent(kq, NULL, 0, events, KQUEUE_MAX_EVENTS, NULL);
+     } else {
65
+ timeout.tv_sec = next_timeout / 1000;
66
+ timeout.tv_nsec = next_timeout % 1000 * 1000 * 1000;
67
+ n = kevent(kq, NULL, 0, events, KQUEUE_MAX_EVENTS, &timeout);
68
+ }
69
+
70
+ // TODO: Check if n >= 0
71
+ for (i = 0; i < n; i++) {
72
+ event_flags = events[i].filter;
73
+ printf("event flags: %d\n", event_flags);
74
+ if (event_flags & EVFILT_READ) {
75
+ obj_io = (VALUE) events[i].udata;
76
+ rb_funcall(readables, id_push, 1, obj_io);
77
+ }
78
+
79
+ if (event_flags & EVFILT_WRITE) {
80
+ obj_io = (VALUE) events[i].udata;
81
+ rb_funcall(writables, id_push, 1, obj_io);
82
+ }
83
+ }
84
+
85
+ result = rb_ary_new2(2);
86
+ rb_ary_store(result, 0, readables);
87
+ rb_ary_store(result, 1, writables);
88
+
89
+ xfree(events);
90
+ return result;
91
+ }
92
+
93
+ VALUE method_scheduler_backend(VALUE klass) {
94
+ return rb_str_new_cstr("kqueue");
95
+ }
96
+ #endif
97
+ #endif
select.h
@@ -0,0 +1,36 @@
+ #ifndef SELECT_H
+ #define SELECT_H
+ #include "evt.h"
+
+ VALUE method_scheduler_init(VALUE self) {
+     return Qnil;
+ }
+
+ VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
+     return Qnil;
+ }
+
+ VALUE method_scheduler_deregister(VALUE self, VALUE io) {
+     return Qnil;
+ }
+
+ VALUE method_scheduler_wait(VALUE self) {
+     // return IO.select(@readable.keys, @writable.keys, [], next_timeout)
+     VALUE readable, writable, readable_keys, writable_keys, next_timeout;
+     ID id_select = rb_intern("select");
+     ID id_next_timeout = rb_intern("next_timeout");
+
+     readable = rb_iv_get(self, "@readable");
+     writable = rb_iv_get(self, "@writable");
+
+     readable_keys = rb_funcall(readable, rb_intern("keys"), 0);
+     writable_keys = rb_funcall(writable, rb_intern("keys"), 0);
+     next_timeout = rb_funcall(self, id_next_timeout, 0);
+
+     return rb_funcall(rb_cIO, id_select, 4, readable_keys, writable_keys, rb_ary_new(), next_timeout);
+ }
+
+ VALUE method_scheduler_backend(VALUE klass) {
+     return rb_str_new_cstr("ruby");
+ }
+ #endif
uring.h
@@ -0,0 +1,201 @@
+ #ifndef URING_H
+ #define URING_H
+ #include "evt.h"
+ #if HAVE_LIBURING_H
+ void uring_payload_free(void* data) {
+     // TODO: free the uring_data structs if the payload is freed before all IO responds
+     io_uring_queue_exit((struct io_uring*) data);
+     xfree(data);
+ }
+
+ size_t uring_payload_size(const void* data) {
+     return sizeof(struct io_uring);
+ }
+
+ VALUE method_scheduler_init(VALUE self) {
+     int ret;
+     struct io_uring* ring;
+     ring = xmalloc(sizeof(struct io_uring));
+     ret = io_uring_queue_init(URING_ENTRIES, ring, 0);
+     if (ret < 0) {
+         rb_raise(rb_eIOError, "unable to initialize io_uring");
+     }
+     rb_iv_set(self, "@ring", TypedData_Wrap_Struct(Payload, &type_uring_payload, ring));
+     return Qnil;
+ }
+
+ VALUE method_scheduler_register(VALUE self, VALUE io, VALUE interest) {
+     VALUE ring_obj;
+     struct io_uring* ring;
+     struct io_uring_sqe *sqe;
+     struct uring_data *data;
+     short poll_mask = 0;
+     ID id_fileno = rb_intern("fileno");
+
+     ring_obj = rb_iv_get(self, "@ring");
+     TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
+     sqe = io_uring_get_sqe(ring);
+     int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
+
+     int ruby_interest = NUM2INT(interest);
+     int readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
+     int writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
+
+     if (ruby_interest & readable) {
+         poll_mask |= POLL_IN;
+     }
+
+     if (ruby_interest & writable) {
+         poll_mask |= POLL_OUT;
+     }
+
+     data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
+     data->is_poll = true;
+     data->io = io;
+     data->poll_mask = poll_mask;
+
+     io_uring_prep_poll_add(sqe, fd, poll_mask);
+     io_uring_sqe_set_data(sqe, data);
+     io_uring_submit(ring);
+     return Qnil;
+ }
+
+ VALUE method_scheduler_deregister(VALUE self, VALUE io) {
+     // io_uring runs under oneshot mode. No need to deregister.
+     return Qnil;
+ }
+
+ VALUE method_scheduler_wait(VALUE self) {
+     struct io_uring* ring;
+     struct io_uring_cqe *cqes[URING_MAX_EVENTS];
+     struct uring_data *data;
+     VALUE next_timeout, obj_io, readables, writables, iovs, result;
+     unsigned ret, i;
+     double time = 0.0;
+     short poll_events;
+
+     ID id_next_timeout = rb_intern("next_timeout");
+     ID id_push = rb_intern("push");
+     ID id_sleep = rb_intern("sleep");
+
+     next_timeout = rb_funcall(self, id_next_timeout, 0);
+     readables = rb_ary_new();
+     writables = rb_ary_new();
+     iovs = rb_ary_new();
+
+     TypedData_Get_Struct(rb_iv_get(self, "@ring"), struct io_uring, &type_uring_payload, ring);
+     ret = io_uring_peek_batch_cqe(ring, cqes, URING_MAX_EVENTS);
+
+     for (i = 0; i < ret; i++) {
+         data = (struct uring_data*) io_uring_cqe_get_data(cqes[i]);
+         poll_events = data->poll_mask;
+         obj_io = data->io;
+         if (!data->is_poll) {
+             rb_funcall(iovs, id_push, 1, obj_io);
+         }
+
+         if (poll_events & POLL_IN) {
+             rb_funcall(readables, id_push, 1, obj_io);
+         }
+
+         if (poll_events & POLL_OUT) {
+             rb_funcall(writables, id_push, 1, obj_io);
+         }
+         xfree(data);
+     }
+
+     if (ret == 0) {
+         if (next_timeout != Qnil && NUM2INT(next_timeout) != -1) {
+             // sleep
+             time = NUM2INT(next_timeout) / 1000.0;
+             rb_funcall(rb_mKernel, id_sleep, 1, DBL2NUM(time));
+         } else {
+             rb_funcall(rb_mKernel, id_sleep, 1, DBL2NUM(0.001)); // To avoid infinite loop
+         }
+     }
+
+     result = rb_ary_new2(3);
+     rb_ary_store(result, 0, readables);
+     rb_ary_store(result, 1, writables);
+     rb_ary_store(result, 2, iovs);
+
+     return result;
+ }
+
+ VALUE method_scheduler_io_read(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+     struct io_uring* ring;
+     struct uring_data *data;
+     char* read_buffer;
+     ID id_fileno = rb_intern("fileno");
+     // @iov[io] = Fiber.current
+     VALUE iovs = rb_iv_get(self, "@iovs");
+     rb_hash_aset(iovs, io, rb_funcall(Fiber, rb_intern("current"), 0));
+     // register
+     VALUE ring_obj = rb_iv_get(self, "@ring");
+     TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
+     int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
+
+     read_buffer = (char*) xmalloc(NUM2SIZET(length));
+     struct iovec iov = {
+         .iov_base = read_buffer,
+         .iov_len = NUM2SIZET(length),
+     };
+
+     data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
+     data->is_poll = false;
+     data->io = io;
+     data->poll_mask = 0;
+
+     io_uring_prep_readv(sqe, fd, &iov, 1, NUM2SIZET(offset));
+     io_uring_sqe_set_data(sqe, data);
+     io_uring_submit(ring);
+
+     VALUE result = rb_str_new(read_buffer, strlen(read_buffer));
+     xfree(read_buffer);
+     if (buffer != Qnil) {
+         rb_str_append(buffer, result);
+     }
+
+     rb_funcall(Fiber, rb_intern("yield"), 0); // Fiber.yield
+     return result;
+ }
+
+ VALUE method_scheduler_io_write(VALUE self, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+     struct io_uring* ring;
+     struct uring_data *data;
+     char* write_buffer;
+     ID id_fileno = rb_intern("fileno");
+     // @iov[io] = Fiber.current
+     VALUE iovs = rb_iv_get(self, "@iovs");
+     rb_hash_aset(iovs, io, rb_funcall(Fiber, rb_intern("current"), 0));
+     // register
+     VALUE ring_obj = rb_iv_get(self, "@ring");
+     TypedData_Get_Struct(ring_obj, struct io_uring, &type_uring_payload, ring);
+     struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
+     int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
+
+     write_buffer = StringValueCStr(buffer);
+     struct iovec iov = {
+         .iov_base = write_buffer,
+         .iov_len = NUM2SIZET(length),
+     };
+
+     data = (struct uring_data*) xmalloc(sizeof(struct uring_data));
+     data->is_poll = false;
+     data->io = io;
+     data->poll_mask = 0;
+
+     io_uring_prep_writev(sqe, fd, &iov, 1, NUM2SIZET(offset));
+     io_uring_sqe_set_data(sqe, data);
+     io_uring_submit(ring);
+     rb_funcall(Fiber, rb_intern("yield"), 0); // Fiber.yield
+     return length;
+ }
+
+ VALUE method_scheduler_backend(VALUE klass) {
+     return rb_str_new_cstr("liburing");
+ }
+
+ #endif
+ #endif