event 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/event/backend.o +0 -0
- data/ext/event/backend/backend.c +119 -49
- data/ext/event/backend/backend.h +75 -8
- data/ext/event/backend/epoll.c +78 -48
- data/ext/event/backend/kqueue.c +77 -40
- data/ext/event/backend/uring.c +220 -91
- data/ext/event/event.bundle +0 -0
- data/ext/event/event.o +0 -0
- data/ext/event/extconf.rb +5 -0
- data/ext/event/kqueue.o +0 -0
- data/ext/event/mkmf.log +95 -0
- data/lib/event/backend/select.rb +63 -35
- data/lib/event/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 71ed23db589d68545077cbf271afe2145054b040e3c8001b052366c8588a76e1
+  data.tar.gz: 4c4e1e991044cc3abfbdd388dbfc6e4254de4ac3e97bec9faab60142c1f63525
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2f9ec25ff8d890e41589099ca347fc7c1ca1279dcc40c0065b759ac9e9c04ede4a9ad772f4beaadb794360b0af0a132db6f4e664ff36503995cc02a6ab3f7ff1
+  data.tar.gz: 6767cd4dfc6bc9c69bcfdc9ecb08e327315480919805fcca78c31c32eed19a7decbbdd2fc858897b678db06d58d40d935e77697e0a67ca1ee8408188da02861a
data/ext/event/backend.o
CHANGED
Binary file
data/ext/event/backend/backend.c
CHANGED
@@ -21,88 +21,158 @@
 #include "backend.h"
 #include <fcntl.h>
 
-
-static
-
-void Init_Event_Backend(VALUE Event_Backend) {
-	id_transfer = rb_intern("transfer");
-	id_wait = rb_intern("wait");
-	// id_alive_p = rb_intern("alive?");
-	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
-}
+#ifndef HAVE__RB_FIBER_TRANSFER
+static ID id_transfer;
 
 VALUE
-
-#ifdef HAVE__RB_FIBER_TRANSFER
-	return rb_fiber_transfer(fiber, 0, NULL);
-#else
+Event_Backend_fiber_transfer(VALUE fiber) {
 	return rb_funcall(fiber, id_transfer, 0);
-#endif
 }
 
 VALUE
-
-	// if (!RTEST(rb_fiber_alive_p(fiber))) {
-	// 	return Qnil;
-	// }
-
-#ifdef HAVE__RB_FIBER_TRANSFER
-	return rb_fiber_transfer(fiber, 1, &result);
-#else
+Event_Backend_fiber_transfer_result(VALUE fiber, VALUE result) {
 	return rb_funcall(fiber, id_transfer, 1, result);
+}
 #endif
+
+#ifndef HAVE_RB_IO_DESCRIPTOR
+static ID id_fileno;
+
+int Event_Backend_io_descriptor(VALUE io) {
+	return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
 }
+#endif
+
+#ifndef HAVE_RB_PROCESS_STATUS_WAIT
+static ID id_wait;
+static VALUE rb_Process_Status = Qnil;
 
 VALUE Event_Backend_process_status_wait(rb_pid_t pid)
 {
 	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
 }
+#endif
 
-
-
-
+int Event_Backend_nonblock_set(int file_descriptor)
+{
+	int flags = fcntl(file_descriptor, F_GETFL, 0);
+
+	if (!(flags & O_NONBLOCK)) {
+		fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
 	}
 
-	return
+	return flags;
 }
 
-
-
-
+void Event_Backend_nonblock_restore(int file_descriptor, int flags)
+{
+	if (!(flags & O_NONBLOCK)) {
+		fcntl(file_descriptor, F_SETFL, flags & ~flags);
+	}
+}
+
+void Init_Event_Backend(VALUE Event_Backend) {
+#ifndef HAVE_RB_IO_DESCRIPTOR
+	id_fileno = rb_intern("fileno");
+#endif
+
+#ifndef HAVE__RB_FIBER_TRANSFER
+	id_transfer = rb_intern("transfer");
+#endif
 
-
+#ifndef HAVE_RB_PROCESS_STATUS_WAIT
+	id_wait = rb_intern("wait");
+	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
+#endif
+}
+
+struct wait_and_transfer_arguments {
+	struct Event_Backend *backend;
+	struct Event_Backend_Queue *waiting;
+};
+
+static void queue_pop(struct Event_Backend *backend, struct Event_Backend_Queue *waiting) {
+	if (waiting->behind) {
+		waiting->behind->infront = waiting->infront;
+	} else {
+		backend->waiting = waiting->infront;
+	}
 
-	if (
-
+	if (waiting->infront) {
+		waiting->infront->behind = waiting->behind;
+	} else {
+		backend->ready = waiting->behind;
+	}
+}
+
+static void queue_push(struct Event_Backend *backend, struct Event_Backend_Queue *waiting) {
+	if (backend->waiting) {
+		backend->waiting->behind = waiting;
+		waiting->infront = backend->waiting;
 	} else {
-
+		backend->ready = waiting;
 	}
 
-
+	backend->waiting = waiting;
+}
+
+static VALUE wait_and_transfer(VALUE fiber) {
+	return Event_Backend_fiber_transfer(fiber);
 }
 
-
-
+static VALUE wait_and_transfer_ensure(VALUE _arguments) {
+	struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
 
-
-
-
+	queue_pop(arguments->backend, arguments->waiting);
+
+	return Qnil;
 }
 
-
+void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber)
 {
-
+	struct Event_Backend_Queue waiting = {
+		.behind = NULL,
+		.infront = NULL,
+		.fiber = rb_fiber_current()
+	};
 
-
-		fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
-	}
+	queue_push(backend, &waiting);
 
-
+	struct wait_and_transfer_arguments arguments = {
+		.backend = backend,
+		.waiting = &waiting,
+	};
+
+	rb_ensure(wait_and_transfer, fiber, wait_and_transfer_ensure, (VALUE)&arguments);
 }
 
-void
+void Event_Backend_ready_pop(struct Event_Backend *backend)
 {
-
-
+	// Get the current tail and head of the queue:
+	struct Event_Backend_Queue *waiting = backend->waiting;
+
+	// Process from head to tail in order:
+	// During this, more items may be appended to tail.
+	while (backend->ready) {
+		struct Event_Backend_Queue *ready = backend->ready;
+
+		Event_Backend_fiber_transfer(ready->fiber);
+
+		if (ready == waiting) break;
 	}
-}
+}
+
+void Event_Backend_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
+{
+	if ((stop->tv_nsec - start->tv_nsec) < 0) {
+		duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
+		duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
+	} else {
+		duration->tv_sec = stop->tv_sec - start->tv_sec;
+		duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
+	}
+}
+
+void Event_Backend_current_time(struct timespec *time) {
+	clock_gettime(CLOCK_MONOTONIC, time);
+}
data/ext/event/backend/backend.h
CHANGED
@@ -20,6 +20,13 @@
 
 #include <ruby.h>
 #include <ruby/thread.h>
+#include <ruby/io.h>
+
+#ifdef HAVE_RUBY_IO_BUFFER_H
+#include <ruby/io/buffer.h>
+#endif
+
+#include <time.h>
 
 enum Event {
 	READABLE = 1,
@@ -29,17 +36,77 @@ enum Event {
 	HANGUP = 16
 };
 
-void
-Init_Event_Backend();
+void Init_Event_Backend();
 
-
-
+#ifdef HAVE__RB_FIBER_TRANSFER
+#define Event_Backend_fiber_transfer(fiber) rb_fiber_transfer(fiber, 0, NULL)
+#define Event_Backend_fiber_transfer_result(fiber, argument) rb_fiber_transfer(fiber, 1, &argument)
+#else
+VALUE Event_Backend_fiber_transfer(VALUE fiber);
+VALUE Event_Backend_fiber_transfer_result(VALUE fiber, VALUE argument);
+#endif
 
-
+#ifdef HAVE_RB_IO_DESCRIPTOR
+#define Event_Backend_io_descriptor(io) rb_io_descriptor(io)
+#else
+int Event_Backend_io_descriptor(VALUE io);
+#endif
 
-
-
-
+#ifdef HAVE_RB_PROCESS_STATUS_WAIT
+#define Event_Backend_process_status_wait(pid) rb_process_status_wait(pid)
+#else
+VALUE Event_Backend_process_status_wait(rb_pid_t pid);
+#endif
 
 int Event_Backend_nonblock_set(int file_descriptor);
 void Event_Backend_nonblock_restore(int file_descriptor, int flags);
+
+struct Event_Backend_Queue {
+	struct Event_Backend_Queue *behind;
+	struct Event_Backend_Queue *infront;
+
+	VALUE fiber;
+};
+
+struct Event_Backend {
+	VALUE loop;
+
+	// Append to waiting.
+	struct Event_Backend_Queue *waiting;
+	// Process from ready.
+	struct Event_Backend_Queue *ready;
+};
+
+inline
+void Event_Backend_initialize(struct Event_Backend *backend, VALUE loop) {
+	backend->loop = loop;
+	backend->waiting = NULL;
+	backend->ready = NULL;
+}
+
+inline
+void Event_Backend_mark(struct Event_Backend *backend) {
+	rb_gc_mark(backend->loop);
+
+	struct Event_Backend_Queue *ready = backend->ready;
+	while (ready) {
+		rb_gc_mark(ready->fiber);
+		ready = ready->behind;
+	}
+}
+
+void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber);
+
+inline
+void Event_Backend_defer(struct Event_Backend *backend)
+{
+	Event_Backend_wait_and_transfer(backend, backend->loop);
+}
+
+void Event_Backend_ready_pop(struct Event_Backend *backend);
+
+void Event_Backend_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+void Event_Backend_current_time(struct timespec *time);
+
+#define PRINTF_TIMESPEC "%lld.%.9ld"
+#define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
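backend.h now exposes monotonic-clock helpers plus printf macros for struct timespec. A minimal standalone sketch of how they fit together; since the real definitions live in backend.c, the borrow-based subtraction from Event_Backend_elapsed_time is re-implemented locally here:

#include <stdio.h>
#include <time.h>

// Same macros as added to backend.h:
#define PRINTF_TIMESPEC "%lld.%.9ld"
#define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec

// Same arithmetic as Event_Backend_elapsed_time: borrow one second when
// the nanosecond difference underflows.
static void elapsed_time(struct timespec *start, struct timespec *stop, struct timespec *duration) {
	if ((stop->tv_nsec - start->tv_nsec) < 0) {
		duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
		duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
	} else {
		duration->tv_sec = stop->tv_sec - start->tv_sec;
		duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
	}
}

int main(void) {
	struct timespec start, stop, duration;
	clock_gettime(CLOCK_MONOTONIC, &start); // as in Event_Backend_current_time
	// ... work being measured ...
	clock_gettime(CLOCK_MONOTONIC, &stop);
	elapsed_time(&start, &stop, &duration);
	printf("elapsed: " PRINTF_TIMESPEC "s\n", PRINTF_TIMESPEC_ARGS(duration));
	return 0;
}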
data/ext/event/backend/epoll.c
CHANGED
@@ -28,19 +28,18 @@
 #include "pidfd.c"
 
 static VALUE Event_Backend_EPoll = Qnil;
-static ID id_fileno;
 
 enum {EPOLL_MAX_EVENTS = 64};
 
 struct Event_Backend_EPoll {
-
+	struct Event_Backend backend;
 	int descriptor;
 };
 
 void Event_Backend_EPoll_Type_mark(void *_data)
 {
 	struct Event_Backend_EPoll *data = _data;
-
+	Event_Backend_mark(&data->backend);
 }
 
 static
@@ -80,7 +79,7 @@ VALUE Event_Backend_EPoll_allocate(VALUE self) {
 	struct Event_Backend_EPoll *data = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 
-	data->
+	Event_Backend_initialize(&data->backend, Qnil);
 	data->descriptor = -1;
 
 	return instance;
@@ -90,7 +89,7 @@ VALUE Event_Backend_EPoll_initialize(VALUE self, VALUE loop) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 
-	data->
+	Event_Backend_initialize(&data->backend, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 
 	if (result == -1) {
@@ -113,6 +112,33 @@ VALUE Event_Backend_EPoll_close(VALUE self) {
 	return Qnil;
 }
 
+VALUE Event_Backend_EPoll_transfer(VALUE self, VALUE fiber)
+{
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+	Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_EPoll_defer(VALUE self)
+{
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+	Event_Backend_defer(&data->backend);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_EPoll_ready_p(VALUE self) {
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+	return data->backend.ready ? Qtrue : Qfalse;
+}
+
 struct process_wait_arguments {
 	struct Event_Backend_EPoll *data;
 	pid_t pid;
@@ -124,7 +150,7 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-
+	Event_Backend_fiber_transfer(arguments->data->backend.loop);
 
 	return Event_Backend_process_status_wait(arguments->pid);
 }
@@ -143,7 +169,7 @@ VALUE process_wait_ensure(VALUE _arguments) {
 VALUE Event_Backend_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
-
+
 	struct process_wait_arguments process_wait_arguments = {
 		.data = data,
 		.pid = NUM2PIDT(pid),
@@ -217,7 +243,7 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-	VALUE result =
+	VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);
 
 	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
 };
@@ -228,7 +254,7 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 
 	struct epoll_event event = {0};
 
-	int descriptor =
+	int descriptor = Event_Backend_io_descriptor(io);
 	int duplicate = -1;
 
 	event.events = epoll_flags_from_events(NUM2INT(events));
@@ -264,6 +290,8 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
 }
 
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
 struct io_read_arguments {
 	VALUE self;
 	VALUE fiber;
@@ -274,7 +302,6 @@ struct io_read_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -282,18 +309,22 @@ static
 VALUE io_read_loop(VALUE _arguments) {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 
-
+	void *base;
+	size_t size;
+	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+	size_t offset = 0;
 	size_t length = arguments->length;
-	size_t total = 0;
 
 	while (length > 0) {
-
-		ssize_t result = read(arguments->descriptor,
+		size_t maximum_size = size - offset;
+		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 
-		if (result
+		if (result == 0) {
+			break;
+		} else if (result > 0) {
 			offset += result;
 			length -= result;
-			total += result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
 		} else {
@@ -301,9 +332,7 @@ VALUE io_read_loop(VALUE _arguments) {
 		}
 	}
 
-
-
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 }
 
 static
@@ -315,16 +344,11 @@ VALUE io_read_ensure(VALUE _arguments) {
 	return Qnil;
 }
 
-VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE
-
-	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
-
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 
-
 	struct io_read_arguments io_read_arguments = {
 		.self = self,
 		.fiber = fiber,
@@ -333,7 +357,6 @@ VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 
@@ -350,7 +373,6 @@ struct io_write_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -358,18 +380,23 @@ static
 VALUE io_write_loop(VALUE _arguments) {
 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 
-
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+	size_t offset = 0;
 	size_t length = arguments->length;
-
+
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
 
 	while (length > 0) {
-
-		ssize_t result = write(arguments->descriptor, buffer+offset, length);
+		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
 
 		if (result >= 0) {
-			length -= result;
 			offset += result;
-
+			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
 		} else {
@@ -377,7 +404,7 @@ VALUE io_write_loop(VALUE _arguments) {
 		}
 	}
 
-	return SIZET2NUM(
+	return SIZET2NUM(offset);
 };
 
 static
@@ -389,13 +416,9 @@ VALUE io_write_ensure(VALUE _arguments) {
 	return Qnil;
 };
 
-VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE
-
-	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 
 	struct io_write_arguments io_write_arguments = {
@@ -406,13 +429,14 @@ VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
+#endif
+
 static
 int make_timeout(VALUE duration) {
 	if (duration == Qnil) {
@@ -472,6 +496,8 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 
+	Event_Backend_ready_pop(&data->backend);
+
 	struct select_arguments arguments = {
 		.data = data,
 		.timeout = 0
@@ -482,7 +508,7 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 	if (arguments.count == 0) {
 		arguments.timeout = make_timeout(duration);
 
-		if (arguments.timeout != 0) {
+		if (!data->backend.ready && arguments.timeout != 0) {
 			select_internal_without_gvl(&arguments);
 		}
 	}
@@ -493,25 +519,29 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 
 		// fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
 
-
+		Event_Backend_fiber_transfer_result(fiber, result);
 	}
 
 	return INT2NUM(arguments.count);
 }
 
 void Init_Event_Backend_EPoll(VALUE Event_Backend) {
-	id_fileno = rb_intern("fileno");
-
 	Event_Backend_EPoll = rb_define_class_under(Event_Backend, "EPoll", rb_cObject);
 
 	rb_define_alloc_func(Event_Backend_EPoll, Event_Backend_EPoll_allocate);
 	rb_define_method(Event_Backend_EPoll, "initialize", Event_Backend_EPoll_initialize, 1);
+	rb_define_method(Event_Backend_EPoll, "transfer", Event_Backend_EPoll_transfer, 1);
+	rb_define_method(Event_Backend_EPoll, "defer", Event_Backend_EPoll_defer, 0);
+	rb_define_method(Event_Backend_EPoll, "ready?", Event_Backend_EPoll_ready_p, 0);
 	rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
 	rb_define_method(Event_Backend_EPoll, "close", Event_Backend_EPoll_close, 0);
 
 	rb_define_method(Event_Backend_EPoll, "io_wait", Event_Backend_EPoll_io_wait, 3);
-
-
+
+#ifdef HAVE_RUBY_IO_BUFFER_H
+	rb_define_method(Event_Backend_EPoll, "io_read", Event_Backend_EPoll_io_read, 4);
+	rb_define_method(Event_Backend_EPoll, "io_write", Event_Backend_EPoll_io_write, 4);
+#endif
 
 	rb_define_method(Event_Backend_EPoll, "process_wait", Event_Backend_EPoll_process_wait, 3);
 }
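The new io_read/io_write paths share one pattern: loop over the nonblocking descriptor, and on EAGAIN/EWOULDBLOCK suspend the fiber via io_wait until epoll reports readiness. The sketch below shows the same loop shape outside Ruby, substituting a blocking poll(2) for the fiber suspension — an illustrative simplification, not the gem's scheduler:

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

// Read up to `size` bytes into `base`, stopping early at EOF.
// Returns bytes read, or -1 on error.
static ssize_t read_fully(int descriptor, char *base, size_t size) {
	size_t offset = 0;
	while (offset < size) {
		ssize_t result = read(descriptor, base + offset, size - offset);
		if (result == 0) {
			break; // EOF, mirrors `if (result == 0) break;` in io_read_loop
		} else if (result > 0) {
			offset += result;
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			// Where the gem calls Event_Backend_EPoll_io_wait(..., READABLE),
			// this sketch just blocks until the descriptor is readable:
			struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
			poll(&pfd, 1, -1);
		} else {
			return -1;
		}
	}
	return (ssize_t)offset;
}

int main(void) {
	char buffer[16];
	ssize_t result = read_fully(STDIN_FILENO, buffer, sizeof(buffer));
	printf("read %zd bytes\n", result);
	return 0;
}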