nio4r 2.5.2 → 2.5.4
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/workflow.yml +43 -0
- data/.rubocop.yml +30 -11
- data/CHANGES.md +24 -1
- data/Gemfile +1 -1
- data/README.md +6 -24
- data/examples/echo_server.rb +2 -2
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +3 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -31
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/selector.c +50 -51
- data/lib/nio/bytebuffer.rb +4 -0
- data/lib/nio/monitor.rb +1 -1
- data/lib/nio/selector.rb +1 -1
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +2 -2
- data/spec/nio/bytebuffer_spec.rb +0 -1
- data/spec/nio/selectables/ssl_socket_spec.rb +3 -1
- data/spec/nio/selectables/udp_socket_spec.rb +2 -2
- data/spec/nio/selector_spec.rb +0 -1
- metadata +11 -12
- data/.travis.yml +0 -44
- data/Guardfile +0 -10
- data/appveyor.yml +0 -40
data/ext/nio4r/extconf.rb
CHANGED
@@ -4,6 +4,7 @@ require "rubygems"
 
 # Write a dummy Makefile on Windows because we use the pure Ruby implementation there
 if Gem.win_platform?
+  require "devkit" if RUBY_PLATFORM.include?("mingw")
   File.write("Makefile", "all install::\n")
   File.write("nio4r_ext.so", "")
   exit
@@ -21,6 +22,8 @@ $defs << "-DEV_USE_KQUEUE" if have_header("sys/event.h") && have_header("s
 $defs << "-DEV_USE_PORT" if have_type("port_event_t", "port.h")
 $defs << "-DHAVE_SYS_RESOURCE_H" if have_header("sys/resource.h")
 
+$defs << "-DEV_STANDALONE" # prevent libev from assuming "config.h" exists
+
 CONFIG["optflags"] << " -fno-strict-aliasing" unless RUBY_PLATFORM =~ /mswin/
 
 dir_config "nio4r_ext"
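For reference, the probes above are plain mkmf calls: every flag pushed onto `$defs` ends up as a `-D` option in the generated Makefile, which is how `-DEV_STANDALONE` reaches the libev build. A minimal sketch of the same pattern, using a hypothetical extension name (`demo_ext`) rather than anything from this diff:

    # extconf.rb sketch -- illustrative only, not shipped with nio4r
    require "mkmf"

    # A successful probe compiles a tiny test program and records the result;
    # the custom -D flag is only added when the header is actually present.
    $defs << "-DEV_USE_SELECT" if have_header("sys/select.h")
    $defs << "-DEV_STANDALONE" # unconditional define, same idea as the change above

    # create_makefile writes a Makefile that passes the accumulated $defs
    # to the compiler when the extension is built.
    create_makefile "demo_ext"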
data/ext/nio4r/libev.h
CHANGED
data/ext/nio4r/monitor.c
CHANGED
@@ -4,6 +4,7 @@
  */
 
 #include "nio4r.h"
+#include <assert.h>
 
 static VALUE mNIO = Qnil;
 static VALUE cNIO_Monitor = Qnil;
@@ -60,12 +61,14 @@ void Init_NIO_Monitor()
 static VALUE NIO_Monitor_allocate(VALUE klass)
 {
     struct NIO_Monitor *monitor = (struct NIO_Monitor *)xmalloc(sizeof(struct NIO_Monitor));
-
+    assert(monitor);
+    *monitor = (struct NIO_Monitor){.self = Qnil};
     return Data_Wrap_Struct(klass, NIO_Monitor_mark, NIO_Monitor_free, monitor);
 }
 
 static void NIO_Monitor_mark(struct NIO_Monitor *monitor)
 {
+    rb_gc_mark(monitor->self);
 }
 
 static void NIO_Monitor_free(struct NIO_Monitor *monitor)
@@ -84,15 +87,14 @@ static VALUE NIO_Monitor_initialize(VALUE self, VALUE io, VALUE interests, VALUE
 
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(interests_id == rb_intern("r")) {
+    if (interests_id == rb_intern("r")) {
         monitor->interests = EV_READ;
-    } else if(interests_id == rb_intern("w")) {
+    } else if (interests_id == rb_intern("w")) {
         monitor->interests = EV_WRITE;
-    } else if(interests_id == rb_intern("rw")) {
+    } else if (interests_id == rb_intern("rw")) {
         monitor->interests = EV_READ | EV_WRITE;
     } else {
-        rb_raise(rb_eArgError, "invalid event type %s (must be :r, :w, or :rw)",
-                 RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
+        rb_raise(rb_eArgError, "invalid event type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
     }
 
     GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
@@ -112,7 +114,7 @@ static VALUE NIO_Monitor_initialize(VALUE self, VALUE io, VALUE interests, VALUE
     monitor->selector = selector;
 
     if (monitor->interests) {
-        ev_io_start(selector->ev_loop, &monitor->ev_io);
+        ev_io_start(selector->ev_loop, &monitor->ev_io);
     }
 
     return Qnil;
@@ -127,17 +129,17 @@ static VALUE NIO_Monitor_close(int argc, VALUE *argv, VALUE self)
     rb_scan_args(argc, argv, "01", &deregister);
     selector = rb_ivar_get(self, rb_intern("selector"));
 
-    if(selector != Qnil) {
+    if (selector != Qnil) {
         /* if ev_loop is 0, it means that the loop has been stopped already (see NIO_Selector_shutdown) */
-        if(monitor->interests && monitor->selector->ev_loop) {
-            ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io);
+        if (monitor->interests && monitor->selector->ev_loop) {
+            ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io);
         }
 
         monitor->selector = 0;
         rb_ivar_set(self, rb_intern("selector"), Qnil);
-
+
         /* Default value is true */
-        if(deregister == Qtrue || deregister == Qnil) {
+        if (deregister == Qtrue || deregister == Qnil) {
             rb_funcall(selector, rb_intern("deregister"), 1, rb_ivar_get(self, rb_intern("io")));
         }
     }
@@ -165,7 +167,7 @@ static VALUE NIO_Monitor_interests(VALUE self)
 
 static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests)
 {
-    if(NIL_P(interests)) {
+    if (NIL_P(interests)) {
         NIO_Monitor_update_interests(self, 0);
     } else {
         NIO_Monitor_update_interests(self, NIO_Monitor_symbol2interest(interests));
@@ -174,7 +176,8 @@ static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests)
     return rb_ivar_get(self, rb_intern("interests"));
 }
 
-static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest) {
+static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest)
+{
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
@@ -184,7 +187,8 @@ static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest) {
     return rb_ivar_get(self, rb_intern("interests"));
 }
 
-static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest) {
+static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest)
+{
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
@@ -214,11 +218,11 @@ static VALUE NIO_Monitor_readiness(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if((monitor->revents & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
+    if ((monitor->revents & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
         return ID2SYM(rb_intern("rw"));
-    } else if(monitor->revents & EV_READ) {
+    } else if (monitor->revents & EV_READ) {
         return ID2SYM(rb_intern("r"));
-    } else if(monitor->revents & EV_WRITE) {
+    } else if (monitor->revents & EV_WRITE) {
         return ID2SYM(rb_intern("w"));
     } else {
         return Qnil;
@@ -230,7 +234,7 @@ static VALUE NIO_Monitor_is_readable(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(monitor->revents & EV_READ) {
+    if (monitor->revents & EV_READ) {
         return Qtrue;
     } else {
         return Qfalse;
@@ -242,7 +246,7 @@ static VALUE NIO_Monitor_is_writable(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(monitor->revents & EV_WRITE) {
+    if (monitor->revents & EV_WRITE) {
         return Qtrue;
     } else {
         return Qfalse;
@@ -256,15 +260,14 @@ static int NIO_Monitor_symbol2interest(VALUE interests)
     ID interests_id;
     interests_id = SYM2ID(interests);
 
-    if(interests_id == rb_intern("r")) {
+    if (interests_id == rb_intern("r")) {
         return EV_READ;
-    } else if(interests_id == rb_intern("w")) {
+    } else if (interests_id == rb_intern("w")) {
         return EV_WRITE;
-    } else if(interests_id == rb_intern("rw")) {
+    } else if (interests_id == rb_intern("rw")) {
         return EV_READ | EV_WRITE;
     } else {
-        rb_raise(rb_eArgError, "invalid interest type %s (must be :r, :w, or :rw)",
-                 RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
+        rb_raise(rb_eArgError, "invalid interest type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
    }
 }
 
@@ -274,12 +277,12 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(NIO_Monitor_is_closed(self) == Qtrue) {
+    if (NIO_Monitor_is_closed(self) == Qtrue) {
         rb_raise(rb_eEOFError, "monitor is closed");
     }
 
-    if(interests) {
-        switch(interests) {
+    if (interests) {
+        switch (interests) {
         case EV_READ:
             interests_id = rb_intern("r");
             break;
@@ -298,9 +301,9 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
         rb_ivar_set(self, rb_intern("interests"), Qnil);
     }
 
-    if(monitor->interests != interests) {
+    if (monitor->interests != interests) {
         // If the monitor currently has interests, we should stop it.
-        if(monitor->interests) {
+        if (monitor->interests) {
             ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io);
         }
 
@@ -309,7 +312,7 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
         ev_io_set(&monitor->ev_io, monitor->ev_io.fd, monitor->interests);
 
         // If we are interested in events, schedule the monitor back into the event loop:
-        if(monitor->interests) {
+        if (monitor->interests) {
             ev_io_start(monitor->selector->ev_loop, &monitor->ev_io);
         }
     }
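The `:r`, `:w`, and `:rw` symbols validated in `NIO_Monitor_symbol2interest` above are the same values accepted by the public Ruby API. A short usage sketch against that interface (standalone illustration, not taken from this release):

    require "nio"

    selector = NIO::Selector.new
    reader, writer = IO.pipe

    monitor = selector.register(reader, :r)  # :r, :w, or :rw; anything else raises ArgumentError
    monitor.interests                        # => :r

    writer.write "ping"
    selector.select(1) do |ready|
      p ready.readiness                      # => :r once the pipe has data to read
      p ready.readable?                      # => true
    end

    monitor.close                            # deregisters the IO from the selector by default
    selector.close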
data/ext/nio4r/nio4r.h
CHANGED
@@ -6,12 +6,11 @@
 #ifndef NIO4R_H
 #define NIO4R_H
 
+#include "libev.h"
 #include "ruby.h"
 #include "ruby/io.h"
-#include "libev.h"
 
-struct NIO_Selector
-{
+struct NIO_Selector {
     struct ev_loop *ev_loop;
     struct ev_timer timer; /* for timeouts */
     struct ev_io wakeup;
@@ -24,31 +23,27 @@ struct NIO_Selector
     VALUE ready_array;
 };
 
-struct NIO_callback_data
-{
+struct NIO_callback_data {
     VALUE *monitor;
     struct NIO_Selector *selector;
 };
 
-struct NIO_Monitor
-{
+struct NIO_Monitor {
     VALUE self;
     int interests, revents;
     struct ev_io ev_io;
     struct NIO_Selector *selector;
 };
 
-struct NIO_ByteBuffer
-{
+struct NIO_ByteBuffer {
     char *buffer;
     int position, limit, capacity, mark;
 };
 
-
 #ifdef GetReadFile
-# define FPTR_TO_FD(fptr) (fileno(GetReadFile(fptr)))
+#define FPTR_TO_FD(fptr) (fileno(GetReadFile(fptr)))
 #else
-# define FPTR_TO_FD(fptr) fptr->fd
+#define FPTR_TO_FD(fptr) fptr->fd
 #endif /* GetReadFile */
 
 /* Thunk between libev callbacks in NIO::Monitors and NIO::Selectors */
data/ext/nio4r/selector.c
CHANGED
@@ -5,7 +5,7 @@
 
 #include "nio4r.h"
 #ifdef HAVE_RUBYSIG_H
-# include "rubysig.h"
+#include "rubysig.h"
 #endif
 
 #ifdef HAVE_UNISTD_H
@@ -14,11 +14,11 @@
 #include <io.h>
 #endif
 
-#include <fcntl.h>
 #include <assert.h>
+#include <fcntl.h>
 
 static VALUE mNIO = Qnil;
-static VALUE cNIO_Monitor = Qnil;
+static VALUE cNIO_Monitor = Qnil;
 static VALUE cNIO_Selector = Qnil;
 
 /* Allocator/deallocator */
@@ -80,7 +80,7 @@ void Init_NIO_Selector()
     rb_define_method(cNIO_Selector, "closed?", NIO_Selector_closed, 0);
     rb_define_method(cNIO_Selector, "empty?", NIO_Selector_is_empty, 0);
 
-    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject);
+    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject);
 }
 
 /* Create the libev event loop and incoming event buffer */
@@ -95,13 +95,12 @@ static VALUE NIO_Selector_allocate(VALUE klass)
        safety. Pipes are nice and safe to use between threads.
 
        Note that Java NIO uses this same mechanism */
-    if(pipe(fds) < 0) {
+    if (pipe(fds) < 0) {
         rb_sys_fail("pipe");
     }
 
     /* Use non-blocking reads/writes during wakeup, in case the buffer is full */
-    if(fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 ||
-       fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
+    if (fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 || fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
         rb_sys_fail("fcntl");
     }
 
@@ -127,7 +126,7 @@ static VALUE NIO_Selector_allocate(VALUE klass)
 /* NIO selectors store all Ruby objects in instance variables so mark is a stub */
 static void NIO_Selector_mark(struct NIO_Selector *selector)
 {
-    if(selector->ready_array != Qnil) {
+    if (selector->ready_array != Qnil) {
         rb_gc_mark(selector->ready_array);
     }
 }
@@ -136,14 +135,14 @@ static void NIO_Selector_mark(struct NIO_Selector *selector)
    Called by both NIO::Selector#close and the finalizer below */
 static void NIO_Selector_shutdown(struct NIO_Selector *selector)
 {
-    if(selector->closed) {
+    if (selector->closed) {
         return;
     }
 
     close(selector->wakeup_reader);
     close(selector->wakeup_writer);
 
-    if(selector->ev_loop) {
+    if (selector->ev_loop) {
         ev_loop_destroy(selector->ev_loop);
         selector->ev_loop = 0;
     }
@@ -159,27 +158,28 @@ static void NIO_Selector_free(struct NIO_Selector *selector)
 }
 
 /* Return an array of symbols for supported backends */
-static VALUE NIO_Selector_supported_backends(VALUE klass) {
+static VALUE NIO_Selector_supported_backends(VALUE klass)
+{
     unsigned int backends = ev_supported_backends();
     VALUE result = rb_ary_new();
 
-    if(backends & EVBACKEND_EPOLL) {
+    if (backends & EVBACKEND_EPOLL) {
         rb_ary_push(result, ID2SYM(rb_intern("epoll")));
     }
 
-    if(backends & EVBACKEND_POLL) {
+    if (backends & EVBACKEND_POLL) {
         rb_ary_push(result, ID2SYM(rb_intern("poll")));
     }
 
-    if(backends & EVBACKEND_KQUEUE) {
+    if (backends & EVBACKEND_KQUEUE) {
         rb_ary_push(result, ID2SYM(rb_intern("kqueue")));
     }
 
-    if(backends & EVBACKEND_SELECT) {
+    if (backends & EVBACKEND_SELECT) {
        rb_ary_push(result, ID2SYM(rb_intern("select")));
     }
 
-    if(backends & EVBACKEND_PORT) {
+    if (backends & EVBACKEND_PORT) {
         rb_ary_push(result, ID2SYM(rb_intern("port")));
     }
 
@@ -201,27 +201,25 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &backend);
 
-    if(backend != Qnil) {
-        if(!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                     RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+    if (backend != Qnil) {
+        if (!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
         }
 
         backend_id = SYM2ID(backend);
 
-        if(backend_id == rb_intern("epoll")) {
+        if (backend_id == rb_intern("epoll")) {
             flags = EVBACKEND_EPOLL;
-        } else if(backend_id == rb_intern("poll")) {
+        } else if (backend_id == rb_intern("poll")) {
             flags = EVBACKEND_POLL;
-        } else if(backend_id == rb_intern("kqueue")) {
+        } else if (backend_id == rb_intern("kqueue")) {
             flags = EVBACKEND_KQUEUE;
-        } else if(backend_id == rb_intern("select")) {
+        } else if (backend_id == rb_intern("select")) {
             flags = EVBACKEND_SELECT;
-        } else if(backend_id == rb_intern("port")) {
+        } else if (backend_id == rb_intern("port")) {
             flags = EVBACKEND_PORT;
         } else {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                     RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
         }
     }
 
@@ -229,7 +227,7 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     assert(!selector->ev_loop);
 
     selector->ev_loop = ev_loop_new(flags);
-    if(!selector->ev_loop) {
+    if (!selector->ev_loop) {
         rb_raise(rb_eIOError, "error initializing event loop");
     }
 
@@ -245,11 +243,12 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     return Qnil;
 }
 
-static VALUE NIO_Selector_backend(VALUE self) {
+static VALUE NIO_Selector_backend(VALUE self)
+{
     struct NIO_Selector *selector;
 
     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }
 
@@ -277,7 +276,7 @@ static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE *args), VA
     current_thread = rb_thread_current();
     lock_holder = rb_ivar_get(self, rb_intern("lock_holder"));
 
-    if(lock_holder != current_thread) {
+    if (lock_holder != current_thread) {
         lock = rb_ivar_get(self, rb_intern("lock"));
         rb_funcall(lock, rb_intern("lock"), 0);
         rb_ivar_set(self, rb_intern("lock_holder"), current_thread);
@@ -306,7 +305,7 @@ static VALUE NIO_Selector_unlock(VALUE self)
 /* Register an IO object with the selector for the given interests */
 static VALUE NIO_Selector_register(VALUE self, VALUE io, VALUE interests)
 {
-    VALUE args[3] = {self, io, interests};
+    VALUE args[3] = { self, io, interests };
     return NIO_Selector_synchronize(self, NIO_Selector_register_synchronized, args);
 }
 
@@ -322,14 +321,14 @@ static VALUE NIO_Selector_register_synchronized(VALUE *args)
     interests = args[2];
 
     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }
 
     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_lookup(selectables, io);
 
-    if(monitor != Qnil)
+    if (monitor != Qnil)
         rb_raise(rb_eArgError, "this IO is already registered with selector");
 
     /* Create a new NIO::Monitor */
@@ -346,7 +345,7 @@ static VALUE NIO_Selector_register_synchronized(VALUE *args)
 /* Deregister an IO object from the selector */
 static VALUE NIO_Selector_deregister(VALUE self, VALUE io)
 {
-    VALUE args[2] = {self, io};
+    VALUE args[2] = { self, io };
     return NIO_Selector_synchronize(self, NIO_Selector_deregister_synchronized, args);
 }
 
@@ -361,7 +360,7 @@ static VALUE NIO_Selector_deregister_synchronized(VALUE *args)
     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_delete(selectables, io);
 
-    if(monitor != Qnil) {
+    if (monitor != Qnil) {
         rb_funcall(monitor, rb_intern("close"), 1, Qfalse);
     }
 
@@ -385,7 +384,7 @@ static VALUE NIO_Selector_select(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &timeout);
 
-    if(timeout != Qnil && NUM2DBL(timeout) < 0) {
+    if (timeout != Qnil && NUM2DBL(timeout) < 0) {
         rb_raise(rb_eArgError, "time interval must be positive");
     }
 
@@ -404,26 +403,26 @@ static VALUE NIO_Selector_select_synchronized(VALUE *args)
 
     Data_Get_Struct(args[0], struct NIO_Selector, selector);
 
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }
 
-    if(!rb_block_given_p()) {
+    if (!rb_block_given_p()) {
         selector->ready_array = rb_ary_new();
     }
 
     ready = NIO_Selector_run(selector, args[1]);
 
     /* Timeout */
-    if(ready < 0) {
-        if(!rb_block_given_p()) {
+    if (ready < 0) {
+        if (!rb_block_given_p()) {
             selector->ready_array = Qnil;
        }
 
         return Qnil;
     }
 
-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
         return INT2NUM(ready);
     } else {
         ready_array = selector->ready_array;
@@ -441,12 +440,12 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     selector->selecting = 1;
     selector->wakeup_fired = 0;
 
-    if(timeout == Qnil) {
+    if (timeout == Qnil) {
         /* Don't fire a wakeup timeout if we weren't passed one */
         ev_timer_stop(selector->ev_loop, &selector->timer);
     } else {
         timeout_val = NUM2DBL(timeout);
-        if(timeout_val == 0) {
+        if (timeout_val == 0) {
             /* If we've been given an explicit timeout of 0, perform a non-blocking
                select operation */
             ev_run_flags = EVRUN_NOWAIT;
@@ -462,7 +461,7 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     result = selector->ready_count;
     selector->selecting = selector->ready_count = 0;
 
-    if(result > 0 || selector->wakeup_fired) {
+    if (result > 0 || selector->wakeup_fired) {
         selector->wakeup_fired = 0;
         return result;
     } else {
@@ -476,7 +475,7 @@ static VALUE NIO_Selector_wakeup(VALUE self)
     struct NIO_Selector *selector;
     Data_Get_Struct(self, struct NIO_Selector, selector);
 
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }
 
@@ -489,7 +488,7 @@ static VALUE NIO_Selector_wakeup(VALUE self)
 /* Close the selector and free system resources */
 static VALUE NIO_Selector_close(VALUE self)
 {
-    VALUE args[1] = {self};
+    VALUE args[1] = { self };
     return NIO_Selector_synchronize(self, NIO_Selector_close_synchronized, args);
 }
 
@@ -507,7 +506,7 @@ static VALUE NIO_Selector_close_synchronized(VALUE *args)
 /* Is the selector closed? */
 static VALUE NIO_Selector_closed(VALUE self)
 {
-    VALUE args[1] = {self};
+    VALUE args[1] = { self };
     return NIO_Selector_synchronize(self, NIO_Selector_closed_synchronized, args);
 }
 
@@ -528,7 +527,6 @@ static VALUE NIO_Selector_is_empty(VALUE self)
     return rb_funcall(selectables, rb_intern("empty?"), 0) == Qtrue ? Qtrue : Qfalse;
 }
 
-
 /* Called whenever a timeout fires on the event loop */
 static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents)
 {
@@ -542,7 +540,8 @@ static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *
     selector->selecting = 0;
 
     /* Drain the wakeup pipe, giving us level-triggered behavior */
-    while(read(selector->wakeup_reader, buffer, 128) > 0);
+    while (read(selector->wakeup_reader, buffer, 128) > 0)
+        ;
 }
 
 /* libev callback fired whenever a monitor gets an event */
@@ -558,7 +557,7 @@ void NIO_Selector_monitor_callback(struct ev_loop *ev_loop, struct ev_io *io, in
     selector->ready_count++;
     monitor_data->revents = revents;
 
-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
         rb_yield(monitor);
     } else {
         assert(selector->ready_array != Qnil);