nio4r 2.5.3 → 2.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES.md +6 -0
- data/README.md +5 -24
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +2 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -32
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/selector.c +50 -51
- data/lib/nio/version.rb +1 -1
- metadata +5 -5
- data/Guardfile +0 -10
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3353e688cab0a1d45f509edcb0a5bc5fa3beb6faca9e6e5703fe75ecbb220ed9
+  data.tar.gz: d5186d282adfa316128165ca4d2ccae5322a0605b15508d47170f9f7517e3346
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 05c2472803019de225a4d813f302bad6810fecf193adad4291c4b58e0e5aaa34d02b60945bc6641be55103324b82eee001988641d2b9937f63d18e7d00a03f8d
+  data.tar.gz: fcbb1f1aa622df5e82df481e40567c153d15ca2e59ca21fbd3aff00006326e1f8ec44ff108c59fc8e8124bcc3b8e99f23d6291fe7ca27092d7af6ac22be71eb5
data/CHANGES.md
CHANGED
data/README.md
CHANGED
@@ -1,17 +1,10 @@
 # 
 
 [](http://rubygems.org/gems/nio4r)
-[](https://ci.appveyor.com/project/tarcieri/nio4r/branch/master)
+[](https://github.com/socketry/nio4r/actions?query=workflow:nio4r)
 [](https://codeclimate.com/github/socketry/nio4r)
 [](https://coveralls.io/r/socketry/nio4r)
 [](http://www.rubydoc.info/gems/nio4r/2.2.0)
-[](https://github.com/socketry/nio4r/blob/master/LICENSE.txt)
-
-_NOTE: This is the 2.x **stable** branch of nio4r. For the 1.x **legacy** branch,
-please see:_
-
-https://github.com/socketry/nio4r/tree/1-x-stable
 
 **New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for
 scalable network clients and servers. Modeled after the Java NIO API, but
@@ -25,13 +18,13 @@ writing.
 ## Projects using nio4r
 
 * [ActionCable]: Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
-* [Celluloid
-* [
+* [Celluloid]: Actor-based concurrency framework, uses nio4r for async I/O
+* [Async]: Asynchronous I/O framework for Ruby
 * [Puma]: Ruby/Rack web server built for concurrency
 
 [ActionCable]: https://rubygems.org/gems/actioncable
-[Celluloid
-[
+[Celluloid]: https://github.com/celluloid/celluloid-io
+[Async]: https://github.com/socketry/async
 [Puma]: https://github.com/puma/puma
 
 ## Goals
@@ -43,7 +36,6 @@ writing.
 
 ## Supported platforms
 
-* Ruby 2.3
 * Ruby 2.4
 * Ruby 2.5
 * Ruby 2.6
@@ -57,17 +49,6 @@ writing.
 * **Java NIO**: JRuby extension which wraps the Java NIO subsystem
 * **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter
 
-## Discussion
-
-For discussion and general help with nio4r, email
-[socketry+subscribe@googlegroups.com][subscribe]
-or join on the web via the [Google Group].
-
-We're also on IRC at ##socketry on irc.freenode.net.
-
-[subscribe]: mailto:socketry+subscribe@googlegroups.com
-[google group]: https://groups.google.com/group/socketry
-
 ## Documentation
 
 [Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki)
data/ext/nio4r/.clang-format
ADDED
@@ -0,0 +1,16 @@
+---
+Language: Cpp
+BasedOnStyle: WebKit
+AllowAllParametersOfDeclarationOnNextLine: false
+BinPackArguments: false
+BinPackParameters: false
+AlignConsecutiveMacros: false
+AlignConsecutiveAssignments: false
+BreakBeforeBraces: Linux
+BraceWrapping:
+  AfterControlStatement: Never
+IndentCaseLabels: true
+PointerAlignment: Right
+SpaceBeforeParens: ControlStatements
+IndentWidth: 4
+...
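The new `.clang-format` file pins the C style for `ext/nio4r` (WebKit base, 4-space indent, a space after `if`/`while`, right-aligned pointers, function braces on their own line), which accounts for the whitespace-only churn in the C diffs below. As a hypothetical convenience, not shipped with the gem, the formatter could be driven from a Rake task:

```ruby
# Hypothetical Rakefile task (not part of nio4r): apply the bundled
# .clang-format to the C extension sources. Requires clang-format on PATH.
task :clang_format do
  sh "clang-format -i ext/nio4r/*.c ext/nio4r/*.h"
end
```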
data/ext/nio4r/bytebuffer.c
CHANGED
@@ -42,7 +42,7 @@ void Init_NIO_ByteBuffer()
     cNIO_ByteBuffer = rb_define_class_under(mNIO, "ByteBuffer", rb_cObject);
     rb_define_alloc_func(cNIO_ByteBuffer, NIO_ByteBuffer_allocate);
 
-    cNIO_ByteBuffer_OverflowError
+    cNIO_ByteBuffer_OverflowError = rb_define_class_under(cNIO_ByteBuffer, "OverflowError", rb_eIOError);
     cNIO_ByteBuffer_UnderflowError = rb_define_class_under(cNIO_ByteBuffer, "UnderflowError", rb_eIOError);
     cNIO_ByteBuffer_MarkUnsetError = rb_define_class_under(cNIO_ByteBuffer, "MarkUnsetError", rb_eIOError);
 
@@ -85,8 +85,8 @@ static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *buffer)
 
 static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *buffer)
 {
-    if(buffer->buffer)
-
+    if (buffer->buffer)
+        xfree(buffer->buffer);
     xfree(buffer);
 }
 
@@ -133,17 +133,17 @@ static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position)
 
     pos = NUM2INT(new_position);
 
-    if(pos < 0) {
+    if (pos < 0) {
         rb_raise(rb_eArgError, "negative position given");
     }
 
-    if(pos > buffer->limit) {
+    if (pos > buffer->limit) {
         rb_raise(rb_eArgError, "specified position exceeds limit");
     }
 
     buffer->position = pos;
 
-    if(buffer->mark > buffer->position) {
+    if (buffer->mark > buffer->position) {
         buffer->mark = MARK_UNSET;
     }
 
@@ -166,21 +166,21 @@ static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit)
 
     lim = NUM2INT(new_limit);
 
-    if(lim < 0) {
+    if (lim < 0) {
         rb_raise(rb_eArgError, "negative limit given");
     }
 
-    if(lim > buffer->capacity) {
+    if (lim > buffer->capacity) {
         rb_raise(rb_eArgError, "specified limit exceeds capacity");
     }
 
     buffer->limit = lim;
 
-    if(buffer->position > lim) {
+    if (buffer->position > lim) {
         buffer->position = lim;
     }
 
-    if(buffer->mark > lim) {
+    if (buffer->mark > lim) {
         buffer->mark = MARK_UNSET;
     }
 
@@ -220,17 +220,17 @@ static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &length);
 
-    if(length == Qnil) {
+    if (length == Qnil) {
         len = buffer->limit - buffer->position;
     } else {
         len = NUM2INT(length);
     }
 
-    if(len < 0) {
+    if (len < 0) {
         rb_raise(rb_eArgError, "negative length given");
     }
 
-    if(len > buffer->limit - buffer->position) {
+    if (len > buffer->limit - buffer->position) {
         rb_raise(cNIO_ByteBuffer_UnderflowError, "not enough data in buffer");
     }
 
@@ -248,11 +248,11 @@ static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index)
 
     i = NUM2INT(index);
 
-    if(i < 0) {
+    if (i < 0) {
         rb_raise(rb_eArgError, "negative index given");
     }
 
-    if(i >= buffer->limit) {
+    if (i >= buffer->limit) {
         rb_raise(rb_eArgError, "specified index exceeds limit");
     }
 
@@ -268,7 +268,7 @@ static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string)
     StringValue(string);
     length = RSTRING_LEN(string);
 
-    if(length > buffer->limit - buffer->position) {
+    if (length > buffer->limit - buffer->position) {
         rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
     }
 
@@ -289,14 +289,14 @@ static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE io)
     rb_io_set_nonblock(fptr);
 
     nbytes = buffer->limit - buffer->position;
-    if(nbytes == 0) {
+    if (nbytes == 0) {
         rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
     }
 
     bytes_read = read(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
 
-    if(bytes_read < 0) {
-        if(errno == EAGAIN) {
+    if (bytes_read < 0) {
+        if (errno == EAGAIN) {
             return INT2NUM(0);
         } else {
             rb_sys_fail("write");
@@ -319,14 +319,14 @@ static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE io)
     rb_io_set_nonblock(fptr);
 
     nbytes = buffer->limit - buffer->position;
-    if(nbytes == 0) {
+    if (nbytes == 0) {
         rb_raise(cNIO_ByteBuffer_UnderflowError, "no data remaining in buffer");
     }
 
     bytes_written = write(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
 
-    if(bytes_written < 0) {
-        if(errno == EAGAIN) {
+    if (bytes_written < 0) {
+        if (errno == EAGAIN) {
             return INT2NUM(0);
         } else {
             rb_sys_fail("write");
@@ -375,7 +375,7 @@ static VALUE NIO_ByteBuffer_reset(VALUE self)
     struct NIO_ByteBuffer *buffer;
     Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
 
-    if(buffer->mark < 0) {
+    if (buffer->mark < 0) {
         rb_raise(cNIO_ByteBuffer_MarkUnsetError, "mark has not been set");
     } else {
         buffer->position = buffer->mark;
@@ -402,8 +402,8 @@ static VALUE NIO_ByteBuffer_each(VALUE self)
     struct NIO_ByteBuffer *buffer;
     Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
 
-    if(rb_block_given_p()) {
-        for(i = 0; i < buffer->limit; i++) {
+    if (rb_block_given_p()) {
+        for (i = 0; i < buffer->limit; i++) {
             rb_yield(INT2NUM(buffer->buffer[i]));
         }
     } else {
@@ -421,9 +421,8 @@ static VALUE NIO_ByteBuffer_inspect(VALUE self)
     return rb_sprintf(
         "#<%s:%p @position=%d @limit=%d @capacity=%d>",
         rb_class2name(CLASS_OF(self)),
-        (void*)self,
+        (void *)self,
         buffer->position,
         buffer->limit,
-        buffer->capacity
-    );
+        buffer->capacity);
 }
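The `bytebuffer.c` changes above are clang-format whitespace and brace/argument layout only; no behavior changes. For orientation, a minimal usage sketch of the `NIO::ByteBuffer` methods implemented in this file (method names taken from the C functions above; the exact flow is illustrative):

```ruby
require "nio"

buffer = NIO::ByteBuffer.new(16_384)    # fixed capacity
buffer.put("GET / HTTP/1.1\r\n\r\n")    # raises NIO::ByteBuffer::OverflowError if it doesn't fit
buffer.flip                             # limit = position, position = 0: switch to reading
request = buffer.get                    # with no length argument, returns position..limit

buffer.clear                            # reset position/limit so the buffer can be reused
reader, writer = IO.pipe
writer.write("hello")
buffer.read_from(reader)                # one non-blocking read; returns 0 on EAGAIN,
                                        # exactly as the C read_from above does
```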
data/ext/nio4r/extconf.rb
CHANGED
@@ -22,6 +22,8 @@ $defs << "-DEV_USE_KQUEUE" if have_header("sys/event.h") && have_header("s
 $defs << "-DEV_USE_PORT" if have_type("port_event_t", "port.h")
 $defs << "-DHAVE_SYS_RESOURCE_H" if have_header("sys/resource.h")
 
+$defs << "-DEV_STANDALONE" # prevent libev from assuming "config.h" exists
+
 CONFIG["optflags"] << " -fno-strict-aliasing" unless RUBY_PLATFORM =~ /mswin/
 
 dir_config "nio4r_ext"
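The `-DEV_STANDALONE` define is the one functional change in this file: it tells the bundled libev that it is being built without autoconf, so it never tries to include a generated `config.h`. For context, a condensed sketch of how an mkmf-based `extconf.rb` feeds such defines to the C compiler (simplified; the real file also probes for epoll/kqueue/event ports as shown in the context lines above):

```ruby
require "mkmf"

# Feature probes append -D flags to $defs; mkmf turns them into CFLAGS.
$defs << "-DHAVE_SYS_RESOURCE_H" if have_header("sys/resource.h")

# Build libev in standalone mode so it never assumes a generated "config.h"
# exists -- the define added in this release.
$defs << "-DEV_STANDALONE"

create_makefile "nio4r_ext"
```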
data/ext/nio4r/libev.h
CHANGED
data/ext/nio4r/monitor.c
CHANGED
@@ -4,6 +4,7 @@
  */
 
 #include "nio4r.h"
+#include <assert.h>
 
 static VALUE mNIO = Qnil;
 static VALUE cNIO_Monitor = Qnil;
@@ -60,13 +61,14 @@ void Init_NIO_Monitor()
 static VALUE NIO_Monitor_allocate(VALUE klass)
 {
     struct NIO_Monitor *monitor = (struct NIO_Monitor *)xmalloc(sizeof(struct NIO_Monitor));
-
+    assert(monitor);
+    *monitor = (struct NIO_Monitor){.self = Qnil};
     return Data_Wrap_Struct(klass, NIO_Monitor_mark, NIO_Monitor_free, monitor);
 }
 
 static void NIO_Monitor_mark(struct NIO_Monitor *monitor)
 {
-
+    rb_gc_mark(monitor->self);
 }
 
 static void NIO_Monitor_free(struct NIO_Monitor *monitor)
@@ -85,15 +87,14 @@ static VALUE NIO_Monitor_initialize(VALUE self, VALUE io, VALUE interests, VALUE
 
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(interests_id == rb_intern("r")) {
+    if (interests_id == rb_intern("r")) {
        monitor->interests = EV_READ;
-    } else if(interests_id == rb_intern("w")) {
+    } else if (interests_id == rb_intern("w")) {
        monitor->interests = EV_WRITE;
-    } else if(interests_id == rb_intern("rw")) {
+    } else if (interests_id == rb_intern("rw")) {
        monitor->interests = EV_READ | EV_WRITE;
     } else {
-        rb_raise(rb_eArgError, "invalid event type %s (must be :r, :w, or :rw)",
-                 RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
+        rb_raise(rb_eArgError, "invalid event type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
     }
 
     GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
@@ -113,7 +114,7 @@ static VALUE NIO_Monitor_initialize(VALUE self, VALUE io, VALUE interests, VALUE
     monitor->selector = selector;
 
     if (monitor->interests) {
-
+        ev_io_start(selector->ev_loop, &monitor->ev_io);
     }
 
     return Qnil;
@@ -128,17 +129,17 @@ static VALUE NIO_Monitor_close(int argc, VALUE *argv, VALUE self)
     rb_scan_args(argc, argv, "01", &deregister);
     selector = rb_ivar_get(self, rb_intern("selector"));
 
-    if(selector != Qnil) {
+    if (selector != Qnil) {
        /* if ev_loop is 0, it means that the loop has been stopped already (see NIO_Selector_shutdown) */
-        if(monitor->interests && monitor->selector->ev_loop) {
-
+        if (monitor->interests && monitor->selector->ev_loop) {
+            ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io);
        }
 
        monitor->selector = 0;
        rb_ivar_set(self, rb_intern("selector"), Qnil);
-
+
        /* Default value is true */
-        if(deregister == Qtrue || deregister == Qnil) {
+        if (deregister == Qtrue || deregister == Qnil) {
            rb_funcall(selector, rb_intern("deregister"), 1, rb_ivar_get(self, rb_intern("io")));
        }
     }
@@ -166,7 +167,7 @@ static VALUE NIO_Monitor_interests(VALUE self)
 
 static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests)
 {
-    if(NIL_P(interests)) {
+    if (NIL_P(interests)) {
        NIO_Monitor_update_interests(self, 0);
     } else {
        NIO_Monitor_update_interests(self, NIO_Monitor_symbol2interest(interests));
@@ -175,7 +176,8 @@ static VALUE NIO_Monitor_set_interests(VALUE self, VALUE interests)
     return rb_ivar_get(self, rb_intern("interests"));
 }
 
-static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest) {
+static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest)
+{
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
@@ -185,7 +187,8 @@ static VALUE NIO_Monitor_add_interest(VALUE self, VALUE interest) {
     return rb_ivar_get(self, rb_intern("interests"));
 }
 
-static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest) {
+static VALUE NIO_Monitor_remove_interest(VALUE self, VALUE interest)
+{
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
@@ -215,11 +218,11 @@ static VALUE NIO_Monitor_readiness(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if((monitor->revents & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
+    if ((monitor->revents & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
        return ID2SYM(rb_intern("rw"));
-    } else if(monitor->revents & EV_READ) {
+    } else if (monitor->revents & EV_READ) {
        return ID2SYM(rb_intern("r"));
-    } else if(monitor->revents & EV_WRITE) {
+    } else if (monitor->revents & EV_WRITE) {
        return ID2SYM(rb_intern("w"));
     } else {
        return Qnil;
@@ -231,7 +234,7 @@ static VALUE NIO_Monitor_is_readable(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(monitor->revents & EV_READ) {
+    if (monitor->revents & EV_READ) {
        return Qtrue;
     } else {
        return Qfalse;
@@ -243,7 +246,7 @@ static VALUE NIO_Monitor_is_writable(VALUE self)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(monitor->revents & EV_WRITE) {
+    if (monitor->revents & EV_WRITE) {
        return Qtrue;
     } else {
        return Qfalse;
@@ -257,15 +260,14 @@ static int NIO_Monitor_symbol2interest(VALUE interests)
     ID interests_id;
     interests_id = SYM2ID(interests);
 
-    if(interests_id == rb_intern("r")) {
+    if (interests_id == rb_intern("r")) {
        return EV_READ;
-    } else if(interests_id == rb_intern("w")) {
+    } else if (interests_id == rb_intern("w")) {
        return EV_WRITE;
-    } else if(interests_id == rb_intern("rw")) {
+    } else if (interests_id == rb_intern("rw")) {
        return EV_READ | EV_WRITE;
     } else {
-        rb_raise(rb_eArgError, "invalid interest type %s (must be :r, :w, or :rw)",
-                 RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
+        rb_raise(rb_eArgError, "invalid interest type %s (must be :r, :w, or :rw)", RSTRING_PTR(rb_funcall(interests, rb_intern("inspect"), 0)));
     }
 }
 
@@ -275,12 +277,12 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
     struct NIO_Monitor *monitor;
     Data_Get_Struct(self, struct NIO_Monitor, monitor);
 
-    if(NIO_Monitor_is_closed(self) == Qtrue) {
+    if (NIO_Monitor_is_closed(self) == Qtrue) {
        rb_raise(rb_eEOFError, "monitor is closed");
     }
 
-    if(interests) {
-        switch(interests) {
+    if (interests) {
+        switch (interests) {
            case EV_READ:
                interests_id = rb_intern("r");
                break;
@@ -299,9 +301,9 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
        rb_ivar_set(self, rb_intern("interests"), Qnil);
     }
 
-    if(monitor->interests != interests) {
+    if (monitor->interests != interests) {
        // If the monitor currently has interests, we should stop it.
-        if(monitor->interests) {
+        if (monitor->interests) {
            ev_io_stop(monitor->selector->ev_loop, &monitor->ev_io);
        }
 
@@ -310,7 +312,7 @@ static void NIO_Monitor_update_interests(VALUE self, int interests)
     ev_io_set(&monitor->ev_io, monitor->ev_io.fd, monitor->interests);
 
     // If we are interested in events, schedule the monitor back into the event loop:
-    if(monitor->interests) {
+    if (monitor->interests) {
        ev_io_start(monitor->selector->ev_loop, &monitor->ev_io);
     }
 }
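Beyond formatting, `monitor.c` gains two small hardening tweaks: the allocator now asserts the `xmalloc` result and value-initializes the struct (all fields zeroed, `self` set to `Qnil`), and `NIO_Monitor_mark` explicitly marks `monitor->self` so the GC keeps the wrapping Ruby object alive. The Ruby-facing interest API implemented here is unchanged; a short sketch using only methods registered in this file (illustrative, not exhaustive):

```ruby
require "nio"

selector = NIO::Selector.new
reader, _writer = IO.pipe

monitor = selector.register(reader, :r)  # valid interests: :r, :w, :rw
monitor.interests                        # => :r
monitor.add_interest(:r)                 # OR-ed into the current set; bad symbols raise ArgumentError
monitor.readable?                        # based on revents, so false until a select has run
monitor.close                            # deregisters from the selector by default
```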
data/ext/nio4r/nio4r.h
CHANGED
@@ -6,12 +6,11 @@
 #ifndef NIO4R_H
 #define NIO4R_H
 
+#include "libev.h"
 #include "ruby.h"
 #include "ruby/io.h"
-#include "libev.h"
 
-struct NIO_Selector
-{
+struct NIO_Selector {
     struct ev_loop *ev_loop;
     struct ev_timer timer; /* for timeouts */
     struct ev_io wakeup;
@@ -24,31 +23,27 @@ struct NIO_Selector
     VALUE ready_array;
 };
 
-struct NIO_callback_data
-{
+struct NIO_callback_data {
     VALUE *monitor;
     struct NIO_Selector *selector;
 };
 
-struct NIO_Monitor
-{
+struct NIO_Monitor {
     VALUE self;
     int interests, revents;
     struct ev_io ev_io;
     struct NIO_Selector *selector;
 };
 
-struct NIO_ByteBuffer
-{
+struct NIO_ByteBuffer {
     char *buffer;
     int position, limit, capacity, mark;
 };
 
-
 #ifdef GetReadFile
-#
+#define FPTR_TO_FD(fptr) (fileno(GetReadFile(fptr)))
 #else
-#
+#define FPTR_TO_FD(fptr) fptr->fd
 #endif /* GetReadFile */
 
 /* Thunk between libev callbacks in NIO::Monitors and NIO::Selectors */
data/ext/nio4r/selector.c
CHANGED
@@ -5,7 +5,7 @@
 
 #include "nio4r.h"
 #ifdef HAVE_RUBYSIG_H
-#
+#include "rubysig.h"
 #endif
 
 #ifdef HAVE_UNISTD_H
@@ -14,11 +14,11 @@
 #include <io.h>
 #endif
 
-#include <fcntl.h>
 #include <assert.h>
+#include <fcntl.h>
 
 static VALUE mNIO = Qnil;
-static VALUE cNIO_Monitor
+static VALUE cNIO_Monitor = Qnil;
 static VALUE cNIO_Selector = Qnil;
 
 /* Allocator/deallocator */
@@ -80,7 +80,7 @@ void Init_NIO_Selector()
     rb_define_method(cNIO_Selector, "closed?", NIO_Selector_closed, 0);
     rb_define_method(cNIO_Selector, "empty?", NIO_Selector_is_empty, 0);
 
-    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor",
+    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject);
 }
 
 /* Create the libev event loop and incoming event buffer */
@@ -95,13 +95,12 @@ static VALUE NIO_Selector_allocate(VALUE klass)
        safety. Pipes are nice and safe to use between threads.
 
        Note that Java NIO uses this same mechanism */
-    if(pipe(fds) < 0) {
+    if (pipe(fds) < 0) {
        rb_sys_fail("pipe");
     }
 
     /* Use non-blocking reads/writes during wakeup, in case the buffer is full */
-    if(fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 ||
-       fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
+    if (fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 || fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
        rb_sys_fail("fcntl");
     }
 
@@ -127,7 +126,7 @@ static VALUE NIO_Selector_allocate(VALUE klass)
 /* NIO selectors store all Ruby objects in instance variables so mark is a stub */
 static void NIO_Selector_mark(struct NIO_Selector *selector)
 {
-    if(selector->ready_array != Qnil) {
+    if (selector->ready_array != Qnil) {
        rb_gc_mark(selector->ready_array);
     }
 }
@@ -136,14 +135,14 @@ static void NIO_Selector_mark(struct NIO_Selector *selector)
    Called by both NIO::Selector#close and the finalizer below */
 static void NIO_Selector_shutdown(struct NIO_Selector *selector)
 {
-    if(selector->closed) {
+    if (selector->closed) {
        return;
     }
 
     close(selector->wakeup_reader);
     close(selector->wakeup_writer);
 
-    if(selector->ev_loop) {
+    if (selector->ev_loop) {
        ev_loop_destroy(selector->ev_loop);
        selector->ev_loop = 0;
     }
@@ -159,27 +158,28 @@ static void NIO_Selector_free(struct NIO_Selector *selector)
 }
 
 /* Return an array of symbols for supported backends */
-static VALUE NIO_Selector_supported_backends(VALUE klass) {
+static VALUE NIO_Selector_supported_backends(VALUE klass)
+{
     unsigned int backends = ev_supported_backends();
     VALUE result = rb_ary_new();
 
-    if(backends & EVBACKEND_EPOLL) {
+    if (backends & EVBACKEND_EPOLL) {
        rb_ary_push(result, ID2SYM(rb_intern("epoll")));
     }
 
-    if(backends & EVBACKEND_POLL) {
+    if (backends & EVBACKEND_POLL) {
        rb_ary_push(result, ID2SYM(rb_intern("poll")));
     }
 
-    if(backends & EVBACKEND_KQUEUE) {
+    if (backends & EVBACKEND_KQUEUE) {
        rb_ary_push(result, ID2SYM(rb_intern("kqueue")));
     }
 
-    if(backends & EVBACKEND_SELECT) {
+    if (backends & EVBACKEND_SELECT) {
        rb_ary_push(result, ID2SYM(rb_intern("select")));
     }
 
-    if(backends & EVBACKEND_PORT) {
+    if (backends & EVBACKEND_PORT) {
        rb_ary_push(result, ID2SYM(rb_intern("port")));
     }
 
@@ -201,27 +201,25 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &backend);
 
-    if(backend != Qnil) {
-        if(!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                     RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+    if (backend != Qnil) {
+        if (!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
        }
 
        backend_id = SYM2ID(backend);
 
-        if(backend_id == rb_intern("epoll")) {
+        if (backend_id == rb_intern("epoll")) {
            flags = EVBACKEND_EPOLL;
-        } else if(backend_id == rb_intern("poll")) {
+        } else if (backend_id == rb_intern("poll")) {
            flags = EVBACKEND_POLL;
-        } else if(backend_id == rb_intern("kqueue")) {
+        } else if (backend_id == rb_intern("kqueue")) {
            flags = EVBACKEND_KQUEUE;
-        } else if(backend_id == rb_intern("select")) {
+        } else if (backend_id == rb_intern("select")) {
            flags = EVBACKEND_SELECT;
-        } else if(backend_id == rb_intern("port")) {
+        } else if (backend_id == rb_intern("port")) {
            flags = EVBACKEND_PORT;
        } else {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                     RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
        }
     }
 
@@ -229,7 +227,7 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     assert(!selector->ev_loop);
 
     selector->ev_loop = ev_loop_new(flags);
-    if(!selector->ev_loop) {
+    if (!selector->ev_loop) {
        rb_raise(rb_eIOError, "error initializing event loop");
     }
 
@@ -245,11 +243,12 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     return Qnil;
 }
 
-static VALUE NIO_Selector_backend(VALUE self) {
+static VALUE NIO_Selector_backend(VALUE self)
+{
     struct NIO_Selector *selector;
 
     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
        rb_raise(rb_eIOError, "selector is closed");
     }
 
@@ -277,7 +276,7 @@ static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE *args), VA
     current_thread = rb_thread_current();
     lock_holder = rb_ivar_get(self, rb_intern("lock_holder"));
 
-    if(lock_holder != current_thread) {
+    if (lock_holder != current_thread) {
        lock = rb_ivar_get(self, rb_intern("lock"));
        rb_funcall(lock, rb_intern("lock"), 0);
        rb_ivar_set(self, rb_intern("lock_holder"), current_thread);
@@ -306,7 +305,7 @@ static VALUE NIO_Selector_unlock(VALUE self)
 /* Register an IO object with the selector for the given interests */
 static VALUE NIO_Selector_register(VALUE self, VALUE io, VALUE interests)
 {
-    VALUE args[3] = {self, io, interests};
+    VALUE args[3] = { self, io, interests };
     return NIO_Selector_synchronize(self, NIO_Selector_register_synchronized, args);
 }
 
@@ -322,14 +321,14 @@ static VALUE NIO_Selector_register_synchronized(VALUE *args)
     interests = args[2];
 
     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
        rb_raise(rb_eIOError, "selector is closed");
     }
 
     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_lookup(selectables, io);
 
-    if(monitor != Qnil)
+    if (monitor != Qnil)
        rb_raise(rb_eArgError, "this IO is already registered with selector");
 
     /* Create a new NIO::Monitor */
@@ -346,7 +345,7 @@ static VALUE NIO_Selector_register_synchronized(VALUE *args)
 /* Deregister an IO object from the selector */
 static VALUE NIO_Selector_deregister(VALUE self, VALUE io)
 {
-    VALUE args[2] = {self, io};
+    VALUE args[2] = { self, io };
     return NIO_Selector_synchronize(self, NIO_Selector_deregister_synchronized, args);
 }
 
@@ -361,7 +360,7 @@ static VALUE NIO_Selector_deregister_synchronized(VALUE *args)
     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_delete(selectables, io);
 
-    if(monitor != Qnil) {
+    if (monitor != Qnil) {
        rb_funcall(monitor, rb_intern("close"), 1, Qfalse);
     }
 
@@ -385,7 +384,7 @@ static VALUE NIO_Selector_select(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &timeout);
 
-    if(timeout != Qnil && NUM2DBL(timeout) < 0) {
+    if (timeout != Qnil && NUM2DBL(timeout) < 0) {
        rb_raise(rb_eArgError, "time interval must be positive");
     }
 
@@ -404,26 +403,26 @@ static VALUE NIO_Selector_select_synchronized(VALUE *args)
 
     Data_Get_Struct(args[0], struct NIO_Selector, selector);
 
-    if(selector->closed) {
+    if (selector->closed) {
        rb_raise(rb_eIOError, "selector is closed");
     }
 
-    if(!rb_block_given_p()) {
+    if (!rb_block_given_p()) {
        selector->ready_array = rb_ary_new();
     }
 
     ready = NIO_Selector_run(selector, args[1]);
 
     /* Timeout */
-    if(ready < 0) {
-        if(!rb_block_given_p()) {
+    if (ready < 0) {
+        if (!rb_block_given_p()) {
            selector->ready_array = Qnil;
        }
 
        return Qnil;
     }
 
-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
        return INT2NUM(ready);
     } else {
        ready_array = selector->ready_array;
@@ -441,12 +440,12 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     selector->selecting = 1;
     selector->wakeup_fired = 0;
 
-    if(timeout == Qnil) {
+    if (timeout == Qnil) {
        /* Don't fire a wakeup timeout if we weren't passed one */
        ev_timer_stop(selector->ev_loop, &selector->timer);
     } else {
        timeout_val = NUM2DBL(timeout);
-        if(timeout_val == 0) {
+        if (timeout_val == 0) {
            /* If we've been given an explicit timeout of 0, perform a non-blocking
               select operation */
            ev_run_flags = EVRUN_NOWAIT;
@@ -462,7 +461,7 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     result = selector->ready_count;
     selector->selecting = selector->ready_count = 0;
 
-    if(result > 0 || selector->wakeup_fired) {
+    if (result > 0 || selector->wakeup_fired) {
        selector->wakeup_fired = 0;
        return result;
     } else {
@@ -476,7 +475,7 @@ static VALUE NIO_Selector_wakeup(VALUE self)
     struct NIO_Selector *selector;
     Data_Get_Struct(self, struct NIO_Selector, selector);
 
-    if(selector->closed) {
+    if (selector->closed) {
        rb_raise(rb_eIOError, "selector is closed");
     }
 
@@ -489,7 +488,7 @@ static VALUE NIO_Selector_wakeup(VALUE self)
 /* Close the selector and free system resources */
 static VALUE NIO_Selector_close(VALUE self)
 {
-    VALUE args[1] = {self};
+    VALUE args[1] = { self };
     return NIO_Selector_synchronize(self, NIO_Selector_close_synchronized, args);
 }
 
@@ -507,7 +506,7 @@ static VALUE NIO_Selector_close_synchronized(VALUE *args)
 /* Is the selector closed? */
 static VALUE NIO_Selector_closed(VALUE self)
 {
-    VALUE args[1] = {self};
+    VALUE args[1] = { self };
     return NIO_Selector_synchronize(self, NIO_Selector_closed_synchronized, args);
 }
 
@@ -528,7 +527,6 @@ static VALUE NIO_Selector_is_empty(VALUE self)
     return rb_funcall(selectables, rb_intern("empty?"), 0) == Qtrue ? Qtrue : Qfalse;
 }
 
-
 /* Called whenever a timeout fires on the event loop */
 static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents)
 {
@@ -542,7 +540,8 @@ static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *
     selector->selecting = 0;
 
     /* Drain the wakeup pipe, giving us level-triggered behavior */
-    while(read(selector->wakeup_reader, buffer, 128) > 0)
+    while (read(selector->wakeup_reader, buffer, 128) > 0)
+        ;
 }
 
 /* libev callback fired whenever a monitor gets an event */
@@ -558,7 +557,7 @@ void NIO_Selector_monitor_callback(struct ev_loop *ev_loop, struct ev_io *io, in
     selector->ready_count++;
     monitor_data->revents = revents;
 
-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
        rb_yield(monitor);
     } else {
        assert(selector->ready_array != Qnil);
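The `selector.c` changes follow the same clang-format pass: spacing after `if`/`while`, braces moved onto their own line for function definitions, spaces inside brace initializers, and re-wrapped `rb_raise` calls. The selection and self-pipe wakeup behavior implemented above is unchanged; a minimal usage sketch of the documented `NIO::Selector` API (illustrative):

```ruby
require "nio"

selector = NIO::Selector.new            # optional backend: :epoll, :kqueue, :poll, :select, :port
reader, writer = IO.pipe
selector.register(reader, :r)

writer.write("ping")
selector.select(1) do |monitor|         # block form yields each ready monitor, returns their count
  puts monitor.io.read_nonblock(64) if monitor.readable?
end

# NIO::Selector#wakeup writes to the internal pipe drained in the callback
# above, so a select blocked in another thread returns early.
Thread.new { sleep 0.1; selector.wakeup }
selector.select                         # returns once the wakeup fires
selector.close
```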
data/lib/nio/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: nio4r
 version: !ruby/object:Gem::Version
-  version: 2.5.3
+  version: 2.5.4
 platform: ruby
 authors:
 - Tony Arcieri
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-09-
+date: 2020-09-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -53,7 +53,6 @@ files:
 - ".rubocop.yml"
 - CHANGES.md
 - Gemfile
-- Guardfile
 - README.md
 - Rakefile
 - examples/echo_server.rb
@@ -71,6 +70,7 @@ files:
 - ext/libev/ev_vars.h
 - ext/libev/ev_win32.c
 - ext/libev/ev_wrap.h
+- ext/nio4r/.clang-format
 - ext/nio4r/bytebuffer.c
 - ext/nio4r/extconf.rb
 - ext/nio4r/libev.h
@@ -108,8 +108,8 @@ licenses:
 metadata:
   bug_tracker_uri: https://github.com/socketry/nio4r/issues
   changelog_uri: https://github.com/socketry/nio4r/blob/master/CHANGES.md
-  documentation_uri: https://www.rubydoc.info/gems/nio4r/2.5.3
-  source_code_uri: https://github.com/socketry/nio4r/tree/v2.5.3
+  documentation_uri: https://www.rubydoc.info/gems/nio4r/2.5.4
+  source_code_uri: https://github.com/socketry/nio4r/tree/v2.5.4
   wiki_uri: https://github.com/socketry/nio4r/wiki
 post_install_message:
 rdoc_options: []
data/Guardfile
DELETED