nio4r 2.5.2 → 2.5.9
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/workflow.yml +61 -0
- data/.rubocop.yml +30 -11
- data/CHANGES.md +60 -1
- data/Gemfile +2 -4
- data/README.md +7 -58
- data/examples/echo_server.rb +2 -2
- data/ext/libev/Changes +71 -2
- data/ext/libev/ev.c +611 -198
- data/ext/libev/ev.h +25 -22
- data/ext/libev/ev_epoll.c +16 -14
- data/ext/libev/ev_iouring.c +694 -0
- data/ext/libev/ev_kqueue.c +4 -4
- data/ext/libev/ev_linuxaio.c +78 -100
- data/ext/libev/ev_poll.c +6 -6
- data/ext/libev/ev_port.c +3 -3
- data/ext/libev/ev_select.c +6 -6
- data/ext/libev/ev_vars.h +34 -0
- data/ext/libev/ev_win32.c +2 -2
- data/ext/libev/ev_wrap.h +56 -0
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +11 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -31
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/nio4r_ext.c +1 -1
- data/ext/nio4r/org/nio4r/ByteBuffer.java +2 -0
- data/ext/nio4r/org/nio4r/Monitor.java +1 -0
- data/ext/nio4r/org/nio4r/Selector.java +8 -10
- data/ext/nio4r/selector.c +89 -75
- data/lib/nio/bytebuffer.rb +4 -0
- data/lib/nio/monitor.rb +1 -1
- data/lib/nio/selector.rb +12 -10
- data/lib/nio/version.rb +1 -1
- data/lib/nio.rb +20 -1
- data/license.md +66 -0
- data/nio4r.gemspec +2 -2
- data/rakelib/extension.rake +1 -2
- data/spec/nio/bytebuffer_spec.rb +0 -1
- data/spec/nio/selectables/udp_socket_spec.rb +2 -2
- data/spec/nio/selector_spec.rb +4 -1
- data/spec/spec_helper.rb +2 -3
- metadata +13 -12
- data/.travis.yml +0 -44
- data/Guardfile +0 -10
- data/appveyor.yml +0 -40
data/ext/nio4r/selector.c
CHANGED
@@ -5,7 +5,7 @@

 #include "nio4r.h"
 #ifdef HAVE_RUBYSIG_H
-#
+#include "rubysig.h"
 #endif

 #ifdef HAVE_UNISTD_H
@@ -14,11 +14,11 @@
 #include <io.h>
 #endif

-#include <fcntl.h>
 #include <assert.h>
+#include <fcntl.h>

 static VALUE mNIO = Qnil;
-static VALUE cNIO_Monitor
+static VALUE cNIO_Monitor = Qnil;
 static VALUE cNIO_Selector = Qnil;

 /* Allocator/deallocator */
@@ -43,13 +43,13 @@ static VALUE NIO_Selector_closed(VALUE self);
 static VALUE NIO_Selector_is_empty(VALUE self);

 /* Internal functions */
-static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE
+static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg);
 static VALUE NIO_Selector_unlock(VALUE lock);
-static VALUE NIO_Selector_register_synchronized(VALUE
-static VALUE NIO_Selector_deregister_synchronized(VALUE
-static VALUE NIO_Selector_select_synchronized(VALUE
-static VALUE NIO_Selector_close_synchronized(VALUE
-static VALUE NIO_Selector_closed_synchronized(VALUE
+static VALUE NIO_Selector_register_synchronized(VALUE arg);
+static VALUE NIO_Selector_deregister_synchronized(VALUE arg);
+static VALUE NIO_Selector_select_synchronized(VALUE arg);
+static VALUE NIO_Selector_close_synchronized(VALUE arg);
+static VALUE NIO_Selector_closed_synchronized(VALUE arg);

 static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout);
 static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents);
@@ -62,7 +62,7 @@ static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *
 #define BUSYWAIT_INTERVAL 0.01

 /* Selectors wait for events */
-void Init_NIO_Selector()
+void Init_NIO_Selector(void)
 {
     mNIO = rb_define_module("NIO");
     cNIO_Selector = rb_define_class_under(mNIO, "Selector", rb_cObject);
@@ -80,7 +80,7 @@ void Init_NIO_Selector()
     rb_define_method(cNIO_Selector, "closed?", NIO_Selector_closed, 0);
     rb_define_method(cNIO_Selector, "empty?", NIO_Selector_is_empty, 0);

-    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor",
+    cNIO_Monitor = rb_define_class_under(mNIO, "Monitor", rb_cObject);
 }

 /* Create the libev event loop and incoming event buffer */
@@ -95,13 +95,12 @@ static VALUE NIO_Selector_allocate(VALUE klass)
       safety. Pipes are nice and safe to use between threads.

       Note that Java NIO uses this same mechanism */
-    if(pipe(fds) < 0) {
+    if (pipe(fds) < 0) {
         rb_sys_fail("pipe");
     }

     /* Use non-blocking reads/writes during wakeup, in case the buffer is full */
-    if(fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 ||
-       fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
+    if (fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0 || fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) {
         rb_sys_fail("fcntl");
     }

@@ -127,7 +126,7 @@ static VALUE NIO_Selector_allocate(VALUE klass)
 /* NIO selectors store all Ruby objects in instance variables so mark is a stub */
 static void NIO_Selector_mark(struct NIO_Selector *selector)
 {
-    if(selector->ready_array != Qnil) {
+    if (selector->ready_array != Qnil) {
         rb_gc_mark(selector->ready_array);
     }
 }
@@ -136,14 +135,14 @@ static void NIO_Selector_mark(struct NIO_Selector *selector)
    Called by both NIO::Selector#close and the finalizer below */
 static void NIO_Selector_shutdown(struct NIO_Selector *selector)
 {
-    if(selector->closed) {
+    if (selector->closed) {
         return;
     }

     close(selector->wakeup_reader);
     close(selector->wakeup_writer);

-    if(selector->ev_loop) {
+    if (selector->ev_loop) {
         ev_loop_destroy(selector->ev_loop);
         selector->ev_loop = 0;
     }
@@ -159,30 +158,39 @@ static void NIO_Selector_free(struct NIO_Selector *selector)
 }

 /* Return an array of symbols for supported backends */
-static VALUE NIO_Selector_supported_backends(VALUE klass)
+static VALUE NIO_Selector_supported_backends(VALUE klass)
+{
     unsigned int backends = ev_supported_backends();
     VALUE result = rb_ary_new();

-    if(backends & EVBACKEND_EPOLL) {
+    if (backends & EVBACKEND_EPOLL) {
         rb_ary_push(result, ID2SYM(rb_intern("epoll")));
     }

-    if(backends & EVBACKEND_POLL) {
+    if (backends & EVBACKEND_POLL) {
         rb_ary_push(result, ID2SYM(rb_intern("poll")));
     }

-    if(backends & EVBACKEND_KQUEUE) {
+    if (backends & EVBACKEND_KQUEUE) {
         rb_ary_push(result, ID2SYM(rb_intern("kqueue")));
     }

-    if(backends & EVBACKEND_SELECT) {
+    if (backends & EVBACKEND_SELECT) {
         rb_ary_push(result, ID2SYM(rb_intern("select")));
     }

-    if(backends & EVBACKEND_PORT) {
+    if (backends & EVBACKEND_PORT) {
         rb_ary_push(result, ID2SYM(rb_intern("port")));
     }

+    if (backends & EVBACKEND_LINUXAIO) {
+        rb_ary_push(result, ID2SYM(rb_intern("linuxaio")));
+    }
+
+    if (backends & EVBACKEND_IOURING) {
+        rb_ary_push(result, ID2SYM(rb_intern("io_uring")));
+    }
+
     return result;
 }

@@ -201,27 +209,29 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)

     rb_scan_args(argc, argv, "01", &backend);

-    if(backend != Qnil) {
-        if(!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+    if (backend != Qnil) {
+        if (!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
         }

         backend_id = SYM2ID(backend);

-        if(backend_id == rb_intern("epoll")) {
+        if (backend_id == rb_intern("epoll")) {
             flags = EVBACKEND_EPOLL;
-        } else if(backend_id == rb_intern("poll")) {
+        } else if (backend_id == rb_intern("poll")) {
             flags = EVBACKEND_POLL;
-        } else if(backend_id == rb_intern("kqueue")) {
+        } else if (backend_id == rb_intern("kqueue")) {
             flags = EVBACKEND_KQUEUE;
-        } else if(backend_id == rb_intern("select")) {
+        } else if (backend_id == rb_intern("select")) {
             flags = EVBACKEND_SELECT;
-        } else if(backend_id == rb_intern("port")) {
+        } else if (backend_id == rb_intern("port")) {
             flags = EVBACKEND_PORT;
+        } else if (backend_id == rb_intern("linuxaio")) {
+            flags = EVBACKEND_LINUXAIO;
+        } else if (backend_id == rb_intern("io_uring")) {
+            flags = EVBACKEND_IOURING;
         } else {
-            rb_raise(rb_eArgError, "unsupported backend: %s",
-                RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
+            rb_raise(rb_eArgError, "unsupported backend: %s", RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
         }
     }

@@ -229,7 +239,7 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     assert(!selector->ev_loop);

     selector->ev_loop = ev_loop_new(flags);
-    if(!selector->ev_loop) {
+    if (!selector->ev_loop) {
         rb_raise(rb_eIOError, "error initializing event loop");
     }

@@ -245,11 +255,12 @@ static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
     return Qnil;
 }

-static VALUE NIO_Selector_backend(VALUE self)
+static VALUE NIO_Selector_backend(VALUE self)
+{
     struct NIO_Selector *selector;

     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }

@@ -264,29 +275,33 @@ static VALUE NIO_Selector_backend(VALUE self) {
         return ID2SYM(rb_intern("select"));
     case EVBACKEND_PORT:
         return ID2SYM(rb_intern("port"));
+    case EVBACKEND_LINUXAIO:
+        return ID2SYM(rb_intern("linuxaio"));
+    case EVBACKEND_IOURING:
+        return ID2SYM(rb_intern("io_uring"));
     }

     return ID2SYM(rb_intern("unknown"));
 }

 /* Synchronize around a reentrant selector lock */
-static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE
+static VALUE NIO_Selector_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg)
 {
     VALUE current_thread, lock_holder, lock;

     current_thread = rb_thread_current();
     lock_holder = rb_ivar_get(self, rb_intern("lock_holder"));

-    if(lock_holder != current_thread) {
+    if (lock_holder != current_thread) {
         lock = rb_ivar_get(self, rb_intern("lock"));
         rb_funcall(lock, rb_intern("lock"), 0);
         rb_ivar_set(self, rb_intern("lock_holder"), current_thread);

         /* We've acquired the lock, so ensure we unlock it */
-        return rb_ensure(func, (VALUE)
+        return rb_ensure(func, (VALUE)arg, NIO_Selector_unlock, self);
     } else {
         /* We already hold the selector lock, so no need to unlock it */
-        return func(
+        return func(arg);
     }
 }

@@ -307,29 +322,30 @@ static VALUE NIO_Selector_unlock(VALUE self)
 static VALUE NIO_Selector_register(VALUE self, VALUE io, VALUE interests)
 {
     VALUE args[3] = {self, io, interests};
-    return NIO_Selector_synchronize(self, NIO_Selector_register_synchronized, args);
+    return NIO_Selector_synchronize(self, NIO_Selector_register_synchronized, (VALUE)args);
 }

 /* Internal implementation of register after acquiring mutex */
-static VALUE NIO_Selector_register_synchronized(VALUE
+static VALUE NIO_Selector_register_synchronized(VALUE _args)
 {
     VALUE self, io, interests, selectables, monitor;
     VALUE monitor_args[3];
     struct NIO_Selector *selector;

+    VALUE *args = (VALUE *)_args;
     self = args[0];
     io = args[1];
     interests = args[2];

     Data_Get_Struct(self, struct NIO_Selector, selector);
-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }

     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_lookup(selectables, io);

-    if(monitor != Qnil)
+    if (monitor != Qnil)
         rb_raise(rb_eArgError, "this IO is already registered with selector");

     /* Create a new NIO::Monitor */
@@ -347,21 +363,22 @@ static VALUE NIO_Selector_register_synchronized(VALUE *args)
 static VALUE NIO_Selector_deregister(VALUE self, VALUE io)
 {
     VALUE args[2] = {self, io};
-    return NIO_Selector_synchronize(self, NIO_Selector_deregister_synchronized, args);
+    return NIO_Selector_synchronize(self, NIO_Selector_deregister_synchronized, (VALUE)args);
 }

 /* Internal implementation of register after acquiring mutex */
-static VALUE NIO_Selector_deregister_synchronized(VALUE
+static VALUE NIO_Selector_deregister_synchronized(VALUE _args)
 {
     VALUE self, io, selectables, monitor;

+    VALUE *args = (VALUE *)_args;
     self = args[0];
     io = args[1];

     selectables = rb_ivar_get(self, rb_intern("selectables"));
     monitor = rb_hash_delete(selectables, io);

-    if(monitor != Qnil) {
+    if (monitor != Qnil) {
         rb_funcall(monitor, rb_intern("close"), 1, Qfalse);
     }

@@ -381,49 +398,48 @@ static VALUE NIO_Selector_is_registered(VALUE self, VALUE io)
 static VALUE NIO_Selector_select(int argc, VALUE *argv, VALUE self)
 {
     VALUE timeout;
-    VALUE args[2];

     rb_scan_args(argc, argv, "01", &timeout);

-    if(timeout != Qnil && NUM2DBL(timeout) < 0) {
+    if (timeout != Qnil && NUM2DBL(timeout) < 0) {
         rb_raise(rb_eArgError, "time interval must be positive");
     }

-    args[
-
-
-    return NIO_Selector_synchronize(self, NIO_Selector_select_synchronized, args);
+    VALUE args[2] = {self, timeout};
+    return NIO_Selector_synchronize(self, NIO_Selector_select_synchronized, (VALUE)args);
 }

 /* Internal implementation of select with the selector lock held */
-static VALUE NIO_Selector_select_synchronized(VALUE
+static VALUE NIO_Selector_select_synchronized(VALUE _args)
 {
     int ready;
     VALUE ready_array;
     struct NIO_Selector *selector;

+    VALUE *args = (VALUE *)_args;
+
     Data_Get_Struct(args[0], struct NIO_Selector, selector);

-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }

-    if(!rb_block_given_p()) {
+    if (!rb_block_given_p()) {
         selector->ready_array = rb_ary_new();
     }

     ready = NIO_Selector_run(selector, args[1]);

     /* Timeout */
-    if(ready < 0) {
-        if(!rb_block_given_p()) {
+    if (ready < 0) {
+        if (!rb_block_given_p()) {
             selector->ready_array = Qnil;
         }

         return Qnil;
     }

-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
         return INT2NUM(ready);
     } else {
         ready_array = selector->ready_array;
@@ -441,12 +457,12 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     selector->selecting = 1;
     selector->wakeup_fired = 0;

-    if(timeout == Qnil) {
+    if (timeout == Qnil) {
         /* Don't fire a wakeup timeout if we weren't passed one */
         ev_timer_stop(selector->ev_loop, &selector->timer);
     } else {
         timeout_val = NUM2DBL(timeout);
-        if(timeout_val == 0) {
+        if (timeout_val == 0) {
             /* If we've been given an explicit timeout of 0, perform a non-blocking
                select operation */
             ev_run_flags = EVRUN_NOWAIT;
@@ -462,7 +478,7 @@ static int NIO_Selector_run(struct NIO_Selector *selector, VALUE timeout)
     result = selector->ready_count;
     selector->selecting = selector->ready_count = 0;

-    if(result > 0 || selector->wakeup_fired) {
+    if (result > 0 || selector->wakeup_fired) {
         selector->wakeup_fired = 0;
         return result;
     } else {
@@ -476,7 +492,7 @@ static VALUE NIO_Selector_wakeup(VALUE self)
     struct NIO_Selector *selector;
     Data_Get_Struct(self, struct NIO_Selector, selector);

-    if(selector->closed) {
+    if (selector->closed) {
         rb_raise(rb_eIOError, "selector is closed");
     }

@@ -489,14 +505,13 @@ static VALUE NIO_Selector_wakeup(VALUE self)
 /* Close the selector and free system resources */
 static VALUE NIO_Selector_close(VALUE self)
 {
-
-    return NIO_Selector_synchronize(self, NIO_Selector_close_synchronized, args);
+    return NIO_Selector_synchronize(self, NIO_Selector_close_synchronized, self);
 }

-static VALUE NIO_Selector_close_synchronized(VALUE
+static VALUE NIO_Selector_close_synchronized(VALUE self)
 {
     struct NIO_Selector *selector;
-
+
     Data_Get_Struct(self, struct NIO_Selector, selector);

     NIO_Selector_shutdown(selector);
@@ -507,14 +522,13 @@ static VALUE NIO_Selector_close_synchronized(VALUE *args)
 /* Is the selector closed? */
 static VALUE NIO_Selector_closed(VALUE self)
 {
-
-    return NIO_Selector_synchronize(self, NIO_Selector_closed_synchronized, args);
+    return NIO_Selector_synchronize(self, NIO_Selector_closed_synchronized, self);
 }

-static VALUE NIO_Selector_closed_synchronized(VALUE
+static VALUE NIO_Selector_closed_synchronized(VALUE self)
 {
     struct NIO_Selector *selector;
-
+
     Data_Get_Struct(self, struct NIO_Selector, selector);

     return selector->closed ? Qtrue : Qfalse;
@@ -528,7 +542,6 @@ static VALUE NIO_Selector_is_empty(VALUE self)
     return rb_funcall(selectables, rb_intern("empty?"), 0) == Qtrue ? Qtrue : Qfalse;
 }

-
 /* Called whenever a timeout fires on the event loop */
 static void NIO_Selector_timeout_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents)
 {
@@ -542,7 +555,8 @@ static void NIO_Selector_wakeup_callback(struct ev_loop *ev_loop, struct ev_io *
     selector->selecting = 0;

     /* Drain the wakeup pipe, giving us level-triggered behavior */
-    while(read(selector->wakeup_reader, buffer, 128) > 0)
+    while (read(selector->wakeup_reader, buffer, 128) > 0)
+        ;
 }

 /* libev callback fired whenever a monitor gets an event */
@@ -558,7 +572,7 @@ void NIO_Selector_monitor_callback(struct ev_loop *ev_loop, struct ev_io *io, in
     selector->ready_count++;
     monitor_data->revents = revents;

-    if(rb_block_given_p()) {
+    if (rb_block_given_p()) {
         rb_yield(monitor);
     } else {
         assert(selector->ready_array != Qnil);
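The substantive change in selector.c is support for libev's two new Linux backends, linuxaio and io_uring, which are now reported by the supported-backends query and accepted by the selector constructor; the remaining hunks are clang-format whitespace changes and passing the synchronized-callback arguments as a single VALUE. A minimal Ruby sketch of how the new symbols surface, assuming a Linux build of the native extension whose libev reports io_uring support (availability depends on the kernel):

    require "nio"

    # Backends compiled into this build of nio4r/libev, e.g.
    # [:epoll, :poll, :select, :linuxaio, :io_uring] on a recent Linux kernel.
    p NIO::Selector.backends

    # Ask for io_uring explicitly when available; otherwise let libev choose.
    selector = if NIO::Selector.backends.include?(:io_uring)
      NIO::Selector.new(:io_uring)
    else
      NIO::Selector.new
    end

    p selector.backend  # => :io_uring, :epoll, etc.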
data/lib/nio/bytebuffer.rb
CHANGED
@@ -24,6 +24,7 @@ module NIO
     # @return [NIO::ByteBuffer]
     def initialize(capacity)
       raise TypeError, "no implicit conversion of #{capacity.class} to Integer" unless capacity.is_a?(Integer)
+
       @capacity = capacity
       clear
     end
@@ -119,9 +120,11 @@ module NIO
     # @return [self]
     def put(str)
       raise TypeError, "expected String, got #{str.class}" unless str.respond_to?(:to_str)
+
       str = str.to_str

       raise OverflowError, "buffer is full" if str.length > @limit - @position
+
       @buffer[@position...str.length] = str
       @position += str.length
       self
@@ -188,6 +191,7 @@ module NIO
     # @raise [NIO::ByteBuffer::MarkUnsetError] mark has not been set (call `#mark` first)
     def reset
       raise MarkUnsetError, "mark has not been set" unless @mark
+
       @position = @mark
       self
     end
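The ByteBuffer hunks above only insert blank lines after guard clauses (RuboCop layout), but they also show #put's contract: the argument must respond to to_str and must fit within the remaining space, otherwise OverflowError is raised. A small usage sketch of that API:

    require "nio"

    buffer = NIO::ByteBuffer.new(16)  # fixed capacity of 16 bytes
    buffer.put("hello")               # copy the string at the current position
    buffer.flip                       # limit = position, position = 0: switch to reading
    p buffer.get                      # => "hello"

    buffer.clear
    begin
      buffer.put("x" * 32)            # larger than the remaining capacity
    rescue NIO::ByteBuffer::OverflowError => e
      p e.message                     # => "buffer is full"
    end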
data/lib/nio/monitor.rb
CHANGED
data/lib/nio/selector.rb
CHANGED
@@ -14,7 +14,7 @@ module NIO

     # Create a new NIO::Selector
     def initialize(backend = :ruby)
-      raise ArgumentError, "unsupported backend: #{backend}" unless
+      raise ArgumentError, "unsupported backend: #{backend}" unless [:ruby, nil].include?(backend)

       @selectables = {}
       @lock = Mutex.new
@@ -26,14 +26,16 @@ module NIO

     # Return a symbol representing the backend I/O multiplexing mechanism used.
     # Supported backends are:
-    # * :ruby
-    # * :java
-    # * :epoll
-    # * :poll
-    # * :kqueue
-    # * :select
-    # * :port
-    # * :
+    # * :ruby - pure Ruby (i.e IO.select)
+    # * :java - Java NIO on JRuby
+    # * :epoll - libev w\ Linux epoll
+    # * :poll - libev w\ POSIX poll
+    # * :kqueue - libev w\ BSD kqueue
+    # * :select - libev w\ SysV select
+    # * :port - libev w\ I/O completion ports
+    # * :linuxaio - libev w\ Linux AIO io_submit (experimental)
+    # * :io_uring - libev w\ Linux io_uring (experimental)
+    # * :unknown - libev w\ unknown backend
     def backend
       :ruby
     end
@@ -44,7 +46,7 @@ module NIO
     # * :w - is the IO writeable?
     # * :rw - is the IO either readable or writeable?
     def register(io, interest)
-      unless io.is_a?
+      unless defined?(::OpenSSL) && io.is_a?(::OpenSSL::SSL::SSLSocket)
         io = IO.try_convert(io)
       end

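In the pure-Ruby selector, #register now only skips IO.try_convert for OpenSSL::SSL::SSLSocket instances when OpenSSL is actually loaded, so the openssl gem is no longer an implicit requirement. A minimal sketch of registering a plain IO with the selector (the monitor interests and timed select shown here are part of the documented nio4r API):

    require "nio"
    require "socket"

    selector = NIO::Selector.new
    server = TCPServer.new("127.0.0.1", 0)

    # Plain IO objects pass through IO.try_convert; SSLSocket instances are
    # registered as-is when OpenSSL is loaded.
    monitor = selector.register(server, :r)
    p monitor.interests       # => :r

    p selector.select(0.1)    # => nil on timeout, otherwise the ready monitors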
data/lib/nio/version.rb
CHANGED
data/lib/nio.rb
CHANGED
@@ -12,9 +12,28 @@ module NIO
   def self.engine
     ENGINE
   end
+
+  def self.pure?(env = ENV)
+    # The user has explicitly opted in to non-native implementation:
+    if env["NIO4R_PURE"] == "true"
+      return true
+    end
+
+    # Native Ruby on Windows is not supported:
+    if (Gem.win_platform? && !defined?(JRUBY_VERSION))
+      return true
+    end
+
+    # M1 native extension is crashing on M1 (arm64):
+    # if RUBY_PLATFORM =~ /darwin/ && RUBY_PLATFORM =~ /arm64/
+    #   return true
+    # end
+
+    return false
+  end
 end

-if
+if NIO.pure?
   require "nio/monitor"
   require "nio/selector"
   require "nio/bytebuffer"
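nio.rb now routes the pure-Ruby versus native decision through NIO.pure?, which honours the NIO4R_PURE environment variable and forces the pure implementation for native (non-JRuby) Ruby on Windows; the arm64 macOS check remains commented out. A short sketch of opting into the pure-Ruby backend (the variable must be set before nio4r is required):

    # Run with the pure-Ruby Selector/Monitor/ByteBuffer implementations.
    ENV["NIO4R_PURE"] = "true"

    require "nio"

    p NIO.pure?                   # => true
    p NIO::Selector.new.backend   # => :ruby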
data/license.md
ADDED
@@ -0,0 +1,66 @@
+# MIT License
+
+Copyright, 2011-2020, by Tony Arcieri.
+Copyright, 2012, by Logan Bowers.
+Copyright, 2013, by FURUHASHI Sadayuki.
+Copyright, 2013, by Stephen von Takach.
+Copyright, 2013, by Tim Carey-Smith.
+Copyright, 2013, by brainopia.
+Copyright, 2013, by Luis Lavena.
+Copyright, 2014, by SHIBATA Hiroshi.
+Copyright, 2014, by Sergey Avseyev.
+Copyright, 2014, by JohnnyT.
+Copyright, 2015-2017, by Tiago Cardoso.
+Copyright, 2015, by Daniel Berger.
+Copyright, 2015, by Upekshe.
+Copyright, 2015-2016, by UpeksheJay.
+Copyright, 2015, by Vladimir Kochnev.
+Copyright, 2016-2018, by Jun Aruga.
+Copyright, 2016, by Omer Katz.
+Copyright, 2016-2021, by Olle Jonsson.
+Copyright, 2017, by usa.
+Copyright, 2017, by HoneyryderChuck.
+Copyright, 2017, by tompng.
+Copyright, 2018-2021, by Samuel Williams.
+Copyright, 2019, by Cédric Boutillier.
+Copyright, 2019-2020, by MSP-Greg.
+Copyright, 2019-2020, by Benoit Daloze.
+Copyright, 2019, by Jesús Burgos Maciá.
+Copyright, 2019, by Thomas Kuntz.
+Copyright, 2019, by Orien Madgwick.
+Copyright, 2019, by Thomas Dziedzic.
+Copyright, 2019, by Zhang Kang.
+Copyright, 2020, by eladeyal-intel.
+Copyright, 2020, by Pedro Paiva.
+Copyright, 2020, by Bo.
+Copyright, 2020, by Charles Oliver Nutter.
+Copyright, 2020-2021, by Joao Fernandes.
+Copyright, 2021, by Jun Jiang.
+Copyright, 2021, by Jeffrey Martin.
+Copyright, 2021, by Pavel Lobashov.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+## libev
+
+Released under the BSD license. See [ext/libev/LICENSE] for details.
+
+Copyright, 2007-2019, by Marc Alexander Lehmann.
+
+[ext/libev/LICENSE]: https://github.com/socketry/nio4r/blob/master/ext/libev/LICENSE
data/nio4r.gemspec
CHANGED
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-require File.expand_path("
+require File.expand_path("lib/nio/version", __dir__)

 Gem::Specification.new do |spec|
   spec.authors = ["Tony Arcieri"]
@@ -28,7 +28,7 @@ Gem::Specification.new do |spec|
     "wiki_uri" => "https://github.com/socketry/nio4r/wiki"
   }

-  spec.required_ruby_version = ">= 2.
+  spec.required_ruby_version = ">= 2.4"

   if defined? JRUBY_VERSION
     spec.files << "lib/nio4r_ext.jar"
data/rakelib/extension.rake
CHANGED
data/spec/nio/bytebuffer_spec.rb
CHANGED