nio4r 2.5.2 → 2.7.0

Files changed (56)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/workflow.yml +61 -0
  3. data/.mailmap +16 -0
  4. data/.rubocop.yml +30 -11
  5. data/Gemfile +6 -6
  6. data/{CHANGES.md → changes.md} +78 -1
  7. data/examples/echo_server.rb +9 -2
  8. data/ext/libev/Changes +71 -2
  9. data/ext/libev/ev.c +611 -198
  10. data/ext/libev/ev.h +25 -22
  11. data/ext/libev/ev_epoll.c +16 -14
  12. data/ext/libev/ev_iouring.c +694 -0
  13. data/ext/libev/ev_kqueue.c +4 -4
  14. data/ext/libev/ev_linuxaio.c +78 -100
  15. data/ext/libev/ev_poll.c +6 -6
  16. data/ext/libev/ev_port.c +3 -3
  17. data/ext/libev/ev_select.c +6 -6
  18. data/ext/libev/ev_vars.h +34 -0
  19. data/ext/libev/ev_win32.c +2 -2
  20. data/ext/libev/ev_wrap.h +56 -0
  21. data/ext/nio4r/.clang-format +16 -0
  22. data/ext/nio4r/bytebuffer.c +101 -65
  23. data/ext/nio4r/extconf.rb +26 -0
  24. data/ext/nio4r/libev.h +1 -3
  25. data/ext/nio4r/monitor.c +81 -53
  26. data/ext/nio4r/nio4r.h +6 -15
  27. data/ext/nio4r/nio4r_ext.c +1 -1
  28. data/ext/nio4r/org/nio4r/ByteBuffer.java +2 -0
  29. data/ext/nio4r/org/nio4r/Monitor.java +1 -0
  30. data/ext/nio4r/org/nio4r/Selector.java +8 -10
  31. data/ext/nio4r/selector.c +132 -93
  32. data/lib/nio/bytebuffer.rb +10 -0
  33. data/lib/nio/monitor.rb +8 -1
  34. data/lib/nio/selector.rb +27 -10
  35. data/lib/nio/version.rb +6 -1
  36. data/lib/nio.rb +29 -1
  37. data/lib/nio4r.rb +5 -0
  38. data/license.md +77 -0
  39. data/nio4r.gemspec +6 -5
  40. data/rakelib/extension.rake +1 -2
  41. data/readme.md +91 -0
  42. data/spec/nio/acceptables_spec.rb +4 -0
  43. data/spec/nio/bytebuffer_spec.rb +6 -1
  44. data/spec/nio/monitor_spec.rb +7 -0
  45. data/spec/nio/selectables/pipe_spec.rb +6 -0
  46. data/spec/nio/selectables/ssl_socket_spec.rb +7 -0
  47. data/spec/nio/selectables/tcp_socket_spec.rb +7 -0
  48. data/spec/nio/selectables/udp_socket_spec.rb +9 -2
  49. data/spec/nio/selector_spec.rb +16 -1
  50. data/spec/spec_helper.rb +7 -2
  51. data/spec/support/selectable_examples.rb +8 -0
  52. metadata +20 -16
  53. data/.travis.yml +0 -44
  54. data/Guardfile +0 -10
  55. data/README.md +0 -150
  56. data/appveyor.yml +0 -40
data/ext/libev/ev.h CHANGED
@@ -1,7 +1,7 @@
  /*
  * libev native API header
  *
- * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007-2020 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -151,7 +151,10 @@ EV_CPP(extern "C" {)

  /*****************************************************************************/

- typedef double ev_tstamp;
+ #ifndef EV_TSTAMP_T
+ # define EV_TSTAMP_T double
+ #endif
+ typedef EV_TSTAMP_T ev_tstamp;

  #include <string.h> /* for memmove */

@@ -212,7 +215,7 @@ struct ev_loop;
  /*****************************************************************************/

  #define EV_VERSION_MAJOR 4
- #define EV_VERSION_MINOR 27
+ #define EV_VERSION_MINOR 33

  /* eventmask, revents, events... */
  enum {
@@ -389,14 +392,12 @@ typedef struct ev_stat
  } ev_stat;
  #endif

- #if EV_IDLE_ENABLE
  /* invoked when the nothing else needs to be done, keeps the process from blocking */
  /* revent EV_IDLE */
  typedef struct ev_idle
  {
  EV_WATCHER (ev_idle)
  } ev_idle;
- #endif

  /* invoked for each run of the mainloop, just before the blocking call */
  /* you can still change events in any way you like */
@@ -413,23 +414,19 @@ typedef struct ev_check
  EV_WATCHER (ev_check)
  } ev_check;

- #if EV_FORK_ENABLE
  /* the callback gets invoked before check in the child process when a fork was detected */
  /* revent EV_FORK */
  typedef struct ev_fork
  {
  EV_WATCHER (ev_fork)
  } ev_fork;
- #endif

- #if EV_CLEANUP_ENABLE
  /* is invoked just before the loop gets destroyed */
  /* revent EV_CLEANUP */
  typedef struct ev_cleanup
  {
  EV_WATCHER (ev_cleanup)
  } ev_cleanup;
- #endif

  #if EV_EMBED_ENABLE
  /* used to embed an event loop inside another */
@@ -439,16 +436,18 @@ typedef struct ev_embed
  EV_WATCHER (ev_embed)

  struct ev_loop *other; /* ro */
+ #undef EV_IO_ENABLE
+ #define EV_IO_ENABLE 1
  ev_io io; /* private */
+ #undef EV_PREPARE_ENABLE
+ #define EV_PREPARE_ENABLE 1
  ev_prepare prepare; /* private */
  ev_check check; /* unused */
  ev_timer timer; /* unused */
  ev_periodic periodic; /* unused */
  ev_idle idle; /* unused */
  ev_fork fork; /* private */
- #if EV_CLEANUP_ENABLE
  ev_cleanup cleanup; /* unused */
- #endif
  } ev_embed;
  #endif

@@ -501,17 +500,18 @@ union ev_any_watcher
  /* flag bits for ev_default_loop and ev_loop_new */
  enum {
  /* the default */
- EVFLAG_AUTO = 0x00000000U, /* not quite a mask */
+ EVFLAG_AUTO = 0x00000000U, /* not quite a mask */
  /* flag bits */
- EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */
- EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */
+ EVFLAG_NOENV = 0x01000000U, /* do NOT consult environment */
+ EVFLAG_FORKCHECK = 0x02000000U, /* check for a fork in each iteration */
  /* debugging/feature disable */
- EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */
+ EVFLAG_NOINOTIFY = 0x00100000U, /* do not attempt to use inotify */
  #if EV_COMPAT3
- EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */
+ EVFLAG_NOSIGFD = 0, /* compatibility to pre-3.9 */
  #endif
- EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */
- EVFLAG_NOSIGMASK = 0x00400000U /* avoid modifying the signal mask */
+ EVFLAG_SIGNALFD = 0x00200000U, /* attempt to use signalfd */
+ EVFLAG_NOSIGMASK = 0x00400000U, /* avoid modifying the signal mask */
+ EVFLAG_NOTIMERFD = 0x00800000U /* avoid creating a timerfd */
  };

  /* method bits to be ored together */
@@ -522,8 +522,9 @@ enum {
  EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */
  EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */
  EVBACKEND_PORT = 0x00000020U, /* solaris 10 */
- EVBACKEND_LINUXAIO = 0x00000040U, /* Linuix AIO */
- EVBACKEND_ALL = 0x0000007FU, /* all known backends */
+ EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO, 4.19+ */
+ EVBACKEND_IOURING = 0x00000080U, /* linux io_uring, 5.1+ */
+ EVBACKEND_ALL = 0x000000FFU, /* all known backends */
  EVBACKEND_MASK = 0x0000FFFFU /* all future backends */
  };

@@ -655,6 +656,8 @@ EV_API_DECL void ev_unref (EV_P) EV_NOEXCEPT;
  */
  EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT;

+ EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */
+
  # if EV_FEATURE_API
  EV_API_DECL unsigned int ev_iteration (EV_P) EV_NOEXCEPT; /* number of loop iterations */
  EV_API_DECL unsigned int ev_depth (EV_P) EV_NOEXCEPT; /* #ev_loop enters - #ev_loop leaves */
@@ -672,7 +675,6 @@ EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending
  EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT;

  EV_API_DECL unsigned int ev_pending_count (EV_P) EV_NOEXCEPT; /* number of pending events, if any */
- EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */

  /*
  * stop/start the timer handling.
@@ -692,6 +694,7 @@ EV_API_DECL void ev_resume (EV_P) EV_NOEXCEPT;
  ev_set_cb ((ev), cb_); \
  } while (0)

+ #define ev_io_modify(ev,events_) do { (ev)->events = (ev)->events & EV__IOFDSET | (events_); } while (0)
  #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0)
  #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0)
  #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0)
@@ -737,6 +740,7 @@ EV_API_DECL void ev_resume (EV_P) EV_NOEXCEPT;
  #define ev_periodic_at(ev) (+((ev_watcher_time *)(ev))->at)

  #ifndef ev_set_cb
+ /* memmove is used here to avoid strict aliasing violations, and hopefully is optimized out by any reasonable compiler */
  # define ev_set_cb(ev,cb_) (ev_cb_ (ev) = (cb_), memmove (&((ev_watcher *)(ev))->cb, &ev_cb_ (ev), sizeof (ev_cb_ (ev))))
  #endif

@@ -853,4 +857,3 @@ EV_API_DECL void ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT;
  EV_CPP(})

  #endif
-
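
The header changes above bump the bundled libev from 4.27 to 4.33: ev_tstamp becomes configurable via EV_TSTAMP_T, an io_uring backend bit (EVBACKEND_IOURING) and an EVFLAG_NOTIMERFD flag are added, ev_invoke_pending is declared unconditionally, and an ev_io_modify macro appears alongside ev_io_set. As a rough illustration only (this is not nio4r code; it assumes a standalone C build against these libev 4.33 headers, linked with -lev), a caller could probe for the new backend like this:

/* Minimal sketch, not part of nio4r: probing the backends declared in the
 * header above. Assumes libev 4.33 headers on the include path and -lev. */
#include <stdio.h>
#include <ev.h>

int
main (void)
{
  unsigned int supported = ev_supported_backends ();

  /* EVBACKEND_IOURING is the new 0x00000080U bit; it only works on Linux
   * 5.1+ kernels, so fall back to whatever libev would pick on its own. */
  unsigned int flags = (supported & EVBACKEND_IOURING)
                         ? EVBACKEND_IOURING
                         : EVFLAG_AUTO;

  /* EVFLAG_NOTIMERFD (also new in this header) just opts out of timerfd. */
  struct ev_loop *loop = ev_loop_new (flags | EVFLAG_NOTIMERFD);
  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO);
  if (!loop)
    return 1;

  printf ("backend in use: 0x%08x\n", ev_backend (loop));
  ev_loop_destroy (loop);
  return 0;
}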
data/ext/libev/ev_epoll.c CHANGED
@@ -93,10 +93,10 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
  ev.events = (nev & EV_READ ? EPOLLIN : 0)
  | (nev & EV_WRITE ? EPOLLOUT : 0);

- if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
+ if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
  return;

- if (expect_true (errno == ENOENT))
+ if (ecb_expect_true (errno == ENOENT))
  {
  /* if ENOENT then the fd went away, so try to do the right thing */
  if (!nev)
@@ -105,7 +105,7 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
  if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
  return;
  }
- else if (expect_true (errno == EEXIST))
+ else if (ecb_expect_true (errno == EEXIST))
  {
  /* EEXIST means we ignored a previous DEL, but the fd is still active */
  /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
@@ -115,7 +115,7 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
  if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
  return;
  }
- else if (expect_true (errno == EPERM))
+ else if (ecb_expect_true (errno == EPERM))
  {
  /* EPERM means the fd is always ready, but epoll is too snobbish */
  /* to handle it, unlike select or poll. */
@@ -146,16 +146,16 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  int i;
  int eventcnt;

- if (expect_false (epoll_epermcnt))
- timeout = 0.;
+ if (ecb_expect_false (epoll_epermcnt))
+ timeout = EV_TS_CONST (0.);

  /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
  /* the default libev max wait time, however. */
  EV_RELEASE_CB;
- eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3);
+ eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout));
  EV_ACQUIRE_CB;

- if (expect_false (eventcnt < 0))
+ if (ecb_expect_false (eventcnt < 0))
  {
  if (errno != EINTR)
  ev_syserr ("(libev) epoll_wait");
@@ -178,14 +178,14 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  * other spurious notifications will be found by epoll_ctl, below
  * we assume that fd is always in range, as we never shrink the anfds array
  */
- if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
+ if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
  {
  /* recreate kernel state */
  postfork |= 2;
  continue;
  }

- if (expect_false (got & ~want))
+ if (ecb_expect_false (got & ~want))
  {
  anfds [fd].emask = want;

@@ -197,6 +197,8 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  * above with the gencounter check (== our fd is not the event fd), and
  * partially here, when epoll_ctl returns an error (== a child has the fd
  * but we closed it).
+ * note: for events such as POLLHUP, where we can't know whether it refers
+ * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls.
  */
  ev->events = (want & EV_READ ? EPOLLIN : 0)
  | (want & EV_WRITE ? EPOLLOUT : 0);
@@ -214,7 +216,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  }

  /* if the receive array was full, increase its size */
- if (expect_false (eventcnt == epoll_eventmax))
+ if (ecb_expect_false (eventcnt == epoll_eventmax))
  {
  ev_free (epoll_events);
  epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
@@ -264,7 +266,7 @@ epoll_init (EV_P_ int flags)
  if ((backend_fd = epoll_epoll_create ()) < 0)
  return 0;

- backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
+ backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */
  backend_modify = epoll_modify;
  backend_poll = epoll_poll;

@@ -282,8 +284,8 @@ epoll_destroy (EV_P)
  array_free (epoll_eperm, EMPTY);
  }

- inline_size
- void
+ ecb_cold
+ static void
  epoll_fork (EV_P)
  {
  close (backend_fd);
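
The ev_epoll.c changes are mostly mechanical: the expect_true/expect_false hints become the libecb-prefixed ecb_expect_true/ecb_expect_false, and raw double timeouts go through EV_TS_CONST/EV_TS_TO_MSEC so the backend still works when EV_TSTAMP_T is overridden. For readers unfamiliar with such hints, the sketch below shows the general __builtin_expect technique; the sketch_* macro names are invented for this example, and the real libecb definitions carry more portability fallbacks than shown here.

/* Rough sketch of the branch-prediction hint behind ecb_expect_true and
 * ecb_expect_false; illustrative only, not the libecb source. */
#include <errno.h>
#include <stdio.h>

#if defined __GNUC__ || defined __clang__
# define sketch_expect(expr, value) __builtin_expect (!!(expr), (value))
#else
# define sketch_expect(expr, value) (!!(expr)) /* no hint available */
#endif
#define sketch_expect_true(expr)  sketch_expect ((expr), 1)
#define sketch_expect_false(expr) sketch_expect ((expr), 0)

static int
check_fd (int fd)
{
  /* mark the error path as unlikely, the same way the epoll code above
   * marks epoll_ctl/epoll_wait failures */
  if (sketch_expect_false (fd < 0))
    {
      errno = EBADF;
      return -1;
    }
  return 0;
}

int
main (void)
{
  printf ("%d\n", check_fd (0));
  return 0;
}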