nio4r 1.2.1 → 2.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. checksums.yaml +5 -5
  2. data/.github/workflows/workflow.yml +43 -0
  3. data/.gitignore +1 -0
  4. data/.rspec +0 -1
  5. data/.rubocop.yml +70 -31
  6. data/CHANGES.md +190 -42
  7. data/Gemfile +8 -4
  8. data/Guardfile +10 -0
  9. data/README.md +102 -147
  10. data/Rakefile +3 -4
  11. data/examples/echo_server.rb +3 -2
  12. data/ext/libev/Changes +44 -13
  13. data/ext/libev/README +2 -1
  14. data/ext/libev/ev.c +314 -225
  15. data/ext/libev/ev.h +90 -88
  16. data/ext/libev/ev_epoll.c +30 -16
  17. data/ext/libev/ev_kqueue.c +19 -9
  18. data/ext/libev/ev_linuxaio.c +642 -0
  19. data/ext/libev/ev_poll.c +19 -11
  20. data/ext/libev/ev_port.c +13 -6
  21. data/ext/libev/ev_select.c +4 -2
  22. data/ext/libev/ev_vars.h +14 -3
  23. data/ext/libev/ev_wrap.h +16 -0
  24. data/ext/nio4r/bytebuffer.c +429 -0
  25. data/ext/nio4r/extconf.rb +17 -30
  26. data/ext/nio4r/monitor.c +113 -49
  27. data/ext/nio4r/nio4r.h +11 -13
  28. data/ext/nio4r/org/nio4r/ByteBuffer.java +293 -0
  29. data/ext/nio4r/org/nio4r/Monitor.java +175 -0
  30. data/ext/nio4r/org/nio4r/Nio4r.java +22 -391
  31. data/ext/nio4r/org/nio4r/Selector.java +299 -0
  32. data/ext/nio4r/selector.c +155 -68
  33. data/lib/nio.rb +4 -4
  34. data/lib/nio/bytebuffer.rb +229 -0
  35. data/lib/nio/monitor.rb +73 -11
  36. data/lib/nio/selector.rb +64 -21
  37. data/lib/nio/version.rb +1 -1
  38. data/nio4r.gemspec +34 -20
  39. data/{tasks → rakelib}/extension.rake +4 -0
  40. data/{tasks → rakelib}/rspec.rake +2 -0
  41. data/{tasks → rakelib}/rubocop.rake +2 -0
  42. data/spec/nio/acceptables_spec.rb +5 -5
  43. data/spec/nio/bytebuffer_spec.rb +354 -0
  44. data/spec/nio/monitor_spec.rb +128 -79
  45. data/spec/nio/selectables/pipe_spec.rb +12 -3
  46. data/spec/nio/selectables/ssl_socket_spec.rb +61 -29
  47. data/spec/nio/selectables/tcp_socket_spec.rb +47 -34
  48. data/spec/nio/selectables/udp_socket_spec.rb +24 -7
  49. data/spec/nio/selector_spec.rb +65 -16
  50. data/spec/spec_helper.rb +12 -3
  51. data/spec/support/selectable_examples.rb +45 -18
  52. metadata +33 -23
  53. data/.rubocop_todo.yml +0 -35
  54. data/.travis.yml +0 -27
  55. data/LICENSE.txt +0 -20
  56. data/ext/libev/README.embed +0 -3
  57. data/ext/libev/test_libev_win32.c +0 -123
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  source "https://rubygems.org"
2
4
 
3
5
  gemspec
@@ -5,11 +7,13 @@ gemspec
5
7
  gem "jruby-openssl" if defined? JRUBY_VERSION
6
8
 
7
9
  group :development do
8
- gem "rake-compiler"
10
+ gem "guard-rspec", require: false
11
+ gem "pry", require: false
9
12
  end
10
13
 
11
- group :test do
12
- gem "rspec", "~> 3.0"
13
- gem "rubocop", "0.36.0"
14
+ group :development, :test do
14
15
  gem "coveralls", require: false
16
+ gem "rake-compiler", require: false
17
+ gem "rspec", "~> 3.7", require: false
18
+ gem "rubocop", "0.82.0", require: false
15
19
  end
@@ -0,0 +1,10 @@
1
+ # frozen_string_literal: true
2
+
3
+ directories %w[lib spec]
4
+ clearing :on
5
+
6
+ guard :rspec, cmd: "bundle exec rspec" do
7
+ watch(%r{^spec/.+_spec\.rb$})
8
+ watch(%r{^lib/(.+)\.rb$}) { |m| "spec/#{m[1]}_spec.rb" }
9
+ watch("spec/spec_helper.rb") { "spec" }
10
+ end
data/README.md CHANGED
@@ -1,196 +1,151 @@
1
- ![nio4r](https://raw.github.com/celluloid/nio4r/master/logo.png)
2
- ===============
1
+ # ![nio4r](https://raw.github.com/socketry/nio4r/master/logo.png)
2
+
3
3
  [![Gem Version](https://badge.fury.io/rb/nio4r.svg)](http://rubygems.org/gems/nio4r)
4
- [![Build Status](https://secure.travis-ci.org/celluloid/nio4r.svg?branch=master)](http://travis-ci.org/celluloid/nio4r)
5
- [![Code Climate](https://codeclimate.com/github/celluloid/nio4r.svg)](https://codeclimate.com/github/celluloid/nio4r)
6
- [![Coverage Status](https://coveralls.io/repos/celluloid/nio4r/badge.svg?branch=master)](https://coveralls.io/r/celluloid/nio4r)
7
- [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/celluloid/nio4r/blob/master/LICENSE.txt)
4
+ [![Travis CI Status](https://secure.travis-ci.org/socketry/nio4r.svg?branch=master)](http://travis-ci.org/socketry/nio4r)
5
+ [![Appveyor Status](https://ci.appveyor.com/api/projects/status/1ru8x81v91vaewax/branch/master?svg=true)](https://ci.appveyor.com/project/tarcieri/nio4r/branch/master)
6
+ [![Code Climate](https://codeclimate.com/github/socketry/nio4r.svg)](https://codeclimate.com/github/socketry/nio4r)
7
+ [![Coverage Status](https://coveralls.io/repos/socketry/nio4r/badge.svg?branch=master)](https://coveralls.io/r/socketry/nio4r)
8
+ [![Yard Docs](https://img.shields.io/badge/yard-docs-blue.svg)](http://www.rubydoc.info/gems/nio4r/2.2.0)
9
+ [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/socketry/nio4r/blob/master/LICENSE.txt)
10
+
11
+ _NOTE: This is the 2.x **stable** branch of nio4r. For the 1.x **legacy** branch,
12
+ please see:_
8
13
 
9
- nio4r: New IO for Ruby.
14
+ https://github.com/socketry/nio4r/tree/1-x-stable
10
15
 
11
- nio4r provides an abstract, cross-platform stateful I/O selector API for Ruby.
16
+ **New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for
17
+ scalable network clients and servers. Modeled after the Java NIO API, but
18
+ simplified for ease-of-use.
19
+
20
+ **nio4r** provides an abstract, cross-platform stateful I/O selector API for Ruby.
12
21
  I/O selectors are the heart of "reactor"-based event loops, and monitor
13
22
  multiple I/O objects for various types of readiness, e.g. ready for reading or
14
23
  writing.
15
24
 
16
- The most similar API provided by Ruby today is Kernel.select, however the
17
- select API requires you to pass in arrays of all of the I/O objects you're
18
- interested in every time. nio4r provides a more object-oriented API that lets
19
- you register I/O objects with a selector then handle them when they're selected
20
- for various types of events.
25
+ ## Projects using nio4r
26
+
27
+ * [ActionCable]: Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
28
+ * [Celluloid::IO]: Actor-based concurrency framework, uses nio4r for async I/O
29
+ * [Socketry Async]: Asynchronous I/O framework for Ruby
30
+ * [Puma]: Ruby/Rack web server built for concurrency
21
31
 
22
- nio4r is modeled after the Java NIO API, but simplified for ease-of-use.
32
+ [ActionCable]: https://rubygems.org/gems/actioncable
33
+ [Celluloid::IO]: https://github.com/celluloid/celluloid-io
34
+ [Socketry Async]: https://github.com/socketry/async
35
+ [Puma]: https://github.com/puma/puma
23
36
 
24
- Its goals are:
37
+ ## Goals
25
38
 
26
39
  * Expose high-level interfaces for stateful IO selectors
27
40
  * Keep the API small to maximize both portability and performance across many
28
41
  different OSes and Ruby VMs
29
42
  * Provide inherently thread-safe facilities for working with IO objects
30
43
 
31
- [Celluloid::IO](https://github.com/celluloid/celluloid-io) uses nio4r to
32
- monitor multiple IO objects from a single Celluloid actor.
44
+ ## Supported platforms
33
45
 
34
- Supported Platforms
35
- -------------------
46
+ * Ruby 2.3
47
+ * Ruby 2.4
48
+ * Ruby 2.5
49
+ * Ruby 2.6
50
+ * Ruby 2.7
51
+ * [JRuby](https://github.com/jruby/jruby)
52
+ * [TruffleRuby](https://github.com/oracle/truffleruby)
36
53
 
37
- nio4r is known to work on the following Ruby implementations:
54
+ ## Supported backends
38
55
 
39
- * MRI/YARV 1.9.3, 2.0.0, 2.1.0
40
- * JRuby 1.7.x
41
- * Rubinius 2.x
42
- * A pure Ruby implementation based on Kernel.select is also provided
56
+ * **libev**: MRI C extension targeting multiple native IO selector APIs (e.g epoll, kqueue)
57
+ * **Java NIO**: JRuby extension which wraps the Java NIO subsystem
58
+ * **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter
43
59
 
44
- Platform notes:
60
+ ## Discussion
45
61
 
46
- * MRI/YARV and Rubinius implement nio4r with a C extension based on libev,
47
- which provides a high performance binding to native IO APIs
48
- * JRuby uses a Java extension based on the high performance Java NIO subsystem
49
- * A pure Ruby implementation is also provided for Ruby implementations which
50
- don't implement the MRI C extension API
62
+ For discussion and general help with nio4r, email
63
+ [socketry+subscribe@googlegroups.com][subscribe]
64
+ or join on the web via the [Google Group].
51
65
 
52
- Usage
53
- -----
66
+ We're also on IRC at ##socketry on irc.freenode.net.
54
67
 
55
- ### Selectors
68
+ [subscribe]: mailto:socketry+subscribe@googlegroups.com
69
+ [google group]: https://groups.google.com/group/socketry
56
70
 
57
- The NIO::Selector class is the main API provided by nio4r. Use it where you
58
- might otherwise use Kernel.select, but want to monitor the same set of IO
59
- objects across multiple select calls rather than having to reregister them
60
- every single time:
71
+ ## Documentation
61
72
 
62
- ```ruby
63
- require 'nio'
73
+ [Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki)
74
+ for more detailed documentation and usage notes:
64
75
 
65
- selector = NIO::Selector.new
66
- ```
76
+ * [Getting Started]: Introduction to nio4r's components
77
+ * [Selectors]: monitor multiple `IO` objects for readiness events
78
+ * [Monitors]: control interests and inspect readiness for specific `IO` objects
79
+ * [Byte Buffers]: fixed-size native buffers for high-performance I/O
67
80
 
68
- To monitor IO objects, attach them to the selector with the NIO::Selector#register
69
- method, monitoring them for read readiness with the :r parameter, write
70
- readiness with the :w parameter, or both with :rw.
81
+ [Getting Started]: https://github.com/socketry/nio4r/wiki/Getting-Started
82
+ [Selectors]: https://github.com/socketry/nio4r/wiki/Selectors
83
+ [Monitors]: https://github.com/socketry/nio4r/wiki/Monitors
84
+ [Byte Buffers]: https://github.com/socketry/nio4r/wiki/Byte-Buffers
71
85
 
72
- ```ruby
73
- >> reader, writer = IO.pipe
74
- => [#<IO:0xf30>, #<IO:0xf34>]
75
- >> monitor = selector.register(reader, :r)
76
- => #<NIO::Monitor:0xfbc>
77
- ```
86
+ See also:
78
87
 
79
- After registering an IO object with the selector, you'll get a NIO::Monitor
80
- object which you can use for managing how a particular IO object is being
81
- monitored. Monitors will store an arbitrary value of your choice, which
82
- provides an easy way to implement callbacks:
88
+ * [YARD API documentation](http://www.rubydoc.info/gems/nio4r/frames)
83
89
 
84
- ```ruby
85
- >> monitor = selector.register(reader, :r)
86
- => #<NIO::Monitor:0xfbc>
87
- >> monitor.value = proc { puts "Got some data: #{monitor.io.read_nonblock(4096)}" }
88
- => #<Proc:0x1000@(irb):4>
89
- ```
90
+ ## Non-goals
90
91
 
91
- The main method of importance is NIO::Selector#select, which monitors all
92
- registered IO objects and returns an array of monitors that are ready.
93
-
94
- ```ruby
95
- >> writer << "Hi there!"
96
- => #<IO:0x103c>
97
- >> ready = selector.select
98
- => [#<NIO::Monitor:0xfbc>]
99
- >> ready.each { |m| m.value.call }
100
- Got some data: Hi there!
101
- => [#<NIO::Monitor:0xfbc>]
102
- ```
103
-
104
- By default, NIO::Selector#select will block indefinitely until one of the IO
105
- objects being monitored becomes ready. However, you can also pass a timeout to
106
- wait in seconds to NIO::Selector#select just like you can with Kernel.select:
92
+ **nio4r** is not a full-featured event framework like [EventMachine] or [Cool.io].
93
+ Instead, nio4r is the sort of thing you might write a library like that on
94
+ top of. nio4r provides a minimal API such that individual Ruby implementers
95
+ may choose to produce optimized versions for their platform, without having
96
+ to maintain a large codebase.
107
97
 
108
- ```ruby
109
- ready = selector.select(15) # Wait 15 seconds
110
- ```
98
+ [EventMachine]: https://github.com/eventmachine/eventmachine
99
+ [Cool.io]: https://coolio.github.io/
111
100
 
112
- If a timeout occurs, ready will be nil.
101
+ ## Releases
113
102
 
114
- You can avoid allocating an array each time you call NIO::Selector#select by
115
- passing a block to select. The block will be called for each ready monitor
116
- object, with that object passed as an argument. The number of ready monitors
117
- is returned as a Fixnum:
103
+ ### CRuby
118
104
 
119
- ```ruby
120
- >> selector.select { |m| m.value.call }
121
- Got some data: Hi there!
122
- => 1
123
105
  ```
124
-
125
- When you're done monitoring a particular IO object, just deregister it from
126
- the selector:
127
-
128
- ```ruby
129
- selector.deregister(reader)
106
+ rake clean
107
+ rake release
130
108
  ```
131
109
 
132
- ### Monitors
133
-
134
- Monitors provide methods which let you introspect on why a particular IO
135
- object was selected. These methods are not thread safe unless you are holding
136
- the selector lock (i.e. if you're in a block passed to #select). Only use them
137
- if you aren't concerned with thread safety, or you're within a #select
138
- block:
139
-
140
- - ***#interests***: what this monitor is interested in (:r, :w, or :rw)
141
- - ***#interests=***: change the current interests for a monitor (to :r, :w, or :rw)
142
- - ***#readiness***: what I/O operations the monitored object is ready for
143
- - ***#readable?***: was the IO readable last time it was selected?
144
- - ***#writable?***: was the IO writable last time it was selected?
145
-
146
- Monitors also support a ***#value*** and ***#value=*** method for storing a
147
- handle to an arbitrary object of your choice (e.g. a proc)
148
-
149
- ### Flow Control
110
+ ### JRuby
150
111
 
151
- For information on how to compose nio4r selectors inside of event loops,
152
- please read the [Flow Control Guide on the
153
- Wiki](https://github.com/celluloid/nio4r/wiki/Basic-Flow-Control)
112
+ You might need to delete `Gemfile.lock` before trying to `bundle install`.
154
113
 
155
- Concurrency
156
- -----------
114
+ ```
115
+ rake clean
116
+ rake compile
117
+ rake release
118
+ ```
157
119
 
158
- nio4r provides internal locking to ensure that it's safe to use from multiple
159
- concurrent threads. Only one thread can select on a NIO::Selector at a given
160
- time, and while a thread is selecting other threads are blocked from
161
- registering or deregistering IO objects. Once a pending select has completed,
162
- requests to register/unregister IO objects will be processed.
120
+ ## License
163
121
 
164
- NIO::Selector#wakeup allows one thread to unblock another thread that's in the
165
- middle of an NIO::Selector#select operation. This lets other threads that need
166
- to communicate immediately with the selector unblock it so it can process
167
- other events that it's not presently selecting on.
122
+ Released under the MIT license.
168
123
 
169
- What nio4r is not
170
- -----------------
124
+ Copyright, 2019, by Tony Arcieri.
125
+ Copyright, 2019, by [Samuel G. D. Williams](http://www.codeotaku.com/samuel-williams).
171
126
 
172
- nio4r is not a full-featured event framework like EventMachine or Cool.io.
173
- Instead, nio4r is the sort of thing you might write a library like that on
174
- top of. nio4r provides a minimal API such that individual Ruby implementers
175
- may choose to produce optimized versions for their platform, without having
176
- to maintain a large codebase.
127
+ Permission is hereby granted, free of charge, to any person obtaining a copy
128
+ of this software and associated documentation files (the "Software"), to deal
129
+ in the Software without restriction, including without limitation the rights
130
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
131
+ copies of the Software, and to permit persons to whom the Software is
132
+ furnished to do so, subject to the following conditions:
177
133
 
178
- As of the time of writing, the current implementation is (approximately):
134
+ The above copyright notice and this permission notice shall be included in
135
+ all copies or substantial portions of the Software.
179
136
 
180
- * 200 lines of Ruby code
181
- * 700 lines of "custom" C code (not counting libev)
182
- * 400 lines of Java code
137
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
138
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
139
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
140
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
141
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
142
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
143
+ THE SOFTWARE.
183
144
 
184
- nio4r is also not a replacement for Kinder Gentler IO (KGIO), a set of
185
- advanced Ruby IO APIs. At some point in the future nio4r might provide a
186
- cross-platform implementation that uses KGIO on CRubies, and Java NIO on JRuby,
187
- however this is not the case today.
145
+ ### libev
188
146
 
189
- License
190
- -------
147
+ Released under the BSD license. See [ext/libev/LICENSE] for details.
191
148
 
192
- Copyright (c) 2011-2016 Tony Arcieri. Distributed under the MIT License.
193
- See LICENSE.txt for further details.
149
+ Copyright, 2007-2019, by Marc Alexander Lehmann.
194
150
 
195
- Includes libev 4.22. Copyright (c) 2007-2015 Marc Alexander Lehmann.
196
- Distributed under the BSD license. See ext/libev/LICENSE for details.
151
+ [ext/libev/LICENSE]: https://github.com/socketry/nio4r/blob/master/ext/libev/LICENSE
data/Rakefile CHANGED
@@ -1,9 +1,8 @@
1
- #!/usr/bin/env rake
1
+ # frozen_string_literal: true
2
+
2
3
  require "bundler/gem_tasks"
3
4
  require "rake/clean"
4
5
 
5
- Dir[File.expand_path("../tasks/**/*.rake", __FILE__)].each { |task| load task }
6
-
7
- task default: %w(compile spec rubocop)
6
+ task default: %w[compile spec rubocop]
8
7
 
9
8
  CLEAN.include "**/*.o", "**/*.so", "**/*.bundle", "**/*.jar", "pkg", "tmp"
@@ -1,6 +1,7 @@
1
1
  #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
2
3
 
3
- $LOAD_PATH.push File.expand_path("../../lib", __FILE__)
4
+ $LOAD_PATH.push File.expand_path("../lib", __dir__)
4
5
  require "nio"
5
6
  require "socket"
6
7
 
@@ -18,7 +19,7 @@ class EchoServer
18
19
 
19
20
  def run
20
21
  loop do
21
- @selector.select { |monitor| monitor.value.call(monitor) }
22
+ @selector.select { |monitor| monitor.value.call }
22
23
  end
23
24
  end
24
25
 
@@ -1,18 +1,49 @@
1
1
  Revision history for libev, a high-performance and full-featured event loop.
2
2
 
3
- TODO: ev_loop_wakeup
4
- TODO: EV_STANDALONE == NO_HASSEL (do not use clock_gettime in ev_standalone)
5
- TODO: faq, process a thing in each iteration
6
- TODO: debugging tips, ev_verify, ev_init twice
7
- TODO: ev_break for immediate exit (EVBREAK_NOW?)
8
- TODO: ev_feed_child_event
9
- TODO: document the special problem of signals around fork.
10
- TODO: store pid for each signal
11
- TODO: document file descriptor usage per loop
12
- TODO: store loop pid_t and compare inside signal handler, store 1 for same, 2 for differing pid, clean up in loop_fork
13
- TODO: embed watchers need updating when fd changes
14
- TODO: document portability requirements for atomic pointer access
15
- TODO: document requirements for function pointers and calling conventions.
3
+ 4.27 Thu Jun 27 22:43:44 CEST 2019
4
+ - linux aio backend almost completely rewritten to work around its
5
+ limitations.
6
+ - epoll backend now mandatory for linux aio backend.
7
+ - fail assertions more aggressively on invalid fd's detected
8
+ in the event loop, do not just silently fd_kill in case of
9
+ user error.
10
+ - ev_io_start/ev_io_stop now verify the watcher fd using
11
+ a syscall when EV_VERIFY is 2 or higher.
12
+
13
+ 4.26 (EV only)
14
+ - update to libecb 0x00010006.
15
+ - new experimental linux aio backend (linux 4.18+).
16
+ - removed redundant 0-ptr check in ev_once.
17
+ - updated/extended ev_set_allocator documentation.
18
+ - replaced EMPTY2 macro by array_needsize_noinit.
19
+ - minor code cleanups.
20
+ - epoll backend now uses epoll_create1 also after fork.
21
+
22
+ 4.25 Fri Dec 21 07:49:20 CET 2018
23
+ - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
24
+ (EV_THROW still provided) and now uses noexcept on C++11 or newer.
25
+ - move the darwin select workaround higher in ev.c, as newer versions of
26
+ darwin managed to break their broken select even more.
27
+ - ANDROID => __ANDROID__ (reported by enh@google.com).
28
+ - disable epoll_create1 on android because it has broken header files
29
+ and google is unwilling to fix them (reported by enh@google.com).
30
+ - avoid a minor compilation warning on win32.
31
+ - c++: remove deprecated dynamic throw() specifications.
32
+ - c++: improve the (unsupported) bad_loop exception class.
33
+ - backport perl ev_periodic example to C, untested.
34
+ - update libecb, biggest change is to include a memory fence
35
+ in ECB_MEMORY_FENCE_RELEASE on x86/amd64.
36
+ - minor autoconf/automake modernisation.
37
+
38
+ 4.24 Wed Dec 28 05:19:55 CET 2016
39
+ - bump version to 4.24, as the release tarball inexplicably
40
+ didn't have the right version in ev.h, even though the cvs-tagged
41
+ version did have the right one (reported by Ales Teska).
42
+
43
+ 4.23 Wed Nov 16 18:23:41 CET 2016
44
+ - move some declarations at the beginning to help certain retarded
45
+ microsoft compilers, even though their documentation claims
46
+ otherwise (reported by Ruslan Osmanov).
16
47
 
17
48
  4.22 Sun Dec 20 22:11:50 CET 2015
18
49
  - when epoll detects unremovable fds in the fd set, rebuild
@@ -18,7 +18,8 @@ ABOUT
18
18
  - extensive and detailed, readable documentation (not doxygen garbage).
19
19
  - fully supports fork, can detect fork in various ways and automatically
20
20
  re-arms kernel mechanisms that do not support fork.
21
- - highly optimised select, poll, epoll, kqueue and event ports backends.
21
+ - highly optimised select, poll, linux epoll, linux aio, bsd kqueue
22
+ and solaris event ports backends.
22
23
  - filesystem object (path) watching (with optional linux inotify support).
23
24
  - wallclock-based times (using absolute time, cron-like).
24
25
  - relative timers/timeouts (handle time jumps).
@@ -1,7 +1,7 @@
1
1
  /*
2
2
  * libev event processing core, watcher management
3
3
  *
4
- * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
4
+ * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
5
5
  * All rights reserved.
6
6
  *
7
7
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -39,6 +39,11 @@
39
39
 
40
40
  /* ########## NIO4R PATCHERY HO! ########## */
41
41
  #include "ruby.h"
42
+ #include "ruby/thread.h"
43
+
44
+ #ifdef __APPLE__
45
+ #include <AvailabilityMacros.h>
46
+ #endif
42
47
  /* ######################################## */
43
48
 
44
49
  /* this big block deduces configuration from config.h */
@@ -121,6 +126,15 @@
121
126
  # define EV_USE_EPOLL 0
122
127
  # endif
123
128
 
129
+ # if HAVE_LINUX_AIO_ABI_H
130
+ # ifndef EV_USE_LINUXAIO
131
+ # define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
132
+ # endif
133
+ # else
134
+ # undef EV_USE_LINUXAIO
135
+ # define EV_USE_LINUXAIO 0
136
+ # endif
137
+
124
138
  # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
125
139
  # ifndef EV_USE_KQUEUE
126
140
  # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -168,6 +182,16 @@
168
182
 
169
183
  #endif
170
184
 
185
+ /* OS X, in its infinite idiocy, actually HARDCODES
186
+ * a limit of 1024 into their select. Where people have brains,
187
+ * OS X engineers apparently have a vacuum. Or maybe they were
188
+ * ordered to have a vacuum, or they do anything for money.
189
+ * This might help. Or not.
190
+ * Note that this must be defined early, as other include files
191
+ * will rely on this define as well.
192
+ */
193
+ #define _DARWIN_UNLIMITED_SELECT 1
194
+
171
195
  #include <stdlib.h>
172
196
  #include <string.h>
173
197
  #include <fcntl.h>
@@ -215,14 +239,6 @@
215
239
  # undef EV_AVOID_STDIO
216
240
  #endif
217
241
 
218
- /* OS X, in its infinite idiocy, actually HARDCODES
219
- * a limit of 1024 into their select. Where people have brains,
220
- * OS X engineers apparently have a vacuum. Or maybe they were
221
- * ordered to have a vacuum, or they do anything for money.
222
- * This might help. Or not.
223
- */
224
- #define _DARWIN_UNLIMITED_SELECT 1
225
-
226
242
  /* this block tries to deduce configuration from header-defined symbols and defaults */
227
243
 
228
244
  /* try to deduce the maximum number of signals on this platform */
@@ -369,7 +385,7 @@
369
385
  # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
370
386
  #endif
371
387
 
372
- #ifdef ANDROID
388
+ #ifdef __ANDROID__
373
389
  /* supposedly, android doesn't typedef fd_mask */
374
390
  # undef EV_USE_SELECT
375
391
  # define EV_USE_SELECT 0
@@ -423,6 +439,14 @@
423
439
  # endif
424
440
  #endif
425
441
 
442
+ #if EV_USE_LINUXAIO
443
+ # include <sys/syscall.h>
444
+ # if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linuxaio uses ev_poll.c:ev_epoll_create */
445
+ # undef EV_USE_LINUXAIO
446
+ # define EV_USE_LINUXAIO 0
447
+ # endif
448
+ #endif
449
+
426
450
  #if EV_USE_INOTIFY
427
451
  # include <sys/statfs.h>
428
452
  # include <sys/inotify.h>
@@ -538,7 +562,7 @@ struct signalfd_siginfo
538
562
  #define ECB_H
539
563
 
540
564
  /* 16 bits major, 16 bits minor */
541
- #define ECB_VERSION 0x00010005
565
+ #define ECB_VERSION 0x00010006
542
566
 
543
567
  #ifdef _WIN32
544
568
  typedef signed char int8_t;
@@ -613,6 +637,8 @@ struct signalfd_siginfo
613
637
 
614
638
  #define ECB_CPP (__cplusplus+0)
615
639
  #define ECB_CPP11 (__cplusplus >= 201103L)
640
+ #define ECB_CPP14 (__cplusplus >= 201402L)
641
+ #define ECB_CPP17 (__cplusplus >= 201703L)
616
642
 
617
643
  #if ECB_CPP
618
644
  #define ECB_C 0
@@ -624,6 +650,7 @@ struct signalfd_siginfo
624
650
 
625
651
  #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
626
652
  #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
653
+ #define ECB_C17 (ECB_STDC_VERSION >= 201710L)
627
654
 
628
655
  #if ECB_CPP
629
656
  #define ECB_EXTERN_C extern "C"
@@ -659,14 +686,15 @@ struct signalfd_siginfo
659
686
 
660
687
  #ifndef ECB_MEMORY_FENCE
661
688
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
689
+ #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
662
690
  #if __i386 || __i386__
663
691
  #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
664
692
  #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
665
- #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
693
+ #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
666
694
  #elif ECB_GCC_AMD64
667
695
  #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
668
696
  #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
669
- #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
697
+ #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
670
698
  #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
671
699
  #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
672
700
  #elif defined __ARM_ARCH_2__ \
@@ -718,12 +746,14 @@ struct signalfd_siginfo
718
746
  #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
719
747
  #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
720
748
  #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
749
+ #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
721
750
 
722
751
  #elif ECB_CLANG_EXTENSION(c_atomic)
723
752
  /* see comment below (stdatomic.h) about the C11 memory model. */
724
753
  #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
725
754
  #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
726
755
  #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
756
+ #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
727
757
 
728
758
  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
729
759
  #define ECB_MEMORY_FENCE __sync_synchronize ()
@@ -743,9 +773,10 @@ struct signalfd_siginfo
743
773
  #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
744
774
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
745
775
  #include <mbarrier.h>
746
- #define ECB_MEMORY_FENCE __machine_rw_barrier ()
747
- #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
748
- #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
776
+ #define ECB_MEMORY_FENCE __machine_rw_barrier ()
777
+ #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
778
+ #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
779
+ #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
749
780
  #elif __xlC__
750
781
  #define ECB_MEMORY_FENCE __sync ()
751
782
  #endif
@@ -756,15 +787,9 @@ struct signalfd_siginfo
756
787
  /* we assume that these memory fences work on all variables/all memory accesses, */
757
788
  /* not just C11 atomics and atomic accesses */
758
789
  #include <stdatomic.h>
759
- /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
760
- /* any fence other than seq_cst, which isn't very efficient for us. */
761
- /* Why that is, we don't know - either the C11 memory model is quite useless */
762
- /* for most usages, or gcc and clang have a bug */
763
- /* I *currently* lean towards the latter, and inefficiently implement */
764
- /* all three of ecb's fences as a seq_cst fence */
765
- /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
766
- /* for all __atomic_thread_fence's except seq_cst */
767
790
  #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
791
+ #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
792
+ #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
768
793
  #endif
769
794
  #endif
770
795
 
@@ -794,6 +819,10 @@ struct signalfd_siginfo
794
819
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
795
820
  #endif
796
821
 
822
+ #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
823
+ #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
824
+ #endif
825
+
797
826
  /*****************************************************************************/
798
827
 
799
828
  #if ECB_CPP
@@ -1533,7 +1562,7 @@ ecb_binary32_to_binary16 (uint32_t x)
1533
1562
  #if EV_FEATURE_CODE
1534
1563
  # define inline_speed ecb_inline
1535
1564
  #else
1536
- # define inline_speed static noinline
1565
+ # define inline_speed noinline static
1537
1566
  #endif
1538
1567
 
1539
1568
  #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
@@ -1544,8 +1573,7 @@ ecb_binary32_to_binary16 (uint32_t x)
1544
1573
  # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
1545
1574
  #endif
1546
1575
 
1547
- #define EMPTY /* required for microsofts broken pseudo-c compiler */
1548
- #define EMPTY2(a,b) /* used to suppress some warnings */
1576
+ #define EMPTY /* required for microsofts broken pseudo-c compiler */
1549
1577
 
1550
1578
  typedef ev_watcher *W;
1551
1579
  typedef ev_watcher_list *WL;
@@ -1580,6 +1608,10 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
1580
1608
 
1581
1609
  /*****************************************************************************/
1582
1610
 
1611
+ #if EV_USE_LINUXAIO
1612
+ # include <linux/aio_abi.h> /* probably only needed for aio_context_t */
1613
+ #endif
1614
+
1583
1615
  /* define a suitable floor function (only used by periodics atm) */
1584
1616
 
1585
1617
  #if EV_USE_FLOOR
@@ -1590,7 +1622,8 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
1590
1622
  #include <float.h>
1591
1623
 
1592
1624
  /* a floor() replacement function, should be independent of ev_tstamp type */
1593
- static ev_tstamp noinline
1625
+ noinline
1626
+ static ev_tstamp
1594
1627
  ev_floor (ev_tstamp v)
1595
1628
  {
1596
1629
  /* the choice of shift factor is not terribly important */
@@ -1632,7 +1665,8 @@ ev_floor (ev_tstamp v)
1632
1665
  # include <sys/utsname.h>
1633
1666
  #endif
1634
1667
 
1635
- static unsigned int noinline ecb_cold
1668
+ noinline ecb_cold
1669
+ static unsigned int
1636
1670
  ev_linux_version (void)
1637
1671
  {
1638
1672
  #ifdef __linux
@@ -1671,22 +1705,25 @@ ev_linux_version (void)
1671
1705
  /*****************************************************************************/
1672
1706
 
1673
1707
  #if EV_AVOID_STDIO
1674
- static void noinline ecb_cold
1708
+ noinline ecb_cold
1709
+ static void
1675
1710
  ev_printerr (const char *msg)
1676
1711
  {
1677
1712
  write (STDERR_FILENO, msg, strlen (msg));
1678
1713
  }
1679
1714
  #endif
1680
1715
 
1681
- static void (*syserr_cb)(const char *msg) EV_THROW;
1716
+ static void (*syserr_cb)(const char *msg) EV_NOEXCEPT;
1682
1717
 
1683
- void ecb_cold
1684
- ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
1718
+ ecb_cold
1719
+ void
1720
+ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1685
1721
  {
1686
1722
  syserr_cb = cb;
1687
1723
  }
1688
1724
 
1689
- static void noinline ecb_cold
1725
+ noinline ecb_cold
1726
+ static void
1690
1727
  ev_syserr (const char *msg)
1691
1728
  {
1692
1729
  if (!msg)
@@ -1709,7 +1746,7 @@ ev_syserr (const char *msg)
1709
1746
  }
1710
1747
 
1711
1748
  static void *
1712
- ev_realloc_emul (void *ptr, long size) EV_THROW
1749
+ ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT
1713
1750
  {
1714
1751
  /* some systems, notably openbsd and darwin, fail to properly
1715
1752
  * implement realloc (x, 0) (as required by both ansi c-89 and
@@ -1725,16 +1762,17 @@ ev_realloc_emul (void *ptr, long size) EV_THROW
1725
1762
  return 0;
1726
1763
  }
1727
1764
 
1728
- static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
1765
+ static void *(*alloc)(void *ptr, size_t size) EV_NOEXCEPT = ev_realloc_emul;
1729
1766
 
1730
- void ecb_cold
1731
- ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
1767
+ ecb_cold
1768
+ void
1769
+ ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT
1732
1770
  {
1733
1771
  alloc = cb;
1734
1772
  }
1735
1773
 
1736
1774
  inline_speed void *
1737
- ev_realloc (void *ptr, long size)
1775
+ ev_realloc (void *ptr, size_t size)
1738
1776
  {
1739
1777
  ptr = alloc (ptr, size);
1740
1778
 
@@ -1765,7 +1803,7 @@ typedef struct
1765
1803
  WL head;
1766
1804
  unsigned char events; /* the events watched for */
1767
1805
  unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1768
- unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
1806
+ unsigned char emask; /* some backends store the actual kernel mask in here */
1769
1807
  unsigned char unused;
1770
1808
  #if EV_USE_EPOLL
1771
1809
  unsigned int egen; /* generation counter to counter epoll bugs */
@@ -1855,7 +1893,7 @@ typedef struct
1855
1893
 
1856
1894
  #ifndef EV_HAVE_EV_TIME
1857
1895
  ev_tstamp
1858
- ev_time (void) EV_THROW
1896
+ ev_time (void) EV_NOEXCEPT
1859
1897
  {
1860
1898
  #if EV_USE_REALTIME
1861
1899
  if (expect_true (have_realtime))
@@ -1889,14 +1927,14 @@ get_clock (void)
1889
1927
 
1890
1928
  #if EV_MULTIPLICITY
1891
1929
  ev_tstamp
1892
- ev_now (EV_P) EV_THROW
1930
+ ev_now (EV_P) EV_NOEXCEPT
1893
1931
  {
1894
1932
  return ev_rt_now;
1895
1933
  }
1896
1934
  #endif
1897
1935
 
1898
1936
  void
1899
- ev_sleep (ev_tstamp delay) EV_THROW
1937
+ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1900
1938
  {
1901
1939
  if (delay > 0.)
1902
1940
  {
@@ -1906,6 +1944,8 @@ ev_sleep (ev_tstamp delay) EV_THROW
1906
1944
  EV_TS_SET (ts, delay);
1907
1945
  nanosleep (&ts, 0);
1908
1946
  #elif defined _WIN32
1947
+ /* maybe this should round up, as ms is very low resolution */
1948
+ /* compared to select (µs) or nanosleep (ns) */
1909
1949
  Sleep ((unsigned long)(delay * 1e3));
1910
1950
  #else
1911
1951
  struct timeval tv;
@@ -1946,23 +1986,26 @@ array_nextsize (int elem, int cur, int cnt)
1946
1986
  return ncur;
1947
1987
  }
1948
1988
 
1949
- static void * noinline ecb_cold
1989
+ noinline ecb_cold
1990
+ static void *
1950
1991
  array_realloc (int elem, void *base, int *cur, int cnt)
1951
1992
  {
1952
1993
  *cur = array_nextsize (elem, *cur, cnt);
1953
1994
  return ev_realloc (base, elem * *cur);
1954
1995
  }
1955
1996
 
1956
- #define array_init_zero(base,count) \
1957
- memset ((void *)(base), 0, sizeof (*(base)) * (count))
1997
+ #define array_needsize_noinit(base,offset,count)
1998
+
1999
+ #define array_needsize_zerofill(base,offset,count) \
2000
+ memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
1958
2001
 
1959
2002
  #define array_needsize(type,base,cur,cnt,init) \
1960
2003
  if (expect_false ((cnt) > (cur))) \
1961
2004
  { \
1962
- int ecb_unused ocur_ = (cur); \
2005
+ ecb_unused int ocur_ = (cur); \
1963
2006
  (base) = (type *)array_realloc \
1964
2007
  (sizeof (type), (base), &(cur), (cnt)); \
1965
- init ((base) + (ocur_), (cur) - ocur_); \
2008
+ init ((base), ocur_, ((cur) - ocur_)); \
1966
2009
  }
1967
2010
 
1968
2011
  #if 0
@@ -1981,13 +2024,15 @@ array_realloc (int elem, void *base, int *cur, int cnt)
1981
2024
  /*****************************************************************************/
1982
2025
 
1983
2026
  /* dummy callback for pending events */
1984
- static void noinline
2027
+ noinline
2028
+ static void
1985
2029
  pendingcb (EV_P_ ev_prepare *w, int revents)
1986
2030
  {
1987
2031
  }
1988
2032
 
1989
- void noinline
1990
- ev_feed_event (EV_P_ void *w, int revents) EV_THROW
2033
+ noinline
2034
+ void
2035
+ ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
1991
2036
  {
1992
2037
  W w_ = (W)w;
1993
2038
  int pri = ABSPRI (w_);
@@ -1997,7 +2042,7 @@ ev_feed_event (EV_P_ void *w, int revents) EV_THROW
1997
2042
  else
1998
2043
  {
1999
2044
  w_->pending = ++pendingcnt [pri];
2000
- array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
2045
+ array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
2001
2046
  pendings [pri][w_->pending - 1].w = w_;
2002
2047
  pendings [pri][w_->pending - 1].events = revents;
2003
2048
  }
@@ -2008,7 +2053,7 @@ ev_feed_event (EV_P_ void *w, int revents) EV_THROW
2008
2053
  inline_speed void
2009
2054
  feed_reverse (EV_P_ W w)
2010
2055
  {
2011
- array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
2056
+ array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit);
2012
2057
  rfeeds [rfeedcnt++] = w;
2013
2058
  }
2014
2059
 
@@ -2058,7 +2103,7 @@ fd_event (EV_P_ int fd, int revents)
2058
2103
  }
2059
2104
 
2060
2105
  void
2061
- ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
2106
+ ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT
2062
2107
  {
2063
2108
  if (fd >= 0 && fd < anfdmax)
2064
2109
  fd_event_nocheck (EV_A_ fd, revents);
@@ -2105,7 +2150,7 @@ fd_reify (EV_P)
2105
2150
  unsigned char o_events = anfd->events;
2106
2151
  unsigned char o_reify = anfd->reify;
2107
2152
 
2108
- anfd->reify = 0;
2153
+ anfd->reify = 0;
2109
2154
 
2110
2155
  /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2111
2156
  {
@@ -2126,7 +2171,8 @@ fd_reify (EV_P)
2126
2171
  }
2127
2172
 
2128
2173
  /* something about the given fd changed */
2129
- inline_size void
2174
+ inline_size
2175
+ void
2130
2176
  fd_change (EV_P_ int fd, int flags)
2131
2177
  {
2132
2178
  unsigned char reify = anfds [fd].reify;
@@ -2135,13 +2181,13 @@ fd_change (EV_P_ int fd, int flags)
2135
2181
  if (expect_true (!reify))
2136
2182
  {
2137
2183
  ++fdchangecnt;
2138
- array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
2184
+ array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
2139
2185
  fdchanges [fdchangecnt - 1] = fd;
2140
2186
  }
2141
2187
  }
2142
2188
 
2143
2189
  /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
2144
- inline_speed void ecb_cold
2190
+ inline_speed ecb_cold void
2145
2191
  fd_kill (EV_P_ int fd)
2146
2192
  {
2147
2193
  ev_io *w;
@@ -2154,7 +2200,7 @@ fd_kill (EV_P_ int fd)
2154
2200
  }
2155
2201
 
2156
2202
  /* check whether the given fd is actually valid, for error recovery */
2157
- inline_size int ecb_cold
2203
+ inline_size ecb_cold int
2158
2204
  fd_valid (int fd)
2159
2205
  {
2160
2206
  #ifdef _WIN32
@@ -2165,7 +2211,8 @@ fd_valid (int fd)
2165
2211
  }
2166
2212
 
2167
2213
  /* called on EBADF to verify fds */
2168
- static void noinline ecb_cold
2214
+ noinline ecb_cold
2215
+ static void
2169
2216
  fd_ebadf (EV_P)
2170
2217
  {
2171
2218
  int fd;
@@ -2177,7 +2224,8 @@ fd_ebadf (EV_P)
2177
2224
  }
2178
2225
 
2179
2226
  /* called on ENOMEM in select/poll to kill some fds and retry */
2180
- static void noinline ecb_cold
2227
+ noinline ecb_cold
2228
+ static void
2181
2229
  fd_enomem (EV_P)
2182
2230
  {
2183
2231
  int fd;
@@ -2191,7 +2239,8 @@ fd_enomem (EV_P)
2191
2239
  }
2192
2240
 
2193
2241
  /* usually called after fork if backend needs to re-arm all fds from scratch */
2194
- static void noinline
2242
+ noinline
2243
+ static void
2195
2244
  fd_rearm_all (EV_P)
2196
2245
  {
2197
2246
  int fd;
@@ -2382,7 +2431,8 @@ static ANSIG signals [EV_NSIG - 1];
2382
2431
 
2383
2432
  #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2384
2433
 
2385
- static void noinline ecb_cold
2434
+ noinline ecb_cold
2435
+ static void
2386
2436
  evpipe_init (EV_P)
2387
2437
  {
2388
2438
  if (!ev_is_active (&pipe_w))
@@ -2463,7 +2513,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2463
2513
  #ifdef _WIN32
2464
2514
  WSABUF buf;
2465
2515
  DWORD sent;
2466
- buf.buf = &buf;
2516
+ buf.buf = (char *)&buf;
2467
2517
  buf.len = 1;
2468
2518
  WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);
2469
2519
  #else
@@ -2545,7 +2595,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
2545
2595
  /*****************************************************************************/
2546
2596
 
2547
2597
  void
2548
- ev_feed_signal (int signum) EV_THROW
2598
+ ev_feed_signal (int signum) EV_NOEXCEPT
2549
2599
  {
2550
2600
  #if EV_MULTIPLICITY
2551
2601
  EV_P;
@@ -2570,8 +2620,9 @@ ev_sighandler (int signum)
2570
2620
  ev_feed_signal (signum);
2571
2621
  }
2572
2622
 
2573
- void noinline
2574
- ev_feed_signal_event (EV_P_ int signum) EV_THROW
2623
+ noinline
2624
+ void
2625
+ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2575
2626
  {
2576
2627
  WL w;
2577
2628
 
@@ -2690,6 +2741,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
2690
2741
  #if EV_USE_EPOLL
2691
2742
  # include "ev_epoll.c"
2692
2743
  #endif
2744
+ #if EV_USE_LINUXAIO
2745
+ # include "ev_linuxaio.c"
2746
+ #endif
2693
2747
  #if EV_USE_POLL
2694
2748
  # include "ev_poll.c"
2695
2749
  #endif
@@ -2697,20 +2751,20 @@ childcb (EV_P_ ev_signal *sw, int revents)
2697
2751
  # include "ev_select.c"
2698
2752
  #endif
2699
2753
 
2700
- int ecb_cold
2701
- ev_version_major (void) EV_THROW
2754
+ ecb_cold int
2755
+ ev_version_major (void) EV_NOEXCEPT
2702
2756
  {
2703
2757
  return EV_VERSION_MAJOR;
2704
2758
  }
2705
2759
 
2706
- int ecb_cold
2707
- ev_version_minor (void) EV_THROW
2760
+ ecb_cold int
2761
+ ev_version_minor (void) EV_NOEXCEPT
2708
2762
  {
2709
2763
  return EV_VERSION_MINOR;
2710
2764
  }
2711
2765
 
2712
2766
  /* return true if we are running with elevated privileges and should ignore env variables */
2713
- int inline_size ecb_cold
2767
+ inline_size ecb_cold int
2714
2768
  enable_secure (void)
2715
2769
  {
2716
2770
  #ifdef _WIN32
@@ -2721,44 +2775,58 @@ enable_secure (void)
2721
2775
  #endif
2722
2776
  }
2723
2777
 
2724
- unsigned int ecb_cold
2725
- ev_supported_backends (void) EV_THROW
2778
+ ecb_cold
2779
+ unsigned int
2780
+ ev_supported_backends (void) EV_NOEXCEPT
2726
2781
  {
2727
2782
  unsigned int flags = 0;
2728
2783
 
2729
- if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2730
- if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
2731
- if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2732
- if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2733
- if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
2784
+ if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2785
+ if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2786
+ if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2787
+
2788
+ #ifdef EV_USE_LINUXAIO
2789
+ if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2790
+ #endif
2791
+
2792
+ if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2793
+ if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2734
2794
 
2735
2795
  return flags;
2736
2796
  }
2737
2797
 
2738
- unsigned int ecb_cold
2739
- ev_recommended_backends (void) EV_THROW
2798
+ ecb_cold
2799
+ unsigned int
2800
+ ev_recommended_backends (void) EV_NOEXCEPT
2740
2801
  {
2741
2802
  unsigned int flags = ev_supported_backends ();
2742
2803
 
2743
- #ifndef __NetBSD__
2804
+ #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_14)
2805
+ /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
2806
+ #elif defined(__NetBSD__)
2744
2807
  /* kqueue is borked on everything but netbsd apparently */
2745
2808
  /* it usually doesn't work correctly on anything but sockets and pipes */
2746
- flags &= ~EVBACKEND_KQUEUE;
2747
- #endif
2748
- #ifdef __APPLE__
2809
+ #else
2749
2810
  /* only select works correctly on that "unix-certified" platform */
2750
2811
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
2751
2812
  flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
2752
2813
  #endif
2814
+
2753
2815
  #ifdef __FreeBSD__
2754
2816
  flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
2755
2817
  #endif
2756
2818
 
2819
+ /* TODO: linuxaio is very experimental */
2820
+ #if !EV_RECOMMEND_LINUXAIO
2821
+ flags &= ~EVBACKEND_LINUXAIO;
2822
+ #endif
2823
+
2757
2824
  return flags;
2758
2825
  }
2759
2826
 
2760
- unsigned int ecb_cold
2761
- ev_embeddable_backends (void) EV_THROW
2827
+ ecb_cold
2828
+ unsigned int
2829
+ ev_embeddable_backends (void) EV_NOEXCEPT
2762
2830
  {
2763
2831
  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
2764
2832
 
@@ -2770,56 +2838,56 @@ ev_embeddable_backends (void) EV_THROW
2770
2838
  }
2771
2839
 
2772
2840
  unsigned int
2773
- ev_backend (EV_P) EV_THROW
2841
+ ev_backend (EV_P) EV_NOEXCEPT
2774
2842
  {
2775
2843
  return backend;
2776
2844
  }
2777
2845
 
2778
2846
  #if EV_FEATURE_API
2779
2847
  unsigned int
2780
- ev_iteration (EV_P) EV_THROW
2848
+ ev_iteration (EV_P) EV_NOEXCEPT
2781
2849
  {
2782
2850
  return loop_count;
2783
2851
  }
2784
2852
 
2785
2853
  unsigned int
2786
- ev_depth (EV_P) EV_THROW
2854
+ ev_depth (EV_P) EV_NOEXCEPT
2787
2855
  {
2788
2856
  return loop_depth;
2789
2857
  }
2790
2858
 
2791
2859
  void
2792
- ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
2860
+ ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT
2793
2861
  {
2794
2862
  io_blocktime = interval;
2795
2863
  }
2796
2864
 
2797
2865
  void
2798
- ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
2866
+ ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT
2799
2867
  {
2800
2868
  timeout_blocktime = interval;
2801
2869
  }
2802
2870
 
2803
2871
  void
2804
- ev_set_userdata (EV_P_ void *data) EV_THROW
2872
+ ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT
2805
2873
  {
2806
2874
  userdata = data;
2807
2875
  }
2808
2876
 
2809
2877
  void *
2810
- ev_userdata (EV_P) EV_THROW
2878
+ ev_userdata (EV_P) EV_NOEXCEPT
2811
2879
  {
2812
2880
  return userdata;
2813
2881
  }
2814
2882
 
2815
2883
  void
2816
- ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW
2884
+ ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT
2817
2885
  {
2818
2886
  invoke_cb = invoke_pending_cb;
2819
2887
  }
2820
2888
 
2821
2889
  void
2822
- ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
2890
+ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT
2823
2891
  {
2824
2892
  release_cb = release;
2825
2893
  acquire_cb = acquire;
@@ -2827,8 +2895,9 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV
2827
2895
  #endif
2828
2896
 
2829
2897
  /* initialise a loop structure, must be zero-initialised */
2830
- static void noinline ecb_cold
2831
- loop_init (EV_P_ unsigned int flags) EV_THROW
2898
+ noinline ecb_cold
2899
+ static void
2900
+ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2832
2901
  {
2833
2902
  if (!backend)
2834
2903
  {
@@ -2896,22 +2965,25 @@ loop_init (EV_P_ unsigned int flags) EV_THROW
2896
2965
  flags |= ev_recommended_backends ();
2897
2966
 
2898
2967
  #if EV_USE_IOCP
2899
- if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
2968
+ if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
2900
2969
  #endif
2901
2970
  #if EV_USE_PORT
2902
- if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2971
+ if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2903
2972
  #endif
2904
2973
  #if EV_USE_KQUEUE
2905
- if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
2974
+ if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
2975
+ #endif
2976
+ #if EV_USE_LINUXAIO
2977
+ if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2906
2978
  #endif
2907
2979
  #if EV_USE_EPOLL
2908
- if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
2980
+ if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
2909
2981
  #endif
2910
2982
  #if EV_USE_POLL
2911
- if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
2983
+ if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
2912
2984
  #endif
2913
2985
  #if EV_USE_SELECT
2914
- if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
2986
+ if (!backend && (flags & EVBACKEND_SELECT )) backend = select_init (EV_A_ flags);
2915
2987
  #endif
2916
2988
 
2917
2989
  ev_prepare_init (&pending_w, pendingcb);
@@ -2924,7 +2996,8 @@ loop_init (EV_P_ unsigned int flags) EV_THROW
2924
2996
  }
2925
2997
 
2926
2998
  /* free up a loop structure */
2927
- void ecb_cold
2999
+ ecb_cold
3000
+ void
2928
3001
  ev_loop_destroy (EV_P)
2929
3002
  {
2930
3003
  int i;
@@ -2975,22 +3048,25 @@ ev_loop_destroy (EV_P)
2975
3048
  close (backend_fd);
2976
3049
 
2977
3050
  #if EV_USE_IOCP
2978
- if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
3051
+ if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
2979
3052
  #endif
2980
3053
  #if EV_USE_PORT
2981
- if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
3054
+ if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
2982
3055
  #endif
2983
3056
  #if EV_USE_KQUEUE
2984
- if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
3057
+ if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3058
+ #endif
3059
+ #if EV_USE_LINUXAIO
3060
+ if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
2985
3061
  #endif
2986
3062
  #if EV_USE_EPOLL
2987
- if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
3063
+ if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
2988
3064
  #endif
2989
3065
  #if EV_USE_POLL
2990
- if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
3066
+ if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
2991
3067
  #endif
2992
3068
  #if EV_USE_SELECT
2993
- if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
3069
+ if (backend == EVBACKEND_SELECT ) select_destroy (EV_A);
2994
3070
  #endif
2995
3071
 
2996
3072
  for (i = NUMPRI; i--; )
@@ -3042,13 +3118,16 @@ inline_size void
3042
3118
  loop_fork (EV_P)
3043
3119
  {
3044
3120
  #if EV_USE_PORT
3045
- if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3121
+ if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3046
3122
  #endif
3047
3123
  #if EV_USE_KQUEUE
3048
- if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
3124
+ if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3125
+ #endif
3126
+ #if EV_USE_LINUXAIO
3127
+ if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3049
3128
  #endif
3050
3129
  #if EV_USE_EPOLL
3051
- if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3130
+ if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3052
3131
  #endif
3053
3132
  #if EV_USE_INOTIFY
3054
3133
  infy_fork (EV_A);
@@ -3076,8 +3155,9 @@ loop_fork (EV_P)
3076
3155
 
3077
3156
  #if EV_MULTIPLICITY
3078
3157
 
3079
- struct ev_loop * ecb_cold
3080
- ev_loop_new (unsigned int flags) EV_THROW
3158
+ ecb_cold
3159
+ struct ev_loop *
3160
+ ev_loop_new (unsigned int flags) EV_NOEXCEPT
3081
3161
  {
3082
3162
  EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
3083
3163
 
@@ -3094,7 +3174,8 @@ ev_loop_new (unsigned int flags) EV_THROW
3094
3174
  #endif /* multiplicity */
3095
3175
 
3096
3176
  #if EV_VERIFY
3097
- static void noinline ecb_cold
3177
+ noinline ecb_cold
3178
+ static void
3098
3179
  verify_watcher (EV_P_ W w)
3099
3180
  {
3100
3181
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
@@ -3103,7 +3184,8 @@ verify_watcher (EV_P_ W w)
3103
3184
  assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3104
3185
  }
3105
3186
 
3106
- static void noinline ecb_cold
3187
+ noinline ecb_cold
3188
+ static void
3107
3189
  verify_heap (EV_P_ ANHE *heap, int N)
3108
3190
  {
3109
3191
  int i;
@@ -3118,7 +3200,8 @@ verify_heap (EV_P_ ANHE *heap, int N)
3118
3200
  }
3119
3201
  }
3120
3202
 
3121
- static void noinline ecb_cold
3203
+ noinline ecb_cold
3204
+ static void
3122
3205
  array_verify (EV_P_ W *ws, int cnt)
3123
3206
  {
3124
3207
  while (cnt--)
@@ -3131,7 +3214,7 @@ array_verify (EV_P_ W *ws, int cnt)
3131
3214
 
3132
3215
  #if EV_FEATURE_API
3133
3216
  void ecb_cold
3134
- ev_verify (EV_P) EV_THROW
3217
+ ev_verify (EV_P) EV_NOEXCEPT
3135
3218
  {
3136
3219
  #if EV_VERIFY
3137
3220
  int i;
@@ -3217,11 +3300,12 @@ ev_verify (EV_P) EV_THROW
3217
3300
  #endif
3218
3301
 
3219
3302
  #if EV_MULTIPLICITY
3220
- struct ev_loop * ecb_cold
3303
+ ecb_cold
3304
+ struct ev_loop *
3221
3305
  #else
3222
3306
  int
3223
3307
  #endif
3224
- ev_default_loop (unsigned int flags) EV_THROW
3308
+ ev_default_loop (unsigned int flags) EV_NOEXCEPT
3225
3309
  {
3226
3310
  if (!ev_default_loop_ptr)
3227
3311
  {
@@ -3250,7 +3334,7 @@ ev_default_loop (unsigned int flags) EV_THROW
3250
3334
  }
3251
3335
 
3252
3336
  void
3253
- ev_loop_fork (EV_P) EV_THROW
3337
+ ev_loop_fork (EV_P) EV_NOEXCEPT
3254
3338
  {
3255
3339
  postfork = 1;
3256
3340
  }
@@ -3264,7 +3348,7 @@ ev_invoke (EV_P_ void *w, int revents)
3264
3348
  }
3265
3349
 
3266
3350
  unsigned int
3267
- ev_pending_count (EV_P) EV_THROW
3351
+ ev_pending_count (EV_P) EV_NOEXCEPT
3268
3352
  {
3269
3353
  int pri;
3270
3354
  unsigned int count = 0;
@@ -3275,15 +3359,17 @@ ev_pending_count (EV_P) EV_THROW
3275
3359
  return count;
3276
3360
  }
3277
3361
 
3278
- void noinline
3362
+ noinline
3363
+ void
3279
3364
  ev_invoke_pending (EV_P)
3280
3365
  {
3281
3366
  pendingpri = NUMPRI;
3282
3367
 
3283
- while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
3368
+ do
3284
3369
  {
3285
3370
  --pendingpri;
3286
3371
 
3372
+ /* pendingpri possibly gets modified in the inner loop */
3287
3373
  while (pendingcnt [pendingpri])
3288
3374
  {
3289
3375
  ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];
@@ -3293,6 +3379,7 @@ ev_invoke_pending (EV_P)
3293
3379
  EV_FREQUENT_CHECK;
3294
3380
  }
3295
3381
  }
3382
+ while (pendingpri);
3296
3383
  }
3297
3384
 
3298
3385
  #if EV_IDLE_ENABLE
@@ -3360,7 +3447,8 @@ timers_reify (EV_P)
3360
3447
 
3361
3448
  #if EV_PERIODIC_ENABLE
3362
3449
 
3363
- static void noinline
3450
+ noinline
3451
+ static void
3364
3452
  periodic_recalc (EV_P_ ev_periodic *w)
3365
3453
  {
3366
3454
  ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
@@ -3428,7 +3516,8 @@ periodics_reify (EV_P)
3428
3516
 
3429
3517
  /* simply recalculate all periodics */
3430
3518
  /* TODO: maybe ensure that at least one event happens when jumping forward? */
3431
- static void noinline ecb_cold
3519
+ noinline ecb_cold
3520
+ static void
3432
3521
  periodics_reschedule (EV_P)
3433
3522
  {
3434
3523
  int i;
@@ -3451,7 +3540,8 @@ periodics_reschedule (EV_P)
3451
3540
  #endif
3452
3541
 
3453
3542
  /* adjust all timers by a given offset */
3454
- static void noinline ecb_cold
3543
+ noinline ecb_cold
3544
+ static void
3455
3545
  timers_reschedule (EV_P_ ev_tstamp adjust)
3456
3546
  {
3457
3547
  int i;
@@ -3536,29 +3626,27 @@ time_update (EV_P_ ev_tstamp max_block)
3536
3626
  }
3537
3627
 
3538
3628
  /* ########## NIO4R PATCHERY HO! ########## */
3539
- #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
3540
3629
  struct ev_poll_args {
3541
3630
  struct ev_loop *loop;
3542
3631
  ev_tstamp waittime;
3543
3632
  };
3544
3633
 
3545
3634
  static
3546
- VALUE ev_backend_poll(void *ptr)
3635
+ void * ev_backend_poll(void *ptr)
3547
3636
  {
3548
3637
  struct ev_poll_args *args = (struct ev_poll_args *)ptr;
3549
3638
  struct ev_loop *loop = args->loop;
3550
3639
  backend_poll (EV_A_ args->waittime);
3640
+
3641
+ return NULL;
3551
3642
  }
3552
- #endif
3553
3643
  /* ######################################## */
3554
3644
 
3555
3645
  int
3556
3646
  ev_run (EV_P_ int flags)
3557
3647
  {
3558
3648
  /* ########## NIO4R PATCHERY HO! ########## */
3559
- #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
3560
- struct ev_poll_args poll_args;
3561
- #endif
3649
+ struct ev_poll_args poll_args;
3562
3650
  /* ######################################## */
3563
3651
 
3564
3652
  #if EV_FEATURE_API
@@ -3719,25 +3807,9 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3719
3807
  #######################################################################
3720
3808
  */
3721
3809
 
3722
- /*
3723
- simulate to rb_thread_call_without_gvl using rb_theread_blocking_region.
3724
- https://github.com/brianmario/mysql2/blob/master/ext/mysql2/client.h#L8
3725
- */
3726
-
3727
- #ifndef HAVE_RB_THREAD_CALL_WITHOUT_GVL
3728
- #ifdef HAVE_RB_THREAD_BLOCKING_REGION
3729
- #define rb_thread_call_without_gvl(func, data1, ubf, data2) \
3730
- rb_thread_blocking_region((rb_blocking_function_t *)func, data1, ubf, data2)
3731
- #endif
3732
- #endif
3733
-
3734
- #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
3735
3810
  poll_args.loop = loop;
3736
3811
  poll_args.waittime = waittime;
3737
3812
  rb_thread_call_without_gvl(ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0);
3738
- #else
3739
- backend_poll (EV_A_ waittime);
3740
- #endif
3741
3813
  /*
3742
3814
  ############################# END PATCHERY ############################
3743
3815
  */
@@ -3794,37 +3866,37 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3794
3866
  }
3795
3867
 
3796
3868
  void
3797
- ev_break (EV_P_ int how) EV_THROW
3869
+ ev_break (EV_P_ int how) EV_NOEXCEPT
3798
3870
  {
3799
3871
  loop_done = how;
3800
3872
  }
3801
3873
 
3802
3874
  void
3803
- ev_ref (EV_P) EV_THROW
3875
+ ev_ref (EV_P) EV_NOEXCEPT
3804
3876
  {
3805
3877
  ++activecnt;
3806
3878
  }
3807
3879
 
3808
3880
  void
3809
- ev_unref (EV_P) EV_THROW
3881
+ ev_unref (EV_P) EV_NOEXCEPT
3810
3882
  {
3811
3883
  --activecnt;
3812
3884
  }
3813
3885
 
3814
3886
  void
3815
- ev_now_update (EV_P) EV_THROW
3887
+ ev_now_update (EV_P) EV_NOEXCEPT
3816
3888
  {
3817
3889
  time_update (EV_A_ 1e100);
3818
3890
  }
3819
3891
 
3820
3892
  void
3821
- ev_suspend (EV_P) EV_THROW
3893
+ ev_suspend (EV_P) EV_NOEXCEPT
3822
3894
  {
3823
3895
  ev_now_update (EV_A);
3824
3896
  }
3825
3897
 
3826
3898
  void
3827
- ev_resume (EV_P) EV_THROW
3899
+ ev_resume (EV_P) EV_NOEXCEPT
3828
3900
  {
3829
3901
  ev_tstamp mn_prev = mn_now;
3830
3902
 
@@ -3873,7 +3945,7 @@ clear_pending (EV_P_ W w)
3873
3945
  }
3874
3946
 
3875
3947
  int
3876
- ev_clear_pending (EV_P_ void *w) EV_THROW
3948
+ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3877
3949
  {
3878
3950
  W w_ = (W)w;
3879
3951
  int pending = w_->pending;
@@ -3915,8 +3987,9 @@ ev_stop (EV_P_ W w)
3915
3987
 
3916
3988
  /*****************************************************************************/
3917
3989
 
3918
- void noinline
3919
- ev_io_start (EV_P_ ev_io *w) EV_THROW
3990
+ noinline
3991
+ void
3992
+ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3920
3993
  {
3921
3994
  int fd = w->fd;
3922
3995
 
@@ -3926,10 +3999,13 @@ ev_io_start (EV_P_ ev_io *w) EV_THROW
3926
3999
  assert (("libev: ev_io_start called with negative fd", fd >= 0));
3927
4000
  assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
3928
4001
 
4002
+ #if EV_VERIFY >= 2
4003
+ assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd)));
4004
+ #endif
3929
4005
  EV_FREQUENT_CHECK;
3930
4006
 
3931
4007
  ev_start (EV_A_ (W)w, 1);
3932
- array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
4008
+ array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
3933
4009
  wlist_add (&anfds[fd].head, (WL)w);
3934
4010
 
3935
4011
  /* common bug, apparently */
@@ -3941,8 +4017,9 @@ ev_io_start (EV_P_ ev_io *w) EV_THROW
3941
4017
  EV_FREQUENT_CHECK;
3942
4018
  }
3943
4019
 
3944
- void noinline
3945
- ev_io_stop (EV_P_ ev_io *w) EV_THROW
4020
+ noinline
4021
+ void
4022
+ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
3946
4023
  {
3947
4024
  clear_pending (EV_A_ (W)w);
3948
4025
  if (expect_false (!ev_is_active (w)))
@@ -3950,6 +4027,9 @@ ev_io_stop (EV_P_ ev_io *w) EV_THROW
3950
4027
 
3951
4028
  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
3952
4029
 
4030
+ #if EV_VERIFY >= 2
4031
+ assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd)));
4032
+ #endif
3953
4033
  EV_FREQUENT_CHECK;
3954
4034
 
3955
4035
  wlist_del (&anfds[w->fd].head, (WL)w);
@@ -3960,8 +4040,9 @@ ev_io_stop (EV_P_ ev_io *w) EV_THROW
3960
4040
  EV_FREQUENT_CHECK;
3961
4041
  }
3962
4042
 
3963
- void noinline
3964
- ev_timer_start (EV_P_ ev_timer *w) EV_THROW
4043
+ noinline
4044
+ void
4045
+ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
3965
4046
  {
3966
4047
  if (expect_false (ev_is_active (w)))
3967
4048
  return;
@@ -3974,7 +4055,7 @@ ev_timer_start (EV_P_ ev_timer *w) EV_THROW
3974
4055
 
3975
4056
  ++timercnt;
3976
4057
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
3977
- array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
4058
+ array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit);
3978
4059
  ANHE_w (timers [ev_active (w)]) = (WT)w;
3979
4060
  ANHE_at_cache (timers [ev_active (w)]);
3980
4061
  upheap (timers, ev_active (w));
@@ -3984,8 +4065,9 @@ ev_timer_start (EV_P_ ev_timer *w) EV_THROW
3984
4065
  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
3985
4066
  }
3986
4067
 
3987
- void noinline
3988
- ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
4068
+ noinline
4069
+ void
4070
+ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
3989
4071
  {
3990
4072
  clear_pending (EV_A_ (W)w);
3991
4073
  if (expect_false (!ev_is_active (w)))
@@ -4014,8 +4096,9 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
4014
4096
  EV_FREQUENT_CHECK;
4015
4097
  }
4016
4098
 
4017
- void noinline
4018
- ev_timer_again (EV_P_ ev_timer *w) EV_THROW
4099
+ noinline
4100
+ void
4101
+ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4019
4102
  {
4020
4103
  EV_FREQUENT_CHECK;
4021
4104
 
@@ -4042,14 +4125,15 @@ ev_timer_again (EV_P_ ev_timer *w) EV_THROW
4042
4125
  }
4043
4126
 
4044
4127
  ev_tstamp
4045
- ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
4128
+ ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4046
4129
  {
4047
4130
  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
4048
4131
  }
4049
4132
 
4050
4133
  #if EV_PERIODIC_ENABLE
4051
- void noinline
4052
- ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
4134
+ noinline
4135
+ void
4136
+ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4053
4137
  {
4054
4138
  if (expect_false (ev_is_active (w)))
4055
4139
  return;
@@ -4068,7 +4152,7 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
4068
4152
 
4069
4153
  ++periodiccnt;
4070
4154
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
4071
- array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
4155
+ array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit);
4072
4156
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
4073
4157
  ANHE_at_cache (periodics [ev_active (w)]);
4074
4158
  upheap (periodics, ev_active (w));
@@ -4078,8 +4162,9 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
4078
4162
  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4079
4163
  }
4080
4164
 
4081
- void noinline
4082
- ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
4165
+ noinline
4166
+ void
4167
+ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4083
4168
  {
4084
4169
  clear_pending (EV_A_ (W)w);
4085
4170
  if (expect_false (!ev_is_active (w)))
@@ -4106,8 +4191,9 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
4106
4191
  EV_FREQUENT_CHECK;
4107
4192
  }
4108
4193
 
4109
- void noinline
4110
- ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
4194
+ noinline
4195
+ void
4196
+ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4111
4197
  {
4112
4198
  /* TODO: use adjustheap and recalculation */
4113
4199
  ev_periodic_stop (EV_A_ w);
@@ -4121,8 +4207,9 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
4121
4207
 
4122
4208
  #if EV_SIGNAL_ENABLE
4123
4209
 
4124
- void noinline
4125
- ev_signal_start (EV_P_ ev_signal *w) EV_THROW
4210
+ noinline
4211
+ void
4212
+ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4126
4213
  {
4127
4214
  if (expect_false (ev_is_active (w)))
4128
4215
  return;
@@ -4203,8 +4290,9 @@ ev_signal_start (EV_P_ ev_signal *w) EV_THROW
4203
4290
  EV_FREQUENT_CHECK;
4204
4291
  }
4205
4292
 
4206
- void noinline
4207
- ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
4293
+ noinline
4294
+ void
4295
+ ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4208
4296
  {
4209
4297
  clear_pending (EV_A_ (W)w);
4210
4298
  if (expect_false (!ev_is_active (w)))
@@ -4245,7 +4333,7 @@ ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
4245
4333
  #if EV_CHILD_ENABLE
4246
4334
 
4247
4335
  void
4248
- ev_child_start (EV_P_ ev_child *w) EV_THROW
4336
+ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4249
4337
  {
4250
4338
  #if EV_MULTIPLICITY
4251
4339
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
@@ -4262,7 +4350,7 @@ ev_child_start (EV_P_ ev_child *w) EV_THROW
4262
4350
  }
4263
4351
 
4264
4352
  void
4265
- ev_child_stop (EV_P_ ev_child *w) EV_THROW
4353
+ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4266
4354
  {
4267
4355
  clear_pending (EV_A_ (W)w);
4268
4356
  if (expect_false (!ev_is_active (w)))
@@ -4289,14 +4377,15 @@ ev_child_stop (EV_P_ ev_child *w) EV_THROW
4289
4377
  #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4290
4378
  #define MIN_STAT_INTERVAL 0.1074891
4291
4379
 
4292
- static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4380
+ noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4293
4381
 
4294
4382
  #if EV_USE_INOTIFY
4295
4383
 
4296
4384
  /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4297
4385
  # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4298
4386
 
4299
- static void noinline
4387
+ noinline
4388
+ static void
4300
4389
  infy_add (EV_P_ ev_stat *w)
4301
4390
  {
4302
4391
  w->wd = inotify_add_watch (fs_fd, w->path,
@@ -4370,7 +4459,8 @@ infy_add (EV_P_ ev_stat *w)
4370
4459
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
4371
4460
  }
4372
4461
 
4373
- static void noinline
4462
+ noinline
4463
+ static void
4374
4464
  infy_del (EV_P_ ev_stat *w)
4375
4465
  {
4376
4466
  int slot;
@@ -4387,7 +4477,8 @@ infy_del (EV_P_ ev_stat *w)
4387
4477
  inotify_rm_watch (fs_fd, wd);
4388
4478
  }
4389
4479
 
4390
- static void noinline
4480
+ noinline
4481
+ static void
4391
4482
  infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4392
4483
  {
4393
4484
  if (slot < 0)
@@ -4433,7 +4524,8 @@ infy_cb (EV_P_ ev_io *w, int revents)
4433
4524
  }
4434
4525
  }
4435
4526
 
4436
- inline_size void ecb_cold
4527
+ inline_size ecb_cold
4528
+ void
4437
4529
  ev_check_2625 (EV_P)
4438
4530
  {
4439
4531
  /* kernels < 2.6.25 are borked
@@ -4533,7 +4625,7 @@ infy_fork (EV_P)
4533
4625
  #endif
4534
4626
 
4535
4627
  void
4536
- ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
4628
+ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
4537
4629
  {
4538
4630
  if (lstat (w->path, &w->attr) < 0)
4539
4631
  w->attr.st_nlink = 0;
@@ -4541,7 +4633,8 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
4541
4633
  w->attr.st_nlink = 1;
4542
4634
  }
4543
4635
 
4544
- static void noinline
4636
+ noinline
4637
+ static void
4545
4638
  stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4546
4639
  {
4547
4640
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
@@ -4582,7 +4675,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4582
4675
  }
4583
4676
 
4584
4677
  void
4585
- ev_stat_start (EV_P_ ev_stat *w) EV_THROW
4678
+ ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4586
4679
  {
4587
4680
  if (expect_false (ev_is_active (w)))
4588
4681
  return;
@@ -4613,7 +4706,7 @@ ev_stat_start (EV_P_ ev_stat *w) EV_THROW
4613
4706
  }
4614
4707
 
4615
4708
  void
4616
- ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
4709
+ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4617
4710
  {
4618
4711
  clear_pending (EV_A_ (W)w);
4619
4712
  if (expect_false (!ev_is_active (w)))
@@ -4639,7 +4732,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
4639
4732
 
4640
4733
  #if EV_IDLE_ENABLE
4641
4734
  void
4642
- ev_idle_start (EV_P_ ev_idle *w) EV_THROW
4735
+ ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4643
4736
  {
4644
4737
  if (expect_false (ev_is_active (w)))
4645
4738
  return;
@@ -4654,7 +4747,7 @@ ev_idle_start (EV_P_ ev_idle *w) EV_THROW
4654
4747
  ++idleall;
4655
4748
  ev_start (EV_A_ (W)w, active);
4656
4749
 
4657
- array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
4750
+ array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit);
4658
4751
  idles [ABSPRI (w)][active - 1] = w;
4659
4752
  }
4660
4753
 
@@ -4662,7 +4755,7 @@ ev_idle_start (EV_P_ ev_idle *w) EV_THROW
4662
4755
  }
4663
4756
 
4664
4757
  void
4665
- ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
4758
+ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4666
4759
  {
4667
4760
  clear_pending (EV_A_ (W)w);
4668
4761
  if (expect_false (!ev_is_active (w)))
@@ -4686,7 +4779,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
4686
4779
 
4687
4780
  #if EV_PREPARE_ENABLE
4688
4781
  void
4689
- ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
4782
+ ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4690
4783
  {
4691
4784
  if (expect_false (ev_is_active (w)))
4692
4785
  return;
@@ -4694,14 +4787,14 @@ ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
4694
4787
  EV_FREQUENT_CHECK;
4695
4788
 
4696
4789
  ev_start (EV_A_ (W)w, ++preparecnt);
4697
- array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
4790
+ array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit);
4698
4791
  prepares [preparecnt - 1] = w;
4699
4792
 
4700
4793
  EV_FREQUENT_CHECK;
4701
4794
  }
4702
4795
 
4703
4796
  void
4704
- ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
4797
+ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4705
4798
  {
4706
4799
  clear_pending (EV_A_ (W)w);
4707
4800
  if (expect_false (!ev_is_active (w)))
@@ -4724,7 +4817,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
4724
4817
 
4725
4818
  #if EV_CHECK_ENABLE
4726
4819
  void
4727
- ev_check_start (EV_P_ ev_check *w) EV_THROW
4820
+ ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4728
4821
  {
4729
4822
  if (expect_false (ev_is_active (w)))
4730
4823
  return;
@@ -4732,14 +4825,14 @@ ev_check_start (EV_P_ ev_check *w) EV_THROW
4732
4825
  EV_FREQUENT_CHECK;
4733
4826
 
4734
4827
  ev_start (EV_A_ (W)w, ++checkcnt);
4735
- array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
4828
+ array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit);
4736
4829
  checks [checkcnt - 1] = w;
4737
4830
 
4738
4831
  EV_FREQUENT_CHECK;
4739
4832
  }
4740
4833
 
4741
4834
  void
4742
- ev_check_stop (EV_P_ ev_check *w) EV_THROW
4835
+ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4743
4836
  {
4744
4837
  clear_pending (EV_A_ (W)w);
4745
4838
  if (expect_false (!ev_is_active (w)))
@@ -4761,8 +4854,9 @@ ev_check_stop (EV_P_ ev_check *w) EV_THROW
4761
4854
  #endif
4762
4855
 
4763
4856
  #if EV_EMBED_ENABLE
4764
- void noinline
4765
- ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
4857
+ noinline
4858
+ void
4859
+ ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4766
4860
  {
4767
4861
  ev_run (w->other, EVRUN_NOWAIT);
4768
4862
  }
@@ -4820,7 +4914,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4820
4914
  #endif
4821
4915
 
4822
4916
  void
4823
- ev_embed_start (EV_P_ ev_embed *w) EV_THROW
4917
+ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4824
4918
  {
4825
4919
  if (expect_false (ev_is_active (w)))
4826
4920
  return;
@@ -4851,7 +4945,7 @@ ev_embed_start (EV_P_ ev_embed *w) EV_THROW
4851
4945
  }
4852
4946
 
4853
4947
  void
4854
- ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
4948
+ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4855
4949
  {
4856
4950
  clear_pending (EV_A_ (W)w);
4857
4951
  if (expect_false (!ev_is_active (w)))
@@ -4871,7 +4965,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
4871
4965
 
4872
4966
  #if EV_FORK_ENABLE
4873
4967
  void
4874
- ev_fork_start (EV_P_ ev_fork *w) EV_THROW
4968
+ ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4875
4969
  {
4876
4970
  if (expect_false (ev_is_active (w)))
4877
4971
  return;
@@ -4879,14 +4973,14 @@ ev_fork_start (EV_P_ ev_fork *w) EV_THROW
4879
4973
  EV_FREQUENT_CHECK;
4880
4974
 
4881
4975
  ev_start (EV_A_ (W)w, ++forkcnt);
4882
- array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
4976
+ array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit);
4883
4977
  forks [forkcnt - 1] = w;
4884
4978
 
4885
4979
  EV_FREQUENT_CHECK;
4886
4980
  }
4887
4981
 
4888
4982
  void
4889
- ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
4983
+ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4890
4984
  {
4891
4985
  clear_pending (EV_A_ (W)w);
4892
4986
  if (expect_false (!ev_is_active (w)))
@@ -4909,7 +5003,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
4909
5003
 
4910
5004
  #if EV_CLEANUP_ENABLE
4911
5005
  void
4912
- ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
5006
+ ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4913
5007
  {
4914
5008
  if (expect_false (ev_is_active (w)))
4915
5009
  return;
@@ -4917,7 +5011,7 @@ ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
4917
5011
  EV_FREQUENT_CHECK;
4918
5012
 
4919
5013
  ev_start (EV_A_ (W)w, ++cleanupcnt);
4920
- array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
5014
+ array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit);
4921
5015
  cleanups [cleanupcnt - 1] = w;
4922
5016
 
4923
5017
  /* cleanup watchers should never keep a refcount on the loop */
@@ -4926,7 +5020,7 @@ ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
4926
5020
  }
4927
5021
 
4928
5022
  void
4929
- ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
5023
+ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4930
5024
  {
4931
5025
  clear_pending (EV_A_ (W)w);
4932
5026
  if (expect_false (!ev_is_active (w)))
@@ -4950,7 +5044,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
4950
5044
 
4951
5045
  #if EV_ASYNC_ENABLE
4952
5046
  void
4953
- ev_async_start (EV_P_ ev_async *w) EV_THROW
5047
+ ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
4954
5048
  {
4955
5049
  if (expect_false (ev_is_active (w)))
4956
5050
  return;
@@ -4962,14 +5056,14 @@ ev_async_start (EV_P_ ev_async *w) EV_THROW
4962
5056
  EV_FREQUENT_CHECK;
4963
5057
 
4964
5058
  ev_start (EV_A_ (W)w, ++asynccnt);
4965
- array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
5059
+ array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit);
4966
5060
  asyncs [asynccnt - 1] = w;
4967
5061
 
4968
5062
  EV_FREQUENT_CHECK;
4969
5063
  }
4970
5064
 
4971
5065
  void
4972
- ev_async_stop (EV_P_ ev_async *w) EV_THROW
5066
+ ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
4973
5067
  {
4974
5068
  clear_pending (EV_A_ (W)w);
4975
5069
  if (expect_false (!ev_is_active (w)))
@@ -4990,7 +5084,7 @@ ev_async_stop (EV_P_ ev_async *w) EV_THROW
4990
5084
  }
4991
5085
 
4992
5086
  void
4993
- ev_async_send (EV_P_ ev_async *w) EV_THROW
5087
+ ev_async_send (EV_P_ ev_async *w) EV_NOEXCEPT
4994
5088
  {
4995
5089
  w->sent = 1;
4996
5090
  evpipe_write (EV_A_ &async_pending);
@@ -5037,16 +5131,10 @@ once_cb_to (EV_P_ ev_timer *w, int revents)
5037
5131
  }
5038
5132
 
5039
5133
  void
5040
- ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
5134
+ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_NOEXCEPT
5041
5135
  {
5042
5136
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
5043
5137
 
5044
- if (expect_false (!once))
5045
- {
5046
- cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
5047
- return;
5048
- }
5049
-
5050
5138
  once->cb = cb;
5051
5139
  once->arg = arg;
5052
5140
 
@@ -5068,8 +5156,9 @@ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, vo
5068
5156
  /*****************************************************************************/
5069
5157
 
5070
5158
  #if EV_WALK_ENABLE
5071
- void ecb_cold
5072
- ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
5159
+ ecb_cold
5160
+ void
5161
+ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
5073
5162
  {
5074
5163
  int i, j;
5075
5164
  ev_watcher_list *wl, *wn;