nio4r 2.5.2-java → 2.5.7-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/workflow.yml +47 -0
- data/.rubocop.yml +30 -11
- data/CHANGES.md +41 -1
- data/Gemfile +1 -1
- data/README.md +7 -24
- data/examples/echo_server.rb +2 -2
- data/ext/libev/Changes +71 -2
- data/ext/libev/ev.c +611 -198
- data/ext/libev/ev.h +25 -22
- data/ext/libev/ev_epoll.c +16 -14
- data/ext/libev/ev_iouring.c +694 -0
- data/ext/libev/ev_kqueue.c +4 -4
- data/ext/libev/ev_linuxaio.c +78 -100
- data/ext/libev/ev_poll.c +6 -6
- data/ext/libev/ev_port.c +3 -3
- data/ext/libev/ev_select.c +6 -6
- data/ext/libev/ev_vars.h +34 -0
- data/ext/libev/ev_win32.c +2 -2
- data/ext/libev/ev_wrap.h +56 -0
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +8 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -31
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/org/nio4r/ByteBuffer.java +2 -0
- data/ext/nio4r/org/nio4r/Monitor.java +1 -0
- data/ext/nio4r/org/nio4r/Selector.java +8 -10
- data/ext/nio4r/selector.c +66 -51
- data/lib/nio.rb +20 -1
- data/lib/nio/bytebuffer.rb +4 -0
- data/lib/nio/monitor.rb +1 -1
- data/lib/nio/selector.rb +12 -10
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +2 -2
- data/spec/nio/bytebuffer_spec.rb +0 -1
- data/spec/nio/selectables/ssl_socket_spec.rb +3 -1
- data/spec/nio/selectables/udp_socket_spec.rb +2 -2
- data/spec/nio/selector_spec.rb +4 -1
- data/spec/spec_helper.rb +2 -0
- metadata +11 -12
- data/.travis.yml +0 -44
- data/Guardfile +0 -10
- data/appveyor.yml +0 -40
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: d6daa634b6f4cfbcdd530e3dbb65c555a35404e6896d7e7dbee481b8108c7358
|
4
|
+
data.tar.gz: c27efb4d745aa9ff88d214bc71b2bcbbe4ca85e247c7d95709723ba75408e1f8
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: e26a8cf8cfa5949ad8f3f47be63b22bf98963b9dfe56c2ec4aaa0929d55b6a02317ff736dac8e166049e9a41407b6eaeac0518161a43c83b7fb0d538797f7a0e
|
7
|
+
data.tar.gz: fdd8ed7bf531f84bd309406af5d5add9e87c4ba7ec0ee4da7e97c25bec2f572ca999c6f06dfd1c01310228d10c9cf1f959a79e4a88d160d1ed1080fbc29074cd
|
@@ -0,0 +1,47 @@
|
|
1
|
+
name: nio4r
|
2
|
+
|
3
|
+
on: [push, pull_request]
|
4
|
+
|
5
|
+
jobs:
|
6
|
+
build:
|
7
|
+
name: >-
|
8
|
+
${{matrix.os}}, ${{matrix.ruby}}
|
9
|
+
env:
|
10
|
+
CI: true
|
11
|
+
TESTOPTS: -v
|
12
|
+
|
13
|
+
runs-on: ${{matrix.os}}
|
14
|
+
strategy:
|
15
|
+
fail-fast: false
|
16
|
+
matrix:
|
17
|
+
os: [ ubuntu-20.04, ubuntu-18.04, macos-10.15, windows-2019 ]
|
18
|
+
ruby: [ head, 3.0, 2.7, 2.6, 2.5, 2.4, jruby, truffleruby-head ]
|
19
|
+
include:
|
20
|
+
- { os: ubuntu-16.04, ruby: 3.0 }
|
21
|
+
- { os: ubuntu-16.04, ruby: 2.4 }
|
22
|
+
exclude:
|
23
|
+
- { os: windows-2019, ruby: head }
|
24
|
+
- { os: windows-2019, ruby: jruby }
|
25
|
+
- { os: windows-2019, ruby: truffleruby-head }
|
26
|
+
|
27
|
+
steps:
|
28
|
+
- name: repo checkout
|
29
|
+
uses: actions/checkout@v2
|
30
|
+
|
31
|
+
- name: load ruby
|
32
|
+
uses: ruby/setup-ruby@v1
|
33
|
+
with:
|
34
|
+
ruby-version: ${{matrix.ruby}}
|
35
|
+
|
36
|
+
- name: RubyGems, Bundler Update
|
37
|
+
run: gem update --system --no-document --conservative
|
38
|
+
|
39
|
+
- name: bundle install
|
40
|
+
run: bundle install --path .bundle/gems --without development
|
41
|
+
|
42
|
+
- name: compile
|
43
|
+
run: bundle exec rake compile
|
44
|
+
|
45
|
+
- name: test
|
46
|
+
run: bundle exec rake spec
|
47
|
+
timeout-minutes: 10
|
data/.rubocop.yml
CHANGED
@@ -1,23 +1,40 @@
|
|
1
1
|
AllCops:
|
2
|
-
TargetRubyVersion: 2.
|
2
|
+
TargetRubyVersion: 2.4
|
3
3
|
DisplayCopNames: true
|
4
4
|
|
5
|
+
Layout/HashAlignment:
|
6
|
+
Enabled: false
|
7
|
+
|
8
|
+
Layout/LineLength:
|
9
|
+
Max: 128
|
10
|
+
|
11
|
+
Layout/SpaceAroundMethodCallOperator:
|
12
|
+
Enabled: false
|
13
|
+
|
5
14
|
Layout/SpaceInsideBlockBraces:
|
6
15
|
Enabled: false
|
7
16
|
|
8
17
|
Style/IfUnlessModifier:
|
9
18
|
Enabled: false
|
10
19
|
|
20
|
+
Style/UnpackFirst:
|
21
|
+
Enabled: false
|
22
|
+
|
11
23
|
#
|
12
24
|
# Lint
|
13
25
|
#
|
14
26
|
|
15
|
-
Lint/
|
27
|
+
Lint/SuppressedException:
|
16
28
|
Enabled: false
|
17
29
|
|
18
30
|
Lint/Loop:
|
19
31
|
Enabled: false
|
20
32
|
|
33
|
+
Lint/RaiseException:
|
34
|
+
Enabled: false
|
35
|
+
|
36
|
+
Lint/StructNewOverride:
|
37
|
+
Enabled: false
|
21
38
|
#
|
22
39
|
# Metrics
|
23
40
|
#
|
@@ -32,9 +49,6 @@ Metrics/BlockLength:
|
|
32
49
|
Metrics/ClassLength:
|
33
50
|
Max: 128
|
34
51
|
|
35
|
-
Metrics/LineLength:
|
36
|
-
Max: 128
|
37
|
-
|
38
52
|
Metrics/MethodLength:
|
39
53
|
CountComments: false
|
40
54
|
Max: 50
|
@@ -46,16 +60,12 @@ Metrics/PerceivedComplexity:
|
|
46
60
|
Max: 15
|
47
61
|
|
48
62
|
#
|
49
|
-
#
|
63
|
+
# Style
|
50
64
|
#
|
51
65
|
|
52
|
-
|
66
|
+
Style/ExponentialNotation:
|
53
67
|
Enabled: false
|
54
68
|
|
55
|
-
#
|
56
|
-
# Style
|
57
|
-
#
|
58
|
-
|
59
69
|
Style/FormatStringToken:
|
60
70
|
Enabled: false
|
61
71
|
|
@@ -65,6 +75,15 @@ Style/FrozenStringLiteralComment:
|
|
65
75
|
Style/GlobalVars:
|
66
76
|
Enabled: false
|
67
77
|
|
78
|
+
Style/HashEachMethods:
|
79
|
+
Enabled: false
|
80
|
+
|
81
|
+
Style/HashTransformKeys:
|
82
|
+
Enabled: false
|
83
|
+
|
84
|
+
Style/HashTransformValues:
|
85
|
+
Enabled: false
|
86
|
+
|
68
87
|
Style/NumericPredicate:
|
69
88
|
Enabled: false
|
70
89
|
|
data/CHANGES.md
CHANGED
@@ -1,3 +1,39 @@
|
|
1
|
+
## 2.5.5 (2021-02-05)
|
2
|
+
|
3
|
+
* [#256](https://github.com/socketry/nio4r/pull/256)
|
4
|
+
Use libev 4.33, featuring experimental `io_uring` support.
|
5
|
+
([@jcmfernandes])
|
6
|
+
|
7
|
+
* [#260](https://github.com/socketry/nio4r/pull/260)
|
8
|
+
Workaround for ARM-based macOS Ruby: Use pure Ruby for M1, since the native extension is crashing on M1 (arm64).
|
9
|
+
([@jasl])
|
10
|
+
|
11
|
+
* [#252](https://github.com/socketry/nio4r/pull/252)
|
12
|
+
JRuby: Fix javac -Xlint warnings
|
13
|
+
([@headius])
|
14
|
+
|
15
|
+
## 2.5.4 (2020-09-16)
|
16
|
+
|
17
|
+
* [#251](https://github.com/socketry/nio4r/issues/251)
|
18
|
+
Intermittent SEGV during GC.
|
19
|
+
([@boazsegev])
|
20
|
+
|
21
|
+
## 2.5.3 (2020-09-07)
|
22
|
+
|
23
|
+
* [#241](https://github.com/socketry/nio4r/issues/241)
|
24
|
+
Possible bug with Ruby >= 2.7.0 and `GC.compact`.
|
25
|
+
([@boazsegev])
|
26
|
+
|
27
|
+
## 2.5.2 (2019-09-24)
|
28
|
+
|
29
|
+
* [#220](https://github.com/socketry/nio4r/issues/220)
|
30
|
+
Update to libev-4.27 & fix assorted warnings.
|
31
|
+
([@ioquatix])
|
32
|
+
|
33
|
+
* [#225](https://github.com/socketry/nio4r/issues/225)
|
34
|
+
Avoid need for linux headers.
|
35
|
+
([@ioquatix])
|
36
|
+
|
1
37
|
## 2.4.0 (2019-07-07)
|
2
38
|
|
3
39
|
* [#211](https://github.com/socketry/nio4r/pull/211)
|
@@ -9,7 +45,7 @@
|
|
9
45
|
|
10
46
|
* Assorted fixes for TruffleRuby & JRuby.
|
11
47
|
([@eregon], [@olleolleolle])
|
12
|
-
|
48
|
+
Possible bug with Ruby >= 2.7.0 and `GC.compact`
|
13
49
|
* Update libev to v4.25.
|
14
50
|
([@ioquatix])
|
15
51
|
|
@@ -242,3 +278,7 @@
|
|
242
278
|
[@ioquatix]: https://github.com/ioquatix
|
243
279
|
[@eregon]: https://github.com/eregon
|
244
280
|
[@olleolleolle]: https://github.com/olleolleolle
|
281
|
+
[@boazsegev]: https://github.com/boazsegev
|
282
|
+
[@headius]: https://github.com/headius
|
283
|
+
[@jasl]: https://github.com/jasl
|
284
|
+
[@jcmfernandes]: https://github.com/jcmfernandes
|
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -1,17 +1,10 @@
|
|
1
1
|
# 
|
2
2
|
|
3
3
|
[](http://rubygems.org/gems/nio4r)
|
4
|
-
[](https://ci.appveyor.com/project/tarcieri/nio4r/branch/master)
|
4
|
+
[](https://github.com/socketry/nio4r/actions?query=workflow:nio4r)
|
6
5
|
[](https://codeclimate.com/github/socketry/nio4r)
|
7
6
|
[](https://coveralls.io/r/socketry/nio4r)
|
8
7
|
[](http://www.rubydoc.info/gems/nio4r/2.2.0)
|
9
|
-
[](https://github.com/socketry/nio4r/blob/master/LICENSE.txt)
|
10
|
-
|
11
|
-
_NOTE: This is the 2.x **stable** branch of nio4r. For the 1.x **legacy** branch,
|
12
|
-
please see:_
|
13
|
-
|
14
|
-
https://github.com/socketry/nio4r/tree/1-x-stable
|
15
8
|
|
16
9
|
**New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for
|
17
10
|
scalable network clients and servers. Modeled after the Java NIO API, but
|
@@ -25,13 +18,13 @@ writing.
|
|
25
18
|
## Projects using nio4r
|
26
19
|
|
27
20
|
* [ActionCable]: Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
|
28
|
-
* [Celluloid
|
29
|
-
* [
|
21
|
+
* [Celluloid]: Actor-based concurrency framework, uses nio4r for async I/O
|
22
|
+
* [Async]: Asynchronous I/O framework for Ruby
|
30
23
|
* [Puma]: Ruby/Rack web server built for concurrency
|
31
24
|
|
32
25
|
[ActionCable]: https://rubygems.org/gems/actioncable
|
33
|
-
[Celluloid
|
34
|
-
[
|
26
|
+
[Celluloid]: https://github.com/celluloid/celluloid-io
|
27
|
+
[Async]: https://github.com/socketry/async
|
35
28
|
[Puma]: https://github.com/puma/puma
|
36
29
|
|
37
30
|
## Goals
|
@@ -43,10 +36,11 @@ writing.
|
|
43
36
|
|
44
37
|
## Supported platforms
|
45
38
|
|
46
|
-
* Ruby 2.3
|
47
39
|
* Ruby 2.4
|
48
40
|
* Ruby 2.5
|
49
41
|
* Ruby 2.6
|
42
|
+
* Ruby 2.7
|
43
|
+
* Ruby 3.0
|
50
44
|
* [JRuby](https://github.com/jruby/jruby)
|
51
45
|
* [TruffleRuby](https://github.com/oracle/truffleruby)
|
52
46
|
|
@@ -56,17 +50,6 @@ writing.
|
|
56
50
|
* **Java NIO**: JRuby extension which wraps the Java NIO subsystem
|
57
51
|
* **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter
|
58
52
|
|
59
|
-
## Discussion
|
60
|
-
|
61
|
-
For discussion and general help with nio4r, email
|
62
|
-
[socketry+subscribe@googlegroups.com][subscribe]
|
63
|
-
or join on the web via the [Google Group].
|
64
|
-
|
65
|
-
We're also on IRC at ##socketry on irc.freenode.net.
|
66
|
-
|
67
|
-
[subscribe]: mailto:socketry+subscribe@googlegroups.com
|
68
|
-
[google group]: https://groups.google.com/group/socketry
|
69
|
-
|
70
53
|
## Documentation
|
71
54
|
|
72
55
|
[Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki)
|
data/examples/echo_server.rb
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
#!/usr/bin/env ruby
|
2
2
|
# frozen_string_literal: true
|
3
3
|
|
4
|
-
$LOAD_PATH.push File.expand_path("
|
4
|
+
$LOAD_PATH.push File.expand_path("../lib", __dir__)
|
5
5
|
require "nio"
|
6
6
|
require "socket"
|
7
7
|
|
@@ -19,7 +19,7 @@ class EchoServer
|
|
19
19
|
|
20
20
|
def run
|
21
21
|
loop do
|
22
|
-
@selector.select { |monitor| monitor.value.call
|
22
|
+
@selector.select { |monitor| monitor.value.call }
|
23
23
|
end
|
24
24
|
end
|
25
25
|
|
data/ext/libev/Changes
CHANGED
@@ -1,8 +1,77 @@
|
|
1
1
|
Revision history for libev, a high-performance and full-featured event loop.
|
2
2
|
|
3
|
+
TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter.
|
4
|
+
TODO: document EV_TSTAMP_T
|
5
|
+
|
6
|
+
4.33 Wed Mar 18 13:22:29 CET 2020
|
7
|
+
- no changes w.r.t. 4.32.
|
8
|
+
|
9
|
+
4.32 (EV only)
|
10
|
+
- the 4.31 timerfd code wrongly changed the priority of the signal
|
11
|
+
fd watcher, which is usually harmless unless signal fds are
|
12
|
+
also used (found via cpan tester service).
|
13
|
+
- the documentation wrongly claimed that user may modify fd and events
|
14
|
+
members in io watchers when the watcher was stopped
|
15
|
+
(found by b_jonas).
|
16
|
+
- new ev_io_modify mutator which changes only the events member,
|
17
|
+
which can be faster. also added ev::io::set (int events) method
|
18
|
+
to ev++.h.
|
19
|
+
- officially allow a zero events mask for io watchers. this should
|
20
|
+
work with older libev versions as well but was not officially
|
21
|
+
allowed before.
|
22
|
+
- do not wake up every minute when timerfd is used to detect timejumps.
|
23
|
+
- do not wake up every minute when periodics are disabled and we have
|
24
|
+
a monotonic clock.
|
25
|
+
- support a lot more "uncommon" compile time configurations,
|
26
|
+
such as ev_embed enabled but ev_timer disabled.
|
27
|
+
- use a start/stop wrapper class to reduce code duplication in
|
28
|
+
ev++.h and make it needlessly more c++-y.
|
29
|
+
- the linux aio backend is no longer compiled in by default.
|
30
|
+
- update to libecb version 0x00010008.
|
31
|
+
|
32
|
+
4.31 Fri Dec 20 21:58:29 CET 2019
|
33
|
+
- handle backends with minimum wait time a bit better by not
|
34
|
+
waiting in the presence of already-expired timers
|
35
|
+
(behaviour reported by Felipe Gasper).
|
36
|
+
- new feature: use timerfd to detect timejumps quickly,
|
37
|
+
can be disabled with the new EVFLAG_NOTIMERFD loop flag.
|
38
|
+
- document EV_USE_SIGNALFD feature macro.
|
39
|
+
|
40
|
+
4.30 (EV only)
|
41
|
+
- change non-autoconf test for __kernel_rwf_t by testing
|
42
|
+
LINUX_VERSION_CODE, the most direct test I could find.
|
43
|
+
- fix a bug in the io_uring backend that polled the wrong
|
44
|
+
backend fd, causing it to not work in many cases.
|
45
|
+
|
46
|
+
4.29 (EV only)
|
47
|
+
- add io uring autoconf and non-autoconf detection.
|
48
|
+
- disable io_uring when some header files are too old.
|
49
|
+
|
50
|
+
4.28 (EV only)
|
51
|
+
- linuxaio backend resulted in random memory corruption
|
52
|
+
when loop is forked.
|
53
|
+
- linuxaio backend might have tried to cancel an iocb
|
54
|
+
multiple times (was unable to trigger this).
|
55
|
+
- linuxaio backend now employs a generation counter to
|
56
|
+
avoid handling spurious events from cancelled requests.
|
57
|
+
- io_cancel can return EINTR, deal with it. also, assume
|
58
|
+
io_submit also returns EINTR.
|
59
|
+
- fix some other minor bugs in linuxaio backend.
|
60
|
+
- ev_tstamp type can now be overriden by defining EV_TSTAMP_T.
|
61
|
+
- cleanup: replace expect_true/false and noinline by their
|
62
|
+
libecb counterparts.
|
63
|
+
- move syscall infrastructure from ev_linuxaio.c to ev.c.
|
64
|
+
- prepare io_uring integration.
|
65
|
+
- tweak ev_floor.
|
66
|
+
- epoll, poll, win32 Sleep and other places that use millisecond
|
67
|
+
reslution now all try to round up times.
|
68
|
+
- solaris port backend didn't compile.
|
69
|
+
- abstract time constants into their macros, for more flexibility.
|
70
|
+
|
3
71
|
4.27 Thu Jun 27 22:43:44 CEST 2019
|
4
|
-
- linux aio backend almost
|
72
|
+
- linux aio backend almost completely rewritten to work around its
|
5
73
|
limitations.
|
74
|
+
- linux aio backend now requires linux 4.19+.
|
6
75
|
- epoll backend now mandatory for linux aio backend.
|
7
76
|
- fail assertions more aggressively on invalid fd's detected
|
8
77
|
in the event loop, do not just silently fd_kill in case of
|
@@ -22,7 +91,7 @@ Revision history for libev, a high-performance and full-featured event loop.
|
|
22
91
|
4.25 Fri Dec 21 07:49:20 CET 2018
|
23
92
|
- INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
|
24
93
|
(EV_THROW still provided) and now uses noexcept on C++11 or newer.
|
25
|
-
- move the darwin select workaround
|
94
|
+
- move the darwin select workaround higher in ev.c, as newer versions of
|
26
95
|
darwin managed to break their broken select even more.
|
27
96
|
- ANDROID => __ANDROID__ (reported by enh@google.com).
|
28
97
|
- disable epoll_create1 on android because it has broken header files
|
data/ext/libev/ev.c
CHANGED
@@ -116,7 +116,7 @@
|
|
116
116
|
# undef EV_USE_POLL
|
117
117
|
# define EV_USE_POLL 0
|
118
118
|
# endif
|
119
|
-
|
119
|
+
|
120
120
|
# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
|
121
121
|
# ifndef EV_USE_EPOLL
|
122
122
|
# define EV_USE_EPOLL EV_FEATURE_BACKENDS
|
@@ -125,16 +125,25 @@
|
|
125
125
|
# undef EV_USE_EPOLL
|
126
126
|
# define EV_USE_EPOLL 0
|
127
127
|
# endif
|
128
|
-
|
128
|
+
|
129
129
|
# if HAVE_LINUX_AIO_ABI_H
|
130
130
|
# ifndef EV_USE_LINUXAIO
|
131
|
-
# define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
|
131
|
+
# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
|
132
132
|
# endif
|
133
133
|
# else
|
134
134
|
# undef EV_USE_LINUXAIO
|
135
135
|
# define EV_USE_LINUXAIO 0
|
136
136
|
# endif
|
137
|
-
|
137
|
+
|
138
|
+
# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
|
139
|
+
# ifndef EV_USE_IOURING
|
140
|
+
# define EV_USE_IOURING EV_FEATURE_BACKENDS
|
141
|
+
# endif
|
142
|
+
# else
|
143
|
+
# undef EV_USE_IOURING
|
144
|
+
# define EV_USE_IOURING 0
|
145
|
+
# endif
|
146
|
+
|
138
147
|
# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
|
139
148
|
# ifndef EV_USE_KQUEUE
|
140
149
|
# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
|
@@ -143,7 +152,7 @@
|
|
143
152
|
# undef EV_USE_KQUEUE
|
144
153
|
# define EV_USE_KQUEUE 0
|
145
154
|
# endif
|
146
|
-
|
155
|
+
|
147
156
|
# if HAVE_PORT_H && HAVE_PORT_CREATE
|
148
157
|
# ifndef EV_USE_PORT
|
149
158
|
# define EV_USE_PORT EV_FEATURE_BACKENDS
|
@@ -179,7 +188,16 @@
|
|
179
188
|
# undef EV_USE_EVENTFD
|
180
189
|
# define EV_USE_EVENTFD 0
|
181
190
|
# endif
|
182
|
-
|
191
|
+
|
192
|
+
# if HAVE_SYS_TIMERFD_H
|
193
|
+
# ifndef EV_USE_TIMERFD
|
194
|
+
# define EV_USE_TIMERFD EV_FEATURE_OS
|
195
|
+
# endif
|
196
|
+
# else
|
197
|
+
# undef EV_USE_TIMERFD
|
198
|
+
# define EV_USE_TIMERFD 0
|
199
|
+
# endif
|
200
|
+
|
183
201
|
#endif
|
184
202
|
|
185
203
|
/* OS X, in its infinite idiocy, actually HARDCODES
|
@@ -335,6 +353,14 @@
|
|
335
353
|
# define EV_USE_PORT 0
|
336
354
|
#endif
|
337
355
|
|
356
|
+
#ifndef EV_USE_LINUXAIO
|
357
|
+
# define EV_USE_LINUXAIO 0
|
358
|
+
#endif
|
359
|
+
|
360
|
+
#ifndef EV_USE_IOURING
|
361
|
+
# define EV_USE_IOURING 0
|
362
|
+
#endif
|
363
|
+
|
338
364
|
#ifndef EV_USE_INOTIFY
|
339
365
|
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
|
340
366
|
# define EV_USE_INOTIFY EV_FEATURE_OS
|
@@ -367,6 +393,14 @@
|
|
367
393
|
# endif
|
368
394
|
#endif
|
369
395
|
|
396
|
+
#ifndef EV_USE_TIMERFD
|
397
|
+
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
|
398
|
+
# define EV_USE_TIMERFD EV_FEATURE_OS
|
399
|
+
# else
|
400
|
+
# define EV_USE_TIMERFD 0
|
401
|
+
# endif
|
402
|
+
#endif
|
403
|
+
|
370
404
|
#if 0 /* debugging */
|
371
405
|
# define EV_VERIFY 3
|
372
406
|
# define EV_USE_4HEAP 1
|
@@ -409,6 +443,7 @@
|
|
409
443
|
# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
|
410
444
|
# undef EV_USE_MONOTONIC
|
411
445
|
# define EV_USE_MONOTONIC 1
|
446
|
+
# define EV_NEED_SYSCALL 1
|
412
447
|
# else
|
413
448
|
# undef EV_USE_CLOCK_SYSCALL
|
414
449
|
# define EV_USE_CLOCK_SYSCALL 0
|
@@ -441,12 +476,29 @@
|
|
441
476
|
|
442
477
|
#if EV_USE_LINUXAIO
|
443
478
|
# include <sys/syscall.h>
|
444
|
-
# if
|
479
|
+
# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
|
480
|
+
# define EV_NEED_SYSCALL 1
|
481
|
+
# else
|
445
482
|
# undef EV_USE_LINUXAIO
|
446
483
|
# define EV_USE_LINUXAIO 0
|
447
484
|
# endif
|
448
485
|
#endif
|
449
486
|
|
487
|
+
#if EV_USE_IOURING
|
488
|
+
# include <sys/syscall.h>
|
489
|
+
# if !SYS_io_uring_setup && __linux && !__alpha
|
490
|
+
# define SYS_io_uring_setup 425
|
491
|
+
# define SYS_io_uring_enter 426
|
492
|
+
# define SYS_io_uring_wregister 427
|
493
|
+
# endif
|
494
|
+
# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
|
495
|
+
# define EV_NEED_SYSCALL 1
|
496
|
+
# else
|
497
|
+
# undef EV_USE_IOURING
|
498
|
+
# define EV_USE_IOURING 0
|
499
|
+
# endif
|
500
|
+
#endif
|
501
|
+
|
450
502
|
#if EV_USE_INOTIFY
|
451
503
|
# include <sys/statfs.h>
|
452
504
|
# include <sys/inotify.h>
|
@@ -458,7 +510,7 @@
|
|
458
510
|
#endif
|
459
511
|
|
460
512
|
#if EV_USE_EVENTFD
|
461
|
-
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
|
513
|
+
/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
|
462
514
|
# include <stdint.h>
|
463
515
|
# ifndef EFD_NONBLOCK
|
464
516
|
# define EFD_NONBLOCK O_NONBLOCK
|
@@ -474,7 +526,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
|
|
474
526
|
#endif
|
475
527
|
|
476
528
|
#if EV_USE_SIGNALFD
|
477
|
-
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
|
529
|
+
/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
|
478
530
|
# include <stdint.h>
|
479
531
|
# ifndef SFD_NONBLOCK
|
480
532
|
# define SFD_NONBLOCK O_NONBLOCK
|
@@ -486,7 +538,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
|
|
486
538
|
# define SFD_CLOEXEC 02000000
|
487
539
|
# endif
|
488
540
|
# endif
|
489
|
-
EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
|
541
|
+
EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
|
490
542
|
|
491
543
|
struct signalfd_siginfo
|
492
544
|
{
|
@@ -495,7 +547,17 @@ struct signalfd_siginfo
|
|
495
547
|
};
|
496
548
|
#endif
|
497
549
|
|
498
|
-
|
550
|
+
/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
|
551
|
+
#if EV_USE_TIMERFD
|
552
|
+
# include <sys/timerfd.h>
|
553
|
+
/* timerfd is only used for periodics */
|
554
|
+
# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
|
555
|
+
# undef EV_USE_TIMERFD
|
556
|
+
# define EV_USE_TIMERFD 0
|
557
|
+
# endif
|
558
|
+
#endif
|
559
|
+
|
560
|
+
/*****************************************************************************/
|
499
561
|
|
500
562
|
#if EV_VERIFY >= 3
|
501
563
|
# define EV_FREQUENT_CHECK ev_verify (EV_A)
|
@@ -510,18 +572,34 @@ struct signalfd_siginfo
|
|
510
572
|
#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
|
511
573
|
/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
|
512
574
|
|
513
|
-
#define MIN_TIMEJUMP
|
514
|
-
#define MAX_BLOCKTIME
|
575
|
+
#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
|
576
|
+
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
|
577
|
+
#define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
|
578
|
+
|
579
|
+
/* find a portable timestamp that is "always" in the future but fits into time_t.
|
580
|
+
* this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
|
581
|
+
* and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
|
582
|
+
#define EV_TSTAMP_HUGE \
|
583
|
+
(sizeof (time_t) >= 8 ? 10000000000000. \
|
584
|
+
: 0 < (time_t)4294967295 ? 4294967295. \
|
585
|
+
: 2147483647.) \
|
515
586
|
|
516
|
-
#
|
517
|
-
#define
|
587
|
+
#ifndef EV_TS_CONST
|
588
|
+
# define EV_TS_CONST(nv) nv
|
589
|
+
# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
|
590
|
+
# define EV_TS_FROM_USEC(us) us * 1e-6
|
591
|
+
# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
|
592
|
+
# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
|
593
|
+
# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
|
594
|
+
# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
|
595
|
+
#endif
|
518
596
|
|
519
597
|
/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
|
520
598
|
/* ECB.H BEGIN */
|
521
599
|
/*
|
522
600
|
* libecb - http://software.schmorp.de/pkg/libecb
|
523
601
|
*
|
524
|
-
* Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
|
602
|
+
* Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
|
525
603
|
* Copyright (©) 2011 Emanuele Giaquinta
|
526
604
|
* All rights reserved.
|
527
605
|
*
|
@@ -562,15 +640,23 @@ struct signalfd_siginfo
|
|
562
640
|
#define ECB_H
|
563
641
|
|
564
642
|
/* 16 bits major, 16 bits minor */
|
565
|
-
#define ECB_VERSION
|
643
|
+
#define ECB_VERSION 0x00010008
|
566
644
|
|
567
|
-
#
|
645
|
+
#include <string.h> /* for memcpy */
|
646
|
+
|
647
|
+
#if defined (_WIN32) && !defined (__MINGW32__)
|
568
648
|
typedef signed char int8_t;
|
569
649
|
typedef unsigned char uint8_t;
|
650
|
+
typedef signed char int_fast8_t;
|
651
|
+
typedef unsigned char uint_fast8_t;
|
570
652
|
typedef signed short int16_t;
|
571
653
|
typedef unsigned short uint16_t;
|
654
|
+
typedef signed int int_fast16_t;
|
655
|
+
typedef unsigned int uint_fast16_t;
|
572
656
|
typedef signed int int32_t;
|
573
657
|
typedef unsigned int uint32_t;
|
658
|
+
typedef signed int int_fast32_t;
|
659
|
+
typedef unsigned int uint_fast32_t;
|
574
660
|
#if __GNUC__
|
575
661
|
typedef signed long long int64_t;
|
576
662
|
typedef unsigned long long uint64_t;
|
@@ -578,6 +664,8 @@ struct signalfd_siginfo
|
|
578
664
|
typedef signed __int64 int64_t;
|
579
665
|
typedef unsigned __int64 uint64_t;
|
580
666
|
#endif
|
667
|
+
typedef int64_t int_fast64_t;
|
668
|
+
typedef uint64_t uint_fast64_t;
|
581
669
|
#ifdef _WIN64
|
582
670
|
#define ECB_PTRSIZE 8
|
583
671
|
typedef uint64_t uintptr_t;
|
@@ -599,6 +687,14 @@ struct signalfd_siginfo
|
|
599
687
|
#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
|
600
688
|
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
|
601
689
|
|
690
|
+
#ifndef ECB_OPTIMIZE_SIZE
|
691
|
+
#if __OPTIMIZE_SIZE__
|
692
|
+
#define ECB_OPTIMIZE_SIZE 1
|
693
|
+
#else
|
694
|
+
#define ECB_OPTIMIZE_SIZE 0
|
695
|
+
#endif
|
696
|
+
#endif
|
697
|
+
|
602
698
|
/* work around x32 idiocy by defining proper macros */
|
603
699
|
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
|
604
700
|
#if _ILP32
|
@@ -1114,6 +1210,44 @@ ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { retu
|
|
1114
1210
|
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
|
1115
1211
|
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
|
1116
1212
|
|
1213
|
+
#if ECB_CPP
|
1214
|
+
|
1215
|
+
inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
|
1216
|
+
inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
|
1217
|
+
inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
|
1218
|
+
inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
|
1219
|
+
|
1220
|
+
inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
|
1221
|
+
inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
|
1222
|
+
inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
|
1223
|
+
inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
|
1224
|
+
|
1225
|
+
inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
|
1226
|
+
inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
|
1227
|
+
inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
|
1228
|
+
inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
|
1229
|
+
|
1230
|
+
inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
|
1231
|
+
inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
|
1232
|
+
inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
|
1233
|
+
inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
|
1234
|
+
|
1235
|
+
inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
|
1236
|
+
inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
|
1237
|
+
inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
|
1238
|
+
|
1239
|
+
inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
|
1240
|
+
inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
|
1241
|
+
inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
|
1242
|
+
inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
|
1243
|
+
|
1244
|
+
inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
|
1245
|
+
inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
|
1246
|
+
inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
|
1247
|
+
inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
|
1248
|
+
|
1249
|
+
#endif
|
1250
|
+
|
1117
1251
|
#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
|
1118
1252
|
#if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
|
1119
1253
|
#define ecb_bswap16(x) __builtin_bswap16 (x)
|
@@ -1194,6 +1328,78 @@ ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_he
|
|
1194
1328
|
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
|
1195
1329
|
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
|
1196
1330
|
|
1331
|
+
/*****************************************************************************/
|
1332
|
+
/* unaligned load/store */
|
1333
|
+
|
1334
|
+
ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
|
1335
|
+
ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
|
1336
|
+
ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
|
1337
|
+
|
1338
|
+
ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
|
1339
|
+
ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
|
1340
|
+
ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
|
1341
|
+
|
1342
|
+
ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1343
|
+
ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1344
|
+
ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1345
|
+
|
1346
|
+
ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
|
1347
|
+
ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
|
1348
|
+
ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
|
1349
|
+
|
1350
|
+
ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
|
1351
|
+
ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
|
1352
|
+
ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
|
1353
|
+
|
1354
|
+
ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
|
1355
|
+
ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
|
1356
|
+
ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
|
1357
|
+
|
1358
|
+
ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
|
1359
|
+
ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
|
1360
|
+
ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
|
1361
|
+
|
1362
|
+
ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1363
|
+
ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1364
|
+
ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1365
|
+
|
1366
|
+
ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
|
1367
|
+
ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
|
1368
|
+
ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
|
1369
|
+
|
1370
|
+
ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
|
1371
|
+
ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
|
1372
|
+
ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
|
1373
|
+
|
1374
|
+
#if ECB_CPP
|
1375
|
+
|
1376
|
+
inline uint8_t ecb_bswap (uint8_t v) { return v; }
|
1377
|
+
inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
|
1378
|
+
inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
|
1379
|
+
inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
|
1380
|
+
|
1381
|
+
template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
|
1382
|
+
template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
|
1383
|
+
template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
|
1384
|
+
template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
|
1385
|
+
template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
|
1386
|
+
template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1387
|
+
template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
|
1388
|
+
template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
|
1389
|
+
|
1390
|
+
template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
|
1391
|
+
template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
|
1392
|
+
template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
|
1393
|
+
template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
|
1394
|
+
template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
|
1395
|
+
template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
|
1396
|
+
template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
|
1397
|
+
template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
|
1398
|
+
|
1399
|
+
#endif
|
1400
|
+
|
1401
|
+
/*****************************************************************************/
|
1402
|
+
|
1197
1403
|
#if ECB_GCC_VERSION(3,0) || ECB_C99
|
1198
1404
|
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
|
1199
1405
|
#else
|
@@ -1227,6 +1433,8 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
|
|
1227
1433
|
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
|
1228
1434
|
#endif
|
1229
1435
|
|
1436
|
+
/*****************************************************************************/
|
1437
|
+
|
1230
1438
|
ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
|
1231
1439
|
ecb_function_ ecb_const uint32_t
|
1232
1440
|
ecb_binary16_to_binary32 (uint32_t x)
|
@@ -1344,7 +1552,6 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1344
1552
|
|| (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
|
1345
1553
|
|| defined __aarch64__
|
1346
1554
|
#define ECB_STDFP 1
|
1347
|
-
#include <string.h> /* for memcpy */
|
1348
1555
|
#else
|
1349
1556
|
#define ECB_STDFP 0
|
1350
1557
|
#endif
|
@@ -1539,7 +1746,7 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1539
1746
|
#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
|
1540
1747
|
/* if your architecture doesn't need memory fences, e.g. because it is
|
1541
1748
|
* single-cpu/core, or if you use libev in a project that doesn't use libev
|
1542
|
-
* from multiple threads, then you can define
|
1749
|
+
* from multiple threads, then you can define ECB_NO_THREADS when compiling
|
1543
1750
|
* libev, in which cases the memory fences become nops.
|
1544
1751
|
* alternatively, you can remove this #error and link against libpthread,
|
1545
1752
|
* which will then provide the memory fences.
|
@@ -1553,18 +1760,80 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1553
1760
|
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
|
1554
1761
|
#endif
|
1555
1762
|
|
1556
|
-
#define expect_false(cond) ecb_expect_false (cond)
|
1557
|
-
#define expect_true(cond) ecb_expect_true (cond)
|
1558
|
-
#define noinline ecb_noinline
|
1559
|
-
|
1560
1763
|
#define inline_size ecb_inline
|
1561
1764
|
|
1562
1765
|
#if EV_FEATURE_CODE
|
1563
1766
|
# define inline_speed ecb_inline
|
1564
1767
|
#else
|
1565
|
-
# define inline_speed
|
1768
|
+
# define inline_speed ecb_noinline static
|
1769
|
+
#endif
|
1770
|
+
|
1771
|
+
/*****************************************************************************/
|
1772
|
+
/* raw syscall wrappers */
|
1773
|
+
|
1774
|
+
#if EV_NEED_SYSCALL
|
1775
|
+
|
1776
|
+
#include <sys/syscall.h>
|
1777
|
+
|
1778
|
+
/*
|
1779
|
+
* define some syscall wrappers for common architectures
|
1780
|
+
* this is mostly for nice looks during debugging, not performance.
|
1781
|
+
* our syscalls return < 0, not == -1, on error. which is good
|
1782
|
+
* enough for linux aio.
|
1783
|
+
* TODO: arm is also common nowadays, maybe even mips and x86
|
1784
|
+
* TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
|
1785
|
+
*/
|
1786
|
+
#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
|
1787
|
+
/* the costly errno access probably kills this for size optimisation */
|
1788
|
+
|
1789
|
+
#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
|
1790
|
+
({ \
|
1791
|
+
long res; \
|
1792
|
+
register unsigned long r6 __asm__ ("r9" ); \
|
1793
|
+
register unsigned long r5 __asm__ ("r8" ); \
|
1794
|
+
register unsigned long r4 __asm__ ("r10"); \
|
1795
|
+
register unsigned long r3 __asm__ ("rdx"); \
|
1796
|
+
register unsigned long r2 __asm__ ("rsi"); \
|
1797
|
+
register unsigned long r1 __asm__ ("rdi"); \
|
1798
|
+
if (narg >= 6) r6 = (unsigned long)(arg6); \
|
1799
|
+
if (narg >= 5) r5 = (unsigned long)(arg5); \
|
1800
|
+
if (narg >= 4) r4 = (unsigned long)(arg4); \
|
1801
|
+
if (narg >= 3) r3 = (unsigned long)(arg3); \
|
1802
|
+
if (narg >= 2) r2 = (unsigned long)(arg2); \
|
1803
|
+
if (narg >= 1) r1 = (unsigned long)(arg1); \
|
1804
|
+
__asm__ __volatile__ ( \
|
1805
|
+
"syscall\n\t" \
|
1806
|
+
: "=a" (res) \
|
1807
|
+
: "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
|
1808
|
+
: "cc", "r11", "cx", "memory"); \
|
1809
|
+
errno = -res; \
|
1810
|
+
res; \
|
1811
|
+
})
|
1812
|
+
|
1813
|
+
#endif
|
1814
|
+
|
1815
|
+
#ifdef ev_syscall
|
1816
|
+
#define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
|
1817
|
+
#define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
|
1818
|
+
#define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
|
1819
|
+
#define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
|
1820
|
+
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
|
1821
|
+
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
|
1822
|
+
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
|
1823
|
+
#else
|
1824
|
+
#define ev_syscall0(nr) syscall (nr)
|
1825
|
+
#define ev_syscall1(nr,arg1) syscall (nr, arg1)
|
1826
|
+
#define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
|
1827
|
+
#define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
|
1828
|
+
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
|
1829
|
+
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
|
1830
|
+
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
|
1566
1831
|
#endif
|
1567
1832
|
|
1833
|
+
#endif
|
1834
|
+
|
1835
|
+
/*****************************************************************************/
|
1836
|
+
|
1568
1837
|
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
|
1569
1838
|
|
1570
1839
|
#if EV_MINPRI == EV_MAXPRI
|
@@ -1622,7 +1891,7 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
|
|
1622
1891
|
#include <float.h>
|
1623
1892
|
|
1624
1893
|
/* a floor() replacement function, should be independent of ev_tstamp type */
|
1625
|
-
|
1894
|
+
ecb_noinline
|
1626
1895
|
static ev_tstamp
|
1627
1896
|
ev_floor (ev_tstamp v)
|
1628
1897
|
{
|
@@ -1633,26 +1902,26 @@ ev_floor (ev_tstamp v)
|
|
1633
1902
|
const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
|
1634
1903
|
#endif
|
1635
1904
|
|
1636
|
-
/*
|
1637
|
-
if (
|
1905
|
+
/* special treatment for negative arguments */
|
1906
|
+
if (ecb_expect_false (v < 0.))
|
1907
|
+
{
|
1908
|
+
ev_tstamp f = -ev_floor (-v);
|
1909
|
+
|
1910
|
+
return f - (f == v ? 0 : 1);
|
1911
|
+
}
|
1912
|
+
|
1913
|
+
/* argument too large for an unsigned long? then reduce it */
|
1914
|
+
if (ecb_expect_false (v >= shift))
|
1638
1915
|
{
|
1639
1916
|
ev_tstamp f;
|
1640
1917
|
|
1641
1918
|
if (v == v - 1.)
|
1642
|
-
return v; /* very large
|
1919
|
+
return v; /* very large numbers are assumed to be integer */
|
1643
1920
|
|
1644
1921
|
f = shift * ev_floor (v * (1. / shift));
|
1645
1922
|
return f + ev_floor (v - f);
|
1646
1923
|
}
|
1647
1924
|
|
1648
|
-
/* special treatment for negative args? */
|
1649
|
-
if (expect_false (v < 0.))
|
1650
|
-
{
|
1651
|
-
ev_tstamp f = -ev_floor (-v);
|
1652
|
-
|
1653
|
-
return f - (f == v ? 0 : 1);
|
1654
|
-
}
|
1655
|
-
|
1656
1925
|
/* fits into an unsigned long */
|
1657
1926
|
return (unsigned long)v;
|
1658
1927
|
}
|
@@ -1665,7 +1934,7 @@ ev_floor (ev_tstamp v)
|
|
1665
1934
|
# include <sys/utsname.h>
|
1666
1935
|
#endif
|
1667
1936
|
|
1668
|
-
|
1937
|
+
ecb_noinline ecb_cold
|
1669
1938
|
static unsigned int
|
1670
1939
|
ev_linux_version (void)
|
1671
1940
|
{
|
@@ -1705,7 +1974,7 @@ ev_linux_version (void)
|
|
1705
1974
|
/*****************************************************************************/
|
1706
1975
|
|
1707
1976
|
#if EV_AVOID_STDIO
|
1708
|
-
|
1977
|
+
ecb_noinline ecb_cold
|
1709
1978
|
static void
|
1710
1979
|
ev_printerr (const char *msg)
|
1711
1980
|
{
|
@@ -1722,7 +1991,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
|
|
1722
1991
|
syserr_cb = cb;
|
1723
1992
|
}
|
1724
1993
|
|
1725
|
-
|
1994
|
+
ecb_noinline ecb_cold
|
1726
1995
|
static void
|
1727
1996
|
ev_syserr (const char *msg)
|
1728
1997
|
{
|
@@ -1804,7 +2073,7 @@ typedef struct
|
|
1804
2073
|
unsigned char events; /* the events watched for */
|
1805
2074
|
unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
|
1806
2075
|
unsigned char emask; /* some backends store the actual kernel mask in here */
|
1807
|
-
unsigned char
|
2076
|
+
unsigned char eflags; /* flags field for use by backends */
|
1808
2077
|
#if EV_USE_EPOLL
|
1809
2078
|
unsigned int egen; /* generation counter to counter epoll bugs */
|
1810
2079
|
#endif
|
@@ -1868,7 +2137,7 @@ typedef struct
|
|
1868
2137
|
|
1869
2138
|
#else
|
1870
2139
|
|
1871
|
-
EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
|
2140
|
+
EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
|
1872
2141
|
#define VAR(name,decl) static decl;
|
1873
2142
|
#include "ev_vars.h"
|
1874
2143
|
#undef VAR
|
@@ -1878,8 +2147,8 @@ typedef struct
|
|
1878
2147
|
#endif
|
1879
2148
|
|
1880
2149
|
#if EV_FEATURE_API
|
1881
|
-
# define EV_RELEASE_CB if (
|
1882
|
-
# define EV_ACQUIRE_CB if (
|
2150
|
+
# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
|
2151
|
+
# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
|
1883
2152
|
# define EV_INVOKE_PENDING invoke_cb (EV_A)
|
1884
2153
|
#else
|
1885
2154
|
# define EV_RELEASE_CB (void)0
|
@@ -1896,17 +2165,19 @@ ev_tstamp
|
|
1896
2165
|
ev_time (void) EV_NOEXCEPT
|
1897
2166
|
{
|
1898
2167
|
#if EV_USE_REALTIME
|
1899
|
-
if (
|
2168
|
+
if (ecb_expect_true (have_realtime))
|
1900
2169
|
{
|
1901
2170
|
struct timespec ts;
|
1902
2171
|
clock_gettime (CLOCK_REALTIME, &ts);
|
1903
|
-
return
|
2172
|
+
return EV_TS_GET (ts);
|
1904
2173
|
}
|
1905
2174
|
#endif
|
1906
2175
|
|
1907
|
-
|
1908
|
-
|
1909
|
-
|
2176
|
+
{
|
2177
|
+
struct timeval tv;
|
2178
|
+
gettimeofday (&tv, 0);
|
2179
|
+
return EV_TV_GET (tv);
|
2180
|
+
}
|
1910
2181
|
}
|
1911
2182
|
#endif
|
1912
2183
|
|
@@ -1914,11 +2185,11 @@ inline_size ev_tstamp
|
|
1914
2185
|
get_clock (void)
|
1915
2186
|
{
|
1916
2187
|
#if EV_USE_MONOTONIC
|
1917
|
-
if (
|
2188
|
+
if (ecb_expect_true (have_monotonic))
|
1918
2189
|
{
|
1919
2190
|
struct timespec ts;
|
1920
2191
|
clock_gettime (CLOCK_MONOTONIC, &ts);
|
1921
|
-
return
|
2192
|
+
return EV_TS_GET (ts);
|
1922
2193
|
}
|
1923
2194
|
#endif
|
1924
2195
|
|
@@ -1936,7 +2207,7 @@ ev_now (EV_P) EV_NOEXCEPT
|
|
1936
2207
|
void
|
1937
2208
|
ev_sleep (ev_tstamp delay) EV_NOEXCEPT
|
1938
2209
|
{
|
1939
|
-
if (delay > 0.)
|
2210
|
+
if (delay > EV_TS_CONST (0.))
|
1940
2211
|
{
|
1941
2212
|
#if EV_USE_NANOSLEEP
|
1942
2213
|
struct timespec ts;
|
@@ -1946,7 +2217,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
|
|
1946
2217
|
#elif defined _WIN32
|
1947
2218
|
/* maybe this should round up, as ms is very low resolution */
|
1948
2219
|
/* compared to select (µs) or nanosleep (ns) */
|
1949
|
-
Sleep ((unsigned long)(delay
|
2220
|
+
Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
|
1950
2221
|
#else
|
1951
2222
|
struct timeval tv;
|
1952
2223
|
|
@@ -1986,7 +2257,7 @@ array_nextsize (int elem, int cur, int cnt)
|
|
1986
2257
|
return ncur;
|
1987
2258
|
}
|
1988
2259
|
|
1989
|
-
|
2260
|
+
ecb_noinline ecb_cold
|
1990
2261
|
static void *
|
1991
2262
|
array_realloc (int elem, void *base, int *cur, int cnt)
|
1992
2263
|
{
|
@@ -2000,7 +2271,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
|
|
2000
2271
|
memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
|
2001
2272
|
|
2002
2273
|
#define array_needsize(type,base,cur,cnt,init) \
|
2003
|
-
if (
|
2274
|
+
if (ecb_expect_false ((cnt) > (cur))) \
|
2004
2275
|
{ \
|
2005
2276
|
ecb_unused int ocur_ = (cur); \
|
2006
2277
|
(base) = (type *)array_realloc \
|
@@ -2024,20 +2295,20 @@ array_realloc (int elem, void *base, int *cur, int cnt)
|
|
2024
2295
|
/*****************************************************************************/
|
2025
2296
|
|
2026
2297
|
/* dummy callback for pending events */
|
2027
|
-
|
2298
|
+
ecb_noinline
|
2028
2299
|
static void
|
2029
2300
|
pendingcb (EV_P_ ev_prepare *w, int revents)
|
2030
2301
|
{
|
2031
2302
|
}
|
2032
2303
|
|
2033
|
-
|
2304
|
+
ecb_noinline
|
2034
2305
|
void
|
2035
2306
|
ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
|
2036
2307
|
{
|
2037
2308
|
W w_ = (W)w;
|
2038
2309
|
int pri = ABSPRI (w_);
|
2039
2310
|
|
2040
|
-
if (
|
2311
|
+
if (ecb_expect_false (w_->pending))
|
2041
2312
|
pendings [pri][w_->pending - 1].events |= revents;
|
2042
2313
|
else
|
2043
2314
|
{
|
@@ -2098,7 +2369,7 @@ fd_event (EV_P_ int fd, int revents)
|
|
2098
2369
|
{
|
2099
2370
|
ANFD *anfd = anfds + fd;
|
2100
2371
|
|
2101
|
-
if (
|
2372
|
+
if (ecb_expect_true (!anfd->reify))
|
2102
2373
|
fd_event_nocheck (EV_A_ fd, revents);
|
2103
2374
|
}
|
2104
2375
|
|
@@ -2116,8 +2387,20 @@ fd_reify (EV_P)
|
|
2116
2387
|
{
|
2117
2388
|
int i;
|
2118
2389
|
|
2390
|
+
/* most backends do not modify the fdchanges list in backend_modfiy.
|
2391
|
+
* except io_uring, which has fixed-size buffers which might force us
|
2392
|
+
* to handle events in backend_modify, causing fdchanges to be amended,
|
2393
|
+
* which could result in an endless loop.
|
2394
|
+
* to avoid this, we do not dynamically handle fds that were added
|
2395
|
+
* during fd_reify. that means that for those backends, fdchangecnt
|
2396
|
+
* might be non-zero during poll, which must cause them to not block.
|
2397
|
+
* to not put too much of a burden on other backends, this detail
|
2398
|
+
* needs to be handled in the backend.
|
2399
|
+
*/
|
2400
|
+
int changecnt = fdchangecnt;
|
2401
|
+
|
2119
2402
|
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
|
2120
|
-
for (i = 0; i <
|
2403
|
+
for (i = 0; i < changecnt; ++i)
|
2121
2404
|
{
|
2122
2405
|
int fd = fdchanges [i];
|
2123
2406
|
ANFD *anfd = anfds + fd;
|
@@ -2141,7 +2424,7 @@ fd_reify (EV_P)
|
|
2141
2424
|
}
|
2142
2425
|
#endif
|
2143
2426
|
|
2144
|
-
for (i = 0; i <
|
2427
|
+
for (i = 0; i < changecnt; ++i)
|
2145
2428
|
{
|
2146
2429
|
int fd = fdchanges [i];
|
2147
2430
|
ANFD *anfd = anfds + fd;
|
@@ -2152,7 +2435,7 @@ fd_reify (EV_P)
|
|
2152
2435
|
|
2153
2436
|
anfd->reify = 0;
|
2154
2437
|
|
2155
|
-
/*if (
|
2438
|
+
/*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
|
2156
2439
|
{
|
2157
2440
|
anfd->events = 0;
|
2158
2441
|
|
@@ -2167,7 +2450,14 @@ fd_reify (EV_P)
|
|
2167
2450
|
backend_modify (EV_A_ fd, o_events, anfd->events);
|
2168
2451
|
}
|
2169
2452
|
|
2170
|
-
fdchangecnt
|
2453
|
+
/* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
|
2454
|
+
* this is a rare case (see beginning comment in this function), so we copy them to the
|
2455
|
+
* front and hope the backend handles this case.
|
2456
|
+
*/
|
2457
|
+
if (ecb_expect_false (fdchangecnt != changecnt))
|
2458
|
+
memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
|
2459
|
+
|
2460
|
+
fdchangecnt -= changecnt;
|
2171
2461
|
}
|
2172
2462
|
|
2173
2463
|
/* something about the given fd changed */
|
@@ -2176,9 +2466,9 @@ void
|
|
2176
2466
|
fd_change (EV_P_ int fd, int flags)
|
2177
2467
|
{
|
2178
2468
|
unsigned char reify = anfds [fd].reify;
|
2179
|
-
anfds [fd].reify
|
2469
|
+
anfds [fd].reify = reify | flags;
|
2180
2470
|
|
2181
|
-
if (
|
2471
|
+
if (ecb_expect_true (!reify))
|
2182
2472
|
{
|
2183
2473
|
++fdchangecnt;
|
2184
2474
|
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
|
@@ -2211,7 +2501,7 @@ fd_valid (int fd)
|
|
2211
2501
|
}
|
2212
2502
|
|
2213
2503
|
/* called on EBADF to verify fds */
|
2214
|
-
|
2504
|
+
ecb_noinline ecb_cold
|
2215
2505
|
static void
|
2216
2506
|
fd_ebadf (EV_P)
|
2217
2507
|
{
|
@@ -2224,7 +2514,7 @@ fd_ebadf (EV_P)
|
|
2224
2514
|
}
|
2225
2515
|
|
2226
2516
|
/* called on ENOMEM in select/poll to kill some fds and retry */
|
2227
|
-
|
2517
|
+
ecb_noinline ecb_cold
|
2228
2518
|
static void
|
2229
2519
|
fd_enomem (EV_P)
|
2230
2520
|
{
|
@@ -2239,7 +2529,7 @@ fd_enomem (EV_P)
|
|
2239
2529
|
}
|
2240
2530
|
|
2241
2531
|
/* usually called after fork if backend needs to re-arm all fds from scratch */
|
2242
|
-
|
2532
|
+
ecb_noinline
|
2243
2533
|
static void
|
2244
2534
|
fd_rearm_all (EV_P)
|
2245
2535
|
{
|
@@ -2303,19 +2593,19 @@ downheap (ANHE *heap, int N, int k)
|
|
2303
2593
|
ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
|
2304
2594
|
|
2305
2595
|
/* find minimum child */
|
2306
|
-
if (
|
2596
|
+
if (ecb_expect_true (pos + DHEAP - 1 < E))
|
2307
2597
|
{
|
2308
2598
|
/* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
|
2309
|
-
if ( ANHE_at (pos [1])
|
2310
|
-
if ( ANHE_at (pos [2])
|
2311
|
-
if ( ANHE_at (pos [3])
|
2599
|
+
if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
|
2600
|
+
if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
|
2601
|
+
if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
|
2312
2602
|
}
|
2313
2603
|
else if (pos < E)
|
2314
2604
|
{
|
2315
2605
|
/* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
|
2316
|
-
if (pos + 1 < E && ANHE_at (pos [1])
|
2317
|
-
if (pos + 2 < E && ANHE_at (pos [2])
|
2318
|
-
if (pos + 3 < E && ANHE_at (pos [3])
|
2606
|
+
if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
|
2607
|
+
if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
|
2608
|
+
if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
|
2319
2609
|
}
|
2320
2610
|
else
|
2321
2611
|
break;
|
@@ -2333,7 +2623,7 @@ downheap (ANHE *heap, int N, int k)
|
|
2333
2623
|
ev_active (ANHE_w (he)) = k;
|
2334
2624
|
}
|
2335
2625
|
|
2336
|
-
#else /* 4HEAP */
|
2626
|
+
#else /* not 4HEAP */
|
2337
2627
|
|
2338
2628
|
#define HEAP0 1
|
2339
2629
|
#define HPARENT(k) ((k) >> 1)
|
@@ -2360,7 +2650,7 @@ downheap (ANHE *heap, int N, int k)
|
|
2360
2650
|
|
2361
2651
|
heap [k] = heap [c];
|
2362
2652
|
ev_active (ANHE_w (heap [k])) = k;
|
2363
|
-
|
2653
|
+
|
2364
2654
|
k = c;
|
2365
2655
|
}
|
2366
2656
|
|
@@ -2415,7 +2705,7 @@ reheap (ANHE *heap, int N)
|
|
2415
2705
|
|
2416
2706
|
/*****************************************************************************/
|
2417
2707
|
|
2418
|
-
/* associate signal watchers to a signal
|
2708
|
+
/* associate signal watchers to a signal */
|
2419
2709
|
typedef struct
|
2420
2710
|
{
|
2421
2711
|
EV_ATOMIC_T pending;
|
@@ -2431,7 +2721,7 @@ static ANSIG signals [EV_NSIG - 1];
|
|
2431
2721
|
|
2432
2722
|
#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
|
2433
2723
|
|
2434
|
-
|
2724
|
+
ecb_noinline ecb_cold
|
2435
2725
|
static void
|
2436
2726
|
evpipe_init (EV_P)
|
2437
2727
|
{
|
@@ -2482,7 +2772,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
|
|
2482
2772
|
{
|
2483
2773
|
ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
|
2484
2774
|
|
2485
|
-
if (
|
2775
|
+
if (ecb_expect_true (*flag))
|
2486
2776
|
return;
|
2487
2777
|
|
2488
2778
|
*flag = 1;
|
@@ -2569,7 +2859,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
|
|
2569
2859
|
ECB_MEMORY_FENCE;
|
2570
2860
|
|
2571
2861
|
for (i = EV_NSIG - 1; i--; )
|
2572
|
-
if (
|
2862
|
+
if (ecb_expect_false (signals [i].pending))
|
2573
2863
|
ev_feed_signal_event (EV_A_ i + 1);
|
2574
2864
|
}
|
2575
2865
|
#endif
|
@@ -2620,13 +2910,13 @@ ev_sighandler (int signum)
|
|
2620
2910
|
ev_feed_signal (signum);
|
2621
2911
|
}
|
2622
2912
|
|
2623
|
-
|
2913
|
+
ecb_noinline
|
2624
2914
|
void
|
2625
2915
|
ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
|
2626
2916
|
{
|
2627
2917
|
WL w;
|
2628
2918
|
|
2629
|
-
if (
|
2919
|
+
if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
|
2630
2920
|
return;
|
2631
2921
|
|
2632
2922
|
--signum;
|
@@ -2635,7 +2925,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
|
|
2635
2925
|
/* it is permissible to try to feed a signal to the wrong loop */
|
2636
2926
|
/* or, likely more useful, feeding a signal nobody is waiting for */
|
2637
2927
|
|
2638
|
-
if (
|
2928
|
+
if (ecb_expect_false (signals [signum].loop != EV_A))
|
2639
2929
|
return;
|
2640
2930
|
#endif
|
2641
2931
|
|
@@ -2729,6 +3019,57 @@ childcb (EV_P_ ev_signal *sw, int revents)
 
 /*****************************************************************************/
 
+#if EV_USE_TIMERFD
+
+static void periodics_reschedule (EV_P);
+
+static void
+timerfdcb (EV_P_ ev_io *iow, int revents)
+{
+  struct itimerspec its = { 0 };
+
+  its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
+  timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
+
+  ev_rt_now = ev_time ();
+  /* periodics_reschedule only needs ev_rt_now */
+  /* but maybe in the future we want the full treatment. */
+  /*
+  now_floor = EV_TS_CONST (0.);
+  time_update (EV_A_ EV_TSTAMP_HUGE);
+  */
+  #if EV_PERIODIC_ENABLE
+    periodics_reschedule (EV_A);
+  #endif
+}
+
+ecb_noinline ecb_cold
+static void
+evtimerfd_init (EV_P)
+{
+  if (!ev_is_active (&timerfd_w))
+    {
+      timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
+
+      if (timerfd >= 0)
+        {
+          fd_intern (timerfd); /* just to be sure */
+
+          ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
+          ev_set_priority (&timerfd_w, EV_MINPRI);
+          ev_io_start (EV_A_ &timerfd_w);
+          ev_unref (EV_A); /* watcher should not keep loop alive */
+
+          /* (re-) arm timer */
+          timerfdcb (EV_A_ 0, 0);
+        }
+    }
+}
+
+#endif
+
+/*****************************************************************************/
+
 #if EV_USE_IOCP
 # include "ev_iocp.c"
 #endif
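The timerfd added above exists purely to detect jumps of the realtime clock: an absolute CLOCK_REALTIME timer armed with TFD_TIMER_CANCEL_ON_SET is cancelled by the kernel whenever the wall clock is set, which wakes the loop through timerfd_w and triggers periodics_reschedule. The following standalone sketch (illustrative only, not part of this diff; error handling trimmed) shows that underlying Linux mechanism in isolation:

/* sketch: detect a wall-clock change via TFD_TIMER_CANCEL_ON_SET */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int
main (void)
{
  int fd = timerfd_create (CLOCK_REALTIME, TFD_CLOEXEC);
  if (fd < 0)
    return 1;

  /* arm an absolute timer far in the future; any settimeofday()/clock_settime()
     on CLOCK_REALTIME cancels it and wakes the blocked read below */
  struct itimerspec its = { 0 };
  its.it_value.tv_sec = time (0) + 3600;
  timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);

  uint64_t expirations;
  if (read (fd, &expirations, sizeof (expirations)) < 0 && errno == ECANCELED)
    puts ("realtime clock was changed - absolute timers need rescheduling");

  close (fd);
  return 0;
}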
@@ -2744,6 +3085,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
 #if EV_USE_LINUXAIO
 # include "ev_linuxaio.c"
 #endif
+#if EV_USE_IOURING
+# include "ev_iouring.c"
+#endif
 #if EV_USE_POLL
 # include "ev_poll.c"
 #endif
@@ -2781,17 +3125,14 @@ ev_supported_backends (void) EV_NOEXCEPT
 {
   unsigned int flags = 0;
 
-  if (EV_USE_PORT
-  if (EV_USE_KQUEUE
-  if (EV_USE_EPOLL
-
-
-  if (
-
+  if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
+  if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
+  if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
+  if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */
+  if (EV_USE_IOURING && ev_linux_version () >= 0x050601 ) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
+  if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
+  if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
 
-  if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
-  if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
-
   return flags;
 }
 
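Backend discovery stays declarative: ev_supported_backends () now reports EVBACKEND_IOURING whenever libev was built with EV_USE_IOURING and the running kernel is at least 5.6.1, while ev_recommended_backends () (next hunk) still masks it out as experimental. A hedged application-side sketch of requesting it explicitly, using only libev's public API (not code from this diff):

#include <stdio.h>
#include <ev.h>

int
main (void)
{
  unsigned int supported = ev_supported_backends ();
  struct ev_loop *loop = 0;

  if (supported & EVBACKEND_IOURING)
    loop = ev_loop_new (EVBACKEND_IOURING); /* explicitly request io_uring */

  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO);       /* fall back to the recommended set */

  printf ("using backend %#x\n", ev_backend (loop));
  ev_loop_destroy (loop);
  return 0;
}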
@@ -2801,24 +3142,29 @@ ev_recommended_backends (void) EV_NOEXCEPT
 {
   unsigned int flags = ev_supported_backends ();
 
-
-
-#elif defined(__NetBSD__)
-  /* kqueue is borked on everything but netbsd apparently */
-  /* it usually doesn't work correctly on anything but sockets and pipes */
-#else
+  /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
+#if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14)
   /* only select works correctly on that "unix-certified" platform */
   flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
   flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
 #endif
 
+#if !defined(__NetBSD__) && !defined(__APPLE__)
+  /* kqueue is borked on everything but netbsd and osx >= 10.12.2 apparently */
+  /* it usually doesn't work correctly on anything but sockets and pipes */
+  flags &= ~EVBACKEND_KQUEUE;
+#endif
+
 #ifdef __FreeBSD__
   flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
 #endif
 
-
-
+#ifdef __linux__
+  /* NOTE: linuxaio is very experimental, never recommend */
   flags &= ~EVBACKEND_LINUXAIO;
+
+  /* NOTE: io_uring is super experimental, never recommend */
+  flags &= ~EVBACKEND_IOURING;
 #endif
 
   return flags;
@@ -2828,12 +3174,14 @@ ecb_cold
 unsigned int
 ev_embeddable_backends (void) EV_NOEXCEPT
 {
-  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
+  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
 
   /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
   if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
     flags &= ~EVBACKEND_EPOLL;
 
+  /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
+
   return flags;
 }
 
@@ -2895,7 +3243,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
 #endif
 
 /* initialise a loop structure, must be zero-initialised */
-
+ecb_noinline ecb_cold
 static void
 loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 {
@@ -2960,6 +3308,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 #if EV_USE_SIGNALFD
   sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
 #endif
+#if EV_USE_TIMERFD
+  timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
+#endif
 
   if (!(flags & EVBACKEND_MASK))
     flags |= ev_recommended_backends ();
@@ -2973,6 +3324,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 #if EV_USE_KQUEUE
   if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
 #endif
+#if EV_USE_IOURING
+  if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
+#endif
 #if EV_USE_LINUXAIO
   if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
 #endif
@@ -3010,7 +3364,7 @@ ev_loop_destroy (EV_P)
 
 #if EV_CLEANUP_ENABLE
   /* queue cleanup watchers (and execute them) */
-  if (
+  if (ecb_expect_false (cleanupcnt))
     {
       queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
       EV_INVOKE_PENDING;
@@ -3039,6 +3393,11 @@ ev_loop_destroy (EV_P)
     close (sigfd);
 #endif
 
+#if EV_USE_TIMERFD
+  if (ev_is_active (&timerfd_w))
+    close (timerfd);
+#endif
+
 #if EV_USE_INOTIFY
   if (fs_fd >= 0)
     close (fs_fd);
@@ -3056,6 +3415,9 @@ ev_loop_destroy (EV_P)
 #if EV_USE_KQUEUE
   if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
 #endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
+#endif
 #if EV_USE_LINUXAIO
   if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
 #endif
@@ -3123,6 +3485,9 @@ loop_fork (EV_P)
 #if EV_USE_KQUEUE
   if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
 #endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
+#endif
 #if EV_USE_LINUXAIO
   if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
 #endif
@@ -3133,22 +3498,44 @@ loop_fork (EV_P)
   infy_fork (EV_A);
 #endif
 
-
-  if (ev_is_active (&pipe_w) && postfork != 2)
+  if (postfork != 2)
     {
-
+#if EV_USE_SIGNALFD
+      /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
+#endif
 
-
-
+#if EV_USE_TIMERFD
+      if (ev_is_active (&timerfd_w))
+        {
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &timerfd_w);
 
-
-
+          close (timerfd);
+          timerfd = -2;
 
-
-
-
+          evtimerfd_init (EV_A);
+          /* reschedule periodics, in case we missed something */
+          ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
+        }
+#endif
+
+#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
+      if (ev_is_active (&pipe_w))
+        {
+          /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
+
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &pipe_w);
+
+          if (evpipe [0] >= 0)
+            EV_WIN32_CLOSE_FD (evpipe [0]);
+
+          evpipe_init (EV_A);
+          /* iterate over everything, in case we missed something before */
+          ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
+        }
+#endif
     }
-#endif
 
   postfork = 0;
 }
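The reworked block above re-creates the wake-up pipe and the timerfd in the child after a fork, but only when postfork was set. From the embedding side nothing changes: the application still just tells libev that a fork happened. A minimal reminder sketch using the public API (assumed usage, not code from this diff):

#include <sys/types.h>
#include <unistd.h>
#include <ev.h>

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  pid_t pid = fork ();
  if (pid == 0)
    {
      /* flag the loop so loop_fork () can rebuild kernel state on the next
         iteration: backend fds, the signal pipe and now also the timerfd */
      ev_loop_fork (loop);
    }

  ev_run (loop, 0);
  return 0;
}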
@@ -3174,7 +3561,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
 #endif /* multiplicity */
 
 #if EV_VERIFY
-
+ecb_noinline ecb_cold
 static void
 verify_watcher (EV_P_ W w)
 {
@@ -3184,7 +3571,7 @@ verify_watcher (EV_P_ W w)
   assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
 }
 
-
+ecb_noinline ecb_cold
 static void
 verify_heap (EV_P_ ANHE *heap, int N)
 {
@@ -3200,7 +3587,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
     }
 }
 
-
+ecb_noinline ecb_cold
 static void
 array_verify (EV_P_ W *ws, int cnt)
 {
@@ -3359,7 +3746,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
   return count;
 }
 
-
+ecb_noinline
 void
 ev_invoke_pending (EV_P)
 {
@@ -3388,7 +3775,7 @@ ev_invoke_pending (EV_P)
 inline_size void
 idle_reify (EV_P)
 {
-  if (
+  if (ecb_expect_false (idleall))
     {
       int pri;
 
@@ -3428,7 +3815,7 @@ timers_reify (EV_P)
           if (ev_at (w) < mn_now)
             ev_at (w) = mn_now;
 
-          assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
+          assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
 
           ANHE_at_cache (timers [HEAP0]);
           downheap (timers, timercnt, HEAP0);
@@ -3447,7 +3834,7 @@ timers_reify (EV_P)
 
 #if EV_PERIODIC_ENABLE
 
-
+ecb_noinline
 static void
 periodic_recalc (EV_P_ ev_periodic *w)
 {
@@ -3460,7 +3847,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
       ev_tstamp nat = at + w->interval;
 
       /* when resolution fails us, we use ev_rt_now */
-      if (
+      if (ecb_expect_false (nat == at))
         {
           at = ev_rt_now;
           break;
@@ -3516,7 +3903,7 @@ periodics_reify (EV_P)
 
 /* simply recalculate all periodics */
 /* TODO: maybe ensure that at least one event happens when jumping forward? */
-
+ecb_noinline ecb_cold
 static void
 periodics_reschedule (EV_P)
 {
@@ -3540,7 +3927,7 @@ periodics_reschedule (EV_P)
 #endif
 
 /* adjust all timers by a given offset */
-
+ecb_noinline ecb_cold
 static void
 timers_reschedule (EV_P_ ev_tstamp adjust)
 {
@@ -3560,7 +3947,7 @@ inline_speed void
 time_update (EV_P_ ev_tstamp max_block)
 {
 #if EV_USE_MONOTONIC
-  if (
+  if (ecb_expect_true (have_monotonic))
     {
       int i;
       ev_tstamp odiff = rtmn_diff;
@@ -3569,7 +3956,7 @@ time_update (EV_P_ ev_tstamp max_block)
 
       /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
       /* interpolate in the meantime */
-      if (
+      if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
         {
           ev_rt_now = rtmn_diff + mn_now;
           return;
@@ -3593,7 +3980,7 @@ time_update (EV_P_ ev_tstamp max_block)
 
           diff = odiff - rtmn_diff;
 
-          if (
+          if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
             return; /* all is well */
 
           ev_rt_now = ev_time ();
@@ -3612,7 +3999,7 @@ time_update (EV_P_ ev_tstamp max_block)
     {
       ev_rt_now = ev_time ();
 
-      if (
+      if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
         {
           /* adjust timers. this is easy, as the offset is the same for all of them */
           timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3666,8 +4053,8 @@ ev_run (EV_P_ int flags)
 #endif
 
 #ifndef _WIN32
-      if (
-        if (
+      if (ecb_expect_false (curpid)) /* penalise the forking check even more */
+        if (ecb_expect_false (getpid () != curpid))
           {
             curpid = getpid ();
             postfork = 1;
@@ -3676,7 +4063,7 @@ ev_run (EV_P_ int flags)
 
 #if EV_FORK_ENABLE
       /* we might have forked, so queue fork handlers */
-      if (
+      if (ecb_expect_false (postfork))
         if (forkcnt)
           {
             queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3686,18 +4073,18 @@ ev_run (EV_P_ int flags)
 
 #if EV_PREPARE_ENABLE
       /* queue prepare watchers (and execute them) */
-      if (
+      if (ecb_expect_false (preparecnt))
         {
           queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
           EV_INVOKE_PENDING;
         }
 #endif
 
-      if (
+      if (ecb_expect_false (loop_done))
         break;
 
       /* we might have forked, so reify kernel state if necessary */
-      if (
+      if (ecb_expect_false (postfork))
         loop_fork (EV_A);
 
       /* update fd-related kernel structures */
@@ -3712,16 +4099,28 @@ ev_run (EV_P_ int flags)
         ev_tstamp prev_mn_now = mn_now;
 
         /* update time to cancel out callback processing overhead */
-        time_update (EV_A_
+        time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
 
         /* from now on, we want a pipe-wake-up */
         pipe_write_wanted = 1;
 
         ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
 
-        if (
+        if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
           {
-            waittime = MAX_BLOCKTIME;
+            waittime = EV_TS_CONST (MAX_BLOCKTIME);
+
+#if EV_USE_TIMERFD
+            /* sleep a lot longer when we can reliably detect timejumps */
+            if (ecb_expect_true (timerfd >= 0))
+              waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif
+#if !EV_PERIODIC_ENABLE
+            /* without periodics but with monotonic clock there is no need */
+            /* for any time jump detection, so sleep longer */
+            if (ecb_expect_true (have_monotonic))
+              waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif
 
             if (timercnt)
               {
@@ -3738,23 +4137,28 @@ ev_run (EV_P_ int flags)
 #endif
 
             /* don't let timeouts decrease the waittime below timeout_blocktime */
-            if (
+            if (ecb_expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;
 
-            /*
-
-
-
+            /* now there are two more special cases left, either we have
+             * already-expired timers, so we should not sleep, or we have timers
+             * that expire very soon, in which case we need to wait for a minimum
+             * amount of time for some event loop backends.
+             */
+            if (ecb_expect_false (waittime < backend_mintime))
+              waittime = waittime <= EV_TS_CONST (0.)
+                         ? EV_TS_CONST (0.)
+                         : backend_mintime;
 
             /* extra check because io_blocktime is commonly 0 */
-            if (
+            if (ecb_expect_false (io_blocktime))
               {
                 sleeptime = io_blocktime - (mn_now - prev_mn_now);
 
                 if (sleeptime > waittime - backend_mintime)
                   sleeptime = waittime - backend_mintime;
 
-                if (
+                if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
                   {
                     ev_sleep (sleeptime);
                     waittime -= sleeptime;
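Taken together, the two hunks above pick the poll timeout in three steps: start from the maximum block time (MAX_BLOCKTIME2 when a timerfd can reliably flag wall-clock jumps), shrink it to the earliest timer but never below timeout_blocktime, then clamp it against the backend's minimum wait. A condensed sketch of that decision with the loop-private variables turned into parameters (illustrative only, not libev code):

/* sketch of the waittime clamping performed inside ev_run; all names are
   parameters here, not the real loop-private variables */
typedef double tstamp;

static tstamp
compute_waittime (tstamp max_block, tstamp next_timer_delta,
                  tstamp timeout_blocktime, tstamp backend_mintime)
{
  tstamp waittime = max_block;            /* MAX_BLOCKTIME or MAX_BLOCKTIME2 */

  if (next_timer_delta < waittime)
    waittime = next_timer_delta;          /* wake up in time for the earliest timer */

  if (waittime < timeout_blocktime)
    waittime = timeout_blocktime;         /* but honour the configured lower bound */

  if (waittime < backend_mintime)         /* already expired, or below backend resolution */
    waittime = waittime <= 0. ? 0. : backend_mintime;

  return waittime;
}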
@@ -3825,7 +4229,6 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
             ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
           }
 
-
         /* update ev_rt_now, do magic */
         time_update (EV_A_ waittime + sleeptime);
       }
@@ -3843,13 +4246,13 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
 
 #if EV_CHECK_ENABLE
       /* queue check watchers, to be executed first */
-      if (
+      if (ecb_expect_false (checkcnt))
         queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
 #endif
 
       EV_INVOKE_PENDING;
     }
-  while (
+  while (ecb_expect_true (
     activecnt
     && !loop_done
     && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
@@ -3886,7 +4289,7 @@ ev_unref (EV_P) EV_NOEXCEPT
 void
 ev_now_update (EV_P) EV_NOEXCEPT
 {
-  time_update (EV_A_
+  time_update (EV_A_ EV_TSTAMP_HUGE);
 }
 
 void
@@ -3923,7 +4326,7 @@ wlist_del (WL *head, WL elem)
 {
   while (*head)
     {
-      if (
+      if (ecb_expect_true (*head == elem))
         {
           *head = elem->next;
           break;
@@ -3950,7 +4353,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
   W w_ = (W)w;
   int pending = w_->pending;
 
-  if (
+  if (ecb_expect_true (pending))
     {
       ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
       p->w = (W)&pending_w;
@@ -3987,13 +4390,13 @@ ev_stop (EV_P_ W w)
 
 /*****************************************************************************/
 
-
+ecb_noinline
 void
 ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   int fd = w->fd;
 
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   assert (("libev: ev_io_start called with negative fd", fd >= 0));
@@ -4017,12 +4420,12 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-
+ecb_noinline
 void
 ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
@@ -4040,11 +4443,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-
+ecb_noinline
 void
 ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   ev_at (w) += mn_now;
@@ -4065,12 +4468,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
   /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
 }
 
-
+ecb_noinline
 void
 ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4082,7 +4485,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
 
   --timercnt;
 
-  if (
+  if (ecb_expect_true (active < timercnt + HEAP0))
     {
       timers [active] = timers [timercnt + HEAP0];
       adjustheap (timers, timercnt, active);
@@ -4096,7 +4499,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-
+ecb_noinline
 void
 ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
@@ -4127,17 +4530,22 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 ev_tstamp
 ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
+  return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
 }
 
 #if EV_PERIODIC_ENABLE
-
+ecb_noinline
 void
 ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
+#if EV_USE_TIMERFD
+  if (timerfd == -2)
+    evtimerfd_init (EV_A);
+#endif
+
   if (w->reschedule_cb)
     ev_at (w) = w->reschedule_cb (w, ev_rt_now);
   else if (w->interval)
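Because ev_periodic_start now calls evtimerfd_init lazily, starting the first periodic watcher is what arms the timerfd. A minimal application-level example of a wall-clock periodic watcher that benefits from this (standard libev API; the 60-second interval is just an illustration):

#include <stdio.h>
#include <ev.h>

static void
minute_cb (struct ev_loop *loop, ev_periodic *w, int revents)
{
  /* fires whenever the wall clock crosses a full minute, even after
     suspends or manual clock changes */
  printf ("tick at %.0f\n", ev_now (loop));
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_periodic tick;

  ev_periodic_init (&tick, minute_cb, 0., 60., 0);
  ev_periodic_start (loop, &tick);

  ev_run (loop, 0);
  return 0;
}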
@@ -4162,12 +4570,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
   /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
 }
 
-
+ecb_noinline
 void
 ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4179,7 +4587,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
 
   --periodiccnt;
 
-  if (
+  if (ecb_expect_true (active < periodiccnt + HEAP0))
     {
       periodics [active] = periodics [periodiccnt + HEAP0];
       adjustheap (periodics, periodiccnt, active);
@@ -4191,7 +4599,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-
+ecb_noinline
 void
 ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
@@ -4207,11 +4615,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 
 #if EV_SIGNAL_ENABLE
 
-
+ecb_noinline
 void
 ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4290,12 +4698,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-
+ecb_noinline
 void
 ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4338,7 +4746,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
 #if EV_MULTIPLICITY
   assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
 #endif
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4353,7 +4761,7 @@ void
 ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4377,14 +4785,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
 #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
 #define MIN_STAT_INTERVAL 0.1074891
 
-
+ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
 
 #if EV_USE_INOTIFY
 
 /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
 # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
 
-
+ecb_noinline
 static void
 infy_add (EV_P_ ev_stat *w)
 {
@@ -4459,7 +4867,7 @@ infy_add (EV_P_ ev_stat *w)
   if (ev_is_active (&w->timer)) ev_unref (EV_A);
 }
 
-
+ecb_noinline
 static void
 infy_del (EV_P_ ev_stat *w)
 {
@@ -4477,7 +4885,7 @@ infy_del (EV_P_ ev_stat *w)
   inotify_rm_watch (fs_fd, wd);
 }
 
-
+ecb_noinline
 static void
 infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
 {
@@ -4633,7 +5041,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
     w->attr.st_nlink = 1;
 }
 
-
+ecb_noinline
 static void
 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 {
@@ -4677,7 +5085,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 void
 ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   ev_stat_stat (EV_A_ w);
@@ -4709,7 +5117,7 @@ void
 ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
     return;
 
   EV_FREQUENT_CHECK;
@@ -4734,7 +5142,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 void
 ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   pri_adjust (EV_A_ (W)w);
@@ -4758,7 +5166,7 @@ void
 ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4781,7 +5189,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 void
 ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4797,7 +5205,7 @@ void
 ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4819,7 +5227,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 void
 ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4835,7 +5243,7 @@ void
 ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4854,7 +5262,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 #endif
 
 #if EV_EMBED_ENABLE
-
+ecb_noinline
 void
 ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
@@ -4888,6 +5296,7 @@ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
     }
 }
 
+#if EV_FORK_ENABLE
 static void
 embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
 {
@@ -4904,6 +5313,7 @@ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
 
   ev_embed_start (EV_A_ w);
 }
+#endif
 
 #if 0
 static void
@@ -4916,7 +5326,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
 void
 ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   {
@@ -4934,8 +5344,10 @@ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
   ev_set_priority (&w->prepare, EV_MINPRI);
   ev_prepare_start (EV_A_ &w->prepare);
 
+#if EV_FORK_ENABLE
   ev_fork_init (&w->fork, embed_fork_cb);
   ev_fork_start (EV_A_ &w->fork);
+#endif
 
   /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
 
@@ -4948,14 +5360,16 @@ void
 ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
 
   ev_io_stop (EV_A_ &w->io);
   ev_prepare_stop (EV_A_ &w->prepare);
+#if EV_FORK_ENABLE
   ev_fork_stop (EV_A_ &w->fork);
+#endif
 
   ev_stop (EV_A_ (W)w);
 
@@ -4967,7 +5381,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 void
 ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4983,7 +5397,7 @@ void
 ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5005,7 +5419,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 void
 ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5023,7 +5437,7 @@ void
 ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5046,7 +5460,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 void
 ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
 {
-  if (
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   w->sent = 0;
@@ -5066,7 +5480,7 @@ void
 ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5273,4 +5687,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
 #if EV_MULTIPLICITY
   #include "ev_wrap.h"
 #endif
-