libev_scheduler 0.1 → 0.2
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +25 -0
- data/CHANGELOG.md +3 -0
- data/Gemfile.lock +1 -1
- data/README.md +201 -2
- data/ext/libev/Changes +71 -2
- data/ext/libev/ev.c +625 -201
- data/ext/libev/ev.h +25 -22
- data/ext/libev/ev_epoll.c +16 -14
- data/ext/libev/ev_iouring.c +694 -0
- data/ext/libev/ev_kqueue.c +4 -4
- data/ext/libev/ev_linuxaio.c +78 -100
- data/ext/libev/ev_poll.c +6 -6
- data/ext/libev/ev_port.c +3 -3
- data/ext/libev/ev_select.c +6 -6
- data/ext/libev/ev_vars.h +34 -0
- data/ext/libev/ev_win32.c +2 -2
- data/ext/libev/ev_wrap.h +56 -0
- data/lib/libev_scheduler/version.rb +1 -1
- metadata +4 -3
- data/.github/test.yml +0 -31
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 921cd6937f62740e46edef5d1b3d135112276bab83149f896afb4ea6961a2f3f
+  data.tar.gz: afc0691185932a84c55ceac285eb24cb393f249e4c05176e744b4b3672a0e6c6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cccc65981647ef19401aa9e168e7a9137a2cddbe93e15a5061329b1cdc17e63ba1bbb62758ee43ceb16635021119240d77d9cc127e78763928f7d3046b243232
+  data.tar.gz: a317d8f63c2fdb83eea494809980d8b44ab584e4d32fbfd76b57e5e02b954efd0014ff4232f327cc098cb7429cafac94a790116e9ca984b1600bac5180b2e2de
data/.github/workflows/test.yml
ADDED
@@ -0,0 +1,25 @@
+name: Tests
+
+on: [push, pull_request]
+
+jobs:
+  test:
+    name: >-
+      libev_scheduler ${{matrix.os}}, ${{matrix.ruby}}
+    runs-on: ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [ ubuntu-latest ]
+        ruby: [ head, '3.0' ]
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Ruby
+      uses: actions/setup-ruby@v1
+      with:
+        ruby-version: ${{matrix.ruby}}
+    - name: Install dependencies
+      run: bundle install
+    - name: Compile C-extension
+      run: bundle exec rake compile
+    - name: Run tests
+      run: bundle exec rake test
data/CHANGELOG.md
CHANGED
data/Gemfile.lock
CHANGED
data/README.md
CHANGED
@@ -1,5 +1,204 @@
 # libev_scheduler
 
-
+<p align="center">
+  <a href="http://rubygems.org/gems/libev_scheduler">
+    <img src="https://badge.fury.io/rb/libev_scheduler.svg" alt="Ruby gem">
+  </a>
+  <a href="https://github.com/digital-fabric/libev_scheduler/actions?query=workflow%3ATests">
+    <img src="https://github.com/digital-fabric/libev_scheduler/workflows/Tests/badge.svg" alt="Tests">
+  </a>
+  <a href="https://github.com/digital-fabric/libev_scheduler/blob/master/LICENSE">
+    <img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="MIT License">
+  </a>
+</p>
 
-
+`libev_scheduler` is a libev-based fiber scheduler for Ruby 3.0, based on code
+extracted from [Polyphony](https://github.com/digital-fabric/polyphony).
+
+## Installing
+
+```bash
+$ gem install libev_scheduler
+```
+
+## Usage
+
+```ruby
+Fiber.set_scheduler Libev::Scheduler.new
+
+Fiber.schedule do
+  do_something_awesome
+end
+```
+
+Also have a look at the included tests and examples.
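The effect of the scheduler is easiest to see with two concurrent sleeps. The following is a minimal runnable sketch (not one of the gem's bundled examples), relying on Ruby 3.0's behaviour of running a scheduler's pending fibers when the owning thread terminates:

```ruby
require 'libev_scheduler'

t0 = Process.clock_gettime(Process::CLOCK_MONOTONIC)

Thread.new do
  Fiber.set_scheduler Libev::Scheduler.new

  # Each sleep yields to the scheduler instead of blocking the thread,
  # so the two fibers sleep concurrently.
  Fiber.schedule { sleep 1 }
  Fiber.schedule { sleep 1 }
end.join # pending fibers are run by the scheduler as the thread finishes

elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - t0
puts format('elapsed: %.2fs', elapsed) # ~1s rather than ~2s
```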
+
+## The scheduler implementation
+
+The present gem uses
+[libev](http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod) to provide a
+performant, cross-platform fiber scheduler implementation for Ruby 3.0. The
+bundled libev is version 4.33, which includes an (experimental) io_uring
+backend (more on io_uring below).
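Which libev backend is actually used is decided at runtime. libev honours the `LIBEV_FLAGS` environment variable (unless a loop is created with `EVFLAG_NOENV`), so — assuming the scheduler creates its loop after the variable is set — backend selection can be forced for experimentation. The flag values below are the `EVBACKEND_*` bits from `ev.h`:

```ruby
# Force a specific libev backend via LIBEV_FLAGS (values from ev.h):
#   EVBACKEND_SELECT = 0x1, EVBACKEND_POLL = 0x2, EVBACKEND_EPOLL = 0x4,
#   EVBACKEND_KQUEUE = 0x8, EVBACKEND_LINUXAIO = 0x40, EVBACKEND_IOURING = 0x80
ENV['LIBEV_FLAGS'] = '4' # epoll

require 'libev_scheduler'
Fiber.set_scheduler Libev::Scheduler.new
```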
+
+## Some thoughts on the Ruby fiber scheduler interface
+
+The fiber scheduler interface is a new feature in Ruby 3.0, aimed at
+facilitating the building of fiber-based concurrent applications in Ruby. The
+current
+[specification](https://docs.ruby-lang.org/en/master/Fiber/SchedulerInterface.html)
+includes methods for:
+
+- starting a non-blocking fiber
+- waiting for an `IO` instance to become ready for reading or writing
+- sleeping for a certain time duration
+- waiting for a process to terminate
+- otherwise pausing/resuming fibers (blocking/unblocking) for use with mutexes,
+  condition variables, queues etc. (see the interface sketch just below)
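Concretely, these operations map onto hook methods that Ruby invokes on the scheduler object. The sketch below lists the Ruby 3.0 hook names with empty bodies as placeholders; it shows the shape of the interface, not this gem's implementation:

```ruby
class SketchScheduler
  def io_wait(io, events, timeout); end  # I/O readiness
  def kernel_sleep(duration = nil); end  # sleeping
  def process_wait(pid, flags); end      # waiting for a process
  def block(blocker, timeout = nil); end # pausing (mutexes, queues, ...)
  def unblock(blocker, fiber); end       # resuming, possibly from another thread
  def fiber(&block); end                 # starting a non-blocking fiber
  def close; end                         # drain remaining fibers at thread exit
end
```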
+
+However, the current design has some shortcomings that will need to be addressed
+in order for this feature to become useful. Here are some of my thoughts on this
+subject. Please do not take this as an attack on the wonderful work of the Ruby
+core developers. Most probably I'm just some random guy being wrong on the
+internet :-p.
+
+### Two kinds of fibers
+
+One of the changes made as part of the work on the fiber scheduler interface in
+Ruby 3.0 was to distinguish between two kinds of fibers: a normal, blocking
+fiber; and a non-blocking fiber, which can be used in conjunction with the fiber
+scheduler. While this was probably done for the sake of backward compatibility,
+I believe this is an error. It introduces ambiguity where previously there was
+none and makes the API more complex than it could have been.
+
+It seems to me that a more logical solution to the problem of maintaining the
+blocking behaviour by default would have been to set the non-blocking mode
+at the level of the thread, instead of the fiber. That also would have allowed
+using the main fiber (of a given thread) in a non-blocking manner (see below).
+
+### Performing blocking operations on the main fiber
+
+While I have only scratched the surface in terms of the limits of the fiber
+scheduler interface, it looks pretty clear that the main fiber (in any thread)
+cannot be used in a non-blocking manner. While fiber scheduler implementations
+can in principle use `Fiber#transfer` to switch between fibers, which will allow
+pausing and resuming the main fiber, it does not seem as if the current design
+is really conducive to that.
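The distinction, and the main-fiber limitation, can be seen in a few lines (an illustrative snippet assuming Ruby 3.0 semantics):

```ruby
Fiber.set_scheduler Libev::Scheduler.new

# In a non-blocking fiber, sleep is handed to the scheduler:
Fiber.schedule { sleep 1 }

# On the main (blocking) fiber the same call suspends the whole thread,
# and the scheduler cannot run other fibers in the meantime.
sleep 1
```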
+
+### I/O readiness
+
+In and of itself, checking for I/O readiness is nice, but it does not allow us
+to leverage the full power of io_uring on Linux or IOCP on Windows. In order to
+leverage the advantages offered by io_uring, for instance, a fiber scheduler
+should be able to do much more than just check for I/O readiness. It should be
+able, rather, to *perform* I/O operations including read/write, send/recv,
+connect and accept.
+
+This is of course no small undertaking, but the current Ruby [native I/O
+code](https://github.com/ruby/ruby/blob/master/io.c), currently at almost 14
+KLOC, is IMHO ripe for some overhauling, and maybe some separation of concerns.
+It seems to me that the API layer for the `IO` class could be separated from the
+code that does the actual reading/writing etc. This is indeed the approach I
+took with [Polyphony](https://github.com/digital-fabric/polyphony/), which
+provides the same `IO` API for developers, but performs the I/O ops using a
+libev- or io_uring-based backend. This design can then reap all of the benefits
+of using io_uring. Such an approach could also allow us to implement I/O using
+IOCP on Windows (currently we can't, because that requires files to be opened
+with `WSA_FLAG_OVERLAPPED`).
+
+This is also the reason I have decided not to release a native io_uring-backed
+fiber scheduler implementation (with code extracted from Polyphony), since I
+don't believe it can provide any real benefit in terms of performance. If I/O
+readiness is all that the fiber scheduler can do, it's probably best to just use
+a cross-platform implementation such as libev, which can then use io_uring
+behind the scenes.
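The gap can be sketched in terms of scheduler hooks. `io_wait` below is the real Ruby 3.0 hook; `io_read` is an invented name standing in for a hypothetical completion-based hook of the kind argued for above:

```ruby
# Readiness-based (Ruby 3.0): the scheduler only waits for the fd;
# the actual read is still performed afterwards by Ruby's io.c.
def io_wait(io, events, timeout)
  # suspend the fiber until io is ready, then report the ready events
end

# Completion-based (hypothetical): the scheduler performs the operation,
# e.g. by submitting an io_uring read SQE and resuming the fiber with
# the result once the completion arrives.
def io_read(io, buffer, length)
  # submit, suspend the fiber, return the number of bytes read
end
```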
+
+### Waiting for processes
+
+The problem with the current design is that the `#process_wait` method is
+expected to return an instance of `Process::Status`. Unfortunately, this class
+[cannot be
+instantiated](https://github.com/ruby/ruby/blob/master/process.c#L8678), which
+leads to a workaround using a separate thread.
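Such a workaround typically looks like the sketch below: the blocking wait is shunted onto a throwaway thread, whose `#value` call in turn goes through the scheduler's block/unblock hooks (`Process::Status.wait` is the Ruby 3.0 class method that performs a wait without needing to instantiate `Process::Status` from Ruby code):

```ruby
# a sketch of the separate-thread workaround for #process_wait
def process_wait(pid, flags)
  Thread.new { Process::Status.wait(pid, flags) }.value
end
```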
+
+Another difficulty associated with this is that, for example on libev, a child
+watcher can only be used on the default loop, which means only in the main
+thread, as the child watcher implementation is based on receiving `SIGCHLD`.
+
+An alternative solution would be to use `pidfd_open` and watch the returned fd
+for readiness, but I don't know if this can be used on OSes other than Linux.
+
+While a cross-OS solution to the latter problem is potentially not too
+difficult, the former problem is a real show-stopper. One solution might be to
+change the API such that `#process_wait` returns an array containing the pid and
+its status, for example. This can then be used to instantiate a
+`Process::Status` object somewhere inside `Process.wait`.
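In pseudo-code, the proposed variant might look as follows; `setup_child_watcher` is an invented helper standing in for whatever child-watching mechanism the backend uses:

```ruby
# pseudo-code for the proposed API change
def process_wait(pid, flags)
  fiber = Fiber.current
  setup_child_watcher(pid) { |p, status| schedule(fiber, [p, status]) }
  Fiber.yield # => e.g. [4423, 0]
end

# Process.wait would then build the Process::Status object internally,
# in C, where instantiation is permitted.
```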
+
+### On having multiple alternative fiber scheduler implementations
+
+It is unclear to me that there is really a need for multiple fiber scheduler
+implementations. It seems to me that an approach using multiple backends,
+selected according to the OS, is much more appropriate. It's not like there are
+going to be a dozen different implementations of fiber schedulers. Actually,
+libev fits really nicely here, since it already includes all those different
+backends.
+
+Besides, the term "fiber scheduler" is a bit of a misnomer, since it doesn't
+really deal with *scheduling* fibers, but rather with *performing blocking
+operations in a fiber-aware manner*. The scheduling part is in many ways trivial
+(i.e. the scheduler holds an array of fibers ready to run), but the performing
+of blocking operations is [much more
+involved](https://github.com/digital-fabric/polyphony/blob/master/ext/polyphony/backend_io_uring.c).
+
+There is of course quite a bit of interaction between the scheduling part and
+the blocking operations part, but then again, to me a more sensible design would
+have been to do everything related to scheduling inside of the Ruby core code,
+and then offload everything else to a `BlockingOperationsBackend`
+implementation. Here's what it might look like:
+
+```ruby
+# example pseudo-code
+class BlockingOperationsBackend
+  def poll(opts = {})
+    ev_run(@ev_loop)
+  end
+
+  def io_wait(io, opts)
+    fiber = Fiber.current
+    watcher = setup_watcher_for_io(io) do
+      Thread.current.schedule_fiber(fiber)
+    end
+    Fiber.yield
+    watcher.stop
+  end
+
+  ...
+end
+```
+
+The fiber scheduling part would provide a `Thread#schedule_fiber` method that
+adds the given fiber to the thread's run queue, and the thread will know when to
+call the backend's `#poll` method in order to poll for blocking operation
+completions. For example:
+
+```ruby
+# somewhere in Ruby's kischkas:
+class Thread
+  def schedule_fiber(fiber)
+    @run_queue << fiber
+  end
+
+  def run_fiber_scheduler
+    @backend.poll
+    @run_queue.each { |f| f.resume }
+  end
+end
+```
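Putting the two sketches together, a fiber-aware blocking operation would flow through the backend and the thread's run queue. The snippet below is purely hypothetical glue reusing the invented names from the two blocks above (`@backend`/`backend`, `io_wait`, `schedule_fiber`):

```ruby
# hypothetical glue code, reusing the invented names from the sketches above
def fiber_aware_read(io, maxlen)
  Thread.current.backend.io_wait(io, read: true) # suspends the calling fiber
  io.read_nonblock(maxlen) # the fd is now ready, so this will not block
end
```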
+
+It seems to me this kind of design would be much easier to implement, and would
+lead to a lot less code duplication. This design could also be extended later to
+perform all kinds of blocking operations, such as reading/writing etc., as
+discussed above.
+
+Finally, such a design could also provide a C API for people writing extensions,
+so they can rely on it whenever doing any blocking call.
data/ext/libev/Changes
CHANGED
@@ -1,8 +1,77 @@
 Revision history for libev, a high-performance and full-featured event loop.
 
+TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter.
+TODO: document EV_TSTAMP_T
+
+4.33 Wed Mar 18 13:22:29 CET 2020
+    - no changes w.r.t. 4.32.
+
+4.32 (EV only)
+    - the 4.31 timerfd code wrongly changed the priority of the signal
+      fd watcher, which is usually harmless unless signal fds are
+      also used (found via cpan tester service).
+    - the documentation wrongly claimed that user may modify fd and events
+      members in io watchers when the watcher was stopped
+      (found by b_jonas).
+    - new ev_io_modify mutator which changes only the events member,
+      which can be faster. also added ev::io::set (int events) method
+      to ev++.h.
+    - officially allow a zero events mask for io watchers. this should
+      work with older libev versions as well but was not officially
+      allowed before.
+    - do not wake up every minute when timerfd is used to detect timejumps.
+    - do not wake up every minute when periodics are disabled and we have
+      a monotonic clock.
+    - support a lot more "uncommon" compile time configurations,
+      such as ev_embed enabled but ev_timer disabled.
+    - use a start/stop wrapper class to reduce code duplication in
+      ev++.h and make it needlessly more c++-y.
+    - the linux aio backend is no longer compiled in by default.
+    - update to libecb version 0x00010008.
+
+4.31 Fri Dec 20 21:58:29 CET 2019
+    - handle backends with minimum wait time a bit better by not
+      waiting in the presence of already-expired timers
+      (behaviour reported by Felipe Gasper).
+    - new feature: use timerfd to detect timejumps quickly,
+      can be disabled with the new EVFLAG_NOTIMERFD loop flag.
+    - document EV_USE_SIGNALFD feature macro.
+
+4.30 (EV only)
+    - change non-autoconf test for __kernel_rwf_t by testing
+      LINUX_VERSION_CODE, the most direct test I could find.
+    - fix a bug in the io_uring backend that polled the wrong
+      backend fd, causing it to not work in many cases.
+
+4.29 (EV only)
+    - add io uring autoconf and non-autoconf detection.
+    - disable io_uring when some header files are too old.
+
+4.28 (EV only)
+    - linuxaio backend resulted in random memory corruption
+      when loop is forked.
+    - linuxaio backend might have tried to cancel an iocb
+      multiple times (was unable to trigger this).
+    - linuxaio backend now employs a generation counter to
+      avoid handling spurious events from cancelled requests.
+    - io_cancel can return EINTR, deal with it. also, assume
+      io_submit also returns EINTR.
+    - fix some other minor bugs in linuxaio backend.
+    - ev_tstamp type can now be overriden by defining EV_TSTAMP_T.
+    - cleanup: replace expect_true/false and noinline by their
+      libecb counterparts.
+    - move syscall infrastructure from ev_linuxaio.c to ev.c.
+    - prepare io_uring integration.
+    - tweak ev_floor.
+    - epoll, poll, win32 Sleep and other places that use millisecond
+      reslution now all try to round up times.
+    - solaris port backend didn't compile.
+    - abstract time constants into their macros, for more flexibility.
+
 4.27 Thu Jun 27 22:43:44 CEST 2019
-    - linux aio backend almost
+    - linux aio backend almost completely rewritten to work around its
       limitations.
+    - linux aio backend now requires linux 4.19+.
     - epoll backend now mandatory for linux aio backend.
     - fail assertions more aggressively on invalid fd's detected
       in the event loop, do not just silently fd_kill in case of
@@ -22,7 +91,7 @@ Revision history for libev, a high-performance and full-featured event loop.
 4.25 Fri Dec 21 07:49:20 CET 2018
     - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
       (EV_THROW still provided) and now uses noexcept on C++11 or newer.
-    - move the darwin select workaround
+    - move the darwin select workaround higher in ev.c, as newer versions of
       darwin managed to break their broken select even more.
     - ANDROID => __ANDROID__ (reported by enh@google.com).
     - disable epoll_create1 on android because it has broken header files
data/ext/libev/ev.c
CHANGED
@@ -116,7 +116,7 @@
|
|
116
116
|
# undef EV_USE_POLL
|
117
117
|
# define EV_USE_POLL 0
|
118
118
|
# endif
|
119
|
-
|
119
|
+
|
120
120
|
# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
|
121
121
|
# ifndef EV_USE_EPOLL
|
122
122
|
# define EV_USE_EPOLL EV_FEATURE_BACKENDS
|
@@ -125,16 +125,25 @@
|
|
125
125
|
# undef EV_USE_EPOLL
|
126
126
|
# define EV_USE_EPOLL 0
|
127
127
|
# endif
|
128
|
-
|
128
|
+
|
129
129
|
# if HAVE_LINUX_AIO_ABI_H
|
130
130
|
# ifndef EV_USE_LINUXAIO
|
131
|
-
# define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
|
131
|
+
# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
|
132
132
|
# endif
|
133
133
|
# else
|
134
134
|
# undef EV_USE_LINUXAIO
|
135
135
|
# define EV_USE_LINUXAIO 0
|
136
136
|
# endif
|
137
|
-
|
137
|
+
|
138
|
+
# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
|
139
|
+
# ifndef EV_USE_IOURING
|
140
|
+
# define EV_USE_IOURING EV_FEATURE_BACKENDS
|
141
|
+
# endif
|
142
|
+
# else
|
143
|
+
# undef EV_USE_IOURING
|
144
|
+
# define EV_USE_IOURING 0
|
145
|
+
# endif
|
146
|
+
|
138
147
|
# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
|
139
148
|
# ifndef EV_USE_KQUEUE
|
140
149
|
# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
|
@@ -143,7 +152,7 @@
|
|
143
152
|
# undef EV_USE_KQUEUE
|
144
153
|
# define EV_USE_KQUEUE 0
|
145
154
|
# endif
|
146
|
-
|
155
|
+
|
147
156
|
# if HAVE_PORT_H && HAVE_PORT_CREATE
|
148
157
|
# ifndef EV_USE_PORT
|
149
158
|
# define EV_USE_PORT EV_FEATURE_BACKENDS
|
@@ -179,7 +188,16 @@
|
|
179
188
|
# undef EV_USE_EVENTFD
|
180
189
|
# define EV_USE_EVENTFD 0
|
181
190
|
# endif
|
182
|
-
|
191
|
+
|
192
|
+
# if HAVE_SYS_TIMERFD_H
|
193
|
+
# ifndef EV_USE_TIMERFD
|
194
|
+
# define EV_USE_TIMERFD EV_FEATURE_OS
|
195
|
+
# endif
|
196
|
+
# else
|
197
|
+
# undef EV_USE_TIMERFD
|
198
|
+
# define EV_USE_TIMERFD 0
|
199
|
+
# endif
|
200
|
+
|
183
201
|
#endif
|
184
202
|
|
185
203
|
/* OS X, in its infinite idiocy, actually HARDCODES
|
@@ -335,6 +353,22 @@
|
|
335
353
|
# define EV_USE_PORT 0
|
336
354
|
#endif
|
337
355
|
|
356
|
+
#ifndef EV_USE_LINUXAIO
|
357
|
+
# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
|
358
|
+
# define EV_USE_LINUXAIO 0 /* was: 1, always off by default */
|
359
|
+
# else
|
360
|
+
# define EV_USE_LINUXAIO 0
|
361
|
+
# endif
|
362
|
+
#endif
|
363
|
+
|
364
|
+
#ifndef EV_USE_IOURING
|
365
|
+
# if __linux /* later checks might disable again */
|
366
|
+
# define EV_USE_IOURING 1
|
367
|
+
# else
|
368
|
+
# define EV_USE_IOURING 0
|
369
|
+
# endif
|
370
|
+
#endif
|
371
|
+
|
338
372
|
#ifndef EV_USE_INOTIFY
|
339
373
|
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
|
340
374
|
# define EV_USE_INOTIFY EV_FEATURE_OS
|
@@ -367,6 +401,14 @@
|
|
367
401
|
# endif
|
368
402
|
#endif
|
369
403
|
|
404
|
+
#ifndef EV_USE_TIMERFD
|
405
|
+
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
|
406
|
+
# define EV_USE_TIMERFD EV_FEATURE_OS
|
407
|
+
# else
|
408
|
+
# define EV_USE_TIMERFD 0
|
409
|
+
# endif
|
410
|
+
#endif
|
411
|
+
|
370
412
|
#if 0 /* debugging */
|
371
413
|
# define EV_VERIFY 3
|
372
414
|
# define EV_USE_4HEAP 1
|
@@ -409,6 +451,7 @@
|
|
409
451
|
# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
|
410
452
|
# undef EV_USE_MONOTONIC
|
411
453
|
# define EV_USE_MONOTONIC 1
|
454
|
+
# define EV_NEED_SYSCALL 1
|
412
455
|
# else
|
413
456
|
# undef EV_USE_CLOCK_SYSCALL
|
414
457
|
# define EV_USE_CLOCK_SYSCALL 0
|
@@ -432,6 +475,14 @@
|
|
432
475
|
# define EV_USE_INOTIFY 0
|
433
476
|
#endif
|
434
477
|
|
478
|
+
#if __linux && EV_USE_IOURING
|
479
|
+
# include <linux/version.h>
|
480
|
+
# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
|
481
|
+
# undef EV_USE_IOURING
|
482
|
+
# define EV_USE_IOURING 0
|
483
|
+
# endif
|
484
|
+
#endif
|
485
|
+
|
435
486
|
#if !EV_USE_NANOSLEEP
|
436
487
|
/* hp-ux has it in sys/time.h, which we unconditionally include above */
|
437
488
|
# if !defined _WIN32 && !defined __hpux
|
@@ -441,12 +492,29 @@
|
|
441
492
|
|
442
493
|
#if EV_USE_LINUXAIO
|
443
494
|
# include <sys/syscall.h>
|
444
|
-
# if
|
495
|
+
# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
|
496
|
+
# define EV_NEED_SYSCALL 1
|
497
|
+
# else
|
445
498
|
# undef EV_USE_LINUXAIO
|
446
499
|
# define EV_USE_LINUXAIO 0
|
447
500
|
# endif
|
448
501
|
#endif
|
449
502
|
|
503
|
+
#if EV_USE_IOURING
|
504
|
+
# include <sys/syscall.h>
|
505
|
+
# if !SYS_io_uring_setup && __linux && !__alpha
|
506
|
+
# define SYS_io_uring_setup 425
|
507
|
+
# define SYS_io_uring_enter 426
|
508
|
+
# define SYS_io_uring_wregister 427
|
509
|
+
# endif
|
510
|
+
# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
|
511
|
+
# define EV_NEED_SYSCALL 1
|
512
|
+
# else
|
513
|
+
# undef EV_USE_IOURING
|
514
|
+
# define EV_USE_IOURING 0
|
515
|
+
# endif
|
516
|
+
#endif
|
517
|
+
|
450
518
|
#if EV_USE_INOTIFY
|
451
519
|
# include <sys/statfs.h>
|
452
520
|
# include <sys/inotify.h>
|
@@ -458,7 +526,7 @@
|
|
458
526
|
#endif
|
459
527
|
|
460
528
|
#if EV_USE_EVENTFD
|
461
|
-
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
|
529
|
+
/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
|
462
530
|
# include <stdint.h>
|
463
531
|
# ifndef EFD_NONBLOCK
|
464
532
|
# define EFD_NONBLOCK O_NONBLOCK
|
@@ -474,7 +542,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
|
|
474
542
|
#endif
|
475
543
|
|
476
544
|
#if EV_USE_SIGNALFD
|
477
|
-
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
|
545
|
+
/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
|
478
546
|
# include <stdint.h>
|
479
547
|
# ifndef SFD_NONBLOCK
|
480
548
|
# define SFD_NONBLOCK O_NONBLOCK
|
@@ -486,7 +554,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
|
|
486
554
|
# define SFD_CLOEXEC 02000000
|
487
555
|
# endif
|
488
556
|
# endif
|
489
|
-
EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
|
557
|
+
EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
|
490
558
|
|
491
559
|
struct signalfd_siginfo
|
492
560
|
{
|
@@ -495,7 +563,17 @@ struct signalfd_siginfo
|
|
495
563
|
};
|
496
564
|
#endif
|
497
565
|
|
498
|
-
|
566
|
+
/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
|
567
|
+
#if EV_USE_TIMERFD
|
568
|
+
# include <sys/timerfd.h>
|
569
|
+
/* timerfd is only used for periodics */
|
570
|
+
# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
|
571
|
+
# undef EV_USE_TIMERFD
|
572
|
+
# define EV_USE_TIMERFD 0
|
573
|
+
# endif
|
574
|
+
#endif
|
575
|
+
|
576
|
+
/*****************************************************************************/
|
499
577
|
|
500
578
|
#if EV_VERIFY >= 3
|
501
579
|
# define EV_FREQUENT_CHECK ev_verify (EV_A)
|
@@ -510,18 +588,34 @@ struct signalfd_siginfo
|
|
510
588
|
#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
|
511
589
|
/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
|
512
590
|
|
513
|
-
#define MIN_TIMEJUMP
|
514
|
-
#define MAX_BLOCKTIME
|
591
|
+
#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
|
592
|
+
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
|
593
|
+
#define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
|
594
|
+
|
595
|
+
/* find a portable timestamp that is "always" in the future but fits into time_t.
|
596
|
+
* this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
|
597
|
+
* and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
|
598
|
+
#define EV_TSTAMP_HUGE \
|
599
|
+
(sizeof (time_t) >= 8 ? 10000000000000. \
|
600
|
+
: 0 < (time_t)4294967295 ? 4294967295. \
|
601
|
+
: 2147483647.) \
|
515
602
|
|
516
|
-
#
|
517
|
-
#define
|
603
|
+
#ifndef EV_TS_CONST
|
604
|
+
# define EV_TS_CONST(nv) nv
|
605
|
+
# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
|
606
|
+
# define EV_TS_FROM_USEC(us) us * 1e-6
|
607
|
+
# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
|
608
|
+
# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
|
609
|
+
# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
|
610
|
+
# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
|
611
|
+
#endif
|
518
612
|
|
519
613
|
/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
|
520
614
|
/* ECB.H BEGIN */
|
521
615
|
/*
|
522
616
|
* libecb - http://software.schmorp.de/pkg/libecb
|
523
617
|
*
|
524
|
-
* Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
|
618
|
+
* Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
|
525
619
|
* Copyright (©) 2011 Emanuele Giaquinta
|
526
620
|
* All rights reserved.
|
527
621
|
*
|
@@ -562,15 +656,23 @@ struct signalfd_siginfo
|
|
562
656
|
#define ECB_H
|
563
657
|
|
564
658
|
/* 16 bits major, 16 bits minor */
|
565
|
-
#define ECB_VERSION
|
659
|
+
#define ECB_VERSION 0x00010008
|
566
660
|
|
567
|
-
#
|
661
|
+
#include <string.h> /* for memcpy */
|
662
|
+
|
663
|
+
#if defined (_WIN32) && !defined (__MINGW32__)
|
568
664
|
typedef signed char int8_t;
|
569
665
|
typedef unsigned char uint8_t;
|
666
|
+
typedef signed char int_fast8_t;
|
667
|
+
typedef unsigned char uint_fast8_t;
|
570
668
|
typedef signed short int16_t;
|
571
669
|
typedef unsigned short uint16_t;
|
670
|
+
typedef signed int int_fast16_t;
|
671
|
+
typedef unsigned int uint_fast16_t;
|
572
672
|
typedef signed int int32_t;
|
573
673
|
typedef unsigned int uint32_t;
|
674
|
+
typedef signed int int_fast32_t;
|
675
|
+
typedef unsigned int uint_fast32_t;
|
574
676
|
#if __GNUC__
|
575
677
|
typedef signed long long int64_t;
|
576
678
|
typedef unsigned long long uint64_t;
|
@@ -578,6 +680,8 @@ struct signalfd_siginfo
|
|
578
680
|
typedef signed __int64 int64_t;
|
579
681
|
typedef unsigned __int64 uint64_t;
|
580
682
|
#endif
|
683
|
+
typedef int64_t int_fast64_t;
|
684
|
+
typedef uint64_t uint_fast64_t;
|
581
685
|
#ifdef _WIN64
|
582
686
|
#define ECB_PTRSIZE 8
|
583
687
|
typedef uint64_t uintptr_t;
|
@@ -599,6 +703,14 @@ struct signalfd_siginfo
|
|
599
703
|
#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
|
600
704
|
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
|
601
705
|
|
706
|
+
#ifndef ECB_OPTIMIZE_SIZE
|
707
|
+
#if __OPTIMIZE_SIZE__
|
708
|
+
#define ECB_OPTIMIZE_SIZE 1
|
709
|
+
#else
|
710
|
+
#define ECB_OPTIMIZE_SIZE 0
|
711
|
+
#endif
|
712
|
+
#endif
|
713
|
+
|
602
714
|
/* work around x32 idiocy by defining proper macros */
|
603
715
|
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
|
604
716
|
#if _ILP32
|
@@ -1114,6 +1226,44 @@ ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { retu
|
|
1114
1226
|
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
|
1115
1227
|
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
|
1116
1228
|
|
1229
|
+
#if ECB_CPP
|
1230
|
+
|
1231
|
+
inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
|
1232
|
+
inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
|
1233
|
+
inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
|
1234
|
+
inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
|
1235
|
+
|
1236
|
+
inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
|
1237
|
+
inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
|
1238
|
+
inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
|
1239
|
+
inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
|
1240
|
+
|
1241
|
+
inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
|
1242
|
+
inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
|
1243
|
+
inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
|
1244
|
+
inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
|
1245
|
+
|
1246
|
+
inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
|
1247
|
+
inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
|
1248
|
+
inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
|
1249
|
+
inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
|
1250
|
+
|
1251
|
+
inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
|
1252
|
+
inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
|
1253
|
+
inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
|
1254
|
+
|
1255
|
+
inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
|
1256
|
+
inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
|
1257
|
+
inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
|
1258
|
+
inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
|
1259
|
+
|
1260
|
+
inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
|
1261
|
+
inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
|
1262
|
+
inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
|
1263
|
+
inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
|
1264
|
+
|
1265
|
+
#endif
|
1266
|
+
|
1117
1267
|
#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
|
1118
1268
|
#if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
|
1119
1269
|
#define ecb_bswap16(x) __builtin_bswap16 (x)
|
@@ -1194,6 +1344,78 @@ ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_he
|
|
1194
1344
|
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
|
1195
1345
|
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
|
1196
1346
|
|
1347
|
+
/*****************************************************************************/
|
1348
|
+
/* unaligned load/store */
|
1349
|
+
|
1350
|
+
ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
|
1351
|
+
ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
|
1352
|
+
ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
|
1353
|
+
|
1354
|
+
ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
|
1355
|
+
ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
|
1356
|
+
ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
|
1357
|
+
|
1358
|
+
ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1359
|
+
ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1360
|
+
ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1361
|
+
|
1362
|
+
ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
|
1363
|
+
ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
|
1364
|
+
ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
|
1365
|
+
|
1366
|
+
ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
|
1367
|
+
ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
|
1368
|
+
ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
|
1369
|
+
|
1370
|
+
ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
|
1371
|
+
ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
|
1372
|
+
ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
|
1373
|
+
|
1374
|
+
ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
|
1375
|
+
ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
|
1376
|
+
ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
|
1377
|
+
|
1378
|
+
ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1379
|
+
ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1380
|
+
ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
|
1381
|
+
|
1382
|
+
ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
|
1383
|
+
ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
|
1384
|
+
ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
|
1385
|
+
|
1386
|
+
ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
|
1387
|
+
ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
|
1388
|
+
ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
|
1389
|
+
|
1390
|
+
#if ECB_CPP
|
1391
|
+
|
1392
|
+
inline uint8_t ecb_bswap (uint8_t v) { return v; }
|
1393
|
+
inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
|
1394
|
+
inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
|
1395
|
+
inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
|
1396
|
+
|
1397
|
+
template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
|
1398
|
+
template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
|
1399
|
+
template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
|
1400
|
+
template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
|
1401
|
+
template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
|
1402
|
+
template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
|
1403
|
+
template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
|
1404
|
+
template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
|
1405
|
+
|
1406
|
+
template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
|
1407
|
+
template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
|
1408
|
+
template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
|
1409
|
+
template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
|
1410
|
+
template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
|
1411
|
+
template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
|
1412
|
+
template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
|
1413
|
+
template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
|
1414
|
+
|
1415
|
+
#endif
|
1416
|
+
|
1417
|
+
/*****************************************************************************/
|
1418
|
+
|
1197
1419
|
#if ECB_GCC_VERSION(3,0) || ECB_C99
|
1198
1420
|
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
|
1199
1421
|
#else
|
@@ -1227,6 +1449,8 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
|
|
1227
1449
|
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
|
1228
1450
|
#endif
|
1229
1451
|
|
1452
|
+
/*****************************************************************************/
|
1453
|
+
|
1230
1454
|
ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
|
1231
1455
|
ecb_function_ ecb_const uint32_t
|
1232
1456
|
ecb_binary16_to_binary32 (uint32_t x)
|
@@ -1344,7 +1568,6 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1344
1568
|
|| (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
|
1345
1569
|
|| defined __aarch64__
|
1346
1570
|
#define ECB_STDFP 1
|
1347
|
-
#include <string.h> /* for memcpy */
|
1348
1571
|
#else
|
1349
1572
|
#define ECB_STDFP 0
|
1350
1573
|
#endif
|
@@ -1539,7 +1762,7 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1539
1762
|
#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
|
1540
1763
|
/* if your architecture doesn't need memory fences, e.g. because it is
|
1541
1764
|
* single-cpu/core, or if you use libev in a project that doesn't use libev
|
1542
|
-
* from multiple threads, then you can define
|
1765
|
+
* from multiple threads, then you can define ECB_NO_THREADS when compiling
|
1543
1766
|
* libev, in which cases the memory fences become nops.
|
1544
1767
|
* alternatively, you can remove this #error and link against libpthread,
|
1545
1768
|
* which will then provide the memory fences.
|
@@ -1553,18 +1776,80 @@ ecb_binary32_to_binary16 (uint32_t x)
|
|
1553
1776
|
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
|
1554
1777
|
#endif
|
1555
1778
|
|
1556
|
-
#define expect_false(cond) ecb_expect_false (cond)
|
1557
|
-
#define expect_true(cond) ecb_expect_true (cond)
|
1558
|
-
#define noinline ecb_noinline
|
1559
|
-
|
1560
1779
|
#define inline_size ecb_inline
|
1561
1780
|
|
1562
1781
|
#if EV_FEATURE_CODE
|
1563
1782
|
# define inline_speed ecb_inline
|
1564
1783
|
#else
|
1565
|
-
# define inline_speed
|
1784
|
+
# define inline_speed ecb_noinline static
|
1566
1785
|
#endif
|
1567
1786
|
|
1787
|
+
/*****************************************************************************/
|
1788
|
+
/* raw syscall wrappers */
|
1789
|
+
|
1790
|
+
#if EV_NEED_SYSCALL
|
1791
|
+
|
1792
|
+
#include <sys/syscall.h>
|
1793
|
+
|
1794
|
+
/*
|
1795
|
+
* define some syscall wrappers for common architectures
|
1796
|
+
* this is mostly for nice looks during debugging, not performance.
|
1797
|
+
* our syscalls return < 0, not == -1, on error. which is good
|
1798
|
+
* enough for linux aio.
|
1799
|
+
* TODO: arm is also common nowadays, maybe even mips and x86
|
1800
|
+
* TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
|
1801
|
+
*/
|
1802
|
+
#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
|
1803
|
+
/* the costly errno access probably kills this for size optimisation */
|
1804
|
+
|
1805
|
+
#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
|
1806
|
+
({ \
|
1807
|
+
long res; \
|
1808
|
+
register unsigned long r6 __asm__ ("r9" ); \
|
1809
|
+
register unsigned long r5 __asm__ ("r8" ); \
|
1810
|
+
register unsigned long r4 __asm__ ("r10"); \
|
1811
|
+
register unsigned long r3 __asm__ ("rdx"); \
|
1812
|
+
register unsigned long r2 __asm__ ("rsi"); \
|
1813
|
+
register unsigned long r1 __asm__ ("rdi"); \
|
1814
|
+
if (narg >= 6) r6 = (unsigned long)(arg6); \
|
1815
|
+
if (narg >= 5) r5 = (unsigned long)(arg5); \
|
1816
|
+
if (narg >= 4) r4 = (unsigned long)(arg4); \
|
1817
|
+
if (narg >= 3) r3 = (unsigned long)(arg3); \
|
1818
|
+
if (narg >= 2) r2 = (unsigned long)(arg2); \
|
1819
|
+
if (narg >= 1) r1 = (unsigned long)(arg1); \
|
1820
|
+
__asm__ __volatile__ ( \
|
1821
|
+
"syscall\n\t" \
|
1822
|
+
: "=a" (res) \
|
1823
|
+
: "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
|
1824
|
+
: "cc", "r11", "cx", "memory"); \
|
1825
|
+
errno = -res; \
|
1826
|
+
res; \
|
1827
|
+
})
|
1828
|
+
|
1829
|
+
#endif
|
1830
|
+
|
1831
|
+
#ifdef ev_syscall
|
1832
|
+
#define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
|
1833
|
+
#define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
|
1834
|
+
#define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
|
1835
|
+
#define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
|
1836
|
+
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
|
1837
|
+
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
|
1838
|
+
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
|
1839
|
+
#else
|
1840
|
+
#define ev_syscall0(nr) syscall (nr)
|
1841
|
+
#define ev_syscall1(nr,arg1) syscall (nr, arg1)
|
1842
|
+
#define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
|
1843
|
+
#define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
|
1844
|
+
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
|
1845
|
+
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
|
1846
|
+
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
|
1847
|
+
#endif
|
1848
|
+
|
1849
|
+
#endif
|
1850
|
+
|
1851
|
+
/*****************************************************************************/
|
1852
|
+
|
1568
1853
|
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
|
1569
1854
|
|
1570
1855
|
#if EV_MINPRI == EV_MAXPRI
|
@@ -1622,7 +1907,7 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
|
|
1622
1907
|
#include <float.h>
|
1623
1908
|
|
1624
1909
|
/* a floor() replacement function, should be independent of ev_tstamp type */
|
1625
|
-
|
1910
|
+
ecb_noinline
|
1626
1911
|
static ev_tstamp
|
1627
1912
|
ev_floor (ev_tstamp v)
|
1628
1913
|
{
|
@@ -1633,26 +1918,26 @@ ev_floor (ev_tstamp v)
|
|
1633
1918
|
const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
|
1634
1919
|
#endif
|
1635
1920
|
|
1636
|
-
/*
|
1637
|
-
if (
|
1921
|
+
/* special treatment for negative arguments */
|
1922
|
+
if (ecb_expect_false (v < 0.))
|
1923
|
+
{
|
1924
|
+
ev_tstamp f = -ev_floor (-v);
|
1925
|
+
|
1926
|
+
return f - (f == v ? 0 : 1);
|
1927
|
+
}
|
1928
|
+
|
1929
|
+
/* argument too large for an unsigned long? then reduce it */
|
1930
|
+
if (ecb_expect_false (v >= shift))
|
1638
1931
|
{
|
1639
1932
|
ev_tstamp f;
|
1640
1933
|
|
1641
1934
|
if (v == v - 1.)
|
1642
|
-
return v; /* very large
|
1935
|
+
return v; /* very large numbers are assumed to be integer */
|
1643
1936
|
|
1644
1937
|
f = shift * ev_floor (v * (1. / shift));
|
1645
1938
|
return f + ev_floor (v - f);
|
1646
1939
|
}
|
1647
1940
|
|
1648
|
-
/* special treatment for negative args? */
|
1649
|
-
if (expect_false (v < 0.))
|
1650
|
-
{
|
1651
|
-
ev_tstamp f = -ev_floor (-v);
|
1652
|
-
|
1653
|
-
return f - (f == v ? 0 : 1);
|
1654
|
-
}
|
1655
|
-
|
1656
1941
|
/* fits into an unsigned long */
|
1657
1942
|
return (unsigned long)v;
|
1658
1943
|
}
|
@@ -1665,7 +1950,7 @@ ev_floor (ev_tstamp v)
|
|
1665
1950
|
# include <sys/utsname.h>
|
1666
1951
|
#endif
|
1667
1952
|
|
1668
|
-
|
1953
|
+
ecb_noinline ecb_cold
|
1669
1954
|
static unsigned int
|
1670
1955
|
ev_linux_version (void)
|
1671
1956
|
{
|
@@ -1705,7 +1990,7 @@ ev_linux_version (void)
|
|
1705
1990
|
/*****************************************************************************/
|
1706
1991
|
|
1707
1992
|
#if EV_AVOID_STDIO
|
1708
|
-
|
1993
|
+
ecb_noinline ecb_cold
|
1709
1994
|
static void
|
1710
1995
|
ev_printerr (const char *msg)
|
1711
1996
|
{
|
@@ -1722,7 +2007,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
|
|
1722
2007
|
syserr_cb = cb;
|
1723
2008
|
}
|
1724
2009
|
|
1725
|
-
|
2010
|
+
ecb_noinline ecb_cold
|
1726
2011
|
static void
|
1727
2012
|
ev_syserr (const char *msg)
|
1728
2013
|
{
|
@@ -1804,7 +2089,7 @@ typedef struct
|
|
1804
2089
|
unsigned char events; /* the events watched for */
|
1805
2090
|
unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
|
1806
2091
|
unsigned char emask; /* some backends store the actual kernel mask in here */
|
1807
|
-
unsigned char
|
2092
|
+
unsigned char eflags; /* flags field for use by backends */
|
1808
2093
|
#if EV_USE_EPOLL
|
1809
2094
|
unsigned int egen; /* generation counter to counter epoll bugs */
|
1810
2095
|
#endif
|
@@ -1868,7 +2153,7 @@ typedef struct
|
|
1868
2153
|
|
1869
2154
|
#else
|
1870
2155
|
|
1871
|
-
EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
|
2156
|
+
EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
|
1872
2157
|
#define VAR(name,decl) static decl;
|
1873
2158
|
#include "ev_vars.h"
|
1874
2159
|
#undef VAR
|
@@ -1878,8 +2163,8 @@ typedef struct
|
|
1878
2163
|
#endif
|
1879
2164
|
|
1880
2165
|
#if EV_FEATURE_API
|
1881
|
-
# define EV_RELEASE_CB if (
|
1882
|
-
# define EV_ACQUIRE_CB if (
|
2166
|
+
# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
|
2167
|
+
# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
|
1883
2168
|
# define EV_INVOKE_PENDING invoke_cb (EV_A)
|
1884
2169
|
#else
|
1885
2170
|
# define EV_RELEASE_CB (void)0
|
@@ -1896,17 +2181,19 @@ ev_tstamp
|
|
1896
2181
|
ev_time (void) EV_NOEXCEPT
|
1897
2182
|
{
|
1898
2183
|
#if EV_USE_REALTIME
|
1899
|
-
if (
|
2184
|
+
if (ecb_expect_true (have_realtime))
|
1900
2185
|
{
|
1901
2186
|
struct timespec ts;
|
1902
2187
|
clock_gettime (CLOCK_REALTIME, &ts);
|
1903
|
-
return
|
2188
|
+
return EV_TS_GET (ts);
|
1904
2189
|
}
|
1905
2190
|
#endif
|
1906
2191
|
|
1907
|
-
|
1908
|
-
|
1909
|
-
|
2192
|
+
{
|
2193
|
+
struct timeval tv;
|
2194
|
+
gettimeofday (&tv, 0);
|
2195
|
+
return EV_TV_GET (tv);
|
2196
|
+
}
|
1910
2197
|
}
|
1911
2198
|
#endif
|
1912
2199
|
|
@@ -1914,11 +2201,11 @@ inline_size ev_tstamp
|
|
1914
2201
|
get_clock (void)
|
1915
2202
|
{
|
1916
2203
|
#if EV_USE_MONOTONIC
|
1917
|
-
if (
|
2204
|
+
if (ecb_expect_true (have_monotonic))
|
1918
2205
|
{
|
1919
2206
|
struct timespec ts;
|
1920
2207
|
clock_gettime (CLOCK_MONOTONIC, &ts);
|
1921
|
-
return
|
2208
|
+
return EV_TS_GET (ts);
|
1922
2209
|
}
|
1923
2210
|
#endif
|
1924
2211
|
|
@@ -1936,7 +2223,7 @@ ev_now (EV_P) EV_NOEXCEPT
|
|
1936
2223
|
void
|
1937
2224
|
ev_sleep (ev_tstamp delay) EV_NOEXCEPT
|
1938
2225
|
{
|
1939
|
-
if (delay > 0.)
|
2226
|
+
if (delay > EV_TS_CONST (0.))
|
1940
2227
|
{
|
1941
2228
|
#if EV_USE_NANOSLEEP
|
1942
2229
|
struct timespec ts;
|
@@ -1946,7 +2233,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
|
|
1946
2233
|
#elif defined _WIN32
|
1947
2234
|
/* maybe this should round up, as ms is very low resolution */
|
1948
2235
|
/* compared to select (µs) or nanosleep (ns) */
|
1949
|
-
Sleep ((unsigned long)(delay
|
2236
|
+
Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
|
1950
2237
|
#else
|
1951
2238
|
struct timeval tv;
|
1952
2239
|
|
@@ -1986,7 +2273,7 @@ array_nextsize (int elem, int cur, int cnt)
|
|
1986
2273
|
return ncur;
|
1987
2274
|
}
|
1988
2275
|
|
1989
|
-
|
2276
|
+
ecb_noinline ecb_cold
|
1990
2277
|
static void *
|
1991
2278
|
array_realloc (int elem, void *base, int *cur, int cnt)
|
1992
2279
|
{
|
@@ -2000,7 +2287,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
|
|
2000
2287
|
memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
|
2001
2288
|
|
2002
2289
|
#define array_needsize(type,base,cur,cnt,init) \
|
2003
|
-
if (
|
2290
|
+
if (ecb_expect_false ((cnt) > (cur))) \
|
2004
2291
|
{ \
|
2005
2292
|
ecb_unused int ocur_ = (cur); \
|
2006
2293
|
(base) = (type *)array_realloc \
|
@@ -2024,20 +2311,20 @@ array_realloc (int elem, void *base, int *cur, int cnt)
|
|
2024
2311
|
/*****************************************************************************/
|
2025
2312
|
|
2026
2313
|
/* dummy callback for pending events */
|
2027
|
-
|
2314
|
+
ecb_noinline
|
2028
2315
|
static void
|
2029
2316
|
pendingcb (EV_P_ ev_prepare *w, int revents)
|
2030
2317
|
{
|
2031
2318
|
}
|
2032
2319
|
|
2033
|
-
|
2320
|
+
ecb_noinline
|
2034
2321
|
void
|
2035
2322
|
ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
|
2036
2323
|
{
|
2037
2324
|
W w_ = (W)w;
|
2038
2325
|
int pri = ABSPRI (w_);
|
2039
2326
|
|
2040
|
-
if (
|
2327
|
+
if (ecb_expect_false (w_->pending))
|
2041
2328
|
pendings [pri][w_->pending - 1].events |= revents;
|
2042
2329
|
else
|
2043
2330
|
{
|
@@ -2098,7 +2385,7 @@ fd_event (EV_P_ int fd, int revents)
|
|
2098
2385
|
{
|
2099
2386
|
ANFD *anfd = anfds + fd;
|
2100
2387
|
|
2101
|
-
if (
|
2388
|
+
if (ecb_expect_true (!anfd->reify))
|
2102
2389
|
fd_event_nocheck (EV_A_ fd, revents);
|
2103
2390
|
}
|
2104
2391
|
|
@@ -2116,8 +2403,20 @@ fd_reify (EV_P)
|
|
2116
2403
|
{
|
2117
2404
|
int i;
|
2118
2405
|
|
2406
|
+
/* most backends do not modify the fdchanges list in backend_modfiy.
|
2407
|
+
* except io_uring, which has fixed-size buffers which might force us
|
2408
|
+
* to handle events in backend_modify, causing fdchanges to be amended,
|
2409
|
+
* which could result in an endless loop.
|
2410
|
+
* to avoid this, we do not dynamically handle fds that were added
|
2411
|
+
* during fd_reify. that means that for those backends, fdchangecnt
|
2412
|
+
* might be non-zero during poll, which must cause them to not block.
|
2413
|
+
* to not put too much of a burden on other backends, this detail
|
2414
|
+
* needs to be handled in the backend.
|
2415
|
+
*/
|
2416
|
+
int changecnt = fdchangecnt;
|
2417
|
+
|
2119
2418
|
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
|
2120
|
-
for (i = 0; i <
|
2419
|
+
for (i = 0; i < changecnt; ++i)
|
2121
2420
|
{
|
2122
2421
|
int fd = fdchanges [i];
|
2123
2422
|
ANFD *anfd = anfds + fd;
|
@@ -2141,7 +2440,7 @@ fd_reify (EV_P)
|
|
2141
2440
|
}
|
2142
2441
|
#endif
|
2143
2442
|
|
2144
|
-
for (i = 0; i <
|
2443
|
+
for (i = 0; i < changecnt; ++i)
|
2145
2444
|
{
|
2146
2445
|
int fd = fdchanges [i];
|
2147
2446
|
ANFD *anfd = anfds + fd;
|
@@ -2152,7 +2451,7 @@ fd_reify (EV_P)
|
|
2152
2451
|
|
2153
2452
|
anfd->reify = 0;
|
2154
2453
|
|
2155
|
-
/*if (
|
2454
|
+
/*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
|
2156
2455
|
{
|
2157
2456
|
anfd->events = 0;
|
2158
2457
|
|
@@ -2167,7 +2466,14 @@ fd_reify (EV_P)
|
|
2167
2466
|
backend_modify (EV_A_ fd, o_events, anfd->events);
|
2168
2467
|
}
|
2169
2468
|
|
2170
|
-
fdchangecnt
|
2469
|
+
/* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
|
2470
|
+
* this is a rare case (see beginning comment in this function), so we copy them to the
|
2471
|
+
* front and hope the backend handles this case.
|
2472
|
+
*/
|
2473
|
+
if (ecb_expect_false (fdchangecnt != changecnt))
|
2474
|
+
memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
|
2475
|
+
|
2476
|
+
fdchangecnt -= changecnt;
|
2171
2477
|
}
|
2172
2478
|
|
2173
2479
|
/* something about the given fd changed */
|
@@ -2176,9 +2482,9 @@ void
|
|
2176
2482
|
fd_change (EV_P_ int fd, int flags)
|
2177
2483
|
{
|
2178
2484
|
unsigned char reify = anfds [fd].reify;
|
2179
|
-
anfds [fd].reify
|
2485
|
+
anfds [fd].reify = reify | flags;
|
2180
2486
|
|
2181
|
-
if (
|
2487
|
+
if (ecb_expect_true (!reify))
|
2182
2488
|
{
|
2183
2489
|
++fdchangecnt;
|
2184
2490
|
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
|
@@ -2211,7 +2517,7 @@ fd_valid (int fd)
|
|
2211
2517
|
}
|
2212
2518
|
|
2213
2519
|
/* called on EBADF to verify fds */
|
2214
|
-
|
2520
|
+
ecb_noinline ecb_cold
|
2215
2521
|
static void
|
2216
2522
|
fd_ebadf (EV_P)
|
2217
2523
|
{
|
@@ -2224,7 +2530,7 @@ fd_ebadf (EV_P)
|
|
2224
2530
|
}
|
2225
2531
|
|
2226
2532
|
/* called on ENOMEM in select/poll to kill some fds and retry */
|
2227
|
-
|
2533
|
+
ecb_noinline ecb_cold
|
2228
2534
|
static void
|
2229
2535
|
fd_enomem (EV_P)
|
2230
2536
|
{
|
@@ -2239,7 +2545,7 @@ fd_enomem (EV_P)
|
|
2239
2545
|
}
|
2240
2546
|
|
2241
2547
|
/* usually called after fork if backend needs to re-arm all fds from scratch */
|
2242
|
-
|
2548
|
+
ecb_noinline
|
2243
2549
|
static void
|
2244
2550
|
fd_rearm_all (EV_P)
|
2245
2551
|
{
|
@@ -2303,19 +2609,19 @@ downheap (ANHE *heap, int N, int k)
       ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
 
       /* find minimum child */
-      if (expect_true (pos + DHEAP - 1 < E))
+      if (ecb_expect_true (pos + DHEAP - 1 < E))
         {
           /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
-          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
-          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
-          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+          if (               minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+          if (               minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+          if (               minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
         }
       else if (pos < E)
         {
           /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
-          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
-          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
-          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+          if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+          if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+          if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
         }
       else
         break;
@@ -2333,7 +2639,7 @@ downheap (ANHE *heap, int N, int k)
       ev_active (ANHE_w (he)) = k;
 }
 
-#else /* 4HEAP */
+#else /* not 4HEAP */
 
 #define HEAP0 1
 #define HPARENT(k) ((k) >> 1)
@@ -2360,7 +2666,7 @@ downheap (ANHE *heap, int N, int k)
 
       heap [k] = heap [c];
       ev_active (ANHE_w (heap [k])) = k;
-
+
       k = c;
     }
 
@@ -2415,7 +2721,7 @@ reheap (ANHE *heap, int N)
 
 /*****************************************************************************/
 
-/* associate signal watchers to a signal
+/* associate signal watchers to a signal */
 typedef struct
 {
   EV_ATOMIC_T pending;
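The `downheap` hunks above operate on libev's 4-ary timer heap: `DHEAP` children per node, with `HEAP0` as the index of the root. A minimal sketch of just the index arithmetic — `DHEAP`/`HEAP0` mirror the diff, everything else here is illustrative:

```c
#include <stdio.h>

/* Sketch of 4-ary heap index math in the style of the diff above.
   With HEAP0 as the root index, node k's first child sits at
   DHEAP * (k - HEAP0) + HEAP0 + 1 (matching the `pos` computation
   in downheap), and its parent at ((k - HEAP0 - 1) / DHEAP) + HEAP0. */
#define DHEAP 4
#define HEAP0 (DHEAP - 1)   /* index of first element, as in libev's 4HEAP mode */

static int first_child (int k) { return DHEAP * (k - HEAP0) + HEAP0 + 1; }
static int parent      (int k) { return ((k - HEAP0 - 1) / DHEAP) + HEAP0; }

int
main (void)
{
  for (int k = HEAP0; k < HEAP0 + 6; k++)
    printf ("node %d: children start at %d, parent is %d\n",
            k, first_child (k), parent (k));
  return 0;
}
```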
@@ -2431,7 +2737,7 @@ static ANSIG signals [EV_NSIG - 1];
 
 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
 
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 evpipe_init (EV_P)
 {
@@ -2482,7 +2788,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
 {
   ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
 
-  if (expect_true (*flag))
+  if (ecb_expect_true (*flag))
     return;
 
   *flag = 1;
@@ -2569,7 +2875,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
       ECB_MEMORY_FENCE;
 
       for (i = EV_NSIG - 1; i--; )
-        if (expect_false (signals [i].pending))
+        if (ecb_expect_false (signals [i].pending))
           ev_feed_signal_event (EV_A_ i + 1);
     }
 #endif
@@ -2620,13 +2926,13 @@ ev_sighandler (int signum)
   ev_feed_signal (signum);
 }
 
-noinline
+ecb_noinline
 void
 ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
 {
   WL w;
 
-  if (expect_false (signum <= 0 || signum >= EV_NSIG))
+  if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
     return;
 
   --signum;
@@ -2635,7 +2941,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
   /* it is permissible to try to feed a signal to the wrong loop */
   /* or, likely more useful, feeding a signal nobody is waiting for */
 
-  if (expect_false (signals [signum].loop != EV_A))
+  if (ecb_expect_false (signals [signum].loop != EV_A))
     return;
 #endif
 
@@ -2729,6 +3035,57 @@ childcb (EV_P_ ev_signal *sw, int revents)
 
 /*****************************************************************************/
 
+#if EV_USE_TIMERFD
+
+static void periodics_reschedule (EV_P);
+
+static void
+timerfdcb (EV_P_ ev_io *iow, int revents)
+{
+  struct itimerspec its = { 0 };
+
+  its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
+  timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
+
+  ev_rt_now = ev_time ();
+  /* periodics_reschedule only needs ev_rt_now */
+  /* but maybe in the future we want the full treatment. */
+  /*
+  now_floor = EV_TS_CONST (0.);
+  time_update (EV_A_ EV_TSTAMP_HUGE);
+  */
+  #if EV_PERIODIC_ENABLE
+  periodics_reschedule (EV_A);
+  #endif
+}
+
+ecb_noinline ecb_cold
+static void
+evtimerfd_init (EV_P)
+{
+  if (!ev_is_active (&timerfd_w))
+    {
+      timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
+
+      if (timerfd >= 0)
+        {
+          fd_intern (timerfd); /* just to be sure */
+
+          ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
+          ev_set_priority (&timerfd_w, EV_MINPRI);
+          ev_io_start (EV_A_ &timerfd_w);
+          ev_unref (EV_A); /* watcher should not keep loop alive */
+
+          /* (re-) arm timer */
+          timerfdcb (EV_A_ 0, 0);
+        }
+    }
+}
+
+#endif
+
+/*****************************************************************************/
+
 #if EV_USE_IOCP
 # include "ev_iocp.c"
 #endif
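The `TFD_TIMER_CANCEL_ON_SET` flag used in the new `timerfdcb` is the kernel facility that lets libev detect wall-clock jumps: a pending read on the timerfd fails with `ECANCELED` whenever `CLOCK_REALTIME` is set discontinuously. A standalone, Linux-only sketch of that mechanism, independent of libev's internal state (note the `read` blocks until the timer fires or the clock is changed):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

/* Sketch: arm a CLOCK_REALTIME timerfd far in the future with
   TFD_TIMER_CANCEL_ON_SET. If someone sets the system clock, the
   blocked read fails with ECANCELED, signalling a time jump. */
int
main (void)
{
  int fd = timerfd_create (CLOCK_REALTIME, TFD_CLOEXEC);
  if (fd < 0)
    return 1;

  struct itimerspec its = { 0 };
  its.it_value.tv_sec = time (0) + 3600;  /* "far away", for demo purposes */

  if (timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0) < 0)
    return 1;

  uint64_t expirations;
  if (read (fd, &expirations, sizeof expirations) < 0 && errno == ECANCELED)
    puts ("realtime clock was set - reschedule absolute timers");

  close (fd);
  return 0;
}
```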
@@ -2744,6 +3101,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
 #if EV_USE_LINUXAIO
 # include "ev_linuxaio.c"
 #endif
+#if EV_USE_IOURING
+# include "ev_iouring.c"
+#endif
 #if EV_USE_POLL
 # include "ev_poll.c"
 #endif
@@ -2781,17 +3141,14 @@ ev_supported_backends (void) EV_NOEXCEPT
 {
   unsigned int flags = 0;
 
-  if (EV_USE_PORT  ) flags |= EVBACKEND_PORT;
-  if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
-  if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
-
-
-  if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO;
-
+  if (EV_USE_PORT                                         ) flags |= EVBACKEND_PORT;
+  if (EV_USE_KQUEUE                                       ) flags |= EVBACKEND_KQUEUE;
+  if (EV_USE_EPOLL                                        ) flags |= EVBACKEND_EPOLL;
+  if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300  ) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */
+  if (EV_USE_IOURING  && ev_linux_version () >= 0x050601  ) flags |= EVBACKEND_IOURING;  /* 5.6.1+ */
+  if (EV_USE_POLL                                         ) flags |= EVBACKEND_POLL;
+  if (EV_USE_SELECT                                       ) flags |= EVBACKEND_SELECT;
 
-  if (EV_USE_POLL  ) flags |= EVBACKEND_POLL;
-  if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
-
   return flags;
 }
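`ev_linux_version ()` (defined elsewhere in ev.c, unchanged by this diff) packs the running kernel's `major.minor.patch` into a hex integer, so `0x050601` reads as "kernel 5.6.1 or newer". A hedged sketch of that encoding using `uname(2)` directly — the helper name here is illustrative, not libev's:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/utsname.h>

/* Sketch: encode the Linux kernel release as 0xMMmmpp so a plain
   integer comparison implements "kernel >= X.Y.Z", in the spirit of
   libev's ev_linux_version (). */
static unsigned int
linux_version_hex (void)
{
  struct utsname u;
  unsigned int v = 0;
  char *p;

  if (uname (&u) < 0)
    return 0;

  p = u.release;
  for (int i = 0; i < 3; i++)
    {
      v = (v << 8) | (unsigned int)strtoul (p, &p, 10);
      if (*p != '.')
        {
          v <<= 8 * (2 - i);   /* pad missing components, e.g. "5.10" */
          break;
        }
      p++;
    }

  return v;
}

int
main (void)
{
  unsigned int v = linux_version_hex ();
  printf ("kernel 0x%06x, io_uring-capable (>= 5.6.1): %s\n",
          v, v >= 0x050601 ? "yes" : "no");
  return 0;
}
```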
@@ -2801,24 +3158,27 @@ ev_recommended_backends (void) EV_NOEXCEPT
 {
   unsigned int flags = ev_supported_backends ();
 
-
-
-#elif defined(__NetBSD__)
-  /* kqueue is borked on everything but netbsd apparently */
-  /* it usually doesn't work correctly on anything but sockets and pipes */
-#else
+  /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
+#if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14)
   /* only select works correctly on that "unix-certified" platform */
   flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
   flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
+#elif !defined(__NetBSD__)
+  /* kqueue is borked on everything but netbsd apparently */
+  /* it usually doesn't work correctly on anything but sockets and pipes */
+  flags &= ~EVBACKEND_KQUEUE;
 #endif
 
 #ifdef __FreeBSD__
   flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
 #endif
 
-
-
+#ifdef __linux__
+  /* NOTE: linuxaio is very experimental, never recommend */
   flags &= ~EVBACKEND_LINUXAIO;
+
+  /* NOTE: io_uring is super experimental, never recommend */
+  flags &= ~EVBACKEND_IOURING;
 #endif
 
   return flags;
@@ -2828,12 +3188,14 @@ ecb_cold
 unsigned int
 ev_embeddable_backends (void) EV_NOEXCEPT
 {
-  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
+  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
 
   /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
   if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
     flags &= ~EVBACKEND_EPOLL;
 
+  /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
+
   return flags;
 }
@@ -2895,7 +3257,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
 #endif
 
 /* initialise a loop structure, must be zero-initialised */
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 {
@@ -2960,6 +3322,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 #if EV_USE_SIGNALFD
   sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
 #endif
+#if EV_USE_TIMERFD
+  timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
+#endif
 
   if (!(flags & EVBACKEND_MASK))
     flags |= ev_recommended_backends ();
@@ -2973,6 +3338,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
 #if EV_USE_KQUEUE
   if (!backend && (flags & EVBACKEND_KQUEUE  )) backend = kqueue_init   (EV_A_ flags);
 #endif
+#if EV_USE_IOURING
+  if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init  (EV_A_ flags);
+#endif
 #if EV_USE_LINUXAIO
   if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
 #endif
@@ -3010,7 +3378,7 @@ ev_loop_destroy (EV_P)
 
 #if EV_CLEANUP_ENABLE
   /* queue cleanup watchers (and execute them) */
-  if (expect_false (cleanupcnt))
+  if (ecb_expect_false (cleanupcnt))
     {
       queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
       EV_INVOKE_PENDING;
@@ -3039,6 +3407,11 @@ ev_loop_destroy (EV_P)
     close (sigfd);
 #endif
 
+#if EV_USE_TIMERFD
+  if (ev_is_active (&timerfd_w))
+    close (timerfd);
+#endif
+
 #if EV_USE_INOTIFY
   if (fs_fd >= 0)
     close (fs_fd);
@@ -3056,6 +3429,9 @@ ev_loop_destroy (EV_P)
 #if EV_USE_KQUEUE
   if (backend == EVBACKEND_KQUEUE  ) kqueue_destroy   (EV_A);
 #endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_destroy  (EV_A);
+#endif
 #if EV_USE_LINUXAIO
   if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
 #endif
@@ -3123,6 +3499,9 @@ loop_fork (EV_P)
 #if EV_USE_KQUEUE
   if (backend == EVBACKEND_KQUEUE  ) kqueue_fork   (EV_A);
 #endif
+#if EV_USE_IOURING
+  if (backend == EVBACKEND_IOURING ) iouring_fork  (EV_A);
+#endif
 #if EV_USE_LINUXAIO
   if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
 #endif
@@ -3133,22 +3512,44 @@ loop_fork (EV_P)
   infy_fork (EV_A);
 #endif
 
-#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
-  if (ev_is_active (&pipe_w) && postfork != 2)
+  if (postfork != 2)
     {
-      /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
+      #if EV_USE_SIGNALFD
+      /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
+      #endif
 
-      ev_ref (EV_A);
-      ev_io_stop (EV_A_ &pipe_w);
+      #if EV_USE_TIMERFD
+      if (ev_is_active (&timerfd_w))
+        {
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &timerfd_w);
 
-      if (evpipe [0] >= 0)
-        EV_WIN32_CLOSE_FD (evpipe [0]);
+          close (timerfd);
+          timerfd = -2;
+
+          evtimerfd_init (EV_A);
+          /* reschedule periodics, in case we missed something */
+          ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
+        }
+      #endif
 
-      evpipe_init (EV_A);
-      /* iterate over everything, in case we missed something before */
-      ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
+      #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
+      if (ev_is_active (&pipe_w))
+        {
+          /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
+
+          ev_ref (EV_A);
+          ev_io_stop (EV_A_ &pipe_w);
+
+          if (evpipe [0] >= 0)
+            EV_WIN32_CLOSE_FD (evpipe [0]);
+
+          evpipe_init (EV_A);
+          /* iterate over everything, in case we missed something before */
+          ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
+        }
+      #endif
     }
-#endif
 
   postfork = 0;
 }
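Everything in this hunk serves one rule: kernel-side resources (the wakeup pipe, the timerfd, the backend fds) are not safely shareable with a forked child, so they must be closed and recreated before the child's loop runs again. A generic sketch of that pattern, independent of libev's internals — names here are illustrative, and `eventfd` merely stands in for libev's evpipe:

```c
#include <stdio.h>
#include <sys/eventfd.h>   /* Linux-specific wakeup fd */
#include <unistd.h>

/* Sketch: cache the pid; when it changes we know we are running in a
   forked child and must recreate kernel-side resources instead of
   sharing the parent's. Same idea as libev's curpid/postfork handling. */
static pid_t cached_pid;
static int wakeup_fd = -1;

static void
rearm_after_fork (void)
{
  if (wakeup_fd >= 0)
    close (wakeup_fd);                   /* parent's fd is useless here */

  wakeup_fd = eventfd (0, EFD_CLOEXEC | EFD_NONBLOCK);
  cached_pid = getpid ();
}

static int
get_wakeup_fd (void)
{
  if (cached_pid != getpid ())           /* fork detected */
    rearm_after_fork ();

  return wakeup_fd;
}

int
main (void)
{
  printf ("wakeup fd: %d\n", get_wakeup_fd ());
  return 0;
}
```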
@@ -3174,7 +3575,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
 #endif /* multiplicity */
 
 #if EV_VERIFY
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 verify_watcher (EV_P_ W w)
 {
@@ -3184,7 +3585,7 @@ verify_watcher (EV_P_ W w)
     assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
 }
 
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 verify_heap (EV_P_ ANHE *heap, int N)
 {
@@ -3200,7 +3601,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
     }
 }
 
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 array_verify (EV_P_ W *ws, int cnt)
 {
@@ -3359,7 +3760,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
   return count;
 }
 
-noinline
+ecb_noinline
 void
 ev_invoke_pending (EV_P)
 {
@@ -3388,7 +3789,7 @@ ev_invoke_pending (EV_P)
 inline_size void
 idle_reify (EV_P)
 {
-  if (expect_false (idleall))
+  if (ecb_expect_false (idleall))
     {
       int pri;
 
@@ -3428,7 +3829,7 @@ timers_reify (EV_P)
           if (ev_at (w) < mn_now)
             ev_at (w) = mn_now;
 
-          assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
+          assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
 
           ANHE_at_cache (timers [HEAP0]);
           downheap (timers, timercnt, HEAP0);
@@ -3447,7 +3848,7 @@ timers_reify (EV_P)
 
 #if EV_PERIODIC_ENABLE
 
-noinline
+ecb_noinline
 static void
 periodic_recalc (EV_P_ ev_periodic *w)
 {
@@ -3460,7 +3861,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
       ev_tstamp nat = at + w->interval;
 
       /* when resolution fails us, we use ev_rt_now */
-      if (expect_false (nat == at))
+      if (ecb_expect_false (nat == at))
         {
           at = ev_rt_now;
           break;
@@ -3516,7 +3917,7 @@ periodics_reify (EV_P)
 
 /* simply recalculate all periodics */
 /* TODO: maybe ensure that at least one event happens when jumping forward? */
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 periodics_reschedule (EV_P)
 {
@@ -3540,7 +3941,7 @@ periodics_reschedule (EV_P)
 #endif
 
 /* adjust all timers by a given offset */
-noinline ecb_cold
+ecb_noinline ecb_cold
 static void
 timers_reschedule (EV_P_ ev_tstamp adjust)
 {
@@ -3560,7 +3961,7 @@ inline_speed void
 time_update (EV_P_ ev_tstamp max_block)
 {
 #if EV_USE_MONOTONIC
-  if (expect_true (have_monotonic))
+  if (ecb_expect_true (have_monotonic))
     {
       int i;
       ev_tstamp odiff = rtmn_diff;
@@ -3569,7 +3970,7 @@ time_update (EV_P_ ev_tstamp max_block)
 
       /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
       /* interpolate in the meantime */
-      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
+      if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
@@ -3593,7 +3994,7 @@ time_update (EV_P_ ev_tstamp max_block)
 
       diff = odiff - rtmn_diff;
 
-      if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
+      if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
        return; /* all is well */
 
       ev_rt_now = ev_time ();
@@ -3612,7 +4013,7 @@ time_update (EV_P_ ev_tstamp max_block)
     {
       ev_rt_now = ev_time ();
 
-      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
+      if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3666,8 +4067,8 @@ ev_run (EV_P_ int flags)
 #endif
 
 #ifndef _WIN32
-      if (expect_false (curpid)) /* penalise the forking check even more */
-        if (expect_false (getpid () != curpid))
+      if (ecb_expect_false (curpid)) /* penalise the forking check even more */
+        if (ecb_expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
@@ -3676,7 +4077,7 @@ ev_run (EV_P_ int flags)
 
 #if EV_FORK_ENABLE
       /* we might have forked, so queue fork handlers */
-      if (expect_false (postfork))
+      if (ecb_expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3686,18 +4087,18 @@ ev_run (EV_P_ int flags)
 
 #if EV_PREPARE_ENABLE
       /* queue prepare watchers (and execute them) */
-      if (expect_false (preparecnt))
+      if (ecb_expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }
 #endif
 
-      if (expect_false (loop_done))
+      if (ecb_expect_false (loop_done))
        break;
 
       /* we might have forked, so reify kernel state if necessary */
-      if (expect_false (postfork))
+      if (ecb_expect_false (postfork))
        loop_fork (EV_A);
 
       /* update fd-related kernel structures */
@@ -3712,16 +4113,28 @@ ev_run (EV_P_ int flags)
        ev_tstamp prev_mn_now = mn_now;
 
        /* update time to cancel out callback processing overhead */
-       time_update (EV_A_ 1e100);
+       time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
 
        /* from now on, we want a pipe-wake-up */
        pipe_write_wanted = 1;
 
        ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
 
-       if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
+       if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
          {
-           waittime = MAX_BLOCKTIME;
+           waittime = EV_TS_CONST (MAX_BLOCKTIME);
+
+#if EV_USE_TIMERFD
+           /* sleep a lot longer when we can reliably detect timejumps */
+           if (ecb_expect_true (timerfd >= 0))
+             waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif
+#if !EV_PERIODIC_ENABLE
+           /* without periodics but with monotonic clock there is no need */
+           /* for any time jump detection, so sleep longer */
+           if (ecb_expect_true (have_monotonic))
+             waittime = EV_TS_CONST (MAX_BLOCKTIME2);
+#endif
 
            if (timercnt)
              {
@@ -3738,23 +4151,28 @@ ev_run (EV_P_ int flags)
 #endif
 
            /* don't let timeouts decrease the waittime below timeout_blocktime */
-           if (expect_false (waittime < timeout_blocktime))
+           if (ecb_expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;
 
-           /* at this point, we NEED to wait, so we have to ensure */
-           /* to pass a minimum nonzero value to the backend */
-           if (expect_false (waittime < backend_mintime))
-             waittime = backend_mintime;
+           /* now there are two more special cases left, either we have
+            * already-expired timers, so we should not sleep, or we have timers
+            * that expire very soon, in which case we need to wait for a minimum
+            * amount of time for some event loop backends.
+            */
+           if (ecb_expect_false (waittime < backend_mintime))
+             waittime = waittime <= EV_TS_CONST (0.)
+                ? EV_TS_CONST (0.)
+                : backend_mintime;
 
            /* extra check because io_blocktime is commonly 0 */
-           if (expect_false (io_blocktime))
+           if (ecb_expect_false (io_blocktime))
              {
                sleeptime = io_blocktime - (mn_now - prev_mn_now);
 
                if (sleeptime > waittime - backend_mintime)
                  sleeptime = waittime - backend_mintime;
 
-               if (expect_true (sleeptime > 0.))
+               if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
                  {
                    ev_sleep (sleeptime);
                    waittime -= sleeptime;
@@ -3809,10 +4227,7 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
 
        poll_args.loop = loop;
        poll_args.waittime = waittime;
-
        rb_thread_call_without_gvl(ev_backend_poll, (void *)&poll_args, RUBY_UBF_IO, 0);
-
-       // backend_poll (EV_A_ waittime);
        /*
        ############################# END PATCHERY ############################
        */
@@ -3828,7 +4243,6 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
            ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
          }
 
-
        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }
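The `PATCHERY` block above is this gem's own modification to the vendored ev.c: instead of calling `backend_poll` directly, it releases Ruby's Global VM Lock around the blocking poll via `rb_thread_call_without_gvl`, so other Ruby threads can run while the loop sleeps. A minimal, hypothetical C-extension sketch of that API (the function and struct names here are illustrative, not the gem's):

```c
#include <ruby.h>
#include <ruby/thread.h>
#include <unistd.h>

/* Sketch: run a blocking operation without holding the GVL.
   rb_thread_call_without_gvl runs `func` with the GVL released and
   reacquires it before returning; RUBY_UBF_IO lets Ruby interrupt
   the call on signals or Thread#kill. */
struct sleep_args { unsigned int seconds; };

static void *
blocking_sleep (void *p)
{
  struct sleep_args *args = p;
  sleep (args->seconds);        /* other Ruby threads keep running meanwhile */
  return NULL;
}

static VALUE
rb_sleep_without_gvl (VALUE self, VALUE seconds)
{
  struct sleep_args args = { NUM2UINT (seconds) };
  rb_thread_call_without_gvl (blocking_sleep, &args, RUBY_UBF_IO, 0);
  return Qnil;
}

void
Init_sleep_demo (void)
{
  rb_define_global_function ("sleep_without_gvl", rb_sleep_without_gvl, 1);
}
```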
@@ -3846,13 +4260,13 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
 
 #if EV_CHECK_ENABLE
       /* queue check watchers, to be executed first */
-      if (expect_false (checkcnt))
+      if (ecb_expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
 #endif
 
       EV_INVOKE_PENDING;
     }
-  while (expect_true (
+  while (ecb_expect_true (
     activecnt
     && !loop_done
     && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
@@ -3889,7 +4303,7 @@ ev_unref (EV_P) EV_NOEXCEPT
 void
 ev_now_update (EV_P) EV_NOEXCEPT
 {
-  time_update (EV_A_ 1e100);
+  time_update (EV_A_ EV_TSTAMP_HUGE);
 }
 
 void
@@ -3926,7 +4340,7 @@ wlist_del (WL *head, WL elem)
 {
   while (*head)
     {
-      if (expect_true (*head == elem))
+      if (ecb_expect_true (*head == elem))
        {
          *head = elem->next;
          break;
@@ -3953,7 +4367,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
   W w_ = (W)w;
   int pending = w_->pending;
 
-  if (expect_true (pending))
+  if (ecb_expect_true (pending))
     {
       ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
       p->w = (W)&pending_w;
@@ -3990,13 +4404,13 @@ ev_stop (EV_P_ W w)
 
 /*****************************************************************************/
 
-noinline
+ecb_noinline
 void
 ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   int fd = w->fd;
 
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
     return;
 
   assert (("libev: ev_io_start called with negative fd", fd >= 0));
@@ -4020,12 +4434,12 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-noinline
+ecb_noinline
 void
 ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
@@ -4043,11 +4457,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-noinline
+ecb_noinline
 void
 ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   ev_at (w) += mn_now;
@@ -4068,12 +4482,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
   /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
 }
 
-noinline
+ecb_noinline
 void
 ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4085,7 +4499,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
 
     --timercnt;
 
-    if (expect_true (active < timercnt + HEAP0))
+    if (ecb_expect_true (active < timercnt + HEAP0))
       {
         timers [active] = timers [timercnt + HEAP0];
         adjustheap (timers, timercnt, active);
@@ -4099,7 +4513,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-noinline
+ecb_noinline
 void
 ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
@@ -4130,17 +4544,22 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
 ev_tstamp
 ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
 {
-  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
+  return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
 }
 
 #if EV_PERIODIC_ENABLE
-noinline
+ecb_noinline
 void
 ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
+#if EV_USE_TIMERFD
+  if (timerfd == -2)
+    evtimerfd_init (EV_A);
+#endif
+
   if (w->reschedule_cb)
     ev_at (w) = w->reschedule_cb (w, ev_rt_now);
   else if (w->interval)
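Note the lazy initialization added here: the timerfd (and with it the cancel-on-set time-jump detection) is only created once the first `ev_periodic` is started, since only absolute, wall-clock watchers care about clock jumps. For reference, a hedged usage sketch of the public API this protects, written against the vendored libev headers:

```c
#include <stdio.h>
#include <ev.h>

/* Sketch: an ev_periodic fires on absolute wall-clock boundaries
   (here: every 2 seconds, aligned to even seconds) - exactly the
   watcher type whose correctness depends on detecting clock jumps. */
static void
tick_cb (EV_P_ ev_periodic *w, int revents)
{
  printf ("tick at %f\n", ev_now (EV_A));
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_periodic tick;

  /* offset 0., interval 2., no reschedule callback */
  ev_periodic_init (&tick, tick_cb, 0., 2., 0);
  ev_periodic_start (loop, &tick);  /* on libev 4.33 this may lazily set up the timerfd */

  ev_run (loop, 0);
  return 0;
}
```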
@@ -4165,12 +4584,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
   /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
 }
 
-noinline
+ecb_noinline
 void
 ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4182,7 +4601,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
 
     --periodiccnt;
 
-    if (expect_true (active < periodiccnt + HEAP0))
+    if (ecb_expect_true (active < periodiccnt + HEAP0))
       {
         periodics [active] = periodics [periodiccnt + HEAP0];
         adjustheap (periodics, periodiccnt, active);
@@ -4194,7 +4613,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-noinline
+ecb_noinline
 void
 ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 {
@@ -4210,11 +4629,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
 
 #if EV_SIGNAL_ENABLE
 
-noinline
+ecb_noinline
 void
 ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4293,12 +4712,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
   EV_FREQUENT_CHECK;
 }
 
-noinline
+ecb_noinline
 void
 ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4341,7 +4760,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
 #if EV_MULTIPLICITY
   assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
 #endif
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4356,7 +4775,7 @@ void
 ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4380,14 +4799,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
 #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
 #define MIN_STAT_INTERVAL 0.1074891
 
-noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
+ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
 
 #if EV_USE_INOTIFY
 
 /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
 # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
 
-noinline
+ecb_noinline
 static void
 infy_add (EV_P_ ev_stat *w)
 {
@@ -4462,7 +4881,7 @@ infy_add (EV_P_ ev_stat *w)
   if (ev_is_active (&w->timer)) ev_unref (EV_A);
 }
 
-noinline
+ecb_noinline
 static void
 infy_del (EV_P_ ev_stat *w)
 {
@@ -4480,7 +4899,7 @@ infy_del (EV_P_ ev_stat *w)
   inotify_rm_watch (fs_fd, wd);
 }
 
-noinline
+ecb_noinline
 static void
 infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
 {
@@ -4636,7 +5055,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
     w->attr.st_nlink = 1;
 }
 
-noinline
+ecb_noinline
 static void
 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 {
@@ -4680,7 +5099,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 void
 ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   ev_stat_stat (EV_A_ w);
@@ -4712,7 +5131,7 @@ void
 ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4737,7 +5156,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
 void
 ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   pri_adjust (EV_A_ (W)w);
@@ -4761,7 +5180,7 @@ void
 ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4784,7 +5203,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
 void
 ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4800,7 +5219,7 @@ void
 ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4822,7 +5241,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
 void
 ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4838,7 +5257,7 @@ void
 ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4857,7 +5276,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
 #endif
 
 #if EV_EMBED_ENABLE
-noinline
+ecb_noinline
 void
 ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
@@ -4891,6 +5310,7 @@ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
     }
 }
 
+#if EV_FORK_ENABLE
 static void
 embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
 {
@@ -4907,6 +5327,7 @@ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
 
   ev_embed_start (EV_A_ w);
 }
+#endif
 
 #if 0
 static void
@@ -4919,7 +5340,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
 void
 ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   {
@@ -4937,8 +5358,10 @@ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
   ev_set_priority (&w->prepare, EV_MINPRI);
   ev_prepare_start (EV_A_ &w->prepare);
 
+#if EV_FORK_ENABLE
   ev_fork_init (&w->fork, embed_fork_cb);
   ev_fork_start (EV_A_ &w->fork);
+#endif
 
   /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
 
@@ -4951,14 +5374,16 @@ void
 ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
 
   ev_io_stop      (EV_A_ &w->io);
   ev_prepare_stop (EV_A_ &w->prepare);
+#if EV_FORK_ENABLE
   ev_fork_stop    (EV_A_ &w->fork);
+#endif
 
   ev_stop (EV_A_ (W)w);
 
@@ -4970,7 +5395,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
 void
 ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -4986,7 +5411,7 @@ void
 ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5008,7 +5433,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
 void
 ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5026,7 +5451,7 @@ void
 ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5049,7 +5474,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
 void
 ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
 {
-  if (expect_false (ev_is_active (w)))
+  if (ecb_expect_false (ev_is_active (w)))
    return;
 
   w->sent = 0;
@@ -5069,7 +5494,7 @@ void
 ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
 {
   clear_pending (EV_A_ (W)w);
-  if (expect_false (!ev_is_active (w)))
+  if (ecb_expect_false (!ev_is_active (w)))
    return;
 
   EV_FREQUENT_CHECK;
@@ -5276,4 +5701,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
 #if EV_MULTIPLICITY
 #include "ev_wrap.h"
 #endif
-