nio4r 2.5.0-java → 2.5.5-java

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 145ed7000e981e17602b5571474c788cc15f843cf7b7d4265864e4f31295090e
4
- data.tar.gz: 2ea634d2cbc1b85511946f11fe0715eecc70b416b23bc241dcbf0f1b77bf2178
3
+ metadata.gz: '096be18a4a825023b8442cc0e778d560d3f26ab9c02e90f29c4d61f2d05d91c3'
4
+ data.tar.gz: 307111c66f8339770ebfba51b5ec6acb2896d807c9cdd667acdd9654903c8eac
5
5
  SHA512:
6
- metadata.gz: 22bfab6e33937899858afcc5450346a8f3b58cd78e4b60893034508821328db75a8f7d1b1232ff4e32767f26abc69ce025af6887c4b09c2a7c2e84e831265e07
7
- data.tar.gz: 0bbf87f1286e0c71d30ae7689384b8f53b9d0640f772d6b5a9b6c822ef1d9066bb2fbe3597e0cf65409ec911068208660607f382288f07ed4c898253223113c3
6
+ metadata.gz: eab4a86309cab8a9771eb371e9700ca56d1ea68944aa58987e09197a8f2050412e52f2d53b89046737932b51d1696f0ca7f5e18195603aba9e2f758ffbcc7151
7
+ data.tar.gz: 316e9a29c60fa6c284722309a96dff05d0cddc9c4c1b5c583ee5cfe3499ad1215c2ff74f084e29236d1cda457e343ace0250df36a01b8d8d5b0c5c550861f7f0
(new file — filename header missing from this diff record; content appears to be a GitHub Actions workflow, e.g. .github/workflows/workflow.yml)
@@ -0,0 +1,49 @@
1
+ name: nio4r
2
+
3
+ on: [push, pull_request]
4
+
5
+ jobs:
6
+ build:
7
+ name: >-
8
+ ${{matrix.os}}, ${{matrix.ruby}}
9
+ env:
10
+ CI: true
11
+ TESTOPTS: -v
12
+
13
+ runs-on: ${{matrix.os}}
14
+ strategy:
15
+ fail-fast: false
16
+ matrix:
17
+ os: [ ubuntu-20.04, ubuntu-18.04, macos-10.15, windows-2019 ]
18
+ ruby: [ head, 3.0, 2.7, 2.6, 2.5, 2.4, jruby, truffleruby-head ]
19
+ include:
20
+ - { os: ubuntu-16.04, ruby: 3.0 }
21
+ - { os: ubuntu-16.04, ruby: 2.4 }
22
+ - { os: macos-11.0 , ruby: 3.0 }
23
+ - { os: macos-11.0 , ruby: 2.4 }
24
+ exclude:
25
+ - { os: windows-2019, ruby: head }
26
+ - { os: windows-2019, ruby: jruby }
27
+ - { os: windows-2019, ruby: truffleruby-head }
28
+
29
+ steps:
30
+ - name: repo checkout
31
+ uses: actions/checkout@v2
32
+
33
+ - name: load ruby
34
+ uses: ruby/setup-ruby@v1
35
+ with:
36
+ ruby-version: ${{matrix.ruby}}
37
+
38
+ - name: RubyGems, Bundler Update
39
+ run: gem update --system --no-document --conservative
40
+
41
+ - name: bundle install
42
+ run: bundle install --path .bundle/gems --without development
43
+
44
+ - name: compile
45
+ run: bundle exec rake compile
46
+
47
+ - name: test
48
+ run: bundle exec rake spec
49
+ timeout-minutes: 10
data/.rubocop.yml CHANGED
@@ -1,23 +1,40 @@
1
1
  AllCops:
2
- TargetRubyVersion: 2.3
2
+ TargetRubyVersion: 2.4
3
3
  DisplayCopNames: true
4
4
 
5
+ Layout/HashAlignment:
6
+ Enabled: false
7
+
8
+ Layout/LineLength:
9
+ Max: 128
10
+
11
+ Layout/SpaceAroundMethodCallOperator:
12
+ Enabled: false
13
+
5
14
  Layout/SpaceInsideBlockBraces:
6
15
  Enabled: false
7
16
 
8
17
  Style/IfUnlessModifier:
9
18
  Enabled: false
10
19
 
20
+ Style/UnpackFirst:
21
+ Enabled: false
22
+
11
23
  #
12
24
  # Lint
13
25
  #
14
26
 
15
- Lint/HandleExceptions:
27
+ Lint/SuppressedException:
16
28
  Enabled: false
17
29
 
18
30
  Lint/Loop:
19
31
  Enabled: false
20
32
 
33
+ Lint/RaiseException:
34
+ Enabled: false
35
+
36
+ Lint/StructNewOverride:
37
+ Enabled: false
21
38
  #
22
39
  # Metrics
23
40
  #
@@ -32,9 +49,6 @@ Metrics/BlockLength:
32
49
  Metrics/ClassLength:
33
50
  Max: 128
34
51
 
35
- Metrics/LineLength:
36
- Max: 128
37
-
38
52
  Metrics/MethodLength:
39
53
  CountComments: false
40
54
  Max: 50
@@ -46,16 +60,12 @@ Metrics/PerceivedComplexity:
46
60
  Max: 15
47
61
 
48
62
  #
49
- # Performance
63
+ # Style
50
64
  #
51
65
 
52
- Performance/RegexpMatch:
66
+ Style/ExponentialNotation:
53
67
  Enabled: false
54
68
 
55
- #
56
- # Style
57
- #
58
-
59
69
  Style/FormatStringToken:
60
70
  Enabled: false
61
71
 
@@ -65,6 +75,15 @@ Style/FrozenStringLiteralComment:
65
75
  Style/GlobalVars:
66
76
  Enabled: false
67
77
 
78
+ Style/HashEachMethods:
79
+ Enabled: false
80
+
81
+ Style/HashTransformKeys:
82
+ Enabled: false
83
+
84
+ Style/HashTransformValues:
85
+ Enabled: false
86
+
68
87
  Style/NumericPredicate:
69
88
  Enabled: false
70
89
 
data/CHANGES.md CHANGED
@@ -1,3 +1,25 @@
1
+ ## 2.5.4 (2020-09-16)
2
+
3
+ * [#251](https://github.com/socketry/nio4r/issues/251)
4
+ Intermittent SEGV during GC.
5
+ ([@boazsegev])
6
+
7
+ ## 2.5.3 (2020-09-07)
8
+
9
+ * [#241](https://github.com/socketry/nio4r/issues/241)
10
+ Possible bug with Ruby >= 2.7.0 and `GC.compact`.
11
+ ([@boazsegev])
12
+
13
+ ## 2.5.2 (2019-09-24)
14
+
15
+ * [#220](https://github.com/socketry/nio4r/issues/220)
16
+ Update to libev-4.27 & fix assorted warnings.
17
+ ([@ioquatix])
18
+
19
+ * [#225](https://github.com/socketry/nio4r/issues/225)
20
+ Avoid need for linux headers.
21
+ ([@ioquatix])
22
+
1
23
  ## 2.4.0 (2019-07-07)
2
24
 
3
25
  * [#211](https://github.com/socketry/nio4r/pull/211)
@@ -9,7 +31,7 @@
9
31
 
10
32
  * Assorted fixes for TruffleRuby & JRuby.
11
33
  ([@eregon], [@olleolleolle])
12
-
34
+ Possible bug with Ruby >= 2.7.0 and `GC.compact`
13
35
  * Update libev to v4.25.
14
36
  ([@ioquatix])
15
37
 
@@ -242,3 +264,4 @@
242
264
  [@ioquatix]: https://github.com/ioquatix
243
265
  [@eregon]: https://github.com/eregon
244
266
  [@olleolleolle]: https://github.com/olleolleolle
267
+ [@boazsegev]: https://github.com/boazsegev
data/Gemfile CHANGED
@@ -15,5 +15,5 @@ group :development, :test do
15
15
  gem "coveralls", require: false
16
16
  gem "rake-compiler", require: false
17
17
  gem "rspec", "~> 3.7", require: false
18
- gem "rubocop", "0.52.1", require: false
18
+ gem "rubocop", "0.82.0", require: false
19
19
  end
data/README.md CHANGED
@@ -1,17 +1,10 @@
1
1
  # ![nio4r](https://raw.github.com/socketry/nio4r/master/logo.png)
2
2
 
3
3
  [![Gem Version](https://badge.fury.io/rb/nio4r.svg)](http://rubygems.org/gems/nio4r)
4
- [![Travis CI Status](https://secure.travis-ci.org/socketry/nio4r.svg?branch=master)](http://travis-ci.org/socketry/nio4r)
5
- [![Appveyor Status](https://ci.appveyor.com/api/projects/status/1ru8x81v91vaewax/branch/master?svg=true)](https://ci.appveyor.com/project/tarcieri/nio4r/branch/master)
4
+ [![Build Status](https://github.com/socketry/nio4r/workflows/nio4r/badge.svg?branch=master&event=push)](https://github.com/socketry/nio4r/actions?query=workflow:nio4r)
6
5
  [![Code Climate](https://codeclimate.com/github/socketry/nio4r.svg)](https://codeclimate.com/github/socketry/nio4r)
7
6
  [![Coverage Status](https://coveralls.io/repos/socketry/nio4r/badge.svg?branch=master)](https://coveralls.io/r/socketry/nio4r)
8
7
  [![Yard Docs](https://img.shields.io/badge/yard-docs-blue.svg)](http://www.rubydoc.info/gems/nio4r/2.2.0)
9
- [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/socketry/nio4r/blob/master/LICENSE.txt)
10
-
11
- _NOTE: This is the 2.x **stable** branch of nio4r. For the 1.x **legacy** branch,
12
- please see:_
13
-
14
- https://github.com/socketry/nio4r/tree/1-x-stable
15
8
 
16
9
  **New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for
17
10
  scalable network clients and servers. Modeled after the Java NIO API, but
@@ -25,13 +18,13 @@ writing.
25
18
  ## Projects using nio4r
26
19
 
27
20
  * [ActionCable]: Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
28
- * [Celluloid::IO]: Actor-based concurrency framework, uses nio4r for async I/O
29
- * [Socketry Async]: Asynchronous I/O framework for Ruby
21
+ * [Celluloid]: Actor-based concurrency framework, uses nio4r for async I/O
22
+ * [Async]: Asynchronous I/O framework for Ruby
30
23
  * [Puma]: Ruby/Rack web server built for concurrency
31
24
 
32
25
  [ActionCable]: https://rubygems.org/gems/actioncable
33
- [Celluloid::IO]: https://github.com/celluloid/celluloid-io
34
- [Socketry Async]: https://github.com/socketry/async
26
+ [Celluloid]: https://github.com/celluloid/celluloid-io
27
+ [Async]: https://github.com/socketry/async
35
28
  [Puma]: https://github.com/puma/puma
36
29
 
37
30
  ## Goals
@@ -43,10 +36,11 @@ writing.
43
36
 
44
37
  ## Supported platforms
45
38
 
46
- * Ruby 2.3
47
39
  * Ruby 2.4
48
40
  * Ruby 2.5
49
41
  * Ruby 2.6
42
+ * Ruby 2.7
43
+ * Ruby 3.0
50
44
  * [JRuby](https://github.com/jruby/jruby)
51
45
  * [TruffleRuby](https://github.com/oracle/truffleruby)
52
46
 
@@ -56,17 +50,6 @@ writing.
56
50
  * **Java NIO**: JRuby extension which wraps the Java NIO subsystem
57
51
  * **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter
58
52
 
59
- ## Discussion
60
-
61
- For discussion and general help with nio4r, email
62
- [socketry+subscribe@googlegroups.com][subscribe]
63
- or join on the web via the [Google Group].
64
-
65
- We're also on IRC at ##socketry on irc.freenode.net.
66
-
67
- [subscribe]: mailto:socketry+subscribe@googlegroups.com
68
- [google group]: https://groups.google.com/group/socketry
69
-
70
53
  ## Documentation
71
54
 
72
55
  [Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki)
@@ -97,11 +80,30 @@ to maintain a large codebase.
97
80
  [EventMachine]: https://github.com/eventmachine/eventmachine
98
81
  [Cool.io]: https://coolio.github.io/
99
82
 
83
+ ## Releases
84
+
85
+ ### CRuby
86
+
87
+ ```
88
+ rake clean
89
+ rake release
90
+ ```
91
+
92
+ ### JRuby
93
+
94
+ You might need to delete `Gemfile.lock` before trying to `bundle install`.
95
+
96
+ ```
97
+ rake clean
98
+ rake compile
99
+ rake release
100
+ ```
101
+
100
102
  ## License
101
103
 
102
104
  Released under the MIT license.
103
105
 
104
- Copyright, 2019, by Tony Arcieri.
106
+ Copyright, 2019, by Tony Arcieri.
105
107
  Copyright, 2019, by [Samuel G. D. Williams](http://www.codeotaku.com/samuel-williams).
106
108
 
107
109
  Permission is hereby granted, free of charge, to any person obtaining a copy
(filename header missing from this diff record; content appears to be an example script, e.g. examples/echo_server.rb)
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env ruby
2
2
  # frozen_string_literal: true
3
3
 
4
- $LOAD_PATH.push File.expand_path("../../lib", __FILE__)
4
+ $LOAD_PATH.push File.expand_path("../lib", __dir__)
5
5
  require "nio"
6
6
  require "socket"
7
7
 
@@ -19,7 +19,7 @@ class EchoServer
19
19
 
20
20
  def run
21
21
  loop do
22
- @selector.select { |monitor| monitor.value.call(monitor) }
22
+ @selector.select { |monitor| monitor.value.call }
23
23
  end
24
24
  end
25
25
 
data/ext/libev/Changes CHANGED
@@ -1,8 +1,77 @@
1
1
  Revision history for libev, a high-performance and full-featured event loop.
2
2
 
3
+ TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter.
4
+ TODO: document EV_TSTAMP_T
5
+
6
+ 4.33 Wed Mar 18 13:22:29 CET 2020
7
+ - no changes w.r.t. 4.32.
8
+
9
+ 4.32 (EV only)
10
+ - the 4.31 timerfd code wrongly changed the priority of the signal
11
+ fd watcher, which is usually harmless unless signal fds are
12
+ also used (found via cpan tester service).
13
+ - the documentation wrongly claimed that user may modify fd and events
14
+ members in io watchers when the watcher was stopped
15
+ (found by b_jonas).
16
+ - new ev_io_modify mutator which changes only the events member,
17
+ which can be faster. also added ev::io::set (int events) method
18
+ to ev++.h.
19
+ - officially allow a zero events mask for io watchers. this should
20
+ work with older libev versions as well but was not officially
21
+ allowed before.
22
+ - do not wake up every minute when timerfd is used to detect timejumps.
23
+ - do not wake up every minute when periodics are disabled and we have
24
+ a monotonic clock.
25
+ - support a lot more "uncommon" compile time configurations,
26
+ such as ev_embed enabled but ev_timer disabled.
27
+ - use a start/stop wrapper class to reduce code duplication in
28
+ ev++.h and make it needlessly more c++-y.
29
+ - the linux aio backend is no longer compiled in by default.
30
+ - update to libecb version 0x00010008.
31
+
32
+ 4.31 Fri Dec 20 21:58:29 CET 2019
33
+ - handle backends with minimum wait time a bit better by not
34
+ waiting in the presence of already-expired timers
35
+ (behaviour reported by Felipe Gasper).
36
+ - new feature: use timerfd to detect timejumps quickly,
37
+ can be disabled with the new EVFLAG_NOTIMERFD loop flag.
38
+ - document EV_USE_SIGNALFD feature macro.
39
+
40
+ 4.30 (EV only)
41
+ - change non-autoconf test for __kernel_rwf_t by testing
42
+ LINUX_VERSION_CODE, the most direct test I could find.
43
+ - fix a bug in the io_uring backend that polled the wrong
44
+ backend fd, causing it to not work in many cases.
45
+
46
+ 4.29 (EV only)
47
+ - add io uring autoconf and non-autoconf detection.
48
+ - disable io_uring when some header files are too old.
49
+
50
+ 4.28 (EV only)
51
+ - linuxaio backend resulted in random memory corruption
52
+ when loop is forked.
53
+ - linuxaio backend might have tried to cancel an iocb
54
+ multiple times (was unable to trigger this).
55
+ - linuxaio backend now employs a generation counter to
56
+ avoid handling spurious events from cancelled requests.
57
+ - io_cancel can return EINTR, deal with it. also, assume
58
+ io_submit also returns EINTR.
59
+ - fix some other minor bugs in linuxaio backend.
60
+ - ev_tstamp type can now be overriden by defining EV_TSTAMP_T.
61
+ - cleanup: replace expect_true/false and noinline by their
62
+ libecb counterparts.
63
+ - move syscall infrastructure from ev_linuxaio.c to ev.c.
64
+ - prepare io_uring integration.
65
+ - tweak ev_floor.
66
+ - epoll, poll, win32 Sleep and other places that use millisecond
67
+ reslution now all try to round up times.
68
+ - solaris port backend didn't compile.
69
+ - abstract time constants into their macros, for more flexibility.
70
+
3
71
  4.27 Thu Jun 27 22:43:44 CEST 2019
4
- - linux aio backend almost complete rewritten to work around its
72
+ - linux aio backend almost completely rewritten to work around its
5
73
  limitations.
74
+ - linux aio backend now requires linux 4.19+.
6
75
  - epoll backend now mandatory for linux aio backend.
7
76
  - fail assertions more aggressively on invalid fd's detected
8
77
  in the event loop, do not just silently fd_kill in case of
@@ -22,7 +91,7 @@ Revision history for libev, a high-performance and full-featured event loop.
22
91
  4.25 Fri Dec 21 07:49:20 CET 2018
23
92
  - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
24
93
  (EV_THROW still provided) and now uses noexcept on C++11 or newer.
25
- - move the darwin select workaround highe rin ev.c, as newer versions of
94
+ - move the darwin select workaround higher in ev.c, as newer versions of
26
95
  darwin managed to break their broken select even more.
27
96
  - ANDROID => __ANDROID__ (reported by enh@google.com).
28
97
  - disable epoll_create1 on android because it has broken header files
data/ext/libev/ev.c CHANGED
@@ -116,7 +116,7 @@
116
116
  # undef EV_USE_POLL
117
117
  # define EV_USE_POLL 0
118
118
  # endif
119
-
119
+
120
120
  # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
121
121
  # ifndef EV_USE_EPOLL
122
122
  # define EV_USE_EPOLL EV_FEATURE_BACKENDS
@@ -125,16 +125,25 @@
125
125
  # undef EV_USE_EPOLL
126
126
  # define EV_USE_EPOLL 0
127
127
  # endif
128
-
128
+
129
129
  # if HAVE_LINUX_AIO_ABI_H
130
130
  # ifndef EV_USE_LINUXAIO
131
- # define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
131
+ # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
132
132
  # endif
133
133
  # else
134
134
  # undef EV_USE_LINUXAIO
135
135
  # define EV_USE_LINUXAIO 0
136
136
  # endif
137
-
137
+
138
+ # if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
139
+ # ifndef EV_USE_IOURING
140
+ # define EV_USE_IOURING EV_FEATURE_BACKENDS
141
+ # endif
142
+ # else
143
+ # undef EV_USE_IOURING
144
+ # define EV_USE_IOURING 0
145
+ # endif
146
+
138
147
  # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
139
148
  # ifndef EV_USE_KQUEUE
140
149
  # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -143,7 +152,7 @@
143
152
  # undef EV_USE_KQUEUE
144
153
  # define EV_USE_KQUEUE 0
145
154
  # endif
146
-
155
+
147
156
  # if HAVE_PORT_H && HAVE_PORT_CREATE
148
157
  # ifndef EV_USE_PORT
149
158
  # define EV_USE_PORT EV_FEATURE_BACKENDS
@@ -179,7 +188,16 @@
179
188
  # undef EV_USE_EVENTFD
180
189
  # define EV_USE_EVENTFD 0
181
190
  # endif
182
-
191
+
192
+ # if HAVE_SYS_TIMERFD_H
193
+ # ifndef EV_USE_TIMERFD
194
+ # define EV_USE_TIMERFD EV_FEATURE_OS
195
+ # endif
196
+ # else
197
+ # undef EV_USE_TIMERFD
198
+ # define EV_USE_TIMERFD 0
199
+ # endif
200
+
183
201
  #endif
184
202
 
185
203
  /* OS X, in its infinite idiocy, actually HARDCODES
@@ -337,12 +355,20 @@
337
355
 
338
356
  #ifndef EV_USE_LINUXAIO
339
357
  # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
340
- # define EV_USE_LINUXAIO 1
358
+ # define EV_USE_LINUXAIO 0 /* was: 1, always off by default */
341
359
  # else
342
360
  # define EV_USE_LINUXAIO 0
343
361
  # endif
344
362
  #endif
345
363
 
364
+ #ifndef EV_USE_IOURING
365
+ # if __linux /* later checks might disable again */
366
+ # define EV_USE_IOURING 1
367
+ # else
368
+ # define EV_USE_IOURING 0
369
+ # endif
370
+ #endif
371
+
346
372
  #ifndef EV_USE_INOTIFY
347
373
  # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
348
374
  # define EV_USE_INOTIFY EV_FEATURE_OS
@@ -375,6 +401,14 @@
375
401
  # endif
376
402
  #endif
377
403
 
404
+ #ifndef EV_USE_TIMERFD
405
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
406
+ # define EV_USE_TIMERFD EV_FEATURE_OS
407
+ # else
408
+ # define EV_USE_TIMERFD 0
409
+ # endif
410
+ #endif
411
+
378
412
  #if 0 /* debugging */
379
413
  # define EV_VERIFY 3
380
414
  # define EV_USE_4HEAP 1
@@ -417,6 +451,7 @@
417
451
  # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
418
452
  # undef EV_USE_MONOTONIC
419
453
  # define EV_USE_MONOTONIC 1
454
+ # define EV_NEED_SYSCALL 1
420
455
  # else
421
456
  # undef EV_USE_CLOCK_SYSCALL
422
457
  # define EV_USE_CLOCK_SYSCALL 0
@@ -440,6 +475,14 @@
440
475
  # define EV_USE_INOTIFY 0
441
476
  #endif
442
477
 
478
+ #if __linux && EV_USE_IOURING
479
+ # include <linux/version.h>
480
+ # if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
481
+ # undef EV_USE_IOURING
482
+ # define EV_USE_IOURING 0
483
+ # endif
484
+ #endif
485
+
443
486
  #if !EV_USE_NANOSLEEP
444
487
  /* hp-ux has it in sys/time.h, which we unconditionally include above */
445
488
  # if !defined _WIN32 && !defined __hpux
@@ -449,12 +492,29 @@
449
492
 
450
493
  #if EV_USE_LINUXAIO
451
494
  # include <sys/syscall.h>
452
- # if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */
495
+ # if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
496
+ # define EV_NEED_SYSCALL 1
497
+ # else
453
498
  # undef EV_USE_LINUXAIO
454
499
  # define EV_USE_LINUXAIO 0
455
500
  # endif
456
501
  #endif
457
502
 
503
+ #if EV_USE_IOURING
504
+ # include <sys/syscall.h>
505
+ # if !SYS_io_uring_setup && __linux && !__alpha
506
+ # define SYS_io_uring_setup 425
507
+ # define SYS_io_uring_enter 426
508
+ # define SYS_io_uring_wregister 427
509
+ # endif
510
+ # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
511
+ # define EV_NEED_SYSCALL 1
512
+ # else
513
+ # undef EV_USE_IOURING
514
+ # define EV_USE_IOURING 0
515
+ # endif
516
+ #endif
517
+
458
518
  #if EV_USE_INOTIFY
459
519
  # include <sys/statfs.h>
460
520
  # include <sys/inotify.h>
@@ -466,7 +526,7 @@
466
526
  #endif
467
527
 
468
528
  #if EV_USE_EVENTFD
469
- /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
529
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
470
530
  # include <stdint.h>
471
531
  # ifndef EFD_NONBLOCK
472
532
  # define EFD_NONBLOCK O_NONBLOCK
@@ -482,7 +542,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
482
542
  #endif
483
543
 
484
544
  #if EV_USE_SIGNALFD
485
- /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
545
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
486
546
  # include <stdint.h>
487
547
  # ifndef SFD_NONBLOCK
488
548
  # define SFD_NONBLOCK O_NONBLOCK
@@ -494,7 +554,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
494
554
  # define SFD_CLOEXEC 02000000
495
555
  # endif
496
556
  # endif
497
- EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
557
+ EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
498
558
 
499
559
  struct signalfd_siginfo
500
560
  {
@@ -503,7 +563,17 @@ struct signalfd_siginfo
503
563
  };
504
564
  #endif
505
565
 
506
- /**/
566
+ /* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
567
+ #if EV_USE_TIMERFD
568
+ # include <sys/timerfd.h>
569
+ /* timerfd is only used for periodics */
570
+ # if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
571
+ # undef EV_USE_TIMERFD
572
+ # define EV_USE_TIMERFD 0
573
+ # endif
574
+ #endif
575
+
576
+ /*****************************************************************************/
507
577
 
508
578
  #if EV_VERIFY >= 3
509
579
  # define EV_FREQUENT_CHECK ev_verify (EV_A)
@@ -518,18 +588,34 @@ struct signalfd_siginfo
518
588
  #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
519
589
  /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
520
590
 
521
- #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
522
- #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
591
+ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
592
+ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
593
+ #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
523
594
 
524
- #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
525
- #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
595
+ /* find a portable timestamp that is "always" in the future but fits into time_t.
596
+ * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
597
+ * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
598
+ #define EV_TSTAMP_HUGE \
599
+ (sizeof (time_t) >= 8 ? 10000000000000. \
600
+ : 0 < (time_t)4294967295 ? 4294967295. \
601
+ : 2147483647.) \
602
+
603
+ #ifndef EV_TS_CONST
604
+ # define EV_TS_CONST(nv) nv
605
+ # define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
606
+ # define EV_TS_FROM_USEC(us) us * 1e-6
607
+ # define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
608
+ # define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
609
+ # define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
610
+ # define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
611
+ #endif
526
612
 
527
613
  /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
528
614
  /* ECB.H BEGIN */
529
615
  /*
530
616
  * libecb - http://software.schmorp.de/pkg/libecb
531
617
  *
532
- * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
618
+ * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
533
619
  * Copyright (©) 2011 Emanuele Giaquinta
534
620
  * All rights reserved.
535
621
  *
@@ -570,15 +656,23 @@ struct signalfd_siginfo
570
656
  #define ECB_H
571
657
 
572
658
  /* 16 bits major, 16 bits minor */
573
- #define ECB_VERSION 0x00010006
659
+ #define ECB_VERSION 0x00010008
574
660
 
575
- #ifdef _WIN32
661
+ #include <string.h> /* for memcpy */
662
+
663
+ #if defined (_WIN32) && !defined (__MINGW32__)
576
664
  typedef signed char int8_t;
577
665
  typedef unsigned char uint8_t;
666
+ typedef signed char int_fast8_t;
667
+ typedef unsigned char uint_fast8_t;
578
668
  typedef signed short int16_t;
579
669
  typedef unsigned short uint16_t;
670
+ typedef signed int int_fast16_t;
671
+ typedef unsigned int uint_fast16_t;
580
672
  typedef signed int int32_t;
581
673
  typedef unsigned int uint32_t;
674
+ typedef signed int int_fast32_t;
675
+ typedef unsigned int uint_fast32_t;
582
676
  #if __GNUC__
583
677
  typedef signed long long int64_t;
584
678
  typedef unsigned long long uint64_t;
@@ -586,6 +680,8 @@ struct signalfd_siginfo
586
680
  typedef signed __int64 int64_t;
587
681
  typedef unsigned __int64 uint64_t;
588
682
  #endif
683
+ typedef int64_t int_fast64_t;
684
+ typedef uint64_t uint_fast64_t;
589
685
  #ifdef _WIN64
590
686
  #define ECB_PTRSIZE 8
591
687
  typedef uint64_t uintptr_t;
@@ -607,6 +703,14 @@ struct signalfd_siginfo
607
703
  #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
608
704
  #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
609
705
 
706
+ #ifndef ECB_OPTIMIZE_SIZE
707
+ #if __OPTIMIZE_SIZE__
708
+ #define ECB_OPTIMIZE_SIZE 1
709
+ #else
710
+ #define ECB_OPTIMIZE_SIZE 0
711
+ #endif
712
+ #endif
713
+
610
714
  /* work around x32 idiocy by defining proper macros */
611
715
  #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
612
716
  #if _ILP32
@@ -1122,6 +1226,44 @@ ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { retu
1122
1226
  ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
1123
1227
  ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
1124
1228
 
1229
+ #if ECB_CPP
1230
+
1231
+ inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
1232
+ inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
1233
+ inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
1234
+ inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
1235
+
1236
+ inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
1237
+ inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
1238
+ inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
1239
+ inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
1240
+
1241
+ inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
1242
+ inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
1243
+ inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
1244
+ inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
1245
+
1246
+ inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
1247
+ inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
1248
+ inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
1249
+ inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
1250
+
1251
+ inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
1252
+ inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
1253
+ inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
1254
+
1255
+ inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
1256
+ inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
1257
+ inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
1258
+ inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
1259
+
1260
+ inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
1261
+ inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
1262
+ inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
1263
+ inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
1264
+
1265
+ #endif
1266
+
1125
1267
  #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
1126
1268
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
1127
1269
  #define ecb_bswap16(x) __builtin_bswap16 (x)
@@ -1202,6 +1344,78 @@ ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_he
1202
1344
  ecb_inline ecb_const ecb_bool ecb_little_endian (void);
1203
1345
  ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
1204
1346
 
1347
+ /*****************************************************************************/
1348
+ /* unaligned load/store */
1349
+
1350
+ ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1351
+ ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1352
+ ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1353
+
1354
+ ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1355
+ ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1356
+ ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1357
+
1358
+ ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1359
+ ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1360
+ ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1361
+
1362
+ ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
1363
+ ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
1364
+ ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
1365
+
1366
+ ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
1367
+ ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
1368
+ ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
1369
+
1370
+ ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1371
+ ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1372
+ ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1373
+
1374
+ ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1375
+ ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1376
+ ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1377
+
1378
+ ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
1379
+ ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
1380
+ ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
1381
+
1382
+ ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
1383
+ ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
1384
+ ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
1385
+
1386
+ ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
1387
+ ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
1388
+ ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
1389
+
1390
+ #if ECB_CPP
1391
+
1392
+ inline uint8_t ecb_bswap (uint8_t v) { return v; }
1393
+ inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
1394
+ inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
1395
+ inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
1396
+
1397
+ template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1398
+ template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1399
+ template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
1400
+ template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
1401
+ template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
1402
+ template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
1403
+ template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
1404
+ template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
1405
+
1406
+ template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1407
+ template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1408
+ template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
1409
+ template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
1410
+ template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
1411
+ template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
1412
+ template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
1413
+ template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
1414
+
1415
+ #endif
1416
+
1417
+ /*****************************************************************************/
1418
+
1205
1419
  #if ECB_GCC_VERSION(3,0) || ECB_C99
1206
1420
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1207
1421
  #else
@@ -1235,6 +1449,8 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
1235
1449
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1236
1450
  #endif
1237
1451
 
1452
+ /*****************************************************************************/
1453
+
1238
1454
  ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1239
1455
  ecb_function_ ecb_const uint32_t
1240
1456
  ecb_binary16_to_binary32 (uint32_t x)
@@ -1352,7 +1568,6 @@ ecb_binary32_to_binary16 (uint32_t x)
1352
1568
  || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1353
1569
  || defined __aarch64__
1354
1570
  #define ECB_STDFP 1
1355
- #include <string.h> /* for memcpy */
1356
1571
  #else
1357
1572
  #define ECB_STDFP 0
1358
1573
  #endif
@@ -1547,7 +1762,7 @@ ecb_binary32_to_binary16 (uint32_t x)
1547
1762
  #if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1548
1763
  /* if your architecture doesn't need memory fences, e.g. because it is
1549
1764
  * single-cpu/core, or if you use libev in a project that doesn't use libev
1550
- * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
1765
+ * from multiple threads, then you can define ECB_NO_THREADS when compiling
1551
1766
  * libev, in which cases the memory fences become nops.
1552
1767
  * alternatively, you can remove this #error and link against libpthread,
1553
1768
  * which will then provide the memory fences.
@@ -1561,18 +1776,80 @@ ecb_binary32_to_binary16 (uint32_t x)
1561
1776
  # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
1562
1777
  #endif
1563
1778
 
1564
- #define expect_false(cond) ecb_expect_false (cond)
1565
- #define expect_true(cond) ecb_expect_true (cond)
1566
- #define noinline ecb_noinline
1567
-
1568
1779
  #define inline_size ecb_inline
1569
1780
 
1570
1781
  #if EV_FEATURE_CODE
1571
1782
  # define inline_speed ecb_inline
1572
1783
  #else
1573
- # define inline_speed noinline static
1784
+ # define inline_speed ecb_noinline static
1785
+ #endif
1786
+
1787
+ /*****************************************************************************/
1788
+ /* raw syscall wrappers */
1789
+
1790
+ #if EV_NEED_SYSCALL
1791
+
1792
+ #include <sys/syscall.h>
1793
+
1794
+ /*
1795
+ * define some syscall wrappers for common architectures
1796
+ * this is mostly for nice looks during debugging, not performance.
1797
+ * our syscalls return < 0, not == -1, on error. which is good
1798
+ * enough for linux aio.
1799
+ * TODO: arm is also common nowadays, maybe even mips and x86
1800
+ * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1801
+ */
1802
+ #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
1803
+ /* the costly errno access probably kills this for size optimisation */
1804
+
1805
+ #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1806
+ ({ \
1807
+ long res; \
1808
+ register unsigned long r6 __asm__ ("r9" ); \
1809
+ register unsigned long r5 __asm__ ("r8" ); \
1810
+ register unsigned long r4 __asm__ ("r10"); \
1811
+ register unsigned long r3 __asm__ ("rdx"); \
1812
+ register unsigned long r2 __asm__ ("rsi"); \
1813
+ register unsigned long r1 __asm__ ("rdi"); \
1814
+ if (narg >= 6) r6 = (unsigned long)(arg6); \
1815
+ if (narg >= 5) r5 = (unsigned long)(arg5); \
1816
+ if (narg >= 4) r4 = (unsigned long)(arg4); \
1817
+ if (narg >= 3) r3 = (unsigned long)(arg3); \
1818
+ if (narg >= 2) r2 = (unsigned long)(arg2); \
1819
+ if (narg >= 1) r1 = (unsigned long)(arg1); \
1820
+ __asm__ __volatile__ ( \
1821
+ "syscall\n\t" \
1822
+ : "=a" (res) \
1823
+ : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1824
+ : "cc", "r11", "cx", "memory"); \
1825
+ errno = -res; \
1826
+ res; \
1827
+ })
1828
+
1829
+ #endif
1830
+
1831
+ #ifdef ev_syscall
1832
+ #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1833
+ #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1834
+ #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1835
+ #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1836
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
1837
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1838
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1839
+ #else
1840
+ #define ev_syscall0(nr) syscall (nr)
1841
+ #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1842
+ #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1843
+ #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1844
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1845
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1846
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1847
+ #endif
1848
+
1574
1849
  #endif
1575
1850
 
1851
+ /*****************************************************************************/
1852
+
1576
1853
  #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1577
1854
 
1578
1855
  #if EV_MINPRI == EV_MAXPRI
@@ -1630,7 +1907,7 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
1630
1907
  #include <float.h>
1631
1908
 
1632
1909
  /* a floor() replacement function, should be independent of ev_tstamp type */
1633
- noinline
1910
+ ecb_noinline
1634
1911
  static ev_tstamp
1635
1912
  ev_floor (ev_tstamp v)
1636
1913
  {
@@ -1641,26 +1918,26 @@ ev_floor (ev_tstamp v)
1641
1918
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1642
1919
  #endif
1643
1920
 
1644
- /* argument too large for an unsigned long? */
1645
- if (expect_false (v >= shift))
1921
+ /* special treatment for negative arguments */
1922
+ if (ecb_expect_false (v < 0.))
1923
+ {
1924
+ ev_tstamp f = -ev_floor (-v);
1925
+
1926
+ return f - (f == v ? 0 : 1);
1927
+ }
1928
+
1929
+ /* argument too large for an unsigned long? then reduce it */
1930
+ if (ecb_expect_false (v >= shift))
1646
1931
  {
1647
1932
  ev_tstamp f;
1648
1933
 
1649
1934
  if (v == v - 1.)
1650
- return v; /* very large number */
1935
+ return v; /* very large numbers are assumed to be integer */
1651
1936
 
1652
1937
  f = shift * ev_floor (v * (1. / shift));
1653
1938
  return f + ev_floor (v - f);
1654
1939
  }
1655
1940
 
1656
- /* special treatment for negative args? */
1657
- if (expect_false (v < 0.))
1658
- {
1659
- ev_tstamp f = -ev_floor (-v);
1660
-
1661
- return f - (f == v ? 0 : 1);
1662
- }
1663
-
1664
1941
  /* fits into an unsigned long */
1665
1942
  return (unsigned long)v;
1666
1943
  }
@@ -1673,7 +1950,7 @@ ev_floor (ev_tstamp v)
1673
1950
  # include <sys/utsname.h>
1674
1951
  #endif
1675
1952
 
1676
- noinline ecb_cold
1953
+ ecb_noinline ecb_cold
1677
1954
  static unsigned int
1678
1955
  ev_linux_version (void)
1679
1956
  {
@@ -1713,7 +1990,7 @@ ev_linux_version (void)
1713
1990
  /*****************************************************************************/
1714
1991
 
1715
1992
  #if EV_AVOID_STDIO
1716
- noinline ecb_cold
1993
+ ecb_noinline ecb_cold
1717
1994
  static void
1718
1995
  ev_printerr (const char *msg)
1719
1996
  {
@@ -1730,7 +2007,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1730
2007
  syserr_cb = cb;
1731
2008
  }
1732
2009
 
1733
- noinline ecb_cold
2010
+ ecb_noinline ecb_cold
1734
2011
  static void
1735
2012
  ev_syserr (const char *msg)
1736
2013
  {
@@ -1754,7 +2031,7 @@ ev_syserr (const char *msg)
1754
2031
  }
1755
2032
 
1756
2033
  static void *
1757
- ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
2034
+ ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT
1758
2035
  {
1759
2036
  /* some systems, notably openbsd and darwin, fail to properly
1760
2037
  * implement realloc (x, 0) (as required by both ansi c-89 and
@@ -1770,17 +2047,17 @@ ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
1770
2047
  return 0;
1771
2048
  }
1772
2049
 
1773
- static void *(*alloc)(void *ptr, long size) EV_NOEXCEPT = ev_realloc_emul;
2050
+ static void *(*alloc)(void *ptr, size_t size) EV_NOEXCEPT = ev_realloc_emul;
1774
2051
 
1775
2052
  ecb_cold
1776
2053
  void
1777
- ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT
2054
+ ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT
1778
2055
  {
1779
2056
  alloc = cb;
1780
2057
  }
1781
2058
 
1782
2059
  inline_speed void *
1783
- ev_realloc (void *ptr, long size)
2060
+ ev_realloc (void *ptr, size_t size)
1784
2061
  {
1785
2062
  ptr = alloc (ptr, size);
1786
2063
 
@@ -1812,7 +2089,7 @@ typedef struct
1812
2089
  unsigned char events; /* the events watched for */
1813
2090
  unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1814
2091
  unsigned char emask; /* some backends store the actual kernel mask in here */
1815
- unsigned char unused;
2092
+ unsigned char eflags; /* flags field for use by backends */
1816
2093
  #if EV_USE_EPOLL
1817
2094
  unsigned int egen; /* generation counter to counter epoll bugs */
1818
2095
  #endif
@@ -1876,7 +2153,7 @@ typedef struct
1876
2153
 
1877
2154
  #else
1878
2155
 
1879
- EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
2156
+ EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1880
2157
  #define VAR(name,decl) static decl;
1881
2158
  #include "ev_vars.h"
1882
2159
  #undef VAR
@@ -1886,8 +2163,8 @@ typedef struct
1886
2163
  #endif
1887
2164
 
1888
2165
  #if EV_FEATURE_API
1889
- # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
1890
- # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
2166
+ # define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
2167
+ # define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
1891
2168
  # define EV_INVOKE_PENDING invoke_cb (EV_A)
1892
2169
  #else
1893
2170
  # define EV_RELEASE_CB (void)0
@@ -1904,17 +2181,19 @@ ev_tstamp
1904
2181
  ev_time (void) EV_NOEXCEPT
1905
2182
  {
1906
2183
  #if EV_USE_REALTIME
1907
- if (expect_true (have_realtime))
2184
+ if (ecb_expect_true (have_realtime))
1908
2185
  {
1909
2186
  struct timespec ts;
1910
2187
  clock_gettime (CLOCK_REALTIME, &ts);
1911
- return ts.tv_sec + ts.tv_nsec * 1e-9;
2188
+ return EV_TS_GET (ts);
1912
2189
  }
1913
2190
  #endif
1914
2191
 
1915
- struct timeval tv;
1916
- gettimeofday (&tv, 0);
1917
- return tv.tv_sec + tv.tv_usec * 1e-6;
2192
+ {
2193
+ struct timeval tv;
2194
+ gettimeofday (&tv, 0);
2195
+ return EV_TV_GET (tv);
2196
+ }
1918
2197
  }
1919
2198
  #endif
1920
2199
 
@@ -1922,11 +2201,11 @@ inline_size ev_tstamp
1922
2201
  get_clock (void)
1923
2202
  {
1924
2203
  #if EV_USE_MONOTONIC
1925
- if (expect_true (have_monotonic))
2204
+ if (ecb_expect_true (have_monotonic))
1926
2205
  {
1927
2206
  struct timespec ts;
1928
2207
  clock_gettime (CLOCK_MONOTONIC, &ts);
1929
- return ts.tv_sec + ts.tv_nsec * 1e-9;
2208
+ return EV_TS_GET (ts);
1930
2209
  }
1931
2210
  #endif
1932
2211
 
@@ -1944,7 +2223,7 @@ ev_now (EV_P) EV_NOEXCEPT
1944
2223
  void
1945
2224
  ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1946
2225
  {
1947
- if (delay > 0.)
2226
+ if (delay > EV_TS_CONST (0.))
1948
2227
  {
1949
2228
  #if EV_USE_NANOSLEEP
1950
2229
  struct timespec ts;
@@ -1954,7 +2233,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1954
2233
  #elif defined _WIN32
1955
2234
  /* maybe this should round up, as ms is very low resolution */
1956
2235
  /* compared to select (µs) or nanosleep (ns) */
1957
- Sleep ((unsigned long)(delay * 1e3));
2236
+ Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
1958
2237
  #else
1959
2238
  struct timeval tv;
1960
2239
 
@@ -1994,7 +2273,7 @@ array_nextsize (int elem, int cur, int cnt)
1994
2273
  return ncur;
1995
2274
  }
1996
2275
 
1997
- noinline ecb_cold
2276
+ ecb_noinline ecb_cold
1998
2277
  static void *
1999
2278
  array_realloc (int elem, void *base, int *cur, int cnt)
2000
2279
  {
@@ -2008,7 +2287,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
2008
2287
  memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
2009
2288
 
2010
2289
  #define array_needsize(type,base,cur,cnt,init) \
2011
- if (expect_false ((cnt) > (cur))) \
2290
+ if (ecb_expect_false ((cnt) > (cur))) \
2012
2291
  { \
2013
2292
  ecb_unused int ocur_ = (cur); \
2014
2293
  (base) = (type *)array_realloc \
@@ -2032,20 +2311,20 @@ array_realloc (int elem, void *base, int *cur, int cnt)
2032
2311
  /*****************************************************************************/
2033
2312
 
2034
2313
  /* dummy callback for pending events */
2035
- noinline
2314
+ ecb_noinline
2036
2315
  static void
2037
2316
  pendingcb (EV_P_ ev_prepare *w, int revents)
2038
2317
  {
2039
2318
  }
2040
2319
 
2041
- noinline
2320
+ ecb_noinline
2042
2321
  void
2043
2322
  ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
2044
2323
  {
2045
2324
  W w_ = (W)w;
2046
2325
  int pri = ABSPRI (w_);
2047
2326
 
2048
- if (expect_false (w_->pending))
2327
+ if (ecb_expect_false (w_->pending))
2049
2328
  pendings [pri][w_->pending - 1].events |= revents;
2050
2329
  else
2051
2330
  {
@@ -2106,7 +2385,7 @@ fd_event (EV_P_ int fd, int revents)
2106
2385
  {
2107
2386
  ANFD *anfd = anfds + fd;
2108
2387
 
2109
- if (expect_true (!anfd->reify))
2388
+ if (ecb_expect_true (!anfd->reify))
2110
2389
  fd_event_nocheck (EV_A_ fd, revents);
2111
2390
  }
2112
2391
 
@@ -2124,8 +2403,20 @@ fd_reify (EV_P)
2124
2403
  {
2125
2404
  int i;
2126
2405
 
2406
+ /* most backends do not modify the fdchanges list in backend_modfiy.
2407
+ * except io_uring, which has fixed-size buffers which might force us
2408
+ * to handle events in backend_modify, causing fdchanges to be amended,
2409
+ * which could result in an endless loop.
2410
+ * to avoid this, we do not dynamically handle fds that were added
2411
+ * during fd_reify. that means that for those backends, fdchangecnt
2412
+ * might be non-zero during poll, which must cause them to not block.
2413
+ * to not put too much of a burden on other backends, this detail
2414
+ * needs to be handled in the backend.
2415
+ */
2416
+ int changecnt = fdchangecnt;
2417
+
2127
2418
  #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
2128
- for (i = 0; i < fdchangecnt; ++i)
2419
+ for (i = 0; i < changecnt; ++i)
2129
2420
  {
2130
2421
  int fd = fdchanges [i];
2131
2422
  ANFD *anfd = anfds + fd;
@@ -2149,7 +2440,7 @@ fd_reify (EV_P)
2149
2440
  }
2150
2441
  #endif
2151
2442
 
2152
- for (i = 0; i < fdchangecnt; ++i)
2443
+ for (i = 0; i < changecnt; ++i)
2153
2444
  {
2154
2445
  int fd = fdchanges [i];
2155
2446
  ANFD *anfd = anfds + fd;
@@ -2160,7 +2451,7 @@ fd_reify (EV_P)
2160
2451
 
2161
2452
  anfd->reify = 0;
2162
2453
 
2163
- /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2454
+ /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2164
2455
  {
2165
2456
  anfd->events = 0;
2166
2457
 
@@ -2175,7 +2466,14 @@ fd_reify (EV_P)
2175
2466
  backend_modify (EV_A_ fd, o_events, anfd->events);
2176
2467
  }
2177
2468
 
2178
- fdchangecnt = 0;
2469
+ /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
2470
+ * this is a rare case (see beginning comment in this function), so we copy them to the
2471
+ * front and hope the backend handles this case.
2472
+ */
2473
+ if (ecb_expect_false (fdchangecnt != changecnt))
2474
+ memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
2475
+
2476
+ fdchangecnt -= changecnt;
2179
2477
  }
2180
2478
 
2181
2479
  /* something about the given fd changed */
@@ -2184,9 +2482,9 @@ void
2184
2482
  fd_change (EV_P_ int fd, int flags)
2185
2483
  {
2186
2484
  unsigned char reify = anfds [fd].reify;
2187
- anfds [fd].reify |= flags;
2485
+ anfds [fd].reify = reify | flags;
2188
2486
 
2189
- if (expect_true (!reify))
2487
+ if (ecb_expect_true (!reify))
2190
2488
  {
2191
2489
  ++fdchangecnt;
2192
2490
  array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
@@ -2219,7 +2517,7 @@ fd_valid (int fd)
2219
2517
  }
2220
2518
 
2221
2519
  /* called on EBADF to verify fds */
2222
- noinline ecb_cold
2520
+ ecb_noinline ecb_cold
2223
2521
  static void
2224
2522
  fd_ebadf (EV_P)
2225
2523
  {
@@ -2232,7 +2530,7 @@ fd_ebadf (EV_P)
2232
2530
  }
2233
2531
 
2234
2532
  /* called on ENOMEM in select/poll to kill some fds and retry */
2235
- noinline ecb_cold
2533
+ ecb_noinline ecb_cold
2236
2534
  static void
2237
2535
  fd_enomem (EV_P)
2238
2536
  {
@@ -2247,7 +2545,7 @@ fd_enomem (EV_P)
2247
2545
  }
2248
2546
 
2249
2547
  /* usually called after fork if backend needs to re-arm all fds from scratch */
2250
- noinline
2548
+ ecb_noinline
2251
2549
  static void
2252
2550
  fd_rearm_all (EV_P)
2253
2551
  {
@@ -2311,19 +2609,19 @@ downheap (ANHE *heap, int N, int k)
2311
2609
  ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
2312
2610
 
2313
2611
  /* find minimum child */
2314
- if (expect_true (pos + DHEAP - 1 < E))
2612
+ if (ecb_expect_true (pos + DHEAP - 1 < E))
2315
2613
  {
2316
2614
  /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2317
- if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2318
- if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2319
- if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2615
+ if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2616
+ if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2617
+ if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2320
2618
  }
2321
2619
  else if (pos < E)
2322
2620
  {
2323
2621
  /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2324
- if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2325
- if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2326
- if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2622
+ if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2623
+ if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2624
+ if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2327
2625
  }
2328
2626
  else
2329
2627
  break;
@@ -2341,7 +2639,7 @@ downheap (ANHE *heap, int N, int k)
2341
2639
  ev_active (ANHE_w (he)) = k;
2342
2640
  }
2343
2641
 
2344
- #else /* 4HEAP */
2642
+ #else /* not 4HEAP */
2345
2643
 
2346
2644
  #define HEAP0 1
2347
2645
  #define HPARENT(k) ((k) >> 1)
@@ -2368,7 +2666,7 @@ downheap (ANHE *heap, int N, int k)
2368
2666
 
2369
2667
  heap [k] = heap [c];
2370
2668
  ev_active (ANHE_w (heap [k])) = k;
2371
-
2669
+
2372
2670
  k = c;
2373
2671
  }
2374
2672
 
@@ -2423,7 +2721,7 @@ reheap (ANHE *heap, int N)
2423
2721
 
2424
2722
  /*****************************************************************************/
2425
2723
 
2426
- /* associate signal watchers to a signal signal */
2724
+ /* associate signal watchers to a signal */
2427
2725
  typedef struct
2428
2726
  {
2429
2727
  EV_ATOMIC_T pending;
@@ -2439,7 +2737,7 @@ static ANSIG signals [EV_NSIG - 1];
2439
2737
 
2440
2738
  #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2441
2739
 
2442
- noinline ecb_cold
2740
+ ecb_noinline ecb_cold
2443
2741
  static void
2444
2742
  evpipe_init (EV_P)
2445
2743
  {
@@ -2490,7 +2788,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2490
2788
  {
2491
2789
  ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
2492
2790
 
2493
- if (expect_true (*flag))
2791
+ if (ecb_expect_true (*flag))
2494
2792
  return;
2495
2793
 
2496
2794
  *flag = 1;
@@ -2577,7 +2875,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
2577
2875
  ECB_MEMORY_FENCE;
2578
2876
 
2579
2877
  for (i = EV_NSIG - 1; i--; )
2580
- if (expect_false (signals [i].pending))
2878
+ if (ecb_expect_false (signals [i].pending))
2581
2879
  ev_feed_signal_event (EV_A_ i + 1);
2582
2880
  }
2583
2881
  #endif
@@ -2628,13 +2926,13 @@ ev_sighandler (int signum)
2628
2926
  ev_feed_signal (signum);
2629
2927
  }
2630
2928
 
2631
- noinline
2929
+ ecb_noinline
2632
2930
  void
2633
2931
  ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2634
2932
  {
2635
2933
  WL w;
2636
2934
 
2637
- if (expect_false (signum <= 0 || signum >= EV_NSIG))
2935
+ if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
2638
2936
  return;
2639
2937
 
2640
2938
  --signum;
@@ -2643,7 +2941,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2643
2941
  /* it is permissible to try to feed a signal to the wrong loop */
2644
2942
  /* or, likely more useful, feeding a signal nobody is waiting for */
2645
2943
 
2646
- if (expect_false (signals [signum].loop != EV_A))
2944
+ if (ecb_expect_false (signals [signum].loop != EV_A))
2647
2945
  return;
2648
2946
  #endif
2649
2947
 
@@ -2737,6 +3035,57 @@ childcb (EV_P_ ev_signal *sw, int revents)
2737
3035
 
2738
3036
  /*****************************************************************************/
2739
3037
 
3038
+ #if EV_USE_TIMERFD
3039
+
3040
+ static void periodics_reschedule (EV_P);
3041
+
3042
+ static void
3043
+ timerfdcb (EV_P_ ev_io *iow, int revents)
3044
+ {
3045
+ struct itimerspec its = { 0 };
3046
+
3047
+ its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
3048
+ timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
3049
+
3050
+ ev_rt_now = ev_time ();
3051
+ /* periodics_reschedule only needs ev_rt_now */
3052
+ /* but maybe in the future we want the full treatment. */
3053
+ /*
3054
+ now_floor = EV_TS_CONST (0.);
3055
+ time_update (EV_A_ EV_TSTAMP_HUGE);
3056
+ */
3057
+ #if EV_PERIODIC_ENABLE
3058
+ periodics_reschedule (EV_A);
3059
+ #endif
3060
+ }
3061
+
3062
+ ecb_noinline ecb_cold
3063
+ static void
3064
+ evtimerfd_init (EV_P)
3065
+ {
3066
+ if (!ev_is_active (&timerfd_w))
3067
+ {
3068
+ timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
3069
+
3070
+ if (timerfd >= 0)
3071
+ {
3072
+ fd_intern (timerfd); /* just to be sure */
3073
+
3074
+ ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
3075
+ ev_set_priority (&timerfd_w, EV_MINPRI);
3076
+ ev_io_start (EV_A_ &timerfd_w);
3077
+ ev_unref (EV_A); /* watcher should not keep loop alive */
3078
+
3079
+ /* (re-) arm timer */
3080
+ timerfdcb (EV_A_ 0, 0);
3081
+ }
3082
+ }
3083
+ }
3084
+
3085
+ #endif
3086
+
3087
+ /*****************************************************************************/
3088
+
2740
3089
  #if EV_USE_IOCP
2741
3090
  # include "ev_iocp.c"
2742
3091
  #endif
@@ -2752,6 +3101,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
2752
3101
  #if EV_USE_LINUXAIO
2753
3102
  # include "ev_linuxaio.c"
2754
3103
  #endif
3104
+ #if EV_USE_IOURING
3105
+ # include "ev_iouring.c"
3106
+ #endif
2755
3107
  #if EV_USE_POLL
2756
3108
  # include "ev_poll.c"
2757
3109
  #endif
@@ -2789,13 +3141,14 @@ ev_supported_backends (void) EV_NOEXCEPT
2789
3141
  {
2790
3142
  unsigned int flags = 0;
2791
3143
 
2792
- if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2793
- if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2794
- if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2795
- if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2796
- if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2797
- if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2798
-
3144
+ if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
3145
+ if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
3146
+ if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
3147
+ if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */
3148
+ if (EV_USE_IOURING && ev_linux_version () >= 0x050601 ) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
3149
+ if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
3150
+ if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
3151
+
2799
3152
  return flags;
2800
3153
  }
2801
3154
 
@@ -2805,24 +3158,27 @@ ev_recommended_backends (void) EV_NOEXCEPT
2805
3158
  {
2806
3159
  unsigned int flags = ev_supported_backends ();
2807
3160
 
2808
- #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_14)
2809
- /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
2810
- #elif defined(__NetBSD__)
2811
- /* kqueue is borked on everything but netbsd apparently */
2812
- /* it usually doesn't work correctly on anything but sockets and pipes */
2813
- #else
3161
+ /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
3162
+ #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14)
2814
3163
  /* only select works correctly on that "unix-certified" platform */
2815
3164
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
2816
3165
  flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
3166
+ #elif !defined(__NetBSD__)
3167
+ /* kqueue is borked on everything but netbsd apparently */
3168
+ /* it usually doesn't work correctly on anything but sockets and pipes */
3169
+ flags &= ~EVBACKEND_KQUEUE;
2817
3170
  #endif
2818
3171
 
2819
3172
  #ifdef __FreeBSD__
2820
3173
  flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
2821
3174
  #endif
2822
3175
 
2823
- /* TODO: linuxaio is very experimental */
2824
- #if !EV_RECOMMEND_LINUXAIO
3176
+ #ifdef __linux__
3177
+ /* NOTE: linuxaio is very experimental, never recommend */
2825
3178
  flags &= ~EVBACKEND_LINUXAIO;
3179
+
3180
+ /* NOTE: io_uring is super experimental, never recommend */
3181
+ flags &= ~EVBACKEND_IOURING;
2826
3182
  #endif
2827
3183
 
2828
3184
  return flags;
@@ -2832,12 +3188,14 @@ ecb_cold
2832
3188
  unsigned int
2833
3189
  ev_embeddable_backends (void) EV_NOEXCEPT
2834
3190
  {
2835
- int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
3191
+ int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
2836
3192
 
2837
3193
  /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2838
3194
  if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2839
3195
  flags &= ~EVBACKEND_EPOLL;
2840
3196
 
3197
+ /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
3198
+
2841
3199
  return flags;
2842
3200
  }
2843
3201
 
@@ -2899,7 +3257,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
2899
3257
  #endif
2900
3258
 
2901
3259
  /* initialise a loop structure, must be zero-initialised */
2902
- noinline ecb_cold
3260
+ ecb_noinline ecb_cold
2903
3261
  static void
2904
3262
  loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2905
3263
  {
@@ -2964,6 +3322,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2964
3322
  #if EV_USE_SIGNALFD
2965
3323
  sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2966
3324
  #endif
3325
+ #if EV_USE_TIMERFD
3326
+ timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3327
+ #endif
2967
3328
 
2968
3329
  if (!(flags & EVBACKEND_MASK))
2969
3330
  flags |= ev_recommended_backends ();
@@ -2977,6 +3338,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2977
3338
  #if EV_USE_KQUEUE
2978
3339
  if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
2979
3340
  #endif
3341
+ #if EV_USE_IOURING
3342
+ if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
3343
+ #endif
2980
3344
  #if EV_USE_LINUXAIO
2981
3345
  if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2982
3346
  #endif
@@ -3014,7 +3378,7 @@ ev_loop_destroy (EV_P)
3014
3378
 
3015
3379
  #if EV_CLEANUP_ENABLE
3016
3380
  /* queue cleanup watchers (and execute them) */
3017
- if (expect_false (cleanupcnt))
3381
+ if (ecb_expect_false (cleanupcnt))
3018
3382
  {
3019
3383
  queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
3020
3384
  EV_INVOKE_PENDING;
@@ -3043,6 +3407,11 @@ ev_loop_destroy (EV_P)
3043
3407
  close (sigfd);
3044
3408
  #endif
3045
3409
 
3410
+ #if EV_USE_TIMERFD
3411
+ if (ev_is_active (&timerfd_w))
3412
+ close (timerfd);
3413
+ #endif
3414
+
3046
3415
  #if EV_USE_INOTIFY
3047
3416
  if (fs_fd >= 0)
3048
3417
  close (fs_fd);
@@ -3060,6 +3429,9 @@ ev_loop_destroy (EV_P)
3060
3429
  #if EV_USE_KQUEUE
3061
3430
  if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3062
3431
  #endif
3432
+ #if EV_USE_IOURING
3433
+ if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3434
+ #endif
3063
3435
  #if EV_USE_LINUXAIO
3064
3436
  if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3065
3437
  #endif
@@ -3127,6 +3499,9 @@ loop_fork (EV_P)
3127
3499
  #if EV_USE_KQUEUE
3128
3500
  if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3129
3501
  #endif
3502
+ #if EV_USE_IOURING
3503
+ if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3504
+ #endif
3130
3505
  #if EV_USE_LINUXAIO
3131
3506
  if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3132
3507
  #endif
@@ -3137,22 +3512,44 @@ loop_fork (EV_P)
3137
3512
  infy_fork (EV_A);
3138
3513
  #endif
3139
3514
 
3140
- #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3141
- if (ev_is_active (&pipe_w) && postfork != 2)
3515
+ if (postfork != 2)
3142
3516
  {
3143
- /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3517
+ #if EV_USE_SIGNALFD
3518
+ /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
3519
+ #endif
3144
3520
 
3145
- ev_ref (EV_A);
3146
- ev_io_stop (EV_A_ &pipe_w);
3521
+ #if EV_USE_TIMERFD
3522
+ if (ev_is_active (&timerfd_w))
3523
+ {
3524
+ ev_ref (EV_A);
3525
+ ev_io_stop (EV_A_ &timerfd_w);
3526
+
3527
+ close (timerfd);
3528
+ timerfd = -2;
3147
3529
 
3148
- if (evpipe [0] >= 0)
3149
- EV_WIN32_CLOSE_FD (evpipe [0]);
3530
+ evtimerfd_init (EV_A);
3531
+ /* reschedule periodics, in case we missed something */
3532
+ ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3533
+ }
3534
+ #endif
3150
3535
 
3151
- evpipe_init (EV_A);
3152
- /* iterate over everything, in case we missed something before */
3153
- ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3536
+ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3537
+ if (ev_is_active (&pipe_w))
3538
+ {
3539
+ /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3540
+
3541
+ ev_ref (EV_A);
3542
+ ev_io_stop (EV_A_ &pipe_w);
3543
+
3544
+ if (evpipe [0] >= 0)
3545
+ EV_WIN32_CLOSE_FD (evpipe [0]);
3546
+
3547
+ evpipe_init (EV_A);
3548
+ /* iterate over everything, in case we missed something before */
3549
+ ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3550
+ }
3551
+ #endif
3154
3552
  }
3155
- #endif
3156
3553
 
3157
3554
  postfork = 0;
3158
3555
  }
@@ -3178,7 +3575,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
3178
3575
  #endif /* multiplicity */
3179
3576
 
3180
3577
  #if EV_VERIFY
3181
- noinline ecb_cold
3578
+ ecb_noinline ecb_cold
3182
3579
  static void
3183
3580
  verify_watcher (EV_P_ W w)
3184
3581
  {
@@ -3188,7 +3585,7 @@ verify_watcher (EV_P_ W w)
3188
3585
  assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3189
3586
  }
3190
3587
 
3191
- noinline ecb_cold
3588
+ ecb_noinline ecb_cold
3192
3589
  static void
3193
3590
  verify_heap (EV_P_ ANHE *heap, int N)
3194
3591
  {
@@ -3204,7 +3601,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
3204
3601
  }
3205
3602
  }
3206
3603
 
3207
- noinline ecb_cold
3604
+ ecb_noinline ecb_cold
3208
3605
  static void
3209
3606
  array_verify (EV_P_ W *ws, int cnt)
3210
3607
  {
@@ -3363,7 +3760,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
3363
3760
  return count;
3364
3761
  }
3365
3762
 
3366
- noinline
3763
+ ecb_noinline
3367
3764
  void
3368
3765
  ev_invoke_pending (EV_P)
3369
3766
  {
@@ -3392,7 +3789,7 @@ ev_invoke_pending (EV_P)
3392
3789
  inline_size void
3393
3790
  idle_reify (EV_P)
3394
3791
  {
3395
- if (expect_false (idleall))
3792
+ if (ecb_expect_false (idleall))
3396
3793
  {
3397
3794
  int pri;
3398
3795
 
@@ -3432,7 +3829,7 @@ timers_reify (EV_P)
3432
3829
  if (ev_at (w) < mn_now)
3433
3830
  ev_at (w) = mn_now;
3434
3831
 
3435
- assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
3832
+ assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3436
3833
 
3437
3834
  ANHE_at_cache (timers [HEAP0]);
3438
3835
  downheap (timers, timercnt, HEAP0);
@@ -3451,7 +3848,7 @@ timers_reify (EV_P)
3451
3848
 
3452
3849
  #if EV_PERIODIC_ENABLE
3453
3850
 
3454
- noinline
3851
+ ecb_noinline
3455
3852
  static void
3456
3853
  periodic_recalc (EV_P_ ev_periodic *w)
3457
3854
  {
@@ -3464,7 +3861,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
3464
3861
  ev_tstamp nat = at + w->interval;
3465
3862
 
3466
3863
  /* when resolution fails us, we use ev_rt_now */
3467
- if (expect_false (nat == at))
3864
+ if (ecb_expect_false (nat == at))
3468
3865
  {
3469
3866
  at = ev_rt_now;
3470
3867
  break;
@@ -3520,7 +3917,7 @@ periodics_reify (EV_P)
3520
3917
 
3521
3918
  /* simply recalculate all periodics */
3522
3919
  /* TODO: maybe ensure that at least one event happens when jumping forward? */
3523
- noinline ecb_cold
3920
+ ecb_noinline ecb_cold
3524
3921
  static void
3525
3922
  periodics_reschedule (EV_P)
3526
3923
  {
@@ -3544,7 +3941,7 @@ periodics_reschedule (EV_P)
3544
3941
  #endif
3545
3942
 
3546
3943
  /* adjust all timers by a given offset */
3547
- noinline ecb_cold
3944
+ ecb_noinline ecb_cold
3548
3945
  static void
3549
3946
  timers_reschedule (EV_P_ ev_tstamp adjust)
3550
3947
  {
@@ -3564,7 +3961,7 @@ inline_speed void
3564
3961
  time_update (EV_P_ ev_tstamp max_block)
3565
3962
  {
3566
3963
  #if EV_USE_MONOTONIC
3567
- if (expect_true (have_monotonic))
3964
+ if (ecb_expect_true (have_monotonic))
3568
3965
  {
3569
3966
  int i;
3570
3967
  ev_tstamp odiff = rtmn_diff;
@@ -3573,7 +3970,7 @@ time_update (EV_P_ ev_tstamp max_block)
3573
3970
 
3574
3971
  /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3575
3972
  /* interpolate in the meantime */
3576
- if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
3973
+ if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3577
3974
  {
3578
3975
  ev_rt_now = rtmn_diff + mn_now;
3579
3976
  return;
@@ -3597,7 +3994,7 @@ time_update (EV_P_ ev_tstamp max_block)
3597
3994
 
3598
3995
  diff = odiff - rtmn_diff;
3599
3996
 
3600
- if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
3997
+ if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3601
3998
  return; /* all is well */
3602
3999
 
3603
4000
  ev_rt_now = ev_time ();
@@ -3616,7 +4013,7 @@ time_update (EV_P_ ev_tstamp max_block)
3616
4013
  {
3617
4014
  ev_rt_now = ev_time ();
3618
4015
 
3619
- if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
4016
+ if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3620
4017
  {
3621
4018
  /* adjust timers. this is easy, as the offset is the same for all of them */
3622
4019
  timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3636,11 +4033,13 @@ struct ev_poll_args {
3636
4033
  };
3637
4034
 
3638
4035
  static
3639
- VALUE ev_backend_poll(void *ptr)
4036
+ void * ev_backend_poll(void *ptr)
3640
4037
  {
3641
4038
  struct ev_poll_args *args = (struct ev_poll_args *)ptr;
3642
4039
  struct ev_loop *loop = args->loop;
3643
4040
  backend_poll (EV_A_ args->waittime);
4041
+
4042
+ return NULL;
3644
4043
  }
3645
4044
  /* ######################################## */
3646
4045
 
@@ -3668,8 +4067,8 @@ ev_run (EV_P_ int flags)
3668
4067
  #endif
3669
4068
 
3670
4069
  #ifndef _WIN32
3671
- if (expect_false (curpid)) /* penalise the forking check even more */
3672
- if (expect_false (getpid () != curpid))
4070
+ if (ecb_expect_false (curpid)) /* penalise the forking check even more */
4071
+ if (ecb_expect_false (getpid () != curpid))
3673
4072
  {
3674
4073
  curpid = getpid ();
3675
4074
  postfork = 1;
@@ -3678,7 +4077,7 @@ ev_run (EV_P_ int flags)
3678
4077
 
3679
4078
  #if EV_FORK_ENABLE
3680
4079
  /* we might have forked, so queue fork handlers */
3681
- if (expect_false (postfork))
4080
+ if (ecb_expect_false (postfork))
3682
4081
  if (forkcnt)
3683
4082
  {
3684
4083
  queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3688,18 +4087,18 @@ ev_run (EV_P_ int flags)
3688
4087
 
3689
4088
  #if EV_PREPARE_ENABLE
3690
4089
  /* queue prepare watchers (and execute them) */
3691
- if (expect_false (preparecnt))
4090
+ if (ecb_expect_false (preparecnt))
3692
4091
  {
3693
4092
  queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
3694
4093
  EV_INVOKE_PENDING;
3695
4094
  }
3696
4095
  #endif
3697
4096
 
3698
- if (expect_false (loop_done))
4097
+ if (ecb_expect_false (loop_done))
3699
4098
  break;
3700
4099
 
3701
4100
  /* we might have forked, so reify kernel state if necessary */
3702
- if (expect_false (postfork))
4101
+ if (ecb_expect_false (postfork))
3703
4102
  loop_fork (EV_A);
3704
4103
 
3705
4104
  /* update fd-related kernel structures */
@@ -3714,16 +4113,28 @@ ev_run (EV_P_ int flags)
3714
4113
  ev_tstamp prev_mn_now = mn_now;
3715
4114
 
3716
4115
  /* update time to cancel out callback processing overhead */
3717
- time_update (EV_A_ 1e100);
4116
+ time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3718
4117
 
3719
4118
  /* from now on, we want a pipe-wake-up */
3720
4119
  pipe_write_wanted = 1;
3721
4120
 
3722
4121
  ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3723
4122
 
3724
- if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
4123
+ if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3725
4124
  {
3726
- waittime = MAX_BLOCKTIME;
4125
+ waittime = EV_TS_CONST (MAX_BLOCKTIME);
4126
+
4127
+ #if EV_USE_TIMERFD
4128
+ /* sleep a lot longer when we can reliably detect timejumps */
4129
+ if (ecb_expect_true (timerfd >= 0))
4130
+ waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4131
+ #endif
4132
+ #if !EV_PERIODIC_ENABLE
4133
+ /* without periodics but with monotonic clock there is no need */
4134
+ /* for any time jump detection, so sleep longer */
4135
+ if (ecb_expect_true (have_monotonic))
4136
+ waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4137
+ #endif
3727
4138
 
3728
4139
  if (timercnt)
3729
4140
  {
@@ -3740,23 +4151,28 @@ ev_run (EV_P_ int flags)
3740
4151
  #endif
3741
4152
 
3742
4153
  /* don't let timeouts decrease the waittime below timeout_blocktime */
3743
- if (expect_false (waittime < timeout_blocktime))
4154
+ if (ecb_expect_false (waittime < timeout_blocktime))
3744
4155
  waittime = timeout_blocktime;
3745
4156
 
3746
- /* at this point, we NEED to wait, so we have to ensure */
3747
- /* to pass a minimum nonzero value to the backend */
3748
- if (expect_false (waittime < backend_mintime))
3749
- waittime = backend_mintime;
4157
+ /* now there are two more special cases left, either we have
4158
+ * already-expired timers, so we should not sleep, or we have timers
4159
+ * that expire very soon, in which case we need to wait for a minimum
4160
+ * amount of time for some event loop backends.
4161
+ */
4162
+ if (ecb_expect_false (waittime < backend_mintime))
4163
+ waittime = waittime <= EV_TS_CONST (0.)
4164
+ ? EV_TS_CONST (0.)
4165
+ : backend_mintime;
3750
4166
 
3751
4167
  /* extra check because io_blocktime is commonly 0 */
3752
- if (expect_false (io_blocktime))
4168
+ if (ecb_expect_false (io_blocktime))
3753
4169
  {
3754
4170
  sleeptime = io_blocktime - (mn_now - prev_mn_now);
3755
4171
 
3756
4172
  if (sleeptime > waittime - backend_mintime)
3757
4173
  sleeptime = waittime - backend_mintime;
3758
4174
 
3759
- if (expect_true (sleeptime > 0.))
4175
+ if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3760
4176
  {
3761
4177
  ev_sleep (sleeptime);
3762
4178
  waittime -= sleeptime;
@@ -3827,7 +4243,6 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3827
4243
  ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3828
4244
  }
3829
4245
 
3830
-
3831
4246
  /* update ev_rt_now, do magic */
3832
4247
  time_update (EV_A_ waittime + sleeptime);
3833
4248
  }
@@ -3845,13 +4260,13 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3845
4260
 
3846
4261
  #if EV_CHECK_ENABLE
3847
4262
  /* queue check watchers, to be executed first */
3848
- if (expect_false (checkcnt))
4263
+ if (ecb_expect_false (checkcnt))
3849
4264
  queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
3850
4265
  #endif
3851
4266
 
3852
4267
  EV_INVOKE_PENDING;
3853
4268
  }
3854
- while (expect_true (
4269
+ while (ecb_expect_true (
3855
4270
  activecnt
3856
4271
  && !loop_done
3857
4272
  && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
@@ -3888,7 +4303,7 @@ ev_unref (EV_P) EV_NOEXCEPT
3888
4303
  void
3889
4304
  ev_now_update (EV_P) EV_NOEXCEPT
3890
4305
  {
3891
- time_update (EV_A_ 1e100);
4306
+ time_update (EV_A_ EV_TSTAMP_HUGE);
3892
4307
  }
3893
4308
 
3894
4309
  void
@@ -3925,7 +4340,7 @@ wlist_del (WL *head, WL elem)
3925
4340
  {
3926
4341
  while (*head)
3927
4342
  {
3928
- if (expect_true (*head == elem))
4343
+ if (ecb_expect_true (*head == elem))
3929
4344
  {
3930
4345
  *head = elem->next;
3931
4346
  break;
@@ -3952,7 +4367,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3952
4367
  W w_ = (W)w;
3953
4368
  int pending = w_->pending;
3954
4369
 
3955
- if (expect_true (pending))
4370
+ if (ecb_expect_true (pending))
3956
4371
  {
3957
4372
  ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
3958
4373
  p->w = (W)&pending_w;
@@ -3989,13 +4404,13 @@ ev_stop (EV_P_ W w)
3989
4404
 
3990
4405
  /*****************************************************************************/
3991
4406
 
3992
- noinline
4407
+ ecb_noinline
3993
4408
  void
3994
4409
  ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3995
4410
  {
3996
4411
  int fd = w->fd;
3997
4412
 
3998
- if (expect_false (ev_is_active (w)))
4413
+ if (ecb_expect_false (ev_is_active (w)))
3999
4414
  return;
4000
4415
 
4001
4416
  assert (("libev: ev_io_start called with negative fd", fd >= 0));
@@ -4019,12 +4434,12 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
4019
4434
  EV_FREQUENT_CHECK;
4020
4435
  }
4021
4436
 
4022
- noinline
4437
+ ecb_noinline
4023
4438
  void
4024
4439
  ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
4025
4440
  {
4026
4441
  clear_pending (EV_A_ (W)w);
4027
- if (expect_false (!ev_is_active (w)))
4442
+ if (ecb_expect_false (!ev_is_active (w)))
4028
4443
  return;
4029
4444
 
4030
4445
  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
@@ -4042,11 +4457,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
4042
4457
  EV_FREQUENT_CHECK;
4043
4458
  }
4044
4459
 
4045
- noinline
4460
+ ecb_noinline
4046
4461
  void
4047
4462
  ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
4048
4463
  {
4049
- if (expect_false (ev_is_active (w)))
4464
+ if (ecb_expect_false (ev_is_active (w)))
4050
4465
  return;
4051
4466
 
4052
4467
  ev_at (w) += mn_now;
@@ -4067,12 +4482,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
4067
4482
  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
4068
4483
  }
4069
4484
 
4070
- noinline
4485
+ ecb_noinline
4071
4486
  void
4072
4487
  ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4073
4488
  {
4074
4489
  clear_pending (EV_A_ (W)w);
4075
- if (expect_false (!ev_is_active (w)))
4490
+ if (ecb_expect_false (!ev_is_active (w)))
4076
4491
  return;
4077
4492
 
4078
4493
  EV_FREQUENT_CHECK;
@@ -4084,7 +4499,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4084
4499
 
4085
4500
  --timercnt;
4086
4501
 
4087
- if (expect_true (active < timercnt + HEAP0))
4502
+ if (ecb_expect_true (active < timercnt + HEAP0))
4088
4503
  {
4089
4504
  timers [active] = timers [timercnt + HEAP0];
4090
4505
  adjustheap (timers, timercnt, active);
@@ -4098,7 +4513,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4098
4513
  EV_FREQUENT_CHECK;
4099
4514
  }
4100
4515
 
4101
- noinline
4516
+ ecb_noinline
4102
4517
  void
4103
4518
  ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4104
4519
  {
@@ -4129,17 +4544,22 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4129
4544
  ev_tstamp
4130
4545
  ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4131
4546
  {
4132
- return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
4547
+ return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4133
4548
  }
4134
4549
 
4135
4550
  #if EV_PERIODIC_ENABLE
4136
- noinline
4551
+ ecb_noinline
4137
4552
  void
4138
4553
  ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4139
4554
  {
4140
- if (expect_false (ev_is_active (w)))
4555
+ if (ecb_expect_false (ev_is_active (w)))
4141
4556
  return;
4142
4557
 
4558
+ #if EV_USE_TIMERFD
4559
+ if (timerfd == -2)
4560
+ evtimerfd_init (EV_A);
4561
+ #endif
4562
+
4143
4563
  if (w->reschedule_cb)
4144
4564
  ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4145
4565
  else if (w->interval)
@@ -4164,12 +4584,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4164
4584
  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4165
4585
  }
4166
4586
 
4167
- noinline
4587
+ ecb_noinline
4168
4588
  void
4169
4589
  ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4170
4590
  {
4171
4591
  clear_pending (EV_A_ (W)w);
4172
- if (expect_false (!ev_is_active (w)))
4592
+ if (ecb_expect_false (!ev_is_active (w)))
4173
4593
  return;
4174
4594
 
4175
4595
  EV_FREQUENT_CHECK;
@@ -4181,7 +4601,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4181
4601
 
4182
4602
  --periodiccnt;
4183
4603
 
4184
- if (expect_true (active < periodiccnt + HEAP0))
4604
+ if (ecb_expect_true (active < periodiccnt + HEAP0))
4185
4605
  {
4186
4606
  periodics [active] = periodics [periodiccnt + HEAP0];
4187
4607
  adjustheap (periodics, periodiccnt, active);
@@ -4193,7 +4613,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4193
4613
  EV_FREQUENT_CHECK;
4194
4614
  }
4195
4615
 
4196
- noinline
4616
+ ecb_noinline
4197
4617
  void
4198
4618
  ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4199
4619
  {
@@ -4209,11 +4629,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4209
4629
 
4210
4630
  #if EV_SIGNAL_ENABLE
4211
4631
 
4212
- noinline
4632
+ ecb_noinline
4213
4633
  void
4214
4634
  ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4215
4635
  {
4216
- if (expect_false (ev_is_active (w)))
4636
+ if (ecb_expect_false (ev_is_active (w)))
4217
4637
  return;
4218
4638
 
4219
4639
  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4292,12 +4712,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4292
4712
  EV_FREQUENT_CHECK;
4293
4713
  }
4294
4714
 
4295
- noinline
4715
+ ecb_noinline
4296
4716
  void
4297
4717
  ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4298
4718
  {
4299
4719
  clear_pending (EV_A_ (W)w);
4300
- if (expect_false (!ev_is_active (w)))
4720
+ if (ecb_expect_false (!ev_is_active (w)))
4301
4721
  return;
4302
4722
 
4303
4723
  EV_FREQUENT_CHECK;
@@ -4340,7 +4760,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4340
4760
  #if EV_MULTIPLICITY
4341
4761
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
4342
4762
  #endif
4343
- if (expect_false (ev_is_active (w)))
4763
+ if (ecb_expect_false (ev_is_active (w)))
4344
4764
  return;
4345
4765
 
4346
4766
  EV_FREQUENT_CHECK;
@@ -4355,7 +4775,7 @@ void
4355
4775
  ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4356
4776
  {
4357
4777
  clear_pending (EV_A_ (W)w);
4358
- if (expect_false (!ev_is_active (w)))
4778
+ if (ecb_expect_false (!ev_is_active (w)))
4359
4779
  return;
4360
4780
 
4361
4781
  EV_FREQUENT_CHECK;
@@ -4379,14 +4799,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4379
4799
  #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4380
4800
  #define MIN_STAT_INTERVAL 0.1074891
4381
4801
 
4382
- noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4802
+ ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4383
4803
 
4384
4804
  #if EV_USE_INOTIFY
4385
4805
 
4386
4806
  /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4387
4807
  # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4388
4808
 
4389
- noinline
4809
+ ecb_noinline
4390
4810
  static void
4391
4811
  infy_add (EV_P_ ev_stat *w)
4392
4812
  {
@@ -4461,7 +4881,7 @@ infy_add (EV_P_ ev_stat *w)
4461
4881
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
4462
4882
  }
4463
4883
 
4464
- noinline
4884
+ ecb_noinline
4465
4885
  static void
4466
4886
  infy_del (EV_P_ ev_stat *w)
4467
4887
  {
@@ -4479,7 +4899,7 @@ infy_del (EV_P_ ev_stat *w)
4479
4899
  inotify_rm_watch (fs_fd, wd);
4480
4900
  }
4481
4901
 
4482
- noinline
4902
+ ecb_noinline
4483
4903
  static void
4484
4904
  infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4485
4905
  {
@@ -4635,7 +5055,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
4635
5055
  w->attr.st_nlink = 1;
4636
5056
  }
4637
5057
 
4638
- noinline
5058
+ ecb_noinline
4639
5059
  static void
4640
5060
  stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4641
5061
  {
@@ -4679,7 +5099,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4679
5099
  void
4680
5100
  ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4681
5101
  {
4682
- if (expect_false (ev_is_active (w)))
5102
+ if (ecb_expect_false (ev_is_active (w)))
4683
5103
  return;
4684
5104
 
4685
5105
  ev_stat_stat (EV_A_ w);
@@ -4711,7 +5131,7 @@ void
4711
5131
  ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4712
5132
  {
4713
5133
  clear_pending (EV_A_ (W)w);
4714
- if (expect_false (!ev_is_active (w)))
5134
+ if (ecb_expect_false (!ev_is_active (w)))
4715
5135
  return;
4716
5136
 
4717
5137
  EV_FREQUENT_CHECK;
@@ -4736,7 +5156,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4736
5156
  void
4737
5157
  ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4738
5158
  {
4739
- if (expect_false (ev_is_active (w)))
5159
+ if (ecb_expect_false (ev_is_active (w)))
4740
5160
  return;
4741
5161
 
4742
5162
  pri_adjust (EV_A_ (W)w);
@@ -4760,7 +5180,7 @@ void
4760
5180
  ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4761
5181
  {
4762
5182
  clear_pending (EV_A_ (W)w);
4763
- if (expect_false (!ev_is_active (w)))
5183
+ if (ecb_expect_false (!ev_is_active (w)))
4764
5184
  return;
4765
5185
 
4766
5186
  EV_FREQUENT_CHECK;
@@ -4783,7 +5203,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4783
5203
  void
4784
5204
  ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4785
5205
  {
4786
- if (expect_false (ev_is_active (w)))
5206
+ if (ecb_expect_false (ev_is_active (w)))
4787
5207
  return;
4788
5208
 
4789
5209
  EV_FREQUENT_CHECK;
@@ -4799,7 +5219,7 @@ void
4799
5219
  ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4800
5220
  {
4801
5221
  clear_pending (EV_A_ (W)w);
4802
- if (expect_false (!ev_is_active (w)))
5222
+ if (ecb_expect_false (!ev_is_active (w)))
4803
5223
  return;
4804
5224
 
4805
5225
  EV_FREQUENT_CHECK;
@@ -4821,7 +5241,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4821
5241
  void
4822
5242
  ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4823
5243
  {
4824
- if (expect_false (ev_is_active (w)))
5244
+ if (ecb_expect_false (ev_is_active (w)))
4825
5245
  return;
4826
5246
 
4827
5247
  EV_FREQUENT_CHECK;
@@ -4837,7 +5257,7 @@ void
4837
5257
  ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4838
5258
  {
4839
5259
  clear_pending (EV_A_ (W)w);
4840
- if (expect_false (!ev_is_active (w)))
5260
+ if (ecb_expect_false (!ev_is_active (w)))
4841
5261
  return;
4842
5262
 
4843
5263
  EV_FREQUENT_CHECK;
@@ -4856,7 +5276,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4856
5276
  #endif
4857
5277
 
4858
5278
  #if EV_EMBED_ENABLE
4859
- noinline
5279
+ ecb_noinline
4860
5280
  void
4861
5281
  ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4862
5282
  {
@@ -4890,6 +5310,7 @@ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
4890
5310
  }
4891
5311
  }
4892
5312
 
5313
+ #if EV_FORK_ENABLE
4893
5314
  static void
4894
5315
  embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4895
5316
  {
@@ -4906,6 +5327,7 @@ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4906
5327
 
4907
5328
  ev_embed_start (EV_A_ w);
4908
5329
  }
5330
+ #endif
4909
5331
 
4910
5332
  #if 0
4911
5333
  static void
@@ -4918,7 +5340,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4918
5340
  void
4919
5341
  ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4920
5342
  {
4921
- if (expect_false (ev_is_active (w)))
5343
+ if (ecb_expect_false (ev_is_active (w)))
4922
5344
  return;
4923
5345
 
4924
5346
  {
@@ -4936,8 +5358,10 @@ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4936
5358
  ev_set_priority (&w->prepare, EV_MINPRI);
4937
5359
  ev_prepare_start (EV_A_ &w->prepare);
4938
5360
 
5361
+ #if EV_FORK_ENABLE
4939
5362
  ev_fork_init (&w->fork, embed_fork_cb);
4940
5363
  ev_fork_start (EV_A_ &w->fork);
5364
+ #endif
4941
5365
 
4942
5366
  /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
4943
5367
 
@@ -4950,14 +5374,16 @@ void
4950
5374
  ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4951
5375
  {
4952
5376
  clear_pending (EV_A_ (W)w);
4953
- if (expect_false (!ev_is_active (w)))
5377
+ if (ecb_expect_false (!ev_is_active (w)))
4954
5378
  return;
4955
5379
 
4956
5380
  EV_FREQUENT_CHECK;
4957
5381
 
4958
5382
  ev_io_stop (EV_A_ &w->io);
4959
5383
  ev_prepare_stop (EV_A_ &w->prepare);
5384
+ #if EV_FORK_ENABLE
4960
5385
  ev_fork_stop (EV_A_ &w->fork);
5386
+ #endif
4961
5387
 
4962
5388
  ev_stop (EV_A_ (W)w);
4963
5389
 
@@ -4969,7 +5395,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4969
5395
  void
4970
5396
  ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4971
5397
  {
4972
- if (expect_false (ev_is_active (w)))
5398
+ if (ecb_expect_false (ev_is_active (w)))
4973
5399
  return;
4974
5400
 
4975
5401
  EV_FREQUENT_CHECK;
@@ -4985,7 +5411,7 @@ void
4985
5411
  ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4986
5412
  {
4987
5413
  clear_pending (EV_A_ (W)w);
4988
- if (expect_false (!ev_is_active (w)))
5414
+ if (ecb_expect_false (!ev_is_active (w)))
4989
5415
  return;
4990
5416
 
4991
5417
  EV_FREQUENT_CHECK;
@@ -5007,7 +5433,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
5007
5433
  void
5008
5434
  ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5009
5435
  {
5010
- if (expect_false (ev_is_active (w)))
5436
+ if (ecb_expect_false (ev_is_active (w)))
5011
5437
  return;
5012
5438
 
5013
5439
  EV_FREQUENT_CHECK;
@@ -5025,7 +5451,7 @@ void
5025
5451
  ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5026
5452
  {
5027
5453
  clear_pending (EV_A_ (W)w);
5028
- if (expect_false (!ev_is_active (w)))
5454
+ if (ecb_expect_false (!ev_is_active (w)))
5029
5455
  return;
5030
5456
 
5031
5457
  EV_FREQUENT_CHECK;
@@ -5048,7 +5474,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5048
5474
  void
5049
5475
  ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
5050
5476
  {
5051
- if (expect_false (ev_is_active (w)))
5477
+ if (ecb_expect_false (ev_is_active (w)))
5052
5478
  return;
5053
5479
 
5054
5480
  w->sent = 0;
@@ -5068,7 +5494,7 @@ void
5068
5494
  ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
5069
5495
  {
5070
5496
  clear_pending (EV_A_ (W)w);
5071
- if (expect_false (!ev_is_active (w)))
5497
+ if (ecb_expect_false (!ev_is_active (w)))
5072
5498
  return;
5073
5499
 
5074
5500
  EV_FREQUENT_CHECK;
@@ -5275,4 +5701,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
5275
5701
  #if EV_MULTIPLICITY
5276
5702
  #include "ev_wrap.h"
5277
5703
  #endif
5278
-