nio4r 2.5.1-java → 2.5.6-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 792f1fd684b428d03378912739674e9faaed4595c7a725427e4614d013598db9
-   data.tar.gz: 3a1f23002d8abd22652e98b5f6e6cea4e74f3f3789d9b97bdc75c5ad0e4e1c49
+   metadata.gz: aace72b9a6be158138445b5d7c1031a14103590ae4a29bb48658e97feb14b780
+   data.tar.gz: 603a4e6762b3a5a8cf1e0e1c0cc7a5a961e05b17a1794440719c50245829e72d
  SHA512:
-   metadata.gz: 81e6b8975cb42cfd1d80ec5a6f6a33728525656e47ce82f3d95c0fdc300ddb7ba3cdc95efa4685288f855a545fbfd1941321c9fe8d47acfa34f25643c9cfbb2b
-   data.tar.gz: 47e45cf0742ced1552699591ffd137db85cfc5d8080e5c9dedf756770940be7923281dfc204b859167c6b40d2be35f757573da36b70195968389a9f1b1817a60
+   metadata.gz: e65f9831285fcc9318ea41cef6e6be39d20a24d618f1e9311f5d8441ab2f8accedaa106476ea664813c086bbaf63b1c06b14b5af26b17abbbc9136a69df8698d
+   data.tar.gz: f9a2e37aaf6f3f45d9f024ca98ff8e96c2beac4660d8aa5dfb8f8c809743f12cc2d8d92d759ef68e4a5ea1b0c4a1a84915df56485ac369980a381fd8b84ec6dc
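
For readers unfamiliar with this file: `checksums.yaml` ships inside the `.gem` archive and records SHA256/SHA512 digests of the gem's `metadata.gz` and `data.tar.gz`. A minimal sketch of how these values could be re-checked locally, assuming the archives have already been extracted from the downloaded `.gem` (the paths are illustrative, not part of this diff):

```ruby
require "digest"
require "yaml"

# Illustrative setup: `gem fetch nio4r -v 2.5.6 --platform java`, then
# `tar -xf nio4r-2.5.6-java.gem` leaves metadata.gz, data.tar.gz and
# checksums.yaml.gz in the current directory; gunzip the last one first.
checksums = YAML.safe_load(File.read("checksums.yaml"))

%w[metadata.gz data.tar.gz].each do |file|
  actual   = Digest::SHA256.file(file).hexdigest
  expected = checksums["SHA256"][file]
  puts "#{file}: #{actual == expected ? 'OK' : 'MISMATCH'}"
end
```

The SHA512 entries can be checked the same way with `Digest::SHA512`.
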
@@ -0,0 +1,47 @@
+ name: nio4r
+
+ on: [push, pull_request]
+
+ jobs:
+   build:
+     name: >-
+       ${{matrix.os}}, ${{matrix.ruby}}
+     env:
+       CI: true
+       TESTOPTS: -v
+
+     runs-on: ${{matrix.os}}
+     strategy:
+       fail-fast: false
+       matrix:
+         os: [ ubuntu-20.04, ubuntu-18.04, macos-10.15, windows-2019 ]
+         ruby: [ head, 3.0, 2.7, 2.6, 2.5, 2.4, jruby, truffleruby-head ]
+         include:
+           - { os: ubuntu-16.04, ruby: 3.0 }
+           - { os: ubuntu-16.04, ruby: 2.4 }
+         exclude:
+           - { os: windows-2019, ruby: head }
+           - { os: windows-2019, ruby: jruby }
+           - { os: windows-2019, ruby: truffleruby-head }
+
+     steps:
+       - name: repo checkout
+         uses: actions/checkout@v2
+
+       - name: load ruby
+         uses: ruby/setup-ruby@v1
+         with:
+           ruby-version: ${{matrix.ruby}}
+
+       - name: RubyGems, Bundler Update
+         run: gem update --system --no-document --conservative
+
+       - name: bundle install
+         run: bundle install --path .bundle/gems --without development
+
+       - name: compile
+         run: bundle exec rake compile
+
+       - name: test
+         run: bundle exec rake spec
+         timeout-minutes: 10
data/.rubocop.yml CHANGED
@@ -1,23 +1,40 @@
  AllCops:
-   TargetRubyVersion: 2.3
+   TargetRubyVersion: 2.4
    DisplayCopNames: true

+ Layout/HashAlignment:
+   Enabled: false
+
+ Layout/LineLength:
+   Max: 128
+
+ Layout/SpaceAroundMethodCallOperator:
+   Enabled: false
+
  Layout/SpaceInsideBlockBraces:
    Enabled: false

  Style/IfUnlessModifier:
    Enabled: false

+ Style/UnpackFirst:
+   Enabled: false
+
  #
  # Lint
  #

- Lint/HandleExceptions:
+ Lint/SuppressedException:
    Enabled: false

  Lint/Loop:
    Enabled: false

+ Lint/RaiseException:
+   Enabled: false
+
+ Lint/StructNewOverride:
+   Enabled: false
  #
  # Metrics
  #
@@ -32,9 +49,6 @@ Metrics/BlockLength:
  Metrics/ClassLength:
    Max: 128

- Metrics/LineLength:
-   Max: 128
-
  Metrics/MethodLength:
    CountComments: false
    Max: 50
@@ -46,16 +60,12 @@ Metrics/PerceivedComplexity:
    Max: 15

  #
- # Performance
+ # Style
  #

- Performance/RegexpMatch:
+ Style/ExponentialNotation:
    Enabled: false

- #
- # Style
- #
-
  Style/FormatStringToken:
    Enabled: false

@@ -65,6 +75,15 @@ Style/FrozenStringLiteralComment:
  Style/GlobalVars:
    Enabled: false

+ Style/HashEachMethods:
+   Enabled: false
+
+ Style/HashTransformKeys:
+   Enabled: false
+
+ Style/HashTransformValues:
+   Enabled: false
+
  Style/NumericPredicate:
    Enabled: false

data/CHANGES.md CHANGED
@@ -1,3 +1,39 @@
+ ## 2.5.5 (2021-02-05)
+
+ * [#256](https://github.com/socketry/nio4r/pull/256)
+   Use libev 4.33, featuring experimental `io_uring` support.
+   ([@jcmfernandes])
+
+ * [#260](https://github.com/socketry/nio4r/pull/260)
+   Workaround for ARM-based macOS Ruby: Use pure Ruby for M1, since the native extension is crashing on M1 (arm64).
+   ([@jasl])
+
+ * [#252](https://github.com/socketry/nio4r/pull/252)
+   JRuby: Fix javac -Xlint warnings
+   ([@headius])
+
+ ## 2.5.4 (2020-09-16)
+
+ * [#251](https://github.com/socketry/nio4r/issues/251)
+   Intermittent SEGV during GC.
+   ([@boazsegev])
+
+ ## 2.5.3 (2020-09-07)
+
+ * [#241](https://github.com/socketry/nio4r/issues/241)
+   Possible bug with Ruby >= 2.7.0 and `GC.compact`.
+   ([@boazsegev])
+
+ ## 2.5.2 (2019-09-24)
+
+ * [#220](https://github.com/socketry/nio4r/issues/220)
+   Update to libev-4.27 & fix assorted warnings.
+   ([@ioquatix])
+
+ * [#225](https://github.com/socketry/nio4r/issues/225)
+   Avoid need for linux headers.
+   ([@ioquatix])
+
  ## 2.4.0 (2019-07-07)

  * [#211](https://github.com/socketry/nio4r/pull/211)
@@ -9,7 +45,7 @@

  * Assorted fixes for TruffleRuby & JRuby.
    ([@eregon], [@olleolleolle])
-
+   Possible bug with Ruby >= 2.7.0 and `GC.compact`
  * Update libev to v4.25.
    ([@ioquatix])

@@ -242,3 +278,7 @@
  [@ioquatix]: https://github.com/ioquatix
  [@eregon]: https://github.com/eregon
  [@olleolleolle]: https://github.com/olleolleolle
+ [@boazsegev]: https://github.com/boazsegev
+ [@headius]: https://github.com/headius
+ [@jasl]: https://github.com/jasl
+ [@jcmfernandes]: https://github.com/jcmfernandes
data/Gemfile CHANGED
@@ -15,5 +15,5 @@ group :development, :test do
    gem "coveralls", require: false
    gem "rake-compiler", require: false
    gem "rspec", "~> 3.7", require: false
-   gem "rubocop", "0.52.1", require: false
+   gem "rubocop", "0.82.0", require: false
  end
data/README.md CHANGED
@@ -1,17 +1,10 @@
  # ![nio4r](https://raw.github.com/socketry/nio4r/master/logo.png)

  [![Gem Version](https://badge.fury.io/rb/nio4r.svg)](http://rubygems.org/gems/nio4r)
- [![Travis CI Status](https://secure.travis-ci.org/socketry/nio4r.svg?branch=master)](http://travis-ci.org/socketry/nio4r)
- [![Appveyor Status](https://ci.appveyor.com/api/projects/status/1ru8x81v91vaewax/branch/master?svg=true)](https://ci.appveyor.com/project/tarcieri/nio4r/branch/master)
+ [![Build Status](https://github.com/socketry/nio4r/workflows/nio4r/badge.svg?branch=master&event=push)](https://github.com/socketry/nio4r/actions?query=workflow:nio4r)
  [![Code Climate](https://codeclimate.com/github/socketry/nio4r.svg)](https://codeclimate.com/github/socketry/nio4r)
  [![Coverage Status](https://coveralls.io/repos/socketry/nio4r/badge.svg?branch=master)](https://coveralls.io/r/socketry/nio4r)
  [![Yard Docs](https://img.shields.io/badge/yard-docs-blue.svg)](http://www.rubydoc.info/gems/nio4r/2.2.0)
- [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/socketry/nio4r/blob/master/LICENSE.txt)
-
- _NOTE: This is the 2.x **stable** branch of nio4r. For the 1.x **legacy** branch,
- please see:_
-
- https://github.com/socketry/nio4r/tree/1-x-stable

  **New I/O for Ruby (nio4r)**: cross-platform asynchronous I/O primitives for
  scalable network clients and servers. Modeled after the Java NIO API, but
@@ -25,13 +18,13 @@ writing.
  ## Projects using nio4r

  * [ActionCable]: Rails 5 WebSocket protocol, uses nio4r for a WebSocket server
- * [Celluloid::IO]: Actor-based concurrency framework, uses nio4r for async I/O
- * [Socketry Async]: Asynchronous I/O framework for Ruby
+ * [Celluloid]: Actor-based concurrency framework, uses nio4r for async I/O
+ * [Async]: Asynchronous I/O framework for Ruby
  * [Puma]: Ruby/Rack web server built for concurrency

  [ActionCable]: https://rubygems.org/gems/actioncable
- [Celluloid::IO]: https://github.com/celluloid/celluloid-io
- [Socketry Async]: https://github.com/socketry/async
+ [Celluloid]: https://github.com/celluloid/celluloid-io
+ [Async]: https://github.com/socketry/async
  [Puma]: https://github.com/puma/puma

  ## Goals
@@ -43,10 +36,11 @@ writing.

  ## Supported platforms

- * Ruby 2.3
  * Ruby 2.4
  * Ruby 2.5
  * Ruby 2.6
+ * Ruby 2.7
+ * Ruby 3.0
  * [JRuby](https://github.com/jruby/jruby)
  * [TruffleRuby](https://github.com/oracle/truffleruby)

@@ -56,17 +50,6 @@ writing.
  * **Java NIO**: JRuby extension which wraps the Java NIO subsystem
  * **Pure Ruby**: `Kernel.select`-based backend that should work on any Ruby interpreter

- ## Discussion
-
- For discussion and general help with nio4r, email
- [socketry+subscribe@googlegroups.com][subscribe]
- or join on the web via the [Google Group].
-
- We're also on IRC at ##socketry on irc.freenode.net.
-
- [subscribe]: mailto:socketry+subscribe@googlegroups.com
- [google group]: https://groups.google.com/group/socketry
-
  ## Documentation

  [Please see the nio4r wiki](https://github.com/socketry/nio4r/wiki)
@@ -120,7 +103,7 @@ rake release

  Released under the MIT license.

- Copyright, 2019, by Tony Arcieri.
+ Copyright, 2019, by Tony Arcieri.
  Copyright, 2019, by [Samuel G. D. Williams](http://www.codeotaku.com/samuel-williams).

  Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -1,7 +1,7 @@
  #!/usr/bin/env ruby
  # frozen_string_literal: true

- $LOAD_PATH.push File.expand_path("../../lib", __FILE__)
+ $LOAD_PATH.push File.expand_path("../lib", __dir__)
  require "nio"
  require "socket"

@@ -19,7 +19,7 @@ class EchoServer

    def run
      loop do
-       @selector.select { |monitor| monitor.value.call(monitor) }
+       @selector.select { |monitor| monitor.value.call }
      end
    end
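
For context on the `monitor.value.call` change above: in this example a callable is stored in `monitor.value` when an IO object is registered, so the select loop only has to invoke it. A minimal, self-contained sketch of that pattern follows; the server setup and port are illustrative, not taken from the gem's example file:

```ruby
require "nio"
require "socket"

selector = NIO::Selector.new
server   = TCPServer.new("127.0.0.1", 0) # any free port, illustrative only

# Register the listening socket for read readiness and stash a callable
# in the monitor; ready monitors later just invoke whatever #value holds.
monitor = selector.register(server, :r)
monitor.value = proc do
  client = server.accept
  client.write("hello\n")
  client.close
end

# One pass of the event loop, mirroring the updated example's block form.
selector.select(1) { |m| m.value.call }
```
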
data/ext/libev/Changes CHANGED
@@ -1,8 +1,77 @@
  Revision history for libev, a high-performance and full-featured event loop.

+ TODO: for next ABI/API change, consider moving EV__IOFDSSET into io->fd instead and provide a getter.
+ TODO: document EV_TSTAMP_T
+
+ 4.33 Wed Mar 18 13:22:29 CET 2020
+   - no changes w.r.t. 4.32.
+
+ 4.32 (EV only)
+   - the 4.31 timerfd code wrongly changed the priority of the signal
+     fd watcher, which is usually harmless unless signal fds are
+     also used (found via cpan tester service).
+   - the documentation wrongly claimed that user may modify fd and events
+     members in io watchers when the watcher was stopped
+     (found by b_jonas).
+   - new ev_io_modify mutator which changes only the events member,
+     which can be faster. also added ev::io::set (int events) method
+     to ev++.h.
+   - officially allow a zero events mask for io watchers. this should
+     work with older libev versions as well but was not officially
+     allowed before.
+   - do not wake up every minute when timerfd is used to detect timejumps.
+   - do not wake up every minute when periodics are disabled and we have
+     a monotonic clock.
+   - support a lot more "uncommon" compile time configurations,
+     such as ev_embed enabled but ev_timer disabled.
+   - use a start/stop wrapper class to reduce code duplication in
+     ev++.h and make it needlessly more c++-y.
+   - the linux aio backend is no longer compiled in by default.
+   - update to libecb version 0x00010008.
+
+ 4.31 Fri Dec 20 21:58:29 CET 2019
+   - handle backends with minimum wait time a bit better by not
+     waiting in the presence of already-expired timers
+     (behaviour reported by Felipe Gasper).
+   - new feature: use timerfd to detect timejumps quickly,
+     can be disabled with the new EVFLAG_NOTIMERFD loop flag.
+   - document EV_USE_SIGNALFD feature macro.
+
+ 4.30 (EV only)
+   - change non-autoconf test for __kernel_rwf_t by testing
+     LINUX_VERSION_CODE, the most direct test I could find.
+   - fix a bug in the io_uring backend that polled the wrong
+     backend fd, causing it to not work in many cases.
+
+ 4.29 (EV only)
+   - add io uring autoconf and non-autoconf detection.
+   - disable io_uring when some header files are too old.
+
+ 4.28 (EV only)
+   - linuxaio backend resulted in random memory corruption
+     when loop is forked.
+   - linuxaio backend might have tried to cancel an iocb
+     multiple times (was unable to trigger this).
+   - linuxaio backend now employs a generation counter to
+     avoid handling spurious events from cancelled requests.
+   - io_cancel can return EINTR, deal with it. also, assume
+     io_submit also returns EINTR.
+   - fix some other minor bugs in linuxaio backend.
+   - ev_tstamp type can now be overriden by defining EV_TSTAMP_T.
+   - cleanup: replace expect_true/false and noinline by their
+     libecb counterparts.
+   - move syscall infrastructure from ev_linuxaio.c to ev.c.
+   - prepare io_uring integration.
+   - tweak ev_floor.
+   - epoll, poll, win32 Sleep and other places that use millisecond
+     reslution now all try to round up times.
+   - solaris port backend didn't compile.
+   - abstract time constants into their macros, for more flexibility.
+
  4.27 Thu Jun 27 22:43:44 CEST 2019
-   - linux aio backend almost complete rewritten to work around its
+   - linux aio backend almost completely rewritten to work around its
      limitations.
+   - linux aio backend now requires linux 4.19+.
    - epoll backend now mandatory for linux aio backend.
    - fail assertions more aggressively on invalid fd's detected
      in the event loop, do not just silently fd_kill in case of
@@ -22,7 +91,7 @@ Revision history for libev, a high-performance and full-featured event loop.
  4.25 Fri Dec 21 07:49:20 CET 2018
    - INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
      (EV_THROW still provided) and now uses noexcept on C++11 or newer.
-   - move the darwin select workaround highe rin ev.c, as newer versions of
+   - move the darwin select workaround higher in ev.c, as newer versions of
      darwin managed to break their broken select even more.
    - ANDROID => __ANDROID__ (reported by enh@google.com).
    - disable epoll_create1 on android because it has broken header files
data/ext/libev/ev.c CHANGED
@@ -116,7 +116,7 @@
116
116
  # undef EV_USE_POLL
117
117
  # define EV_USE_POLL 0
118
118
  # endif
119
-
119
+
120
120
  # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
121
121
  # ifndef EV_USE_EPOLL
122
122
  # define EV_USE_EPOLL EV_FEATURE_BACKENDS
@@ -125,16 +125,25 @@
125
125
  # undef EV_USE_EPOLL
126
126
  # define EV_USE_EPOLL 0
127
127
  # endif
128
-
128
+
129
129
  # if HAVE_LINUX_AIO_ABI_H
130
130
  # ifndef EV_USE_LINUXAIO
131
- # define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
131
+ # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
132
132
  # endif
133
133
  # else
134
134
  # undef EV_USE_LINUXAIO
135
135
  # define EV_USE_LINUXAIO 0
136
136
  # endif
137
-
137
+
138
+ # if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
139
+ # ifndef EV_USE_IOURING
140
+ # define EV_USE_IOURING EV_FEATURE_BACKENDS
141
+ # endif
142
+ # else
143
+ # undef EV_USE_IOURING
144
+ # define EV_USE_IOURING 0
145
+ # endif
146
+
138
147
  # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
139
148
  # ifndef EV_USE_KQUEUE
140
149
  # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -143,7 +152,7 @@
143
152
  # undef EV_USE_KQUEUE
144
153
  # define EV_USE_KQUEUE 0
145
154
  # endif
146
-
155
+
147
156
  # if HAVE_PORT_H && HAVE_PORT_CREATE
148
157
  # ifndef EV_USE_PORT
149
158
  # define EV_USE_PORT EV_FEATURE_BACKENDS
@@ -179,7 +188,16 @@
179
188
  # undef EV_USE_EVENTFD
180
189
  # define EV_USE_EVENTFD 0
181
190
  # endif
182
-
191
+
192
+ # if HAVE_SYS_TIMERFD_H
193
+ # ifndef EV_USE_TIMERFD
194
+ # define EV_USE_TIMERFD EV_FEATURE_OS
195
+ # endif
196
+ # else
197
+ # undef EV_USE_TIMERFD
198
+ # define EV_USE_TIMERFD 0
199
+ # endif
200
+
183
201
  #endif
184
202
 
185
203
  /* OS X, in its infinite idiocy, actually HARDCODES
@@ -336,11 +354,11 @@
336
354
  #endif
337
355
 
338
356
  #ifndef EV_USE_LINUXAIO
339
- # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
340
- # define EV_USE_LINUXAIO 1
341
- # else
342
357
  # define EV_USE_LINUXAIO 0
343
- # endif
358
+ #endif
359
+
360
+ #ifndef EV_USE_IOURING
361
+ # define EV_USE_IOURING 0
344
362
  #endif
345
363
 
346
364
  #ifndef EV_USE_INOTIFY
@@ -375,6 +393,14 @@
375
393
  # endif
376
394
  #endif
377
395
 
396
+ #ifndef EV_USE_TIMERFD
397
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
398
+ # define EV_USE_TIMERFD EV_FEATURE_OS
399
+ # else
400
+ # define EV_USE_TIMERFD 0
401
+ # endif
402
+ #endif
403
+
378
404
  #if 0 /* debugging */
379
405
  # define EV_VERIFY 3
380
406
  # define EV_USE_4HEAP 1
@@ -417,6 +443,7 @@
417
443
  # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
418
444
  # undef EV_USE_MONOTONIC
419
445
  # define EV_USE_MONOTONIC 1
446
+ # define EV_NEED_SYSCALL 1
420
447
  # else
421
448
  # undef EV_USE_CLOCK_SYSCALL
422
449
  # define EV_USE_CLOCK_SYSCALL 0
@@ -449,12 +476,29 @@
449
476
 
450
477
  #if EV_USE_LINUXAIO
451
478
  # include <sys/syscall.h>
452
- # if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */
479
+ # if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
480
+ # define EV_NEED_SYSCALL 1
481
+ # else
453
482
  # undef EV_USE_LINUXAIO
454
483
  # define EV_USE_LINUXAIO 0
455
484
  # endif
456
485
  #endif
457
486
 
487
+ #if EV_USE_IOURING
488
+ # include <sys/syscall.h>
489
+ # if !SYS_io_uring_setup && __linux && !__alpha
490
+ # define SYS_io_uring_setup 425
491
+ # define SYS_io_uring_enter 426
492
+ # define SYS_io_uring_wregister 427
493
+ # endif
494
+ # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
495
+ # define EV_NEED_SYSCALL 1
496
+ # else
497
+ # undef EV_USE_IOURING
498
+ # define EV_USE_IOURING 0
499
+ # endif
500
+ #endif
501
+
458
502
  #if EV_USE_INOTIFY
459
503
  # include <sys/statfs.h>
460
504
  # include <sys/inotify.h>
@@ -466,7 +510,7 @@
466
510
  #endif
467
511
 
468
512
  #if EV_USE_EVENTFD
469
- /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
513
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
470
514
  # include <stdint.h>
471
515
  # ifndef EFD_NONBLOCK
472
516
  # define EFD_NONBLOCK O_NONBLOCK
@@ -482,7 +526,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
482
526
  #endif
483
527
 
484
528
  #if EV_USE_SIGNALFD
485
- /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
529
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
486
530
  # include <stdint.h>
487
531
  # ifndef SFD_NONBLOCK
488
532
  # define SFD_NONBLOCK O_NONBLOCK
@@ -494,7 +538,7 @@ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
494
538
  # define SFD_CLOEXEC 02000000
495
539
  # endif
496
540
  # endif
497
- EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
541
+ EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
498
542
 
499
543
  struct signalfd_siginfo
500
544
  {
@@ -503,7 +547,17 @@ struct signalfd_siginfo
503
547
  };
504
548
  #endif
505
549
 
506
- /**/
550
+ /* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
551
+ #if EV_USE_TIMERFD
552
+ # include <sys/timerfd.h>
553
+ /* timerfd is only used for periodics */
554
+ # if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
555
+ # undef EV_USE_TIMERFD
556
+ # define EV_USE_TIMERFD 0
557
+ # endif
558
+ #endif
559
+
560
+ /*****************************************************************************/
507
561
 
508
562
  #if EV_VERIFY >= 3
509
563
  # define EV_FREQUENT_CHECK ev_verify (EV_A)
@@ -518,18 +572,34 @@ struct signalfd_siginfo
518
572
  #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
519
573
  /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
520
574
 
521
- #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
522
- #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
575
+ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
576
+ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
577
+ #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
523
578
 
524
- #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
525
- #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
579
+ /* find a portable timestamp that is "always" in the future but fits into time_t.
580
+ * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
581
+ * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
582
+ #define EV_TSTAMP_HUGE \
583
+ (sizeof (time_t) >= 8 ? 10000000000000. \
584
+ : 0 < (time_t)4294967295 ? 4294967295. \
585
+ : 2147483647.) \
586
+
587
+ #ifndef EV_TS_CONST
588
+ # define EV_TS_CONST(nv) nv
589
+ # define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
590
+ # define EV_TS_FROM_USEC(us) us * 1e-6
591
+ # define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
592
+ # define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
593
+ # define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
594
+ # define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
595
+ #endif
526
596
 
527
597
  /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
528
598
  /* ECB.H BEGIN */
529
599
  /*
530
600
  * libecb - http://software.schmorp.de/pkg/libecb
531
601
  *
532
- * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
602
+ * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
533
603
  * Copyright (©) 2011 Emanuele Giaquinta
534
604
  * All rights reserved.
535
605
  *
@@ -570,15 +640,23 @@ struct signalfd_siginfo
570
640
  #define ECB_H
571
641
 
572
642
  /* 16 bits major, 16 bits minor */
573
- #define ECB_VERSION 0x00010006
643
+ #define ECB_VERSION 0x00010008
574
644
 
575
- #ifdef _WIN32
645
+ #include <string.h> /* for memcpy */
646
+
647
+ #if defined (_WIN32) && !defined (__MINGW32__)
576
648
  typedef signed char int8_t;
577
649
  typedef unsigned char uint8_t;
650
+ typedef signed char int_fast8_t;
651
+ typedef unsigned char uint_fast8_t;
578
652
  typedef signed short int16_t;
579
653
  typedef unsigned short uint16_t;
654
+ typedef signed int int_fast16_t;
655
+ typedef unsigned int uint_fast16_t;
580
656
  typedef signed int int32_t;
581
657
  typedef unsigned int uint32_t;
658
+ typedef signed int int_fast32_t;
659
+ typedef unsigned int uint_fast32_t;
582
660
  #if __GNUC__
583
661
  typedef signed long long int64_t;
584
662
  typedef unsigned long long uint64_t;
@@ -586,6 +664,8 @@ struct signalfd_siginfo
586
664
  typedef signed __int64 int64_t;
587
665
  typedef unsigned __int64 uint64_t;
588
666
  #endif
667
+ typedef int64_t int_fast64_t;
668
+ typedef uint64_t uint_fast64_t;
589
669
  #ifdef _WIN64
590
670
  #define ECB_PTRSIZE 8
591
671
  typedef uint64_t uintptr_t;
@@ -607,6 +687,14 @@ struct signalfd_siginfo
607
687
  #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
608
688
  #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
609
689
 
690
+ #ifndef ECB_OPTIMIZE_SIZE
691
+ #if __OPTIMIZE_SIZE__
692
+ #define ECB_OPTIMIZE_SIZE 1
693
+ #else
694
+ #define ECB_OPTIMIZE_SIZE 0
695
+ #endif
696
+ #endif
697
+
610
698
  /* work around x32 idiocy by defining proper macros */
611
699
  #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
612
700
  #if _ILP32
@@ -1122,6 +1210,44 @@ ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { retu
1122
1210
  ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
1123
1211
  ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
1124
1212
 
1213
+ #if ECB_CPP
1214
+
1215
+ inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
1216
+ inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
1217
+ inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
1218
+ inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
1219
+
1220
+ inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
1221
+ inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
1222
+ inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
1223
+ inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
1224
+
1225
+ inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
1226
+ inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
1227
+ inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
1228
+ inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
1229
+
1230
+ inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
1231
+ inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
1232
+ inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
1233
+ inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
1234
+
1235
+ inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
1236
+ inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
1237
+ inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
1238
+
1239
+ inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
1240
+ inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
1241
+ inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
1242
+ inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
1243
+
1244
+ inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
1245
+ inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
1246
+ inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
1247
+ inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
1248
+
1249
+ #endif
1250
+
1125
1251
  #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
1126
1252
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
1127
1253
  #define ecb_bswap16(x) __builtin_bswap16 (x)
@@ -1202,6 +1328,78 @@ ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_he
1202
1328
  ecb_inline ecb_const ecb_bool ecb_little_endian (void);
1203
1329
  ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
1204
1330
 
1331
+ /*****************************************************************************/
1332
+ /* unaligned load/store */
1333
+
1334
+ ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1335
+ ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1336
+ ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1337
+
1338
+ ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1339
+ ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1340
+ ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1341
+
1342
+ ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1343
+ ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1344
+ ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1345
+
1346
+ ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
1347
+ ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
1348
+ ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
1349
+
1350
+ ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
1351
+ ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
1352
+ ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
1353
+
1354
+ ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1355
+ ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1356
+ ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1357
+
1358
+ ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1359
+ ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1360
+ ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1361
+
1362
+ ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
1363
+ ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
1364
+ ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
1365
+
1366
+ ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
1367
+ ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
1368
+ ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
1369
+
1370
+ ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
1371
+ ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
1372
+ ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
1373
+
1374
+ #if ECB_CPP
1375
+
1376
+ inline uint8_t ecb_bswap (uint8_t v) { return v; }
1377
+ inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
1378
+ inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
1379
+ inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
1380
+
1381
+ template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1382
+ template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1383
+ template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
1384
+ template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
1385
+ template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
1386
+ template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
1387
+ template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
1388
+ template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
1389
+
1390
+ template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1391
+ template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1392
+ template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
1393
+ template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
1394
+ template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
1395
+ template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
1396
+ template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
1397
+ template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
1398
+
1399
+ #endif
1400
+
1401
+ /*****************************************************************************/
1402
+
1205
1403
  #if ECB_GCC_VERSION(3,0) || ECB_C99
1206
1404
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1207
1405
  #else
@@ -1235,6 +1433,8 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
1235
1433
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1236
1434
  #endif
1237
1435
 
1436
+ /*****************************************************************************/
1437
+
1238
1438
  ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1239
1439
  ecb_function_ ecb_const uint32_t
1240
1440
  ecb_binary16_to_binary32 (uint32_t x)
@@ -1352,7 +1552,6 @@ ecb_binary32_to_binary16 (uint32_t x)
1352
1552
  || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1353
1553
  || defined __aarch64__
1354
1554
  #define ECB_STDFP 1
1355
- #include <string.h> /* for memcpy */
1356
1555
  #else
1357
1556
  #define ECB_STDFP 0
1358
1557
  #endif
@@ -1547,7 +1746,7 @@ ecb_binary32_to_binary16 (uint32_t x)
1547
1746
  #if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1548
1747
  /* if your architecture doesn't need memory fences, e.g. because it is
1549
1748
  * single-cpu/core, or if you use libev in a project that doesn't use libev
1550
- * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
1749
+ * from multiple threads, then you can define ECB_NO_THREADS when compiling
1551
1750
  * libev, in which cases the memory fences become nops.
1552
1751
  * alternatively, you can remove this #error and link against libpthread,
1553
1752
  * which will then provide the memory fences.
@@ -1561,18 +1760,80 @@ ecb_binary32_to_binary16 (uint32_t x)
1561
1760
  # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
1562
1761
  #endif
1563
1762
 
1564
- #define expect_false(cond) ecb_expect_false (cond)
1565
- #define expect_true(cond) ecb_expect_true (cond)
1566
- #define noinline ecb_noinline
1567
-
1568
1763
  #define inline_size ecb_inline
1569
1764
 
1570
1765
  #if EV_FEATURE_CODE
1571
1766
  # define inline_speed ecb_inline
1572
1767
  #else
1573
- # define inline_speed noinline static
1768
+ # define inline_speed ecb_noinline static
1769
+ #endif
1770
+
1771
+ /*****************************************************************************/
1772
+ /* raw syscall wrappers */
1773
+
1774
+ #if EV_NEED_SYSCALL
1775
+
1776
+ #include <sys/syscall.h>
1777
+
1778
+ /*
1779
+ * define some syscall wrappers for common architectures
1780
+ * this is mostly for nice looks during debugging, not performance.
1781
+ * our syscalls return < 0, not == -1, on error. which is good
1782
+ * enough for linux aio.
1783
+ * TODO: arm is also common nowadays, maybe even mips and x86
1784
+ * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1785
+ */
1786
+ #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
1787
+ /* the costly errno access probably kills this for size optimisation */
1788
+
1789
+ #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1790
+ ({ \
1791
+ long res; \
1792
+ register unsigned long r6 __asm__ ("r9" ); \
1793
+ register unsigned long r5 __asm__ ("r8" ); \
1794
+ register unsigned long r4 __asm__ ("r10"); \
1795
+ register unsigned long r3 __asm__ ("rdx"); \
1796
+ register unsigned long r2 __asm__ ("rsi"); \
1797
+ register unsigned long r1 __asm__ ("rdi"); \
1798
+ if (narg >= 6) r6 = (unsigned long)(arg6); \
1799
+ if (narg >= 5) r5 = (unsigned long)(arg5); \
1800
+ if (narg >= 4) r4 = (unsigned long)(arg4); \
1801
+ if (narg >= 3) r3 = (unsigned long)(arg3); \
1802
+ if (narg >= 2) r2 = (unsigned long)(arg2); \
1803
+ if (narg >= 1) r1 = (unsigned long)(arg1); \
1804
+ __asm__ __volatile__ ( \
1805
+ "syscall\n\t" \
1806
+ : "=a" (res) \
1807
+ : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1808
+ : "cc", "r11", "cx", "memory"); \
1809
+ errno = -res; \
1810
+ res; \
1811
+ })
1812
+
1813
+ #endif
1814
+
1815
+ #ifdef ev_syscall
1816
+ #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1817
+ #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1818
+ #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1819
+ #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1820
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
1821
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1822
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1823
+ #else
1824
+ #define ev_syscall0(nr) syscall (nr)
1825
+ #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1826
+ #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1827
+ #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1828
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1829
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1830
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1574
1831
  #endif
1575
1832
 
1833
+ #endif
1834
+
1835
+ /*****************************************************************************/
1836
+
1576
1837
  #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1577
1838
 
1578
1839
  #if EV_MINPRI == EV_MAXPRI
@@ -1630,7 +1891,7 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
1630
1891
  #include <float.h>
1631
1892
 
1632
1893
  /* a floor() replacement function, should be independent of ev_tstamp type */
1633
- noinline
1894
+ ecb_noinline
1634
1895
  static ev_tstamp
1635
1896
  ev_floor (ev_tstamp v)
1636
1897
  {
@@ -1641,26 +1902,26 @@ ev_floor (ev_tstamp v)
1641
1902
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1642
1903
  #endif
1643
1904
 
1644
- /* argument too large for an unsigned long? */
1645
- if (expect_false (v >= shift))
1905
+ /* special treatment for negative arguments */
1906
+ if (ecb_expect_false (v < 0.))
1907
+ {
1908
+ ev_tstamp f = -ev_floor (-v);
1909
+
1910
+ return f - (f == v ? 0 : 1);
1911
+ }
1912
+
1913
+ /* argument too large for an unsigned long? then reduce it */
1914
+ if (ecb_expect_false (v >= shift))
1646
1915
  {
1647
1916
  ev_tstamp f;
1648
1917
 
1649
1918
  if (v == v - 1.)
1650
- return v; /* very large number */
1919
+ return v; /* very large numbers are assumed to be integer */
1651
1920
 
1652
1921
  f = shift * ev_floor (v * (1. / shift));
1653
1922
  return f + ev_floor (v - f);
1654
1923
  }
1655
1924
 
1656
- /* special treatment for negative args? */
1657
- if (expect_false (v < 0.))
1658
- {
1659
- ev_tstamp f = -ev_floor (-v);
1660
-
1661
- return f - (f == v ? 0 : 1);
1662
- }
1663
-
1664
1925
  /* fits into an unsigned long */
1665
1926
  return (unsigned long)v;
1666
1927
  }
@@ -1673,7 +1934,7 @@ ev_floor (ev_tstamp v)
1673
1934
  # include <sys/utsname.h>
1674
1935
  #endif
1675
1936
 
1676
- noinline ecb_cold
1937
+ ecb_noinline ecb_cold
1677
1938
  static unsigned int
1678
1939
  ev_linux_version (void)
1679
1940
  {
@@ -1713,7 +1974,7 @@ ev_linux_version (void)
1713
1974
  /*****************************************************************************/
1714
1975
 
1715
1976
  #if EV_AVOID_STDIO
1716
- noinline ecb_cold
1977
+ ecb_noinline ecb_cold
1717
1978
  static void
1718
1979
  ev_printerr (const char *msg)
1719
1980
  {
@@ -1730,7 +1991,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1730
1991
  syserr_cb = cb;
1731
1992
  }
1732
1993
 
1733
- noinline ecb_cold
1994
+ ecb_noinline ecb_cold
1734
1995
  static void
1735
1996
  ev_syserr (const char *msg)
1736
1997
  {
@@ -1754,7 +2015,7 @@ ev_syserr (const char *msg)
1754
2015
  }
1755
2016
 
1756
2017
  static void *
1757
- ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
2018
+ ev_realloc_emul (void *ptr, size_t size) EV_NOEXCEPT
1758
2019
  {
1759
2020
  /* some systems, notably openbsd and darwin, fail to properly
1760
2021
  * implement realloc (x, 0) (as required by both ansi c-89 and
@@ -1770,17 +2031,17 @@ ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
1770
2031
  return 0;
1771
2032
  }
1772
2033
 
1773
- static void *(*alloc)(void *ptr, long size) EV_NOEXCEPT = ev_realloc_emul;
2034
+ static void *(*alloc)(void *ptr, size_t size) EV_NOEXCEPT = ev_realloc_emul;
1774
2035
 
1775
2036
  ecb_cold
1776
2037
  void
1777
- ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT
2038
+ ev_set_allocator (void *(*cb)(void *ptr, size_t size) EV_NOEXCEPT) EV_NOEXCEPT
1778
2039
  {
1779
2040
  alloc = cb;
1780
2041
  }
1781
2042
 
1782
2043
  inline_speed void *
1783
- ev_realloc (void *ptr, long size)
2044
+ ev_realloc (void *ptr, size_t size)
1784
2045
  {
1785
2046
  ptr = alloc (ptr, size);
1786
2047
 
@@ -1812,7 +2073,7 @@ typedef struct
1812
2073
  unsigned char events; /* the events watched for */
1813
2074
  unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1814
2075
  unsigned char emask; /* some backends store the actual kernel mask in here */
1815
- unsigned char unused;
2076
+ unsigned char eflags; /* flags field for use by backends */
1816
2077
  #if EV_USE_EPOLL
1817
2078
  unsigned int egen; /* generation counter to counter epoll bugs */
1818
2079
  #endif
@@ -1876,7 +2137,7 @@ typedef struct
1876
2137
 
1877
2138
  #else
1878
2139
 
1879
- EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
2140
+ EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1880
2141
  #define VAR(name,decl) static decl;
1881
2142
  #include "ev_vars.h"
1882
2143
  #undef VAR
@@ -1886,8 +2147,8 @@ typedef struct
1886
2147
  #endif
1887
2148
 
1888
2149
  #if EV_FEATURE_API
1889
- # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
1890
- # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
2150
+ # define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
2151
+ # define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
1891
2152
  # define EV_INVOKE_PENDING invoke_cb (EV_A)
1892
2153
  #else
1893
2154
  # define EV_RELEASE_CB (void)0
@@ -1904,17 +2165,19 @@ ev_tstamp
1904
2165
  ev_time (void) EV_NOEXCEPT
1905
2166
  {
1906
2167
  #if EV_USE_REALTIME
1907
- if (expect_true (have_realtime))
2168
+ if (ecb_expect_true (have_realtime))
1908
2169
  {
1909
2170
  struct timespec ts;
1910
2171
  clock_gettime (CLOCK_REALTIME, &ts);
1911
- return ts.tv_sec + ts.tv_nsec * 1e-9;
2172
+ return EV_TS_GET (ts);
1912
2173
  }
1913
2174
  #endif
1914
2175
 
1915
- struct timeval tv;
1916
- gettimeofday (&tv, 0);
1917
- return tv.tv_sec + tv.tv_usec * 1e-6;
2176
+ {
2177
+ struct timeval tv;
2178
+ gettimeofday (&tv, 0);
2179
+ return EV_TV_GET (tv);
2180
+ }
1918
2181
  }
1919
2182
  #endif
1920
2183
 
@@ -1922,11 +2185,11 @@ inline_size ev_tstamp
1922
2185
  get_clock (void)
1923
2186
  {
1924
2187
  #if EV_USE_MONOTONIC
1925
- if (expect_true (have_monotonic))
2188
+ if (ecb_expect_true (have_monotonic))
1926
2189
  {
1927
2190
  struct timespec ts;
1928
2191
  clock_gettime (CLOCK_MONOTONIC, &ts);
1929
- return ts.tv_sec + ts.tv_nsec * 1e-9;
2192
+ return EV_TS_GET (ts);
1930
2193
  }
1931
2194
  #endif
1932
2195
 
@@ -1944,7 +2207,7 @@ ev_now (EV_P) EV_NOEXCEPT
1944
2207
  void
1945
2208
  ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1946
2209
  {
1947
- if (delay > 0.)
2210
+ if (delay > EV_TS_CONST (0.))
1948
2211
  {
1949
2212
  #if EV_USE_NANOSLEEP
1950
2213
  struct timespec ts;
@@ -1954,7 +2217,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1954
2217
  #elif defined _WIN32
1955
2218
  /* maybe this should round up, as ms is very low resolution */
1956
2219
  /* compared to select (µs) or nanosleep (ns) */
1957
- Sleep ((unsigned long)(delay * 1e3));
2220
+ Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
1958
2221
  #else
1959
2222
  struct timeval tv;
1960
2223
 
@@ -1994,7 +2257,7 @@ array_nextsize (int elem, int cur, int cnt)
1994
2257
  return ncur;
1995
2258
  }
1996
2259
 
1997
- noinline ecb_cold
2260
+ ecb_noinline ecb_cold
1998
2261
  static void *
1999
2262
  array_realloc (int elem, void *base, int *cur, int cnt)
2000
2263
  {
@@ -2008,7 +2271,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
2008
2271
  memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
2009
2272
 
2010
2273
  #define array_needsize(type,base,cur,cnt,init) \
2011
- if (expect_false ((cnt) > (cur))) \
2274
+ if (ecb_expect_false ((cnt) > (cur))) \
2012
2275
  { \
2013
2276
  ecb_unused int ocur_ = (cur); \
2014
2277
  (base) = (type *)array_realloc \
@@ -2032,20 +2295,20 @@ array_realloc (int elem, void *base, int *cur, int cnt)
2032
2295
  /*****************************************************************************/
2033
2296
 
2034
2297
  /* dummy callback for pending events */
2035
- noinline
2298
+ ecb_noinline
2036
2299
  static void
2037
2300
  pendingcb (EV_P_ ev_prepare *w, int revents)
2038
2301
  {
2039
2302
  }
2040
2303
 
2041
- noinline
2304
+ ecb_noinline
2042
2305
  void
2043
2306
  ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
2044
2307
  {
2045
2308
  W w_ = (W)w;
2046
2309
  int pri = ABSPRI (w_);
2047
2310
 
2048
- if (expect_false (w_->pending))
2311
+ if (ecb_expect_false (w_->pending))
2049
2312
  pendings [pri][w_->pending - 1].events |= revents;
2050
2313
  else
2051
2314
  {
@@ -2106,7 +2369,7 @@ fd_event (EV_P_ int fd, int revents)
2106
2369
  {
2107
2370
  ANFD *anfd = anfds + fd;
2108
2371
 
2109
- if (expect_true (!anfd->reify))
2372
+ if (ecb_expect_true (!anfd->reify))
2110
2373
  fd_event_nocheck (EV_A_ fd, revents);
2111
2374
  }
2112
2375
 
@@ -2124,8 +2387,20 @@ fd_reify (EV_P)
2124
2387
  {
2125
2388
  int i;
2126
2389
 
2390
+ /* most backends do not modify the fdchanges list in backend_modfiy.
2391
+ * except io_uring, which has fixed-size buffers which might force us
2392
+ * to handle events in backend_modify, causing fdchanges to be amended,
2393
+ * which could result in an endless loop.
2394
+ * to avoid this, we do not dynamically handle fds that were added
2395
+ * during fd_reify. that means that for those backends, fdchangecnt
2396
+ * might be non-zero during poll, which must cause them to not block.
2397
+ * to not put too much of a burden on other backends, this detail
2398
+ * needs to be handled in the backend.
2399
+ */
2400
+ int changecnt = fdchangecnt;
2401
+
2127
2402
  #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
2128
- for (i = 0; i < fdchangecnt; ++i)
2403
+ for (i = 0; i < changecnt; ++i)
2129
2404
  {
2130
2405
  int fd = fdchanges [i];
2131
2406
  ANFD *anfd = anfds + fd;
@@ -2149,7 +2424,7 @@ fd_reify (EV_P)
2149
2424
  }
2150
2425
  #endif
2151
2426
 
2152
- for (i = 0; i < fdchangecnt; ++i)
2427
+ for (i = 0; i < changecnt; ++i)
2153
2428
  {
2154
2429
  int fd = fdchanges [i];
2155
2430
  ANFD *anfd = anfds + fd;
@@ -2160,7 +2435,7 @@ fd_reify (EV_P)
2160
2435
 
2161
2436
  anfd->reify = 0;
2162
2437
 
2163
- /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2438
+ /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2164
2439
  {
2165
2440
  anfd->events = 0;
2166
2441
 
@@ -2175,7 +2450,14 @@ fd_reify (EV_P)
2175
2450
  backend_modify (EV_A_ fd, o_events, anfd->events);
2176
2451
  }
2177
2452
 
2178
- fdchangecnt = 0;
2453
+ /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
2454
+ * this is a rare case (see beginning comment in this function), so we copy them to the
2455
+ * front and hope the backend handles this case.
2456
+ */
2457
+ if (ecb_expect_false (fdchangecnt != changecnt))
2458
+ memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
2459
+
2460
+ fdchangecnt -= changecnt;
2179
2461
  }
2180
2462
 
2181
2463
  /* something about the given fd changed */
@@ -2184,9 +2466,9 @@ void
2184
2466
  fd_change (EV_P_ int fd, int flags)
2185
2467
  {
2186
2468
  unsigned char reify = anfds [fd].reify;
2187
- anfds [fd].reify |= flags;
2469
+ anfds [fd].reify = reify | flags;
2188
2470
 
2189
- if (expect_true (!reify))
2471
+ if (ecb_expect_true (!reify))
2190
2472
  {
2191
2473
  ++fdchangecnt;
2192
2474
  array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
@@ -2219,7 +2501,7 @@ fd_valid (int fd)
2219
2501
  }
2220
2502
 
2221
2503
  /* called on EBADF to verify fds */
2222
- noinline ecb_cold
2504
+ ecb_noinline ecb_cold
2223
2505
  static void
2224
2506
  fd_ebadf (EV_P)
2225
2507
  {
@@ -2232,7 +2514,7 @@ fd_ebadf (EV_P)
2232
2514
  }
2233
2515
 
2234
2516
  /* called on ENOMEM in select/poll to kill some fds and retry */
2235
- noinline ecb_cold
2517
+ ecb_noinline ecb_cold
2236
2518
  static void
2237
2519
  fd_enomem (EV_P)
2238
2520
  {
@@ -2247,7 +2529,7 @@ fd_enomem (EV_P)
2247
2529
  }
2248
2530
 
2249
2531
  /* usually called after fork if backend needs to re-arm all fds from scratch */
2250
- noinline
2532
+ ecb_noinline
2251
2533
  static void
2252
2534
  fd_rearm_all (EV_P)
2253
2535
  {
@@ -2311,19 +2593,19 @@ downheap (ANHE *heap, int N, int k)
2311
2593
  ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
2312
2594
 
2313
2595
  /* find minimum child */
2314
- if (expect_true (pos + DHEAP - 1 < E))
2596
+ if (ecb_expect_true (pos + DHEAP - 1 < E))
2315
2597
  {
2316
2598
  /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2317
- if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2318
- if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2319
- if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2599
+ if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2600
+ if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2601
+ if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2320
2602
  }
2321
2603
  else if (pos < E)
2322
2604
  {
2323
2605
  /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2324
- if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2325
- if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2326
- if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2606
+ if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2607
+ if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2608
+ if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2327
2609
  }
2328
2610
  else
2329
2611
  break;
@@ -2341,7 +2623,7 @@ downheap (ANHE *heap, int N, int k)
2341
2623
  ev_active (ANHE_w (he)) = k;
2342
2624
  }
2343
2625
 
2344
- #else /* 4HEAP */
2626
+ #else /* not 4HEAP */
2345
2627
 
2346
2628
  #define HEAP0 1
2347
2629
  #define HPARENT(k) ((k) >> 1)
@@ -2368,7 +2650,7 @@ downheap (ANHE *heap, int N, int k)
2368
2650
 
2369
2651
  heap [k] = heap [c];
2370
2652
  ev_active (ANHE_w (heap [k])) = k;
2371
-
2653
+
2372
2654
  k = c;
2373
2655
  }
2374
2656
 
@@ -2423,7 +2705,7 @@ reheap (ANHE *heap, int N)
2423
2705
 
2424
2706
  /*****************************************************************************/
2425
2707
 
2426
- /* associate signal watchers to a signal signal */
2708
+ /* associate signal watchers to a signal */
2427
2709
  typedef struct
2428
2710
  {
2429
2711
  EV_ATOMIC_T pending;
@@ -2439,7 +2721,7 @@ static ANSIG signals [EV_NSIG - 1];
2439
2721
 
2440
2722
  #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2441
2723
 
2442
- noinline ecb_cold
2724
+ ecb_noinline ecb_cold
2443
2725
  static void
2444
2726
  evpipe_init (EV_P)
2445
2727
  {
@@ -2490,7 +2772,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2490
2772
  {
2491
2773
  ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
2492
2774
 
2493
- if (expect_true (*flag))
2775
+ if (ecb_expect_true (*flag))
2494
2776
  return;
2495
2777
 
2496
2778
  *flag = 1;
@@ -2577,7 +2859,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
2577
2859
  ECB_MEMORY_FENCE;
2578
2860
 
2579
2861
  for (i = EV_NSIG - 1; i--; )
2580
- if (expect_false (signals [i].pending))
2862
+ if (ecb_expect_false (signals [i].pending))
2581
2863
  ev_feed_signal_event (EV_A_ i + 1);
2582
2864
  }
2583
2865
  #endif
@@ -2628,13 +2910,13 @@ ev_sighandler (int signum)
2628
2910
  ev_feed_signal (signum);
2629
2911
  }
2630
2912
 
2631
- noinline
2913
+ ecb_noinline
2632
2914
  void
2633
2915
  ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2634
2916
  {
2635
2917
  WL w;
2636
2918
 
2637
- if (expect_false (signum <= 0 || signum >= EV_NSIG))
2919
+ if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
2638
2920
  return;
2639
2921
 
2640
2922
  --signum;
@@ -2643,7 +2925,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2643
2925
  /* it is permissible to try to feed a signal to the wrong loop */
2644
2926
  /* or, likely more useful, feeding a signal nobody is waiting for */
2645
2927
 
2646
- if (expect_false (signals [signum].loop != EV_A))
2928
+ if (ecb_expect_false (signals [signum].loop != EV_A))
2647
2929
  return;
2648
2930
  #endif
2649
2931
 
@@ -2737,6 +3019,57 @@ childcb (EV_P_ ev_signal *sw, int revents)
2737
3019
 
2738
3020
  /*****************************************************************************/
2739
3021
 
3022
+ #if EV_USE_TIMERFD
3023
+
3024
+ static void periodics_reschedule (EV_P);
3025
+
3026
+ static void
3027
+ timerfdcb (EV_P_ ev_io *iow, int revents)
3028
+ {
3029
+ struct itimerspec its = { 0 };
3030
+
3031
+ its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
3032
+ timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
3033
+
3034
+ ev_rt_now = ev_time ();
3035
+ /* periodics_reschedule only needs ev_rt_now */
3036
+ /* but maybe in the future we want the full treatment. */
3037
+ /*
3038
+ now_floor = EV_TS_CONST (0.);
3039
+ time_update (EV_A_ EV_TSTAMP_HUGE);
3040
+ */
3041
+ #if EV_PERIODIC_ENABLE
3042
+ periodics_reschedule (EV_A);
3043
+ #endif
3044
+ }
3045
+
3046
+ ecb_noinline ecb_cold
3047
+ static void
3048
+ evtimerfd_init (EV_P)
3049
+ {
3050
+ if (!ev_is_active (&timerfd_w))
3051
+ {
3052
+ timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
3053
+
3054
+ if (timerfd >= 0)
3055
+ {
3056
+ fd_intern (timerfd); /* just to be sure */
3057
+
3058
+ ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
3059
+ ev_set_priority (&timerfd_w, EV_MINPRI);
3060
+ ev_io_start (EV_A_ &timerfd_w);
3061
+ ev_unref (EV_A); /* watcher should not keep loop alive */
3062
+
3063
+ /* (re-) arm timer */
3064
+ timerfdcb (EV_A_ 0, 0);
3065
+ }
3066
+ }
3067
+ }
3068
+
3069
+ #endif
3070
+
3071
+ /*****************************************************************************/
3072
+
2740
3073
  #if EV_USE_IOCP
2741
3074
  # include "ev_iocp.c"
2742
3075
  #endif
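
The new EV_USE_TIMERFD block above arms an absolute CLOCK_REALTIME timer with TFD_TIMER_CANCEL_ON_SET, so the loop is woken whenever the wall clock is set and periodics can be rescheduled promptly. A standalone sketch of that kernel mechanism (illustrative helper names, minimal error handling, not part of the libev API):

    /* sketch: detecting discontinuous CLOCK_REALTIME changes with a timerfd */
    #include <sys/timerfd.h>
    #include <errno.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    /* returns an fd that becomes readable when the wall clock is set */
    static int clock_jump_fd (void)
    {
      int fd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
      struct itimerspec its = { 0 };

      /* absolute expiry far in the future; only the cancellation matters here */
      its.it_value.tv_sec = time (0) + 3600;
      timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
      return fd;
    }

    /* after poll/epoll reports the fd readable: the read fails with ECANCELED
       when CLOCK_REALTIME was changed discontinuously */
    static int clock_was_set (int fd)
    {
      uint64_t expirations;
      return read (fd, &expirations, sizeof expirations) < 0 && errno == ECANCELED;
    }
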
@@ -2752,6 +3085,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
2752
3085
  #if EV_USE_LINUXAIO
2753
3086
  # include "ev_linuxaio.c"
2754
3087
  #endif
3088
+ #if EV_USE_IOURING
3089
+ # include "ev_iouring.c"
3090
+ #endif
2755
3091
  #if EV_USE_POLL
2756
3092
  # include "ev_poll.c"
2757
3093
  #endif
@@ -2789,13 +3125,14 @@ ev_supported_backends (void) EV_NOEXCEPT
2789
3125
  {
2790
3126
  unsigned int flags = 0;
2791
3127
 
2792
- if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2793
- if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2794
- if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2795
- if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2796
- if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2797
- if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2798
-
3128
+ if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
3129
+ if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
3130
+ if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
3131
+ if (EV_USE_LINUXAIO && ev_linux_version () >= 0x041300) flags |= EVBACKEND_LINUXAIO; /* 4.19+ */
3132
+ if (EV_USE_IOURING && ev_linux_version () >= 0x050601 ) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
3133
+ if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
3134
+ if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
3135
+
2799
3136
  return flags;
2800
3137
  }
2801
3138
 
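
The new gates above compare ev_linux_version () against 0x041300 and 0x050601: libev packs one kernel version component per byte, so 4.19.0 encodes as 0x041300 and 5.6.1 as 0x050601. A sketch of just that packing (the real ev_linux_version parses the uname release string):

    /* sketch: the byte-per-component version encoding used by the checks above */
    static unsigned int pack_linux_version (unsigned int major, unsigned int minor, unsigned int patch)
    {
      return (major << 16) | (minor << 8) | patch;
    }
    /* pack_linux_version (4, 19, 0) == 0x041300, pack_linux_version (5, 6, 1) == 0x050601 */
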
@@ -2805,24 +3142,29 @@ ev_recommended_backends (void) EV_NOEXCEPT
2805
3142
  {
2806
3143
  unsigned int flags = ev_supported_backends ();
2807
3144
 
2808
- #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_14)
2809
- /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
2810
- #elif defined(__NetBSD__)
2811
- /* kqueue is borked on everything but netbsd apparently */
2812
- /* it usually doesn't work correctly on anything but sockets and pipes */
2813
- #else
3145
+ /* apple has a poor track record but post 10.12.2 it seems to work sufficiently well */
3146
+ #if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14)
2814
3147
  /* only select works correctly on that "unix-certified" platform */
2815
3148
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
2816
3149
  flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
2817
3150
  #endif
2818
3151
 
3152
+ #if !defined(__NetBSD__) && !defined(__APPLE__)
3153
+ /* kqueue is borked on everything but netbsd and osx >= 10.12.2 apparently */
3154
+ /* it usually doesn't work correctly on anything but sockets and pipes */
3155
+ flags &= ~EVBACKEND_KQUEUE;
3156
+ #endif
3157
+
2819
3158
  #ifdef __FreeBSD__
2820
3159
  flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
2821
3160
  #endif
2822
3161
 
2823
- /* TODO: linuxaio is very experimental */
2824
- #if !EV_RECOMMEND_LINUXAIO
3162
+ #ifdef __linux__
3163
+ /* NOTE: linuxaio is very experimental, never recommend */
2825
3164
  flags &= ~EVBACKEND_LINUXAIO;
3165
+
3166
+ /* NOTE: io_uring is super experimental, never recommend */
3167
+ flags &= ~EVBACKEND_IOURING;
2826
3168
  #endif
2827
3169
 
2828
3170
  return flags;
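
After this change ev_recommended_backends () never includes linuxaio or io_uring on Linux, so callers who want to try them must request them explicitly. A usage sketch with the standard libev loop-creation calls, assuming EVFLAG_AUTO falls back to the recommended set:

    /* sketch: opting in to a backend that is supported but not recommended */
    #include "ev.h"

    static struct ev_loop *make_loop_preferring_iouring (void)
    {
      struct ev_loop *loop = 0;

      if (ev_supported_backends () & EVBACKEND_IOURING)
        loop = ev_loop_new (EVBACKEND_IOURING); /* may still fail at runtime */

      if (!loop)
        loop = ev_loop_new (EVFLAG_AUTO);       /* recommended backends only */

      return loop;
    }
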
@@ -2832,12 +3174,14 @@ ecb_cold
2832
3174
  unsigned int
2833
3175
  ev_embeddable_backends (void) EV_NOEXCEPT
2834
3176
  {
2835
- int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
3177
+ int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
2836
3178
 
2837
3179
  /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2838
3180
  if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2839
3181
  flags &= ~EVBACKEND_EPOLL;
2840
3182
 
3183
+ /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
3184
+
2841
3185
  return flags;
2842
3186
  }
2843
3187
 
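
ev_embeddable_backends () now advertises io_uring as embeddable alongside epoll, kqueue and port. The usual way to consume this mask, per the libev documentation, is an ev_embed watcher wrapping a second loop; a brief sketch (a callback of 0 is assumed to make libev sweep the embedded loop automatically):

    /* sketch: embedding a loop that uses an embeddable-but-not-recommended backend */
    #include "ev.h"

    static ev_embed embed;

    static void setup_embed (struct ev_loop *outer)
    {
      unsigned int mask = ev_embeddable_backends () & ev_supported_backends ();

      if (mask)
        {
          struct ev_loop *inner = ev_loop_new (mask);

          ev_embed_init (&embed, 0, inner); /* 0: sweep the inner loop automatically */
          ev_embed_start (outer, &embed);
        }
    }
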
@@ -2899,7 +3243,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
2899
3243
  #endif
2900
3244
 
2901
3245
  /* initialise a loop structure, must be zero-initialised */
2902
- noinline ecb_cold
3246
+ ecb_noinline ecb_cold
2903
3247
  static void
2904
3248
  loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2905
3249
  {
@@ -2964,6 +3308,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2964
3308
  #if EV_USE_SIGNALFD
2965
3309
  sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2966
3310
  #endif
3311
+ #if EV_USE_TIMERFD
3312
+ timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3313
+ #endif
2967
3314
 
2968
3315
  if (!(flags & EVBACKEND_MASK))
2969
3316
  flags |= ev_recommended_backends ();
@@ -2977,6 +3324,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2977
3324
  #if EV_USE_KQUEUE
2978
3325
  if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
2979
3326
  #endif
3327
+ #if EV_USE_IOURING
3328
+ if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
3329
+ #endif
2980
3330
  #if EV_USE_LINUXAIO
2981
3331
  if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2982
3332
  #endif
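
In the loop_init change above, timerfd starts at -2 ("create on demand", see ev_periodic_start further down) unless EVFLAG_NOTIMERFD is passed, which pins it at -1 and disables the feature. A short opt-out sketch, assuming the usual loop-construction entry points:

    /* sketch: disable timerfd-based wall-clock-jump detection for the default loop */
    #include "ev.h"

    static struct ev_loop *default_loop_without_timerfd (void)
    {
      return ev_default_loop (EVFLAG_AUTO | EVFLAG_NOTIMERFD);
    }
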
@@ -3014,7 +3364,7 @@ ev_loop_destroy (EV_P)
3014
3364
 
3015
3365
  #if EV_CLEANUP_ENABLE
3016
3366
  /* queue cleanup watchers (and execute them) */
3017
- if (expect_false (cleanupcnt))
3367
+ if (ecb_expect_false (cleanupcnt))
3018
3368
  {
3019
3369
  queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
3020
3370
  EV_INVOKE_PENDING;
@@ -3043,6 +3393,11 @@ ev_loop_destroy (EV_P)
3043
3393
  close (sigfd);
3044
3394
  #endif
3045
3395
 
3396
+ #if EV_USE_TIMERFD
3397
+ if (ev_is_active (&timerfd_w))
3398
+ close (timerfd);
3399
+ #endif
3400
+
3046
3401
  #if EV_USE_INOTIFY
3047
3402
  if (fs_fd >= 0)
3048
3403
  close (fs_fd);
@@ -3060,6 +3415,9 @@ ev_loop_destroy (EV_P)
3060
3415
  #if EV_USE_KQUEUE
3061
3416
  if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3062
3417
  #endif
3418
+ #if EV_USE_IOURING
3419
+ if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3420
+ #endif
3063
3421
  #if EV_USE_LINUXAIO
3064
3422
  if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3065
3423
  #endif
@@ -3127,6 +3485,9 @@ loop_fork (EV_P)
3127
3485
  #if EV_USE_KQUEUE
3128
3486
  if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3129
3487
  #endif
3488
+ #if EV_USE_IOURING
3489
+ if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3490
+ #endif
3130
3491
  #if EV_USE_LINUXAIO
3131
3492
  if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3132
3493
  #endif
@@ -3137,22 +3498,44 @@ loop_fork (EV_P)
3137
3498
  infy_fork (EV_A);
3138
3499
  #endif
3139
3500
 
3140
- #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3141
- if (ev_is_active (&pipe_w) && postfork != 2)
3501
+ if (postfork != 2)
3142
3502
  {
3143
- /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3503
+ #if EV_USE_SIGNALFD
3504
+ /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
3505
+ #endif
3144
3506
 
3145
- ev_ref (EV_A);
3146
- ev_io_stop (EV_A_ &pipe_w);
3507
+ #if EV_USE_TIMERFD
3508
+ if (ev_is_active (&timerfd_w))
3509
+ {
3510
+ ev_ref (EV_A);
3511
+ ev_io_stop (EV_A_ &timerfd_w);
3512
+
3513
+ close (timerfd);
3514
+ timerfd = -2;
3515
+
3516
+ evtimerfd_init (EV_A);
3517
+ /* reschedule periodics, in case we missed something */
3518
+ ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3519
+ }
3520
+ #endif
3521
+
3522
+ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3523
+ if (ev_is_active (&pipe_w))
3524
+ {
3525
+ /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3147
3526
 
3148
- if (evpipe [0] >= 0)
3149
- EV_WIN32_CLOSE_FD (evpipe [0]);
3527
+ ev_ref (EV_A);
3528
+ ev_io_stop (EV_A_ &pipe_w);
3150
3529
 
3151
- evpipe_init (EV_A);
3152
- /* iterate over everything, in case we missed something before */
3153
- ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3530
+ if (evpipe [0] >= 0)
3531
+ EV_WIN32_CLOSE_FD (evpipe [0]);
3532
+
3533
+ evpipe_init (EV_A);
3534
+ /* iterate over everything, in case we missed something before */
3535
+ ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3536
+ }
3537
+ #endif
3154
3538
  }
3155
- #endif
3156
3539
 
3157
3540
  postfork = 0;
3158
3541
  }
@@ -3178,7 +3561,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
3178
3561
  #endif /* multiplicity */
3179
3562
 
3180
3563
  #if EV_VERIFY
3181
- noinline ecb_cold
3564
+ ecb_noinline ecb_cold
3182
3565
  static void
3183
3566
  verify_watcher (EV_P_ W w)
3184
3567
  {
@@ -3188,7 +3571,7 @@ verify_watcher (EV_P_ W w)
3188
3571
  assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3189
3572
  }
3190
3573
 
3191
- noinline ecb_cold
3574
+ ecb_noinline ecb_cold
3192
3575
  static void
3193
3576
  verify_heap (EV_P_ ANHE *heap, int N)
3194
3577
  {
@@ -3204,7 +3587,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
3204
3587
  }
3205
3588
  }
3206
3589
 
3207
- noinline ecb_cold
3590
+ ecb_noinline ecb_cold
3208
3591
  static void
3209
3592
  array_verify (EV_P_ W *ws, int cnt)
3210
3593
  {
@@ -3363,7 +3746,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
3363
3746
  return count;
3364
3747
  }
3365
3748
 
3366
- noinline
3749
+ ecb_noinline
3367
3750
  void
3368
3751
  ev_invoke_pending (EV_P)
3369
3752
  {
@@ -3392,7 +3775,7 @@ ev_invoke_pending (EV_P)
3392
3775
  inline_size void
3393
3776
  idle_reify (EV_P)
3394
3777
  {
3395
- if (expect_false (idleall))
3778
+ if (ecb_expect_false (idleall))
3396
3779
  {
3397
3780
  int pri;
3398
3781
 
@@ -3432,7 +3815,7 @@ timers_reify (EV_P)
3432
3815
  if (ev_at (w) < mn_now)
3433
3816
  ev_at (w) = mn_now;
3434
3817
 
3435
- assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
3818
+ assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3436
3819
 
3437
3820
  ANHE_at_cache (timers [HEAP0]);
3438
3821
  downheap (timers, timercnt, HEAP0);
@@ -3451,7 +3834,7 @@ timers_reify (EV_P)
3451
3834
 
3452
3835
  #if EV_PERIODIC_ENABLE
3453
3836
 
3454
- noinline
3837
+ ecb_noinline
3455
3838
  static void
3456
3839
  periodic_recalc (EV_P_ ev_periodic *w)
3457
3840
  {
@@ -3464,7 +3847,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
3464
3847
  ev_tstamp nat = at + w->interval;
3465
3848
 
3466
3849
  /* when resolution fails us, we use ev_rt_now */
3467
- if (expect_false (nat == at))
3850
+ if (ecb_expect_false (nat == at))
3468
3851
  {
3469
3852
  at = ev_rt_now;
3470
3853
  break;
@@ -3520,7 +3903,7 @@ periodics_reify (EV_P)
3520
3903
 
3521
3904
  /* simply recalculate all periodics */
3522
3905
  /* TODO: maybe ensure that at least one event happens when jumping forward? */
3523
- noinline ecb_cold
3906
+ ecb_noinline ecb_cold
3524
3907
  static void
3525
3908
  periodics_reschedule (EV_P)
3526
3909
  {
@@ -3544,7 +3927,7 @@ periodics_reschedule (EV_P)
3544
3927
  #endif
3545
3928
 
3546
3929
  /* adjust all timers by a given offset */
3547
- noinline ecb_cold
3930
+ ecb_noinline ecb_cold
3548
3931
  static void
3549
3932
  timers_reschedule (EV_P_ ev_tstamp adjust)
3550
3933
  {
@@ -3564,7 +3947,7 @@ inline_speed void
3564
3947
  time_update (EV_P_ ev_tstamp max_block)
3565
3948
  {
3566
3949
  #if EV_USE_MONOTONIC
3567
- if (expect_true (have_monotonic))
3950
+ if (ecb_expect_true (have_monotonic))
3568
3951
  {
3569
3952
  int i;
3570
3953
  ev_tstamp odiff = rtmn_diff;
@@ -3573,7 +3956,7 @@ time_update (EV_P_ ev_tstamp max_block)
3573
3956
 
3574
3957
  /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3575
3958
  /* interpolate in the meantime */
3576
- if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
3959
+ if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3577
3960
  {
3578
3961
  ev_rt_now = rtmn_diff + mn_now;
3579
3962
  return;
@@ -3597,7 +3980,7 @@ time_update (EV_P_ ev_tstamp max_block)
3597
3980
 
3598
3981
  diff = odiff - rtmn_diff;
3599
3982
 
3600
- if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
3983
+ if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3601
3984
  return; /* all is well */
3602
3985
 
3603
3986
  ev_rt_now = ev_time ();
@@ -3616,7 +3999,7 @@ time_update (EV_P_ ev_tstamp max_block)
3616
3999
  {
3617
4000
  ev_rt_now = ev_time ();
3618
4001
 
3619
- if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
4002
+ if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3620
4003
  {
3621
4004
  /* adjust timers. this is easy, as the offset is the same for all of them */
3622
4005
  timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3636,11 +4019,13 @@ struct ev_poll_args {
3636
4019
  };
3637
4020
 
3638
4021
  static
3639
- VALUE ev_backend_poll(void *ptr)
4022
+ void * ev_backend_poll(void *ptr)
3640
4023
  {
3641
4024
  struct ev_poll_args *args = (struct ev_poll_args *)ptr;
3642
4025
  struct ev_loop *loop = args->loop;
3643
4026
  backend_poll (EV_A_ args->waittime);
4027
+
4028
+ return NULL;
3644
4029
  }
3645
4030
  /* ######################################## */
3646
4031
 
@@ -3668,8 +4053,8 @@ ev_run (EV_P_ int flags)
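
The return-type change above (VALUE to void *) makes the ev_backend_poll shim match the callback shape of Ruby's GVL-release entry point; the actual call site is elsewhere in this file, so the pairing below is an assumption based on the signature. A sketch using the documented rb_thread_call_without_gvl prototype:

    /* sketch: handing a void *(*)(void *) shim to Ruby so the GVL is released
       while the libev backend blocks in poll; RUBY_UBF_IO is Ruby's stock
       unblocking function for I/O waits */
    #include <ruby.h>
    #include <ruby/thread.h>

    static void *poll_without_gvl (void *(*shim)(void *), void *args)
    {
      return rb_thread_call_without_gvl (shim, args, RUBY_UBF_IO, 0);
    }
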
3668
4053
  #endif
3669
4054
 
3670
4055
  #ifndef _WIN32
3671
- if (expect_false (curpid)) /* penalise the forking check even more */
3672
- if (expect_false (getpid () != curpid))
4056
+ if (ecb_expect_false (curpid)) /* penalise the forking check even more */
4057
+ if (ecb_expect_false (getpid () != curpid))
3673
4058
  {
3674
4059
  curpid = getpid ();
3675
4060
  postfork = 1;
@@ -3678,7 +4063,7 @@ ev_run (EV_P_ int flags)
3678
4063
 
3679
4064
  #if EV_FORK_ENABLE
3680
4065
  /* we might have forked, so queue fork handlers */
3681
- if (expect_false (postfork))
4066
+ if (ecb_expect_false (postfork))
3682
4067
  if (forkcnt)
3683
4068
  {
3684
4069
  queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3688,18 +4073,18 @@ ev_run (EV_P_ int flags)
3688
4073
 
3689
4074
  #if EV_PREPARE_ENABLE
3690
4075
  /* queue prepare watchers (and execute them) */
3691
- if (expect_false (preparecnt))
4076
+ if (ecb_expect_false (preparecnt))
3692
4077
  {
3693
4078
  queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
3694
4079
  EV_INVOKE_PENDING;
3695
4080
  }
3696
4081
  #endif
3697
4082
 
3698
- if (expect_false (loop_done))
4083
+ if (ecb_expect_false (loop_done))
3699
4084
  break;
3700
4085
 
3701
4086
  /* we might have forked, so reify kernel state if necessary */
3702
- if (expect_false (postfork))
4087
+ if (ecb_expect_false (postfork))
3703
4088
  loop_fork (EV_A);
3704
4089
 
3705
4090
  /* update fd-related kernel structures */
@@ -3714,16 +4099,28 @@ ev_run (EV_P_ int flags)
3714
4099
  ev_tstamp prev_mn_now = mn_now;
3715
4100
 
3716
4101
  /* update time to cancel out callback processing overhead */
3717
- time_update (EV_A_ 1e100);
4102
+ time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3718
4103
 
3719
4104
  /* from now on, we want a pipe-wake-up */
3720
4105
  pipe_write_wanted = 1;
3721
4106
 
3722
4107
  ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3723
4108
 
3724
- if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
4109
+ if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3725
4110
  {
3726
- waittime = MAX_BLOCKTIME;
4111
+ waittime = EV_TS_CONST (MAX_BLOCKTIME);
4112
+
4113
+ #if EV_USE_TIMERFD
4114
+ /* sleep a lot longer when we can reliably detect timejumps */
4115
+ if (ecb_expect_true (timerfd >= 0))
4116
+ waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4117
+ #endif
4118
+ #if !EV_PERIODIC_ENABLE
4119
+ /* without periodics but with monotonic clock there is no need */
4120
+ /* for any time jump detection, so sleep longer */
4121
+ if (ecb_expect_true (have_monotonic))
4122
+ waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4123
+ #endif
3727
4124
 
3728
4125
  if (timercnt)
3729
4126
  {
@@ -3740,23 +4137,28 @@ ev_run (EV_P_ int flags)
3740
4137
  #endif
3741
4138
 
3742
4139
  /* don't let timeouts decrease the waittime below timeout_blocktime */
3743
- if (expect_false (waittime < timeout_blocktime))
4140
+ if (ecb_expect_false (waittime < timeout_blocktime))
3744
4141
  waittime = timeout_blocktime;
3745
4142
 
3746
- /* at this point, we NEED to wait, so we have to ensure */
3747
- /* to pass a minimum nonzero value to the backend */
3748
- if (expect_false (waittime < backend_mintime))
3749
- waittime = backend_mintime;
4143
+ /* now there are two more special cases left, either we have
4144
+ * already-expired timers, so we should not sleep, or we have timers
4145
+ * that expire very soon, in which case we need to wait for a minimum
4146
+ * amount of time for some event loop backends.
4147
+ */
4148
+ if (ecb_expect_false (waittime < backend_mintime))
4149
+ waittime = waittime <= EV_TS_CONST (0.)
4150
+ ? EV_TS_CONST (0.)
4151
+ : backend_mintime;
3750
4152
 
3751
4153
  /* extra check because io_blocktime is commonly 0 */
3752
- if (expect_false (io_blocktime))
4154
+ if (ecb_expect_false (io_blocktime))
3753
4155
  {
3754
4156
  sleeptime = io_blocktime - (mn_now - prev_mn_now);
3755
4157
 
3756
4158
  if (sleeptime > waittime - backend_mintime)
3757
4159
  sleeptime = waittime - backend_mintime;
3758
4160
 
3759
- if (expect_true (sleeptime > 0.))
4161
+ if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3760
4162
  {
3761
4163
  ev_sleep (sleeptime);
3762
4164
  waittime -= sleeptime;
@@ -3827,7 +4229,6 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3827
4229
  ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3828
4230
  }
3829
4231
 
3830
-
3831
4232
  /* update ev_rt_now, do magic */
3832
4233
  time_update (EV_A_ waittime + sleeptime);
3833
4234
  }
@@ -3845,13 +4246,13 @@ rb_thread_unsafe_dangerous_crazy_blocking_region_end(...);
3845
4246
 
3846
4247
  #if EV_CHECK_ENABLE
3847
4248
  /* queue check watchers, to be executed first */
3848
- if (expect_false (checkcnt))
4249
+ if (ecb_expect_false (checkcnt))
3849
4250
  queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
3850
4251
  #endif
3851
4252
 
3852
4253
  EV_INVOKE_PENDING;
3853
4254
  }
3854
- while (expect_true (
4255
+ while (ecb_expect_true (
3855
4256
  activecnt
3856
4257
  && !loop_done
3857
4258
  && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
@@ -3888,7 +4289,7 @@ ev_unref (EV_P) EV_NOEXCEPT
3888
4289
  void
3889
4290
  ev_now_update (EV_P) EV_NOEXCEPT
3890
4291
  {
3891
- time_update (EV_A_ 1e100);
4292
+ time_update (EV_A_ EV_TSTAMP_HUGE);
3892
4293
  }
3893
4294
 
3894
4295
  void
@@ -3925,7 +4326,7 @@ wlist_del (WL *head, WL elem)
3925
4326
  {
3926
4327
  while (*head)
3927
4328
  {
3928
- if (expect_true (*head == elem))
4329
+ if (ecb_expect_true (*head == elem))
3929
4330
  {
3930
4331
  *head = elem->next;
3931
4332
  break;
@@ -3952,7 +4353,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3952
4353
  W w_ = (W)w;
3953
4354
  int pending = w_->pending;
3954
4355
 
3955
- if (expect_true (pending))
4356
+ if (ecb_expect_true (pending))
3956
4357
  {
3957
4358
  ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
3958
4359
  p->w = (W)&pending_w;
@@ -3989,13 +4390,13 @@ ev_stop (EV_P_ W w)
3989
4390
 
3990
4391
  /*****************************************************************************/
3991
4392
 
3992
- noinline
4393
+ ecb_noinline
3993
4394
  void
3994
4395
  ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3995
4396
  {
3996
4397
  int fd = w->fd;
3997
4398
 
3998
- if (expect_false (ev_is_active (w)))
4399
+ if (ecb_expect_false (ev_is_active (w)))
3999
4400
  return;
4000
4401
 
4001
4402
  assert (("libev: ev_io_start called with negative fd", fd >= 0));
@@ -4019,12 +4420,12 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
4019
4420
  EV_FREQUENT_CHECK;
4020
4421
  }
4021
4422
 
4022
- noinline
4423
+ ecb_noinline
4023
4424
  void
4024
4425
  ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
4025
4426
  {
4026
4427
  clear_pending (EV_A_ (W)w);
4027
- if (expect_false (!ev_is_active (w)))
4428
+ if (ecb_expect_false (!ev_is_active (w)))
4028
4429
  return;
4029
4430
 
4030
4431
  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
@@ -4042,11 +4443,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
4042
4443
  EV_FREQUENT_CHECK;
4043
4444
  }
4044
4445
 
4045
- noinline
4446
+ ecb_noinline
4046
4447
  void
4047
4448
  ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
4048
4449
  {
4049
- if (expect_false (ev_is_active (w)))
4450
+ if (ecb_expect_false (ev_is_active (w)))
4050
4451
  return;
4051
4452
 
4052
4453
  ev_at (w) += mn_now;
@@ -4067,12 +4468,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
4067
4468
  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
4068
4469
  }
4069
4470
 
4070
- noinline
4471
+ ecb_noinline
4071
4472
  void
4072
4473
  ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4073
4474
  {
4074
4475
  clear_pending (EV_A_ (W)w);
4075
- if (expect_false (!ev_is_active (w)))
4476
+ if (ecb_expect_false (!ev_is_active (w)))
4076
4477
  return;
4077
4478
 
4078
4479
  EV_FREQUENT_CHECK;
@@ -4084,7 +4485,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4084
4485
 
4085
4486
  --timercnt;
4086
4487
 
4087
- if (expect_true (active < timercnt + HEAP0))
4488
+ if (ecb_expect_true (active < timercnt + HEAP0))
4088
4489
  {
4089
4490
  timers [active] = timers [timercnt + HEAP0];
4090
4491
  adjustheap (timers, timercnt, active);
@@ -4098,7 +4499,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
4098
4499
  EV_FREQUENT_CHECK;
4099
4500
  }
4100
4501
 
4101
- noinline
4502
+ ecb_noinline
4102
4503
  void
4103
4504
  ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4104
4505
  {
@@ -4129,17 +4530,22 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4129
4530
  ev_tstamp
4130
4531
  ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4131
4532
  {
4132
- return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
4533
+ return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4133
4534
  }
4134
4535
 
4135
4536
  #if EV_PERIODIC_ENABLE
4136
- noinline
4537
+ ecb_noinline
4137
4538
  void
4138
4539
  ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4139
4540
  {
4140
- if (expect_false (ev_is_active (w)))
4541
+ if (ecb_expect_false (ev_is_active (w)))
4141
4542
  return;
4142
4543
 
4544
+ #if EV_USE_TIMERFD
4545
+ if (timerfd == -2)
4546
+ evtimerfd_init (EV_A);
4547
+ #endif
4548
+
4143
4549
  if (w->reschedule_cb)
4144
4550
  ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4145
4551
  else if (w->interval)
@@ -4164,12 +4570,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4164
4570
  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4165
4571
  }
4166
4572
 
4167
- noinline
4573
+ ecb_noinline
4168
4574
  void
4169
4575
  ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4170
4576
  {
4171
4577
  clear_pending (EV_A_ (W)w);
4172
- if (expect_false (!ev_is_active (w)))
4578
+ if (ecb_expect_false (!ev_is_active (w)))
4173
4579
  return;
4174
4580
 
4175
4581
  EV_FREQUENT_CHECK;
@@ -4181,7 +4587,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4181
4587
 
4182
4588
  --periodiccnt;
4183
4589
 
4184
- if (expect_true (active < periodiccnt + HEAP0))
4590
+ if (ecb_expect_true (active < periodiccnt + HEAP0))
4185
4591
  {
4186
4592
  periodics [active] = periodics [periodiccnt + HEAP0];
4187
4593
  adjustheap (periodics, periodiccnt, active);
@@ -4193,7 +4599,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4193
4599
  EV_FREQUENT_CHECK;
4194
4600
  }
4195
4601
 
4196
- noinline
4602
+ ecb_noinline
4197
4603
  void
4198
4604
  ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4199
4605
  {
@@ -4209,11 +4615,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4209
4615
 
4210
4616
  #if EV_SIGNAL_ENABLE
4211
4617
 
4212
- noinline
4618
+ ecb_noinline
4213
4619
  void
4214
4620
  ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4215
4621
  {
4216
- if (expect_false (ev_is_active (w)))
4622
+ if (ecb_expect_false (ev_is_active (w)))
4217
4623
  return;
4218
4624
 
4219
4625
  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4292,12 +4698,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4292
4698
  EV_FREQUENT_CHECK;
4293
4699
  }
4294
4700
 
4295
- noinline
4701
+ ecb_noinline
4296
4702
  void
4297
4703
  ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4298
4704
  {
4299
4705
  clear_pending (EV_A_ (W)w);
4300
- if (expect_false (!ev_is_active (w)))
4706
+ if (ecb_expect_false (!ev_is_active (w)))
4301
4707
  return;
4302
4708
 
4303
4709
  EV_FREQUENT_CHECK;
@@ -4340,7 +4746,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4340
4746
  #if EV_MULTIPLICITY
4341
4747
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
4342
4748
  #endif
4343
- if (expect_false (ev_is_active (w)))
4749
+ if (ecb_expect_false (ev_is_active (w)))
4344
4750
  return;
4345
4751
 
4346
4752
  EV_FREQUENT_CHECK;
@@ -4355,7 +4761,7 @@ void
4355
4761
  ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4356
4762
  {
4357
4763
  clear_pending (EV_A_ (W)w);
4358
- if (expect_false (!ev_is_active (w)))
4764
+ if (ecb_expect_false (!ev_is_active (w)))
4359
4765
  return;
4360
4766
 
4361
4767
  EV_FREQUENT_CHECK;
@@ -4379,14 +4785,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4379
4785
  #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4380
4786
  #define MIN_STAT_INTERVAL 0.1074891
4381
4787
 
4382
- noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4788
+ ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4383
4789
 
4384
4790
  #if EV_USE_INOTIFY
4385
4791
 
4386
4792
  /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4387
4793
  # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4388
4794
 
4389
- noinline
4795
+ ecb_noinline
4390
4796
  static void
4391
4797
  infy_add (EV_P_ ev_stat *w)
4392
4798
  {
@@ -4461,7 +4867,7 @@ infy_add (EV_P_ ev_stat *w)
4461
4867
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
4462
4868
  }
4463
4869
 
4464
- noinline
4870
+ ecb_noinline
4465
4871
  static void
4466
4872
  infy_del (EV_P_ ev_stat *w)
4467
4873
  {
@@ -4479,7 +4885,7 @@ infy_del (EV_P_ ev_stat *w)
4479
4885
  inotify_rm_watch (fs_fd, wd);
4480
4886
  }
4481
4887
 
4482
- noinline
4888
+ ecb_noinline
4483
4889
  static void
4484
4890
  infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4485
4891
  {
@@ -4635,7 +5041,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
4635
5041
  w->attr.st_nlink = 1;
4636
5042
  }
4637
5043
 
4638
- noinline
5044
+ ecb_noinline
4639
5045
  static void
4640
5046
  stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4641
5047
  {
@@ -4679,7 +5085,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4679
5085
  void
4680
5086
  ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4681
5087
  {
4682
- if (expect_false (ev_is_active (w)))
5088
+ if (ecb_expect_false (ev_is_active (w)))
4683
5089
  return;
4684
5090
 
4685
5091
  ev_stat_stat (EV_A_ w);
@@ -4711,7 +5117,7 @@ void
4711
5117
  ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4712
5118
  {
4713
5119
  clear_pending (EV_A_ (W)w);
4714
- if (expect_false (!ev_is_active (w)))
5120
+ if (ecb_expect_false (!ev_is_active (w)))
4715
5121
  return;
4716
5122
 
4717
5123
  EV_FREQUENT_CHECK;
@@ -4736,7 +5142,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4736
5142
  void
4737
5143
  ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4738
5144
  {
4739
- if (expect_false (ev_is_active (w)))
5145
+ if (ecb_expect_false (ev_is_active (w)))
4740
5146
  return;
4741
5147
 
4742
5148
  pri_adjust (EV_A_ (W)w);
@@ -4760,7 +5166,7 @@ void
4760
5166
  ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4761
5167
  {
4762
5168
  clear_pending (EV_A_ (W)w);
4763
- if (expect_false (!ev_is_active (w)))
5169
+ if (ecb_expect_false (!ev_is_active (w)))
4764
5170
  return;
4765
5171
 
4766
5172
  EV_FREQUENT_CHECK;
@@ -4783,7 +5189,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4783
5189
  void
4784
5190
  ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4785
5191
  {
4786
- if (expect_false (ev_is_active (w)))
5192
+ if (ecb_expect_false (ev_is_active (w)))
4787
5193
  return;
4788
5194
 
4789
5195
  EV_FREQUENT_CHECK;
@@ -4799,7 +5205,7 @@ void
4799
5205
  ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4800
5206
  {
4801
5207
  clear_pending (EV_A_ (W)w);
4802
- if (expect_false (!ev_is_active (w)))
5208
+ if (ecb_expect_false (!ev_is_active (w)))
4803
5209
  return;
4804
5210
 
4805
5211
  EV_FREQUENT_CHECK;
@@ -4821,7 +5227,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4821
5227
  void
4822
5228
  ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4823
5229
  {
4824
- if (expect_false (ev_is_active (w)))
5230
+ if (ecb_expect_false (ev_is_active (w)))
4825
5231
  return;
4826
5232
 
4827
5233
  EV_FREQUENT_CHECK;
@@ -4837,7 +5243,7 @@ void
4837
5243
  ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4838
5244
  {
4839
5245
  clear_pending (EV_A_ (W)w);
4840
- if (expect_false (!ev_is_active (w)))
5246
+ if (ecb_expect_false (!ev_is_active (w)))
4841
5247
  return;
4842
5248
 
4843
5249
  EV_FREQUENT_CHECK;
@@ -4856,7 +5262,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4856
5262
  #endif
4857
5263
 
4858
5264
  #if EV_EMBED_ENABLE
4859
- noinline
5265
+ ecb_noinline
4860
5266
  void
4861
5267
  ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4862
5268
  {
@@ -4890,6 +5296,7 @@ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
4890
5296
  }
4891
5297
  }
4892
5298
 
5299
+ #if EV_FORK_ENABLE
4893
5300
  static void
4894
5301
  embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4895
5302
  {
@@ -4906,6 +5313,7 @@ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4906
5313
 
4907
5314
  ev_embed_start (EV_A_ w);
4908
5315
  }
5316
+ #endif
4909
5317
 
4910
5318
  #if 0
4911
5319
  static void
@@ -4918,7 +5326,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4918
5326
  void
4919
5327
  ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4920
5328
  {
4921
- if (expect_false (ev_is_active (w)))
5329
+ if (ecb_expect_false (ev_is_active (w)))
4922
5330
  return;
4923
5331
 
4924
5332
  {
@@ -4936,8 +5344,10 @@ ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4936
5344
  ev_set_priority (&w->prepare, EV_MINPRI);
4937
5345
  ev_prepare_start (EV_A_ &w->prepare);
4938
5346
 
5347
+ #if EV_FORK_ENABLE
4939
5348
  ev_fork_init (&w->fork, embed_fork_cb);
4940
5349
  ev_fork_start (EV_A_ &w->fork);
5350
+ #endif
4941
5351
 
4942
5352
  /*ev_idle_init (&w->idle, embed_idle_cb);*/
4943
5353
 
@@ -4950,14 +5360,16 @@ void
4950
5360
  ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4951
5361
  {
4952
5362
  clear_pending (EV_A_ (W)w);
4953
- if (expect_false (!ev_is_active (w)))
5363
+ if (ecb_expect_false (!ev_is_active (w)))
4954
5364
  return;
4955
5365
 
4956
5366
  EV_FREQUENT_CHECK;
4957
5367
 
4958
5368
  ev_io_stop (EV_A_ &w->io);
4959
5369
  ev_prepare_stop (EV_A_ &w->prepare);
5370
+ #if EV_FORK_ENABLE
4960
5371
  ev_fork_stop (EV_A_ &w->fork);
5372
+ #endif
4961
5373
 
4962
5374
  ev_stop (EV_A_ (W)w);
4963
5375
 
@@ -4969,7 +5381,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4969
5381
  void
4970
5382
  ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4971
5383
  {
4972
- if (expect_false (ev_is_active (w)))
5384
+ if (ecb_expect_false (ev_is_active (w)))
4973
5385
  return;
4974
5386
 
4975
5387
  EV_FREQUENT_CHECK;
@@ -4985,7 +5397,7 @@ void
4985
5397
  ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4986
5398
  {
4987
5399
  clear_pending (EV_A_ (W)w);
4988
- if (expect_false (!ev_is_active (w)))
5400
+ if (ecb_expect_false (!ev_is_active (w)))
4989
5401
  return;
4990
5402
 
4991
5403
  EV_FREQUENT_CHECK;
@@ -5007,7 +5419,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
5007
5419
  void
5008
5420
  ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5009
5421
  {
5010
- if (expect_false (ev_is_active (w)))
5422
+ if (ecb_expect_false (ev_is_active (w)))
5011
5423
  return;
5012
5424
 
5013
5425
  EV_FREQUENT_CHECK;
@@ -5025,7 +5437,7 @@ void
5025
5437
  ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5026
5438
  {
5027
5439
  clear_pending (EV_A_ (W)w);
5028
- if (expect_false (!ev_is_active (w)))
5440
+ if (ecb_expect_false (!ev_is_active (w)))
5029
5441
  return;
5030
5442
 
5031
5443
  EV_FREQUENT_CHECK;
@@ -5048,7 +5460,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
5048
5460
  void
5049
5461
  ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
5050
5462
  {
5051
- if (expect_false (ev_is_active (w)))
5463
+ if (ecb_expect_false (ev_is_active (w)))
5052
5464
  return;
5053
5465
 
5054
5466
  w->sent = 0;
@@ -5068,7 +5480,7 @@ void
5068
5480
  ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
5069
5481
  {
5070
5482
  clear_pending (EV_A_ (W)w);
5071
- if (expect_false (!ev_is_active (w)))
5483
+ if (ecb_expect_false (!ev_is_active (w)))
5072
5484
  return;
5073
5485
 
5074
5486
  EV_FREQUENT_CHECK;
@@ -5275,4 +5687,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_NOEXCEPT
5275
5687
  #if EV_MULTIPLICITY
5276
5688
  #include "ev_wrap.h"
5277
5689
  #endif
5278
-