cool.io 0.9.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. data/.gitignore +25 -0
  2. data/CHANGES +199 -0
  3. data/LICENSE +20 -0
  4. data/README.markdown +4 -0
  5. data/Rakefile +98 -0
  6. data/VERSION +1 -0
  7. data/examples/echo_client.rb +38 -0
  8. data/examples/echo_server.rb +27 -0
  9. data/examples/google.rb +9 -0
  10. data/examples/httpclient.rb +38 -0
  11. data/ext/cool.io/.gitignore +5 -0
  12. data/ext/cool.io/cool.io.h +58 -0
  13. data/ext/cool.io/cool.io_ext.c +25 -0
  14. data/ext/cool.io/ev_wrap.h +8 -0
  15. data/ext/cool.io/extconf.rb +69 -0
  16. data/ext/cool.io/iowatcher.c +189 -0
  17. data/ext/cool.io/libev.c +8 -0
  18. data/ext/cool.io/loop.c +303 -0
  19. data/ext/cool.io/stat_watcher.c +191 -0
  20. data/ext/cool.io/timer_watcher.c +219 -0
  21. data/ext/cool.io/utils.c +122 -0
  22. data/ext/cool.io/watcher.c +264 -0
  23. data/ext/cool.io/watcher.h +71 -0
  24. data/ext/http11_client/.gitignore +5 -0
  25. data/ext/http11_client/ext_help.h +14 -0
  26. data/ext/http11_client/extconf.rb +6 -0
  27. data/ext/http11_client/http11_client.c +300 -0
  28. data/ext/http11_client/http11_parser.c +403 -0
  29. data/ext/http11_client/http11_parser.h +48 -0
  30. data/ext/http11_client/http11_parser.rl +173 -0
  31. data/ext/libev/Changes +364 -0
  32. data/ext/libev/LICENSE +36 -0
  33. data/ext/libev/README +58 -0
  34. data/ext/libev/README.embed +3 -0
  35. data/ext/libev/ev.c +3867 -0
  36. data/ext/libev/ev.h +826 -0
  37. data/ext/libev/ev_epoll.c +234 -0
  38. data/ext/libev/ev_kqueue.c +198 -0
  39. data/ext/libev/ev_poll.c +148 -0
  40. data/ext/libev/ev_port.c +164 -0
  41. data/ext/libev/ev_select.c +307 -0
  42. data/ext/libev/ev_vars.h +197 -0
  43. data/ext/libev/ev_win32.c +153 -0
  44. data/ext/libev/ev_wrap.h +186 -0
  45. data/ext/libev/test_libev_win32.c +123 -0
  46. data/ext/libev/update_ev_wrap +19 -0
  47. data/lib/.gitignore +2 -0
  48. data/lib/cool.io.rb +30 -0
  49. data/lib/cool.io/async_watcher.rb +43 -0
  50. data/lib/cool.io/dns_resolver.rb +220 -0
  51. data/lib/cool.io/eventmachine.rb +234 -0
  52. data/lib/cool.io/http_client.rb +419 -0
  53. data/lib/cool.io/io.rb +174 -0
  54. data/lib/cool.io/iowatcher.rb +17 -0
  55. data/lib/cool.io/listener.rb +93 -0
  56. data/lib/cool.io/loop.rb +130 -0
  57. data/lib/cool.io/meta.rb +49 -0
  58. data/lib/cool.io/server.rb +74 -0
  59. data/lib/cool.io/socket.rb +224 -0
  60. data/lib/cool.io/timer_watcher.rb +17 -0
  61. data/lib/coolio.rb +2 -0
  62. data/lib/rev.rb +4 -0
  63. data/spec/async_watcher_spec.rb +57 -0
  64. data/spec/possible_tests/schedules_other_threads.rb +48 -0
  65. data/spec/possible_tests/test_on_resolve_failed.rb +9 -0
  66. data/spec/possible_tests/test_resolves.rb +27 -0
  67. data/spec/possible_tests/test_write_during_resolve.rb +27 -0
  68. data/spec/possible_tests/works_straight.rb +71 -0
  69. data/spec/spec_helper.rb +5 -0
  70. data/spec/timer_watcher_spec.rb +55 -0
  71. data/spec/unix_listener_spec.rb +25 -0
  72. data/spec/unix_server_spec.rb +25 -0
  73. metadata +184 -0
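For orientation only — this is a minimal sketch of the kind of API the gem exposes, not the packaged data/examples/echo_server.rb reproduced verbatim, and the class and method names (Coolio::TCPServer, Coolio::TCPSocket, Coolio::Loop.default, on_connect/on_read/on_close, attach) are assumed from the Rev-derived cool.io interface rather than confirmed against this exact release:

require 'cool.io'

ADDR = '127.0.0.1'
PORT = 4321

# One instance of this class is created per accepted client connection.
class EchoServerConnection < Coolio::TCPSocket
  def on_connect
    puts "#{remote_addr}:#{remote_port} connected"
  end

  def on_close
    puts "#{remote_addr}:#{remote_port} disconnected"
  end

  # Echo whatever the client sends straight back.
  def on_read(data)
    write data
  end
end

# Bind a listener, attach it to the default event loop, and run the loop.
server = Coolio::TCPServer.new(ADDR, PORT, EchoServerConnection)
server.attach(Coolio::Loop.default)
Coolio::Loop.default.run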
data/ext/libev/LICENSE ADDED
@@ -0,0 +1,36 @@
+ All files in libev are Copyright (C)2007,2008,2009 Marc Alexander Lehmann.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Alternatively, the contents of this package may be used under the terms
+ of the GNU General Public License ("GPL") version 2 or any later version,
+ in which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this package only
+ under the terms of the GPL and not to allow others to use your version of
+ this file under the BSD license, indicate your decision by deleting the
+ provisions above and replace them with the notice and other provisions
+ required by the GPL in this and the other files of this package. If you do
+ not delete the provisions above, a recipient may use your version of this
+ file under either the BSD or the GPL.
data/ext/libev/README ADDED
@@ -0,0 +1,58 @@
+ libev is a high-performance event loop/event model with lots of features.
+ (see benchmark at http://libev.schmorp.de/bench.html)
+
+
+ ABOUT
+
+ Homepage: http://software.schmorp.de/pkg/libev
+ Mailinglist: libev@lists.schmorp.de
+ http://lists.schmorp.de/cgi-bin/mailman/listinfo/libev
+ Library Documentation: http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod
+
+ Libev is modelled (very losely) after libevent and the Event perl
+ module, but is faster, scales better and is more correct, and also more
+ featureful. And also smaller. Yay.
+
+ Some of the specialties of libev not commonly found elsewhere are:
+
+ - extensive and detailed, readable documentation (not doxygen garbage).
+ - fully supports fork, can detect fork in various ways and automatically
+ re-arms kernel mechanisms that do not support fork.
+ - highly optimised select, poll, epoll, kqueue and event ports backends.
+ - filesystem object (path) watching (with optional linux inotify support).
+ - wallclock-based times (using absolute time, cron-like).
+ - relative timers/timeouts (handle time jumps).
+ - fast intra-thread communication between multiple
+ event loops (with optional fast linux eventfd backend).
+ - extremely easy to embed.
+ - very small codebase, no bloated library.
+ - fully extensible by being able to plug into the event loop,
+ integrate other event loops, integrate other event loop users.
+ - very little memory use (small watchers, small event loop data).
+ - optional C++ interface allowing method and function callbacks
+ at no extra memory or runtime overhead.
+ - optional Perl interface with similar characteristics (capable
+ of running Glib/Gtk2 on libev, interfaces with Net::SNMP and
+ libadns).
+ - support for other languages (multiple C++ interfaces, D, Ruby,
+ Python) available from third-parties.
+
+ Examples of programs that embed libev: the EV perl module,
+ rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the Deliantra MMORPG
+ server (http://www.deliantra.net/), Rubinius (a next-generation Ruby
+ VM), the Ebb web server, the Rev event toolkit.
+
+
+ CONTRIBUTORS
+
+ libev was written and designed by Marc Lehmann and Emanuele Giaquinta.
+
+ The following people sent in patches or made other noteworthy
+ contributions to the design (for minor patches, see the Changes
+ file. If I forgot to include you, please shout at me, it was an
+ accident):
+
+ W.C.A. Wijngaards
+ Christopher Layne
+ Chris Brody
+
data/ext/libev/README.embed ADDED
@@ -0,0 +1,3 @@
+ This file is now included in the main libev documentation, see
+
+ http://cvs.schmorp.de/libev/ev.html
data/ext/libev/ev.c ADDED
@@ -0,0 +1,3867 @@
1
+ /*
2
+ * libev event processing core, watcher management
3
+ *
4
+ * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without modifica-
8
+ * tion, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright
14
+ * notice, this list of conditions and the following disclaimer in the
15
+ * documentation and/or other materials provided with the distribution.
16
+ *
17
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ *
28
+ * Alternatively, the contents of this file may be used under the terms of
29
+ * the GNU General Public License ("GPL") version 2 or any later version,
30
+ * in which case the provisions of the GPL are applicable instead of
31
+ * the above. If you wish to allow the use of your version of this file
32
+ * only under the terms of the GPL and not to allow others to use your
33
+ * version of this file under the BSD license, indicate your decision
34
+ * by deleting the provisions above and replace them with the notice
35
+ * and other provisions required by the GPL. If you do not delete the
36
+ * provisions above, a recipient may use your version of this file under
37
+ * either the BSD or the GPL.
38
+ */
39
+
40
+ /* this big block deduces configuration from config.h */
41
+ #ifndef EV_STANDALONE
42
+ # ifdef EV_CONFIG_H
43
+ # include EV_CONFIG_H
44
+ # else
45
+ # include "config.h"
46
+ # endif
47
+
48
+ # if HAVE_CLOCK_SYSCALL
49
+ # ifndef EV_USE_CLOCK_SYSCALL
50
+ # define EV_USE_CLOCK_SYSCALL 1
51
+ # ifndef EV_USE_REALTIME
52
+ # define EV_USE_REALTIME 0
53
+ # endif
54
+ # ifndef EV_USE_MONOTONIC
55
+ # define EV_USE_MONOTONIC 1
56
+ # endif
57
+ # endif
58
+ # elif !defined(EV_USE_CLOCK_SYSCALL)
59
+ # define EV_USE_CLOCK_SYSCALL 0
60
+ # endif
61
+
62
+ # if HAVE_CLOCK_GETTIME
63
+ # ifndef EV_USE_MONOTONIC
64
+ # define EV_USE_MONOTONIC 1
65
+ # endif
66
+ # ifndef EV_USE_REALTIME
67
+ # define EV_USE_REALTIME 0
68
+ # endif
69
+ # else
70
+ # ifndef EV_USE_MONOTONIC
71
+ # define EV_USE_MONOTONIC 0
72
+ # endif
73
+ # ifndef EV_USE_REALTIME
74
+ # define EV_USE_REALTIME 0
75
+ # endif
76
+ # endif
77
+
78
+ # if HAVE_NANOSLEEP
79
+ # ifndef EV_USE_NANOSLEEP
80
+ # define EV_USE_NANOSLEEP EV_FEATURE_OS
81
+ # endif
82
+ # else
83
+ # undef EV_USE_NANOSLEEP
84
+ # define EV_USE_NANOSLEEP 0
85
+ # endif
86
+
87
+ # if HAVE_SELECT && HAVE_SYS_SELECT_H
88
+ # ifndef EV_USE_SELECT
89
+ # define EV_USE_SELECT EV_FEATURE_BACKENDS
90
+ # endif
91
+ # else
92
+ # undef EV_USE_SELECT
93
+ # define EV_USE_SELECT 0
94
+ # endif
95
+
96
+ # if HAVE_POLL && HAVE_POLL_H
97
+ # ifndef EV_USE_POLL
98
+ # define EV_USE_POLL EV_FEATURE_BACKENDS
99
+ # endif
100
+ # else
101
+ # undef EV_USE_POLL
102
+ # define EV_USE_POLL 0
103
+ # endif
104
+
105
+ # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
106
+ # ifndef EV_USE_EPOLL
107
+ # define EV_USE_EPOLL EV_FEATURE_BACKENDS
108
+ # endif
109
+ # else
110
+ # undef EV_USE_EPOLL
111
+ # define EV_USE_EPOLL 0
112
+ # endif
113
+
114
+ # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
115
+ # ifndef EV_USE_KQUEUE
116
+ # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
117
+ # endif
118
+ # else
119
+ # undef EV_USE_KQUEUE
120
+ # define EV_USE_KQUEUE 0
121
+ # endif
122
+
123
+ # if HAVE_PORT_H && HAVE_PORT_CREATE
124
+ # ifndef EV_USE_PORT
125
+ # define EV_USE_PORT EV_FEATURE_BACKENDS
126
+ # endif
127
+ # else
128
+ # undef EV_USE_PORT
129
+ # define EV_USE_PORT 0
130
+ # endif
131
+
132
+ # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
133
+ # ifndef EV_USE_INOTIFY
134
+ # define EV_USE_INOTIFY EV_FEATURE_OS
135
+ # endif
136
+ # else
137
+ # undef EV_USE_INOTIFY
138
+ # define EV_USE_INOTIFY 0
139
+ # endif
140
+
141
+ # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
142
+ # ifndef EV_USE_SIGNALFD
143
+ # define EV_USE_SIGNALFD EV_FEATURE_OS
144
+ # endif
145
+ # else
146
+ # undef EV_USE_SIGNALFD
147
+ # define EV_USE_SIGNALFD 0
148
+ # endif
149
+
150
+ # if HAVE_EVENTFD
151
+ # ifndef EV_USE_EVENTFD
152
+ # define EV_USE_EVENTFD EV_FEATURE_OS
153
+ # endif
154
+ # else
155
+ # undef EV_USE_EVENTFD
156
+ # define EV_USE_EVENTFD 0
157
+ # endif
158
+
159
+ #endif
160
+
161
+ #include <math.h>
162
+ #include <stdlib.h>
163
+ #include <string.h>
164
+ #include <fcntl.h>
165
+ #include <stddef.h>
166
+
167
+ #include <stdio.h>
168
+
169
+ #include <assert.h>
170
+ #include <errno.h>
171
+ #include <sys/types.h>
172
+ #include <time.h>
173
+ #include <limits.h>
174
+
175
+ #include <signal.h>
176
+
177
+ #ifdef EV_H
178
+ # include EV_H
179
+ #else
180
+ # include "ev.h"
181
+ #endif
182
+
183
+ EV_CPP(extern "C" {)
184
+
185
+ #ifndef _WIN32
186
+ # include <sys/time.h>
187
+ # include <sys/wait.h>
188
+ # include <unistd.h>
189
+ #else
190
+ # include <io.h>
191
+ # define WIN32_LEAN_AND_MEAN
192
+ # include <windows.h>
193
+ # ifndef EV_SELECT_IS_WINSOCKET
194
+ # define EV_SELECT_IS_WINSOCKET 1
195
+ # endif
196
+ # undef EV_AVOID_STDIO
197
+ #endif
198
+
199
+ /* OS X, in its infinite idiocy, actually HARDCODES
200
+ * a limit of 1024 into their select. Where people have brains,
201
+ * OS X engineers apparently have a vacuum. Or maybe they were
202
+ * ordered to have a vacuum, or they do anything for money.
203
+ * This might help. Or not.
204
+ */
205
+ #define _DARWIN_UNLIMITED_SELECT 1
206
+
207
+ /* this block tries to deduce configuration from header-defined symbols and defaults */
208
+
209
+ /* try to deduce the maximum number of signals on this platform */
210
+ #if defined (EV_NSIG)
211
+ /* use what's provided */
212
+ #elif defined (NSIG)
213
+ # define EV_NSIG (NSIG)
214
+ #elif defined(_NSIG)
215
+ # define EV_NSIG (_NSIG)
216
+ #elif defined (SIGMAX)
217
+ # define EV_NSIG (SIGMAX+1)
218
+ #elif defined (SIG_MAX)
219
+ # define EV_NSIG (SIG_MAX+1)
220
+ #elif defined (_SIG_MAX)
221
+ # define EV_NSIG (_SIG_MAX+1)
222
+ #elif defined (MAXSIG)
223
+ # define EV_NSIG (MAXSIG+1)
224
+ #elif defined (MAX_SIG)
225
+ # define EV_NSIG (MAX_SIG+1)
226
+ #elif defined (SIGARRAYSIZE)
227
+ # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
228
+ #elif defined (_sys_nsig)
229
+ # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
230
+ #else
231
+ # error "unable to find value for NSIG, please report"
232
+ /* to make it compile regardless, just remove the above line, */
233
+ /* but consider reporting it, too! :) */
234
+ # define EV_NSIG 65
235
+ #endif
236
+
237
+ #ifndef EV_USE_CLOCK_SYSCALL
238
+ # if __linux && __GLIBC__ >= 2
239
+ # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
240
+ # else
241
+ # define EV_USE_CLOCK_SYSCALL 0
242
+ # endif
243
+ #endif
244
+
245
+ #ifndef EV_USE_MONOTONIC
246
+ # if defined (_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
247
+ # define EV_USE_MONOTONIC EV_FEATURE_OS
248
+ # else
249
+ # define EV_USE_MONOTONIC 0
250
+ # endif
251
+ #endif
252
+
253
+ #ifndef EV_USE_REALTIME
254
+ # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
255
+ #endif
256
+
257
+ #ifndef EV_USE_NANOSLEEP
258
+ # if _POSIX_C_SOURCE >= 199309L
259
+ # define EV_USE_NANOSLEEP EV_FEATURE_OS
260
+ # else
261
+ # define EV_USE_NANOSLEEP 0
262
+ # endif
263
+ #endif
264
+
265
+ #ifndef EV_USE_SELECT
266
+ # define EV_USE_SELECT EV_FEATURE_BACKENDS
267
+ #endif
268
+
269
+ #ifndef EV_USE_POLL
270
+ # ifdef _WIN32
271
+ # define EV_USE_POLL 0
272
+ # else
273
+ # define EV_USE_POLL EV_FEATURE_BACKENDS
274
+ # endif
275
+ #endif
276
+
277
+ #ifndef EV_USE_EPOLL
278
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
279
+ # define EV_USE_EPOLL EV_FEATURE_BACKENDS
280
+ # else
281
+ # define EV_USE_EPOLL 0
282
+ # endif
283
+ #endif
284
+
285
+ #ifndef EV_USE_KQUEUE
286
+ # define EV_USE_KQUEUE 0
287
+ #endif
288
+
289
+ #ifndef EV_USE_PORT
290
+ # define EV_USE_PORT 0
291
+ #endif
292
+
293
+ #ifndef EV_USE_INOTIFY
294
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
295
+ # define EV_USE_INOTIFY EV_FEATURE_OS
296
+ # else
297
+ # define EV_USE_INOTIFY 0
298
+ # endif
299
+ #endif
300
+
301
+ #ifndef EV_PID_HASHSIZE
302
+ # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
303
+ #endif
304
+
305
+ #ifndef EV_INOTIFY_HASHSIZE
306
+ # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
307
+ #endif
308
+
309
+ #ifndef EV_USE_EVENTFD
310
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
311
+ # define EV_USE_EVENTFD EV_FEATURE_OS
312
+ # else
313
+ # define EV_USE_EVENTFD 0
314
+ # endif
315
+ #endif
316
+
317
+ #ifndef EV_USE_SIGNALFD
318
+ # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
319
+ # define EV_USE_SIGNALFD EV_FEATURE_OS
320
+ # else
321
+ # define EV_USE_SIGNALFD 0
322
+ # endif
323
+ #endif
324
+
325
+ #if 0 /* debugging */
326
+ # define EV_VERIFY 3
327
+ # define EV_USE_4HEAP 1
328
+ # define EV_HEAP_CACHE_AT 1
329
+ #endif
330
+
331
+ #ifndef EV_VERIFY
332
+ # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
333
+ #endif
334
+
335
+ #ifndef EV_USE_4HEAP
336
+ # define EV_USE_4HEAP EV_FEATURE_DATA
337
+ #endif
338
+
339
+ #ifndef EV_HEAP_CACHE_AT
340
+ # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
341
+ #endif
342
+
343
+ /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
344
+ /* which makes programs even slower. might work on other unices, too. */
345
+ #if EV_USE_CLOCK_SYSCALL
346
+ # include <syscall.h>
347
+ # ifdef SYS_clock_gettime
348
+ # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
349
+ # undef EV_USE_MONOTONIC
350
+ # define EV_USE_MONOTONIC 1
351
+ # else
352
+ # undef EV_USE_CLOCK_SYSCALL
353
+ # define EV_USE_CLOCK_SYSCALL 0
354
+ # endif
355
+ #endif
356
+
357
+ /* this block fixes any misconfiguration where we know we run into trouble otherwise */
358
+
359
+ #ifdef _AIX
360
+ /* AIX has a completely broken poll.h header */
361
+ # undef EV_USE_POLL
362
+ # define EV_USE_POLL 0
363
+ #endif
364
+
365
+ #ifndef CLOCK_MONOTONIC
366
+ # undef EV_USE_MONOTONIC
367
+ # define EV_USE_MONOTONIC 0
368
+ #endif
369
+
370
+ #ifndef CLOCK_REALTIME
371
+ # undef EV_USE_REALTIME
372
+ # define EV_USE_REALTIME 0
373
+ #endif
374
+
375
+ #if !EV_STAT_ENABLE
376
+ # undef EV_USE_INOTIFY
377
+ # define EV_USE_INOTIFY 0
378
+ #endif
379
+
380
+ #if !EV_USE_NANOSLEEP
381
+ # ifndef _WIN32
382
+ # include <sys/select.h>
383
+ # endif
384
+ #endif
385
+
386
+ #if EV_USE_INOTIFY
387
+ # include <sys/statfs.h>
388
+ # include <sys/inotify.h>
389
+ /* some very old inotify.h headers don't have IN_DONT_FOLLOW */
390
+ # ifndef IN_DONT_FOLLOW
391
+ # undef EV_USE_INOTIFY
392
+ # define EV_USE_INOTIFY 0
393
+ # endif
394
+ #endif
395
+
396
+ #if EV_SELECT_IS_WINSOCKET
397
+ # include <winsock.h>
398
+ #endif
399
+
400
+ #if EV_USE_EVENTFD
401
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
402
+ # include <stdint.h>
403
+ # ifndef EFD_NONBLOCK
404
+ # define EFD_NONBLOCK O_NONBLOCK
405
+ # endif
406
+ # ifndef EFD_CLOEXEC
407
+ # ifdef O_CLOEXEC
408
+ # define EFD_CLOEXEC O_CLOEXEC
409
+ # else
410
+ # define EFD_CLOEXEC 02000000
411
+ # endif
412
+ # endif
413
+ EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
414
+ #endif
415
+
416
+ #if EV_USE_SIGNALFD
417
+ /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
418
+ # include <stdint.h>
419
+ # ifndef SFD_NONBLOCK
420
+ # define SFD_NONBLOCK O_NONBLOCK
421
+ # endif
422
+ # ifndef SFD_CLOEXEC
423
+ # ifdef O_CLOEXEC
424
+ # define SFD_CLOEXEC O_CLOEXEC
425
+ # else
426
+ # define SFD_CLOEXEC 02000000
427
+ # endif
428
+ # endif
429
+ EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
430
+
431
+ struct signalfd_siginfo
432
+ {
433
+ uint32_t ssi_signo;
434
+ char pad[128 - sizeof (uint32_t)];
435
+ };
436
+ #endif
437
+
438
+ /**/
439
+
440
+ #if EV_VERIFY >= 3
441
+ # define EV_FREQUENT_CHECK ev_verify (EV_A)
442
+ #else
443
+ # define EV_FREQUENT_CHECK do { } while (0)
444
+ #endif
445
+
446
+ /*
447
+ * This is used to avoid floating point rounding problems.
448
+ * It is added to ev_rt_now when scheduling periodics
449
+ * to ensure progress, time-wise, even when rounding
450
+ * errors are against us.
451
+ * This value is good at least till the year 4000.
452
+ * Better solutions welcome.
453
+ */
454
+ #define TIME_EPSILON 0.0001220703125 /* 1/8192 */
455
+
456
+ #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
457
+ #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
458
+
459
+ #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
460
+ #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
461
+
462
+ #if __GNUC__ >= 4
463
+ # define expect(expr,value) __builtin_expect ((expr),(value))
464
+ # define noinline __attribute__ ((noinline))
465
+ #else
466
+ # define expect(expr,value) (expr)
467
+ # define noinline
468
+ # if __STDC_VERSION__ < 199901L && __GNUC__ < 2
469
+ # define inline
470
+ # endif
471
+ #endif
472
+
473
+ #define expect_false(expr) expect ((expr) != 0, 0)
474
+ #define expect_true(expr) expect ((expr) != 0, 1)
475
+ #define inline_size static inline
476
+
477
+ #if EV_FEATURE_CODE
478
+ # define inline_speed static inline
479
+ #else
480
+ # define inline_speed static noinline
481
+ #endif
482
+
483
+ #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
484
+
485
+ #if EV_MINPRI == EV_MAXPRI
486
+ # define ABSPRI(w) (((W)w), 0)
487
+ #else
488
+ # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
489
+ #endif
490
+
491
+ #define EMPTY /* required for microsofts broken pseudo-c compiler */
492
+ #define EMPTY2(a,b) /* used to suppress some warnings */
493
+
494
+ typedef ev_watcher *W;
495
+ typedef ev_watcher_list *WL;
496
+ typedef ev_watcher_time *WT;
497
+
498
+ #define ev_active(w) ((W)(w))->active
499
+ #define ev_at(w) ((WT)(w))->at
500
+
501
+ #if EV_USE_REALTIME
502
+ /* sig_atomic_t is used to avoid per-thread variables or locking but still */
503
+ /* giving it a reasonably high chance of working on typical architectures */
504
+ static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
505
+ #endif
506
+
507
+ #if EV_USE_MONOTONIC
508
+ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
509
+ #endif
510
+
511
+ #ifndef EV_FD_TO_WIN32_HANDLE
512
+ # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
513
+ #endif
514
+ #ifndef EV_WIN32_HANDLE_TO_FD
515
+ # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
516
+ #endif
517
+ #ifndef EV_WIN32_CLOSE_FD
518
+ # define EV_WIN32_CLOSE_FD(fd) close (fd)
519
+ #endif
520
+
521
+ #ifdef _WIN32
522
+ # include "ev_win32.c"
523
+ #endif
524
+
525
+ /*****************************************************************************/
526
+
527
+ #ifdef __linux
528
+ # include <sys/utsname.h>
529
+ #endif
530
+
531
+ static unsigned int noinline
532
+ ev_linux_version (void)
533
+ {
534
+ #ifdef __linux
535
+ unsigned int v = 0;
536
+ struct utsname buf;
537
+ int i;
538
+ char *p = buf.release;
539
+
540
+ if (uname (&buf))
541
+ return 0;
542
+
543
+ for (i = 3+1; --i; )
544
+ {
545
+ unsigned int c = 0;
546
+
547
+ for (;;)
548
+ {
549
+ if (*p >= '0' && *p <= '9')
550
+ c = c * 10 + *p++ - '0';
551
+ else
552
+ {
553
+ p += *p == '.';
554
+ break;
555
+ }
556
+ }
557
+
558
+ v = (v << 8) | c;
559
+ }
560
+
561
+ return v;
562
+ #else
563
+ return 0;
564
+ #endif
565
+ }
566
+
567
+ /*****************************************************************************/
568
+
569
+ #if EV_AVOID_STDIO
570
+ static void noinline
571
+ ev_printerr (const char *msg)
572
+ {
573
+ write (STDERR_FILENO, msg, strlen (msg));
574
+ }
575
+ #endif
576
+
577
+ static void (*syserr_cb)(const char *msg);
578
+
579
+ void
580
+ ev_set_syserr_cb (void (*cb)(const char *msg))
581
+ {
582
+ syserr_cb = cb;
583
+ }
584
+
585
+ static void noinline
586
+ ev_syserr (const char *msg)
587
+ {
588
+ if (!msg)
589
+ msg = "(libev) system error";
590
+
591
+ if (syserr_cb)
592
+ syserr_cb (msg);
593
+ else
594
+ {
595
+ #if EV_AVOID_STDIO
596
+ ev_printerr (msg);
597
+ ev_printerr (": ");
598
+ ev_printerr (strerror (errno));
599
+ ev_printerr ("\n");
600
+ #else
601
+ perror (msg);
602
+ #endif
603
+ abort ();
604
+ }
605
+ }
606
+
607
+ static void *
608
+ ev_realloc_emul (void *ptr, long size)
609
+ {
610
+ #if __GLIBC__
611
+ return realloc (ptr, size);
612
+ #else
613
+ /* some systems, notably openbsd and darwin, fail to properly
614
+ * implement realloc (x, 0) (as required by both ansi c-89 and
615
+ * the single unix specification, so work around them here.
616
+ */
617
+
618
+ if (size)
619
+ return realloc (ptr, size);
620
+
621
+ free (ptr);
622
+ return 0;
623
+ #endif
624
+ }
625
+
626
+ static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
627
+
628
+ void
629
+ ev_set_allocator (void *(*cb)(void *ptr, long size))
630
+ {
631
+ alloc = cb;
632
+ }
633
+
634
+ inline_speed void *
635
+ ev_realloc (void *ptr, long size)
636
+ {
637
+ ptr = alloc (ptr, size);
638
+
639
+ if (!ptr && size)
640
+ {
641
+ #if EV_AVOID_STDIO
642
+ ev_printerr ("(libev) memory allocation failed, aborting.\n");
643
+ #else
644
+ fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
645
+ #endif
646
+ abort ();
647
+ }
648
+
649
+ return ptr;
650
+ }
651
+
652
+ #define ev_malloc(size) ev_realloc (0, (size))
653
+ #define ev_free(ptr) ev_realloc ((ptr), 0)
654
+
655
+ /*****************************************************************************/
656
+
657
+ /* set in reify when reification needed */
658
+ #define EV_ANFD_REIFY 1
659
+
660
+ /* file descriptor info structure */
661
+ typedef struct
662
+ {
663
+ WL head;
664
+ unsigned char events; /* the events watched for */
665
+ unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
666
+ unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
667
+ unsigned char unused;
668
+ #if EV_USE_EPOLL
669
+ unsigned int egen; /* generation counter to counter epoll bugs */
670
+ #endif
671
+ #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
672
+ SOCKET handle;
673
+ #endif
674
+ #if EV_USE_IOCP
675
+ OVERLAPPED or, ow;
676
+ #endif
677
+ } ANFD;
678
+
679
+ /* stores the pending event set for a given watcher */
680
+ typedef struct
681
+ {
682
+ W w;
683
+ int events; /* the pending event set for the given watcher */
684
+ } ANPENDING;
685
+
686
+ #if EV_USE_INOTIFY
687
+ /* hash table entry per inotify-id */
688
+ typedef struct
689
+ {
690
+ WL head;
691
+ } ANFS;
692
+ #endif
693
+
694
+ /* Heap Entry */
695
+ #if EV_HEAP_CACHE_AT
696
+ /* a heap element */
697
+ typedef struct {
698
+ ev_tstamp at;
699
+ WT w;
700
+ } ANHE;
701
+
702
+ #define ANHE_w(he) (he).w /* access watcher, read-write */
703
+ #define ANHE_at(he) (he).at /* access cached at, read-only */
704
+ #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
705
+ #else
706
+ /* a heap element */
707
+ typedef WT ANHE;
708
+
709
+ #define ANHE_w(he) (he)
710
+ #define ANHE_at(he) (he)->at
711
+ #define ANHE_at_cache(he)
712
+ #endif
713
+
714
+ #if EV_MULTIPLICITY
715
+
716
+ struct ev_loop
717
+ {
718
+ ev_tstamp ev_rt_now;
719
+ #define ev_rt_now ((loop)->ev_rt_now)
720
+ #define VAR(name,decl) decl;
721
+ #include "ev_vars.h"
722
+ #undef VAR
723
+ };
724
+ #include "ev_wrap.h"
725
+
726
+ static struct ev_loop default_loop_struct;
727
+ struct ev_loop *ev_default_loop_ptr;
728
+
729
+ #else
730
+
731
+ ev_tstamp ev_rt_now;
732
+ #define VAR(name,decl) static decl;
733
+ #include "ev_vars.h"
734
+ #undef VAR
735
+
736
+ static int ev_default_loop_ptr;
737
+
738
+ #endif
739
+
740
+ #if EV_FEATURE_API
741
+ # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
742
+ # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
743
+ # define EV_INVOKE_PENDING invoke_cb (EV_A)
744
+ #else
745
+ # define EV_RELEASE_CB (void)0
746
+ # define EV_ACQUIRE_CB (void)0
747
+ # define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
748
+ #endif
749
+
750
+ #define EVBREAK_RECURSE 0x80
751
+
752
+ /*****************************************************************************/
753
+
754
+ #ifndef EV_HAVE_EV_TIME
755
+ ev_tstamp
756
+ ev_time (void)
757
+ {
758
+ #if EV_USE_REALTIME
759
+ if (expect_true (have_realtime))
760
+ {
761
+ struct timespec ts;
762
+ clock_gettime (CLOCK_REALTIME, &ts);
763
+ return ts.tv_sec + ts.tv_nsec * 1e-9;
764
+ }
765
+ #endif
766
+
767
+ struct timeval tv;
768
+ gettimeofday (&tv, 0);
769
+ return tv.tv_sec + tv.tv_usec * 1e-6;
770
+ }
771
+ #endif
772
+
773
+ inline_size ev_tstamp
774
+ get_clock (void)
775
+ {
776
+ #if EV_USE_MONOTONIC
777
+ if (expect_true (have_monotonic))
778
+ {
779
+ struct timespec ts;
780
+ clock_gettime (CLOCK_MONOTONIC, &ts);
781
+ return ts.tv_sec + ts.tv_nsec * 1e-9;
782
+ }
783
+ #endif
784
+
785
+ return ev_time ();
786
+ }
787
+
788
+ #if EV_MULTIPLICITY
789
+ ev_tstamp
790
+ ev_now (EV_P)
791
+ {
792
+ return ev_rt_now;
793
+ }
794
+ #endif
795
+
796
+ void
797
+ ev_sleep (ev_tstamp delay)
798
+ {
799
+ if (delay > 0.)
800
+ {
801
+ #if EV_USE_NANOSLEEP
802
+ struct timespec ts;
803
+
804
+ EV_TS_SET (ts, delay);
805
+ nanosleep (&ts, 0);
806
+ #elif defined(_WIN32)
807
+ Sleep ((unsigned long)(delay * 1e3));
808
+ #else
809
+ struct timeval tv;
810
+
811
+ /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
812
+ /* something not guaranteed by newer posix versions, but guaranteed */
813
+ /* by older ones */
814
+ EV_TV_SET (tv, delay);
815
+ select (0, 0, 0, 0, &tv);
816
+ #endif
817
+ }
818
+ }
819
+
820
+ /*****************************************************************************/
821
+
822
+ #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
823
+
824
+ /* find a suitable new size for the given array, */
825
+ /* hopefully by rounding to a nice-to-malloc size */
826
+ inline_size int
827
+ array_nextsize (int elem, int cur, int cnt)
828
+ {
829
+ int ncur = cur + 1;
830
+
831
+ do
832
+ ncur <<= 1;
833
+ while (cnt > ncur);
834
+
835
+ /* if size is large, round to MALLOC_ROUND - 4 * longs to accomodate malloc overhead */
836
+ if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
837
+ {
838
+ ncur *= elem;
839
+ ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
840
+ ncur = ncur - sizeof (void *) * 4;
841
+ ncur /= elem;
842
+ }
843
+
844
+ return ncur;
845
+ }
846
+
847
+ static noinline void *
848
+ array_realloc (int elem, void *base, int *cur, int cnt)
849
+ {
850
+ *cur = array_nextsize (elem, *cur, cnt);
851
+ return ev_realloc (base, elem * *cur);
852
+ }
853
+
854
+ #define array_init_zero(base,count) \
855
+ memset ((void *)(base), 0, sizeof (*(base)) * (count))
856
+
857
+ #define array_needsize(type,base,cur,cnt,init) \
858
+ if (expect_false ((cnt) > (cur))) \
859
+ { \
860
+ int ocur_ = (cur); \
861
+ (base) = (type *)array_realloc \
862
+ (sizeof (type), (base), &(cur), (cnt)); \
863
+ init ((base) + (ocur_), (cur) - ocur_); \
864
+ }
865
+
866
+ #if 0
867
+ #define array_slim(type,stem) \
868
+ if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
869
+ { \
870
+ stem ## max = array_roundsize (stem ## cnt >> 1); \
871
+ base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
872
+ fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
873
+ }
874
+ #endif
875
+
876
+ #define array_free(stem, idx) \
877
+ ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
878
+
879
+ /*****************************************************************************/
880
+
881
+ /* dummy callback for pending events */
882
+ static void noinline
883
+ pendingcb (EV_P_ ev_prepare *w, int revents)
884
+ {
885
+ }
886
+
887
+ void noinline
888
+ ev_feed_event (EV_P_ void *w, int revents)
889
+ {
890
+ W w_ = (W)w;
891
+ int pri = ABSPRI (w_);
892
+
893
+ if (expect_false (w_->pending))
894
+ pendings [pri][w_->pending - 1].events |= revents;
895
+ else
896
+ {
897
+ w_->pending = ++pendingcnt [pri];
898
+ array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
899
+ pendings [pri][w_->pending - 1].w = w_;
900
+ pendings [pri][w_->pending - 1].events = revents;
901
+ }
902
+ }
903
+
904
+ inline_speed void
905
+ feed_reverse (EV_P_ W w)
906
+ {
907
+ array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
908
+ rfeeds [rfeedcnt++] = w;
909
+ }
910
+
911
+ inline_size void
912
+ feed_reverse_done (EV_P_ int revents)
913
+ {
914
+ do
915
+ ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
916
+ while (rfeedcnt);
917
+ }
918
+
919
+ inline_speed void
920
+ queue_events (EV_P_ W *events, int eventcnt, int type)
921
+ {
922
+ int i;
923
+
924
+ for (i = 0; i < eventcnt; ++i)
925
+ ev_feed_event (EV_A_ events [i], type);
926
+ }
927
+
928
+ /*****************************************************************************/
929
+
930
+ inline_speed void
931
+ fd_event_nocheck (EV_P_ int fd, int revents)
932
+ {
933
+ ANFD *anfd = anfds + fd;
934
+ ev_io *w;
935
+
936
+ for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
937
+ {
938
+ int ev = w->events & revents;
939
+
940
+ if (ev)
941
+ ev_feed_event (EV_A_ (W)w, ev);
942
+ }
943
+ }
944
+
945
+ /* do not submit kernel events for fds that have reify set */
946
+ /* because that means they changed while we were polling for new events */
947
+ inline_speed void
948
+ fd_event (EV_P_ int fd, int revents)
949
+ {
950
+ ANFD *anfd = anfds + fd;
951
+
952
+ if (expect_true (!anfd->reify))
953
+ fd_event_nocheck (EV_A_ fd, revents);
954
+ }
955
+
956
+ void
957
+ ev_feed_fd_event (EV_P_ int fd, int revents)
958
+ {
959
+ if (fd >= 0 && fd < anfdmax)
960
+ fd_event_nocheck (EV_A_ fd, revents);
961
+ }
962
+
963
+ /* make sure the external fd watch events are in-sync */
964
+ /* with the kernel/libev internal state */
965
+ inline_size void
966
+ fd_reify (EV_P)
967
+ {
968
+ int i;
969
+
970
+ for (i = 0; i < fdchangecnt; ++i)
971
+ {
972
+ int fd = fdchanges [i];
973
+ ANFD *anfd = anfds + fd;
974
+ ev_io *w;
975
+
976
+ unsigned char o_events = anfd->events;
977
+ unsigned char o_reify = anfd->reify;
978
+
979
+ anfd->reify = 0;
980
+
981
+ #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
982
+ if (o_reify & EV__IOFDSET)
983
+ {
984
+ unsigned long arg;
985
+ anfd->handle = EV_FD_TO_WIN32_HANDLE (fd);
986
+ assert (("libev: only socket fds supported in this configuration", ioctlsocket (anfd->handle, FIONREAD, &arg) == 0));
987
+ printf ("oi %d %x\n", fd, anfd->handle);//D
988
+ }
989
+ #endif
990
+
991
+ /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
992
+ {
993
+ anfd->events = 0;
994
+
995
+ for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
996
+ anfd->events |= (unsigned char)w->events;
997
+
998
+ if (o_events != anfd->events)
999
+ o_reify = EV__IOFDSET; /* actually |= */
1000
+ }
1001
+
1002
+ if (o_reify & EV__IOFDSET)
1003
+ backend_modify (EV_A_ fd, o_events, anfd->events);
1004
+ }
1005
+
1006
+ fdchangecnt = 0;
1007
+ }
1008
+
1009
+ /* something about the given fd changed */
1010
+ inline_size void
1011
+ fd_change (EV_P_ int fd, int flags)
1012
+ {
1013
+ unsigned char reify = anfds [fd].reify;
1014
+ anfds [fd].reify |= flags;
1015
+
1016
+ if (expect_true (!reify))
1017
+ {
1018
+ ++fdchangecnt;
1019
+ array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
1020
+ fdchanges [fdchangecnt - 1] = fd;
1021
+ }
1022
+ }
1023
+
1024
+ /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
1025
+ inline_speed void
1026
+ fd_kill (EV_P_ int fd)
1027
+ {
1028
+ ev_io *w;
1029
+
1030
+ while ((w = (ev_io *)anfds [fd].head))
1031
+ {
1032
+ ev_io_stop (EV_A_ w);
1033
+ ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
1034
+ }
1035
+ }
1036
+
1037
+ /* check whether the given fd is actually valid, for error recovery */
1038
+ inline_size int
1039
+ fd_valid (int fd)
1040
+ {
1041
+ #ifdef _WIN32
1042
+ return EV_FD_TO_WIN32_HANDLE (fd) != -1;
1043
+ #else
1044
+ return fcntl (fd, F_GETFD) != -1;
1045
+ #endif
1046
+ }
1047
+
1048
+ /* called on EBADF to verify fds */
1049
+ static void noinline
1050
+ fd_ebadf (EV_P)
1051
+ {
1052
+ int fd;
1053
+
1054
+ for (fd = 0; fd < anfdmax; ++fd)
1055
+ if (anfds [fd].events)
1056
+ if (!fd_valid (fd) && errno == EBADF)
1057
+ fd_kill (EV_A_ fd);
1058
+ }
1059
+
1060
+ /* called on ENOMEM in select/poll to kill some fds and retry */
1061
+ static void noinline
1062
+ fd_enomem (EV_P)
1063
+ {
1064
+ int fd;
1065
+
1066
+ for (fd = anfdmax; fd--; )
1067
+ if (anfds [fd].events)
1068
+ {
1069
+ fd_kill (EV_A_ fd);
1070
+ break;
1071
+ }
1072
+ }
1073
+
1074
+ /* usually called after fork if backend needs to re-arm all fds from scratch */
1075
+ static void noinline
1076
+ fd_rearm_all (EV_P)
1077
+ {
1078
+ int fd;
1079
+
1080
+ for (fd = 0; fd < anfdmax; ++fd)
1081
+ if (anfds [fd].events)
1082
+ {
1083
+ anfds [fd].events = 0;
1084
+ anfds [fd].emask = 0;
1085
+ fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
1086
+ }
1087
+ }
1088
+
1089
+ /* used to prepare libev internal fd's */
1090
+ /* this is not fork-safe */
1091
+ inline_speed void
1092
+ fd_intern (int fd)
1093
+ {
1094
+ #ifdef _WIN32
1095
+ unsigned long arg = 1;
1096
+ ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
1097
+ #else
1098
+ fcntl (fd, F_SETFD, FD_CLOEXEC);
1099
+ fcntl (fd, F_SETFL, O_NONBLOCK);
1100
+ #endif
1101
+ }
1102
+
1103
+ /*****************************************************************************/
1104
+
1105
+ /*
1106
+ * the heap functions want a real array index. array index 0 is guaranteed to not
1107
+ * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
1108
+ * the branching factor of the d-tree.
1109
+ */
1110
+
1111
+ /*
1112
+ * at the moment we allow libev the luxury of two heaps,
1113
+ * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
1114
+ * which is more cache-efficient.
1115
+ * the difference is about 5% with 50000+ watchers.
1116
+ */
1117
+ #if EV_USE_4HEAP
1118
+
1119
+ #define DHEAP 4
1120
+ #define HEAP0 (DHEAP - 1) /* index of first element in heap */
1121
+ #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
1122
+ #define UPHEAP_DONE(p,k) ((p) == (k))
1123
+
1124
+ /* away from the root */
1125
+ inline_speed void
1126
+ downheap (ANHE *heap, int N, int k)
1127
+ {
1128
+ ANHE he = heap [k];
1129
+ ANHE *E = heap + N + HEAP0;
1130
+
1131
+ for (;;)
1132
+ {
1133
+ ev_tstamp minat;
1134
+ ANHE *minpos;
1135
+ ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
1136
+
1137
+ /* find minimum child */
1138
+ if (expect_true (pos + DHEAP - 1 < E))
1139
+ {
1140
+ /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
1141
+ if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
1142
+ if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
1143
+ if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
1144
+ }
1145
+ else if (pos < E)
1146
+ {
1147
+ /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
1148
+ if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
1149
+ if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
1150
+ if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
1151
+ }
1152
+ else
1153
+ break;
1154
+
1155
+ if (ANHE_at (he) <= minat)
1156
+ break;
1157
+
1158
+ heap [k] = *minpos;
1159
+ ev_active (ANHE_w (*minpos)) = k;
1160
+
1161
+ k = minpos - heap;
1162
+ }
1163
+
1164
+ heap [k] = he;
1165
+ ev_active (ANHE_w (he)) = k;
1166
+ }
1167
+
1168
+ #else /* 4HEAP */
1169
+
1170
+ #define HEAP0 1
1171
+ #define HPARENT(k) ((k) >> 1)
1172
+ #define UPHEAP_DONE(p,k) (!(p))
1173
+
1174
+ /* away from the root */
1175
+ inline_speed void
1176
+ downheap (ANHE *heap, int N, int k)
1177
+ {
1178
+ ANHE he = heap [k];
1179
+
1180
+ for (;;)
1181
+ {
1182
+ int c = k << 1;
1183
+
1184
+ if (c >= N + HEAP0)
1185
+ break;
1186
+
1187
+ c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
1188
+ ? 1 : 0;
1189
+
1190
+ if (ANHE_at (he) <= ANHE_at (heap [c]))
1191
+ break;
1192
+
1193
+ heap [k] = heap [c];
1194
+ ev_active (ANHE_w (heap [k])) = k;
1195
+
1196
+ k = c;
1197
+ }
1198
+
1199
+ heap [k] = he;
1200
+ ev_active (ANHE_w (he)) = k;
1201
+ }
1202
+ #endif
1203
+
1204
+ /* towards the root */
1205
+ inline_speed void
1206
+ upheap (ANHE *heap, int k)
1207
+ {
1208
+ ANHE he = heap [k];
1209
+
1210
+ for (;;)
1211
+ {
1212
+ int p = HPARENT (k);
1213
+
1214
+ if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
1215
+ break;
1216
+
1217
+ heap [k] = heap [p];
1218
+ ev_active (ANHE_w (heap [k])) = k;
1219
+ k = p;
1220
+ }
1221
+
1222
+ heap [k] = he;
1223
+ ev_active (ANHE_w (he)) = k;
1224
+ }
1225
+
1226
+ /* move an element suitably so it is in a correct place */
1227
+ inline_size void
1228
+ adjustheap (ANHE *heap, int N, int k)
1229
+ {
1230
+ if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
1231
+ upheap (heap, k);
1232
+ else
1233
+ downheap (heap, N, k);
1234
+ }
1235
+
1236
+ /* rebuild the heap: this function is used only once and executed rarely */
1237
+ inline_size void
1238
+ reheap (ANHE *heap, int N)
1239
+ {
1240
+ int i;
1241
+
1242
+ /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
1243
+ /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
1244
+ for (i = 0; i < N; ++i)
1245
+ upheap (heap, i + HEAP0);
1246
+ }
1247
+
1248
+ /*****************************************************************************/
1249
+
1250
+ /* associate signal watchers to a signal signal */
1251
+ typedef struct
1252
+ {
1253
+ EV_ATOMIC_T pending;
1254
+ #if EV_MULTIPLICITY
1255
+ EV_P;
1256
+ #endif
1257
+ WL head;
1258
+ } ANSIG;
1259
+
1260
+ static ANSIG signals [EV_NSIG - 1];
1261
+
1262
+ /*****************************************************************************/
1263
+
1264
+ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1265
+
1266
+ static void noinline
1267
+ evpipe_init (EV_P)
1268
+ {
1269
+ if (!ev_is_active (&pipe_w))
1270
+ {
1271
+ # if EV_USE_EVENTFD
1272
+ evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
1273
+ if (evfd < 0 && errno == EINVAL)
1274
+ evfd = eventfd (0, 0);
1275
+
1276
+ if (evfd >= 0)
1277
+ {
1278
+ evpipe [0] = -1;
1279
+ fd_intern (evfd); /* doing it twice doesn't hurt */
1280
+ ev_io_set (&pipe_w, evfd, EV_READ);
1281
+ }
1282
+ else
1283
+ # endif
1284
+ {
1285
+ while (pipe (evpipe))
1286
+ ev_syserr ("(libev) error creating signal/async pipe");
1287
+
1288
+ fd_intern (evpipe [0]);
1289
+ fd_intern (evpipe [1]);
1290
+ ev_io_set (&pipe_w, evpipe [0], EV_READ);
1291
+ }
1292
+
1293
+ ev_io_start (EV_A_ &pipe_w);
1294
+ ev_unref (EV_A); /* watcher should not keep loop alive */
1295
+ }
1296
+ }
1297
+
1298
+ inline_size void
1299
+ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
1300
+ {
1301
+ if (!*flag)
1302
+ {
1303
+ int old_errno = errno; /* save errno because write might clobber it */
1304
+ char dummy;
1305
+
1306
+ *flag = 1;
1307
+
1308
+ #if EV_USE_EVENTFD
1309
+ if (evfd >= 0)
1310
+ {
1311
+ uint64_t counter = 1;
1312
+ write (evfd, &counter, sizeof (uint64_t));
1313
+ }
1314
+ else
1315
+ #endif
1316
+ /* win32 people keep sending patches that change this write() to send() */
1317
+ /* and then run away. but send() is wrong, it wants a socket handle on win32 */
1318
+ /* so when you think this write should be a send instead, please find out */
1319
+ /* where your send() is from - it's definitely not the microsoft send, and */
1320
+ /* tell me. thank you. */
1321
+ write (evpipe [1], &dummy, 1);
1322
+
1323
+ errno = old_errno;
1324
+ }
1325
+ }
1326
+
1327
+ /* called whenever the libev signal pipe */
1328
+ /* got some events (signal, async) */
1329
+ static void
1330
+ pipecb (EV_P_ ev_io *iow, int revents)
1331
+ {
1332
+ int i;
1333
+
1334
+ #if EV_USE_EVENTFD
1335
+ if (evfd >= 0)
1336
+ {
1337
+ uint64_t counter;
1338
+ read (evfd, &counter, sizeof (uint64_t));
1339
+ }
1340
+ else
1341
+ #endif
1342
+ {
1343
+ char dummy;
1344
+ /* see discussion in evpipe_write when you think this read should be recv in win32 */
1345
+ read (evpipe [0], &dummy, 1);
1346
+ }
1347
+
1348
+ if (sig_pending)
1349
+ {
1350
+ sig_pending = 0;
1351
+
1352
+ for (i = EV_NSIG - 1; i--; )
1353
+ if (expect_false (signals [i].pending))
1354
+ ev_feed_signal_event (EV_A_ i + 1);
1355
+ }
1356
+
1357
+ #if EV_ASYNC_ENABLE
1358
+ if (async_pending)
1359
+ {
1360
+ async_pending = 0;
1361
+
1362
+ for (i = asynccnt; i--; )
1363
+ if (asyncs [i]->sent)
1364
+ {
1365
+ asyncs [i]->sent = 0;
1366
+ ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1367
+ }
1368
+ }
1369
+ #endif
1370
+ }
1371
+
1372
+ /*****************************************************************************/
1373
+
1374
+ static void
1375
+ ev_sighandler (int signum)
1376
+ {
1377
+ #if EV_MULTIPLICITY
1378
+ EV_P = signals [signum - 1].loop;
1379
+ #endif
1380
+
1381
+ #ifdef _WIN32
1382
+ signal (signum, ev_sighandler);
1383
+ #endif
1384
+
1385
+ signals [signum - 1].pending = 1;
1386
+ evpipe_write (EV_A_ &sig_pending);
1387
+ }
1388
+
1389
+ void noinline
1390
+ ev_feed_signal_event (EV_P_ int signum)
1391
+ {
1392
+ WL w;
1393
+
1394
+ if (expect_false (signum <= 0 || signum > EV_NSIG))
1395
+ return;
1396
+
1397
+ --signum;
1398
+
1399
+ #if EV_MULTIPLICITY
1400
+ /* it is permissible to try to feed a signal to the wrong loop */
1401
+ /* or, likely more useful, feeding a signal nobody is waiting for */
1402
+
1403
+ if (expect_false (signals [signum].loop != EV_A))
1404
+ return;
1405
+ #endif
1406
+
1407
+ signals [signum].pending = 0;
1408
+
1409
+ for (w = signals [signum].head; w; w = w->next)
1410
+ ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
1411
+ }
1412
+
1413
+ #if EV_USE_SIGNALFD
1414
+ static void
1415
+ sigfdcb (EV_P_ ev_io *iow, int revents)
1416
+ {
1417
+ struct signalfd_siginfo si[2], *sip; /* these structs are big */
1418
+
1419
+ for (;;)
1420
+ {
1421
+ ssize_t res = read (sigfd, si, sizeof (si));
1422
+
1423
+ /* not ISO-C, as res might be -1, but works with SuS */
1424
+ for (sip = si; (char *)sip < (char *)si + res; ++sip)
1425
+ ev_feed_signal_event (EV_A_ sip->ssi_signo);
1426
+
1427
+ if (res < (ssize_t)sizeof (si))
1428
+ break;
1429
+ }
1430
+ }
1431
+ #endif
1432
+
1433
+ #endif
1434
+
1435
+ /*****************************************************************************/
1436
+
1437
+ #if EV_CHILD_ENABLE
1438
+ static WL childs [EV_PID_HASHSIZE];
1439
+
1440
+ static ev_signal childev;
1441
+
1442
+ #ifndef WIFCONTINUED
1443
+ # define WIFCONTINUED(status) 0
1444
+ #endif
1445
+
1446
+ /* handle a single child status event */
1447
+ inline_speed void
1448
+ child_reap (EV_P_ int chain, int pid, int status)
1449
+ {
1450
+ ev_child *w;
1451
+ int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
1452
+
1453
+ for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
1454
+ {
1455
+ if ((w->pid == pid || !w->pid)
1456
+ && (!traced || (w->flags & 1)))
1457
+ {
1458
+ ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
1459
+ w->rpid = pid;
1460
+ w->rstatus = status;
1461
+ ev_feed_event (EV_A_ (W)w, EV_CHILD);
1462
+ }
1463
+ }
1464
+ }
1465
+
1466
+ #ifndef WCONTINUED
1467
+ # define WCONTINUED 0
1468
+ #endif
1469
+
1470
+ /* called on sigchld etc., calls waitpid */
1471
+ static void
1472
+ childcb (EV_P_ ev_signal *sw, int revents)
1473
+ {
1474
+ int pid, status;
1475
+
1476
+ /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
1477
+ if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
1478
+ if (!WCONTINUED
1479
+ || errno != EINVAL
1480
+ || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
1481
+ return;
1482
+
1483
+ /* make sure we are called again until all children have been reaped */
1484
+ /* we need to do it this way so that the callback gets called before we continue */
1485
+ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
1486
+
1487
+ child_reap (EV_A_ pid, pid, status);
1488
+ if ((EV_PID_HASHSIZE) > 1)
1489
+ child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
1490
+ }
1491
+
1492
+ #endif
1493
+
1494
+ /*****************************************************************************/
1495
+
1496
+ #if EV_USE_IOCP
1497
+ # include "ev_iocp.c"
1498
+ #endif
1499
+ #if EV_USE_PORT
1500
+ # include "ev_port.c"
1501
+ #endif
1502
+ #if EV_USE_KQUEUE
1503
+ # include "ev_kqueue.c"
1504
+ #endif
1505
+ #if EV_USE_EPOLL
1506
+ # include "ev_epoll.c"
1507
+ #endif
1508
+ #if EV_USE_POLL
1509
+ # include "ev_poll.c"
1510
+ #endif
1511
+ #if EV_USE_SELECT
1512
+ # include "ev_select.c"
1513
+ #endif
1514
+
1515
+ int
1516
+ ev_version_major (void)
1517
+ {
1518
+ return EV_VERSION_MAJOR;
1519
+ }
1520
+
1521
+ int
1522
+ ev_version_minor (void)
1523
+ {
1524
+ return EV_VERSION_MINOR;
1525
+ }
1526
+
1527
+ /* return true if we are running with elevated privileges and should ignore env variables */
1528
+ int inline_size
1529
+ enable_secure (void)
1530
+ {
1531
+ #ifdef _WIN32
1532
+ return 0;
1533
+ #else
1534
+ return getuid () != geteuid ()
1535
+ || getgid () != getegid ();
1536
+ #endif
1537
+ }
1538
+
1539
+ unsigned int
1540
+ ev_supported_backends (void)
1541
+ {
1542
+ unsigned int flags = 0;
1543
+
1544
+ if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
1545
+ if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
1546
+ if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
1547
+ if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
1548
+ if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
1549
+
1550
+ return flags;
1551
+ }
1552
+
1553
+ unsigned int
1554
+ ev_recommended_backends (void)
1555
+ {
1556
+ unsigned int flags = ev_supported_backends ();
1557
+
1558
+ #ifndef __NetBSD__
1559
+ /* kqueue is borked on everything but netbsd apparently */
1560
+ /* it usually doesn't work correctly on anything but sockets and pipes */
1561
+ flags &= ~EVBACKEND_KQUEUE;
1562
+ #endif
1563
+ #ifdef __APPLE__
1564
+ /* only select works correctly on that "unix-certified" platform */
1565
+ flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
1566
+ flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
1567
+ #endif
1568
+ #ifdef __FreeBSD__
1569
+ flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
1570
+ #endif
1571
+
1572
+ return flags;
1573
+ }
1574
+
1575
+ unsigned int
1576
+ ev_embeddable_backends (void)
1577
+ {
1578
+ int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
1579
+
1580
+ /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
1581
+ if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
1582
+ flags &= ~EVBACKEND_EPOLL;
1583
+
1584
+ return flags;
1585
+ }
1586
+
1587
+ unsigned int
1588
+ ev_backend (EV_P)
1589
+ {
1590
+ return backend;
1591
+ }
1592
+
1593
+ #if EV_FEATURE_API
1594
+ unsigned int
1595
+ ev_iteration (EV_P)
1596
+ {
1597
+ return loop_count;
1598
+ }
1599
+
1600
+ unsigned int
1601
+ ev_depth (EV_P)
1602
+ {
1603
+ return loop_depth;
1604
+ }
1605
+
1606
+ void
1607
+ ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
1608
+ {
1609
+ io_blocktime = interval;
1610
+ }
1611
+
1612
+ void
1613
+ ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
1614
+ {
1615
+ timeout_blocktime = interval;
1616
+ }
1617
+
1618
+ void
1619
+ ev_set_userdata (EV_P_ void *data)
1620
+ {
1621
+ userdata = data;
1622
+ }
1623
+
1624
+ void *
1625
+ ev_userdata (EV_P)
1626
+ {
1627
+ return userdata;
1628
+ }
1629
+
1630
+ void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P))
1631
+ {
1632
+ invoke_cb = invoke_pending_cb;
1633
+ }
1634
+
1635
+ void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
1636
+ {
1637
+ release_cb = release;
1638
+ acquire_cb = acquire;
1639
+ }
1640
+ #endif
1641
+
1642
+ /* initialise a loop structure, must be zero-initialised */
1643
+ static void noinline
1644
+ loop_init (EV_P_ unsigned int flags)
1645
+ {
1646
+ if (!backend)
1647
+ {
1648
+ #if EV_USE_REALTIME
1649
+ if (!have_realtime)
1650
+ {
1651
+ struct timespec ts;
1652
+
1653
+ if (!clock_gettime (CLOCK_REALTIME, &ts))
1654
+ have_realtime = 1;
1655
+ }
1656
+ #endif
1657
+
1658
+ #if EV_USE_MONOTONIC
1659
+ if (!have_monotonic)
1660
+ {
1661
+ struct timespec ts;
1662
+
1663
+ if (!clock_gettime (CLOCK_MONOTONIC, &ts))
1664
+ have_monotonic = 1;
1665
+ }
1666
+ #endif
1667
+
1668
+ /* pid check not overridable via env */
1669
+ #ifndef _WIN32
1670
+ if (flags & EVFLAG_FORKCHECK)
1671
+ curpid = getpid ();
1672
+ #endif
1673
+
1674
+ if (!(flags & EVFLAG_NOENV)
1675
+ && !enable_secure ()
1676
+ && getenv ("LIBEV_FLAGS"))
1677
+ flags = atoi (getenv ("LIBEV_FLAGS"));
1678
+
1679
+ ev_rt_now = ev_time ();
1680
+ mn_now = get_clock ();
1681
+ now_floor = mn_now;
1682
+ rtmn_diff = ev_rt_now - mn_now;
1683
+ #if EV_FEATURE_API
1684
+ invoke_cb = ev_invoke_pending;
1685
+ #endif
1686
+
1687
+ io_blocktime = 0.;
1688
+ timeout_blocktime = 0.;
1689
+ backend = 0;
1690
+ backend_fd = -1;
1691
+ sig_pending = 0;
1692
+ #if EV_ASYNC_ENABLE
1693
+ async_pending = 0;
1694
+ #endif
1695
+ #if EV_USE_INOTIFY
1696
+ fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
1697
+ #endif
1698
+ #if EV_USE_SIGNALFD
1699
+ sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
1700
+ #endif
1701
+
1702
+ if (!(flags & 0x0000ffffU))
1703
+ flags |= ev_recommended_backends ();
1704
+
1705
+ #if EV_USE_IOCP
1706
+ if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
1707
+ #endif
1708
+ #if EV_USE_PORT
1709
+ if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1710
+ #endif
1711
+ #if EV_USE_KQUEUE
1712
+ if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
1713
+ #endif
1714
+ #if EV_USE_EPOLL
1715
+ if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
1716
+ #endif
1717
+ #if EV_USE_POLL
1718
+ if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
1719
+ #endif
1720
+ #if EV_USE_SELECT
1721
+ if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
1722
+ #endif
1723
+
1724
+ ev_prepare_init (&pending_w, pendingcb);
1725
+
1726
+ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1727
+ ev_init (&pipe_w, pipecb);
1728
+ ev_set_priority (&pipe_w, EV_MAXPRI);
1729
+ #endif
1730
+ }
1731
+ }
1732
+
1733
+ /* free up a loop structure */
1734
+ void
1735
+ ev_loop_destroy (EV_P)
1736
+ {
1737
+ int i;
1738
+
1739
+ #if EV_MULTIPLICITY
1740
+ /* mimic free (0) */
1741
+ if (!EV_A)
1742
+ return;
1743
+ #endif
1744
+
1745
+ #if EV_CLEANUP_ENABLE
1746
+ /* queue cleanup watchers (and execute them) */
1747
+ if (expect_false (cleanupcnt))
1748
+ {
1749
+ queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
1750
+ EV_INVOKE_PENDING;
1751
+ }
1752
+ #endif
1753
+
1754
+ #if EV_CHILD_ENABLE
1755
+ if (ev_is_active (&childev))
1756
+ {
1757
+ ev_ref (EV_A); /* child watcher */
1758
+ ev_signal_stop (EV_A_ &childev);
1759
+ }
1760
+ #endif
1761
+
1762
+ if (ev_is_active (&pipe_w))
1763
+ {
1764
+ /*ev_ref (EV_A);*/
1765
+ /*ev_io_stop (EV_A_ &pipe_w);*/
1766
+
1767
+ #if EV_USE_EVENTFD
1768
+ if (evfd >= 0)
1769
+ close (evfd);
1770
+ #endif
1771
+
1772
+ if (evpipe [0] >= 0)
1773
+ {
1774
+ EV_WIN32_CLOSE_FD (evpipe [0]);
1775
+ EV_WIN32_CLOSE_FD (evpipe [1]);
1776
+ }
1777
+ }
1778
+
1779
+ #if EV_USE_SIGNALFD
1780
+ if (ev_is_active (&sigfd_w))
1781
+ close (sigfd);
1782
+ #endif
1783
+
1784
+ #if EV_USE_INOTIFY
1785
+ if (fs_fd >= 0)
1786
+ close (fs_fd);
1787
+ #endif
1788
+
1789
+ if (backend_fd >= 0)
1790
+ close (backend_fd);
1791
+
1792
+ #if EV_USE_IOCP
1793
+ if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
1794
+ #endif
1795
+ #if EV_USE_PORT
1796
+ if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
1797
+ #endif
1798
+ #if EV_USE_KQUEUE
1799
+ if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
1800
+ #endif
1801
+ #if EV_USE_EPOLL
1802
+ if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
1803
+ #endif
1804
+ #if EV_USE_POLL
1805
+ if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
1806
+ #endif
1807
+ #if EV_USE_SELECT
1808
+ if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
1809
+ #endif
1810
+
1811
+ for (i = NUMPRI; i--; )
1812
+ {
1813
+ array_free (pending, [i]);
1814
+ #if EV_IDLE_ENABLE
1815
+ array_free (idle, [i]);
1816
+ #endif
1817
+ }
1818
+
1819
+ ev_free (anfds); anfds = 0; anfdmax = 0;
1820
+
1821
+ /* have to use the microsoft-never-gets-it-right macro */
1822
+ array_free (rfeed, EMPTY);
1823
+ array_free (fdchange, EMPTY);
1824
+ array_free (timer, EMPTY);
1825
+ #if EV_PERIODIC_ENABLE
1826
+ array_free (periodic, EMPTY);
1827
+ #endif
1828
+ #if EV_FORK_ENABLE
1829
+ array_free (fork, EMPTY);
1830
+ #endif
1831
+ #if EV_CLEANUP_ENABLE
1832
+ array_free (cleanup, EMPTY);
1833
+ #endif
1834
+ array_free (prepare, EMPTY);
1835
+ array_free (check, EMPTY);
1836
+ #if EV_ASYNC_ENABLE
1837
+ array_free (async, EMPTY);
1838
+ #endif
1839
+
1840
+ backend = 0;
1841
+
1842
+ #if EV_MULTIPLICITY
1843
+ if (ev_is_default_loop (EV_A))
1844
+ #endif
1845
+ ev_default_loop_ptr = 0;
1846
+ #if EV_MULTIPLICITY
1847
+ else
1848
+ ev_free (EV_A);
1849
+ #endif
1850
+ }
1851
+
1852
+ #if EV_USE_INOTIFY
1853
+ inline_size void infy_fork (EV_P);
1854
+ #endif
1855
+
1856
+ inline_size void
1857
+ loop_fork (EV_P)
1858
+ {
1859
+ #if EV_USE_PORT
1860
+ if (backend == EVBACKEND_PORT ) port_fork (EV_A);
1861
+ #endif
1862
+ #if EV_USE_KQUEUE
1863
+ if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
1864
+ #endif
1865
+ #if EV_USE_EPOLL
1866
+ if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
1867
+ #endif
1868
+ #if EV_USE_INOTIFY
1869
+ infy_fork (EV_A);
1870
+ #endif
1871
+
1872
+ if (ev_is_active (&pipe_w))
1873
+ {
1874
+ /* this "locks" the handlers against writing to the pipe */
1875
+ /* while we modify the fd vars */
1876
+ sig_pending = 1;
1877
+ #if EV_ASYNC_ENABLE
1878
+ async_pending = 1;
1879
+ #endif
1880
+
1881
+ ev_ref (EV_A);
1882
+ ev_io_stop (EV_A_ &pipe_w);
1883
+
1884
+ #if EV_USE_EVENTFD
1885
+ if (evfd >= 0)
1886
+ close (evfd);
1887
+ #endif
1888
+
1889
+ if (evpipe [0] >= 0)
1890
+ {
1891
+ EV_WIN32_CLOSE_FD (evpipe [0]);
1892
+ EV_WIN32_CLOSE_FD (evpipe [1]);
1893
+ }
1894
+
1895
+ #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1896
+ evpipe_init (EV_A);
1897
+ /* now iterate over everything, in case we missed something */
1898
+ pipecb (EV_A_ &pipe_w, EV_READ);
1899
+ #endif
1900
+ }
1901
+
1902
+ postfork = 0;
1903
+ }
1904
+
1905
+ #if EV_MULTIPLICITY
1906
+
1907
+ struct ev_loop *
1908
+ ev_loop_new (unsigned int flags)
1909
+ {
1910
+ EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
1911
+
1912
+ memset (EV_A, 0, sizeof (struct ev_loop));
1913
+ loop_init (EV_A_ flags);
1914
+
1915
+ if (ev_backend (EV_A))
1916
+ return EV_A;
1917
+
1918
+ ev_free (EV_A);
1919
+ return 0;
1920
+ }
1921
+
1922
+ #endif /* multiplicity */
1923
+
1924
+ #if EV_VERIFY
1925
+ static void noinline
1926
+ verify_watcher (EV_P_ W w)
1927
+ {
1928
+ assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
1929
+
1930
+ if (w->pending)
1931
+ assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
1932
+ }
1933
+
1934
+ static void noinline
1935
+ verify_heap (EV_P_ ANHE *heap, int N)
1936
+ {
1937
+ int i;
1938
+
1939
+ for (i = HEAP0; i < N + HEAP0; ++i)
1940
+ {
1941
+ assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
1942
+ assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
1943
+ assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));
1944
+
1945
+ verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
1946
+ }
1947
+ }
1948
+
1949
+ static void noinline
1950
+ array_verify (EV_P_ W *ws, int cnt)
1951
+ {
1952
+ while (cnt--)
1953
+ {
1954
+ assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
1955
+ verify_watcher (EV_A_ ws [cnt]);
1956
+ }
1957
+ }
1958
+ #endif
1959
+
1960
+ #if EV_FEATURE_API
1961
+ void
1962
+ ev_verify (EV_P)
1963
+ {
1964
+ #if EV_VERIFY
1965
+ int i;
1966
+ WL w;
1967
+
1968
+ assert (activecnt >= -1);
1969
+
1970
+ assert (fdchangemax >= fdchangecnt);
1971
+ for (i = 0; i < fdchangecnt; ++i)
1972
+ assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));
1973
+
1974
+ assert (anfdmax >= 0);
1975
+ for (i = 0; i < anfdmax; ++i)
1976
+ for (w = anfds [i].head; w; w = w->next)
1977
+ {
1978
+ verify_watcher (EV_A_ (W)w);
1979
+ assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
1980
+ assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
1981
+ }
1982
+
1983
+ assert (timermax >= timercnt);
1984
+ verify_heap (EV_A_ timers, timercnt);
1985
+
1986
+ #if EV_PERIODIC_ENABLE
1987
+ assert (periodicmax >= periodiccnt);
1988
+ verify_heap (EV_A_ periodics, periodiccnt);
1989
+ #endif
1990
+
1991
+ for (i = NUMPRI; i--; )
1992
+ {
1993
+ assert (pendingmax [i] >= pendingcnt [i]);
1994
+ #if EV_IDLE_ENABLE
1995
+ assert (idleall >= 0);
1996
+ assert (idlemax [i] >= idlecnt [i]);
1997
+ array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
1998
+ #endif
1999
+ }
2000
+
2001
+ #if EV_FORK_ENABLE
2002
+ assert (forkmax >= forkcnt);
2003
+ array_verify (EV_A_ (W *)forks, forkcnt);
2004
+ #endif
2005
+
2006
+ #if EV_CLEANUP_ENABLE
2007
+ assert (cleanupmax >= cleanupcnt);
2008
+ array_verify (EV_A_ (W *)cleanups, cleanupcnt);
2009
+ #endif
2010
+
2011
+ #if EV_ASYNC_ENABLE
2012
+ assert (asyncmax >= asynccnt);
2013
+ array_verify (EV_A_ (W *)asyncs, asynccnt);
2014
+ #endif
2015
+
2016
+ #if EV_PREPARE_ENABLE
2017
+ assert (preparemax >= preparecnt);
2018
+ array_verify (EV_A_ (W *)prepares, preparecnt);
2019
+ #endif
2020
+
2021
+ #if EV_CHECK_ENABLE
2022
+ assert (checkmax >= checkcnt);
2023
+ array_verify (EV_A_ (W *)checks, checkcnt);
2024
+ #endif
2025
+
2026
+ # if 0
2027
+ #if EV_CHILD_ENABLE
2028
+ for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
2029
+ for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
2030
+ #endif
2031
+ # endif
2032
+ #endif
2033
+ }
2034
+ #endif
2035
+
2036
+ #if EV_MULTIPLICITY
2037
+ struct ev_loop *
2038
+ #else
2039
+ int
2040
+ #endif
2041
+ ev_default_loop (unsigned int flags)
2042
+ {
2043
+ if (!ev_default_loop_ptr)
2044
+ {
2045
+ #if EV_MULTIPLICITY
2046
+ EV_P = ev_default_loop_ptr = &default_loop_struct;
2047
+ #else
2048
+ ev_default_loop_ptr = 1;
2049
+ #endif
2050
+
2051
+ loop_init (EV_A_ flags);
2052
+
2053
+ if (ev_backend (EV_A))
2054
+ {
2055
+ #if EV_CHILD_ENABLE
2056
+ ev_signal_init (&childev, childcb, SIGCHLD);
2057
+ ev_set_priority (&childev, EV_MAXPRI);
2058
+ ev_signal_start (EV_A_ &childev);
2059
+ ev_unref (EV_A); /* child watcher should not keep loop alive */
2060
+ #endif
2061
+ }
2062
+ else
2063
+ ev_default_loop_ptr = 0;
2064
+ }
2065
+
2066
+ return ev_default_loop_ptr;
2067
+ }
2068
+
2069
+ void
2070
+ ev_loop_fork (EV_P)
2071
+ {
2072
+ postfork = 1; /* must be in line with ev_default_fork */
2073
+ }
2074
+
2075
+ /*****************************************************************************/
2076
+
2077
+ void
2078
+ ev_invoke (EV_P_ void *w, int revents)
2079
+ {
2080
+ EV_CB_INVOKE ((W)w, revents);
2081
+ }
2082
+
2083
+ unsigned int
2084
+ ev_pending_count (EV_P)
2085
+ {
2086
+ int pri;
2087
+ unsigned int count = 0;
2088
+
2089
+ for (pri = NUMPRI; pri--; )
2090
+ count += pendingcnt [pri];
2091
+
2092
+ return count;
2093
+ }
2094
+
2095
+ void noinline
2096
+ ev_invoke_pending (EV_P)
2097
+ {
2098
+ int pri;
2099
+
2100
+ for (pri = NUMPRI; pri--; )
2101
+ while (pendingcnt [pri])
2102
+ {
2103
+ ANPENDING *p = pendings [pri] + --pendingcnt [pri];
2104
+
2105
+ /*assert (("libev: non-pending watcher on pending list", p->w->pending));*/
2106
+ /* ^ this is no longer true, as pending_w could be here */
2107
+
2108
+ p->w->pending = 0;
2109
+ EV_CB_INVOKE (p->w, p->events);
2110
+ EV_FREQUENT_CHECK;
2111
+ }
2112
+ }
2113
+
2114
+ #if EV_IDLE_ENABLE
2115
+ /* make idle watchers pending. this handles the "call-idle */
2116
+ /* only when higher priorities are idle" logic */
2117
+ inline_size void
2118
+ idle_reify (EV_P)
2119
+ {
2120
+ if (expect_false (idleall))
2121
+ {
2122
+ int pri;
2123
+
2124
+ for (pri = NUMPRI; pri--; )
2125
+ {
2126
+ if (pendingcnt [pri])
2127
+ break;
2128
+
2129
+ if (idlecnt [pri])
2130
+ {
2131
+ queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
2132
+ break;
2133
+ }
2134
+ }
2135
+ }
2136
+ }
2137
+ #endif
2138
+
2139
+ /* make timers pending */
2140
+ inline_size void
2141
+ timers_reify (EV_P)
2142
+ {
2143
+ EV_FREQUENT_CHECK;
2144
+
2145
+ if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
2146
+ {
2147
+ do
2148
+ {
2149
+ ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
2150
+
2151
+ /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/
2152
+
2153
+ /* first reschedule or stop timer */
2154
+ if (w->repeat)
2155
+ {
2156
+ ev_at (w) += w->repeat;
2157
+ if (ev_at (w) < mn_now)
2158
+ ev_at (w) = mn_now;
2159
+
2160
+ assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
2161
+
2162
+ ANHE_at_cache (timers [HEAP0]);
2163
+ downheap (timers, timercnt, HEAP0);
2164
+ }
2165
+ else
2166
+ ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
2167
+
2168
+ EV_FREQUENT_CHECK;
2169
+ feed_reverse (EV_A_ (W)w);
2170
+ }
2171
+ while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);
2172
+
2173
+ feed_reverse_done (EV_A_ EV_TIMER);
2174
+ }
2175
+ }
2176
+
2177
+ #if EV_PERIODIC_ENABLE
2178
+ /* make periodics pending */
2179
+ inline_size void
2180
+ periodics_reify (EV_P)
2181
+ {
2182
+ EV_FREQUENT_CHECK;
2183
+
2184
+ while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
2185
+ {
2186
+ int feed_count = 0;
2187
+
2188
+ do
2189
+ {
2190
+ ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
2191
+
2192
+ /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
2193
+
2194
+ /* first reschedule or stop timer */
2195
+ if (w->reschedule_cb)
2196
+ {
2197
+ ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2198
+
2199
+ assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
2200
+
2201
+ ANHE_at_cache (periodics [HEAP0]);
2202
+ downheap (periodics, periodiccnt, HEAP0);
2203
+ }
2204
+ else if (w->interval)
2205
+ {
2206
+ ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
2207
+ /* if next trigger time is not sufficiently in the future, put it there */
2208
+ /* this might happen because of floating point inexactness */
2209
+ if (ev_at (w) - ev_rt_now < TIME_EPSILON)
2210
+ {
2211
+ ev_at (w) += w->interval;
2212
+
2213
+ /* if interval is unreasonably low we might still have a time in the past */
2214
+ /* so correct this. this will make the periodic very inexact, but the user */
2215
+ /* has effectively asked to get triggered more often than possible */
2216
+ if (ev_at (w) < ev_rt_now)
2217
+ ev_at (w) = ev_rt_now;
2218
+ }
2219
+
2220
+ ANHE_at_cache (periodics [HEAP0]);
2221
+ downheap (periodics, periodiccnt, HEAP0);
2222
+ }
2223
+ else
2224
+ ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
2225
+
2226
+ EV_FREQUENT_CHECK;
2227
+ feed_reverse (EV_A_ (W)w);
2228
+ }
2229
+ while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);
2230
+
2231
+ feed_reverse_done (EV_A_ EV_PERIODIC);
2232
+ }
2233
+ }
2234
+
2235
+ /* simply recalculate all periodics */
2236
+ /* TODO: maybe ensure that at least one event happens when jumping forward? */
2237
+ static void noinline
2238
+ periodics_reschedule (EV_P)
2239
+ {
2240
+ int i;
2241
+
2242
+ /* adjust periodics after time jump */
2243
+ for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
2244
+ {
2245
+ ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
2246
+
2247
+ if (w->reschedule_cb)
2248
+ ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2249
+ else if (w->interval)
2250
+ ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
2251
+
2252
+ ANHE_at_cache (periodics [i]);
2253
+ }
2254
+
2255
+ reheap (periodics, periodiccnt);
2256
+ }
2257
+ #endif
2258
+
2259
+ /* adjust all timers by a given offset */
2260
+ static void noinline
2261
+ timers_reschedule (EV_P_ ev_tstamp adjust)
2262
+ {
2263
+ int i;
2264
+
2265
+ for (i = 0; i < timercnt; ++i)
2266
+ {
2267
+ ANHE *he = timers + i + HEAP0;
2268
+ ANHE_w (*he)->at += adjust;
2269
+ ANHE_at_cache (*he);
2270
+ }
2271
+ }
2272
+
2273
+ /* fetch new monotonic and realtime times from the kernel */
2274
+ /* also detect if there was a timejump, and act accordingly */
2275
+ inline_speed void
2276
+ time_update (EV_P_ ev_tstamp max_block)
2277
+ {
2278
+ #if EV_USE_MONOTONIC
2279
+ if (expect_true (have_monotonic))
2280
+ {
2281
+ int i;
2282
+ ev_tstamp odiff = rtmn_diff;
2283
+
2284
+ mn_now = get_clock ();
2285
+
2286
+ /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
2287
+ /* interpolate in the meantime */
2288
+ if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
2289
+ {
2290
+ ev_rt_now = rtmn_diff + mn_now;
2291
+ return;
2292
+ }
2293
+
2294
+ now_floor = mn_now;
2295
+ ev_rt_now = ev_time ();
2296
+
2297
+ /* loop a few times, before making important decisions.
2298
+ * on the choice of "4": one iteration isn't enough,
2299
+ * in case we get preempted during the calls to
2300
+ * ev_time and get_clock. a second call is almost guaranteed
2301
+ * to succeed in that case, though. and looping a few more times
2302
+ * doesn't hurt either as we only do this on time-jumps or
2303
+ * in the unlikely event of having been preempted here.
2304
+ */
2305
+ for (i = 4; --i; )
2306
+ {
2307
+ rtmn_diff = ev_rt_now - mn_now;
2308
+
2309
+ if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
2310
+ return; /* all is well */
2311
+
2312
+ ev_rt_now = ev_time ();
2313
+ mn_now = get_clock ();
2314
+ now_floor = mn_now;
2315
+ }
2316
+
2317
+ /* no timer adjustment, as the monotonic clock doesn't jump */
2318
+ /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
2319
+ # if EV_PERIODIC_ENABLE
2320
+ periodics_reschedule (EV_A);
2321
+ # endif
2322
+ }
2323
+ else
2324
+ #endif
2325
+ {
2326
+ ev_rt_now = ev_time ();
2327
+
2328
+ if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
2329
+ {
2330
+ /* adjust timers. this is easy, as the offset is the same for all of them */
2331
+ timers_reschedule (EV_A_ ev_rt_now - mn_now);
2332
+ #if EV_PERIODIC_ENABLE
2333
+ periodics_reschedule (EV_A);
2334
+ #endif
2335
+ }
2336
+
2337
+ mn_now = ev_rt_now;
2338
+ }
2339
+ }
2340
+
2341
+ void
2342
+ ev_run (EV_P_ int flags)
2343
+ {
2344
+ #if EV_FEATURE_API
2345
+ ++loop_depth;
2346
+ #endif
2347
+
2348
+ assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));
2349
+
2350
+ loop_done = EVBREAK_CANCEL;
2351
+
2352
+ EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */
2353
+
2354
+ do
2355
+ {
2356
+ #if EV_VERIFY >= 2
2357
+ ev_verify (EV_A);
2358
+ #endif
2359
+
2360
+ #ifndef _WIN32
2361
+ if (expect_false (curpid)) /* penalise the forking check even more */
2362
+ if (expect_false (getpid () != curpid))
2363
+ {
2364
+ curpid = getpid ();
2365
+ postfork = 1;
2366
+ }
2367
+ #endif
2368
+
2369
+ #if EV_FORK_ENABLE
2370
+ /* we might have forked, so queue fork handlers */
2371
+ if (expect_false (postfork))
2372
+ if (forkcnt)
2373
+ {
2374
+ queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
2375
+ EV_INVOKE_PENDING;
2376
+ }
2377
+ #endif
2378
+
2379
+ #if EV_PREPARE_ENABLE
2380
+ /* queue prepare watchers (and execute them) */
2381
+ if (expect_false (preparecnt))
2382
+ {
2383
+ queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
2384
+ EV_INVOKE_PENDING;
2385
+ }
2386
+ #endif
2387
+
2388
+ if (expect_false (loop_done))
2389
+ break;
2390
+
2391
+ /* we might have forked, so reify kernel state if necessary */
2392
+ if (expect_false (postfork))
2393
+ loop_fork (EV_A);
2394
+
2395
+ /* update fd-related kernel structures */
2396
+ fd_reify (EV_A);
2397
+
2398
+ /* calculate blocking time */
2399
+ {
2400
+ ev_tstamp waittime = 0.;
2401
+ ev_tstamp sleeptime = 0.;
2402
+
2403
+ /* remember old timestamp for io_blocktime calculation */
2404
+ ev_tstamp prev_mn_now = mn_now;
2405
+
2406
+ /* update time to cancel out callback processing overhead */
2407
+ time_update (EV_A_ 1e100);
2408
+
2409
+ if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt)))
2410
+ {
2411
+ waittime = MAX_BLOCKTIME;
2412
+
2413
+ if (timercnt)
2414
+ {
2415
+ ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
2416
+ if (waittime > to) waittime = to;
2417
+ }
2418
+
2419
+ #if EV_PERIODIC_ENABLE
2420
+ if (periodiccnt)
2421
+ {
2422
+ ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
2423
+ if (waittime > to) waittime = to;
2424
+ }
2425
+ #endif
2426
+
2427
+ /* don't let timeouts decrease the waittime below timeout_blocktime */
2428
+ if (expect_false (waittime < timeout_blocktime))
2429
+ waittime = timeout_blocktime;
2430
+
2431
+ /* extra check because io_blocktime is commonly 0 */
2432
+ if (expect_false (io_blocktime))
2433
+ {
2434
+ sleeptime = io_blocktime - (mn_now - prev_mn_now);
2435
+
2436
+ if (sleeptime > waittime - backend_fudge)
2437
+ sleeptime = waittime - backend_fudge;
2438
+
2439
+ if (expect_true (sleeptime > 0.))
2440
+ {
2441
+ ev_sleep (sleeptime);
2442
+ waittime -= sleeptime;
2443
+ }
2444
+ }
2445
+ }
2446
+
2447
+ #if EV_FEATURE_API
2448
+ ++loop_count;
2449
+ #endif
2450
+ assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
2451
+ backend_poll (EV_A_ waittime);
2452
+ assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
2453
+
2454
+ /* update ev_rt_now, do magic */
2455
+ time_update (EV_A_ waittime + sleeptime);
2456
+ }
2457
+
2458
+ /* queue pending timers and reschedule them */
2459
+ timers_reify (EV_A); /* relative timers called last */
2460
+ #if EV_PERIODIC_ENABLE
2461
+ periodics_reify (EV_A); /* absolute timers called first */
2462
+ #endif
2463
+
2464
+ #if EV_IDLE_ENABLE
2465
+ /* queue idle watchers unless other events are pending */
2466
+ idle_reify (EV_A);
2467
+ #endif
2468
+
2469
+ #if EV_CHECK_ENABLE
2470
+ /* queue check watchers, to be executed first */
2471
+ if (expect_false (checkcnt))
2472
+ queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
2473
+ #endif
2474
+
2475
+ EV_INVOKE_PENDING;
2476
+ }
2477
+ while (expect_true (
2478
+ activecnt
2479
+ && !loop_done
2480
+ && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
2481
+ ));
2482
+
2483
+ if (loop_done == EVBREAK_ONE)
2484
+ loop_done = EVBREAK_CANCEL;
2485
+
2486
+ #if EV_FEATURE_API
2487
+ --loop_depth;
2488
+ #endif
2489
+ }
2490
+
2491
+ void
2492
+ ev_break (EV_P_ int how)
2493
+ {
2494
+ loop_done = how;
2495
+ }
2496
+
2497
+ void
2498
+ ev_ref (EV_P)
2499
+ {
2500
+ ++activecnt;
2501
+ }
2502
+
2503
+ void
2504
+ ev_unref (EV_P)
2505
+ {
2506
+ --activecnt;
2507
+ }
2508
+
2509
+ void
2510
+ ev_now_update (EV_P)
2511
+ {
2512
+ time_update (EV_A_ 1e100);
2513
+ }
2514
+
2515
+ void
2516
+ ev_suspend (EV_P)
2517
+ {
2518
+ ev_now_update (EV_A);
2519
+ }
2520
+
2521
+ void
2522
+ ev_resume (EV_P)
2523
+ {
2524
+ ev_tstamp mn_prev = mn_now;
2525
+
2526
+ ev_now_update (EV_A);
2527
+ timers_reschedule (EV_A_ mn_now - mn_prev);
2528
+ #if EV_PERIODIC_ENABLE
2529
+ /* TODO: really do this? */
2530
+ periodics_reschedule (EV_A);
2531
+ #endif
2532
+ }
2533
+
2534
+ /*****************************************************************************/
2535
+ /* singly-linked list management, used when the expected list length is short */
2536
+
2537
+ inline_size void
2538
+ wlist_add (WL *head, WL elem)
2539
+ {
2540
+ elem->next = *head;
2541
+ *head = elem;
2542
+ }
2543
+
2544
+ inline_size void
2545
+ wlist_del (WL *head, WL elem)
2546
+ {
2547
+ while (*head)
2548
+ {
2549
+ if (expect_true (*head == elem))
2550
+ {
2551
+ *head = elem->next;
2552
+ break;
2553
+ }
2554
+
2555
+ head = &(*head)->next;
2556
+ }
2557
+ }
2558
+
2559
+ /* internal, faster, version of ev_clear_pending */
2560
+ inline_speed void
2561
+ clear_pending (EV_P_ W w)
2562
+ {
2563
+ if (w->pending)
2564
+ {
2565
+ pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
2566
+ w->pending = 0;
2567
+ }
2568
+ }
2569
+
2570
+ int
2571
+ ev_clear_pending (EV_P_ void *w)
2572
+ {
2573
+ W w_ = (W)w;
2574
+ int pending = w_->pending;
2575
+
2576
+ if (expect_true (pending))
2577
+ {
2578
+ ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
2579
+ p->w = (W)&pending_w;
2580
+ w_->pending = 0;
2581
+ return p->events;
2582
+ }
2583
+ else
2584
+ return 0;
2585
+ }
2586
+
2587
+ inline_size void
2588
+ pri_adjust (EV_P_ W w)
2589
+ {
2590
+ int pri = ev_priority (w);
2591
+ pri = pri < EV_MINPRI ? EV_MINPRI : pri;
2592
+ pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
2593
+ ev_set_priority (w, pri);
2594
+ }
2595
+
2596
+ inline_speed void
2597
+ ev_start (EV_P_ W w, int active)
2598
+ {
2599
+ pri_adjust (EV_A_ w);
2600
+ w->active = active;
2601
+ ev_ref (EV_A);
2602
+ }
2603
+
2604
+ inline_size void
2605
+ ev_stop (EV_P_ W w)
2606
+ {
2607
+ ev_unref (EV_A);
2608
+ w->active = 0;
2609
+ }
2610
+
2611
+ /*****************************************************************************/
2612
+
2613
+ void noinline
2614
+ ev_io_start (EV_P_ ev_io *w)
2615
+ {
2616
+ int fd = w->fd;
2617
+
2618
+ if (expect_false (ev_is_active (w)))
2619
+ return;
2620
+
2621
+ assert (("libev: ev_io_start called with negative fd", fd >= 0));
2622
+ assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
2623
+
2624
+ EV_FREQUENT_CHECK;
2625
+
2626
+ ev_start (EV_A_ (W)w, 1);
2627
+ array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
2628
+ wlist_add (&anfds[fd].head, (WL)w);
2629
+
2630
+ fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
2631
+ w->events &= ~EV__IOFDSET;
2632
+
2633
+ EV_FREQUENT_CHECK;
2634
+ }
2635
+
2636
+ void noinline
2637
+ ev_io_stop (EV_P_ ev_io *w)
2638
+ {
2639
+ clear_pending (EV_A_ (W)w);
2640
+ if (expect_false (!ev_is_active (w)))
2641
+ return;
2642
+
2643
+ assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
2644
+
2645
+ EV_FREQUENT_CHECK;
2646
+
2647
+ wlist_del (&anfds[w->fd].head, (WL)w);
2648
+ ev_stop (EV_A_ (W)w);
2649
+
2650
+ fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
2651
+
2652
+ EV_FREQUENT_CHECK;
2653
+ }
2654
+
2655
+ void noinline
2656
+ ev_timer_start (EV_P_ ev_timer *w)
2657
+ {
2658
+ if (expect_false (ev_is_active (w)))
2659
+ return;
2660
+
2661
+ ev_at (w) += mn_now;
2662
+
2663
+ assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
2664
+
2665
+ EV_FREQUENT_CHECK;
2666
+
2667
+ ++timercnt;
2668
+ ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
2669
+ array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
2670
+ ANHE_w (timers [ev_active (w)]) = (WT)w;
2671
+ ANHE_at_cache (timers [ev_active (w)]);
2672
+ upheap (timers, ev_active (w));
2673
+
2674
+ EV_FREQUENT_CHECK;
2675
+
2676
+ /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
2677
+ }
2678
+
2679
+ void noinline
2680
+ ev_timer_stop (EV_P_ ev_timer *w)
2681
+ {
2682
+ clear_pending (EV_A_ (W)w);
2683
+ if (expect_false (!ev_is_active (w)))
2684
+ return;
2685
+
2686
+ EV_FREQUENT_CHECK;
2687
+
2688
+ {
2689
+ int active = ev_active (w);
2690
+
2691
+ assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
2692
+
2693
+ --timercnt;
2694
+
2695
+ if (expect_true (active < timercnt + HEAP0))
2696
+ {
2697
+ timers [active] = timers [timercnt + HEAP0];
2698
+ adjustheap (timers, timercnt, active);
2699
+ }
2700
+ }
2701
+
2702
+ ev_at (w) -= mn_now;
2703
+
2704
+ ev_stop (EV_A_ (W)w);
2705
+
2706
+ EV_FREQUENT_CHECK;
2707
+ }
2708
+
2709
+ void noinline
2710
+ ev_timer_again (EV_P_ ev_timer *w)
2711
+ {
2712
+ EV_FREQUENT_CHECK;
2713
+
2714
+ if (ev_is_active (w))
2715
+ {
2716
+ if (w->repeat)
2717
+ {
2718
+ ev_at (w) = mn_now + w->repeat;
2719
+ ANHE_at_cache (timers [ev_active (w)]);
2720
+ adjustheap (timers, timercnt, ev_active (w));
2721
+ }
2722
+ else
2723
+ ev_timer_stop (EV_A_ w);
2724
+ }
2725
+ else if (w->repeat)
2726
+ {
2727
+ ev_at (w) = w->repeat;
2728
+ ev_timer_start (EV_A_ w);
2729
+ }
2730
+
2731
+ EV_FREQUENT_CHECK;
2732
+ }
2733
+
2734
+ ev_tstamp
2735
+ ev_timer_remaining (EV_P_ ev_timer *w)
2736
+ {
2737
+ return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
2738
+ }
2739
+
2740
+ #if EV_PERIODIC_ENABLE
2741
+ void noinline
2742
+ ev_periodic_start (EV_P_ ev_periodic *w)
2743
+ {
2744
+ if (expect_false (ev_is_active (w)))
2745
+ return;
2746
+
2747
+ if (w->reschedule_cb)
2748
+ ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2749
+ else if (w->interval)
2750
+ {
2751
+ assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
2752
+ /* this formula differs from the one in periodic_reify because we do not always round up */
2753
+ ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
2754
+ }
2755
+ else
2756
+ ev_at (w) = w->offset;
2757
+
2758
+ EV_FREQUENT_CHECK;
2759
+
2760
+ ++periodiccnt;
2761
+ ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
2762
+ array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
2763
+ ANHE_w (periodics [ev_active (w)]) = (WT)w;
2764
+ ANHE_at_cache (periodics [ev_active (w)]);
2765
+ upheap (periodics, ev_active (w));
2766
+
2767
+ EV_FREQUENT_CHECK;
2768
+
2769
+ /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
2770
+ }
2771
+
2772
+ void noinline
2773
+ ev_periodic_stop (EV_P_ ev_periodic *w)
2774
+ {
2775
+ clear_pending (EV_A_ (W)w);
2776
+ if (expect_false (!ev_is_active (w)))
2777
+ return;
2778
+
2779
+ EV_FREQUENT_CHECK;
2780
+
2781
+ {
2782
+ int active = ev_active (w);
2783
+
2784
+ assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
2785
+
2786
+ --periodiccnt;
2787
+
2788
+ if (expect_true (active < periodiccnt + HEAP0))
2789
+ {
2790
+ periodics [active] = periodics [periodiccnt + HEAP0];
2791
+ adjustheap (periodics, periodiccnt, active);
2792
+ }
2793
+ }
2794
+
2795
+ ev_stop (EV_A_ (W)w);
2796
+
2797
+ EV_FREQUENT_CHECK;
2798
+ }
2799
+
2800
+ void noinline
2801
+ ev_periodic_again (EV_P_ ev_periodic *w)
2802
+ {
2803
+ /* TODO: use adjustheap and recalculation */
2804
+ ev_periodic_stop (EV_A_ w);
2805
+ ev_periodic_start (EV_A_ w);
2806
+ }
2807
+ #endif
2808
+
2809
+ #ifndef SA_RESTART
2810
+ # define SA_RESTART 0
2811
+ #endif
2812
+
2813
+ #if EV_SIGNAL_ENABLE
2814
+
2815
+ void noinline
2816
+ ev_signal_start (EV_P_ ev_signal *w)
2817
+ {
2818
+ if (expect_false (ev_is_active (w)))
2819
+ return;
2820
+
2821
+ assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
2822
+
2823
+ #if EV_MULTIPLICITY
2824
+ assert (("libev: a signal must not be attached to two different loops",
2825
+ !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));
2826
+
2827
+ signals [w->signum - 1].loop = EV_A;
2828
+ #endif
2829
+
2830
+ EV_FREQUENT_CHECK;
2831
+
2832
+ #if EV_USE_SIGNALFD
2833
+ if (sigfd == -2)
2834
+ {
2835
+ sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
2836
+ if (sigfd < 0 && errno == EINVAL)
2837
+ sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */
2838
+
2839
+ if (sigfd >= 0)
2840
+ {
2841
+ fd_intern (sigfd); /* doing it twice will not hurt */
2842
+
2843
+ sigemptyset (&sigfd_set);
2844
+
2845
+ ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
2846
+ ev_set_priority (&sigfd_w, EV_MAXPRI);
2847
+ ev_io_start (EV_A_ &sigfd_w);
2848
+ ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
2849
+ }
2850
+ }
2851
+
2852
+ if (sigfd >= 0)
2853
+ {
2854
+ /* TODO: check .head */
2855
+ sigaddset (&sigfd_set, w->signum);
2856
+ sigprocmask (SIG_BLOCK, &sigfd_set, 0);
2857
+
2858
+ signalfd (sigfd, &sigfd_set, 0);
2859
+ }
2860
+ #endif
2861
+
2862
+ ev_start (EV_A_ (W)w, 1);
2863
+ wlist_add (&signals [w->signum - 1].head, (WL)w);
2864
+
2865
+ if (!((WL)w)->next)
2866
+ # if EV_USE_SIGNALFD
2867
+ if (sigfd < 0) /*TODO*/
2868
+ # endif
2869
+ {
2870
+ # ifdef _WIN32
2871
+ evpipe_init (EV_A);
2872
+
2873
+ signal (w->signum, ev_sighandler);
2874
+ # else
2875
+ struct sigaction sa;
2876
+
2877
+ evpipe_init (EV_A);
2878
+
2879
+ sa.sa_handler = ev_sighandler;
2880
+ sigfillset (&sa.sa_mask);
2881
+ sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
2882
+ sigaction (w->signum, &sa, 0);
2883
+
2884
+ sigemptyset (&sa.sa_mask);
2885
+ sigaddset (&sa.sa_mask, w->signum);
2886
+ sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
2887
+ #endif
2888
+ }
2889
+
2890
+ EV_FREQUENT_CHECK;
2891
+ }
2892
+
2893
+ void noinline
2894
+ ev_signal_stop (EV_P_ ev_signal *w)
2895
+ {
2896
+ clear_pending (EV_A_ (W)w);
2897
+ if (expect_false (!ev_is_active (w)))
2898
+ return;
2899
+
2900
+ EV_FREQUENT_CHECK;
2901
+
2902
+ wlist_del (&signals [w->signum - 1].head, (WL)w);
2903
+ ev_stop (EV_A_ (W)w);
2904
+
2905
+ if (!signals [w->signum - 1].head)
2906
+ {
2907
+ #if EV_MULTIPLICITY
2908
+ signals [w->signum - 1].loop = 0; /* unattach from signal */
2909
+ #endif
2910
+ #if EV_USE_SIGNALFD
2911
+ if (sigfd >= 0)
2912
+ {
2913
+ sigset_t ss;
2914
+
2915
+ sigemptyset (&ss);
2916
+ sigaddset (&ss, w->signum);
2917
+ sigdelset (&sigfd_set, w->signum);
2918
+
2919
+ signalfd (sigfd, &sigfd_set, 0);
2920
+ sigprocmask (SIG_UNBLOCK, &ss, 0);
2921
+ }
2922
+ else
2923
+ #endif
2924
+ signal (w->signum, SIG_DFL);
2925
+ }
2926
+
2927
+ EV_FREQUENT_CHECK;
2928
+ }
2929
+
2930
+ #endif
2931
+
2932
+ #if EV_CHILD_ENABLE
2933
+
2934
+ void
2935
+ ev_child_start (EV_P_ ev_child *w)
2936
+ {
2937
+ #if EV_MULTIPLICITY
2938
+ assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
2939
+ #endif
2940
+ if (expect_false (ev_is_active (w)))
2941
+ return;
2942
+
2943
+ EV_FREQUENT_CHECK;
2944
+
2945
+ ev_start (EV_A_ (W)w, 1);
2946
+ wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
2947
+
2948
+ EV_FREQUENT_CHECK;
2949
+ }
2950
+
2951
+ void
2952
+ ev_child_stop (EV_P_ ev_child *w)
2953
+ {
2954
+ clear_pending (EV_A_ (W)w);
2955
+ if (expect_false (!ev_is_active (w)))
2956
+ return;
2957
+
2958
+ EV_FREQUENT_CHECK;
2959
+
2960
+ wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
2961
+ ev_stop (EV_A_ (W)w);
2962
+
2963
+ EV_FREQUENT_CHECK;
2964
+ }
2965
+
2966
+ #endif
2967
+
2968
+ #if EV_STAT_ENABLE
2969
+
2970
+ # ifdef _WIN32
2971
+ # undef lstat
2972
+ # define lstat(a,b) _stati64 (a,b)
2973
+ # endif
2974
+
2975
+ #define DEF_STAT_INTERVAL 5.0074891
2976
+ #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
2977
+ #define MIN_STAT_INTERVAL 0.1074891
2978
+
2979
+ static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
2980
+
2981
+ #if EV_USE_INOTIFY
2982
+
2983
+ /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
2984
+ # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
2985
+
2986
+ static void noinline
2987
+ infy_add (EV_P_ ev_stat *w)
2988
+ {
2989
+ w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);
2990
+
2991
+ if (w->wd >= 0)
2992
+ {
2993
+ struct statfs sfs;
2994
+
2995
+ /* now local changes will be tracked by inotify, but remote changes won't */
2996
+ /* unless the filesystem is known to be local, we therefore still poll */
2997
+ /* also do poll on <2.6.25, but with normal frequency */
2998
+
2999
+ if (!fs_2625)
3000
+ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3001
+ else if (!statfs (w->path, &sfs)
3002
+ && (sfs.f_type == 0x1373 /* devfs */
3003
+ || sfs.f_type == 0xEF53 /* ext2/3 */
3004
+ || sfs.f_type == 0x3153464a /* jfs */
3005
+ || sfs.f_type == 0x52654973 /* reiser3 */
3006
+ || sfs.f_type == 0x01021994 /* tmpfs */
3007
+ || sfs.f_type == 0x58465342 /* xfs */))
3008
+ w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
3009
+ else
3010
+ w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
3011
+ }
3012
+ else
3013
+ {
3014
+ /* can't use inotify, continue to stat */
3015
+ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3016
+
3017
+ /* if path is not there, monitor some parent directory for speedup hints */
3018
+ /* note that exceeding the hardcoded path limit is not a correctness issue, */
3019
+ /* but an efficiency issue only */
3020
+ if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
3021
+ {
3022
+ char path [4096];
3023
+ strcpy (path, w->path);
3024
+
3025
+ do
3026
+ {
3027
+ int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
3028
+ | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);
3029
+
3030
+ char *pend = strrchr (path, '/');
3031
+
3032
+ if (!pend || pend == path)
3033
+ break;
3034
+
3035
+ *pend = 0;
3036
+ w->wd = inotify_add_watch (fs_fd, path, mask);
3037
+ }
3038
+ while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
3039
+ }
3040
+ }
3041
+
3042
+ if (w->wd >= 0)
3043
+ wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
3044
+
3045
+ /* now re-arm timer, if required */
3046
+ if (ev_is_active (&w->timer)) ev_ref (EV_A);
3047
+ ev_timer_again (EV_A_ &w->timer);
3048
+ if (ev_is_active (&w->timer)) ev_unref (EV_A);
3049
+ }
3050
+
3051
+ static void noinline
3052
+ infy_del (EV_P_ ev_stat *w)
3053
+ {
3054
+ int slot;
3055
+ int wd = w->wd;
3056
+
3057
+ if (wd < 0)
3058
+ return;
3059
+
3060
+ w->wd = -2;
3061
+ slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
3062
+ wlist_del (&fs_hash [slot].head, (WL)w);
3063
+
3064
+ /* remove this watcher, if others are watching it, they will rearm */
3065
+ inotify_rm_watch (fs_fd, wd);
3066
+ }
3067
+
3068
+ static void noinline
3069
+ infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
3070
+ {
3071
+ if (slot < 0)
3072
+ /* overflow, need to check for all hash slots */
3073
+ for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
3074
+ infy_wd (EV_A_ slot, wd, ev);
3075
+ else
3076
+ {
3077
+ WL w_;
3078
+
3079
+ for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
3080
+ {
3081
+ ev_stat *w = (ev_stat *)w_;
3082
+ w_ = w_->next; /* lets us remove this watcher and all before it */
3083
+
3084
+ if (w->wd == wd || wd == -1)
3085
+ {
3086
+ if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
3087
+ {
3088
+ wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
3089
+ w->wd = -1;
3090
+ infy_add (EV_A_ w); /* re-add, no matter what */
3091
+ }
3092
+
3093
+ stat_timer_cb (EV_A_ &w->timer, 0);
3094
+ }
3095
+ }
3096
+ }
3097
+ }
3098
+
3099
+ static void
3100
+ infy_cb (EV_P_ ev_io *w, int revents)
3101
+ {
3102
+ char buf [EV_INOTIFY_BUFSIZE];
3103
+ int ofs;
3104
+ int len = read (fs_fd, buf, sizeof (buf));
3105
+
3106
+ for (ofs = 0; ofs < len; )
3107
+ {
3108
+ struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
3109
+ infy_wd (EV_A_ ev->wd, ev->wd, ev);
3110
+ ofs += sizeof (struct inotify_event) + ev->len;
3111
+ }
3112
+ }
3113
+
3114
+ inline_size void
3115
+ ev_check_2625 (EV_P)
3116
+ {
3117
+ /* kernels < 2.6.25 are borked
3118
+ * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
3119
+ */
3120
+ if (ev_linux_version () < 0x020619)
3121
+ return;
3122
+
3123
+ fs_2625 = 1;
3124
+ }
3125
+
3126
+ inline_size int
3127
+ infy_newfd (void)
3128
+ {
3129
+ #if defined (IN_CLOEXEC) && defined (IN_NONBLOCK)
3130
+ int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
3131
+ if (fd >= 0)
3132
+ return fd;
3133
+ #endif
3134
+ return inotify_init ();
3135
+ }
3136
+
3137
+ inline_size void
3138
+ infy_init (EV_P)
3139
+ {
3140
+ if (fs_fd != -2)
3141
+ return;
3142
+
3143
+ fs_fd = -1;
3144
+
3145
+ ev_check_2625 (EV_A);
3146
+
3147
+ fs_fd = infy_newfd ();
3148
+
3149
+ if (fs_fd >= 0)
3150
+ {
3151
+ fd_intern (fs_fd);
3152
+ ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
3153
+ ev_set_priority (&fs_w, EV_MAXPRI);
3154
+ ev_io_start (EV_A_ &fs_w);
3155
+ ev_unref (EV_A);
3156
+ }
3157
+ }
3158
+
3159
+ inline_size void
3160
+ infy_fork (EV_P)
3161
+ {
3162
+ int slot;
3163
+
3164
+ if (fs_fd < 0)
3165
+ return;
3166
+
3167
+ ev_ref (EV_A);
3168
+ ev_io_stop (EV_A_ &fs_w);
3169
+ close (fs_fd);
3170
+ fs_fd = infy_newfd ();
3171
+
3172
+ if (fs_fd >= 0)
3173
+ {
3174
+ fd_intern (fs_fd);
3175
+ ev_io_set (&fs_w, fs_fd, EV_READ);
3176
+ ev_io_start (EV_A_ &fs_w);
3177
+ ev_unref (EV_A);
3178
+ }
3179
+
3180
+ for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
3181
+ {
3182
+ WL w_ = fs_hash [slot].head;
3183
+ fs_hash [slot].head = 0;
3184
+
3185
+ while (w_)
3186
+ {
3187
+ ev_stat *w = (ev_stat *)w_;
3188
+ w_ = w_->next; /* lets us add this watcher */
3189
+
3190
+ w->wd = -1;
3191
+
3192
+ if (fs_fd >= 0)
3193
+ infy_add (EV_A_ w); /* re-add, no matter what */
3194
+ else
3195
+ {
3196
+ w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3197
+ if (ev_is_active (&w->timer)) ev_ref (EV_A);
3198
+ ev_timer_again (EV_A_ &w->timer);
3199
+ if (ev_is_active (&w->timer)) ev_unref (EV_A);
3200
+ }
3201
+ }
3202
+ }
3203
+ }
3204
+
3205
+ #endif
3206
+
3207
+ #ifdef _WIN32
3208
+ # define EV_LSTAT(p,b) _stati64 (p, b)
3209
+ #else
3210
+ # define EV_LSTAT(p,b) lstat (p, b)
3211
+ #endif
3212
+
3213
+ void
3214
+ ev_stat_stat (EV_P_ ev_stat *w)
3215
+ {
3216
+ if (lstat (w->path, &w->attr) < 0)
3217
+ w->attr.st_nlink = 0;
3218
+ else if (!w->attr.st_nlink)
3219
+ w->attr.st_nlink = 1;
3220
+ }
3221
+
3222
+ static void noinline
3223
+ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
3224
+ {
3225
+ ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
3226
+
3227
+ ev_statdata prev = w->attr;
3228
+ ev_stat_stat (EV_A_ w);
3229
+
3230
+ /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
3231
+ if (
3232
+ prev.st_dev != w->attr.st_dev
3233
+ || prev.st_ino != w->attr.st_ino
3234
+ || prev.st_mode != w->attr.st_mode
3235
+ || prev.st_nlink != w->attr.st_nlink
3236
+ || prev.st_uid != w->attr.st_uid
3237
+ || prev.st_gid != w->attr.st_gid
3238
+ || prev.st_rdev != w->attr.st_rdev
3239
+ || prev.st_size != w->attr.st_size
3240
+ || prev.st_atime != w->attr.st_atime
3241
+ || prev.st_mtime != w->attr.st_mtime
3242
+ || prev.st_ctime != w->attr.st_ctime
3243
+ ) {
3244
+ /* we only update w->prev on actual differences */
3245
+ /* in case we test more often than invoke the callback, */
3246
+ /* to ensure that prev is always different to attr */
3247
+ w->prev = prev;
3248
+
3249
+ #if EV_USE_INOTIFY
3250
+ if (fs_fd >= 0)
3251
+ {
3252
+ infy_del (EV_A_ w);
3253
+ infy_add (EV_A_ w);
3254
+ ev_stat_stat (EV_A_ w); /* avoid race... */
3255
+ }
3256
+ #endif
3257
+
3258
+ ev_feed_event (EV_A_ w, EV_STAT);
3259
+ }
3260
+ }
3261
+
3262
+ void
3263
+ ev_stat_start (EV_P_ ev_stat *w)
3264
+ {
3265
+ if (expect_false (ev_is_active (w)))
3266
+ return;
3267
+
3268
+ ev_stat_stat (EV_A_ w);
3269
+
3270
+ if (w->interval < MIN_STAT_INTERVAL && w->interval)
3271
+ w->interval = MIN_STAT_INTERVAL;
3272
+
3273
+ ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
3274
+ ev_set_priority (&w->timer, ev_priority (w));
3275
+
3276
+ #if EV_USE_INOTIFY
3277
+ infy_init (EV_A);
3278
+
3279
+ if (fs_fd >= 0)
3280
+ infy_add (EV_A_ w);
3281
+ else
3282
+ #endif
3283
+ {
3284
+ ev_timer_again (EV_A_ &w->timer);
3285
+ ev_unref (EV_A);
3286
+ }
3287
+
3288
+ ev_start (EV_A_ (W)w, 1);
3289
+
3290
+ EV_FREQUENT_CHECK;
3291
+ }
3292
+
3293
+ void
3294
+ ev_stat_stop (EV_P_ ev_stat *w)
3295
+ {
3296
+ clear_pending (EV_A_ (W)w);
3297
+ if (expect_false (!ev_is_active (w)))
3298
+ return;
3299
+
3300
+ EV_FREQUENT_CHECK;
3301
+
3302
+ #if EV_USE_INOTIFY
3303
+ infy_del (EV_A_ w);
3304
+ #endif
3305
+
3306
+ if (ev_is_active (&w->timer))
3307
+ {
3308
+ ev_ref (EV_A);
3309
+ ev_timer_stop (EV_A_ &w->timer);
3310
+ }
3311
+
3312
+ ev_stop (EV_A_ (W)w);
3313
+
3314
+ EV_FREQUENT_CHECK;
3315
+ }
3316
+ #endif
3317
+
3318
+ #if EV_IDLE_ENABLE
3319
+ void
3320
+ ev_idle_start (EV_P_ ev_idle *w)
3321
+ {
3322
+ if (expect_false (ev_is_active (w)))
3323
+ return;
3324
+
3325
+ pri_adjust (EV_A_ (W)w);
3326
+
3327
+ EV_FREQUENT_CHECK;
3328
+
3329
+ {
3330
+ int active = ++idlecnt [ABSPRI (w)];
3331
+
3332
+ ++idleall;
3333
+ ev_start (EV_A_ (W)w, active);
3334
+
3335
+ array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
3336
+ idles [ABSPRI (w)][active - 1] = w;
3337
+ }
3338
+
3339
+ EV_FREQUENT_CHECK;
3340
+ }
3341
+
3342
+ void
3343
+ ev_idle_stop (EV_P_ ev_idle *w)
3344
+ {
3345
+ clear_pending (EV_A_ (W)w);
3346
+ if (expect_false (!ev_is_active (w)))
3347
+ return;
3348
+
3349
+ EV_FREQUENT_CHECK;
3350
+
3351
+ {
3352
+ int active = ev_active (w);
3353
+
3354
+ idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
3355
+ ev_active (idles [ABSPRI (w)][active - 1]) = active;
3356
+
3357
+ ev_stop (EV_A_ (W)w);
3358
+ --idleall;
3359
+ }
3360
+
3361
+ EV_FREQUENT_CHECK;
3362
+ }
3363
+ #endif
3364
+
3365
+ #if EV_PREPARE_ENABLE
3366
+ void
3367
+ ev_prepare_start (EV_P_ ev_prepare *w)
3368
+ {
3369
+ if (expect_false (ev_is_active (w)))
3370
+ return;
3371
+
3372
+ EV_FREQUENT_CHECK;
3373
+
3374
+ ev_start (EV_A_ (W)w, ++preparecnt);
3375
+ array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
3376
+ prepares [preparecnt - 1] = w;
3377
+
3378
+ EV_FREQUENT_CHECK;
3379
+ }
3380
+
3381
+ void
3382
+ ev_prepare_stop (EV_P_ ev_prepare *w)
3383
+ {
3384
+ clear_pending (EV_A_ (W)w);
3385
+ if (expect_false (!ev_is_active (w)))
3386
+ return;
3387
+
3388
+ EV_FREQUENT_CHECK;
3389
+
3390
+ {
3391
+ int active = ev_active (w);
3392
+
3393
+ prepares [active - 1] = prepares [--preparecnt];
3394
+ ev_active (prepares [active - 1]) = active;
3395
+ }
3396
+
3397
+ ev_stop (EV_A_ (W)w);
3398
+
3399
+ EV_FREQUENT_CHECK;
3400
+ }
3401
+ #endif
3402
+
3403
+ #if EV_CHECK_ENABLE
3404
+ void
3405
+ ev_check_start (EV_P_ ev_check *w)
3406
+ {
3407
+ if (expect_false (ev_is_active (w)))
3408
+ return;
3409
+
3410
+ EV_FREQUENT_CHECK;
3411
+
3412
+ ev_start (EV_A_ (W)w, ++checkcnt);
3413
+ array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
3414
+ checks [checkcnt - 1] = w;
3415
+
3416
+ EV_FREQUENT_CHECK;
3417
+ }
3418
+
3419
+ void
3420
+ ev_check_stop (EV_P_ ev_check *w)
3421
+ {
3422
+ clear_pending (EV_A_ (W)w);
3423
+ if (expect_false (!ev_is_active (w)))
3424
+ return;
3425
+
3426
+ EV_FREQUENT_CHECK;
3427
+
3428
+ {
3429
+ int active = ev_active (w);
3430
+
3431
+ checks [active - 1] = checks [--checkcnt];
3432
+ ev_active (checks [active - 1]) = active;
3433
+ }
3434
+
3435
+ ev_stop (EV_A_ (W)w);
3436
+
3437
+ EV_FREQUENT_CHECK;
3438
+ }
3439
+ #endif
3440
+
3441
+ #if EV_EMBED_ENABLE
3442
+ void noinline
3443
+ ev_embed_sweep (EV_P_ ev_embed *w)
3444
+ {
3445
+ ev_run (w->other, EVRUN_NOWAIT);
3446
+ }
3447
+
3448
+ static void
3449
+ embed_io_cb (EV_P_ ev_io *io, int revents)
3450
+ {
3451
+ ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
3452
+
3453
+ if (ev_cb (w))
3454
+ ev_feed_event (EV_A_ (W)w, EV_EMBED);
3455
+ else
3456
+ ev_run (w->other, EVRUN_NOWAIT);
3457
+ }
3458
+
3459
+ static void
3460
+ embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
3461
+ {
3462
+ ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));
3463
+
3464
+ {
3465
+ EV_P = w->other;
3466
+
3467
+ while (fdchangecnt)
3468
+ {
3469
+ fd_reify (EV_A);
3470
+ ev_run (EV_A_ EVRUN_NOWAIT);
3471
+ }
3472
+ }
3473
+ }
3474
+
3475
+ static void
3476
+ embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
3477
+ {
3478
+ ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));
3479
+
3480
+ ev_embed_stop (EV_A_ w);
3481
+
3482
+ {
3483
+ EV_P = w->other;
3484
+
3485
+ ev_loop_fork (EV_A);
3486
+ ev_run (EV_A_ EVRUN_NOWAIT);
3487
+ }
3488
+
3489
+ ev_embed_start (EV_A_ w);
3490
+ }
3491
+
3492
+ #if 0
3493
+ static void
3494
+ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
3495
+ {
3496
+ ev_idle_stop (EV_A_ idle);
3497
+ }
3498
+ #endif
3499
+
3500
+ void
3501
+ ev_embed_start (EV_P_ ev_embed *w)
3502
+ {
3503
+ if (expect_false (ev_is_active (w)))
3504
+ return;
3505
+
3506
+ {
3507
+ EV_P = w->other;
3508
+ assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
3509
+ ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
3510
+ }
3511
+
3512
+ EV_FREQUENT_CHECK;
3513
+
3514
+ ev_set_priority (&w->io, ev_priority (w));
3515
+ ev_io_start (EV_A_ &w->io);
3516
+
3517
+ ev_prepare_init (&w->prepare, embed_prepare_cb);
3518
+ ev_set_priority (&w->prepare, EV_MINPRI);
3519
+ ev_prepare_start (EV_A_ &w->prepare);
3520
+
3521
+ ev_fork_init (&w->fork, embed_fork_cb);
3522
+ ev_fork_start (EV_A_ &w->fork);
3523
+
3524
+ /*ev_idle_init (&w->idle, embed_idle_cb);*/
3525
+
3526
+ ev_start (EV_A_ (W)w, 1);
3527
+
3528
+ EV_FREQUENT_CHECK;
3529
+ }
3530
+
3531
+ void
3532
+ ev_embed_stop (EV_P_ ev_embed *w)
3533
+ {
3534
+ clear_pending (EV_A_ (W)w);
3535
+ if (expect_false (!ev_is_active (w)))
3536
+ return;
3537
+
3538
+ EV_FREQUENT_CHECK;
3539
+
3540
+ ev_io_stop (EV_A_ &w->io);
3541
+ ev_prepare_stop (EV_A_ &w->prepare);
3542
+ ev_fork_stop (EV_A_ &w->fork);
3543
+
3544
+ ev_stop (EV_A_ (W)w);
3545
+
3546
+ EV_FREQUENT_CHECK;
3547
+ }
3548
+ #endif
3549
+
3550
+ #if EV_FORK_ENABLE
3551
+ void
3552
+ ev_fork_start (EV_P_ ev_fork *w)
3553
+ {
3554
+ if (expect_false (ev_is_active (w)))
3555
+ return;
3556
+
3557
+ EV_FREQUENT_CHECK;
3558
+
3559
+ ev_start (EV_A_ (W)w, ++forkcnt);
3560
+ array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
3561
+ forks [forkcnt - 1] = w;
3562
+
3563
+ EV_FREQUENT_CHECK;
3564
+ }
3565
+
3566
+ void
3567
+ ev_fork_stop (EV_P_ ev_fork *w)
3568
+ {
3569
+ clear_pending (EV_A_ (W)w);
3570
+ if (expect_false (!ev_is_active (w)))
3571
+ return;
3572
+
3573
+ EV_FREQUENT_CHECK;
3574
+
3575
+ {
3576
+ int active = ev_active (w);
3577
+
3578
+ forks [active - 1] = forks [--forkcnt];
3579
+ ev_active (forks [active - 1]) = active;
3580
+ }
3581
+
3582
+ ev_stop (EV_A_ (W)w);
3583
+
3584
+ EV_FREQUENT_CHECK;
3585
+ }
3586
+ #endif
3587
+
3588
+ #if EV_CLEANUP_ENABLE
3589
+ void
3590
+ ev_cleanup_start (EV_P_ ev_cleanup *w)
3591
+ {
3592
+ if (expect_false (ev_is_active (w)))
3593
+ return;
3594
+
3595
+ EV_FREQUENT_CHECK;
3596
+
3597
+ ev_start (EV_A_ (W)w, ++cleanupcnt);
3598
+ array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
3599
+ cleanups [cleanupcnt - 1] = w;
3600
+
3601
+ /* cleanup watchers should never keep a refcount on the loop */
3602
+ ev_unref (EV_A);
3603
+ EV_FREQUENT_CHECK;
3604
+ }
3605
+
3606
+ void
3607
+ ev_cleanup_stop (EV_P_ ev_cleanup *w)
3608
+ {
3609
+ clear_pending (EV_A_ (W)w);
3610
+ if (expect_false (!ev_is_active (w)))
3611
+ return;
3612
+
3613
+ EV_FREQUENT_CHECK;
3614
+ ev_ref (EV_A);
3615
+
3616
+ {
3617
+ int active = ev_active (w);
3618
+
3619
+ cleanups [active - 1] = cleanups [--cleanupcnt];
3620
+ ev_active (cleanups [active - 1]) = active;
3621
+ }
3622
+
3623
+ ev_stop (EV_A_ (W)w);
3624
+
3625
+ EV_FREQUENT_CHECK;
3626
+ }
3627
+ #endif
3628
+
3629
+ #if EV_ASYNC_ENABLE
3630
+ void
3631
+ ev_async_start (EV_P_ ev_async *w)
3632
+ {
3633
+ if (expect_false (ev_is_active (w)))
3634
+ return;
3635
+
3636
+ w->sent = 0;
3637
+
3638
+ evpipe_init (EV_A);
3639
+
3640
+ EV_FREQUENT_CHECK;
3641
+
3642
+ ev_start (EV_A_ (W)w, ++asynccnt);
3643
+ array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
3644
+ asyncs [asynccnt - 1] = w;
3645
+
3646
+ EV_FREQUENT_CHECK;
3647
+ }
3648
+
3649
+ void
3650
+ ev_async_stop (EV_P_ ev_async *w)
3651
+ {
3652
+ clear_pending (EV_A_ (W)w);
3653
+ if (expect_false (!ev_is_active (w)))
3654
+ return;
3655
+
3656
+ EV_FREQUENT_CHECK;
3657
+
3658
+ {
3659
+ int active = ev_active (w);
3660
+
3661
+ asyncs [active - 1] = asyncs [--asynccnt];
3662
+ ev_active (asyncs [active - 1]) = active;
3663
+ }
3664
+
3665
+ ev_stop (EV_A_ (W)w);
3666
+
3667
+ EV_FREQUENT_CHECK;
3668
+ }
3669
+
3670
+ void
3671
+ ev_async_send (EV_P_ ev_async *w)
3672
+ {
3673
+ w->sent = 1;
3674
+ evpipe_write (EV_A_ &async_pending);
3675
+ }
3676
+ #endif
3677
+
3678
+ /*****************************************************************************/
3679
+
3680
+ struct ev_once
3681
+ {
3682
+ ev_io io;
3683
+ ev_timer to;
3684
+ void (*cb)(int revents, void *arg);
3685
+ void *arg;
3686
+ };
3687
+
3688
+ static void
3689
+ once_cb (EV_P_ struct ev_once *once, int revents)
3690
+ {
3691
+ void (*cb)(int revents, void *arg) = once->cb;
3692
+ void *arg = once->arg;
3693
+
3694
+ ev_io_stop (EV_A_ &once->io);
3695
+ ev_timer_stop (EV_A_ &once->to);
3696
+ ev_free (once);
3697
+
3698
+ cb (revents, arg);
3699
+ }
3700
+
3701
+ static void
3702
+ once_cb_io (EV_P_ ev_io *w, int revents)
3703
+ {
3704
+ struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));
3705
+
3706
+ once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
3707
+ }
3708
+
3709
+ static void
3710
+ once_cb_to (EV_P_ ev_timer *w, int revents)
3711
+ {
3712
+ struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));
3713
+
3714
+ once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
3715
+ }
3716
+
3717
+ void
3718
+ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
3719
+ {
3720
+ struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
3721
+
3722
+ if (expect_false (!once))
3723
+ {
3724
+ cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
3725
+ return;
3726
+ }
3727
+
3728
+ once->cb = cb;
3729
+ once->arg = arg;
3730
+
3731
+ ev_init (&once->io, once_cb_io);
3732
+ if (fd >= 0)
3733
+ {
3734
+ ev_io_set (&once->io, fd, events);
3735
+ ev_io_start (EV_A_ &once->io);
3736
+ }
3737
+
3738
+ ev_init (&once->to, once_cb_to);
3739
+ if (timeout >= 0.)
3740
+ {
3741
+ ev_timer_set (&once->to, timeout, 0.);
3742
+ ev_timer_start (EV_A_ &once->to);
3743
+ }
3744
+ }
3745
+
3746
+ /*****************************************************************************/
3747
+
3748
+ #if EV_WALK_ENABLE
3749
+ void
3750
+ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
3751
+ {
3752
+ int i, j;
3753
+ ev_watcher_list *wl, *wn;
3754
+
3755
+ if (types & (EV_IO | EV_EMBED))
3756
+ for (i = 0; i < anfdmax; ++i)
3757
+ for (wl = anfds [i].head; wl; )
3758
+ {
3759
+ wn = wl->next;
3760
+
3761
+ #if EV_EMBED_ENABLE
3762
+ if (ev_cb ((ev_io *)wl) == embed_io_cb)
3763
+ {
3764
+ if (types & EV_EMBED)
3765
+ cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
3766
+ }
3767
+ else
3768
+ #endif
3769
+ #if EV_USE_INOTIFY
3770
+ if (ev_cb ((ev_io *)wl) == infy_cb)
3771
+ ;
3772
+ else
3773
+ #endif
3774
+ if ((ev_io *)wl != &pipe_w)
3775
+ if (types & EV_IO)
3776
+ cb (EV_A_ EV_IO, wl);
3777
+
3778
+ wl = wn;
3779
+ }
3780
+
3781
+ if (types & (EV_TIMER | EV_STAT))
3782
+ for (i = timercnt + HEAP0; i-- > HEAP0; )
3783
+ #if EV_STAT_ENABLE
3784
+ /*TODO: timer is not always active*/
3785
+ if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
3786
+ {
3787
+ if (types & EV_STAT)
3788
+ cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
3789
+ }
3790
+ else
3791
+ #endif
3792
+ if (types & EV_TIMER)
3793
+ cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));
3794
+
3795
+ #if EV_PERIODIC_ENABLE
3796
+ if (types & EV_PERIODIC)
3797
+ for (i = periodiccnt + HEAP0; i-- > HEAP0; )
3798
+ cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
3799
+ #endif
3800
+
3801
+ #if EV_IDLE_ENABLE
3802
+ if (types & EV_IDLE)
3803
+ for (j = NUMPRI; j--; )
3804
+ for (i = idlecnt [j]; i--; )
3805
+ cb (EV_A_ EV_IDLE, idles [j][i]);
3806
+ #endif
3807
+
3808
+ #if EV_FORK_ENABLE
3809
+ if (types & EV_FORK)
3810
+ for (i = forkcnt; i--; )
3811
+ if (ev_cb (forks [i]) != embed_fork_cb)
3812
+ cb (EV_A_ EV_FORK, forks [i]);
3813
+ #endif
3814
+
3815
+ #if EV_ASYNC_ENABLE
3816
+ if (types & EV_ASYNC)
3817
+ for (i = asynccnt; i--; )
3818
+ cb (EV_A_ EV_ASYNC, asyncs [i]);
3819
+ #endif
3820
+
3821
+ #if EV_PREPARE_ENABLE
3822
+ if (types & EV_PREPARE)
3823
+ for (i = preparecnt; i--; )
3824
+ # if EV_EMBED_ENABLE
3825
+ if (ev_cb (prepares [i]) != embed_prepare_cb)
3826
+ # endif
3827
+ cb (EV_A_ EV_PREPARE, prepares [i]);
3828
+ #endif
3829
+
3830
+ #if EV_CHECK_ENABLE
3831
+ if (types & EV_CHECK)
3832
+ for (i = checkcnt; i--; )
3833
+ cb (EV_A_ EV_CHECK, checks [i]);
3834
+ #endif
3835
+
3836
+ #if EV_SIGNAL_ENABLE
3837
+ if (types & EV_SIGNAL)
3838
+ for (i = 0; i < EV_NSIG - 1; ++i)
3839
+ for (wl = signals [i].head; wl; )
3840
+ {
3841
+ wn = wl->next;
3842
+ cb (EV_A_ EV_SIGNAL, wl);
3843
+ wl = wn;
3844
+ }
3845
+ #endif
3846
+
3847
+ #if EV_CHILD_ENABLE
3848
+ if (types & EV_CHILD)
3849
+ for (i = (EV_PID_HASHSIZE); i--; )
3850
+ for (wl = childs [i]; wl; )
3851
+ {
3852
+ wn = wl->next;
3853
+ cb (EV_A_ EV_CHILD, wl);
3854
+ wl = wn;
3855
+ }
3856
+ #endif
3857
+ /* EV_STAT 0x00001000 /* stat data changed */
3858
+ /* EV_EMBED 0x00010000 /* embedded event loop needs sweep */
3859
+ }
3860
+ #endif
3861
+
3862
+ #if EV_MULTIPLICITY
3863
+ #include "ev_wrap.h"
3864
+ #endif
3865
+
3866
+ EV_CPP(})
3867
+