cool.io 1.4.1-x64-mingw32

Sign up to get free protection for your applications and to get access to all the features.
Files changed (76) hide show
  1. checksums.yaml +7 -0
  2. data/.gitignore +29 -0
  3. data/.rspec +3 -0
  4. data/.travis.yml +13 -0
  5. data/CHANGES.md +229 -0
  6. data/Gemfile +4 -0
  7. data/LICENSE +20 -0
  8. data/README.md +166 -0
  9. data/Rakefile +79 -0
  10. data/cool.io.gemspec +29 -0
  11. data/examples/callbacked_echo_server.rb +24 -0
  12. data/examples/dslified_echo_client.rb +34 -0
  13. data/examples/dslified_echo_server.rb +24 -0
  14. data/examples/echo_client.rb +38 -0
  15. data/examples/echo_server.rb +27 -0
  16. data/examples/google.rb +9 -0
  17. data/ext/cool.io/.gitignore +5 -0
  18. data/ext/cool.io/cool.io.h +59 -0
  19. data/ext/cool.io/cool.io_ext.c +25 -0
  20. data/ext/cool.io/ev_wrap.h +10 -0
  21. data/ext/cool.io/extconf.rb +61 -0
  22. data/ext/cool.io/iowatcher.c +189 -0
  23. data/ext/cool.io/libev.c +8 -0
  24. data/ext/cool.io/loop.c +261 -0
  25. data/ext/cool.io/stat_watcher.c +269 -0
  26. data/ext/cool.io/timer_watcher.c +219 -0
  27. data/ext/cool.io/utils.c +122 -0
  28. data/ext/cool.io/watcher.c +264 -0
  29. data/ext/cool.io/watcher.h +71 -0
  30. data/ext/iobuffer/extconf.rb +9 -0
  31. data/ext/iobuffer/iobuffer.c +767 -0
  32. data/ext/libev/Changes +507 -0
  33. data/ext/libev/LICENSE +37 -0
  34. data/ext/libev/README +58 -0
  35. data/ext/libev/README.embed +3 -0
  36. data/ext/libev/ev.c +5054 -0
  37. data/ext/libev/ev.h +853 -0
  38. data/ext/libev/ev_epoll.c +282 -0
  39. data/ext/libev/ev_kqueue.c +214 -0
  40. data/ext/libev/ev_poll.c +148 -0
  41. data/ext/libev/ev_port.c +185 -0
  42. data/ext/libev/ev_select.c +362 -0
  43. data/ext/libev/ev_vars.h +204 -0
  44. data/ext/libev/ev_win32.c +163 -0
  45. data/ext/libev/ev_wrap.h +200 -0
  46. data/ext/libev/ruby_gil.patch +97 -0
  47. data/ext/libev/test_libev_win32.c +123 -0
  48. data/ext/libev/win_select.patch +115 -0
  49. data/lib/.gitignore +2 -0
  50. data/lib/cool.io.rb +34 -0
  51. data/lib/cool.io/async_watcher.rb +43 -0
  52. data/lib/cool.io/custom_require.rb +9 -0
  53. data/lib/cool.io/dns_resolver.rb +219 -0
  54. data/lib/cool.io/dsl.rb +139 -0
  55. data/lib/cool.io/io.rb +194 -0
  56. data/lib/cool.io/iowatcher.rb +17 -0
  57. data/lib/cool.io/listener.rb +99 -0
  58. data/lib/cool.io/loop.rb +122 -0
  59. data/lib/cool.io/meta.rb +49 -0
  60. data/lib/cool.io/server.rb +75 -0
  61. data/lib/cool.io/socket.rb +230 -0
  62. data/lib/cool.io/timer_watcher.rb +17 -0
  63. data/lib/cool.io/version.rb +7 -0
  64. data/lib/coolio.rb +2 -0
  65. data/spec/async_watcher_spec.rb +57 -0
  66. data/spec/dns_spec.rb +43 -0
  67. data/spec/iobuffer_spec.rb +147 -0
  68. data/spec/spec_helper.rb +19 -0
  69. data/spec/stat_watcher_spec.rb +77 -0
  70. data/spec/tcp_server_spec.rb +225 -0
  71. data/spec/tcp_socket_spec.rb +185 -0
  72. data/spec/timer_watcher_spec.rb +59 -0
  73. data/spec/udp_socket_spec.rb +58 -0
  74. data/spec/unix_listener_spec.rb +25 -0
  75. data/spec/unix_server_spec.rb +27 -0
  76. metadata +182 -0
@@ -0,0 +1,282 @@
1
+ /*
2
+ * libev epoll fd activity backend
3
+ *
4
+ * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without modifica-
8
+ * tion, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright
14
+ * notice, this list of conditions and the following disclaimer in the
15
+ * documentation and/or other materials provided with the distribution.
16
+ *
17
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ *
28
+ * Alternatively, the contents of this file may be used under the terms of
29
+ * the GNU General Public License ("GPL") version 2 or any later version,
30
+ * in which case the provisions of the GPL are applicable instead of
31
+ * the above. If you wish to allow the use of your version of this file
32
+ * only under the terms of the GPL and not to allow others to use your
33
+ * version of this file under the BSD license, indicate your decision
34
+ * by deleting the provisions above and replace them with the notice
35
+ * and other provisions required by the GPL. If you do not delete the
36
+ * provisions above, a recipient may use your version of this file under
37
+ * either the BSD or the GPL.
38
+ */
39
+
40
+ /*
41
+ * general notes about epoll:
42
+ *
43
+ * a) epoll silently removes fds from the fd set. as nothing tells us
44
+ * that an fd has been removed otherwise, we have to continually
45
+ * "rearm" fds that we suspect *might* have changed (same
46
+ * problem with kqueue, but much less costly there).
47
+ * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
48
+ * and seems not to have any advantage.
49
+ * c) the inability to handle fork or file descriptors (think dup)
50
+ * limits the applicability over poll, so this is not a generic
51
+ * poll replacement.
52
+ * d) epoll doesn't work the same as select with many file descriptors
53
+ * (such as files). while not critical, no other advanced interface
54
+ * seems to share this (rather non-unixy) limitation.
55
+ * e) epoll claims to be embeddable, but in practise you never get
56
+ * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32).
57
+ * f) epoll_ctl returning EPERM means the fd is always ready.
58
+ *
59
+ * lots of "weird code" and complication handling in this file is due
60
+ * to these design problems with epoll, as we try very hard to avoid
61
+ * epoll_ctl syscalls for common usage patterns and handle the breakage
62
+ * ensuing from receiving events for closed and otherwise long gone
63
+ * file descriptors.
64
+ */
65
+
66
+ #include <sys/epoll.h>
67
+
68
+ #define EV_EMASK_EPERM 0x80
69
+
70
/*
 * register/re-register interest in fd with the kernel epoll set.
 * oev/nev are the old and new libev event masks (EV_READ/EV_WRITE);
 * nev == 0 (deletion) is deliberately ignored here - see the comment below.
 */
static void
epoll_modify (EV_P_ int fd, int oev, int nev)
{
  struct epoll_event ev;
  unsigned char oldmask;

  /*
   * we handle EPOLL_CTL_DEL by ignoring it here
   * on the assumption that the fd is gone anyways
   * if that is wrong, we have to handle the spurious
   * event in epoll_poll.
   * if the fd is added again, we try to ADD it, and, if that
   * fails, we assume it still has the same eventmask.
   */
  if (!nev)
    return;

  oldmask = anfds [fd].emask;
  anfds [fd].emask = nev;

  /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
  ev.data.u64 = (uint64_t)(uint32_t)fd
              | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
  ev.events   = (nev & EV_READ  ? EPOLLIN  : 0)
              | (nev & EV_WRITE ? EPOLLOUT : 0);

  /* common case: MOD when we were already watching with a different mask, ADD otherwise */
  if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
    return;

  if (expect_true (errno == ENOENT))
    {
      /* if ENOENT then the fd went away, so try to do the right thing */
      if (!nev)
        goto dec_egen;

      if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
        return;
    }
  else if (expect_true (errno == EEXIST))
    {
      /* EEXIST means we ignored a previous DEL, but the fd is still active */
      /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
      if (oldmask == nev)
        goto dec_egen;

      if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
        return;
    }
  else if (expect_true (errno == EPERM))
    {
      /* EPERM means the fd is always ready, but epoll is too snobbish */
      /* to handle it, unlike select or poll. */
      anfds [fd].emask = EV_EMASK_EPERM;

      /* add fd to epoll_eperms, if not already inside */
      if (!(oldmask & EV_EMASK_EPERM))
        {
          array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2);
          epoll_eperms [epoll_epermcnt++] = fd;
        }

      return;
    }

  /* any other epoll_ctl error is treated as fatal for this fd */
  fd_kill (EV_A_ fd);

dec_egen:
  /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
  --anfds [fd].egen;
}
140
+
141
/*
 * wait for events (at most `timeout` seconds) and feed them to the core via
 * fd_event. also synthesizes events for the EPERM fds collected in
 * epoll_modify, and grows the receive array when it was filled completely.
 */
static void
epoll_poll (EV_P_ ev_tstamp timeout)
{
  int i;
  int eventcnt;

  /* EPERM fds are treated as always ready, so never block while any are registered */
  if (expect_false (epoll_epermcnt))
    timeout = 0.;

  /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
  /* the default libev max wait time, however. */
  EV_RELEASE_CB;
  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3);
  EV_ACQUIRE_CB;

  if (expect_false (eventcnt < 0))
    {
      if (errno != EINTR)
        ev_syserr ("(libev) epoll_wait");

      return;
    }

  for (i = 0; i < eventcnt; ++i)
    {
      struct epoll_event *ev = epoll_events + i;

      int fd = (uint32_t)ev->data.u64; /* fd lives in the lower 32 bits, the generation counter above it */
      int want = anfds [fd].events;
      int got  = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
               | (ev->events & (EPOLLIN  | EPOLLERR | EPOLLHUP) ? EV_READ  : 0);

      /*
       * check for spurious notification.
       * this only finds spurious notifications on egen updates
       * other spurious notifications will be found by epoll_ctl, below
       * we assume that fd is always in range, as we never shrink the anfds array
       */
      if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
        {
          /* recreate kernel state */
          postfork = 1;
          continue;
        }

      if (expect_false (got & ~want))
        {
          anfds [fd].emask = want;

          /*
           * we received an event but are not interested in it, try mod or del
           * this often happens because we optimistically do not unregister fds
           * when we are no longer interested in them, but also when we get spurious
           * notifications for fds from another process. this is partially handled
           * above with the gencounter check (== our fd is not the event fd), and
           * partially here, when epoll_ctl returns an error (== a child has the fd
           * but we closed it).
           */
          ev->events = (want & EV_READ  ? EPOLLIN  : 0)
                     | (want & EV_WRITE ? EPOLLOUT : 0);

          /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */
          /* which is fortunately easy to do for us. */
          if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
            {
              postfork = 1; /* an error occurred, recreate kernel state */
              continue;
            }
        }

      fd_event (EV_A_ fd, got);
    }

  /* if the receive array was full, increase its size */
  if (expect_false (eventcnt == epoll_eventmax))
    {
      ev_free (epoll_events);
      epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
      epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
    }

  /* now synthesize events for all fds where epoll fails, while select works... */
  for (i = epoll_epermcnt; i--; )
    {
      int fd = epoll_eperms [i];
      unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE);

      if (anfds [fd].emask & EV_EMASK_EPERM && events)
        fd_event (EV_A_ fd, events);
      else
        {
          /* fd no longer wanted - remove it by swapping in the last list entry */
          epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
          anfds [fd].emask = 0;
        }
    }
}
237
+
238
+ int inline_size
239
+ epoll_init (EV_P_ int flags)
240
+ {
241
+ #ifdef EPOLL_CLOEXEC
242
+ backend_fd = epoll_create1 (EPOLL_CLOEXEC);
243
+
244
+ if (backend_fd < 0 && (errno == EINVAL || errno == ENOSYS))
245
+ #endif
246
+ backend_fd = epoll_create (256);
247
+
248
+ if (backend_fd < 0)
249
+ return 0;
250
+
251
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
252
+
253
+ backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
254
+ backend_modify = epoll_modify;
255
+ backend_poll = epoll_poll;
256
+
257
+ epoll_eventmax = 64; /* initial number of events receivable per poll */
258
+ epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
259
+
260
+ return EVBACKEND_EPOLL;
261
+ }
262
+
263
/* free all memory held by the epoll backend (the epoll fd itself is closed elsewhere) */
void inline_size
epoll_destroy (EV_P)
{
  ev_free (epoll_events);
  array_free (epoll_eperm, EMPTY); /* array_free appends the "s" suffix: frees epoll_eperms */
}
269
+
270
+ void inline_size
271
+ epoll_fork (EV_P)
272
+ {
273
+ close (backend_fd);
274
+
275
+ while ((backend_fd = epoll_create (256)) < 0)
276
+ ev_syserr ("(libev) epoll_create");
277
+
278
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
279
+
280
+ fd_rearm_all (EV_A);
281
+ }
282
+
@@ -0,0 +1,214 @@
1
+ /*
2
+ * libev kqueue backend
3
+ *
4
+ * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without modifica-
8
+ * tion, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright
14
+ * notice, this list of conditions and the following disclaimer in the
15
+ * documentation and/or other materials provided with the distribution.
16
+ *
17
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ *
28
+ * Alternatively, the contents of this file may be used under the terms of
29
+ * the GNU General Public License ("GPL") version 2 or any later version,
30
+ * in which case the provisions of the GPL are applicable instead of
31
+ * the above. If you wish to allow the use of your version of this file
32
+ * only under the terms of the GPL and not to allow others to use your
33
+ * version of this file under the BSD license, indicate your decision
34
+ * by deleting the provisions above and replace them with the notice
35
+ * and other provisions required by the GPL. If you do not delete the
36
+ * provisions above, a recipient may use your version of this file under
37
+ * either the BSD or the GPL.
38
+ */
39
+
40
+ #include <sys/types.h>
41
+ #include <sys/time.h>
42
+ #include <sys/event.h>
43
+ #include <string.h>
44
+ #include <errno.h>
45
+
46
+ void inline_speed
47
+ kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
48
+ {
49
+ ++kqueue_changecnt;
50
+ array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);
51
+
52
+ EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
53
+ }
54
+
55
+ /* OS X at least needs this */
56
+ #ifndef EV_ENABLE
57
+ # define EV_ENABLE 0
58
+ #endif
59
+ #ifndef NOTE_EOF
60
+ # define NOTE_EOF 0
61
+ #endif
62
+
63
/*
 * queue kqueue changes to move fd from event mask oev to nev.
 * old filters are deleted first, then the wanted ones are (re-)added -
 * the re-add happens even when oev == nev, see the comment below.
 */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev != nev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* to detect close/reopen reliably, we have to re-add */
  /* event requests even when oev == nev */

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF);
}
84
+
85
/*
 * submit all queued change records and wait (at most `timeout` seconds) for
 * events, feeding them to the core via fd_event. per-fd errors reported via
 * EV_ERROR are handled by resubmitting, revalidating or killing the fd.
 */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  EV_RELEASE_CB;
  EV_TS_SET (ts, timeout);
  res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  EV_ACQUIRE_CB;
  kqueue_changecnt = 0; /* all pending changes were handed to the kernel above */

  if (expect_false (res < 0))
    {
      if (errno != EINTR)
        ev_syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (expect_false (kqueue_events [i].flags & EV_ERROR))
        {
          int err = kqueue_events [i].data;

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* if the receive array was full, increase its size for next time */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
154
+
155
+ int inline_size
156
+ kqueue_init (EV_P_ int flags)
157
+ {
158
+ /* initialize the kernel queue */
159
+ kqueue_fd_pid = getpid ();
160
+ if ((backend_fd = kqueue ()) < 0)
161
+ return 0;
162
+
163
+ fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
164
+
165
+ backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */
166
+ backend_modify = kqueue_modify;
167
+ backend_poll = kqueue_poll;
168
+
169
+ kqueue_eventmax = 64; /* initial number of events receivable per poll */
170
+ kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
171
+
172
+ kqueue_changes = 0;
173
+ kqueue_changemax = 0;
174
+ kqueue_changecnt = 0;
175
+
176
+ return EVBACKEND_KQUEUE;
177
+ }
178
+
179
/* free all memory held by the kqueue backend (event and change arrays) */
void inline_size
kqueue_destroy (EV_P)
{
  ev_free (kqueue_events);
  ev_free (kqueue_changes);
}
185
+
186
/* after fork: recreate the kernel queue and re-register all watched fds */
void inline_size
kqueue_fork (EV_P)
{
  /* some BSD kernels don't just destroy the kqueue itself,
   * but also close the fd, which isn't documented, and
   * impossible to support properly.
   * we remember the pid of the kqueue call and only close
   * the fd if the pid is still the same.
   * this leaks fds on sane kernels, but BSD interfaces are
   * notoriously buggy and rarely get fixed.
   */
  pid_t newpid = getpid ();

  /* only close the inherited fd if this process actually owns it */
  if (newpid == kqueue_fd_pid)
    close (backend_fd);

  kqueue_fd_pid = newpid;
  while ((backend_fd = kqueue ()) < 0)
    ev_syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
211
+
212
+ /* sys/event.h defines EV_ERROR */
213
+ #undef EV_ERROR
214
+