passenger 5.0.8 → 5.0.9
Potentially problematic release.
- checksums.yaml +8 -8
- checksums.yaml.gz.asc +7 -7
- data.tar.gz.asc +7 -7
- data/.editorconfig +20 -0
- data/CHANGELOG +21 -0
- data/bin/passenger-install-apache2-module +3 -1
- data/build/agents.rb +7 -5
- data/build/basics.rb +3 -3
- data/build/common_library.rb +52 -30
- data/build/cxx_tests.rb +20 -13
- data/build/misc.rb +5 -5
- data/doc/Design and Architecture.html +1 -1
- data/doc/Design and Architecture.txt +1 -1
- data/doc/Packaging.html +4 -4
- data/doc/Packaging.txt.md +4 -4
- data/doc/Users guide Apache.html +22 -9
- data/doc/Users guide Apache.idmap.txt +4 -2
- data/doc/Users guide Apache.txt +2 -0
- data/doc/Users guide Nginx.html +22 -9
- data/doc/Users guide Nginx.idmap.txt +4 -2
- data/doc/Users guide Nginx.txt +2 -0
- data/doc/Users guide Standalone.html +14 -9
- data/doc/Users guide Standalone.idmap.txt +4 -2
- data/doc/users_guide_snippets/installation.txt +10 -6
- data/ext/apache2/Hooks.cpp +13 -2
- data/ext/common/ApplicationPool2/Pool/Inspection.h +8 -3
- data/ext/common/BackgroundEventLoop.cpp +249 -67
- data/ext/common/BackgroundEventLoop.h +5 -5
- data/ext/common/Constants.h +1 -1
- data/ext/common/InstanceDirectory.h +8 -6
- data/ext/common/ServerKit/Context.h +8 -2
- data/ext/common/ServerKit/FileBufferedChannel.h +262 -226
- data/ext/common/ServerKit/HeaderTable.h +28 -3
- data/ext/common/ServerKit/HttpHeaderParser.h +37 -13
- data/ext/common/ServerKit/HttpServer.h +17 -1
- data/ext/common/ServerKit/Implementation.cpp +2 -0
- data/ext/common/ServerKit/Server.h +25 -28
- data/ext/common/Utils/IOUtils.cpp +11 -0
- data/ext/common/Utils/ProcessMetricsCollector.h +4 -0
- data/ext/common/Utils/StrIntUtils.cpp +11 -7
- data/ext/common/Utils/StrIntUtils.h +1 -1
- data/ext/common/Utils/StrIntUtilsNoStrictAliasing.cpp +21 -16
- data/ext/common/agents/Base.cpp +6 -0
- data/ext/common/agents/Base.h +2 -0
- data/ext/common/agents/HelperAgent/AdminServer.h +25 -25
- data/ext/common/agents/HelperAgent/Main.cpp +37 -12
- data/ext/common/agents/HelperAgent/RequestHandler.h +18 -20
- data/ext/common/agents/HelperAgent/RequestHandler/AppResponse.h +4 -0
- data/ext/common/agents/HelperAgent/RequestHandler/ForwardResponse.cpp +10 -6
- data/ext/common/agents/HelperAgent/RequestHandler/Hooks.cpp +2 -0
- data/ext/common/agents/HelperAgent/RequestHandler/InitRequest.cpp +1 -1
- data/ext/common/agents/HelperAgent/RequestHandler/SendRequest.cpp +1 -1
- data/ext/common/agents/HelperAgent/RequestHandler/Utils.cpp +9 -2
- data/ext/common/agents/HelperAgent/ResponseCache.h +11 -11
- data/ext/common/agents/LoggingAgent/AdminServer.h +8 -8
- data/ext/common/agents/LoggingAgent/Main.cpp +6 -5
- data/ext/common/agents/Watchdog/AdminServer.h +13 -13
- data/ext/common/agents/Watchdog/Main.cpp +8 -3
- data/ext/libuv/.gitignore +72 -0
- data/ext/libuv/AUTHORS +199 -0
- data/ext/libuv/ChangeLog +2023 -0
- data/ext/libuv/LICENSE +46 -0
- data/ext/libuv/Makefile.am +336 -0
- data/ext/libuv/README.md +197 -0
- data/ext/libuv/checksparse.sh +233 -0
- data/ext/libuv/common.gypi +210 -0
- data/ext/libuv/configure.ac +67 -0
- data/ext/libuv/gyp_uv.py +96 -0
- data/ext/libuv/include/android-ifaddrs.h +54 -0
- data/ext/libuv/include/pthread-fixes.h +72 -0
- data/ext/libuv/include/tree.h +768 -0
- data/ext/libuv/include/uv-aix.h +32 -0
- data/ext/libuv/include/uv-bsd.h +34 -0
- data/ext/libuv/include/uv-darwin.h +61 -0
- data/ext/libuv/include/uv-errno.h +418 -0
- data/ext/libuv/include/uv-linux.h +34 -0
- data/ext/libuv/include/uv-sunos.h +44 -0
- data/ext/libuv/include/uv-threadpool.h +37 -0
- data/ext/libuv/include/uv-unix.h +383 -0
- data/ext/libuv/include/uv-version.h +39 -0
- data/ext/libuv/include/uv.h +1455 -0
- data/ext/libuv/libuv.pc.in +11 -0
- data/ext/libuv/m4/.gitignore +4 -0
- data/ext/libuv/m4/as_case.m4 +21 -0
- data/ext/libuv/m4/libuv-check-flags.m4 +319 -0
- data/ext/libuv/src/fs-poll.c +255 -0
- data/ext/libuv/src/heap-inl.h +245 -0
- data/ext/libuv/src/inet.c +313 -0
- data/ext/libuv/src/queue.h +92 -0
- data/ext/libuv/src/threadpool.c +303 -0
- data/ext/libuv/src/unix/aix.c +1240 -0
- data/ext/libuv/src/unix/android-ifaddrs.c +703 -0
- data/ext/libuv/src/unix/async.c +284 -0
- data/ext/libuv/src/unix/atomic-ops.h +60 -0
- data/ext/libuv/src/unix/core.c +985 -0
- data/ext/libuv/src/unix/darwin-proctitle.c +206 -0
- data/ext/libuv/src/unix/darwin.c +331 -0
- data/ext/libuv/src/unix/dl.c +83 -0
- data/ext/libuv/src/unix/freebsd.c +435 -0
- data/ext/libuv/src/unix/fs.c +1189 -0
- data/ext/libuv/src/unix/fsevents.c +899 -0
- data/ext/libuv/src/unix/getaddrinfo.c +202 -0
- data/ext/libuv/src/unix/getnameinfo.c +120 -0
- data/ext/libuv/src/unix/internal.h +314 -0
- data/ext/libuv/src/unix/kqueue.c +418 -0
- data/ext/libuv/src/unix/linux-core.c +876 -0
- data/ext/libuv/src/unix/linux-inotify.c +257 -0
- data/ext/libuv/src/unix/linux-syscalls.c +471 -0
- data/ext/libuv/src/unix/linux-syscalls.h +158 -0
- data/ext/libuv/src/unix/loop-watcher.c +63 -0
- data/ext/libuv/src/unix/loop.c +135 -0
- data/ext/libuv/src/unix/netbsd.c +368 -0
- data/ext/libuv/src/unix/openbsd.c +384 -0
- data/ext/libuv/src/unix/pipe.c +288 -0
- data/ext/libuv/src/unix/poll.c +113 -0
- data/ext/libuv/src/unix/process.c +551 -0
- data/ext/libuv/src/unix/proctitle.c +102 -0
- data/ext/libuv/src/unix/pthread-fixes.c +103 -0
- data/ext/libuv/src/unix/signal.c +465 -0
- data/ext/libuv/src/unix/spinlock.h +53 -0
- data/ext/libuv/src/unix/stream.c +1598 -0
- data/ext/libuv/src/unix/sunos.c +763 -0
- data/ext/libuv/src/unix/tcp.c +327 -0
- data/ext/libuv/src/unix/thread.c +519 -0
- data/ext/libuv/src/unix/timer.c +172 -0
- data/ext/libuv/src/unix/tty.c +265 -0
- data/ext/libuv/src/unix/udp.c +833 -0
- data/ext/libuv/src/uv-common.c +544 -0
- data/ext/libuv/src/uv-common.h +214 -0
- data/ext/libuv/src/version.c +49 -0
- data/ext/libuv/uv.gyp +487 -0
- data/ext/nginx/ContentHandler.c +21 -10
- data/ext/nginx/ngx_http_passenger_module.c +7 -0
- data/ext/oxt/implementation.cpp +9 -2
- data/ext/oxt/initialize.hpp +5 -1
- data/lib/phusion_passenger.rb +3 -3
- data/lib/phusion_passenger/admin_tools/instance.rb +10 -6
- data/lib/phusion_passenger/admin_tools/instance_registry.rb +6 -2
- data/lib/phusion_passenger/packaging.rb +3 -4
- data/lib/phusion_passenger/platform_info.rb +13 -1
- data/lib/phusion_passenger/platform_info/apache.rb +15 -4
- data/lib/phusion_passenger/platform_info/apache_detector.rb +5 -1
- data/lib/phusion_passenger/rack/thread_handler_extension.rb +184 -99
- data/lib/phusion_passenger/request_handler/thread_handler.rb +13 -6
- data/lib/phusion_passenger/standalone/start_command.rb +2 -2
- data/resources/templates/apache2/apache_install_broken.txt.erb +2 -1
- metadata +99 -22
- metadata.gz.asc +7 -7
- data/ext/libeio/Changes +0 -76
- data/ext/libeio/LICENSE +0 -36
- data/ext/libeio/Makefile.am +0 -15
- data/ext/libeio/Makefile.in +0 -694
- data/ext/libeio/aclocal.m4 +0 -9418
- data/ext/libeio/autogen.sh +0 -3
- data/ext/libeio/config.guess +0 -1540
- data/ext/libeio/config.h.in +0 -136
- data/ext/libeio/config.sub +0 -1779
- data/ext/libeio/configure +0 -14822
- data/ext/libeio/configure.ac +0 -22
- data/ext/libeio/demo.c +0 -194
- data/ext/libeio/ecb.h +0 -714
- data/ext/libeio/eio.c +0 -2818
- data/ext/libeio/eio.h +0 -414
- data/ext/libeio/install-sh +0 -520
- data/ext/libeio/libeio.m4 +0 -195
- data/ext/libeio/ltmain.sh +0 -9636
- data/ext/libeio/missing +0 -376
- data/ext/libeio/xthread.h +0 -166
data/ext/libuv/src/unix/kqueue.c
@@ -0,0 +1,418 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
+
+
+int uv__kqueue_init(uv_loop_t* loop) {
+  loop->backend_fd = kqueue();
+  if (loop->backend_fd == -1)
+    return -errno;
+
+  uv__cloexec(loop->backend_fd, 1);
+
+  return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  struct kevent events[1024];
+  struct kevent* ev;
+  struct timespec spec;
+  unsigned int nevents;
+  unsigned int revents;
+  QUEUE* q;
+  uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
+  uint64_t base;
+  uint64_t diff;
+  int filter;
+  int fflags;
+  int count;
+  int nfds;
+  int fd;
+  int op;
+  int i;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  nevents = 0;
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+    assert(w->fd >= 0);
+    assert(w->fd < (int) loop->nwatchers);
+
+    if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
+      filter = EVFILT_READ;
+      fflags = 0;
+      op = EV_ADD;
+
+      if (w->cb == uv__fs_event) {
+        filter = EVFILT_VNODE;
+        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
+      }
+
+      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
+
+      if (++nevents == ARRAY_SIZE(events)) {
+        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+          abort();
+        nevents = 0;
+      }
+    }
+
+    if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
+      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
+
+      if (++nevents == ARRAY_SIZE(events)) {
+        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+          abort();
+        nevents = 0;
+      }
+    }
+
+    w->events = w->pevents;
+  }
+
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+  for (;; nevents = 0) {
+    if (timeout != -1) {
+      spec.tv_sec = timeout / 1000;
+      spec.tv_nsec = (timeout % 1000) * 1000000;
+    }
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+    nfds = kevent(loop->backend_fd,
+                  events,
+                  nevents,
+                  events,
+                  ARRAY_SIZE(events),
+                  timeout == -1 ? NULL : &spec);
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+      return;
+    }
+
+    if (nfds == -1) {
+      if (errno != EINTR)
+        abort();
+
+      if (timeout == 0)
+        return;
+
+      if (timeout == -1)
+        continue;
+
+      /* Interrupted by a signal. Update timeout and poll again. */
+      goto update_timeout;
+    }
+
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+    for (i = 0; i < nfds; i++) {
+      ev = events + i;
+      fd = ev->ident;
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+      w = loop->watchers[fd];
+
+      if (w == NULL) {
+        /* File descriptor that we've stopped watching, disarm it. */
+        /* TODO batch up */
+        struct kevent events[1];
+
+        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+          if (errno != EBADF && errno != ENOENT)
+            abort();
+
+        continue;
+      }
+
+      if (ev->filter == EVFILT_VNODE) {
+        assert(w->events == UV__POLLIN);
+        assert(w->pevents == UV__POLLIN);
+        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
+        nevents++;
+        continue;
+      }
+
+      revents = 0;
+
+      if (ev->filter == EVFILT_READ) {
+        if (w->pevents & UV__POLLIN) {
+          revents |= UV__POLLIN;
+          w->rcount = ev->data;
+        } else {
+          /* TODO batch up */
+          struct kevent events[1];
+          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+            if (errno != ENOENT)
+              abort();
+        }
+      }
+
+      if (ev->filter == EVFILT_WRITE) {
+        if (w->pevents & UV__POLLOUT) {
+          revents |= UV__POLLOUT;
+          w->wcount = ev->data;
+        } else {
+          /* TODO batch up */
+          struct kevent events[1];
+          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+            if (errno != ENOENT)
+              abort();
+        }
+      }
+
+      if (ev->flags & EV_ERROR)
+        revents |= UV__POLLERR;
+
+      if (revents == 0)
+        continue;
+
+      w->cb(loop, w, revents);
+      nevents++;
+    }
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    diff = loop->time - base;
+    if (diff >= (uint64_t) timeout)
+      return;
+
+    timeout -= diff;
+  }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct kevent* events;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct kevent*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events == NULL)
+    return;
+
+  /* Invalidate events with same file descriptor */
+  for (i = 0; i < nfds; i++)
+    if ((int) events[i].ident == fd)
+      events[i].ident = -1;
+}
+
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
+  uv_fs_event_t* handle;
+  struct kevent ev;
+  int events;
+  const char* path;
+#if defined(F_GETPATH)
+  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
+  char pathbuf[MAXPATHLEN];
+#endif
+
+  handle = container_of(w, uv_fs_event_t, event_watcher);
+
+  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
+    events = UV_CHANGE;
+  else
+    events = UV_RENAME;
+
+  path = NULL;
+#if defined(F_GETPATH)
+  /* Also works when the file has been unlinked from the file system. Passing
+   * in the path when the file has been deleted is arguably a little strange
+   * but it's consistent with what the inotify backend does.
+   */
+  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
+    path = uv__basename_r(pathbuf);
+#endif
+  handle->cb(handle, path, events, 0);
+
+  if (handle->event_watcher.fd == -1)
+    return;
+
+  /* Watcher operates in one-shot mode, re-arm it. */
+  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+
+  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
+
+  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+    abort();
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+  return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* path,
+                      unsigned int flags) {
+#if defined(__APPLE__)
+  struct stat statbuf;
+#endif /* defined(__APPLE__) */
+  int fd;
+
+  if (uv__is_active(handle))
+    return -EINVAL;
+
+  /* TODO open asynchronously - but how do we report back errors? */
+  fd = open(path, O_RDONLY);
+  if (fd == -1)
+    return -errno;
+
+  uv__handle_start(handle);
+  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
+  handle->path = strdup(path);
+  handle->cb = cb;
+
+#if defined(__APPLE__)
+  /* Nullify field to perform checks later */
+  handle->cf_cb = NULL;
+  handle->realpath = NULL;
+  handle->realpath_len = 0;
+  handle->cf_flags = flags;
+
+  if (fstat(fd, &statbuf))
+    goto fallback;
+  /* FSEvents works only with directories */
+  if (!(statbuf.st_mode & S_IFDIR))
+    goto fallback;
+
+  return uv__fsevents_init(handle);
+
+fallback:
+#endif /* defined(__APPLE__) */
+
+  uv__io_start(handle->loop, &handle->event_watcher, UV__POLLIN);
+
+  return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  if (!uv__is_active(handle))
+    return 0;
+
+  uv__handle_stop(handle);
+
+#if defined(__APPLE__)
+  if (uv__fsevents_close(handle))
+#endif /* defined(__APPLE__) */
+  {
+    uv__io_close(handle->loop, &handle->event_watcher);
+  }
+
+  free(handle->path);
+  handle->path = NULL;
+
+  uv__close(handle->event_watcher.fd);
+  handle->event_watcher.fd = -1;
+
+  return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+  uv_fs_event_stop(handle);
+}
data/ext/libuv/src/unix/linux-core.c
@@ -0,0 +1,876 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <net/if.h>
+#include <sys/param.h>
+#include <sys/prctl.h>
+#include <sys/sysinfo.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+#define HAVE_IFADDRS_H 1
+
+#ifdef __UCLIBC__
+# if __UCLIBC_MAJOR__ < 0 || __UCLIBC_MINOR__ < 9 || __UCLIBC_SUBLEVEL__ < 32
+#  undef HAVE_IFADDRS_H
+# endif
+#endif
+
+#ifdef HAVE_IFADDRS_H
+# if defined(__ANDROID__)
+#  include "android-ifaddrs.h"
+# else
+#  include <ifaddrs.h>
+# endif
+# include <sys/socket.h>
+# include <net/ethernet.h>
+# include <linux/if_packet.h>
+#endif /* HAVE_IFADDRS_H */
+
+/* Available from 2.6.32 onwards. */
+#ifndef CLOCK_MONOTONIC_COARSE
+# define CLOCK_MONOTONIC_COARSE 6
+#endif
+
+/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
+ * include that file because it conflicts with <time.h>. We'll just have to
+ * define it ourselves.
+ */
+#ifndef CLOCK_BOOTTIME
+# define CLOCK_BOOTTIME 7
+#endif
+
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
+static int read_times(unsigned int numcpus, uv_cpu_info_t* ci);
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
+static unsigned long read_cpufreq(unsigned int cpunum);
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  int fd;
+
+  fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
+
+  /* epoll_create1() can fail either because it's not implemented (old kernel)
+   * or because it doesn't understand the EPOLL_CLOEXEC flag.
+   */
+  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
+    fd = uv__epoll_create(256);
+
+    if (fd != -1)
+      uv__cloexec(fd, 1);
+  }
+
+  loop->backend_fd = fd;
+  loop->inotify_fd = -1;
+  loop->inotify_watchers = NULL;
+
+  if (fd == -1)
+    return -errno;
+
+  return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+  if (loop->inotify_fd == -1) return;
+  uv__io_stop(loop, &loop->inotify_read_watcher, UV__POLLIN);
+  uv__close(loop->inotify_fd);
+  loop->inotify_fd = -1;
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct uv__epoll_event* events;
+  struct uv__epoll_event dummy;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events != NULL)
+    /* Invalidate events with same file descriptor */
+    for (i = 0; i < nfds; i++)
+      if ((int) events[i].data == fd)
+        events[i].data = -1;
+
+  /* Remove the file descriptor from the epoll.
+   * This avoids a problem where the same file description remains open
+   * in another process, causing repeated junk epoll events.
+   *
+   * We pass in a dummy epoll_event, to work around a bug in old kernels.
+   */
+  if (loop->backend_fd >= 0) {
+    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+     */
+    memset(&dummy, 0, sizeof(dummy));
+    uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy);
+  }
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  static int no_epoll_pwait;
+  static int no_epoll_wait;
+  struct uv__epoll_event events[1024];
+  struct uv__epoll_event* pe;
+  struct uv__epoll_event e;
+  QUEUE* q;
+  uv__io_t* w;
+  sigset_t sigset;
+  uint64_t sigmask;
+  uint64_t base;
+  uint64_t diff;
+  int nevents;
+  int count;
+  int nfds;
+  int fd;
+  int op;
+  int i;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+    assert(w->fd >= 0);
+    assert(w->fd < (int) loop->nwatchers);
+
+    e.events = w->pevents;
+    e.data = w->fd;
+
+    if (w->events == 0)
+      op = UV__EPOLL_CTL_ADD;
+    else
+      op = UV__EPOLL_CTL_MOD;
+
+    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+     * events, skip the syscall and squelch the events after epoll_wait().
+     */
+    if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
+      if (errno != EEXIST)
+        abort();
+
+      assert(op == UV__EPOLL_CTL_ADD);
+
+      /* We've reactivated a file descriptor that's been watched before. */
+      if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
+        abort();
+    }
+
+    w->events = w->pevents;
+  }
+
+  sigmask = 0;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    sigemptyset(&sigset);
+    sigaddset(&sigset, SIGPROF);
+    sigmask |= 1 << (SIGPROF - 1);
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+  for (;;) {
+    if (sigmask != 0 && no_epoll_pwait != 0)
+      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
+        abort();
+
+    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
+      nfds = uv__epoll_pwait(loop->backend_fd,
+                             events,
+                             ARRAY_SIZE(events),
+                             timeout,
+                             sigmask);
+      if (nfds == -1 && errno == ENOSYS)
+        no_epoll_pwait = 1;
+    } else {
+      nfds = uv__epoll_wait(loop->backend_fd,
+                            events,
+                            ARRAY_SIZE(events),
+                            timeout);
+      if (nfds == -1 && errno == ENOSYS)
+        no_epoll_wait = 1;
+    }
+
+    if (sigmask != 0 && no_epoll_pwait != 0)
+      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
+        abort();
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+      return;
+    }
+
+    if (nfds == -1) {
+      if (errno == ENOSYS) {
+        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
+        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
+        continue;
+      }
+
+      if (errno != EINTR)
+        abort();
+
+      if (timeout == -1)
+        continue;
+
+      if (timeout == 0)
+        return;
+
+      /* Interrupted by a signal. Update timeout and poll again. */
+      goto update_timeout;
+    }
+
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+    for (i = 0; i < nfds; i++) {
+      pe = events + i;
+      fd = pe->data;
+
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+
+      assert(fd >= 0);
+      assert((unsigned) fd < loop->nwatchers);
+
+      w = loop->watchers[fd];
+
+      if (w == NULL) {
+        /* File descriptor that we've stopped watching, disarm it.
+         *
+         * Ignore all errors because we may be racing with another thread
+         * when the file descriptor is closed.
+         */
+        uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
+        continue;
+      }
+
+      /* Give users only events they're interested in. Prevents spurious
+       * callbacks when previous callback invocation in this loop has stopped
+       * the current watcher. Also, filters out events that users has not
+       * requested us to watch.
+       */
+      pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;
+
+      /* Work around an epoll quirk where it sometimes reports just the
+       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
+       * move forward, we merge in the read/write events that the watcher
+       * is interested in; uv__read() and uv__write() will then deal with
+       * the error or hangup in the usual fashion.
+       *
+       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+       * reads the available data, calls uv_read_stop(), then sometime later
+       * calls uv_read_start() again. By then, libuv has forgotten about the
+       * hangup and the kernel won't report EPOLLIN again because there's
+       * nothing left to read. If anything, libuv is to blame here. The
+       * current hack is just a quick bandaid; to properly fix it, libuv
+       * needs to remember the error/hangup event. We should get that for
+       * free when we switch over to edge-triggered I/O.
+       */
+      if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
+        pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);
+
+      if (pe->events != 0) {
+        w->cb(loop, w, pe->events);
+        nevents++;
+      }
+    }
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    diff = loop->time - base;
+    if (diff >= (uint64_t) timeout)
+      return;
+
+    timeout -= diff;
+  }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  static clock_t fast_clock_id = -1;
+  struct timespec t;
+  clock_t clock_id;
+
+  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
+   * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
+   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
+   * decide to make a costly system call.
+   */
+  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
+   * when it has microsecond granularity or better (unlikely).
+   */
+  if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
+    if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
+        t.tv_nsec <= 1 * 1000 * 1000) {
+      fast_clock_id = CLOCK_MONOTONIC_COARSE;
+    } else {
+      fast_clock_id = CLOCK_MONOTONIC;
+    }
+  }
+
+  clock_id = CLOCK_MONOTONIC;
+  if (type == UV_CLOCK_FAST)
+    clock_id = fast_clock_id;
+
+  if (clock_gettime(clock_id, &t))
+    return 0;  /* Not really possible. */
+
+  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct sysinfo info;
+
+  if (sysinfo(&info) < 0) return;
+
+  avg[0] = (double) info.loads[0] / 65536.0;
+  avg[1] = (double) info.loads[1] / 65536.0;
+  avg[2] = (double) info.loads[2] / 65536.0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  ssize_t n;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  n = *size - 1;
+  if (n > 0)
+    n = readlink("/proc/self/exe", buffer, n);
+
+  if (n == -1)
+    return -errno;
+
+  buffer[n] = '\0';
+  *size = n;
+
+  return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  char buf[1024];
+  const char* s;
+  ssize_t n;
+  long val;
+  int fd;
+  int i;
+
+  do
+    fd = open("/proc/self/stat", O_RDONLY);
+  while (fd == -1 && errno == EINTR);
+
+  if (fd == -1)
+    return -errno;
+
+  do
+    n = read(fd, buf, sizeof(buf) - 1);
+  while (n == -1 && errno == EINTR);
+
+  uv__close(fd);
+  if (n == -1)
+    return -errno;
+  buf[n] = '\0';
+
+  s = strchr(buf, ' ');
+  if (s == NULL)
+    goto err;
+
+  s += 1;
+  if (*s != '(')
+    goto err;
+
+  s = strchr(s, ')');
+  if (s == NULL)
+    goto err;
+
+  for (i = 1; i <= 22; i++) {
+    s = strchr(s + 1, ' ');
+    if (s == NULL)
+      goto err;
+  }
+
+  errno = 0;
+  val = strtol(s, NULL, 10);
+  if (errno != 0)
+    goto err;
+  if (val < 0)
+    goto err;
+
+  *rss = val * getpagesize();
+  return 0;
+
+err:
+  return -EINVAL;
+}
+
+
+int uv_uptime(double* uptime) {
+  static volatile int no_clock_boottime;
+  struct timespec now;
+  int r;
+
+  /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
+   * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
+   * is suspended.
+   */
+  if (no_clock_boottime) {
+    retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+  }
+  else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
+    no_clock_boottime = 1;
+    goto retry;
+  }
+
+  if (r)
+    return -errno;
+
+  *uptime = now.tv_sec;
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int numcpus;
+  uv_cpu_info_t* ci;
+  int err;
+
+  *cpu_infos = NULL;
+  *count = 0;
+
+  numcpus = sysconf(_SC_NPROCESSORS_ONLN);
+  assert(numcpus != (unsigned int) -1);
+  assert(numcpus != 0);
+
+  ci = calloc(numcpus, sizeof(*ci));
+  if (ci == NULL)
+    return -ENOMEM;
+
+  err = read_models(numcpus, ci);
+  if (err == 0)
+    err = read_times(numcpus, ci);
+
+  if (err) {
+    uv_free_cpu_info(ci, numcpus);
+    return err;
+  }
+
+  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
+   * We don't check for errors here. Worst case, the field is left zero.
+   */
+  if (ci[0].speed == 0)
+    read_speeds(numcpus, ci);
+
+  *cpu_infos = ci;
+  *count = numcpus;
+
+  return 0;
+}
+
+
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
+  unsigned int num;
+
+  for (num = 0; num < numcpus; num++)
+    ci[num].speed = read_cpufreq(num) / 1000;
+}
+
+
+/* Also reads the CPU frequency on x86. The other architectures only have
+ * a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
+ */
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+  static const char model_marker[] = "model name\t: ";
+  static const char speed_marker[] = "cpu MHz\t\t: ";
+  const char* inferred_model;
+  unsigned int model_idx;
+  unsigned int speed_idx;
+  char buf[1024];
+  char* model;
+  FILE* fp;
+
+  /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
+  (void) &model_marker;
+  (void) &speed_marker;
+  (void) &speed_idx;
+  (void) &model;
+  (void) &buf;
+  (void) &fp;
+
+  model_idx = 0;
+  speed_idx = 0;
+
+#if defined(__arm__) || \
+    defined(__i386__) || \
+    defined(__mips__) || \
+    defined(__x86_64__)
+  fp = fopen("/proc/cpuinfo", "r");
+  if (fp == NULL)
+    return -errno;
+
+  while (fgets(buf, sizeof(buf), fp)) {
+    if (model_idx < numcpus) {
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -ENOMEM;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
+    }
+#if defined(__arm__) || defined(__mips__)
+    if (model_idx < numcpus) {
+#if defined(__arm__)
+      /* Fallback for pre-3.8 kernels. */
+      static const char model_marker[] = "Processor\t: ";
+#else /* defined(__mips__) */
+      static const char model_marker[] = "cpu model\t\t: ";
+#endif
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -ENOMEM;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
+    }
+#else /* !__arm__ && !__mips__ */
+    if (speed_idx < numcpus) {
+      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+        continue;
+      }
+    }
+#endif /* __arm__ || __mips__ */
+  }
+
+  fclose(fp);
+#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
+
+  /* Now we want to make sure that all the models contain *something* because
+   * it's not safe to leave them as null. Copy the last entry unless there
+   * isn't one, in that case we simply put "unknown" into everything.
+   */
+  inferred_model = "unknown";
+  if (model_idx > 0)
+    inferred_model = ci[model_idx - 1].model;
+
+  while (model_idx < numcpus) {
+    model = strndup(inferred_model, strlen(inferred_model));
+    if (model == NULL)
+      return -ENOMEM;
+    ci[model_idx++].model = model;
+  }
+
+  return 0;
+}
+
+
+static int read_times(unsigned int numcpus, uv_cpu_info_t* ci) {
+  unsigned long clock_ticks;
+  struct uv_cpu_times_s ts;
+  unsigned long user;
+  unsigned long nice;
+  unsigned long sys;
+  unsigned long idle;
+  unsigned long dummy;
+  unsigned long irq;
+  unsigned int num;
+  unsigned int len;
+  char buf[1024];
+  FILE* fp;
+
+  clock_ticks = sysconf(_SC_CLK_TCK);
+  assert(clock_ticks != (unsigned long) -1);
+  assert(clock_ticks != 0);
+
+  fp = fopen("/proc/stat", "r");
+  if (fp == NULL)
+    return -errno;
+
+  if (!fgets(buf, sizeof(buf), fp))
+    abort();
+
+  num = 0;
+
+  while (fgets(buf, sizeof(buf), fp)) {
+    if (num >= numcpus)
+      break;
+
+    if (strncmp(buf, "cpu", 3))
+      break;
+
+    /* skip "cpu<num> " marker */
+    {
+      unsigned int n;
+      int r = sscanf(buf, "cpu%u ", &n);
+      assert(r == 1);
+      (void) r;  /* silence build warning */
+      for (len = sizeof("cpu0"); n /= 10; len++);
+    }
+
+    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
+     * guest, guest_nice but we're only interested in the first four + irq.
+     *
+     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
+     * fields, they're not allowed in C89 mode.
+     */
+    if (6 != sscanf(buf + len,
+                    "%lu %lu %lu %lu %lu %lu",
+                    &user,
+                    &nice,
+                    &sys,
+                    &idle,
+                    &dummy,
+                    &irq))
+      abort();
+
+    ts.user = clock_ticks * user;
+    ts.nice = clock_ticks * nice;
+    ts.sys = clock_ticks * sys;
+    ts.idle = clock_ticks * idle;
+    ts.irq = clock_ticks * irq;
+    ci[num++].cpu_times = ts;
+  }
+  fclose(fp);
+  assert(num == numcpus);
+
+  return 0;
+}
+
+
+static unsigned long read_cpufreq(unsigned int cpunum) {
+  unsigned long val;
+  char buf[1024];
+  FILE* fp;
+
+  snprintf(buf,
+           sizeof(buf),
+           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
+           cpunum);
+
+  fp = fopen(buf, "r");
+  if (fp == NULL)
+    return 0;
+
+  if (fscanf(fp, "%lu", &val) != 1)
+    val = 0;
+
+  fclose(fp);
+
+  return val;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    free(cpu_infos[i].model);
+  }
+
+  free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses,
+                           int* count) {
+#ifndef HAVE_IFADDRS_H
+  return -ENOSYS;
+#else
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_ll *sll;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+  *addresses = NULL;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family == PF_PACKET)) {
+      continue;
+    }
+
+    (*count)++;
+  }
+
+  if (*count == 0)
+    return 0;
+
+  *addresses = malloc(*count * sizeof(**addresses));
+  if (!(*addresses))
+    return -ENOMEM;
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    /*
+     * On Linux getifaddrs returns information related to the raw underlying
+     * devices. We're not interested in this information yet.
+     */
+    if (ent->ifa_addr->sa_family == PF_PACKET)
+      continue;
+
+    address->name = strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != PF_PACKET)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sll = (struct sockaddr_ll*)ent->ifa_addr;
+        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+#endif
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+                                 int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    free(addresses[i].name);
+  }
+
+  free(addresses);
+}
+
+
+void uv__set_process_title(const char* title) {
+#if defined(PR_SET_NAME)
+  prctl(PR_SET_NAME, title);  /* Only copies first 16 characters. */
+#endif
+}