passenger 5.0.8 → 5.0.9


Potentially problematic release: this version of passenger might be problematic.

Files changed (168)
  1. checksums.yaml +8 -8
  2. checksums.yaml.gz.asc +7 -7
  3. data.tar.gz.asc +7 -7
  4. data/.editorconfig +20 -0
  5. data/CHANGELOG +21 -0
  6. data/bin/passenger-install-apache2-module +3 -1
  7. data/build/agents.rb +7 -5
  8. data/build/basics.rb +3 -3
  9. data/build/common_library.rb +52 -30
  10. data/build/cxx_tests.rb +20 -13
  11. data/build/misc.rb +5 -5
  12. data/doc/Design and Architecture.html +1 -1
  13. data/doc/Design and Architecture.txt +1 -1
  14. data/doc/Packaging.html +4 -4
  15. data/doc/Packaging.txt.md +4 -4
  16. data/doc/Users guide Apache.html +22 -9
  17. data/doc/Users guide Apache.idmap.txt +4 -2
  18. data/doc/Users guide Apache.txt +2 -0
  19. data/doc/Users guide Nginx.html +22 -9
  20. data/doc/Users guide Nginx.idmap.txt +4 -2
  21. data/doc/Users guide Nginx.txt +2 -0
  22. data/doc/Users guide Standalone.html +14 -9
  23. data/doc/Users guide Standalone.idmap.txt +4 -2
  24. data/doc/users_guide_snippets/installation.txt +10 -6
  25. data/ext/apache2/Hooks.cpp +13 -2
  26. data/ext/common/ApplicationPool2/Pool/Inspection.h +8 -3
  27. data/ext/common/BackgroundEventLoop.cpp +249 -67
  28. data/ext/common/BackgroundEventLoop.h +5 -5
  29. data/ext/common/Constants.h +1 -1
  30. data/ext/common/InstanceDirectory.h +8 -6
  31. data/ext/common/ServerKit/Context.h +8 -2
  32. data/ext/common/ServerKit/FileBufferedChannel.h +262 -226
  33. data/ext/common/ServerKit/HeaderTable.h +28 -3
  34. data/ext/common/ServerKit/HttpHeaderParser.h +37 -13
  35. data/ext/common/ServerKit/HttpServer.h +17 -1
  36. data/ext/common/ServerKit/Implementation.cpp +2 -0
  37. data/ext/common/ServerKit/Server.h +25 -28
  38. data/ext/common/Utils/IOUtils.cpp +11 -0
  39. data/ext/common/Utils/ProcessMetricsCollector.h +4 -0
  40. data/ext/common/Utils/StrIntUtils.cpp +11 -7
  41. data/ext/common/Utils/StrIntUtils.h +1 -1
  42. data/ext/common/Utils/StrIntUtilsNoStrictAliasing.cpp +21 -16
  43. data/ext/common/agents/Base.cpp +6 -0
  44. data/ext/common/agents/Base.h +2 -0
  45. data/ext/common/agents/HelperAgent/AdminServer.h +25 -25
  46. data/ext/common/agents/HelperAgent/Main.cpp +37 -12
  47. data/ext/common/agents/HelperAgent/RequestHandler.h +18 -20
  48. data/ext/common/agents/HelperAgent/RequestHandler/AppResponse.h +4 -0
  49. data/ext/common/agents/HelperAgent/RequestHandler/ForwardResponse.cpp +10 -6
  50. data/ext/common/agents/HelperAgent/RequestHandler/Hooks.cpp +2 -0
  51. data/ext/common/agents/HelperAgent/RequestHandler/InitRequest.cpp +1 -1
  52. data/ext/common/agents/HelperAgent/RequestHandler/SendRequest.cpp +1 -1
  53. data/ext/common/agents/HelperAgent/RequestHandler/Utils.cpp +9 -2
  54. data/ext/common/agents/HelperAgent/ResponseCache.h +11 -11
  55. data/ext/common/agents/LoggingAgent/AdminServer.h +8 -8
  56. data/ext/common/agents/LoggingAgent/Main.cpp +6 -5
  57. data/ext/common/agents/Watchdog/AdminServer.h +13 -13
  58. data/ext/common/agents/Watchdog/Main.cpp +8 -3
  59. data/ext/libuv/.gitignore +72 -0
  60. data/ext/libuv/AUTHORS +199 -0
  61. data/ext/libuv/ChangeLog +2023 -0
  62. data/ext/libuv/LICENSE +46 -0
  63. data/ext/libuv/Makefile.am +336 -0
  64. data/ext/libuv/README.md +197 -0
  65. data/ext/libuv/checksparse.sh +233 -0
  66. data/ext/libuv/common.gypi +210 -0
  67. data/ext/libuv/configure.ac +67 -0
  68. data/ext/libuv/gyp_uv.py +96 -0
  69. data/ext/libuv/include/android-ifaddrs.h +54 -0
  70. data/ext/libuv/include/pthread-fixes.h +72 -0
  71. data/ext/libuv/include/tree.h +768 -0
  72. data/ext/libuv/include/uv-aix.h +32 -0
  73. data/ext/libuv/include/uv-bsd.h +34 -0
  74. data/ext/libuv/include/uv-darwin.h +61 -0
  75. data/ext/libuv/include/uv-errno.h +418 -0
  76. data/ext/libuv/include/uv-linux.h +34 -0
  77. data/ext/libuv/include/uv-sunos.h +44 -0
  78. data/ext/libuv/include/uv-threadpool.h +37 -0
  79. data/ext/libuv/include/uv-unix.h +383 -0
  80. data/ext/libuv/include/uv-version.h +39 -0
  81. data/ext/libuv/include/uv.h +1455 -0
  82. data/ext/libuv/libuv.pc.in +11 -0
  83. data/ext/libuv/m4/.gitignore +4 -0
  84. data/ext/libuv/m4/as_case.m4 +21 -0
  85. data/ext/libuv/m4/libuv-check-flags.m4 +319 -0
  86. data/ext/libuv/src/fs-poll.c +255 -0
  87. data/ext/libuv/src/heap-inl.h +245 -0
  88. data/ext/libuv/src/inet.c +313 -0
  89. data/ext/libuv/src/queue.h +92 -0
  90. data/ext/libuv/src/threadpool.c +303 -0
  91. data/ext/libuv/src/unix/aix.c +1240 -0
  92. data/ext/libuv/src/unix/android-ifaddrs.c +703 -0
  93. data/ext/libuv/src/unix/async.c +284 -0
  94. data/ext/libuv/src/unix/atomic-ops.h +60 -0
  95. data/ext/libuv/src/unix/core.c +985 -0
  96. data/ext/libuv/src/unix/darwin-proctitle.c +206 -0
  97. data/ext/libuv/src/unix/darwin.c +331 -0
  98. data/ext/libuv/src/unix/dl.c +83 -0
  99. data/ext/libuv/src/unix/freebsd.c +435 -0
  100. data/ext/libuv/src/unix/fs.c +1189 -0
  101. data/ext/libuv/src/unix/fsevents.c +899 -0
  102. data/ext/libuv/src/unix/getaddrinfo.c +202 -0
  103. data/ext/libuv/src/unix/getnameinfo.c +120 -0
  104. data/ext/libuv/src/unix/internal.h +314 -0
  105. data/ext/libuv/src/unix/kqueue.c +418 -0
  106. data/ext/libuv/src/unix/linux-core.c +876 -0
  107. data/ext/libuv/src/unix/linux-inotify.c +257 -0
  108. data/ext/libuv/src/unix/linux-syscalls.c +471 -0
  109. data/ext/libuv/src/unix/linux-syscalls.h +158 -0
  110. data/ext/libuv/src/unix/loop-watcher.c +63 -0
  111. data/ext/libuv/src/unix/loop.c +135 -0
  112. data/ext/libuv/src/unix/netbsd.c +368 -0
  113. data/ext/libuv/src/unix/openbsd.c +384 -0
  114. data/ext/libuv/src/unix/pipe.c +288 -0
  115. data/ext/libuv/src/unix/poll.c +113 -0
  116. data/ext/libuv/src/unix/process.c +551 -0
  117. data/ext/libuv/src/unix/proctitle.c +102 -0
  118. data/ext/libuv/src/unix/pthread-fixes.c +103 -0
  119. data/ext/libuv/src/unix/signal.c +465 -0
  120. data/ext/libuv/src/unix/spinlock.h +53 -0
  121. data/ext/libuv/src/unix/stream.c +1598 -0
  122. data/ext/libuv/src/unix/sunos.c +763 -0
  123. data/ext/libuv/src/unix/tcp.c +327 -0
  124. data/ext/libuv/src/unix/thread.c +519 -0
  125. data/ext/libuv/src/unix/timer.c +172 -0
  126. data/ext/libuv/src/unix/tty.c +265 -0
  127. data/ext/libuv/src/unix/udp.c +833 -0
  128. data/ext/libuv/src/uv-common.c +544 -0
  129. data/ext/libuv/src/uv-common.h +214 -0
  130. data/ext/libuv/src/version.c +49 -0
  131. data/ext/libuv/uv.gyp +487 -0
  132. data/ext/nginx/ContentHandler.c +21 -10
  133. data/ext/nginx/ngx_http_passenger_module.c +7 -0
  134. data/ext/oxt/implementation.cpp +9 -2
  135. data/ext/oxt/initialize.hpp +5 -1
  136. data/lib/phusion_passenger.rb +3 -3
  137. data/lib/phusion_passenger/admin_tools/instance.rb +10 -6
  138. data/lib/phusion_passenger/admin_tools/instance_registry.rb +6 -2
  139. data/lib/phusion_passenger/packaging.rb +3 -4
  140. data/lib/phusion_passenger/platform_info.rb +13 -1
  141. data/lib/phusion_passenger/platform_info/apache.rb +15 -4
  142. data/lib/phusion_passenger/platform_info/apache_detector.rb +5 -1
  143. data/lib/phusion_passenger/rack/thread_handler_extension.rb +184 -99
  144. data/lib/phusion_passenger/request_handler/thread_handler.rb +13 -6
  145. data/lib/phusion_passenger/standalone/start_command.rb +2 -2
  146. data/resources/templates/apache2/apache_install_broken.txt.erb +2 -1
  147. metadata +99 -22
  148. metadata.gz.asc +7 -7
  149. data/ext/libeio/Changes +0 -76
  150. data/ext/libeio/LICENSE +0 -36
  151. data/ext/libeio/Makefile.am +0 -15
  152. data/ext/libeio/Makefile.in +0 -694
  153. data/ext/libeio/aclocal.m4 +0 -9418
  154. data/ext/libeio/autogen.sh +0 -3
  155. data/ext/libeio/config.guess +0 -1540
  156. data/ext/libeio/config.h.in +0 -136
  157. data/ext/libeio/config.sub +0 -1779
  158. data/ext/libeio/configure +0 -14822
  159. data/ext/libeio/configure.ac +0 -22
  160. data/ext/libeio/demo.c +0 -194
  161. data/ext/libeio/ecb.h +0 -714
  162. data/ext/libeio/eio.c +0 -2818
  163. data/ext/libeio/eio.h +0 -414
  164. data/ext/libeio/install-sh +0 -520
  165. data/ext/libeio/libeio.m4 +0 -195
  166. data/ext/libeio/ltmain.sh +0 -9636
  167. data/ext/libeio/missing +0 -376
  168. data/ext/libeio/xthread.h +0 -166
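
The headline change in this file list is an event-library swap: every bundled libeio source is deleted (all the `data/ext/libeio/*` entries drop to zero lines) and libuv is vendored in under `data/ext/libuv/`, with `data/ext/common/BackgroundEventLoop.cpp` (+249/-67) presumably reworked to drive the new library. For orientation, a minimal sketch of the libuv entry points the vendored copy exposes — illustrative only, not code from this gem:

```c
#include "uv.h"  /* vendored under data/ext/libuv as of 5.0.9 */

/* Smallest possible libuv program: fetch the process-wide default loop and
 * run it until no active handles or requests remain. Purely illustrative;
 * Passenger's own loop setup lives in BackgroundEventLoop.cpp. */
int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_run(loop, UV_RUN_DEFAULT);  /* blocks; returns when the loop is empty */
  return 0;
}
```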
data/ext/libuv/src/unix/spinlock.h (new file)
@@ -0,0 +1,53 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SPINLOCK_H_
+#define UV_SPINLOCK_H_
+
+#include "internal.h"  /* ACCESS_ONCE, UV_UNUSED */
+#include "atomic-ops.h"
+
+#define UV_SPINLOCK_INITIALIZER  { 0 }
+
+typedef struct {
+  int lock;
+} uv_spinlock_t;
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
+  ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
+  while (!uv_spinlock_trylock(spinlock)) cpu_relax();
+}
+
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
+  ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
+  /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
+   * Not really critical until we have locks that are (frequently) contended
+   * for by several threads.
+   */
+  return 0 == cmpxchgi(&spinlock->lock, 0, 1);
+}
+
+#endif  /* UV_SPINLOCK_H_ */
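
The spinlock header above is a self-contained test-and-set lock: `uv_spinlock_trylock()` attempts an atomic compare-and-swap of `lock` from 0 to 1 via `cmpxchgi()`, `uv_spinlock_lock()` spins on that with `cpu_relax()`, and unlock is a plain store through `ACCESS_ONCE`. A minimal usage sketch, assuming it is compiled inside libuv's `src/unix` tree where `internal.h` and `atomic-ops.h` resolve (the counter and helper below are illustrative, not part of the diff):

```c
#include "spinlock.h"

static uv_spinlock_t counter_lock = UV_SPINLOCK_INITIALIZER;
static unsigned long counter;  /* shared state guarded by counter_lock */

/* Illustrative helper: bump the shared counter under the spinlock. */
static void counter_increment(void) {
  uv_spinlock_lock(&counter_lock);    /* spins with cpu_relax() until owned */
  counter++;                          /* keep the critical section short */
  uv_spinlock_unlock(&counter_lock);  /* plain store resets lock to 0 */
}
```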
data/ext/libuv/src/unix/stream.c (new file)
@@ -0,0 +1,1598 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <limits.h> /* IOV_MAX */
+
+#if defined(__APPLE__)
+# include <sys/event.h>
+# include <sys/time.h>
+# include <sys/select.h>
+
+/* Forward declaration */
+typedef struct uv__stream_select_s uv__stream_select_t;
+
+struct uv__stream_select_s {
+  uv_stream_t* stream;
+  uv_thread_t thread;
+  uv_sem_t close_sem;
+  uv_sem_t async_sem;
+  uv_async_t async;
+  int events;
+  int fake_fd;
+  int int_fd;
+  int fd;
+  fd_set* sread;
+  size_t sread_sz;
+  fd_set* swrite;
+  size_t swrite_sz;
+};
+#endif /* defined(__APPLE__) */
+
+static void uv__stream_connect(uv_stream_t*);
+static void uv__write(uv_stream_t* stream);
+static void uv__read(uv_stream_t* stream);
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static void uv__write_callbacks(uv_stream_t* stream);
+static size_t uv__write_req_size(uv_write_t* req);
+
+
+void uv__stream_init(uv_loop_t* loop,
+                     uv_stream_t* stream,
+                     uv_handle_type type) {
+  int err;
+
+  uv__handle_init(loop, (uv_handle_t*)stream, type);
+  stream->read_cb = NULL;
+  stream->alloc_cb = NULL;
+  stream->close_cb = NULL;
+  stream->connection_cb = NULL;
+  stream->connect_req = NULL;
+  stream->shutdown_req = NULL;
+  stream->accepted_fd = -1;
+  stream->queued_fds = NULL;
+  stream->delayed_error = 0;
+  QUEUE_INIT(&stream->write_queue);
+  QUEUE_INIT(&stream->write_completed_queue);
+  stream->write_queue_size = 0;
+
+  if (loop->emfile_fd == -1) {
+    err = uv__open_cloexec("/dev/null", O_RDONLY);
+    if (err < 0)
+      /* In the rare case that "/dev/null" isn't mounted open "/"
+       * instead.
+       */
+      err = uv__open_cloexec("/", O_RDONLY);
+    if (err >= 0)
+      loop->emfile_fd = err;
+  }
+
+#if defined(__APPLE__)
+  stream->select = NULL;
+#endif /* defined(__APPLE_) */
+
+  uv__io_init(&stream->io_watcher, uv__stream_io, -1);
+}
+
+
+static void uv__stream_osx_interrupt_select(uv_stream_t* stream) {
+#if defined(__APPLE__)
+  /* Notify select() thread about state change */
+  uv__stream_select_t* s;
+  int r;
+
+  s = stream->select;
+  if (s == NULL)
+    return;
+
+  /* Interrupt select() loop
+   * NOTE: fake_fd and int_fd are socketpair(), thus writing to one will
+   * emit read event on other side
+   */
+  do
+    r = write(s->fake_fd, "x", 1);
+  while (r == -1 && errno == EINTR);
+
+  assert(r == 1);
+#else /* !defined(__APPLE__) */
+  /* No-op on any other platform */
+#endif /* !defined(__APPLE__) */
+}
+
+
+#if defined(__APPLE__)
+static void uv__stream_osx_select(void* arg) {
+  uv_stream_t* stream;
+  uv__stream_select_t* s;
+  char buf[1024];
+  int events;
+  int fd;
+  int r;
+  int max_fd;
+
+  stream = arg;
+  s = stream->select;
+  fd = s->fd;
+
+  if (fd > s->int_fd)
+    max_fd = fd;
+  else
+    max_fd = s->int_fd;
+
+  while (1) {
+    /* Terminate on semaphore */
+    if (uv_sem_trywait(&s->close_sem) == 0)
+      break;
+
+    /* Watch fd using select(2) */
+    memset(s->sread, 0, s->sread_sz);
+    memset(s->swrite, 0, s->swrite_sz);
+
+    if (uv__io_active(&stream->io_watcher, UV__POLLIN))
+      FD_SET(fd, s->sread);
+    if (uv__io_active(&stream->io_watcher, UV__POLLOUT))
+      FD_SET(fd, s->swrite);
+    FD_SET(s->int_fd, s->sread);
+
+    /* Wait indefinitely for fd events */
+    r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL);
+    if (r == -1) {
+      if (errno == EINTR)
+        continue;
+
+      /* XXX: Possible?! */
+      abort();
+    }
+
+    /* Ignore timeouts */
+    if (r == 0)
+      continue;
+
+    /* Empty socketpair's buffer in case of interruption */
+    if (FD_ISSET(s->int_fd, s->sread))
+      while (1) {
+        r = read(s->int_fd, buf, sizeof(buf));
+
+        if (r == sizeof(buf))
+          continue;
+
+        if (r != -1)
+          break;
+
+        if (errno == EAGAIN || errno == EWOULDBLOCK)
+          break;
+
+        if (errno == EINTR)
+          continue;
+
+        abort();
+      }
+
+    /* Handle events */
+    events = 0;
+    if (FD_ISSET(fd, s->sread))
+      events |= UV__POLLIN;
+    if (FD_ISSET(fd, s->swrite))
+      events |= UV__POLLOUT;
+
+    assert(events != 0 || FD_ISSET(s->int_fd, s->sread));
+    if (events != 0) {
+      ACCESS_ONCE(int, s->events) = events;
+
+      uv_async_send(&s->async);
+      uv_sem_wait(&s->async_sem);
+
+      /* Should be processed at this stage */
+      assert((s->events == 0) || (stream->flags & UV_CLOSING));
+    }
+  }
+}
+
+
+static void uv__stream_osx_select_cb(uv_async_t* handle) {
+  uv__stream_select_t* s;
+  uv_stream_t* stream;
+  int events;
+
+  s = container_of(handle, uv__stream_select_t, async);
+  stream = s->stream;
+
+  /* Get and reset stream's events */
+  events = s->events;
+  ACCESS_ONCE(int, s->events) = 0;
+  uv_sem_post(&s->async_sem);
+
+  assert(events != 0);
+  assert(events == (events & (UV__POLLIN | UV__POLLOUT)));
+
+  /* Invoke callback on event-loop */
+  if ((events & UV__POLLIN) && uv__io_active(&stream->io_watcher, UV__POLLIN))
+    uv__stream_io(stream->loop, &stream->io_watcher, UV__POLLIN);
+
+  if ((events & UV__POLLOUT) && uv__io_active(&stream->io_watcher, UV__POLLOUT))
+    uv__stream_io(stream->loop, &stream->io_watcher, UV__POLLOUT);
+}
+
+
+static void uv__stream_osx_cb_close(uv_handle_t* async) {
+  uv__stream_select_t* s;
+
+  s = container_of(async, uv__stream_select_t, async);
+  free(s);
+}
+
+
+int uv__stream_try_select(uv_stream_t* stream, int* fd) {
+  /*
+   * kqueue doesn't work with some files from /dev mount on osx.
+   * select(2) in separate thread for those fds
+   */
+
+  struct kevent filter[1];
+  struct kevent events[1];
+  struct timespec timeout;
+  uv__stream_select_t* s;
+  int fds[2];
+  int err;
+  int ret;
+  int kq;
+  int old_fd;
+  int max_fd;
+  size_t sread_sz;
+  size_t swrite_sz;
+
+  kq = kqueue();
+  if (kq == -1) {
+    perror("(libuv) kqueue()");
+    return -errno;
+  }
+
+  EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
+
+  /* Use small timeout, because we only want to capture EINVALs */
+  timeout.tv_sec = 0;
+  timeout.tv_nsec = 1;
+
+  ret = kevent(kq, filter, 1, events, 1, &timeout);
+  uv__close(kq);
+
+  if (ret == -1)
+    return -errno;
+
+  if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL)
+    return 0;
+
+  /* At this point we definitely know that this fd won't work with kqueue */
+
+  /*
+   * Create fds for io watcher and to interrupt the select() loop.
+   * NOTE: do it ahead of malloc below to allocate enough space for fd_sets
+   */
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+    return -errno;
+
+  max_fd = *fd;
+  if (fds[1] > max_fd)
+    max_fd = fds[1];
+
+  sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY;
+  swrite_sz = sread_sz;
+
+  s = malloc(sizeof(*s) + sread_sz + swrite_sz);
+  if (s == NULL) {
+    err = -ENOMEM;
+    goto failed_malloc;
+  }
+
+  s->events = 0;
+  s->fd = *fd;
+  s->sread = (fd_set*) ((char*) s + sizeof(*s));
+  s->sread_sz = sread_sz;
+  s->swrite = (fd_set*) ((char*) s->sread + sread_sz);
+  s->swrite_sz = swrite_sz;
+
+  err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb);
+  if (err)
+    goto failed_async_init;
+
+  s->async.flags |= UV__HANDLE_INTERNAL;
+  uv__handle_unref(&s->async);
+
+  err = uv_sem_init(&s->close_sem, 0);
+  if (err != 0)
+    goto failed_close_sem_init;
+
+  err = uv_sem_init(&s->async_sem, 0);
+  if (err != 0)
+    goto failed_async_sem_init;
+
+  s->fake_fd = fds[0];
+  s->int_fd = fds[1];
+
+  old_fd = *fd;
+  s->stream = stream;
+  stream->select = s;
+  *fd = s->fake_fd;
+
+  err = uv_thread_create(&s->thread, uv__stream_osx_select, stream);
+  if (err != 0)
+    goto failed_thread_create;
+
+  return 0;
+
+failed_thread_create:
+  s->stream = NULL;
+  stream->select = NULL;
+  *fd = old_fd;
+
+  uv_sem_destroy(&s->async_sem);
+
+failed_async_sem_init:
+  uv_sem_destroy(&s->close_sem);
+
+failed_close_sem_init:
+  uv__close(fds[0]);
+  uv__close(fds[1]);
+  uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+  return err;
+
+failed_async_init:
+  free(s);
+
+failed_malloc:
+  uv__close(fds[0]);
+  uv__close(fds[1]);
+
+  return err;
+}
+#endif /* defined(__APPLE__) */
+
+
+int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
+#if defined(__APPLE__)
+  int enable;
+#endif
+
+  assert(fd >= 0);
+  stream->flags |= flags;
+
+  if (stream->type == UV_TCP) {
+    if ((stream->flags & UV_TCP_NODELAY) && uv__tcp_nodelay(fd, 1))
+      return -errno;
+
+    /* TODO Use delay the user passed in. */
+    if ((stream->flags & UV_TCP_KEEPALIVE) && uv__tcp_keepalive(fd, 1, 60))
+      return -errno;
+  }
+
+#if defined(__APPLE__)
+  enable = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) &&
+      errno != ENOTSOCK &&
+      errno != EINVAL) {
+    return -errno;
+  }
+#endif
+
+  stream->io_watcher.fd = fd;
+
+  return 0;
+}
+
+
+void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
+  uv_write_t* req;
+  QUEUE* q;
+  while (!QUEUE_EMPTY(&stream->write_queue)) {
+    q = QUEUE_HEAD(&stream->write_queue);
+    QUEUE_REMOVE(q);
+
+    req = QUEUE_DATA(q, uv_write_t, queue);
+    req->error = error;
+
+    QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+  }
+}
+
+
+void uv__stream_destroy(uv_stream_t* stream) {
+  assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT));
+  assert(stream->flags & UV_CLOSED);
+
+  if (stream->connect_req) {
+    uv__req_unregister(stream->loop, stream->connect_req);
+    stream->connect_req->cb(stream->connect_req, -ECANCELED);
+    stream->connect_req = NULL;
+  }
+
+  uv__stream_flush_write_queue(stream, -ECANCELED);
+  uv__write_callbacks(stream);
+
+  if (stream->shutdown_req) {
+    /* The ECANCELED error code is a lie, the shutdown(2) syscall is a
+     * fait accompli at this point. Maybe we should revisit this in v0.11.
+     * A possible reason for leaving it unchanged is that it informs the
+     * callee that the handle has been destroyed.
+     */
+    uv__req_unregister(stream->loop, stream->shutdown_req);
+    stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED);
+    stream->shutdown_req = NULL;
+  }
+
+  assert(stream->write_queue_size == 0);
+}
+
+
+/* Implements a best effort approach to mitigating accept() EMFILE errors.
+ * We have a spare file descriptor stashed away that we close to get below
+ * the EMFILE limit. Next, we accept all pending connections and close them
+ * immediately to signal the clients that we're overloaded - and we are, but
+ * we still keep on trucking.
+ *
+ * There is one caveat: it's not reliable in a multi-threaded environment.
+ * The file descriptor limit is per process. Our party trick fails if another
+ * thread opens a file or creates a socket in the time window between us
+ * calling close() and accept().
+ */
+static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
+  int err;
+  int emfile_fd;
+
+  if (loop->emfile_fd == -1)
+    return -EMFILE;
+
+  uv__close(loop->emfile_fd);
+  loop->emfile_fd = -1;
+
+  do {
+    err = uv__accept(accept_fd);
+    if (err >= 0)
+      uv__close(err);
+  } while (err >= 0 || err == -EINTR);
+
+  emfile_fd = uv__open_cloexec("/", O_RDONLY);
+  if (emfile_fd >= 0)
+    loop->emfile_fd = emfile_fd;
+
+  return err;
+}
+
+
+#if defined(UV_HAVE_KQUEUE)
+# define UV_DEC_BACKLOG(w) w->rcount--;
+#else
+# define UV_DEC_BACKLOG(w) /* no-op */
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  uv_stream_t* stream;
+  int err;
+
+  stream = container_of(w, uv_stream_t, io_watcher);
+  assert(events == UV__POLLIN);
+  assert(stream->accepted_fd == -1);
+  assert(!(stream->flags & UV_CLOSING));
+
+  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLIN);
+
+  /* connection_cb can close the server socket while we're
+   * in the loop so check it on each iteration.
+   */
+  while (uv__stream_fd(stream) != -1) {
+    assert(stream->accepted_fd == -1);
+
+#if defined(UV_HAVE_KQUEUE)
+    if (w->rcount <= 0)
+      return;
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+    err = uv__accept(uv__stream_fd(stream));
+    if (err < 0) {
+      if (err == -EAGAIN || err == -EWOULDBLOCK)
+        return;  /* Not an error. */
+
+      if (err == -ECONNABORTED)
+        continue;  /* Ignore. Nothing we can do about that. */
+
+      if (err == -EMFILE || err == -ENFILE) {
+        err = uv__emfile_trick(loop, uv__stream_fd(stream));
+        if (err == -EAGAIN || err == -EWOULDBLOCK)
+          break;
+      }
+
+      stream->connection_cb(stream, err);
+      continue;
+    }
+
+    UV_DEC_BACKLOG(w)
+    stream->accepted_fd = err;
+    stream->connection_cb(stream, 0);
+
+    if (stream->accepted_fd != -1) {
+      /* The user hasn't yet accepted called uv_accept() */
+      uv__io_stop(loop, &stream->io_watcher, UV__POLLIN);
+      return;
+    }
+
+    if (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)) {
+      /* Give other processes a chance to accept connections. */
+      struct timespec timeout = { 0, 1 };
+      nanosleep(&timeout, NULL);
+    }
+  }
+}
+
+
+#undef UV_DEC_BACKLOG
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+  int err;
+
+  /* TODO document this */
+  assert(server->loop == client->loop);
+
+  if (server->accepted_fd == -1)
+    return -EAGAIN;
+
+  switch (client->type) {
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+      err = uv__stream_open(client,
+                            server->accepted_fd,
+                            UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+      if (err) {
+        /* TODO handle error */
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    case UV_UDP:
+      err = uv_udp_open((uv_udp_t*) client, server->accepted_fd);
+      if (err) {
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    default:
+      return -EINVAL;
+  }
+
+done:
+  /* Process queued fds */
+  if (server->queued_fds != NULL) {
+    uv__stream_queued_fds_t* queued_fds;
+
+    queued_fds = server->queued_fds;
+
+    /* Read first */
+    server->accepted_fd = queued_fds->fds[0];
+
+    /* All read, free */
+    assert(queued_fds->offset > 0);
+    if (--queued_fds->offset == 0) {
+      free(queued_fds);
+      server->queued_fds = NULL;
+    } else {
+      /* Shift rest */
+      memmove(queued_fds->fds,
+              queued_fds->fds + 1,
+              queued_fds->offset * sizeof(*queued_fds->fds));
+    }
+  } else {
+    server->accepted_fd = -1;
+    if (err == 0)
+      uv__io_start(server->loop, &server->io_watcher, UV__POLLIN);
+  }
+  return err;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+  int err;
+
+  switch (stream->type) {
+  case UV_TCP:
+    err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+    break;
+
+  case UV_NAMED_PIPE:
+    err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+    break;
+
+  default:
+    err = -EINVAL;
+  }
+
+  if (err == 0)
+    uv__handle_start(stream);
+
+  return err;
+}
+
+
+static void uv__drain(uv_stream_t* stream) {
+  uv_shutdown_t* req;
+  int err;
+
+  assert(QUEUE_EMPTY(&stream->write_queue));
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+  uv__stream_osx_interrupt_select(stream);
+
+  /* Shutdown? */
+  if ((stream->flags & UV_STREAM_SHUTTING) &&
+      !(stream->flags & UV_CLOSING) &&
+      !(stream->flags & UV_STREAM_SHUT)) {
+    assert(stream->shutdown_req);
+
+    req = stream->shutdown_req;
+    stream->shutdown_req = NULL;
+    stream->flags &= ~UV_STREAM_SHUTTING;
+    uv__req_unregister(stream->loop, req);
+
+    err = 0;
+    if (shutdown(uv__stream_fd(stream), SHUT_WR))
+      err = -errno;
+
+    if (err == 0)
+      stream->flags |= UV_STREAM_SHUT;
+
+    if (req->cb != NULL)
+      req->cb(req, err);
+  }
+}
+
+
+static size_t uv__write_req_size(uv_write_t* req) {
+  size_t size;
+
+  assert(req->bufs != NULL);
+  size = uv__count_bufs(req->bufs + req->write_index,
+                        req->nbufs - req->write_index);
+  assert(req->handle->write_queue_size >= size);
+
+  return size;
+}
+
+
+static void uv__write_req_finish(uv_write_t* req) {
+  uv_stream_t* stream = req->handle;
+
+  /* Pop the req off tcp->write_queue. */
+  QUEUE_REMOVE(&req->queue);
+
+  /* Only free when there was no error. On error, we touch up write_queue_size
+   * right before making the callback. The reason we don't do that right away
+   * is that a write_queue_size > 0 is our only way to signal to the user that
+   * they should stop writing - which they should if we got an error. Something
+   * to revisit in future revisions of the libuv API.
+   */
+  if (req->error == 0) {
+    if (req->bufs != req->bufsml)
+      free(req->bufs);
+    req->bufs = NULL;
+  }
+
+  /* Add it to the write_completed_queue where it will have its
+   * callback called in the near future.
+   */
+  QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+  uv__io_feed(stream->loop, &stream->io_watcher);
+}
+
+
+static int uv__handle_fd(uv_handle_t* handle) {
+  switch (handle->type) {
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+      return ((uv_stream_t*) handle)->io_watcher.fd;
+
+    case UV_UDP:
+      return ((uv_udp_t*) handle)->io_watcher.fd;
+
+    default:
+      return -1;
+  }
+}
+
+static int uv__getiovmax() {
+#if defined(IOV_MAX)
+  return IOV_MAX;
+#elif defined(_SC_IOV_MAX)
+  static int iovmax = -1;
+  if (iovmax == -1)
+    iovmax = sysconf(_SC_IOV_MAX);
+  return iovmax;
+#else
+  return 1024;
+#endif
+}
+
+static void uv__write(uv_stream_t* stream) {
+  struct iovec* iov;
+  QUEUE* q;
+  uv_write_t* req;
+  int iovmax;
+  int iovcnt;
+  ssize_t n;
+
+start:
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  if (QUEUE_EMPTY(&stream->write_queue))
+    return;
+
+  q = QUEUE_HEAD(&stream->write_queue);
+  req = QUEUE_DATA(q, uv_write_t, queue);
+  assert(req->handle == stream);
+
+  /*
+   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
+   * because Windows's WSABUF is not an iovec.
+   */
+  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
+  iov = (struct iovec*) &(req->bufs[req->write_index]);
+  iovcnt = req->nbufs - req->write_index;
+
+  iovmax = uv__getiovmax();
+
+  /* Limit iov count to avoid EINVALs from writev() */
+  if (iovcnt > iovmax)
+    iovcnt = iovmax;
+
+  /*
+   * Now do the actual writev. Note that we've been updating the pointers
+   * inside the iov each time we write. So there is no need to offset it.
+   */
+
+  if (req->send_handle) {
+    struct msghdr msg;
+    char scratch[64];
+    struct cmsghdr *cmsg;
+    int fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle);
+
+    assert(fd_to_send >= 0);
+
+    msg.msg_name = NULL;
+    msg.msg_namelen = 0;
+    msg.msg_iov = iov;
+    msg.msg_iovlen = iovcnt;
+    msg.msg_flags = 0;
+
+    msg.msg_control = (void*) scratch;
+    msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
+
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
+
+    /* silence aliasing warning */
+    {
+      void* pv = CMSG_DATA(cmsg);
+      int* pi = pv;
+      *pi = fd_to_send;
+    }
+
+    do {
+      n = sendmsg(uv__stream_fd(stream), &msg, 0);
+    }
+    while (n == -1 && errno == EINTR);
+  } else {
+    do {
+      if (iovcnt == 1) {
+        n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len);
+      } else {
+        n = writev(uv__stream_fd(stream), iov, iovcnt);
+      }
+    }
+    while (n == -1 && errno == EINTR);
+  }
+
+  if (n < 0) {
+    if (errno != EAGAIN && errno != EWOULDBLOCK) {
+      /* Error */
+      req->error = -errno;
+      uv__write_req_finish(req);
+      uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+      if (!uv__io_active(&stream->io_watcher, UV__POLLIN))
+        uv__handle_stop(stream);
+      uv__stream_osx_interrupt_select(stream);
+      return;
+    } else if (stream->flags & UV_STREAM_BLOCKING) {
+      /* If this is a blocking stream, try again. */
+      goto start;
+    }
+  } else {
+    /* Successful write */
+
+    while (n >= 0) {
+      uv_buf_t* buf = &(req->bufs[req->write_index]);
+      size_t len = buf->len;
+
+      assert(req->write_index < req->nbufs);
+
+      if ((size_t)n < len) {
+        buf->base += n;
+        buf->len -= n;
+        stream->write_queue_size -= n;
+        n = 0;
+
+        /* There is more to write. */
+        if (stream->flags & UV_STREAM_BLOCKING) {
+          /*
+           * If we're blocking then we should not be enabling the write
+           * watcher - instead we need to try again.
+           */
+          goto start;
+        } else {
+          /* Break loop and ensure the watcher is pending. */
+          break;
+        }
+
+      } else {
+        /* Finished writing the buf at index req->write_index. */
+        req->write_index++;
+
+        assert((size_t)n >= len);
+        n -= len;
+
+        assert(stream->write_queue_size >= len);
+        stream->write_queue_size -= len;
+
+        if (req->write_index == req->nbufs) {
+          /* Then we're done! */
+          assert(n == 0);
+          uv__write_req_finish(req);
+          /* TODO: start trying to write the next request. */
+          return;
+        }
+      }
+    }
+  }
+
+  /* Either we've counted n down to zero or we've got EAGAIN. */
+  assert(n == 0 || n == -1);
+
+  /* Only non-blocking streams should use the write_watcher. */
+  assert(!(stream->flags & UV_STREAM_BLOCKING));
+
+  /* We're not done. */
+  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
+
+  /* Notify select() thread about state change */
+  uv__stream_osx_interrupt_select(stream);
+}
+
+
+static void uv__write_callbacks(uv_stream_t* stream) {
+  uv_write_t* req;
+  QUEUE* q;
+
+  while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
+    /* Pop a req off write_completed_queue. */
+    q = QUEUE_HEAD(&stream->write_completed_queue);
+    req = QUEUE_DATA(q, uv_write_t, queue);
+    QUEUE_REMOVE(q);
+    uv__req_unregister(stream->loop, req);
+
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        free(req->bufs);
+      req->bufs = NULL;
+    }
+
+    /* NOTE: call callback AFTER freeing the request data. */
+    if (req->cb)
+      req->cb(req, req->error);
+  }
+
+  assert(QUEUE_EMPTY(&stream->write_completed_queue));
+}
+
+
+uv_handle_type uv__handle_type(int fd) {
+  struct sockaddr_storage ss;
+  socklen_t len;
+  int type;
+
+  memset(&ss, 0, sizeof(ss));
+  len = sizeof(ss);
+
+  if (getsockname(fd, (struct sockaddr*)&ss, &len))
+    return UV_UNKNOWN_HANDLE;
+
+  len = sizeof type;
+
+  if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
+    return UV_UNKNOWN_HANDLE;
+
+  if (type == SOCK_STREAM) {
+    switch (ss.ss_family) {
+      case AF_UNIX:
+        return UV_NAMED_PIPE;
+      case AF_INET:
+      case AF_INET6:
+        return UV_TCP;
+    }
+  }
+
+  if (type == SOCK_DGRAM &&
+      (ss.ss_family == AF_INET || ss.ss_family == AF_INET6))
+    return UV_UDP;
+
+  return UV_UNKNOWN_HANDLE;
+}
+
+
+static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
+  stream->flags |= UV_STREAM_READ_EOF;
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
+  if (!uv__io_active(&stream->io_watcher, UV__POLLOUT))
+    uv__handle_stop(stream);
+  uv__stream_osx_interrupt_select(stream);
+  stream->read_cb(stream, UV_EOF, buf);
+  stream->flags &= ~UV_STREAM_READING;
+}
+
+
+static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
+  uv__stream_queued_fds_t* queued_fds;
+  unsigned int queue_size;
+
+  queued_fds = stream->queued_fds;
+  if (queued_fds == NULL) {
+    queue_size = 8;
+    queued_fds = malloc((queue_size - 1) * sizeof(*queued_fds->fds) +
+                        sizeof(*queued_fds));
+    if (queued_fds == NULL)
+      return -ENOMEM;
+    queued_fds->size = queue_size;
+    queued_fds->offset = 0;
+    stream->queued_fds = queued_fds;
+
+    /* Grow */
+  } else if (queued_fds->size == queued_fds->offset) {
+    queue_size = queued_fds->size + 8;
+    queued_fds = realloc(queued_fds,
+                         (queue_size - 1) * sizeof(*queued_fds->fds) +
+                         sizeof(*queued_fds));
+
+    /*
+     * Allocation failure, report back.
+     * NOTE: if it is fatal - sockets will be closed in uv__stream_close
+     */
+    if (queued_fds == NULL)
+      return -ENOMEM;
+    queued_fds->size = queue_size;
+    stream->queued_fds = queued_fds;
+  }
+
+  /* Put fd in a queue */
+  queued_fds->fds[queued_fds->offset++] = fd;
+
+  return 0;
+}
+
+
+#define UV__CMSG_FD_COUNT 64
+#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
+
+
+static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
+  struct cmsghdr* cmsg;
+
+  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+    char* start;
+    char* end;
+    int err;
+    void* pv;
+    int* pi;
+    unsigned int i;
+    unsigned int count;
+
+    if (cmsg->cmsg_type != SCM_RIGHTS) {
+      fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
+              cmsg->cmsg_type);
+      continue;
+    }
+
+    /* silence aliasing warning */
+    pv = CMSG_DATA(cmsg);
+    pi = pv;
+
+    /* Count available fds */
+    start = (char*) cmsg;
+    end = (char*) cmsg + cmsg->cmsg_len;
+    count = 0;
+    while (start + CMSG_LEN(count * sizeof(*pi)) < end)
+      count++;
+    assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
+
+    for (i = 0; i < count; i++) {
+      /* Already has accepted fd, queue now */
+      if (stream->accepted_fd != -1) {
+        err = uv__stream_queue_fd(stream, pi[i]);
+        if (err != 0) {
+          /* Close rest */
+          for (; i < count; i++)
+            uv__close(pi[i]);
+          return err;
+        }
+      } else {
+        stream->accepted_fd = pi[i];
+      }
+    }
+  }
+
+  return 0;
+}
+
+
+static void uv__read(uv_stream_t* stream) {
+  uv_buf_t buf;
+  ssize_t nread;
+  struct msghdr msg;
+  char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
+  int count;
+  int err;
+  int is_ipc;
+
+  stream->flags &= ~UV_STREAM_READ_PARTIAL;
+
+  /* Prevent loop starvation when the data comes in as fast as (or faster than)
+   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+   */
+  count = 32;
+
+  is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc;
+
+  /* XXX: Maybe instead of having UV_STREAM_READING we just test if
+   * tcp->read_cb is NULL or not?
+   */
+  while (stream->read_cb
+      && (stream->flags & UV_STREAM_READING)
+      && (count-- > 0)) {
+    assert(stream->alloc_cb != NULL);
+
+    stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf);
+    if (buf.len == 0) {
+      /* User indicates it can't or won't handle the read. */
+      stream->read_cb(stream, UV_ENOBUFS, &buf);
+      return;
+    }
+
+    assert(buf.base != NULL);
+    assert(uv__stream_fd(stream) >= 0);
+
+    if (!is_ipc) {
+      do {
+        nread = read(uv__stream_fd(stream), buf.base, buf.len);
+      }
+      while (nread < 0 && errno == EINTR);
+    } else {
+      /* ipc uses recvmsg */
+      msg.msg_flags = 0;
+      msg.msg_iov = (struct iovec*) &buf;
+      msg.msg_iovlen = 1;
+      msg.msg_name = NULL;
+      msg.msg_namelen = 0;
+      /* Set up to receive a descriptor even if one isn't in the message */
+      msg.msg_controllen = sizeof(cmsg_space);
+      msg.msg_control = cmsg_space;
+
+      do {
+        nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+      }
+      while (nread < 0 && errno == EINTR);
+    }
+
+    if (nread < 0) {
+      /* Error */
+      if (errno == EAGAIN || errno == EWOULDBLOCK) {
+        /* Wait for the next one. */
+        if (stream->flags & UV_STREAM_READING) {
+          uv__io_start(stream->loop, &stream->io_watcher, UV__POLLIN);
+          uv__stream_osx_interrupt_select(stream);
+        }
+        stream->read_cb(stream, 0, &buf);
+      } else {
+        /* Error. User should call uv_close(). */
+        stream->read_cb(stream, -errno, &buf);
+        if (stream->flags & UV_STREAM_READING) {
+          stream->flags &= ~UV_STREAM_READING;
+          uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
+          if (!uv__io_active(&stream->io_watcher, UV__POLLOUT))
+            uv__handle_stop(stream);
+          uv__stream_osx_interrupt_select(stream);
+        }
+      }
+      return;
+    } else if (nread == 0) {
+      uv__stream_eof(stream, &buf);
+      return;
+    } else {
+      /* Successful read */
+      ssize_t buflen = buf.len;
+
+      if (is_ipc) {
+        err = uv__stream_recv_cmsg(stream, &msg);
+        if (err != 0) {
+          stream->read_cb(stream, err, &buf);
+          return;
+        }
+      }
+      stream->read_cb(stream, nread, &buf);
+
+      /* Return if we didn't fill the buffer, there is no more data to read. */
+      if (nread < buflen) {
+        stream->flags |= UV_STREAM_READ_PARTIAL;
+        return;
+      }
+    }
+  }
+}
+
+
+#undef UV__CMSG_FD_COUNT
+#undef UV__CMSG_FD_SIZE
+
+
+int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
+  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
+         "uv_shutdown (unix) only supports uv_handle_t right now");
+
+  if (!(stream->flags & UV_STREAM_WRITABLE) ||
+      stream->flags & UV_STREAM_SHUT ||
+      stream->flags & UV_STREAM_SHUTTING ||
+      stream->flags & UV_CLOSED ||
+      stream->flags & UV_CLOSING) {
+    return -ENOTCONN;
+  }
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  /* Initialize request */
+  uv__req_init(stream->loop, req, UV_SHUTDOWN);
+  req->handle = stream;
+  req->cb = cb;
+  stream->shutdown_req = req;
+  stream->flags |= UV_STREAM_SHUTTING;
+
+  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
+  uv__stream_osx_interrupt_select(stream);
+
+  return 0;
+}
+
+
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  uv_stream_t* stream;
+
+  stream = container_of(w, uv_stream_t, io_watcher);
+
+  assert(stream->type == UV_TCP ||
+         stream->type == UV_NAMED_PIPE ||
+         stream->type == UV_TTY);
+  assert(!(stream->flags & UV_CLOSING));
+
+  if (stream->connect_req) {
+    uv__stream_connect(stream);
+    return;
+  }
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  /* Ignore POLLHUP here. Even it it's set, there may still be data to read. */
+  if (events & (UV__POLLIN | UV__POLLERR | UV__POLLHUP))
+    uv__read(stream);
+
+  if (uv__stream_fd(stream) == -1)
+    return;  /* read_cb closed stream. */
+
+  /* Short-circuit iff POLLHUP is set, the user is still interested in read
+   * events and uv__read() reported a partial read but not EOF. If the EOF
+   * flag is set, uv__read() called read_cb with err=UV_EOF and we don't
+   * have to do anything. If the partial read flag is not set, we can't
+   * report the EOF yet because there is still data to read.
+   */
+  if ((events & UV__POLLHUP) &&
+      (stream->flags & UV_STREAM_READING) &&
+      (stream->flags & UV_STREAM_READ_PARTIAL) &&
+      !(stream->flags & UV_STREAM_READ_EOF)) {
+    uv_buf_t buf = { NULL, 0 };
+    uv__stream_eof(stream, &buf);
+  }
+
+  if (uv__stream_fd(stream) == -1)
+    return;  /* read_cb closed stream. */
+
+  if (events & (UV__POLLOUT | UV__POLLERR | UV__POLLHUP)) {
+    uv__write(stream);
+    uv__write_callbacks(stream);
+
+    /* Write queue drained. */
+    if (QUEUE_EMPTY(&stream->write_queue))
+      uv__drain(stream);
+  }
+}
+
+
+/**
+ * We get called here from directly following a call to connect(2).
+ * In order to determine if we've errored out or succeeded must call
+ * getsockopt.
+ */
+static void uv__stream_connect(uv_stream_t* stream) {
+  int error;
+  uv_connect_t* req = stream->connect_req;
+  socklen_t errorsize = sizeof(int);
+
+  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
+  assert(req);
+
+  if (stream->delayed_error) {
+    /* To smooth over the differences between unixes errors that
+     * were reported synchronously on the first connect can be delayed
+     * until the next tick--which is now.
+     */
+    error = stream->delayed_error;
+    stream->delayed_error = 0;
+  } else {
+    /* Normal situation: we need to get the socket error from the kernel. */
+    assert(uv__stream_fd(stream) >= 0);
+    getsockopt(uv__stream_fd(stream),
+               SOL_SOCKET,
+               SO_ERROR,
+               &error,
+               &errorsize);
+    error = -error;
+  }
+
+  if (error == -EINPROGRESS)
+    return;
+
+  stream->connect_req = NULL;
+  uv__req_unregister(stream->loop, req);
+
+  if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
+    uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+  }
+
+  if (req->cb)
+    req->cb(req, error);
+
+  if (uv__stream_fd(stream) == -1)
+    return;
+
+  if (error < 0) {
+    uv__stream_flush_write_queue(stream, -ECANCELED);
+    uv__write_callbacks(stream);
+  }
+}
+
+
+int uv_write2(uv_write_t* req,
+              uv_stream_t* stream,
+              const uv_buf_t bufs[],
+              unsigned int nbufs,
+              uv_stream_t* send_handle,
+              uv_write_cb cb) {
+  int empty_queue;
+
+  assert(nbufs > 0);
+  assert((stream->type == UV_TCP ||
+          stream->type == UV_NAMED_PIPE ||
+          stream->type == UV_TTY) &&
+         "uv_write (unix) does not yet support other types of streams");
+
+  if (uv__stream_fd(stream) < 0)
+    return -EBADF;
+
+  if (send_handle) {
+    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
+      return -EINVAL;
+
+    /* XXX We abuse uv_write2() to send over UDP handles to child processes.
+     * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
+     * evaluates to a function that operates on a uv_stream_t with a couple of
+     * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
+     * which works but only by accident.
+     */
+    if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
+      return -EBADF;
+  }
+
+  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+   * it means there are error-state requests in the write_completed_queue that
+   * will touch up write_queue_size later, see also uv__write_req_finish().
+   * We could check that write_queue is empty instead but that implies making
+   * a write() syscall when we know that the handle is in error mode.
+   */
+  empty_queue = (stream->write_queue_size == 0);
+
+  /* Initialize the req */
+  uv__req_init(stream->loop, req, UV_WRITE);
+  req->cb = cb;
+  req->handle = stream;
+  req->error = 0;
+  req->send_handle = send_handle;
+  QUEUE_INIT(&req->queue);
+
+  req->bufs = req->bufsml;
+  if (nbufs > ARRAY_SIZE(req->bufsml))
+    req->bufs = malloc(nbufs * sizeof(bufs[0]));
+
+  if (req->bufs == NULL)
+    return -ENOMEM;
+
+  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+  req->nbufs = nbufs;
+  req->write_index = 0;
+  stream->write_queue_size += uv__count_bufs(bufs, nbufs);
+
+  /* Append the request to write_queue. */
+  QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
+
+  /* If the queue was empty when this function began, we should attempt to
+   * do the write immediately. Otherwise start the write_watcher and wait
+   * for the fd to become writable.
+   */
+  if (stream->connect_req) {
+    /* Still connecting, do nothing. */
+  }
+  else if (empty_queue) {
+    uv__write(stream);
+  }
+  else {
+    /*
+     * blocking streams should never have anything in the queue.
+     * if this assert fires then somehow the blocking stream isn't being
+     * sufficiently flushed in uv__write.
+     */
+    assert(!(stream->flags & UV_STREAM_BLOCKING));
+    uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  return 0;
+}
+
+
+/* The buffers to be written must remain valid until the callback is called.
+ * This is not required for the uv_buf_t array.
+ */
+int uv_write(uv_write_t* req,
+             uv_stream_t* handle,
+             const uv_buf_t bufs[],
+             unsigned int nbufs,
+             uv_write_cb cb) {
+  return uv_write2(req, handle, bufs, nbufs, NULL, cb);
+}
+
+
+void uv_try_write_cb(uv_write_t* req, int status) {
+  /* Should not be called */
+  abort();
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+                 const uv_buf_t bufs[],
+                 unsigned int nbufs) {
+  int r;
+  int has_pollout;
+  size_t written;
+  size_t req_size;
+  uv_write_t req;
+
+  /* Connecting or already writing some data */
+  if (stream->connect_req != NULL || stream->write_queue_size != 0)
+    return -EAGAIN;
+
+  has_pollout = uv__io_active(&stream->io_watcher, UV__POLLOUT);
+
+  r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
+  if (r != 0)
+    return r;
+
+  /* Remove not written bytes from write queue size */
+  written = uv__count_bufs(bufs, nbufs);
+  if (req.bufs != NULL)
+    req_size = uv__write_req_size(&req);
+  else
+    req_size = 0;
+  written -= req_size;
+  stream->write_queue_size -= req_size;
+
+  /* Unqueue request, regardless of immediateness */
+  QUEUE_REMOVE(&req.queue);
+  uv__req_unregister(stream->loop, &req);
+  if (req.bufs != req.bufsml)
+    free(req.bufs);
+  req.bufs = NULL;
+
+  /* Do not poll for writable, if we wasn't before calling this */
+  if (!has_pollout) {
+    uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  if (written == 0)
+    return -EAGAIN;
+  else
+    return written;
+}
+
+
+int uv_read_start(uv_stream_t* stream,
+                  uv_alloc_cb alloc_cb,
+                  uv_read_cb read_cb) {
+  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
+      stream->type == UV_TTY);
+
+  if (stream->flags & UV_CLOSING)
+    return -EINVAL;
+
+  /* The UV_STREAM_READING flag is irrelevant of the state of the tcp - it just
+   * expresses the desired state of the user.
+   */
+  stream->flags |= UV_STREAM_READING;
+
+  /* TODO: try to do the read inline? */
+  /* TODO: keep track of tcp state. If we've gotten a EOF then we should
+   * not start the IO watcher.
+   */
+  assert(uv__stream_fd(stream) >= 0);
+  assert(alloc_cb);
+
+  stream->read_cb = read_cb;
+  stream->alloc_cb = alloc_cb;
+
+  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLIN);
+  uv__handle_start(stream);
+  uv__stream_osx_interrupt_select(stream);
+
+  return 0;
+}
+
+
+int uv_read_stop(uv_stream_t* stream) {
+  if (!(stream->flags & UV_STREAM_READING))
+    return 0;
+
+  stream->flags &= ~UV_STREAM_READING;
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
+  if (!uv__io_active(&stream->io_watcher, UV__POLLOUT))
+    uv__handle_stop(stream);
+  uv__stream_osx_interrupt_select(stream);
+
+  stream->read_cb = NULL;
+  stream->alloc_cb = NULL;
+  return 0;
+}
+
+
+int uv_is_readable(const uv_stream_t* stream) {
+  return !!(stream->flags & UV_STREAM_READABLE);
+}
+
+
+int uv_is_writable(const uv_stream_t* stream) {
+  return !!(stream->flags & UV_STREAM_WRITABLE);
+}
+
+
+#if defined(__APPLE__)
+int uv___stream_fd(const uv_stream_t* handle) {
+  const uv__stream_select_t* s;
+
+  assert(handle->type == UV_TCP ||
+         handle->type == UV_TTY ||
+         handle->type == UV_NAMED_PIPE);
+
+  s = handle->select;
+  if (s != NULL)
+    return s->fd;
+
+  return handle->io_watcher.fd;
+}
+#endif /* defined(__APPLE__) */
+
+
+void uv__stream_close(uv_stream_t* handle) {
+  unsigned int i;
+  uv__stream_queued_fds_t* queued_fds;
+
+#if defined(__APPLE__)
+  /* Terminate select loop first */
+  if (handle->select != NULL) {
+    uv__stream_select_t* s;
+
+    s = handle->select;
+
+    uv_sem_post(&s->close_sem);
+    uv_sem_post(&s->async_sem);
+    uv__stream_osx_interrupt_select(handle);
+    uv_thread_join(&s->thread);
+    uv_sem_destroy(&s->close_sem);
+    uv_sem_destroy(&s->async_sem);
+    uv__close(s->fake_fd);
+    uv__close(s->int_fd);
+    uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+
+    handle->select = NULL;
+  }
+#endif /* defined(__APPLE__) */
+
+  uv__io_close(handle->loop, &handle->io_watcher);
+  uv_read_stop(handle);
+  uv__handle_stop(handle);
+
+  if (handle->io_watcher.fd != -1) {
+    /* Don't close stdio file descriptors. Nothing good comes from it. */
+    if (handle->io_watcher.fd > STDERR_FILENO)
+      uv__close(handle->io_watcher.fd);
+    handle->io_watcher.fd = -1;
+  }
+
+  if (handle->accepted_fd != -1) {
+    uv__close(handle->accepted_fd);
+    handle->accepted_fd = -1;
+  }
+
+  /* Close all queued fds */
+  if (handle->queued_fds != NULL) {
+    queued_fds = handle->queued_fds;
+    for (i = 0; i < queued_fds->offset; i++)
+      uv__close(queued_fds->fds[i]);
+    free(handle->queued_fds);
+    handle->queued_fds = NULL;
+  }
+
+  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
+}
+
+
+int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
+  /* Don't need to check the file descriptor, uv__nonblock()
+   * will fail with EBADF if it's not valid.
+   */
+  return uv__nonblock(uv__stream_fd(handle), !blocking);
+}
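
The write path in `stream.c` above queues a `uv_write_t` on `write_queue` (`uv_write2()`), flushes it with `writev(2)` or `sendmsg(2)` in `uv__write()`, and fires completion callbacks from `uv__write_callbacks()` with `req->error` (0 or a negated errno) as the status. A minimal caller-side sketch against the public API shown in this file; the `on_write` and `send_hello` names are illustrative, and error handling is trimmed:

```c
#include <stdio.h>
#include "uv.h"

/* Illustrative callback: invoked from uv__write_callbacks() once the request
 * reaches the write_completed_queue; status is 0 or a negated errno. */
static void on_write(uv_write_t* req, int status) {
  if (status < 0)
    fprintf(stderr, "write error: %s\n", uv_strerror(status));
}

/* Queue one buffer on an already-connected stream. The bytes must stay valid
 * until on_write fires; the uv_buf_t array itself may be stack-local. */
static int send_hello(uv_stream_t* stream, uv_write_t* req) {
  static char data[] = "hello\n";
  uv_buf_t buf = uv_buf_init(data, sizeof(data) - 1);
  return uv_write(req, stream, &buf, 1, on_write);
}
```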