foolio 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +22 -0
- data/Gemfile +4 -0
- data/LICENSE +22 -0
- data/README.md +29 -0
- data/Rakefile +21 -0
- data/examples/timer.rb +20 -0
- data/ext/foolio/extconf.rb +34 -0
- data/ext/foolio/foolio_ext.c +921 -0
- data/ext/foolio/gen.rb +50 -0
- data/ext/foolio/make_table.rb +12 -0
- data/ext/foolio/templ +243 -0
- data/ext/libuv/.gitignore +33 -0
- data/ext/libuv/.mailmap +13 -0
- data/ext/libuv/.travis.yml +9 -0
- data/ext/libuv/AUTHORS +61 -0
- data/ext/libuv/LICENSE +44 -0
- data/ext/libuv/Makefile +71 -0
- data/ext/libuv/README.md +90 -0
- data/ext/libuv/common.gypi +178 -0
- data/ext/libuv/gyp_uv +73 -0
- data/ext/libuv/include/uv-private/eio.h +403 -0
- data/ext/libuv/include/uv-private/ev.h +838 -0
- data/ext/libuv/include/uv-private/ngx-queue.h +108 -0
- data/ext/libuv/include/uv-private/tree.h +768 -0
- data/ext/libuv/include/uv-private/uv-unix.h +324 -0
- data/ext/libuv/include/uv-private/uv-win.h +517 -0
- data/ext/libuv/include/uv.h +1838 -0
- data/ext/libuv/src/fs-poll.c +235 -0
- data/ext/libuv/src/inet.c +293 -0
- data/ext/libuv/src/unix/async.c +148 -0
- data/ext/libuv/src/unix/core.c +696 -0
- data/ext/libuv/src/unix/cygwin.c +83 -0
- data/ext/libuv/src/unix/darwin.c +342 -0
- data/ext/libuv/src/unix/dl.c +83 -0
- data/ext/libuv/src/unix/eio/Changes +63 -0
- data/ext/libuv/src/unix/eio/LICENSE +36 -0
- data/ext/libuv/src/unix/eio/Makefile.am +15 -0
- data/ext/libuv/src/unix/eio/aclocal.m4 +8957 -0
- data/ext/libuv/src/unix/eio/autogen.sh +3 -0
- data/ext/libuv/src/unix/eio/config.h.in +86 -0
- data/ext/libuv/src/unix/eio/config_cygwin.h +80 -0
- data/ext/libuv/src/unix/eio/config_darwin.h +141 -0
- data/ext/libuv/src/unix/eio/config_freebsd.h +81 -0
- data/ext/libuv/src/unix/eio/config_linux.h +94 -0
- data/ext/libuv/src/unix/eio/config_netbsd.h +81 -0
- data/ext/libuv/src/unix/eio/config_openbsd.h +137 -0
- data/ext/libuv/src/unix/eio/config_sunos.h +84 -0
- data/ext/libuv/src/unix/eio/configure.ac +22 -0
- data/ext/libuv/src/unix/eio/demo.c +194 -0
- data/ext/libuv/src/unix/eio/ecb.h +370 -0
- data/ext/libuv/src/unix/eio/eio.3 +3428 -0
- data/ext/libuv/src/unix/eio/eio.c +2593 -0
- data/ext/libuv/src/unix/eio/eio.pod +969 -0
- data/ext/libuv/src/unix/eio/libeio.m4 +195 -0
- data/ext/libuv/src/unix/eio/xthread.h +164 -0
- data/ext/libuv/src/unix/error.c +105 -0
- data/ext/libuv/src/unix/ev/Changes +388 -0
- data/ext/libuv/src/unix/ev/LICENSE +36 -0
- data/ext/libuv/src/unix/ev/Makefile.am +18 -0
- data/ext/libuv/src/unix/ev/Makefile.in +771 -0
- data/ext/libuv/src/unix/ev/README +58 -0
- data/ext/libuv/src/unix/ev/aclocal.m4 +8957 -0
- data/ext/libuv/src/unix/ev/autogen.sh +6 -0
- data/ext/libuv/src/unix/ev/config.guess +1526 -0
- data/ext/libuv/src/unix/ev/config.h.in +125 -0
- data/ext/libuv/src/unix/ev/config.sub +1658 -0
- data/ext/libuv/src/unix/ev/config_cygwin.h +123 -0
- data/ext/libuv/src/unix/ev/config_darwin.h +122 -0
- data/ext/libuv/src/unix/ev/config_freebsd.h +120 -0
- data/ext/libuv/src/unix/ev/config_linux.h +141 -0
- data/ext/libuv/src/unix/ev/config_netbsd.h +120 -0
- data/ext/libuv/src/unix/ev/config_openbsd.h +126 -0
- data/ext/libuv/src/unix/ev/config_sunos.h +122 -0
- data/ext/libuv/src/unix/ev/configure +13037 -0
- data/ext/libuv/src/unix/ev/configure.ac +18 -0
- data/ext/libuv/src/unix/ev/depcomp +630 -0
- data/ext/libuv/src/unix/ev/ev++.h +816 -0
- data/ext/libuv/src/unix/ev/ev.3 +5311 -0
- data/ext/libuv/src/unix/ev/ev.c +3925 -0
- data/ext/libuv/src/unix/ev/ev.pod +5243 -0
- data/ext/libuv/src/unix/ev/ev_epoll.c +266 -0
- data/ext/libuv/src/unix/ev/ev_kqueue.c +235 -0
- data/ext/libuv/src/unix/ev/ev_poll.c +148 -0
- data/ext/libuv/src/unix/ev/ev_port.c +179 -0
- data/ext/libuv/src/unix/ev/ev_select.c +310 -0
- data/ext/libuv/src/unix/ev/ev_vars.h +203 -0
- data/ext/libuv/src/unix/ev/ev_win32.c +153 -0
- data/ext/libuv/src/unix/ev/ev_wrap.h +196 -0
- data/ext/libuv/src/unix/ev/event.c +402 -0
- data/ext/libuv/src/unix/ev/event.h +170 -0
- data/ext/libuv/src/unix/ev/install-sh +294 -0
- data/ext/libuv/src/unix/ev/libev.m4 +39 -0
- data/ext/libuv/src/unix/ev/ltmain.sh +8413 -0
- data/ext/libuv/src/unix/ev/missing +336 -0
- data/ext/libuv/src/unix/ev/mkinstalldirs +111 -0
- data/ext/libuv/src/unix/freebsd.c +326 -0
- data/ext/libuv/src/unix/fs.c +739 -0
- data/ext/libuv/src/unix/internal.h +188 -0
- data/ext/libuv/src/unix/kqueue.c +120 -0
- data/ext/libuv/src/unix/linux/inotify.c +239 -0
- data/ext/libuv/src/unix/linux/linux-core.c +557 -0
- data/ext/libuv/src/unix/linux/syscalls.c +388 -0
- data/ext/libuv/src/unix/linux/syscalls.h +124 -0
- data/ext/libuv/src/unix/loop-watcher.c +62 -0
- data/ext/libuv/src/unix/loop.c +94 -0
- data/ext/libuv/src/unix/netbsd.c +108 -0
- data/ext/libuv/src/unix/openbsd.c +295 -0
- data/ext/libuv/src/unix/pipe.c +259 -0
- data/ext/libuv/src/unix/poll.c +114 -0
- data/ext/libuv/src/unix/process.c +495 -0
- data/ext/libuv/src/unix/signal.c +269 -0
- data/ext/libuv/src/unix/stream.c +990 -0
- data/ext/libuv/src/unix/sunos.c +481 -0
- data/ext/libuv/src/unix/tcp.c +393 -0
- data/ext/libuv/src/unix/thread.c +251 -0
- data/ext/libuv/src/unix/timer.c +136 -0
- data/ext/libuv/src/unix/tty.c +145 -0
- data/ext/libuv/src/unix/udp.c +659 -0
- data/ext/libuv/src/unix/uv-eio.c +107 -0
- data/ext/libuv/src/unix/uv-eio.h +13 -0
- data/ext/libuv/src/uv-common.c +380 -0
- data/ext/libuv/src/uv-common.h +170 -0
- data/ext/libuv/src/win/async.c +100 -0
- data/ext/libuv/src/win/atomicops-inl.h +56 -0
- data/ext/libuv/src/win/core.c +278 -0
- data/ext/libuv/src/win/dl.c +86 -0
- data/ext/libuv/src/win/error.c +155 -0
- data/ext/libuv/src/win/fs-event.c +510 -0
- data/ext/libuv/src/win/fs.c +1948 -0
- data/ext/libuv/src/win/getaddrinfo.c +365 -0
- data/ext/libuv/src/win/handle-inl.h +149 -0
- data/ext/libuv/src/win/handle.c +154 -0
- data/ext/libuv/src/win/internal.h +343 -0
- data/ext/libuv/src/win/loop-watcher.c +122 -0
- data/ext/libuv/src/win/pipe.c +1672 -0
- data/ext/libuv/src/win/poll.c +616 -0
- data/ext/libuv/src/win/process-stdio.c +500 -0
- data/ext/libuv/src/win/process.c +1013 -0
- data/ext/libuv/src/win/req-inl.h +220 -0
- data/ext/libuv/src/win/req.c +25 -0
- data/ext/libuv/src/win/signal.c +57 -0
- data/ext/libuv/src/win/stream-inl.h +67 -0
- data/ext/libuv/src/win/stream.c +167 -0
- data/ext/libuv/src/win/tcp.c +1394 -0
- data/ext/libuv/src/win/thread.c +372 -0
- data/ext/libuv/src/win/threadpool.c +74 -0
- data/ext/libuv/src/win/timer.c +224 -0
- data/ext/libuv/src/win/tty.c +1799 -0
- data/ext/libuv/src/win/udp.c +716 -0
- data/ext/libuv/src/win/util.c +864 -0
- data/ext/libuv/src/win/winapi.c +132 -0
- data/ext/libuv/src/win/winapi.h +4452 -0
- data/ext/libuv/src/win/winsock.c +557 -0
- data/ext/libuv/src/win/winsock.h +171 -0
- data/ext/libuv/test/benchmark-async-pummel.c +97 -0
- data/ext/libuv/test/benchmark-async.c +137 -0
- data/ext/libuv/test/benchmark-fs-stat.c +135 -0
- data/ext/libuv/test/benchmark-getaddrinfo.c +94 -0
- data/ext/libuv/test/benchmark-list.h +127 -0
- data/ext/libuv/test/benchmark-loop-count.c +88 -0
- data/ext/libuv/test/benchmark-million-timers.c +65 -0
- data/ext/libuv/test/benchmark-ping-pongs.c +213 -0
- data/ext/libuv/test/benchmark-pound.c +324 -0
- data/ext/libuv/test/benchmark-pump.c +462 -0
- data/ext/libuv/test/benchmark-sizes.c +44 -0
- data/ext/libuv/test/benchmark-spawn.c +162 -0
- data/ext/libuv/test/benchmark-tcp-write-batch.c +140 -0
- data/ext/libuv/test/benchmark-thread.c +64 -0
- data/ext/libuv/test/benchmark-udp-packet-storm.c +247 -0
- data/ext/libuv/test/blackhole-server.c +118 -0
- data/ext/libuv/test/dns-server.c +321 -0
- data/ext/libuv/test/echo-server.c +378 -0
- data/ext/libuv/test/fixtures/empty_file +0 -0
- data/ext/libuv/test/fixtures/load_error.node +1 -0
- data/ext/libuv/test/run-benchmarks.c +64 -0
- data/ext/libuv/test/run-tests.c +138 -0
- data/ext/libuv/test/runner-unix.c +295 -0
- data/ext/libuv/test/runner-unix.h +36 -0
- data/ext/libuv/test/runner-win.c +285 -0
- data/ext/libuv/test/runner-win.h +42 -0
- data/ext/libuv/test/runner.c +355 -0
- data/ext/libuv/test/runner.h +159 -0
- data/ext/libuv/test/task.h +112 -0
- data/ext/libuv/test/test-async.c +118 -0
- data/ext/libuv/test/test-callback-order.c +76 -0
- data/ext/libuv/test/test-callback-stack.c +203 -0
- data/ext/libuv/test/test-connection-fail.c +148 -0
- data/ext/libuv/test/test-cwd-and-chdir.c +64 -0
- data/ext/libuv/test/test-delayed-accept.c +188 -0
- data/ext/libuv/test/test-dlerror.c +58 -0
- data/ext/libuv/test/test-error.c +59 -0
- data/ext/libuv/test/test-fail-always.c +29 -0
- data/ext/libuv/test/test-fs-event.c +474 -0
- data/ext/libuv/test/test-fs-poll.c +146 -0
- data/ext/libuv/test/test-fs.c +1843 -0
- data/ext/libuv/test/test-get-currentexe.c +63 -0
- data/ext/libuv/test/test-get-loadavg.c +36 -0
- data/ext/libuv/test/test-get-memory.c +38 -0
- data/ext/libuv/test/test-getaddrinfo.c +122 -0
- data/ext/libuv/test/test-getsockname.c +342 -0
- data/ext/libuv/test/test-hrtime.c +54 -0
- data/ext/libuv/test/test-idle.c +81 -0
- data/ext/libuv/test/test-ipc-send-recv.c +209 -0
- data/ext/libuv/test/test-ipc.c +620 -0
- data/ext/libuv/test/test-list.h +427 -0
- data/ext/libuv/test/test-loop-handles.c +336 -0
- data/ext/libuv/test/test-multiple-listen.c +102 -0
- data/ext/libuv/test/test-mutexes.c +63 -0
- data/ext/libuv/test/test-pass-always.c +28 -0
- data/ext/libuv/test/test-ping-pong.c +253 -0
- data/ext/libuv/test/test-pipe-bind-error.c +140 -0
- data/ext/libuv/test/test-pipe-connect-error.c +96 -0
- data/ext/libuv/test/test-platform-output.c +87 -0
- data/ext/libuv/test/test-poll-close.c +72 -0
- data/ext/libuv/test/test-poll.c +573 -0
- data/ext/libuv/test/test-process-title.c +49 -0
- data/ext/libuv/test/test-ref.c +338 -0
- data/ext/libuv/test/test-run-once.c +48 -0
- data/ext/libuv/test/test-semaphore.c +111 -0
- data/ext/libuv/test/test-shutdown-close.c +103 -0
- data/ext/libuv/test/test-shutdown-eof.c +183 -0
- data/ext/libuv/test/test-signal.c +162 -0
- data/ext/libuv/test/test-spawn.c +863 -0
- data/ext/libuv/test/test-stdio-over-pipes.c +246 -0
- data/ext/libuv/test/test-tcp-bind-error.c +191 -0
- data/ext/libuv/test/test-tcp-bind6-error.c +154 -0
- data/ext/libuv/test/test-tcp-close-while-connecting.c +80 -0
- data/ext/libuv/test/test-tcp-close.c +129 -0
- data/ext/libuv/test/test-tcp-connect-error-after-write.c +95 -0
- data/ext/libuv/test/test-tcp-connect-error.c +70 -0
- data/ext/libuv/test/test-tcp-connect-timeout.c +85 -0
- data/ext/libuv/test/test-tcp-connect6-error.c +68 -0
- data/ext/libuv/test/test-tcp-flags.c +51 -0
- data/ext/libuv/test/test-tcp-shutdown-after-write.c +131 -0
- data/ext/libuv/test/test-tcp-unexpected-read.c +113 -0
- data/ext/libuv/test/test-tcp-write-error.c +168 -0
- data/ext/libuv/test/test-tcp-write-to-half-open-connection.c +135 -0
- data/ext/libuv/test/test-tcp-writealot.c +170 -0
- data/ext/libuv/test/test-thread.c +183 -0
- data/ext/libuv/test/test-threadpool.c +57 -0
- data/ext/libuv/test/test-timer-again.c +141 -0
- data/ext/libuv/test/test-timer.c +152 -0
- data/ext/libuv/test/test-tty.c +110 -0
- data/ext/libuv/test/test-udp-dgram-too-big.c +86 -0
- data/ext/libuv/test/test-udp-ipv6.c +156 -0
- data/ext/libuv/test/test-udp-multicast-join.c +139 -0
- data/ext/libuv/test/test-udp-multicast-ttl.c +86 -0
- data/ext/libuv/test/test-udp-options.c +86 -0
- data/ext/libuv/test/test-udp-send-and-recv.c +208 -0
- data/ext/libuv/test/test-util.c +97 -0
- data/ext/libuv/test/test-walk-handles.c +77 -0
- data/ext/libuv/uv.gyp +375 -0
- data/ext/libuv/vcbuild.bat +105 -0
- data/foolio.gemspec +18 -0
- data/lib/foolio.rb +9 -0
- data/lib/foolio/handle.rb +27 -0
- data/lib/foolio/listener.rb +26 -0
- data/lib/foolio/loop.rb +79 -0
- data/lib/foolio/stream.rb +109 -0
- data/lib/foolio/version.rb +3 -0
- metadata +309 -0
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
|
2
|
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
3
|
+
* of this software and associated documentation files (the "Software"), to
|
|
4
|
+
* deal in the Software without restriction, including without limitation the
|
|
5
|
+
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
6
|
+
* sell copies of the Software, and to permit persons to whom the Software is
|
|
7
|
+
* furnished to do so, subject to the following conditions:
|
|
8
|
+
*
|
|
9
|
+
* The above copyright notice and this permission notice shall be included in
|
|
10
|
+
* all copies or substantial portions of the Software.
|
|
11
|
+
*
|
|
12
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
13
|
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
14
|
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
15
|
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
16
|
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
17
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
18
|
+
* IN THE SOFTWARE.
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
#include "uv.h"
|
|
22
|
+
#include "internal.h"
|
|
23
|
+
|
|
24
|
+
#include <assert.h>
|
|
25
|
+
#include <stdlib.h>
|
|
26
|
+
#include <string.h>
|
|
27
|
+
#include <signal.h>
|
|
28
|
+
#include <unistd.h>
|
|
29
|
+
#include <errno.h>
|
|
30
|
+
|
|
31
|
+
/* Per-loop bookkeeping shared by every active uv_signal_t watcher.
 * A self-pipe carries raw signal numbers out of the asynchronous signal
 * handler and into the event loop, where uv__signal_event() dispatches
 * them to the watching handles.
 */
struct signal_ctx {
  int pipefd[2];         /* self-pipe: [0] read end, [1] write end */
  uv__io_t io_watcher;   /* loop watcher on pipefd[0] */
  unsigned int nqueues;  /* highest usable signal number */
  ngx_queue_t queues[1]; /* variable length; one handle queue per signum.
                          * Indexed by raw signum, so entry 0 is unused
                          * (see comment in uv__signal_ctx_new()). */
};
|
|
37
|
+
|
|
38
|
+
static void uv__signal_handler(int signum);
|
|
39
|
+
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, int events);
|
|
40
|
+
static struct signal_ctx* uv__signal_ctx_new(uv_loop_t* loop);
|
|
41
|
+
static void uv__signal_ctx_delete(struct signal_ctx* ctx);
|
|
42
|
+
static void uv__signal_write(int fd, unsigned int val);
|
|
43
|
+
static unsigned int uv__signal_read(int fd);
|
|
44
|
+
static unsigned int uv__signal_max(void);
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
/* Initialize a signal handle on `loop`. No OS signal handler is
 * installed until uv_signal_start() is called. Always returns 0.
 */
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_SIGNAL);
  handle->signum = 0;  /* 0 == not watching any signal yet */
  return 0;
}
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
/* Start watching signal `signum_`; `signal_cb` runs on the loop thread.
 *
 * Returns 0 on success or -1 with the loop error set:
 *   UV_EBUSY  - the handle is already active
 *   UV_EINVAL - no callback, non-positive/out-of-range signum, or
 *               sigaction() refused the signal
 *   UV_ENOMEM - allocating the shared signal context failed
 */
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum_) {
  struct signal_ctx* ctx;
  struct sigaction sa;
  unsigned int signum;
  uv_loop_t* loop;
  ngx_queue_t* q;

  /* XXX doing this check in uv_signal_init() - the logical place for it -
   * leads to an infinite loop when uv__loop_init() inits a signal watcher
   */
  /* FIXME */
  assert(handle->loop == uv_default_loop() &&
         "uv_signal_t is currently only supported by the default loop");

  loop = handle->loop;

  if (uv__is_active(handle))
    return uv__set_artificial_error(loop, UV_EBUSY);

  if (signal_cb == NULL)
    return uv__set_artificial_error(loop, UV_EINVAL);

  /* BUGFIX: validate the signed parameter. The original converted to
   * unsigned first and then tested `signum <= 0`, which is never true
   * for negative input; negatives only got rejected incidentally by the
   * range check further down.
   */
  if (signum_ <= 0)
    return uv__set_artificial_error(loop, UV_EINVAL);

  signum = (unsigned int) signum_;

  /* The shared context is created lazily on the first start. */
  ctx = loop->signal_ctx;

  if (ctx == NULL) {
    ctx = uv__signal_ctx_new(loop);

    if (ctx == NULL)
      return uv__set_artificial_error(loop, UV_ENOMEM);

    loop->signal_ctx = ctx;
  }

  if (signum > ctx->nqueues)
    return uv__set_artificial_error(loop, UV_EINVAL);

  q = ctx->queues + signum;

  /* Another handle already watches this signal; the OS handler is
   * installed, only queue membership needs updating.
   */
  if (!ngx_queue_empty(q))
    goto skip;

  /* XXX use a separate signal stack? */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = uv__signal_handler;

  /* XXX save old action so we can restore it later on? */
  if (sigaction(signum, &sa, NULL))
    return uv__set_artificial_error(loop, UV_EINVAL);

skip:
  ngx_queue_insert_tail(q, &handle->queue);
  uv__handle_start(handle);
  handle->signum = signum;
  handle->signal_cb = signal_cb;

  return 0;
}
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
/* Stop watching the handle's signal. Returns 0 immediately if the
 * handle is not active. Unlinks the handle from its signal queue and,
 * when no other handle still watches the same signal, resets the OS
 * disposition to SIG_DFL. Returns -1 with UV_EINVAL set only if
 * sigaction() fails.
 */
int uv_signal_stop(uv_signal_t* handle) {
  struct signal_ctx* ctx;
  struct sigaction sa;
  unsigned int signum;
  uv_loop_t* loop;

  if (!uv__is_active(handle))
    return 0;

  signum = handle->signum;
  loop = handle->loop;
  ctx = loop->signal_ctx;
  assert(signum > 0);
  assert(signum <= ctx->nqueues);

  ngx_queue_remove(&handle->queue);
  uv__handle_stop(handle);
  handle->signum = 0;

  /* Other handles still listen for this signal; keep the OS handler. */
  if (!ngx_queue_empty(ctx->queues + signum))
    goto skip;

  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = SIG_DFL; /* XXX restore previous action? */

  if (sigaction(signum, &sa, NULL))
    return uv__set_artificial_error(loop, UV_EINVAL);

skip:
  return 0;
}
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
/* Close hook for signal handles. Stopping the watcher is all the
 * per-handle teardown needed; the shared signal context is torn down
 * separately via uv__signal_unregister().
 */
void uv__signal_close(uv_signal_t* handle) {
  uv_signal_stop(handle);
}
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
/* Tear down the loop's shared signal context (closes the self-pipe and
 * frees the queue table). Safe to call when no context was ever
 * created: uv__signal_ctx_delete() accepts NULL.
 */
void uv__signal_unregister(uv_loop_t* loop) {
  uv__signal_ctx_delete(loop->signal_ctx);
  loop->signal_ctx = NULL;
}
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
/* The actual OS signal handler. Runs in asynchronous signal context,
 * so it does nothing but a single write() on the self-pipe (which is
 * async-signal-safe); all real dispatch happens later on the loop
 * thread in uv__signal_event(). Hard-wired to the default loop - see
 * the assert in uv_signal_start().
 */
static void uv__signal_handler(int signum) {
  struct signal_ctx* ctx = uv_default_loop()->signal_ctx;
  uv__signal_write(ctx->pipefd[1], (unsigned int) signum);
}
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
/* Loop-side dispatcher, invoked when the signal self-pipe becomes
 * readable. Reads one signal number and invokes the callback of every
 * handle currently watching that signal.
 */
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, int events) {
  struct signal_ctx* ctx;
  unsigned int signum;
  uv_signal_t* h;
  ngx_queue_t* q;
  ngx_queue_t* next;

  ctx = container_of(w, struct signal_ctx, io_watcher);
  signum = uv__signal_read(ctx->pipefd[0]);
  assert(signum > 0);
  assert(signum <= ctx->nqueues);

  /* BUGFIX: iterate with a saved `next` pointer instead of
   * ngx_queue_foreach. A callback may call uv_signal_stop() on its own
   * handle, which unlinks the current node and would make the plain
   * foreach advance through a removed element.
   */
  for (q = ngx_queue_head(ctx->queues + signum);
       q != ngx_queue_sentinel(ctx->queues + signum);
       q = next) {
    next = ngx_queue_next(q);
    h = ngx_queue_data(q, uv_signal_t, queue);
    h->signal_cb(h, signum);
  }
}
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
/* Allocate and wire up the per-loop signal context: a non-blocking
 * self-pipe plus a table of handle queues indexed by raw signal number.
 * Returns NULL if allocation or pipe creation fails.
 */
static struct signal_ctx* uv__signal_ctx_new(uv_loop_t* loop) {
  struct signal_ctx* ctx;
  unsigned int nqueues;
  unsigned int slot;

  nqueues = uv__signal_max();
  assert(nqueues > 0);

  /* Queue slot 0 is deliberately left unused so queues[] can be indexed
   * by the raw signum directly, sparing every caller a "- 1" that would
   * inevitably be forgotten somewhere.
   */
  ctx = calloc(1, sizeof(*ctx) + sizeof(ctx->queues[0]) * (nqueues + 1));
  if (ctx == NULL)
    return NULL;

  if (uv__make_pipe(ctx->pipefd, UV__F_NONBLOCK)) {
    free(ctx);
    return NULL;
  }

  uv__io_init(&ctx->io_watcher, uv__signal_event, ctx->pipefd[0], UV__IO_READ);
  uv__io_start(loop, &ctx->io_watcher);
  ctx->nqueues = nqueues;

  for (slot = 1; slot <= nqueues; slot++)
    ngx_queue_init(ctx->queues + slot);

  return ctx;
}
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
static void uv__signal_ctx_delete(struct signal_ctx* ctx) {
|
|
218
|
+
if (ctx == NULL) return;
|
|
219
|
+
close(ctx->pipefd[0]);
|
|
220
|
+
close(ctx->pipefd[1]);
|
|
221
|
+
free(ctx);
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
/* Push one signal number onto the self-pipe. Retries on EINTR; a full
 * pipe (EAGAIN/EWOULDBLOCK) is silently tolerated - the pending data
 * already guarantees the loop will wake up. Anything else (including a
 * short write) is a fatal invariant violation. Async-signal-safe: only
 * write() and errno are touched.
 */
static void uv__signal_write(int fd, unsigned int val) {
  for (;;) {
    ssize_t nwritten = write(fd, &val, sizeof(val));

    if (nwritten == sizeof(val))
      return;

    if (nwritten == -1) {
      if (errno == EINTR)
        continue;
      if (errno == EAGAIN || errno == EWOULDBLOCK)
        return; /* pipe full - nothing we can do about that */
    }

    abort();
  }
}
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
/* Pop one signal number off the self-pipe. Retries on EINTR. Only
 * called when the fd is known readable, so a short read, EOF or any
 * other error is a fatal invariant violation.
 */
static unsigned int uv__signal_read(int fd) {
  unsigned int val;

  for (;;) {
    ssize_t nread = read(fd, &val, sizeof(val));

    if (nread == sizeof(val))
      return val;

    if (nread == -1 && errno == EINTR)
      continue;

    abort();
  }
}
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
/* Best-effort upper bound on usable signal numbers: prefer the runtime
 * value from sysconf(_SC_RTSIG_MAX), then the compile-time SIGRTMAX or
 * NSIG, and finally fall back to the classic 32.
 */
static unsigned int uv__signal_max(void) {
#if defined(_SC_RTSIG_MAX)
  long rtsig_max = sysconf(_SC_RTSIG_MAX);
  if (rtsig_max != -1)
    return (unsigned int) rtsig_max;
#endif
#if defined(SIGRTMAX)
  return SIGRTMAX;
#elif defined(NSIG)
  return NSIG;
#else
  return 32;
#endif
}
|
|
@@ -0,0 +1,990 @@
|
|
|
1
|
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
|
2
|
+
*
|
|
3
|
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
4
|
+
* of this software and associated documentation files (the "Software"), to
|
|
5
|
+
* deal in the Software without restriction, including without limitation the
|
|
6
|
+
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
7
|
+
* sell copies of the Software, and to permit persons to whom the Software is
|
|
8
|
+
* furnished to do so, subject to the following conditions:
|
|
9
|
+
*
|
|
10
|
+
* The above copyright notice and this permission notice shall be included in
|
|
11
|
+
* all copies or substantial portions of the Software.
|
|
12
|
+
*
|
|
13
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
14
|
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
15
|
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
16
|
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
17
|
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
18
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
19
|
+
* IN THE SOFTWARE.
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
#include "uv.h"
|
|
23
|
+
#include "internal.h"
|
|
24
|
+
|
|
25
|
+
#include <stdio.h>
|
|
26
|
+
#include <stdlib.h>
|
|
27
|
+
#include <string.h>
|
|
28
|
+
#include <assert.h>
|
|
29
|
+
#include <errno.h>
|
|
30
|
+
|
|
31
|
+
#include <sys/types.h>
|
|
32
|
+
#include <sys/socket.h>
|
|
33
|
+
#include <sys/uio.h>
|
|
34
|
+
#include <sys/un.h>
|
|
35
|
+
#include <unistd.h>
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
static void uv__stream_connect(uv_stream_t*);
|
|
39
|
+
static void uv__write(uv_stream_t* stream);
|
|
40
|
+
static void uv__read(uv_stream_t* stream);
|
|
41
|
+
static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, int events);
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
/* Sum the byte lengths of `bufcnt` buffers. */
static size_t uv__buf_count(uv_buf_t bufs[], int bufcnt) {
  size_t sum;
  int idx;

  sum = 0;
  for (idx = 0; idx < bufcnt; idx++)
    sum += bufs[idx].len;

  return sum;
}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
void uv__stream_init(uv_loop_t* loop,
|
|
57
|
+
uv_stream_t* stream,
|
|
58
|
+
uv_handle_type type) {
|
|
59
|
+
uv__handle_init(loop, (uv_handle_t*)stream, type);
|
|
60
|
+
stream->alloc_cb = NULL;
|
|
61
|
+
stream->close_cb = NULL;
|
|
62
|
+
stream->connection_cb = NULL;
|
|
63
|
+
stream->connect_req = NULL;
|
|
64
|
+
stream->shutdown_req = NULL;
|
|
65
|
+
stream->accepted_fd = -1;
|
|
66
|
+
stream->fd = -1;
|
|
67
|
+
stream->delayed_error = 0;
|
|
68
|
+
ngx_queue_init(&stream->write_queue);
|
|
69
|
+
ngx_queue_init(&stream->write_completed_queue);
|
|
70
|
+
stream->write_queue_size = 0;
|
|
71
|
+
|
|
72
|
+
uv__io_init(&stream->read_watcher, uv__stream_io, -1, 0);
|
|
73
|
+
uv__io_init(&stream->write_watcher, uv__stream_io, -1, 0);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
/* Associate file descriptor `fd` with the stream, merge `flags` into
 * the handle flags and apply any TCP socket options those flags imply.
 * Returns 0 on success, -1 with the loop's sys error set on failure.
 * Note: stream->fd keeps the fd even on failure (original semantics);
 * the caller is responsible for closing it.
 */
int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
  /* FIX: was `socklen_t yes` - POSIX specifies an *int* option value
   * for SO_REUSEADDR; socklen_t is only the correct type for the
   * option *length*.
   */
  int yes;

  assert(fd >= 0);
  stream->fd = fd;

  stream->flags |= flags;

  if (stream->type == UV_TCP) {
    /* Reuse the port address if applicable. */
    yes = 1;
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof yes) == -1) {
      uv__set_sys_error(stream->loop, errno);
      return -1;
    }

    if ((stream->flags & UV_TCP_NODELAY) &&
        uv__tcp_nodelay((uv_tcp_t*)stream, 1)) {
      return -1;
    }

    /* TODO Use delay the user passed in. */
    if ((stream->flags & UV_TCP_KEEPALIVE) &&
        uv__tcp_keepalive((uv_tcp_t*)stream, 1, 60)) {
      return -1;
    }
  }

  /* Associate the fd with each watcher. */
  uv__io_set(&stream->read_watcher, uv__stream_io, fd, UV__IO_READ);
  uv__io_set(&stream->write_watcher, uv__stream_io, fd, UV__IO_WRITE);

  return 0;
}
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
/* Final teardown of a closed stream: every outstanding request is
 * completed with a failure or its recorded result so no callback is
 * ever silently dropped. Must only run once the handle carries
 * UV_CLOSED.
 */
void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(stream->flags & UV_CLOSED);

  /* A connect still in flight is aborted with UV_ECANCELED. */
  if (stream->connect_req) {
    uv__req_unregister(stream->loop, stream->connect_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->connect_req->cb(stream->connect_req, -1);
    stream->connect_req = NULL;
  }

  /* Writes that never hit the wire: free buffer copies, cancel cbs. */
  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    /* bufs is heap-allocated only when it did not fit in bufsml. */
    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_ECANCELED);
      req->cb(req, -1);
    }
  }

  /* Writes already performed whose callbacks have not run yet: report
   * the per-request error recorded at write time (0 == success).
   */
  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  /* A pending shutdown request is cancelled as well. */
  if (stream->shutdown_req) {
    uv__req_unregister(stream->loop, stream->shutdown_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->shutdown_req->cb(stream->shutdown_req, -1);
    stream->shutdown_req = NULL;
  }
}
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
/* Idle callback used by the single-accept load-balancing scheme (see
 * uv__server_io): once the loop has gone idle, resume accepting - but
 * only if the previously accepted connection has been claimed via
 * uv_accept().
 */
static void uv__next_accept(uv_idle_t* idle, int status) {
  uv_stream_t* server;

  server = idle->data;
  uv_idle_stop(idle);

  if (server->accepted_fd != -1)
    return;

  uv__io_start(server->loop, &server->read_watcher);
}
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
/* I/O callback for listening streams: accept incoming connections and
 * hand each to connection_cb. Only one accepted fd is held at a time;
 * accepting pauses until the user claims it with uv_accept().
 */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, int events) {
  int fd;
  uv_stream_t* stream = container_of(w, uv_stream_t, read_watcher);

  assert(events == UV__IO_READ);
  assert(!(stream->flags & UV_CLOSING));

  /* A connection is already pending; stop polling until it is taken. */
  if (stream->accepted_fd >= 0) {
    uv__io_stop(loop, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd);

    if (fd < 0) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No pending connections right now. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* Peer gave up before we accepted; not an error - retry. */
        continue;
      } else {
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb(stream, 0);

      /* BUGFIX: test the UV_TCP_SINGLE_ACCEPT bit with `&`. The
       * original compared the whole flags word with `==`, which is
       * false whenever any other flag (UV_STREAM_READABLE etc.) is also
       * set, so single-accept mode never engaged here.
       */
      if (stream->accepted_fd != -1 ||
          (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT))) {
        /* The user hasn't yet called uv_accept(). */
        uv__io_stop(stream->loop, &stream->read_watcher);
        break;
      }
    }
  }

  /* Same bitwise fix as above. */
  if (stream->fd != -1 &&
      stream->accepted_fd == -1 &&
      (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)))
  {
    /* Defer the next accept() syscall to the next event loop tick.
     * This lets us guarantee fair load balancing in multi-process
     * setups. The problem is as follows:
     *
     * 1. Multiple processes listen on the same socket.
     * 2. The OS scheduler commonly gives preference to one process to
     *    avoid task switches.
     * 3. That process therefore accepts most of the new connections,
     *    leading to a (sometimes very) unevenly distributed load.
     *
     * Here is how we mitigate this issue:
     *
     * 1. Accept a connection.
     * 2. Start an idle watcher.
     * 3. Don't accept new connections until the idle callback fires.
     *
     * This works because the callback only fires when there have been
     * no recent events, i.e. none of the watched file descriptors have
     * recently been readable or writable.
     */
    uv_tcp_t* tcp = (uv_tcp_t*) stream;
    uv_idle_start(tcp->idle_handle, uv__next_accept);
  }
}
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
/* Take the connection currently pending on `server` and initialize
 * `client` with its file descriptor. Returns 0 on success, -1 on
 * failure (EAGAIN when no connection is pending). errno is preserved
 * across the call.
 */
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
  uv_stream_t* streamServer;
  uv_stream_t* streamClient;
  int saved_errno;
  int status;

  /* TODO document this */
  assert(server->loop == client->loop);

  /* Preserve caller-visible errno; intermediate calls may clobber it. */
  saved_errno = errno;
  status = -1;

  streamServer = (uv_stream_t*)server;
  streamClient = (uv_stream_t*)client;

  /* No pending connection to hand over. */
  if (streamServer->accepted_fd < 0) {
    uv__set_sys_error(server->loop, EAGAIN);
    goto out;
  }

  if (uv__stream_open(streamClient, streamServer->accepted_fd,
        UV_STREAM_READABLE | UV_STREAM_WRITABLE)) {
    /* TODO handle error */
    close(streamServer->accepted_fd);
    streamServer->accepted_fd = -1;
    goto out;
  }

  /* Slot is free again - resume accepting on the server. */
  uv__io_start(streamServer->loop, &streamServer->read_watcher);
  streamServer->accepted_fd = -1;
  status = 0;

out:
  errno = saved_errno;
  return status;
}
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
/* Start listening for incoming connections on a TCP or pipe stream.
 * Delegates to the type-specific listen routine; on success the handle
 * is marked active. Returns 0 on success, -1 on failure.
 */
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
  int rc;

  if (stream->type == UV_TCP) {
    rc = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
  } else if (stream->type == UV_NAMED_PIPE) {
    rc = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
  } else {
    /* Only TCP and named pipes can listen. */
    assert(0);
    return -1;
  }

  if (rc == 0)
    uv__handle_start(stream);

  return rc;
}
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
/* Return the first pending write request on `stream`, or NULL when the
 * write queue is empty.
 */
uv_write_t* uv_write_queue_head(uv_stream_t* stream) {
  ngx_queue_t* node;
  uv_write_t* head;

  if (ngx_queue_empty(&stream->write_queue))
    return NULL;

  node = ngx_queue_head(&stream->write_queue);
  if (node == NULL)
    return NULL;

  head = ngx_queue_data(node, struct uv_write_s, queue);
  assert(head != NULL);

  return head;
}
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
/* Called when the write queue has fully drained. Stops the write watcher
 * and, if a shutdown was requested via uv_shutdown() and the stream is not
 * already shut or closing, performs the half-close and fires the shutdown
 * callback.
 */
static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;

  /* Caller guarantees there is nothing left to write. */
  assert(!uv_write_queue_head(stream));
  assert(stream->write_queue_size == 0);

  uv__io_stop(stream->loop, &stream->write_watcher);

  /* Shutdown? */
  if ((stream->flags & UV_STREAM_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_STREAM_SHUT)) {
    assert(stream->shutdown_req);

    /* Detach and unregister the request before invoking its callback so
     * the callback may safely issue new requests on this stream. */
    req = stream->shutdown_req;
    stream->shutdown_req = NULL;
    uv__req_unregister(stream->loop, req);

    if (shutdown(stream->fd, SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv__set_sys_error(stream->loop, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv__set_sys_error(stream->loop, 0);
      ((uv_handle_t*) stream)->flags |= UV_STREAM_SHUT;
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
/* Number of bytes still unwritten in `req`, i.e. the combined length of
 * the buffers from write_index onward.
 */
static size_t uv__write_req_size(uv_write_t* req) {
  size_t remaining;

  remaining = uv__buf_count(req->bufs + req->write_index,
                            req->bufcnt - req->write_index);

  /* The stream's byte counter must account for at least this much. */
  assert(req->handle->write_queue_size >= remaining);

  return remaining;
}
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
/* Retire a write request: unlink it from the pending queue, release any
 * heap-allocated buffer array, and park it on write_completed_queue so
 * uv__write_callbacks() can deliver its callback on the next write event.
 */
static void uv__write_req_finish(uv_write_t* req) {
  uv_stream_t* stream = req->handle;

  /* Pop the req off tcp->write_queue. */
  ngx_queue_remove(&req->queue);

  /* bufsml is storage embedded in the request itself; only free the
   * array when it was separately malloc'd. */
  if (req->bufs != req->bufsml)
    free(req->bufs);
  req->bufs = NULL;

  /* Queue for deferred callback delivery. */
  ngx_queue_insert_tail(&stream->write_completed_queue, &req->queue);
  uv__io_feed(stream->loop, &stream->write_watcher, UV__IO_WRITE);
}
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
/* Flush as much of the write queue as the fd will take right now.
 * Completed or failed requests are retired via uv__write_req_finish();
 * a partial write leaves the request at the head of the queue with its
 * buffers advanced. (An earlier version returned the failing request;
 * this one reports errors through req->error instead.)
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  if (stream->flags & UV_CLOSING) {
    /* Handle was closed this tick. We've received a stale
     * 'is writable' callback from the event loop, ignore.
     */
    return;
  }

start:

  assert(stream->fd >= 0);

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /*
   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /*
   * Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (req->send_handle) {
    /* Pass a file descriptor along with the data via SCM_RIGHTS. */
    struct msghdr msg;
    /* NOTE(review): a plain char array is not guaranteed to be suitably
     * aligned for struct cmsghdr on all platforms; a union with
     * struct cmsghdr would be safer — confirm before changing. */
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;
    *(int*) CMSG_DATA(cmsg) = fd_to_send;

    do {
      n = sendmsg(stream->fd, &msg, 0);
    }
    while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(stream->fd, iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(stream->fd, iov, iovcnt);
      }
    }
    while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      /* Error */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    } else if (stream->flags & UV_STREAM_BLOCKING) {
      /* If this is a blocking stream, try again. */
      goto start;
    }
  } else {
    /* Successful write */

    /* Consume `n` bytes from the request's buffers, advancing
     * write_index past fully-written buffers. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        /* Partial write of this buffer: slide its base forward. */
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. */
        if (stream->flags & UV_STREAM_BLOCKING) {
          /*
           * If we're blocking then we should not be enabling the write
           * watcher - instead we need to try again.
           */
          goto start;
        } else {
          /* Break loop and ensure the watcher is pending. */
          break;
        }

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* Only non-blocking streams should use the write_watcher. */
  assert(!(stream->flags & UV_STREAM_BLOCKING));

  /* We're not done. */
  uv__io_start(stream->loop, &stream->write_watcher);
}
|
|
544
|
+
|
|
545
|
+
|
|
546
|
+
/* Deliver callbacks for every request on write_completed_queue, then run
 * uv__drain() if the pending write queue is now empty (which may in turn
 * fire a shutdown callback).
 */
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    req = ngx_queue_data(q, uv_write_t, queue);
    ngx_queue_remove(q);
    /* Unregister before the callback so the callback sees a consistent
     * request count and may submit new requests. */
    uv__req_unregister(stream->loop, req);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
static uv_handle_type uv__handle_type(int fd) {
|
|
574
|
+
struct sockaddr_storage ss;
|
|
575
|
+
socklen_t len;
|
|
576
|
+
|
|
577
|
+
memset(&ss, 0, sizeof(ss));
|
|
578
|
+
len = sizeof(ss);
|
|
579
|
+
|
|
580
|
+
if (getsockname(fd, (struct sockaddr*)&ss, &len))
|
|
581
|
+
return UV_UNKNOWN_HANDLE;
|
|
582
|
+
|
|
583
|
+
switch (ss.ss_family) {
|
|
584
|
+
case AF_UNIX:
|
|
585
|
+
return UV_NAMED_PIPE;
|
|
586
|
+
case AF_INET:
|
|
587
|
+
case AF_INET6:
|
|
588
|
+
return UV_TCP;
|
|
589
|
+
}
|
|
590
|
+
|
|
591
|
+
return UV_UNKNOWN_HANDLE;
|
|
592
|
+
}
|
|
593
|
+
|
|
594
|
+
|
|
595
|
+
/* Pull data from the stream's fd and hand it to the user's read callback.
 * Plain streams use read(2); IPC pipes (read2_cb) use recvmsg(2) so a
 * passed file descriptor can be captured into stream->accepted_fd.
 * Reads at most 32 buffers per invocation to avoid starving the loop.
 */
static void uv__read(uv_stream_t* stream) {
  uv_buf_t buf;
  ssize_t nread;
  struct msghdr msg;
  struct cmsghdr* cmsg;
  char cmsg_space[64];
  int count;

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  /* XXX: Maybe instead of having UV_STREAM_READING we just test if
   * tcp->read_cb is NULL or not?
   */
  while ((stream->read_cb || stream->read2_cb)
      && (stream->flags & UV_STREAM_READING)
      && (count-- > 0)) {
    assert(stream->alloc_cb);
    /* Ask the user for a buffer; 64 KiB is the suggested size only. */
    buf = stream->alloc_cb((uv_handle_t*)stream, 64 * 1024);

    assert(buf.len > 0);
    assert(buf.base);
    assert(stream->fd >= 0);

    if (stream->read_cb) {
      do {
        nread = read(stream->fd, buf.base, buf.len);
      }
      while (nread < 0 && errno == EINTR);
    } else {
      assert(stream->read2_cb);
      /* read2_cb uses recvmsg */
      msg.msg_flags = 0;
      msg.msg_iov = (struct iovec*) &buf;
      msg.msg_iovlen = 1;
      msg.msg_name = NULL;
      msg.msg_namelen = 0;
      /* Set up to receive a descriptor even if one isn't in the message */
      /* NOTE(review): 64 is a magic copy of sizeof(cmsg_space); keep the
       * two in sync if the buffer size ever changes. */
      msg.msg_controllen = 64;
      msg.msg_control = (void *) cmsg_space;

      do {
        nread = recvmsg(stream->fd, &msg, 0);
      }
      while (nread < 0 && errno == EINTR);
    }


    if (nread < 0) {
      /* Error */
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No data right now; keep watching and report a zero-length read. */
        /* Wait for the next one. */
        if (stream->flags & UV_STREAM_READING) {
          uv__io_start(stream->loop, &stream->read_watcher);
        }
        uv__set_sys_error(stream->loop, EAGAIN);

        if (stream->read_cb) {
          stream->read_cb(stream, 0, buf);
        } else {
          stream->read2_cb((uv_pipe_t*)stream, 0, buf, UV_UNKNOWN_HANDLE);
        }

        return;
      } else {
        /* Error. User should call uv_close(). */
        uv__set_sys_error(stream->loop, errno);

        if (stream->read_cb) {
          stream->read_cb(stream, -1, buf);
        } else {
          stream->read2_cb((uv_pipe_t*)stream, -1, buf, UV_UNKNOWN_HANDLE);
        }

        assert(!uv__io_active(&stream->read_watcher));
        return;
      }

    } else if (nread == 0) {
      /* EOF: stop watching; the handle goes inactive once the write
       * watcher is also idle. EOF is reported as -1 with UV_EOF set. */
      uv__set_artificial_error(stream->loop, UV_EOF);
      uv__io_stop(stream->loop, &stream->read_watcher);
      if (!uv__io_active(&stream->write_watcher))
        uv__handle_stop(stream);

      if (stream->read_cb) {
        stream->read_cb(stream, -1, buf);
      } else {
        stream->read2_cb((uv_pipe_t*)stream, -1, buf, UV_UNKNOWN_HANDLE);
      }
      return;
    } else {
      /* Successful read */
      ssize_t buflen = buf.len;

      if (stream->read_cb) {
        stream->read_cb(stream, nread, buf);
      } else {
        assert(stream->read2_cb);

        /*
         * XXX: Some implementations can send multiple file descriptors in a
         * single message. We should be using CMSG_NXTHDR() to walk the
         * chain to get at them all. This would require changing the API to
         * hand these back up the caller, is a pain.
         */

        /* Scan ancillary data for a passed descriptor (SCM_RIGHTS). */
        for (cmsg = CMSG_FIRSTHDR(&msg);
             msg.msg_controllen > 0 && cmsg != NULL;
             cmsg = CMSG_NXTHDR(&msg, cmsg)) {

          if (cmsg->cmsg_type == SCM_RIGHTS) {
            if (stream->accepted_fd != -1) {
              /* Only one pending fd slot; extras are dropped (and leak
               * the descriptor — known limitation of this version). */
              fprintf(stderr, "(libuv) ignoring extra FD received\n");
            }

            stream->accepted_fd = *(int *) CMSG_DATA(cmsg);

          } else {
            fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
                cmsg->cmsg_type);
          }
        }


        if (stream->accepted_fd >= 0) {
          stream->read2_cb((uv_pipe_t*)stream, nread, buf,
              uv__handle_type(stream->accepted_fd));
        } else {
          stream->read2_cb((uv_pipe_t*)stream, nread, buf, UV_UNKNOWN_HANDLE);
        }
      }

      /* Return if we didn't fill the buffer, there is no more data to read. */
      if (nread < buflen) {
        return;
      }
    }
  }
}
|
|
737
|
+
|
|
738
|
+
|
|
739
|
+
/* Request a write-side shutdown of the stream. The actual shutdown(2)
 * happens in uv__drain() once every pending write has been flushed,
 * at which point `cb` fires. Returns 0 on success, -1 with UV_ENOTCONN
 * if the stream is not writable or is already shutting down / closing.
 */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");
  assert(stream->fd >= 0);

  /* Reject streams that were never writable or whose teardown started. */
  if (!(stream->flags & UV_STREAM_WRITABLE) ||
      (stream->flags & (UV_STREAM_SHUT | UV_CLOSED | UV_CLOSING))) {
    uv__set_artificial_error(stream->loop, UV_ENOTCONN);
    return -1;
  }

  /* Register the request; uv__drain() picks it up via shutdown_req. */
  uv__req_init(stream->loop, req, UV_SHUTDOWN);
  req->handle = stream;
  req->cb = cb;
  stream->shutdown_req = req;
  stream->flags |= UV_STREAM_SHUTTING;

  /* Kick the write watcher so the queue drains and shutdown proceeds. */
  uv__io_start(stream->loop, &stream->write_watcher);

  return 0;
}
|
|
763
|
+
|
|
764
|
+
|
|
765
|
+
/* Event-loop callback for stream I/O. Recovers the stream from whichever
 * watcher fired, then dispatches: a pending connect is completed first;
 * otherwise readable events go to uv__read() and writable events to
 * uv__write() followed by callback delivery.
 */
static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, int events) {
  uv_stream_t* stream;

  /* either UV__IO_READ or UV__IO_WRITE but not both */
  assert(!!(events & UV__IO_READ) ^ !!(events & UV__IO_WRITE));

  /* The watcher is embedded in the stream; which field it is depends on
   * the event direction. */
  if (events & UV__IO_READ)
    stream = container_of(w, uv_stream_t, read_watcher);
  else
    stream = container_of(w, uv_stream_t, write_watcher);

  assert(stream->type == UV_TCP ||
         stream->type == UV_NAMED_PIPE ||
         stream->type == UV_TTY);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->connect_req)
    uv__stream_connect(stream);
  else if (events & UV__IO_READ) {
    assert(stream->fd >= 0);
    uv__read(stream);
  }
  else {
    assert(stream->fd >= 0);
    uv__write(stream);
    uv__write_callbacks(stream);
  }
}
|
|
793
|
+
|
|
794
|
+
|
|
795
|
+
/**
 * We get called here from directly following a call to connect(2).
 * In order to determine if we've errored out or succeeded must call
 * getsockopt.
 */
static void uv__stream_connect(uv_stream_t* stream) {
  int error;
  uv_connect_t* req = stream->connect_req;
  socklen_t errorsize = sizeof(int);

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
  assert(req);

  if (stream->delayed_error) {
    /* To smooth over the differences between unixes errors that
     * were reported synchronously on the first connect can be delayed
     * until the next tick--which is now.
     */
    error = stream->delayed_error;
    stream->delayed_error = 0;
  } else {
    /* Normal situation: we need to get the socket error from the kernel. */
    assert(stream->fd >= 0);
    /* NOTE(review): getsockopt's return value is unchecked; if the call
     * itself fails, `error` is read uninitialized below — confirm and
     * consider `if (getsockopt(...)) error = errno;`. */
    getsockopt(stream->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
  }

  /* Still connecting; wait for the next writable event. */
  if (error == EINPROGRESS)
    return;

  /* Connection attempt finished (success or hard failure): detach the
   * request before invoking the user callback. */
  stream->connect_req = NULL;
  uv__req_unregister(stream->loop, req);

  if (req->cb) {
    uv__set_sys_error(stream->loop, error);
    req->cb(req, error ? -1 : 0);
  }
}
|
|
832
|
+
|
|
833
|
+
|
|
834
|
+
/* Queue a write of `bufs` on `stream`, optionally passing `send_handle`
 * over an IPC pipe. The buffers' memory must stay valid until `cb` runs;
 * the uv_buf_t array itself is copied. Returns 0 on success, -1 on error
 * (bad fd, unsupported handle passing, or out of memory).
 */
int uv_write2(uv_write_t* req, uv_stream_t* stream, uv_buf_t bufs[], int bufcnt,
    uv_stream_t* send_handle, uv_write_cb cb) {
  int empty_queue;

  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
      stream->type == UV_TTY) &&
      "uv_write (unix) does not yet support other types of streams");

  if (stream->fd < 0) {
    uv__set_sys_error(stream->loop, EBADF);
    return -1;
  }

  if (send_handle) {
    /* Handle passing only works over IPC-enabled named pipes. */
    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) {
      uv__set_sys_error(stream->loop, EOPNOTSUPP);
      return -1;
    }
  }

  /* Remember whether anything was already queued; decides below whether
   * we try the write inline or defer to the write watcher. */
  empty_queue = (stream->write_queue_size == 0);

  /* Initialize the req */
  uv__req_init(stream->loop, req, UV_WRITE);
  req->cb = cb;
  req->handle = stream;
  req->error = 0;
  req->send_handle = send_handle;
  ngx_queue_init(&req->queue);

  /* Small writes use the array embedded in the request; larger ones
   * allocate. */
  if (bufcnt <= UV_REQ_BUFSML_SIZE)
    req->bufs = req->bufsml;
  else
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);

  /* FIX: the malloc result was previously passed straight to memcpy,
   * crashing on allocation failure. Back out the request instead. */
  if (req->bufs == NULL) {
    uv__req_unregister(stream->loop, req);
    uv__set_sys_error(stream->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;
  req->write_index = 0;
  stream->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&stream->write_queue, &req->queue);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (stream->connect_req) {
    /* Still connecting, do nothing. */
  }
  else if (empty_queue) {
    uv__write(stream);
  }
  else {
    /*
     * blocking streams should never have anything in the queue.
     * if this assert fires then somehow the blocking stream isn't being
     * sufficiently flushed in uv__write.
     */
    assert(!(stream->flags & UV_STREAM_BLOCKING));
    uv__io_start(stream->loop, &stream->write_watcher);
  }

  return 0;
}
|
|
899
|
+
|
|
900
|
+
|
|
901
|
+
/* The buffers to be written must remain valid until the callback is called.
 * This is not required for the uv_buf_t array.
 */
/* Convenience wrapper: uv_write2() without handle passing. */
int uv_write(uv_write_t* req, uv_stream_t* stream, uv_buf_t bufs[], int bufcnt,
    uv_write_cb cb) {
  return uv_write2(req, stream, bufs, bufcnt, NULL, cb);
}
|
|
908
|
+
|
|
909
|
+
|
|
910
|
+
/* Shared implementation of uv_read_start()/uv_read2_start(): records the
 * callbacks (exactly one of read_cb/read2_cb is non-NULL), marks the
 * stream as reading, and starts the read watcher. Returns 0 on success,
 * -1 with EINVAL if the stream is closing.
 */
int uv__read_start_common(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb, uv_read2_cb read2_cb) {
  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
      stream->type == UV_TTY);

  if (stream->flags & UV_CLOSING) {
    uv__set_sys_error(stream->loop, EINVAL);
    return -1;
  }

  /* The UV_STREAM_READING flag is irrelevant of the state of the tcp - it just
   * expresses the desired state of the user.
   */
  stream->flags |= UV_STREAM_READING;

  /* TODO: try to do the read inline? */
  /* TODO: keep track of tcp state. If we've gotten a EOF then we should
   * not start the IO watcher.
   */
  assert(stream->fd >= 0);
  assert(alloc_cb);

  stream->read_cb = read_cb;
  stream->read2_cb = read2_cb;
  stream->alloc_cb = alloc_cb;

  uv__io_start(stream->loop, &stream->read_watcher);
  uv__handle_start(stream);

  return 0;
}
|
|
941
|
+
|
|
942
|
+
|
|
943
|
+
/* Begin plain reads on the stream; data is delivered to read_cb. */
int uv_read_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb) {
  return uv__read_start_common(stream, alloc_cb, read_cb, NULL);
}
|
|
947
|
+
|
|
948
|
+
|
|
949
|
+
/* Begin IPC-aware reads; read_cb also receives the type of any passed
 * handle (see uv__read's recvmsg path). */
int uv_read2_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read2_cb read_cb) {
  return uv__read_start_common(stream, alloc_cb, NULL, read_cb);
}
|
|
953
|
+
|
|
954
|
+
|
|
955
|
+
/* Stop reading: halt the read watcher, drop the active ref, and clear
 * the reading flag and callbacks. Always returns 0.
 */
int uv_read_stop(uv_stream_t* stream) {
  uv__io_stop(stream->loop, &stream->read_watcher);
  uv__handle_stop(stream);
  stream->flags &= ~UV_STREAM_READING;

  /* With the callbacks cleared, uv__read()'s loop condition fails
   * immediately on any stale readable event. */
  stream->alloc_cb = NULL;
  stream->read_cb = NULL;
  stream->read2_cb = NULL;

  return 0;
}
|
|
964
|
+
|
|
965
|
+
|
|
966
|
+
/* Nonzero when the stream was opened readable. Note: returns the raw
 * UV_STREAM_READABLE flag bits, not a normalized 0/1. */
int uv_is_readable(const uv_stream_t* stream) {
  return stream->flags & UV_STREAM_READABLE;
}
|
|
969
|
+
|
|
970
|
+
|
|
971
|
+
/* Nonzero when the stream was opened writable. Note: returns the raw
 * UV_STREAM_WRITABLE flag bits, not a normalized 0/1. */
int uv_is_writable(const uv_stream_t* stream) {
  return stream->flags & UV_STREAM_WRITABLE;
}
|
|
974
|
+
|
|
975
|
+
|
|
976
|
+
/* Tear down a stream's fds and watchers as part of uv_close(). Stops
 * reading and writing, closes the main fd and any pending accepted fd.
 * close() return values are ignored here — nothing useful can be done
 * at this point.
 */
void uv__stream_close(uv_stream_t* handle) {
  uv_read_stop(handle);
  uv__io_stop(handle->loop, &handle->write_watcher);

  close(handle->fd);
  handle->fd = -1;

  /* A connection that was received but never uv_accept()'d. */
  if (handle->accepted_fd >= 0) {
    close(handle->accepted_fd);
    handle->accepted_fd = -1;
  }

  assert(!uv__io_active(&handle->read_watcher));
  assert(!uv__io_active(&handle->write_watcher));
}
|