rbuv 0.0.1 → 0.0.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.gitignore +5 -0
- data/.rspec +2 -0
- data/.travis.yml +5 -0
- data/Gemfile +0 -1
- data/README.md +6 -1
- data/Rakefile +42 -0
- data/deps/libuv/.gitignore +34 -0
- data/deps/libuv/.mailmap +16 -0
- data/deps/libuv/AUTHORS +81 -0
- data/deps/libuv/ChangeLog +45 -0
- data/deps/libuv/LICENSE +41 -0
- data/deps/libuv/Makefile +53 -0
- data/deps/libuv/README.md +118 -0
- data/deps/libuv/build.mk +164 -0
- data/deps/libuv/checksparse.sh +230 -0
- data/deps/libuv/common.gypi +197 -0
- data/deps/libuv/config-mingw.mk +48 -0
- data/deps/libuv/config-unix.mk +167 -0
- data/deps/libuv/gyp_uv +98 -0
- data/deps/libuv/include/uv-private/ngx-queue.h +129 -0
- data/deps/libuv/include/uv-private/stdint-msvc2008.h +247 -0
- data/deps/libuv/include/uv-private/tree.h +768 -0
- data/deps/libuv/include/uv-private/uv-bsd.h +34 -0
- data/deps/libuv/include/uv-private/uv-darwin.h +61 -0
- data/deps/libuv/include/uv-private/uv-linux.h +34 -0
- data/deps/libuv/include/uv-private/uv-sunos.h +44 -0
- data/deps/libuv/include/uv-private/uv-unix.h +332 -0
- data/deps/libuv/include/uv-private/uv-win.h +585 -0
- data/deps/libuv/include/uv.h +1987 -0
- data/deps/libuv/src/fs-poll.c +248 -0
- data/deps/libuv/src/inet.c +298 -0
- data/deps/libuv/src/unix/aix.c +393 -0
- data/deps/libuv/src/unix/async.c +281 -0
- data/deps/libuv/src/unix/core.c +714 -0
- data/deps/libuv/src/unix/cygwin.c +93 -0
- data/deps/libuv/src/unix/darwin-proctitle.m +78 -0
- data/deps/libuv/src/unix/darwin.c +431 -0
- data/deps/libuv/src/unix/dl.c +83 -0
- data/deps/libuv/src/unix/error.c +109 -0
- data/deps/libuv/src/unix/freebsd.c +343 -0
- data/deps/libuv/src/unix/fs.c +869 -0
- data/deps/libuv/src/unix/fsevents.c +299 -0
- data/deps/libuv/src/unix/getaddrinfo.c +159 -0
- data/deps/libuv/src/unix/internal.h +259 -0
- data/deps/libuv/src/unix/kqueue.c +347 -0
- data/deps/libuv/src/unix/linux-core.c +724 -0
- data/deps/libuv/src/unix/linux-inotify.c +236 -0
- data/deps/libuv/src/unix/linux-syscalls.c +388 -0
- data/deps/libuv/src/unix/linux-syscalls.h +150 -0
- data/deps/libuv/src/unix/loop-watcher.c +64 -0
- data/deps/libuv/src/unix/loop.c +114 -0
- data/deps/libuv/src/unix/netbsd.c +353 -0
- data/deps/libuv/src/unix/openbsd.c +304 -0
- data/deps/libuv/src/unix/pipe.c +261 -0
- data/deps/libuv/src/unix/poll.c +108 -0
- data/deps/libuv/src/unix/process.c +501 -0
- data/deps/libuv/src/unix/proctitle.c +103 -0
- data/deps/libuv/src/unix/signal.c +455 -0
- data/deps/libuv/src/unix/stream.c +1380 -0
- data/deps/libuv/src/unix/sunos.c +647 -0
- data/deps/libuv/src/unix/tcp.c +357 -0
- data/deps/libuv/src/unix/thread.c +431 -0
- data/deps/libuv/src/unix/threadpool.c +286 -0
- data/deps/libuv/src/unix/timer.c +153 -0
- data/deps/libuv/src/unix/tty.c +179 -0
- data/deps/libuv/src/unix/udp.c +715 -0
- data/deps/libuv/src/uv-common.c +431 -0
- data/deps/libuv/src/uv-common.h +204 -0
- data/deps/libuv/src/version.c +60 -0
- data/deps/libuv/src/win/async.c +99 -0
- data/deps/libuv/src/win/atomicops-inl.h +56 -0
- data/deps/libuv/src/win/core.c +310 -0
- data/deps/libuv/src/win/dl.c +86 -0
- data/deps/libuv/src/win/error.c +164 -0
- data/deps/libuv/src/win/fs-event.c +506 -0
- data/deps/libuv/src/win/fs.c +1951 -0
- data/deps/libuv/src/win/getaddrinfo.c +365 -0
- data/deps/libuv/src/win/handle-inl.h +164 -0
- data/deps/libuv/src/win/handle.c +153 -0
- data/deps/libuv/src/win/internal.h +346 -0
- data/deps/libuv/src/win/loop-watcher.c +124 -0
- data/deps/libuv/src/win/pipe.c +1656 -0
- data/deps/libuv/src/win/poll.c +615 -0
- data/deps/libuv/src/win/process-stdio.c +503 -0
- data/deps/libuv/src/win/process.c +1048 -0
- data/deps/libuv/src/win/req-inl.h +224 -0
- data/deps/libuv/src/win/req.c +25 -0
- data/deps/libuv/src/win/signal.c +354 -0
- data/deps/libuv/src/win/stream-inl.h +67 -0
- data/deps/libuv/src/win/stream.c +198 -0
- data/deps/libuv/src/win/tcp.c +1422 -0
- data/deps/libuv/src/win/thread.c +666 -0
- data/deps/libuv/src/win/threadpool.c +82 -0
- data/deps/libuv/src/win/timer.c +230 -0
- data/deps/libuv/src/win/tty.c +1857 -0
- data/deps/libuv/src/win/udp.c +744 -0
- data/deps/libuv/src/win/util.c +946 -0
- data/deps/libuv/src/win/winapi.c +152 -0
- data/deps/libuv/src/win/winapi.h +4476 -0
- data/deps/libuv/src/win/winsock.c +560 -0
- data/deps/libuv/src/win/winsock.h +171 -0
- data/deps/libuv/test/benchmark-async-pummel.c +119 -0
- data/deps/libuv/test/benchmark-async.c +139 -0
- data/deps/libuv/test/benchmark-fs-stat.c +136 -0
- data/deps/libuv/test/benchmark-getaddrinfo.c +91 -0
- data/deps/libuv/test/benchmark-list.h +163 -0
- data/deps/libuv/test/benchmark-loop-count.c +90 -0
- data/deps/libuv/test/benchmark-million-async.c +112 -0
- data/deps/libuv/test/benchmark-million-timers.c +77 -0
- data/deps/libuv/test/benchmark-multi-accept.c +432 -0
- data/deps/libuv/test/benchmark-ping-pongs.c +212 -0
- data/deps/libuv/test/benchmark-pound.c +325 -0
- data/deps/libuv/test/benchmark-pump.c +459 -0
- data/deps/libuv/test/benchmark-sizes.c +45 -0
- data/deps/libuv/test/benchmark-spawn.c +163 -0
- data/deps/libuv/test/benchmark-tcp-write-batch.c +141 -0
- data/deps/libuv/test/benchmark-thread.c +64 -0
- data/deps/libuv/test/benchmark-udp-pummel.c +238 -0
- data/deps/libuv/test/blackhole-server.c +118 -0
- data/deps/libuv/test/dns-server.c +329 -0
- data/deps/libuv/test/echo-server.c +384 -0
- data/deps/libuv/test/fixtures/empty_file +0 -0
- data/deps/libuv/test/fixtures/load_error.node +1 -0
- data/deps/libuv/test/run-benchmarks.c +64 -0
- data/deps/libuv/test/run-tests.c +159 -0
- data/deps/libuv/test/runner-unix.c +328 -0
- data/deps/libuv/test/runner-unix.h +36 -0
- data/deps/libuv/test/runner-win.c +318 -0
- data/deps/libuv/test/runner-win.h +43 -0
- data/deps/libuv/test/runner.c +394 -0
- data/deps/libuv/test/runner.h +165 -0
- data/deps/libuv/test/task.h +122 -0
- data/deps/libuv/test/test-active.c +83 -0
- data/deps/libuv/test/test-async.c +136 -0
- data/deps/libuv/test/test-barrier.c +98 -0
- data/deps/libuv/test/test-callback-order.c +77 -0
- data/deps/libuv/test/test-callback-stack.c +204 -0
- data/deps/libuv/test/test-condvar.c +173 -0
- data/deps/libuv/test/test-connection-fail.c +150 -0
- data/deps/libuv/test/test-cwd-and-chdir.c +64 -0
- data/deps/libuv/test/test-delayed-accept.c +189 -0
- data/deps/libuv/test/test-dlerror.c +58 -0
- data/deps/libuv/test/test-embed.c +136 -0
- data/deps/libuv/test/test-error.c +59 -0
- data/deps/libuv/test/test-fail-always.c +29 -0
- data/deps/libuv/test/test-fs-event.c +504 -0
- data/deps/libuv/test/test-fs-poll.c +148 -0
- data/deps/libuv/test/test-fs.c +1899 -0
- data/deps/libuv/test/test-get-currentexe.c +63 -0
- data/deps/libuv/test/test-get-loadavg.c +36 -0
- data/deps/libuv/test/test-get-memory.c +38 -0
- data/deps/libuv/test/test-getaddrinfo.c +120 -0
- data/deps/libuv/test/test-getsockname.c +344 -0
- data/deps/libuv/test/test-hrtime.c +54 -0
- data/deps/libuv/test/test-idle.c +82 -0
- data/deps/libuv/test/test-ipc-send-recv.c +218 -0
- data/deps/libuv/test/test-ipc.c +625 -0
- data/deps/libuv/test/test-list.h +492 -0
- data/deps/libuv/test/test-loop-handles.c +337 -0
- data/deps/libuv/test/test-loop-stop.c +73 -0
- data/deps/libuv/test/test-multiple-listen.c +103 -0
- data/deps/libuv/test/test-mutexes.c +63 -0
- data/deps/libuv/test/test-pass-always.c +28 -0
- data/deps/libuv/test/test-ping-pong.c +250 -0
- data/deps/libuv/test/test-pipe-bind-error.c +144 -0
- data/deps/libuv/test/test-pipe-connect-error.c +98 -0
- data/deps/libuv/test/test-platform-output.c +87 -0
- data/deps/libuv/test/test-poll-close.c +73 -0
- data/deps/libuv/test/test-poll.c +575 -0
- data/deps/libuv/test/test-process-title.c +49 -0
- data/deps/libuv/test/test-ref.c +415 -0
- data/deps/libuv/test/test-run-nowait.c +46 -0
- data/deps/libuv/test/test-run-once.c +49 -0
- data/deps/libuv/test/test-semaphore.c +111 -0
- data/deps/libuv/test/test-shutdown-close.c +105 -0
- data/deps/libuv/test/test-shutdown-eof.c +184 -0
- data/deps/libuv/test/test-signal-multiple-loops.c +270 -0
- data/deps/libuv/test/test-signal.c +152 -0
- data/deps/libuv/test/test-spawn.c +938 -0
- data/deps/libuv/test/test-stdio-over-pipes.c +250 -0
- data/deps/libuv/test/test-tcp-bind-error.c +198 -0
- data/deps/libuv/test/test-tcp-bind6-error.c +159 -0
- data/deps/libuv/test/test-tcp-close-while-connecting.c +81 -0
- data/deps/libuv/test/test-tcp-close.c +130 -0
- data/deps/libuv/test/test-tcp-connect-error-after-write.c +96 -0
- data/deps/libuv/test/test-tcp-connect-error.c +71 -0
- data/deps/libuv/test/test-tcp-connect-timeout.c +86 -0
- data/deps/libuv/test/test-tcp-connect6-error.c +69 -0
- data/deps/libuv/test/test-tcp-flags.c +52 -0
- data/deps/libuv/test/test-tcp-open.c +175 -0
- data/deps/libuv/test/test-tcp-read-stop.c +73 -0
- data/deps/libuv/test/test-tcp-shutdown-after-write.c +132 -0
- data/deps/libuv/test/test-tcp-unexpected-read.c +114 -0
- data/deps/libuv/test/test-tcp-write-to-half-open-connection.c +136 -0
- data/deps/libuv/test/test-tcp-writealot.c +171 -0
- data/deps/libuv/test/test-thread.c +183 -0
- data/deps/libuv/test/test-threadpool-cancel.c +311 -0
- data/deps/libuv/test/test-threadpool.c +77 -0
- data/deps/libuv/test/test-timer-again.c +142 -0
- data/deps/libuv/test/test-timer.c +266 -0
- data/deps/libuv/test/test-tty.c +111 -0
- data/deps/libuv/test/test-udp-dgram-too-big.c +87 -0
- data/deps/libuv/test/test-udp-ipv6.c +158 -0
- data/deps/libuv/test/test-udp-multicast-join.c +140 -0
- data/deps/libuv/test/test-udp-multicast-ttl.c +87 -0
- data/deps/libuv/test/test-udp-open.c +154 -0
- data/deps/libuv/test/test-udp-options.c +87 -0
- data/deps/libuv/test/test-udp-send-and-recv.c +210 -0
- data/deps/libuv/test/test-util.c +97 -0
- data/deps/libuv/test/test-walk-handles.c +78 -0
- data/deps/libuv/uv.gyp +431 -0
- data/deps/libuv/vcbuild.bat +128 -0
- data/ext/rbuv/debug.h +27 -0
- data/ext/rbuv/error.c +7 -0
- data/ext/rbuv/error.h +10 -0
- data/ext/rbuv/extconf.rb +35 -0
- data/ext/rbuv/handle.c +40 -0
- data/ext/rbuv/handle.h +14 -0
- data/ext/rbuv/libuv.mk +12 -0
- data/ext/rbuv/loop.c +50 -0
- data/ext/rbuv/loop.h +13 -0
- data/ext/rbuv/rbuv.c +15 -0
- data/ext/rbuv/rbuv.h +27 -0
- data/ext/rbuv/timer.c +133 -0
- data/ext/rbuv/timer.h +13 -0
- data/lib/rbuv/timer.rb +7 -0
- data/lib/rbuv/version.rb +1 -1
- data/lib/rbuv.rb +24 -2
- data/rbuv.gemspec +5 -1
- data/spec/spec_helper.rb +22 -0
- data/spec/timer_spec.rb +144 -0
- metadata +278 -9
@@ -0,0 +1,432 @@
|
|
1
|
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
2
|
+
*
|
3
|
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
4
|
+
* of this software and associated documentation files (the "Software"), to
|
5
|
+
* deal in the Software without restriction, including without limitation the
|
6
|
+
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
7
|
+
* sell copies of the Software, and to permit persons to whom the Software is
|
8
|
+
* furnished to do so, subject to the following conditions:
|
9
|
+
*
|
10
|
+
* The above copyright notice and this permission notice shall be included in
|
11
|
+
* all copies or substantial portions of the Software.
|
12
|
+
*
|
13
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
14
|
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
15
|
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
16
|
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
17
|
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
18
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
19
|
+
* IN THE SOFTWARE.
|
20
|
+
*/
|
21
|
+
|
22
|
+
#include "task.h"
|
23
|
+
#include "uv.h"
|
24
|
+
|
25
|
+
#define IPC_PIPE_NAME TEST_PIPENAME
|
26
|
+
#define NUM_CONNECTS (250 * 1000)
|
27
|
+
|
28
|
+
union stream_handle {
|
29
|
+
uv_pipe_t pipe;
|
30
|
+
uv_tcp_t tcp;
|
31
|
+
};
|
32
|
+
|
33
|
+
/* Use as (uv_stream_t *) &handle_storage -- it's kind of clunky but it
|
34
|
+
* avoids aliasing warnings.
|
35
|
+
*/
|
36
|
+
typedef unsigned char handle_storage_t[sizeof(union stream_handle)];
|
37
|
+
|
38
|
+
/* Used for passing around the listen handle, not part of the benchmark proper.
|
39
|
+
* We have an overabundance of server types here. It works like this:
|
40
|
+
*
|
41
|
+
* 1. The main thread starts an IPC pipe server.
|
42
|
+
* 2. The worker threads connect to the IPC server and obtain a listen handle.
|
43
|
+
* 3. The worker threads start accepting requests on the listen handle.
|
44
|
+
* 4. The main thread starts connecting repeatedly.
|
45
|
+
*
|
46
|
+
* Step #4 should perhaps be farmed out over several threads.
|
47
|
+
*/
|
48
|
+
struct ipc_server_ctx {
|
49
|
+
handle_storage_t server_handle;
|
50
|
+
unsigned int num_connects;
|
51
|
+
uv_pipe_t ipc_pipe;
|
52
|
+
};
|
53
|
+
|
54
|
+
struct ipc_peer_ctx {
|
55
|
+
handle_storage_t peer_handle;
|
56
|
+
uv_write_t write_req;
|
57
|
+
};
|
58
|
+
|
59
|
+
struct ipc_client_ctx {
|
60
|
+
uv_connect_t connect_req;
|
61
|
+
uv_stream_t* server_handle;
|
62
|
+
uv_pipe_t ipc_pipe;
|
63
|
+
char scratch[16];
|
64
|
+
};
|
65
|
+
|
66
|
+
/* Used in the actual benchmark. */
|
67
|
+
struct server_ctx {
|
68
|
+
handle_storage_t server_handle;
|
69
|
+
unsigned int num_connects;
|
70
|
+
uv_async_t async_handle;
|
71
|
+
uv_thread_t thread_id;
|
72
|
+
uv_sem_t semaphore;
|
73
|
+
};
|
74
|
+
|
75
|
+
struct client_ctx {
|
76
|
+
handle_storage_t client_handle;
|
77
|
+
unsigned int num_connects;
|
78
|
+
uv_connect_t connect_req;
|
79
|
+
uv_idle_t idle_handle;
|
80
|
+
};
|
81
|
+
|
82
|
+
static void ipc_connection_cb(uv_stream_t* ipc_pipe, int status);
|
83
|
+
static void ipc_write_cb(uv_write_t* req, int status);
|
84
|
+
static void ipc_close_cb(uv_handle_t* handle);
|
85
|
+
static void ipc_connect_cb(uv_connect_t* req, int status);
|
86
|
+
static void ipc_read2_cb(uv_pipe_t* ipc_pipe,
|
87
|
+
ssize_t nread,
|
88
|
+
uv_buf_t buf,
|
89
|
+
uv_handle_type type);
|
90
|
+
static uv_buf_t ipc_alloc_cb(uv_handle_t* handle, size_t suggested_size);
|
91
|
+
|
92
|
+
static void sv_async_cb(uv_async_t* handle, int status);
|
93
|
+
static void sv_connection_cb(uv_stream_t* server_handle, int status);
|
94
|
+
static void sv_read_cb(uv_stream_t* handle, ssize_t nread, uv_buf_t buf);
|
95
|
+
static uv_buf_t sv_alloc_cb(uv_handle_t* handle, size_t suggested_size);
|
96
|
+
|
97
|
+
static void cl_connect_cb(uv_connect_t* req, int status);
|
98
|
+
static void cl_idle_cb(uv_idle_t* handle, int status);
|
99
|
+
static void cl_close_cb(uv_handle_t* handle);
|
100
|
+
|
101
|
+
static struct sockaddr_in listen_addr;
|
102
|
+
|
103
|
+
|
104
|
+
/* A worker thread connected to the IPC server: accept it, then hand it the
 * shared listen socket via uv_write2() together with a small "PING" payload.
 * Once every expected worker has been served, close the IPC listener.
 */
static void ipc_connection_cb(uv_stream_t* ipc_pipe, int status) {
  struct ipc_server_ctx* server;
  struct ipc_peer_ctx* peer;
  uv_loop_t* loop;
  uv_buf_t payload;

  loop = ipc_pipe->loop;
  payload = uv_buf_init("PING", 4);
  server = container_of(ipc_pipe, struct ipc_server_ctx, ipc_pipe);
  peer = calloc(1, sizeof(*peer));
  ASSERT(peer != NULL);

  switch (ipc_pipe->type) {
  case UV_TCP:
    ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) &peer->peer_handle));
    break;
  case UV_NAMED_PIPE:
    ASSERT(0 == uv_pipe_init(loop, (uv_pipe_t*) &peer->peer_handle, 1));
    break;
  default:
    ASSERT(0);  /* Unexpected handle type. */
  }

  ASSERT(0 == uv_accept(ipc_pipe, (uv_stream_t*) &peer->peer_handle));
  /* uv_write2 sends the payload and the listen handle in one go. */
  ASSERT(0 == uv_write2(&peer->write_req,
                        (uv_stream_t*) &peer->peer_handle,
                        &payload,
                        1,
                        (uv_stream_t*) &server->server_handle,
                        ipc_write_cb));

  if (--server->num_connects == 0)
    uv_close((uv_handle_t*) ipc_pipe, NULL);
}
|
134
|
+
|
135
|
+
|
136
|
+
/* The listen handle has been delivered; tear down the peer connection. */
static void ipc_write_cb(uv_write_t* req, int status) {
  struct ipc_peer_ctx* peer;

  peer = container_of(req, struct ipc_peer_ctx, write_req);
  uv_close((uv_handle_t*) &peer->peer_handle, ipc_close_cb);
}
|
141
|
+
|
142
|
+
|
143
|
+
/* Reclaim the per-peer context once its handle is fully closed. */
static void ipc_close_cb(uv_handle_t* handle) {
  free(container_of(handle, struct ipc_peer_ctx, peer_handle));
}
|
148
|
+
|
149
|
+
|
150
|
+
/* Connected to the IPC server: start a read2 so we can receive the
 * transferred listen handle in ipc_read2_cb.
 */
static void ipc_connect_cb(uv_connect_t* req, int status) {
  struct ipc_client_ctx* client;

  client = container_of(req, struct ipc_client_ctx, connect_req);
  ASSERT(status == 0);
  ASSERT(0 == uv_read2_start((uv_stream_t*) &client->ipc_pipe,
                             ipc_alloc_cb,
                             ipc_read2_cb));
}
|
158
|
+
|
159
|
+
|
160
|
+
/* Reads over the IPC pipe are tiny, so a fixed per-context scratch
 * buffer suffices — no heap allocation needed.
 */
static uv_buf_t ipc_alloc_cb(uv_handle_t* handle, size_t suggested_size) {
  struct ipc_client_ctx* client;

  client = container_of(handle, struct ipc_client_ctx, ipc_pipe);
  return uv_buf_init(client->scratch, sizeof(client->scratch));
}
|
165
|
+
|
166
|
+
|
167
|
+
/* Called when the IPC server passes us the listen handle.  Initialize a
 * local handle of the announced type, accept the transferred descriptor
 * into it, then close the now-superfluous IPC pipe.
 */
static void ipc_read2_cb(uv_pipe_t* ipc_pipe,
                         ssize_t nread,
                         uv_buf_t buf,
                         uv_handle_type type) {
  struct ipc_client_ctx* ctx;
  uv_loop_t* loop;

  ctx = container_of(ipc_pipe, struct ipc_client_ctx, ipc_pipe);
  loop = ipc_pipe->loop;

  if (type == UV_TCP)
    ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) ctx->server_handle));
  else if (type == UV_NAMED_PIPE)
    ASSERT(0 == uv_pipe_init(loop, (uv_pipe_t*) ctx->server_handle, 0));
  else
    ASSERT(0);  /* Unexpected handle type. */

  /* uv_accept() on a read2 event extracts the pending transferred handle. */
  ASSERT(0 == uv_accept((uv_stream_t*) &ctx->ipc_pipe, ctx->server_handle));
  uv_close((uv_handle_t*) &ctx->ipc_pipe, NULL);
}
|
187
|
+
|
188
|
+
|
189
|
+
/* Set up an IPC pipe server that hands out listen sockets to the worker
|
190
|
+
* threads. It's kind of cumbersome for such a simple operation, maybe we
|
191
|
+
* should revive uv_import() and uv_export().
|
192
|
+
*/
|
193
|
+
/* Set up an IPC pipe server that hands out listen sockets to the worker
 * threads.  It's kind of cumbersome for such a simple operation, maybe we
 * should revive uv_import() and uv_export().
 */
static void send_listen_handles(uv_handle_type type,
                                unsigned int num_servers,
                                struct server_ctx* servers) {
  struct ipc_server_ctx ctx;
  uv_loop_t* loop;
  unsigned int i;

  loop = uv_default_loop();
  ctx.num_connects = num_servers;  /* Serve each worker exactly once. */

  /* Only TCP listen handles are supported here. */
  if (type == UV_TCP) {
    ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) &ctx.server_handle));
    ASSERT(0 == uv_tcp_bind((uv_tcp_t*) &ctx.server_handle, listen_addr));
  }
  else
    ASSERT(0);

  ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 1));
  ASSERT(0 == uv_pipe_bind(&ctx.ipc_pipe, IPC_PIPE_NAME));
  ASSERT(0 == uv_listen((uv_stream_t*) &ctx.ipc_pipe, 128, ipc_connection_cb));

  /* Release the workers; each one will now connect and collect a handle. */
  for (i = 0; i < num_servers; i++)
    uv_sem_post(&servers[i].semaphore);

  /* First run: serve all workers.  Then drop our copy of the listen
   * handle and run again to flush the close. */
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  uv_close((uv_handle_t*) &ctx.server_handle, NULL);
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));

  /* Wait until every worker has finished receiving its handle. */
  for (i = 0; i < num_servers; i++)
    uv_sem_wait(&servers[i].semaphore);
}
|
224
|
+
|
225
|
+
|
226
|
+
/* Worker-side counterpart of send_listen_handles(): connect to the IPC
 * server and run the loop until the listen handle has been received into
 * |server_handle|.
 */
static void get_listen_handle(uv_loop_t* loop, uv_stream_t* server_handle) {
  struct ipc_client_ctx ctx;

  ctx.server_handle = server_handle;
  ctx.server_handle->data = "server handle";  /* Debugging aid only. */

  ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 1));
  uv_pipe_connect(&ctx.connect_req,
                  &ctx.ipc_pipe,
                  IPC_PIPE_NAME,
                  ipc_connect_cb);
  /* Blocks until ipc_read2_cb closes the IPC pipe. */
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
}
|
239
|
+
|
240
|
+
|
241
|
+
/* Worker thread body: obtain the listen handle from the main thread via
 * IPC, then accept connections on a private loop until the main thread
 * signals shutdown through async_handle.
 */
static void server_cb(void *arg) {
  struct server_ctx *ctx;
  uv_loop_t* loop;

  ctx = arg;
  loop = uv_loop_new();
  ASSERT(loop != NULL);

  ASSERT(0 == uv_async_init(loop, &ctx->async_handle, sv_async_cb));
  /* Unref so the async handle alone doesn't keep the loop alive. */
  uv_unref((uv_handle_t*) &ctx->async_handle);

  /* Wait until the main thread is ready. */
  uv_sem_wait(&ctx->semaphore);
  get_listen_handle(loop, (uv_stream_t*) &ctx->server_handle);
  uv_sem_post(&ctx->semaphore);

  /* Now start the actual benchmark. */
  ASSERT(0 == uv_listen((uv_stream_t*) &ctx->server_handle,
                        128,
                        sv_connection_cb));
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));

  uv_loop_delete(loop);
}
|
265
|
+
|
266
|
+
|
267
|
+
/* Fired when the main thread signals end-of-benchmark: close both the
 * listen handle and the async handle so the worker loop can exit.
 */
static void sv_async_cb(uv_async_t* handle, int status) {
  struct server_ctx* srv;

  srv = container_of(handle, struct server_ctx, async_handle);
  uv_close((uv_handle_t*) &srv->server_handle, NULL);
  uv_close((uv_handle_t*) &srv->async_handle, NULL);
}
|
273
|
+
|
274
|
+
|
275
|
+
/* A client connected to this worker's copy of the listen handle.  Accept
 * it into a freshly malloc'd handle, start reading (to observe EOF), and
 * bump the per-thread accept counter.
 */
static void sv_connection_cb(uv_stream_t* server_handle, int status) {
  handle_storage_t* storage;
  struct server_ctx* ctx;

  ctx = container_of(server_handle, struct server_ctx, server_handle);
  ASSERT(status == 0);

  storage = malloc(sizeof(*storage));
  ASSERT(storage != NULL);

  if (server_handle->type == UV_TCP)
    ASSERT(0 == uv_tcp_init(server_handle->loop, (uv_tcp_t*) storage));
  else if (server_handle->type == UV_NAMED_PIPE)
    ASSERT(0 == uv_pipe_init(server_handle->loop, (uv_pipe_t*) storage, 0));
  else
    ASSERT(0);  /* Unexpected handle type. */

  ASSERT(0 == uv_accept(server_handle, (uv_stream_t*) storage));
  ASSERT(0 == uv_read_start((uv_stream_t*) storage, sv_alloc_cb, sv_read_cb));
  ctx->num_connects++;
}
|
296
|
+
|
297
|
+
|
298
|
+
/* Incoming data is discarded anyway, so every read can share the same
 * small static buffer.
 */
static uv_buf_t sv_alloc_cb(uv_handle_t* handle, size_t suggested_size) {
  static char slab[32];
  return uv_buf_init(slab, sizeof(slab));
}
|
302
|
+
|
303
|
+
|
304
|
+
/* Benchmark clients never send data, so the only expected read event is
 * EOF.  The handle was malloc'd in sv_connection_cb; free() serves
 * directly as its close callback.
 */
static void sv_read_cb(uv_stream_t* handle, ssize_t nread, uv_buf_t buf) {
  ASSERT(nread == -1);
  ASSERT(uv_last_error(handle->loop).code == UV_EOF);
  uv_close((uv_handle_t*) handle, (uv_close_cb) free);
}
|
309
|
+
|
310
|
+
|
311
|
+
/* A connection attempt finished.  Verify it succeeded FIRST, then defer
 * the close-and-reconnect to the idle handle so it runs outside of this
 * connect callback.
 *
 * Fix: the original started the idle handle before checking |status|,
 * scheduling follow-up work on a connection it had not yet validated.
 */
static void cl_connect_cb(uv_connect_t* req, int status) {
  struct client_ctx* ctx = container_of(req, struct client_ctx, connect_req);

  ASSERT(0 == status);
  uv_idle_start(&ctx->idle_handle, cl_idle_cb);
}
|
316
|
+
|
317
|
+
|
318
|
+
/* Runs on the first loop iteration after a successful connect: close the
 * connection and stop idling; cl_close_cb decides whether to reconnect.
 */
static void cl_idle_cb(uv_idle_t* handle, int status) {
  struct client_ctx* client;

  client = container_of(handle, struct client_ctx, idle_handle);
  uv_close((uv_handle_t*) &client->client_handle, cl_close_cb);
  uv_idle_stop(&client->idle_handle);
}
|
323
|
+
|
324
|
+
|
325
|
+
/* The client connection is fully closed.  Either stop (connection quota
 * reached) or immediately open a fresh connection, reusing the same
 * handle storage.
 */
static void cl_close_cb(uv_handle_t* handle) {
  struct client_ctx* ctx;

  ctx = container_of(handle, struct client_ctx, client_handle);

  if (--ctx->num_connects == 0) {
    /* Done: closing the idle handle lets the main loop exit. */
    uv_close((uv_handle_t*) &ctx->idle_handle, NULL);
    return;
  }

  ASSERT(0 == uv_tcp_init(handle->loop, (uv_tcp_t*) &ctx->client_handle));
  ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
                             (uv_tcp_t*) &ctx->client_handle,
                             listen_addr,
                             cl_connect_cb));
}
|
341
|
+
|
342
|
+
|
343
|
+
/* Run the multi-accept benchmark: spawn |num_servers| accept threads,
 * distribute the shared listen handle to them over IPC, then drive
 * NUM_CONNECTS connections from |num_clients| client contexts on the
 * main loop, and finally report aggregate and per-thread accept rates.
 * Always returns 0.
 */
static int test_tcp(unsigned int num_servers, unsigned int num_clients) {
  struct server_ctx* servers;
  struct client_ctx* clients;
  uv_loop_t* loop;
  uv_tcp_t* handle;
  unsigned int i;
  double time;

  listen_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
  loop = uv_default_loop();

  servers = calloc(num_servers, sizeof(servers[0]));
  clients = calloc(num_clients, sizeof(clients[0]));
  ASSERT(servers != NULL);
  ASSERT(clients != NULL);

  /* We're making the assumption here that from the perspective of the
   * OS scheduler, threads are functionally equivalent to and interchangeable
   * with full-blown processes.
   */
  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    ASSERT(0 == uv_sem_init(&ctx->semaphore, 0));
    ASSERT(0 == uv_thread_create(&ctx->thread_id, server_cb, ctx));
  }

  send_listen_handles(UV_TCP, num_servers, servers);

  /* Each client gets an equal share of the total connection quota. */
  for (i = 0; i < num_clients; i++) {
    struct client_ctx* ctx = clients + i;
    ctx->num_connects = NUM_CONNECTS / num_clients;
    handle = (uv_tcp_t*) &ctx->client_handle;
    handle->data = "client handle";  /* Debugging aid only. */
    ASSERT(0 == uv_tcp_init(loop, handle));
    ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
                               handle,
                               listen_addr,
                               cl_connect_cb));
    ASSERT(0 == uv_idle_init(loop, &ctx->idle_handle));
  }

  /* Time the whole benchmark run with the high-resolution clock. */
  {
    uint64_t t = uv_hrtime();
    ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
    t = uv_hrtime() - t;
    time = t / 1e9;  /* Nanoseconds to seconds. */
  }

  /* Tell every server thread to shut down, then reap it. */
  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    uv_async_send(&ctx->async_handle);
    ASSERT(0 == uv_thread_join(&ctx->thread_id));
    uv_sem_destroy(&ctx->semaphore);
  }

  printf("accept%u: %.0f accepts/sec (%u total)\n",
         num_servers,
         NUM_CONNECTS / time,
         NUM_CONNECTS);

  for (i = 0; i < num_servers; i++) {
    struct server_ctx* ctx = servers + i;
    printf("  thread #%u: %.0f accepts/sec (%u total, %.1f%%)\n",
           i,
           ctx->num_connects / time,
           ctx->num_connects,
           ctx->num_connects * 100.0 / NUM_CONNECTS);
  }

  free(clients);
  free(servers);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
|
418
|
+
|
419
|
+
|
420
|
+
/* Benchmark entry point: 2 accept threads, 40 concurrent clients. */
BENCHMARK_IMPL(tcp_multi_accept2) {
  return test_tcp(2, 40);
}
|
423
|
+
|
424
|
+
|
425
|
+
/* Benchmark entry point: 4 accept threads, 40 concurrent clients. */
BENCHMARK_IMPL(tcp_multi_accept4) {
  return test_tcp(4, 40);
}
|
428
|
+
|
429
|
+
|
430
|
+
/* Benchmark entry point: 8 accept threads, 40 concurrent clients. */
BENCHMARK_IMPL(tcp_multi_accept8) {
  return test_tcp(8, 40);
}
|
@@ -0,0 +1,212 @@
|
|
1
|
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
2
|
+
*
|
3
|
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
4
|
+
* of this software and associated documentation files (the "Software"), to
|
5
|
+
* deal in the Software without restriction, including without limitation the
|
6
|
+
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
7
|
+
* sell copies of the Software, and to permit persons to whom the Software is
|
8
|
+
* furnished to do so, subject to the following conditions:
|
9
|
+
*
|
10
|
+
* The above copyright notice and this permission notice shall be included in
|
11
|
+
* all copies or substantial portions of the Software.
|
12
|
+
*
|
13
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
14
|
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
15
|
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
16
|
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
17
|
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
18
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
19
|
+
* IN THE SOFTWARE.
|
20
|
+
*/
|
21
|
+
|
22
|
+
#include "uv.h"
|
23
|
+
#include "task.h"
|
24
|
+
|
25
|
+
#include <stdlib.h>
|
26
|
+
#include <stdio.h>
|
27
|
+
|
28
|
+
/* Run the benchmark for this many ms */
|
29
|
+
#define TIME 5000
|
30
|
+
|
31
|
+
|
32
|
+
typedef struct {
|
33
|
+
int pongs;
|
34
|
+
int state;
|
35
|
+
uv_tcp_t tcp;
|
36
|
+
uv_connect_t connect_req;
|
37
|
+
uv_shutdown_t shutdown_req;
|
38
|
+
} pinger_t;
|
39
|
+
|
40
|
+
typedef struct buf_s {
|
41
|
+
uv_buf_t uv_buf_t;
|
42
|
+
struct buf_s* next;
|
43
|
+
} buf_t;
|
44
|
+
|
45
|
+
|
46
|
+
static char PING[] = "PING\n";
|
47
|
+
|
48
|
+
static uv_loop_t* loop;
|
49
|
+
|
50
|
+
static buf_t* buf_freelist = NULL;
|
51
|
+
static int pinger_shutdown_cb_called;
|
52
|
+
static int completed_pingers = 0;
|
53
|
+
static int64_t start_time;
|
54
|
+
|
55
|
+
|
56
|
+
/* Return a read buffer, reusing one from the freelist when possible.
 * The bookkeeping header (buf_t) and the payload live in one allocation;
 * the uv_buf_t handed out points just past the header.
 *
 * Fix: the malloc() result was previously dereferenced without a check.
 */
static uv_buf_t buf_alloc(uv_handle_t* tcp, size_t size) {
  buf_t* ab;

  ab = buf_freelist;

  if (ab != NULL) {
    buf_freelist = ab->next;
    return ab->uv_buf_t;
  }

  ab = (buf_t*) malloc(size + sizeof *ab);
  ASSERT(ab != NULL);
  ab->uv_buf_t.len = size;
  ab->uv_buf_t.base = ((char*) ab) + sizeof *ab;

  return ab->uv_buf_t;
}
|
72
|
+
|
73
|
+
|
74
|
+
/* Return a buffer to the freelist.  The buf_t header is recovered by
 * stepping back from the payload pointer (see buf_alloc's layout).
 */
static void buf_free(uv_buf_t uv_buf_t) {
  buf_t* ab = (buf_t*) (uv_buf_t.base - sizeof *ab);

  ab->next = buf_freelist;
  buf_freelist = ab;
}
|
80
|
+
|
81
|
+
|
82
|
+
/* Final callback for a pinger: report the roundtrip rate over the TIME ms
 * window, release the context, and record completion for the benchmark
 * driver's sanity check.
 */
static void pinger_close_cb(uv_handle_t* handle) {
  pinger_t* pinger;

  pinger = (pinger_t*)handle->data;
  LOGF("ping_pongs: %d roundtrips/s\n", (1000 * pinger->pongs) / TIME);

  free(pinger);

  completed_pingers++;
}
|
92
|
+
|
93
|
+
|
94
|
+
/* A ping was written; verify success and release the heap-allocated
 * request from pinger_write_ping.
 */
static void pinger_write_cb(uv_write_t* req, int status) {
  ASSERT(status == 0);

  free(req);
}
|
99
|
+
|
100
|
+
|
101
|
+
/* Queue one "PING\n" write on the pinger's TCP stream.  The request is
 * heap-allocated and freed in pinger_write_cb.
 *
 * Fix: the malloc() result was previously passed to uv_write() unchecked.
 */
static void pinger_write_ping(pinger_t* pinger) {
  uv_write_t* req;
  uv_buf_t buf;

  buf = uv_buf_init(PING, sizeof(PING) - 1);

  req = malloc(sizeof *req);
  ASSERT(req != NULL);
  if (uv_write(req, (uv_stream_t*) &pinger->tcp, &buf, 1, pinger_write_cb)) {
    FATAL("uv_write failed");
  }
}
|
112
|
+
|
113
|
+
|
114
|
+
/* Write side has been shut down; reads continue until the peer's EOF
 * arrives (handled in pinger_read_cb).
 */
static void pinger_shutdown_cb(uv_shutdown_t* req, int status) {
  ASSERT(status == 0);
  pinger_shutdown_cb_called++;

  /*
   * The close callback has not been triggered yet. We must wait for EOF
   * until we close the connection.
   */
  ASSERT(completed_pingers == 0);
}
|
124
|
+
|
125
|
+
|
126
|
+
/* Verify every echoed byte against the PING pattern and count completed
 * roundtrips.  Once TIME ms have elapsed, shut the stream down and wait
 * for EOF before closing.
 */
static void pinger_read_cb(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf) {
  ssize_t i;
  pinger_t* pinger;

  pinger = (pinger_t*)tcp->data;

  if (nread < 0) {
    /* The only expected error is EOF, after our shutdown. */
    ASSERT(uv_last_error(loop).code == UV_EOF);

    if (buf.base) {
      buf_free(buf);
    }

    ASSERT(pinger_shutdown_cb_called == 1);
    uv_close((uv_handle_t*)tcp, pinger_close_cb);

    return;
  }

  /* Now we count the pings */
  for (i = 0; i < nread; i++) {
    ASSERT(buf.base[i] == PING[pinger->state]);
    /* state walks cyclically through PING; wrapping to 0 means one full
     * message was echoed back, i.e. one completed roundtrip. */
    pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
    if (pinger->state == 0) {
      pinger->pongs++;
      if (uv_now(loop) - start_time > TIME) {
        uv_shutdown(&pinger->shutdown_req, (uv_stream_t*) tcp, pinger_shutdown_cb);
        break;
      } else {
        pinger_write_ping(pinger);
      }
    }
  }

  buf_free(buf);
}
|
162
|
+
|
163
|
+
|
164
|
+
/* Connected to the echo server: send the first ping and start reading
 * the echoes.
 */
static void pinger_connect_cb(uv_connect_t* req, int status) {
  pinger_t *pinger = (pinger_t*)req->handle->data;

  ASSERT(status == 0);

  pinger_write_ping(pinger);

  if (uv_read_start(req->handle, buf_alloc, pinger_read_cb)) {
    FATAL("uv_read_start failed");
  }
}
|
175
|
+
|
176
|
+
|
177
|
+
static void pinger_new(void) {
|
178
|
+
int r;
|
179
|
+
struct sockaddr_in client_addr = uv_ip4_addr("0.0.0.0", 0);
|
180
|
+
struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
|
181
|
+
pinger_t *pinger;
|
182
|
+
|
183
|
+
pinger = (pinger_t*)malloc(sizeof(*pinger));
|
184
|
+
pinger->state = 0;
|
185
|
+
pinger->pongs = 0;
|
186
|
+
|
187
|
+
/* Try to connect to the server and do NUM_PINGS ping-pongs. */
|
188
|
+
r = uv_tcp_init(loop, &pinger->tcp);
|
189
|
+
ASSERT(!r);
|
190
|
+
|
191
|
+
pinger->tcp.data = pinger;
|
192
|
+
|
193
|
+
uv_tcp_bind(&pinger->tcp, client_addr);
|
194
|
+
|
195
|
+
r = uv_tcp_connect(&pinger->connect_req, &pinger->tcp, server_addr, pinger_connect_cb);
|
196
|
+
ASSERT(!r);
|
197
|
+
}
|
198
|
+
|
199
|
+
|
200
|
+
/* Benchmark entry point: run a single pinger against the echo server for
 * TIME ms and verify exactly one pinger completed.
 */
BENCHMARK_IMPL(ping_pongs) {
  loop = uv_default_loop();

  start_time = uv_now(loop);

  pinger_new();
  uv_run(loop, UV_RUN_DEFAULT);

  ASSERT(completed_pingers == 1);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
|