asyncengine 0.0.1.testing1 → 0.0.2.alpha1
Sign up to get free protection for your applications and to get access to all the features.
- data/README.markdown +3 -0
- data/Rakefile +38 -0
- data/asyncengine.gemspec +8 -4
- data/ext/asyncengine/ae_call_from_other_thread.c +106 -0
- data/ext/asyncengine/ae_call_from_other_thread.h +12 -0
- data/ext/asyncengine/ae_handle_common.c +193 -48
- data/ext/asyncengine/ae_handle_common.h +40 -13
- data/ext/asyncengine/ae_ip_utils.c +246 -0
- data/ext/asyncengine/ae_ip_utils.h +25 -0
- data/ext/asyncengine/ae_next_tick.c +81 -21
- data/ext/asyncengine/ae_next_tick.h +4 -2
- data/ext/asyncengine/ae_resolver.c +156 -0
- data/ext/asyncengine/ae_resolver.h +10 -0
- data/ext/asyncengine/ae_tcp.c +908 -0
- data/ext/asyncengine/ae_tcp.h +20 -0
- data/ext/asyncengine/ae_timer.c +355 -81
- data/ext/asyncengine/ae_timer.h +11 -4
- data/ext/asyncengine/ae_udp.c +579 -13
- data/ext/asyncengine/ae_udp.h +15 -2
- data/ext/asyncengine/ae_utils.c +192 -0
- data/ext/asyncengine/ae_utils.h +16 -0
- data/ext/asyncengine/asyncengine_ruby.c +469 -26
- data/ext/asyncengine/asyncengine_ruby.h +49 -11
- data/ext/asyncengine/debug.h +68 -0
- data/ext/asyncengine/extconf.rb +26 -2
- data/ext/asyncengine/ip_parser.c +5954 -0
- data/ext/asyncengine/ip_parser.h +16 -0
- data/ext/asyncengine/libuv/AUTHORS +16 -0
- data/ext/asyncengine/libuv/common.gypi +4 -4
- data/ext/asyncengine/libuv/config-mingw.mk +6 -6
- data/ext/asyncengine/libuv/config-unix.mk +13 -13
- data/ext/asyncengine/libuv/gyp_uv +5 -1
- data/ext/asyncengine/libuv/ibc_tests/exec_test.sh +8 -0
- data/ext/asyncengine/libuv/ibc_tests/uv_shutdown_write_issue.c +171 -0
- data/ext/asyncengine/libuv/ibc_tests/uv_tcp_close_while_connecting.c +102 -0
- data/ext/asyncengine/libuv/include/uv-private/ngx-queue.h +3 -1
- data/ext/asyncengine/libuv/include/uv-private/uv-unix.h +103 -50
- data/ext/asyncengine/libuv/include/uv-private/uv-win.h +76 -24
- data/ext/asyncengine/libuv/include/uv.h +353 -88
- data/ext/asyncengine/libuv/src/ares/ares__close_sockets.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares__get_hostent.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares__read_line.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares__timeval.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_cancel.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_data.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_destroy.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_expand_name.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_expand_string.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_fds.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_free_hostent.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_free_string.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_gethostbyaddr.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_gethostbyname.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_getnameinfo.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_getopt.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_getsock.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_init.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_library_init.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_llist.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_mkquery.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_nowarn.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_options.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_a_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_aaaa_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_mx_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_ns_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_ptr_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_srv_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_parse_txt_reply.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_process.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_query.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_search.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_send.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_strcasecmp.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_strdup.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_strerror.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_timeout.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_version.o +0 -0
- data/ext/asyncengine/libuv/src/ares/ares_writev.o +0 -0
- data/ext/asyncengine/libuv/src/ares/bitncmp.o +0 -0
- data/ext/asyncengine/libuv/src/ares/inet_net_pton.o +0 -0
- data/ext/asyncengine/libuv/src/ares/inet_ntop.o +0 -0
- data/ext/asyncengine/libuv/src/cares.c +225 -0
- data/ext/asyncengine/libuv/src/cares.o +0 -0
- data/ext/asyncengine/libuv/src/fs-poll.c +237 -0
- data/ext/asyncengine/libuv/src/fs-poll.o +0 -0
- data/ext/asyncengine/libuv/src/unix/async.c +78 -17
- data/ext/asyncengine/libuv/src/unix/async.o +0 -0
- data/ext/asyncengine/libuv/src/unix/core.c +305 -213
- data/ext/asyncengine/libuv/src/unix/core.o +0 -0
- data/ext/asyncengine/libuv/src/unix/cygwin.c +1 -1
- data/ext/asyncengine/libuv/src/unix/darwin.c +2 -1
- data/ext/asyncengine/libuv/src/unix/dl.c +36 -44
- data/ext/asyncengine/libuv/src/unix/dl.o +0 -0
- data/ext/asyncengine/libuv/src/unix/eio/eio.o +0 -0
- data/ext/asyncengine/libuv/src/unix/error.c +6 -0
- data/ext/asyncengine/libuv/src/unix/error.o +0 -0
- data/ext/asyncengine/libuv/src/unix/ev/ev.c +8 -4
- data/ext/asyncengine/libuv/src/unix/ev/ev.o +0 -0
- data/ext/asyncengine/libuv/src/unix/freebsd.c +1 -1
- data/ext/asyncengine/libuv/src/unix/fs.c +25 -33
- data/ext/asyncengine/libuv/src/unix/fs.o +0 -0
- data/ext/asyncengine/libuv/src/unix/internal.h +50 -31
- data/ext/asyncengine/libuv/src/unix/kqueue.c +2 -7
- data/ext/asyncengine/libuv/src/unix/linux/core.o +0 -0
- data/ext/asyncengine/libuv/src/unix/linux/inotify.c +12 -14
- data/ext/asyncengine/libuv/src/unix/linux/inotify.o +0 -0
- data/ext/asyncengine/libuv/src/unix/linux/{core.c → linux-core.c} +1 -1
- data/ext/asyncengine/libuv/src/unix/linux/linux-core.o +0 -0
- data/ext/asyncengine/libuv/src/unix/linux/syscalls.c +147 -1
- data/ext/asyncengine/libuv/src/unix/linux/syscalls.h +39 -2
- data/ext/asyncengine/libuv/src/unix/linux/syscalls.o +0 -0
- data/ext/asyncengine/libuv/src/unix/loop-watcher.c +63 -0
- data/ext/asyncengine/libuv/src/unix/loop-watcher.o +0 -0
- data/ext/asyncengine/libuv/src/unix/loop.c +29 -6
- data/ext/asyncengine/libuv/src/unix/loop.o +0 -0
- data/ext/asyncengine/libuv/src/unix/netbsd.c +1 -1
- data/ext/asyncengine/libuv/src/unix/openbsd.c +1 -1
- data/ext/asyncengine/libuv/src/unix/pipe.c +31 -36
- data/ext/asyncengine/libuv/src/unix/pipe.o +0 -0
- data/ext/asyncengine/libuv/src/unix/poll.c +116 -0
- data/ext/asyncengine/libuv/src/unix/poll.o +0 -0
- data/ext/asyncengine/libuv/src/unix/process.c +193 -115
- data/ext/asyncengine/libuv/src/unix/process.o +0 -0
- data/ext/asyncengine/libuv/src/unix/stream.c +146 -153
- data/ext/asyncengine/libuv/src/unix/stream.o +0 -0
- data/ext/asyncengine/libuv/src/unix/sunos.c +45 -36
- data/ext/asyncengine/libuv/src/unix/tcp.c +6 -5
- data/ext/asyncengine/libuv/src/unix/tcp.o +0 -0
- data/ext/asyncengine/libuv/src/unix/thread.c +82 -25
- data/ext/asyncengine/libuv/src/unix/thread.o +0 -0
- data/ext/asyncengine/libuv/src/unix/timer.c +69 -58
- data/ext/asyncengine/libuv/src/unix/timer.o +0 -0
- data/ext/asyncengine/libuv/src/unix/tty.c +3 -3
- data/ext/asyncengine/libuv/src/unix/tty.o +0 -0
- data/ext/asyncengine/libuv/src/unix/udp.c +57 -66
- data/ext/asyncengine/libuv/src/unix/udp.o +0 -0
- data/ext/asyncengine/libuv/src/unix/uv-eio.c +33 -50
- data/ext/asyncengine/libuv/src/unix/uv-eio.o +0 -0
- data/ext/asyncengine/libuv/src/uv-common.c +68 -38
- data/ext/asyncengine/libuv/src/uv-common.h +104 -20
- data/ext/asyncengine/libuv/src/uv-common.o +0 -0
- data/ext/asyncengine/libuv/src/win/async.c +20 -17
- data/ext/asyncengine/libuv/src/win/core.c +44 -31
- data/ext/asyncengine/libuv/src/win/dl.c +40 -36
- data/ext/asyncengine/libuv/src/win/error.c +21 -1
- data/ext/asyncengine/libuv/src/win/fs-event.c +19 -21
- data/ext/asyncengine/libuv/src/win/fs.c +541 -189
- data/ext/asyncengine/libuv/src/win/getaddrinfo.c +56 -63
- data/ext/asyncengine/libuv/src/win/handle-inl.h +145 -0
- data/ext/asyncengine/libuv/src/win/handle.c +26 -101
- data/ext/asyncengine/libuv/src/win/internal.h +92 -107
- data/ext/asyncengine/libuv/src/win/loop-watcher.c +6 -14
- data/ext/asyncengine/libuv/src/win/pipe.c +78 -64
- data/ext/asyncengine/libuv/src/win/poll.c +618 -0
- data/ext/asyncengine/libuv/src/win/process-stdio.c +479 -0
- data/ext/asyncengine/libuv/src/win/process.c +147 -274
- data/ext/asyncengine/libuv/src/win/req-inl.h +225 -0
- data/ext/asyncengine/libuv/src/win/req.c +0 -149
- data/ext/asyncengine/libuv/src/{unix/check.c → win/stream-inl.h} +31 -42
- data/ext/asyncengine/libuv/src/win/stream.c +9 -43
- data/ext/asyncengine/libuv/src/win/tcp.c +200 -82
- data/ext/asyncengine/libuv/src/win/thread.c +42 -2
- data/ext/asyncengine/libuv/src/win/threadpool.c +3 -2
- data/ext/asyncengine/libuv/src/win/timer.c +13 -63
- data/ext/asyncengine/libuv/src/win/tty.c +26 -20
- data/ext/asyncengine/libuv/src/win/udp.c +26 -17
- data/ext/asyncengine/libuv/src/win/util.c +312 -167
- data/ext/asyncengine/libuv/src/win/winapi.c +16 -1
- data/ext/asyncengine/libuv/src/win/winapi.h +33 -9
- data/ext/asyncengine/libuv/src/win/winsock.c +88 -1
- data/ext/asyncengine/libuv/src/win/winsock.h +36 -3
- data/ext/asyncengine/libuv/test/benchmark-ares.c +16 -17
- data/ext/asyncengine/libuv/test/benchmark-fs-stat.c +164 -0
- data/ext/asyncengine/libuv/test/benchmark-list.h +9 -0
- data/ext/asyncengine/libuv/{src/unix/prepare.c → test/benchmark-loop-count.c} +42 -33
- data/ext/asyncengine/libuv/test/benchmark-million-timers.c +65 -0
- data/ext/asyncengine/libuv/test/benchmark-pound.c +1 -1
- data/ext/asyncengine/libuv/test/benchmark-sizes.c +2 -0
- data/ext/asyncengine/libuv/test/benchmark-spawn.c +7 -1
- data/ext/asyncengine/libuv/test/benchmark-udp-packet-storm.c +1 -1
- data/ext/asyncengine/libuv/test/echo-server.c +8 -0
- data/ext/asyncengine/libuv/test/run-tests.c +30 -0
- data/ext/asyncengine/libuv/test/runner-unix.c +6 -26
- data/ext/asyncengine/libuv/test/runner-win.c +5 -63
- data/ext/asyncengine/libuv/test/runner.c +10 -1
- data/ext/asyncengine/libuv/test/task.h +0 -8
- data/ext/asyncengine/libuv/test/test-async.c +43 -141
- data/ext/asyncengine/libuv/test/test-callback-order.c +76 -0
- data/ext/asyncengine/libuv/test/test-counters-init.c +2 -3
- data/ext/asyncengine/libuv/test/test-dlerror.c +17 -8
- data/ext/asyncengine/libuv/test/test-fs-event.c +31 -39
- data/ext/asyncengine/libuv/test/test-fs-poll.c +146 -0
- data/ext/asyncengine/libuv/test/test-fs.c +114 -2
- data/ext/asyncengine/libuv/test/test-gethostbyname.c +8 -8
- data/ext/asyncengine/libuv/test/test-hrtime.c +18 -15
- data/ext/asyncengine/libuv/test/test-ipc.c +8 -2
- data/ext/asyncengine/libuv/test/test-list.h +59 -9
- data/ext/asyncengine/libuv/test/test-loop-handles.c +2 -25
- data/ext/asyncengine/libuv/{src/unix/idle.c → test/test-poll-close.c} +37 -39
- data/ext/asyncengine/libuv/test/test-poll.c +573 -0
- data/ext/asyncengine/libuv/test/test-ref.c +79 -63
- data/ext/asyncengine/libuv/test/test-run-once.c +15 -11
- data/ext/asyncengine/libuv/test/test-semaphore.c +111 -0
- data/ext/asyncengine/libuv/test/test-spawn.c +368 -20
- data/ext/asyncengine/libuv/test/test-stdio-over-pipes.c +25 -35
- data/ext/asyncengine/libuv/test/test-tcp-close-while-connecting.c +80 -0
- data/ext/asyncengine/libuv/test/test-tcp-close.c +1 -1
- data/ext/asyncengine/libuv/test/test-tcp-connect-error-after-write.c +95 -0
- data/ext/asyncengine/libuv/test/test-tcp-connect-timeout.c +85 -0
- data/ext/asyncengine/libuv/test/test-tcp-shutdown-after-write.c +131 -0
- data/ext/asyncengine/libuv/test/test-tcp-write-error.c +2 -2
- data/ext/asyncengine/libuv/test/test-tcp-writealot.c +29 -54
- data/ext/asyncengine/libuv/test/test-timer-again.c +1 -1
- data/ext/asyncengine/libuv/test/test-timer.c +23 -1
- data/ext/asyncengine/libuv/test/test-udp-options.c +1 -1
- data/ext/asyncengine/libuv/test/{test-eio-overflow.c → test-walk-handles.c} +31 -44
- data/ext/asyncengine/libuv/uv.gyp +26 -9
- data/ext/asyncengine/rb_utilities.c +54 -0
- data/ext/asyncengine/rb_utilities.h +63 -0
- data/lib/asyncengine.rb +45 -38
- data/lib/asyncengine/asyncengine_ext.so +0 -0
- data/lib/asyncengine/debug.rb +37 -0
- data/lib/asyncengine/handle.rb +9 -0
- data/lib/asyncengine/tcp.rb +28 -0
- data/lib/asyncengine/timer.rb +18 -28
- data/lib/asyncengine/udp.rb +29 -0
- data/lib/asyncengine/utils.rb +32 -0
- data/lib/asyncengine/uv_error.rb +17 -0
- data/lib/asyncengine/version.rb +9 -1
- data/test/ae_test_helper.rb +62 -0
- data/test/test_basic.rb +169 -0
- data/test/test_call_from_other_thread.rb +55 -0
- data/test/test_error.rb +92 -0
- data/test/test_ip_utils.rb +44 -0
- data/test/test_next_tick.rb +37 -0
- data/test/test_resolver.rb +51 -0
- data/test/test_threads.rb +69 -0
- data/test/test_timer.rb +95 -0
- data/test/test_udp.rb +216 -0
- data/test/test_utils.rb +49 -0
- metadata +84 -57
- data/ext/asyncengine/libuv/mkmf.log +0 -24
- data/ext/asyncengine/libuv/src/unix/cares.c +0 -194
- data/ext/asyncengine/libuv/src/unix/cares.o +0 -0
- data/ext/asyncengine/libuv/src/unix/check.o +0 -0
- data/ext/asyncengine/libuv/src/unix/idle.o +0 -0
- data/ext/asyncengine/libuv/src/unix/prepare.o +0 -0
- data/ext/asyncengine/libuv/src/win/cares.c +0 -290
- data/lib/asyncengine/errors.rb +0 -5
- data/lib/asyncengine/next_tick.rb +0 -24
@@ -106,14 +106,10 @@ static int idle_2_close_cb_called = 0;
|
|
106
106
|
static int idle_2_cb_started = 0;
|
107
107
|
static int idle_2_is_active = 0;
|
108
108
|
|
109
|
-
static int timer_cb_called = 0;
|
110
|
-
|
111
109
|
|
112
110
|
static void timer_cb(uv_timer_t* handle, int status) {
|
113
111
|
ASSERT(handle == &timer_handle);
|
114
112
|
ASSERT(status == 0);
|
115
|
-
|
116
|
-
timer_cb_called++;
|
117
113
|
}
|
118
114
|
|
119
115
|
|
@@ -152,7 +148,7 @@ static void idle_1_cb(uv_idle_t* handle, int status) {
|
|
152
148
|
ASSERT(idles_1_active > 0);
|
153
149
|
|
154
150
|
/* Init idle_2 and make it active */
|
155
|
-
if (!idle_2_is_active) {
|
151
|
+
if (!idle_2_is_active && !uv_is_closing((uv_handle_t*)&idle_2_handle)) {
|
156
152
|
r = uv_idle_init(uv_default_loop(), &idle_2_handle);
|
157
153
|
ASSERT(r == 0);
|
158
154
|
r = uv_idle_start(&idle_2_handle, idle_2_cb);
|
@@ -212,11 +208,6 @@ static void check_cb(uv_check_t* handle, int status) {
|
|
212
208
|
ASSERT(handle == &check_handle);
|
213
209
|
ASSERT(status == 0);
|
214
210
|
|
215
|
-
/* XXX
|
216
|
-
ASSERT(idles_1_active == 0);
|
217
|
-
ASSERT(idle_2_is_active == 0);
|
218
|
-
*/
|
219
|
-
|
220
211
|
if (loop_iteration < ITERATIONS) {
|
221
212
|
/* Make some idle watchers active */
|
222
213
|
for (i = 0; i < 1 + (loop_iteration % IDLE_COUNT); i++) {
|
@@ -254,9 +245,6 @@ static void prepare_2_cb(uv_prepare_t* handle, int status) {
|
|
254
245
|
ASSERT(handle == &prepare_2_handle);
|
255
246
|
ASSERT(status == 0);
|
256
247
|
|
257
|
-
/* XXX ASSERT(idles_1_active == 0); */
|
258
|
-
/* XXX ASSERT(idle_2_is_active == 0); */
|
259
|
-
|
260
248
|
/* prepare_2 gets started by prepare_1 when (loop_iteration % 2 == 0), */
|
261
249
|
/* and it stops itself immediately. A started watcher is not queued */
|
262
250
|
/* until the next round, so when this callback is made */
|
@@ -278,11 +266,6 @@ static void prepare_1_cb(uv_prepare_t* handle, int status) {
|
|
278
266
|
ASSERT(handle == &prepare_1_handle);
|
279
267
|
ASSERT(status == 0);
|
280
268
|
|
281
|
-
/* XXX
|
282
|
-
ASSERT(idles_1_active == 0);
|
283
|
-
ASSERT(idle_2_is_active == 0);
|
284
|
-
*/
|
285
|
-
|
286
269
|
if (loop_iteration % 2 == 0) {
|
287
270
|
r = uv_prepare_start(&prepare_2_handle, prepare_2_cb);
|
288
271
|
ASSERT(r == 0);
|
@@ -327,7 +310,7 @@ TEST_IMPL(loop_handles) {
|
|
327
310
|
ASSERT(r == 0);
|
328
311
|
r = uv_timer_start(&timer_handle, timer_cb, TIMEOUT, TIMEOUT);
|
329
312
|
ASSERT(r == 0);
|
330
|
-
uv_unref(
|
313
|
+
uv_unref((uv_handle_t*)&timer_handle);
|
331
314
|
|
332
315
|
r = uv_run(uv_default_loop());
|
333
316
|
ASSERT(r == 0);
|
@@ -344,16 +327,10 @@ TEST_IMPL(loop_handles) {
|
|
344
327
|
ASSERT(check_close_cb_called == 1);
|
345
328
|
|
346
329
|
/* idle_1_cb should be called a lot */
|
347
|
-
/* XXX ASSERT(idle_1_cb_called >= ITERATIONS * IDLE_COUNT * 2); */
|
348
330
|
ASSERT(idle_1_close_cb_called == IDLE_COUNT);
|
349
|
-
/* XXX ASSERT(idles_1_active == 0); */
|
350
331
|
|
351
|
-
/* XXX ASSERT(idle_2_cb_started >= ITERATIONS); */
|
352
|
-
/* XXX ASSERT(idle_2_cb_called == idle_2_cb_started); */
|
353
332
|
ASSERT(idle_2_close_cb_called == idle_2_cb_started);
|
354
333
|
ASSERT(idle_2_is_active == 0);
|
355
334
|
|
356
|
-
ASSERT(timer_cb_called > 0);
|
357
|
-
|
358
335
|
return 0;
|
359
336
|
}
|
@@ -1,4 +1,5 @@
|
|
1
1
|
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
2
|
+
*
|
2
3
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
3
4
|
* of this software and associated documentation files (the "Software"), to
|
4
5
|
* deal in the Software without restriction, including without limitation the
|
@@ -18,62 +19,59 @@
|
|
18
19
|
* IN THE SOFTWARE.
|
19
20
|
*/
|
20
21
|
|
21
|
-
#include
|
22
|
-
#include "internal.h"
|
22
|
+
#include <errno.h>
|
23
23
|
|
24
|
+
#ifndef _WIN32
|
25
|
+
# include <fcntl.h>
|
26
|
+
# include <sys/socket.h>
|
27
|
+
# include <unistd.h>
|
28
|
+
#endif
|
24
29
|
|
25
|
-
|
26
|
-
|
30
|
+
#include "uv.h"
|
31
|
+
#include "task.h"
|
27
32
|
|
28
|
-
|
29
|
-
idle->idle_cb(idle, 0);
|
30
|
-
}
|
31
|
-
}
|
33
|
+
#define NUM_SOCKETS 64
|
32
34
|
|
33
35
|
|
34
|
-
int
|
35
|
-
uv__handle_init(loop, (uv_handle_t*)idle, UV_IDLE);
|
36
|
-
loop->counters.idle_init++;
|
36
|
+
static int close_cb_called = 0;
|
37
37
|
|
38
|
-
ev_idle_init(&idle->idle_watcher, uv__idle);
|
39
|
-
idle->idle_cb = NULL;
|
40
38
|
|
41
|
-
|
39
|
+
static void poll_cb_fail(uv_poll_t* handle, int status, int events) {
|
40
|
+
ASSERT(0 && "poll_fail_cb should never be called");
|
42
41
|
}
|
43
42
|
|
44
43
|
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
idle->idle_cb = cb;
|
49
|
-
ev_idle_start(idle->loop->ev, &idle->idle_watcher);
|
50
|
-
|
51
|
-
if (!was_active) {
|
52
|
-
ev_unref(idle->loop->ev);
|
53
|
-
}
|
54
|
-
|
55
|
-
return 0;
|
44
|
+
static void close_cb(uv_handle_t* handle) {
|
45
|
+
close_cb_called++;
|
56
46
|
}
|
57
47
|
|
58
48
|
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
49
|
+
TEST_IMPL(poll_close) {
|
50
|
+
uv_os_sock_t sockets[NUM_SOCKETS];
|
51
|
+
uv_poll_t poll_handles[NUM_SOCKETS];
|
52
|
+
int i;
|
63
53
|
|
64
|
-
|
65
|
-
|
54
|
+
#ifdef _WIN32
|
55
|
+
{
|
56
|
+
struct WSAData wsa_data;
|
57
|
+
int r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
|
58
|
+
ASSERT(r == 0);
|
66
59
|
}
|
60
|
+
#endif
|
67
61
|
|
68
|
-
|
69
|
-
|
70
|
-
|
62
|
+
for (i = 0; i < NUM_SOCKETS; i++) {
|
63
|
+
sockets[i] = socket(AF_INET, SOCK_STREAM, 0);
|
64
|
+
uv_poll_init_socket(uv_default_loop(), &poll_handles[i], sockets[i]);
|
65
|
+
uv_poll_start(&poll_handles[i], UV_READABLE | UV_WRITABLE, NULL);
|
66
|
+
}
|
67
|
+
|
68
|
+
for (i = 0; i < NUM_SOCKETS; i++) {
|
69
|
+
uv_close((uv_handle_t*) &poll_handles[i], close_cb);
|
70
|
+
}
|
71
71
|
|
72
|
-
|
73
|
-
return ev_is_active(&handle->idle_watcher);
|
74
|
-
}
|
72
|
+
uv_run(uv_default_loop());
|
75
73
|
|
74
|
+
ASSERT(close_cb_called == NUM_SOCKETS);
|
76
75
|
|
77
|
-
|
78
|
-
uv_idle_stop(handle);
|
76
|
+
return 0;
|
79
77
|
}
|
@@ -0,0 +1,573 @@
|
|
1
|
+
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
2
|
+
*
|
3
|
+
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
4
|
+
* of this software and associated documentation files (the "Software"), to
|
5
|
+
* deal in the Software without restriction, including without limitation the
|
6
|
+
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
7
|
+
* sell copies of the Software, and to permit persons to whom the Software is
|
8
|
+
* furnished to do so, subject to the following conditions:
|
9
|
+
*
|
10
|
+
* The above copyright notice and this permission notice shall be included in
|
11
|
+
* all copies or substantial portions of the Software.
|
12
|
+
*
|
13
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
14
|
+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
15
|
+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
16
|
+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
17
|
+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
18
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
19
|
+
* IN THE SOFTWARE.
|
20
|
+
*/
|
21
|
+
|
22
|
+
#include <errno.h>
|
23
|
+
|
24
|
+
#ifndef _WIN32
|
25
|
+
# include <fcntl.h>
|
26
|
+
# include <sys/socket.h>
|
27
|
+
# include <unistd.h>
|
28
|
+
#endif
|
29
|
+
|
30
|
+
#include "uv.h"
|
31
|
+
#include "task.h"
|
32
|
+
|
33
|
+
|
34
|
+
#define NUM_CLIENTS 5
|
35
|
+
#define TRANSFER_BYTES (1 << 16)
|
36
|
+
|
37
|
+
#undef MIN
|
38
|
+
#define MIN(a, b) (((a) < (b)) ? (a) : (b));
|
39
|
+
|
40
|
+
|
41
|
+
typedef enum {
|
42
|
+
UNIDIRECTIONAL,
|
43
|
+
DUPLEX
|
44
|
+
} test_mode_t;
|
45
|
+
|
46
|
+
typedef struct connection_context_s {
|
47
|
+
uv_poll_t poll_handle;
|
48
|
+
uv_timer_t timer_handle;
|
49
|
+
uv_os_sock_t sock;
|
50
|
+
size_t read, sent;
|
51
|
+
int is_server_connection;
|
52
|
+
int open_handles;
|
53
|
+
int got_fin, sent_fin;
|
54
|
+
unsigned int events, delayed_events;
|
55
|
+
} connection_context_t;
|
56
|
+
|
57
|
+
typedef struct server_context_s {
|
58
|
+
uv_poll_t poll_handle;
|
59
|
+
uv_os_sock_t sock;
|
60
|
+
int connections;
|
61
|
+
} server_context_t;
|
62
|
+
|
63
|
+
|
64
|
+
static void delay_timer_cb(uv_timer_t* timer, int status);
|
65
|
+
|
66
|
+
|
67
|
+
static test_mode_t test_mode = DUPLEX;
|
68
|
+
|
69
|
+
static int closed_connections = 0;
|
70
|
+
|
71
|
+
static int valid_writable_wakeups = 0;
|
72
|
+
static int spurious_writable_wakeups = 0;
|
73
|
+
|
74
|
+
|
75
|
+
static int got_eagain() {
|
76
|
+
#ifdef _WIN32
|
77
|
+
return WSAGetLastError() == WSAEWOULDBLOCK;
|
78
|
+
#else
|
79
|
+
return errno == EAGAIN
|
80
|
+
|| errno == EINPROGRESS
|
81
|
+
#ifdef EWOULDBLOCK
|
82
|
+
|| errno == EWOULDBLOCK;
|
83
|
+
#endif
|
84
|
+
;
|
85
|
+
#endif
|
86
|
+
}
|
87
|
+
|
88
|
+
|
89
|
+
static void set_nonblocking(uv_os_sock_t sock) {
|
90
|
+
int r;
|
91
|
+
#ifdef _WIN32
|
92
|
+
unsigned long on = 1;
|
93
|
+
r = ioctlsocket(sock, FIONBIO, &on);
|
94
|
+
ASSERT(r == 0);
|
95
|
+
#else
|
96
|
+
int flags = fcntl(sock, F_GETFL, 0);
|
97
|
+
ASSERT(flags >= 0);
|
98
|
+
r = fcntl(sock, F_SETFL, flags | O_NONBLOCK);
|
99
|
+
ASSERT(r >= 0);
|
100
|
+
#endif
|
101
|
+
}
|
102
|
+
|
103
|
+
|
104
|
+
static uv_os_sock_t create_nonblocking_bound_socket(
|
105
|
+
struct sockaddr_in bind_addr) {
|
106
|
+
uv_os_sock_t sock;
|
107
|
+
int r;
|
108
|
+
|
109
|
+
sock = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
|
110
|
+
#ifdef _WIN32
|
111
|
+
ASSERT(sock != INVALID_SOCKET);
|
112
|
+
#else
|
113
|
+
ASSERT(sock >= 0);
|
114
|
+
#endif
|
115
|
+
|
116
|
+
set_nonblocking(sock);
|
117
|
+
|
118
|
+
#ifndef _WIN32
|
119
|
+
{
|
120
|
+
/* Allow reuse of the port. */
|
121
|
+
int yes = 1;
|
122
|
+
r = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof yes);
|
123
|
+
ASSERT(r == 0);
|
124
|
+
}
|
125
|
+
#endif
|
126
|
+
|
127
|
+
r = bind(sock, (const struct sockaddr*) &bind_addr, sizeof bind_addr);
|
128
|
+
ASSERT(r == 0);
|
129
|
+
|
130
|
+
return sock;
|
131
|
+
}
|
132
|
+
|
133
|
+
|
134
|
+
static void close_socket(uv_os_sock_t sock) {
|
135
|
+
int r;
|
136
|
+
#ifdef _WIN32
|
137
|
+
r = closesocket(sock);
|
138
|
+
#else
|
139
|
+
r = close(sock);
|
140
|
+
#endif
|
141
|
+
ASSERT(r == 0);
|
142
|
+
}
|
143
|
+
|
144
|
+
|
145
|
+
static connection_context_t* create_connection_context(
|
146
|
+
uv_os_sock_t sock, int is_server_connection) {
|
147
|
+
int r;
|
148
|
+
connection_context_t* context;
|
149
|
+
|
150
|
+
context = (connection_context_t*) malloc(sizeof *context);
|
151
|
+
ASSERT(context != NULL);
|
152
|
+
|
153
|
+
context->sock = sock;
|
154
|
+
context->is_server_connection = is_server_connection;
|
155
|
+
context->read = 0;
|
156
|
+
context->sent = 0;
|
157
|
+
context->open_handles = 0;
|
158
|
+
context->events = 0;
|
159
|
+
context->delayed_events = 0;
|
160
|
+
context->got_fin = 0;
|
161
|
+
context->sent_fin = 0;
|
162
|
+
|
163
|
+
r = uv_poll_init_socket(uv_default_loop(), &context->poll_handle, sock);
|
164
|
+
context->open_handles++;
|
165
|
+
context->poll_handle.data = context;
|
166
|
+
ASSERT(r == 0);
|
167
|
+
|
168
|
+
r = uv_timer_init(uv_default_loop(), &context->timer_handle);
|
169
|
+
context->open_handles++;
|
170
|
+
context->timer_handle.data = context;
|
171
|
+
ASSERT(r == 0);
|
172
|
+
|
173
|
+
return context;
|
174
|
+
}
|
175
|
+
|
176
|
+
|
177
|
+
static void connection_close_cb(uv_handle_t* handle) {
|
178
|
+
connection_context_t* context = (connection_context_t*) handle->data;
|
179
|
+
|
180
|
+
if (--context->open_handles == 0) {
|
181
|
+
if (test_mode == DUPLEX || context->is_server_connection) {
|
182
|
+
ASSERT(context->read == TRANSFER_BYTES);
|
183
|
+
} else {
|
184
|
+
ASSERT(context->read == 0);
|
185
|
+
}
|
186
|
+
|
187
|
+
if (test_mode == DUPLEX || !context->is_server_connection) {
|
188
|
+
ASSERT(context->sent == TRANSFER_BYTES);
|
189
|
+
} else {
|
190
|
+
ASSERT(context->sent == 0);
|
191
|
+
}
|
192
|
+
|
193
|
+
closed_connections++;
|
194
|
+
|
195
|
+
free(context);
|
196
|
+
}
|
197
|
+
}
|
198
|
+
|
199
|
+
|
200
|
+
static void destroy_connection_context(connection_context_t* context) {
|
201
|
+
uv_close((uv_handle_t*) &context->poll_handle, connection_close_cb);
|
202
|
+
uv_close((uv_handle_t*) &context->timer_handle, connection_close_cb);
|
203
|
+
}
|
204
|
+
|
205
|
+
|
206
|
+
static void connection_poll_cb(uv_poll_t* handle, int status, int events) {
|
207
|
+
connection_context_t* context = (connection_context_t*) handle->data;
|
208
|
+
int new_events;
|
209
|
+
int r;
|
210
|
+
|
211
|
+
ASSERT(status == 0);
|
212
|
+
ASSERT(events & context->events);
|
213
|
+
ASSERT(!(events & ~context->events));
|
214
|
+
|
215
|
+
new_events = context->events;
|
216
|
+
|
217
|
+
if (events & UV_READABLE) {
|
218
|
+
int action = rand() % 7;
|
219
|
+
|
220
|
+
switch (action) {
|
221
|
+
case 0:
|
222
|
+
case 1: {
|
223
|
+
/* Read a couple of bytes. */
|
224
|
+
static char buffer[74];
|
225
|
+
r = recv(context->sock, buffer, sizeof buffer, 0);
|
226
|
+
ASSERT(r >= 0);
|
227
|
+
|
228
|
+
if (r > 0) {
|
229
|
+
context->read += r;
|
230
|
+
} else {
|
231
|
+
/* Got FIN. */
|
232
|
+
context->got_fin = 1;
|
233
|
+
new_events &= ~UV_READABLE;
|
234
|
+
}
|
235
|
+
|
236
|
+
break;
|
237
|
+
}
|
238
|
+
|
239
|
+
case 2:
|
240
|
+
case 3: {
|
241
|
+
/* Read until EAGAIN. */
|
242
|
+
static char buffer[931];
|
243
|
+
r = recv(context->sock, buffer, sizeof buffer, 0);
|
244
|
+
ASSERT(r >= 0);
|
245
|
+
|
246
|
+
while (r > 0) {
|
247
|
+
context->read += r;
|
248
|
+
r = recv(context->sock, buffer, sizeof buffer, 0);
|
249
|
+
}
|
250
|
+
|
251
|
+
if (r == 0) {
|
252
|
+
/* Got FIN. */
|
253
|
+
context->got_fin = 1;
|
254
|
+
new_events &= ~UV_READABLE;
|
255
|
+
} else {
|
256
|
+
ASSERT(got_eagain());
|
257
|
+
}
|
258
|
+
|
259
|
+
break;
|
260
|
+
}
|
261
|
+
|
262
|
+
case 4:
|
263
|
+
/* Ignore. */
|
264
|
+
break;
|
265
|
+
|
266
|
+
case 5:
|
267
|
+
/* Stop reading for a while. Restart in timer callback. */
|
268
|
+
new_events &= ~UV_READABLE;
|
269
|
+
if (!uv_is_active((uv_handle_t*) &context->timer_handle)) {
|
270
|
+
context->delayed_events = UV_READABLE;
|
271
|
+
uv_timer_start(&context->timer_handle, delay_timer_cb, 10, 0);
|
272
|
+
} else {
|
273
|
+
context->delayed_events |= UV_READABLE;
|
274
|
+
}
|
275
|
+
break;
|
276
|
+
|
277
|
+
case 6:
|
278
|
+
/* Fudge with the event mask. */
|
279
|
+
uv_poll_start(&context->poll_handle, UV_WRITABLE, connection_poll_cb);
|
280
|
+
uv_poll_start(&context->poll_handle, UV_READABLE, connection_poll_cb);
|
281
|
+
context->events = UV_READABLE;
|
282
|
+
break;
|
283
|
+
|
284
|
+
default:
|
285
|
+
ASSERT(0);
|
286
|
+
}
|
287
|
+
}
|
288
|
+
|
289
|
+
if (events & UV_WRITABLE) {
|
290
|
+
if (context->sent < TRANSFER_BYTES &&
|
291
|
+
!(test_mode == UNIDIRECTIONAL && context->is_server_connection)) {
|
292
|
+
/* We have to send more bytes. */
|
293
|
+
int action = rand() % 7;
|
294
|
+
|
295
|
+
switch (action) {
|
296
|
+
case 0:
|
297
|
+
case 1: {
|
298
|
+
/* Send a couple of bytes. */
|
299
|
+
static char buffer[103];
|
300
|
+
|
301
|
+
int send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
|
302
|
+
ASSERT(send_bytes > 0);
|
303
|
+
|
304
|
+
r = send(context->sock, buffer, send_bytes, 0);
|
305
|
+
|
306
|
+
if (r < 0) {
|
307
|
+
ASSERT(got_eagain());
|
308
|
+
spurious_writable_wakeups++;
|
309
|
+
break;
|
310
|
+
}
|
311
|
+
|
312
|
+
ASSERT(r > 0);
|
313
|
+
context->sent += r;
|
314
|
+
valid_writable_wakeups++;
|
315
|
+
break;
|
316
|
+
}
|
317
|
+
|
318
|
+
case 2:
|
319
|
+
case 3: {
|
320
|
+
/* Send until EAGAIN. */
|
321
|
+
static char buffer[1234];
|
322
|
+
|
323
|
+
int send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
|
324
|
+
ASSERT(send_bytes > 0);
|
325
|
+
|
326
|
+
r = send(context->sock, buffer, send_bytes, 0);
|
327
|
+
|
328
|
+
if (r < 0) {
|
329
|
+
ASSERT(got_eagain());
|
330
|
+
spurious_writable_wakeups++;
|
331
|
+
break;
|
332
|
+
}
|
333
|
+
|
334
|
+
ASSERT(r > 0);
|
335
|
+
valid_writable_wakeups++;
|
336
|
+
context->sent += r;
|
337
|
+
|
338
|
+
while (context->sent < TRANSFER_BYTES) {
|
339
|
+
send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
|
340
|
+
ASSERT(send_bytes > 0);
|
341
|
+
|
342
|
+
r = send(context->sock, buffer, send_bytes, 0);
|
343
|
+
|
344
|
+
if (r <= 0) break;
|
345
|
+
context->sent += r;
|
346
|
+
}
|
347
|
+
ASSERT(r > 0 || got_eagain());
|
348
|
+
break;
|
349
|
+
}
|
350
|
+
|
351
|
+
case 4:
|
352
|
+
/* Ignore. */
|
353
|
+
break;
|
354
|
+
|
355
|
+
case 5:
|
356
|
+
/* Stop sending for a while. Restart in timer callback. */
|
357
|
+
new_events &= ~UV_WRITABLE;
|
358
|
+
if (!uv_is_active((uv_handle_t*) &context->timer_handle)) {
|
359
|
+
context->delayed_events = UV_WRITABLE;
|
360
|
+
uv_timer_start(&context->timer_handle, delay_timer_cb, 100, 0);
|
361
|
+
} else {
|
362
|
+
context->delayed_events |= UV_WRITABLE;
|
363
|
+
}
|
364
|
+
break;
|
365
|
+
|
366
|
+
case 6:
|
367
|
+
/* Fudge with the event mask. */
|
368
|
+
uv_poll_start(&context->poll_handle,
|
369
|
+
UV_READABLE,
|
370
|
+
connection_poll_cb);
|
371
|
+
uv_poll_start(&context->poll_handle,
|
372
|
+
UV_WRITABLE,
|
373
|
+
connection_poll_cb);
|
374
|
+
context->events = UV_WRITABLE;
|
375
|
+
break;
|
376
|
+
|
377
|
+
default:
|
378
|
+
ASSERT(0);
|
379
|
+
}
|
380
|
+
|
381
|
+
} else {
|
382
|
+
/* Nothing more to write. Send FIN. */
|
383
|
+
int r;
|
384
|
+
#ifdef _WIN32
|
385
|
+
r = shutdown(context->sock, SD_SEND);
|
386
|
+
#else
|
387
|
+
r = shutdown(context->sock, SHUT_WR);
|
388
|
+
#endif
|
389
|
+
ASSERT(r == 0);
|
390
|
+
context->sent_fin = 1;
|
391
|
+
new_events &= ~UV_WRITABLE;
|
392
|
+
}
|
393
|
+
}
|
394
|
+
|
395
|
+
if (context->got_fin && context->sent_fin) {
|
396
|
+
/* Sent and received FIN. Close and destroy context. */
|
397
|
+
close_socket(context->sock);
|
398
|
+
destroy_connection_context(context);
|
399
|
+
context->events = 0;
|
400
|
+
|
401
|
+
} else if (new_events != context->events) {
|
402
|
+
/* Poll mask changed. Call uv_poll_start again. */
|
403
|
+
context->events = new_events;
|
404
|
+
uv_poll_start(handle, new_events, connection_poll_cb);
|
405
|
+
}
|
406
|
+
|
407
|
+
/* Assert that uv_is_active works correctly for poll handles. */
|
408
|
+
if (context->events != 0) {
|
409
|
+
ASSERT(uv_is_active((uv_handle_t*) handle));
|
410
|
+
} else {
|
411
|
+
ASSERT(!uv_is_active((uv_handle_t*) handle));
|
412
|
+
}
|
413
|
+
}
|
414
|
+
|
415
|
+
|
416
|
+
/* Fires after a connection deliberately paused itself (test round 5):
 * folds the deferred poll events back in and re-arms the poll handle. */
static void delay_timer_cb(uv_timer_t* timer, int status) {
  connection_context_t* ctx;
  int rc;

  ctx = (connection_context_t*) timer->data;

  /* A timer started with repeat == 0 must not be active inside its own
   * callback. */
  ASSERT(!uv_is_active((uv_handle_t*) timer));

  /* Merge the requested events back into the active poll mask. */
  ASSERT(ctx->delayed_events != 0);
  ctx->events |= ctx->delayed_events;
  ctx->delayed_events = 0;

  rc = uv_poll_start(&ctx->poll_handle, ctx->events, connection_poll_cb);
  ASSERT(rc == 0);
}
|
433
|
+
|
434
|
+
|
435
|
+
static server_context_t* create_server_context(
|
436
|
+
uv_os_sock_t sock) {
|
437
|
+
int r;
|
438
|
+
server_context_t* context;
|
439
|
+
|
440
|
+
context = (server_context_t*) malloc(sizeof *context);
|
441
|
+
ASSERT(context != NULL);
|
442
|
+
|
443
|
+
context->sock = sock;
|
444
|
+
context->connections = 0;
|
445
|
+
|
446
|
+
r = uv_poll_init_socket(uv_default_loop(), &context->poll_handle, sock);
|
447
|
+
context->poll_handle.data = context;
|
448
|
+
ASSERT(r == 0);
|
449
|
+
|
450
|
+
return context;
|
451
|
+
}
|
452
|
+
|
453
|
+
|
454
|
+
/* uv_close completion callback for the server poll handle: releases the
 * context that owns the handle (set in create_server_context). */
static void server_close_cb(uv_handle_t* handle) {
  free(handle->data);
}
|
458
|
+
|
459
|
+
|
460
|
+
/* Tear down the server: close its poll handle. The context itself is
 * freed asynchronously in server_close_cb once libuv completes the close. */
static void destroy_server_context(server_context_t* context) {
  uv_close((uv_handle_t*) &context->poll_handle, server_close_cb);
}
|
463
|
+
|
464
|
+
|
465
|
+
/* Poll callback for the listening socket: accepts one pending connection
 * per wakeup, wraps it in a connection context and starts duplex polling
 * on it. `status` and `events` are ignored here; the wakeup itself implies
 * the socket is readable (i.e. a connection is pending). */
static void server_poll_cb(uv_poll_t* handle, int status, int events) {
  server_context_t* server_context = (server_context_t*)
      handle->data;
  connection_context_t* connection_context;
  struct sockaddr_in addr;
  socklen_t addr_len;
  uv_os_sock_t sock;
  int r;

  addr_len = sizeof addr;
  sock = accept(server_context->sock, (struct sockaddr*) &addr, &addr_len);
  /* accept() failure sentinels differ per platform. */
#ifdef _WIN32
  ASSERT(sock != INVALID_SOCKET);
#else
  ASSERT(sock >= 0);
#endif

  set_nonblocking(sock);

  /* Second argument 1 presumably marks this as the server end (clients
   * pass 0 in start_client) — confirm against create_connection_context. */
  connection_context = create_connection_context(sock, 1);
  connection_context->events = UV_READABLE | UV_WRITABLE;
  r = uv_poll_start(&connection_context->poll_handle,
                    UV_READABLE | UV_WRITABLE,
                    connection_poll_cb);
  ASSERT(r == 0);

  /* Once every expected client has connected, stop accepting: close the
   * listening socket and schedule the server context for destruction. */
  if (++server_context->connections == NUM_CLIENTS) {
    close_socket(server_context->sock);
    destroy_server_context(server_context);
  }
}
|
496
|
+
|
497
|
+
|
498
|
+
static void start_server() {
|
499
|
+
uv_os_sock_t sock;
|
500
|
+
server_context_t* context;
|
501
|
+
int r;
|
502
|
+
|
503
|
+
sock = create_nonblocking_bound_socket(uv_ip4_addr("127.0.0.1", TEST_PORT));
|
504
|
+
context = create_server_context(sock);
|
505
|
+
|
506
|
+
r = listen(sock, 100);
|
507
|
+
ASSERT(r == 0);
|
508
|
+
|
509
|
+
r = uv_poll_start(&context->poll_handle, UV_READABLE, server_poll_cb);
|
510
|
+
ASSERT(r == 0);
|
511
|
+
}
|
512
|
+
|
513
|
+
|
514
|
+
/* Start one client connection to the test server and begin polling it.
 * The socket is nonblocking, so connect() may complete asynchronously. */
static void start_client() {
  uv_os_sock_t sock;
  connection_context_t* context;
  struct sockaddr_in server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
  int r;

  /* Bind to any interface on an ephemeral port (port 0). */
  sock = create_nonblocking_bound_socket(uv_ip4_addr("0.0.0.0", 0));
  /* Second argument 0 presumably marks this as the client end (the server
   * side passes 1) — confirm against create_connection_context. */
  context = create_connection_context(sock, 0);

  context->events = UV_READABLE | UV_WRITABLE;
  r = uv_poll_start(&context->poll_handle,
                    UV_READABLE | UV_WRITABLE,
                    connection_poll_cb);
  ASSERT(r == 0);

  r = connect(sock, (struct sockaddr*) &server_addr, sizeof server_addr);
  /* A nonblocking connect usually reports "in progress" rather than
   * succeeding immediately; got_eagain() presumably accepts that case
   * (EINPROGRESS/WSAEWOULDBLOCK) — it is defined elsewhere in this file. */
  ASSERT(r == 0 || got_eagain());
}
|
532
|
+
|
533
|
+
|
534
|
+
/* Drive the whole poll test: start the server and NUM_CLIENTS clients,
 * run the default loop to completion, then validate wakeup and
 * connection-close statistics. */
static void start_poll_test() {
  int i, r;

#ifdef _WIN32
  {
    /* Winsock must be initialized before any socket call on Windows. */
    struct WSAData wsa_data;
    int r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
    ASSERT(r == 0);
  }
#endif

  start_server();

  for (i = 0; i < NUM_CLIENTS; i++)
    start_client();

  /* Blocks until every handle has closed. */
  r = uv_run(uv_default_loop());
  ASSERT(r == 0);

  /* Assert that at most five percent of the writable wakeups was spurious:
   * total/spurious > 20 means spurious < 5% of all writable wakeups. */
  ASSERT(spurious_writable_wakeups == 0 ||
         (valid_writable_wakeups + spurious_writable_wakeups) /
         spurious_writable_wakeups > 20);

  /* Each client connection has two ends (client + server side). */
  ASSERT(closed_connections == NUM_CLIENTS * 2);
}
|
560
|
+
|
561
|
+
|
562
|
+
/* Entry point: run the poll test with data flowing in both directions. */
TEST_IMPL(poll_duplex) {
  test_mode = DUPLEX;
  start_poll_test();
  return 0;
}
|
567
|
+
|
568
|
+
|
569
|
+
/* Entry point: run the poll test with data flowing in one direction only. */
TEST_IMPL(poll_unidirectional) {
  test_mode = UNIDIRECTIONAL;
  start_poll_test();
  return 0;
}
|