asyncengine 0.0.1.testing
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.markdown +0 -0
- data/asyncengine.gemspec +26 -0
- data/ext/asyncengine_ext/asyncengine_ruby.c +82 -0
- data/ext/asyncengine_ext/extconf.rb +47 -0
- data/ext/asyncengine_ext/libuv/AUTHORS +45 -0
- data/ext/asyncengine_ext/libuv/LICENSE +42 -0
- data/ext/asyncengine_ext/libuv/Makefile +119 -0
- data/ext/asyncengine_ext/libuv/README.md +88 -0
- data/ext/asyncengine_ext/libuv/build/gcc_version.py +20 -0
- data/ext/asyncengine_ext/libuv/common.gypi +176 -0
- data/ext/asyncengine_ext/libuv/config-mingw.mk +61 -0
- data/ext/asyncengine_ext/libuv/config-unix.mk +173 -0
- data/ext/asyncengine_ext/libuv/gyp_uv +60 -0
- data/ext/asyncengine_ext/libuv/include/ares.h +591 -0
- data/ext/asyncengine_ext/libuv/include/ares_version.h +24 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/eio.h +403 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/ev.h +838 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/ngx-queue.h +106 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/tree.h +768 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/uv-unix.h +256 -0
- data/ext/asyncengine_ext/libuv/include/uv-private/uv-win.h +458 -0
- data/ext/asyncengine_ext/libuv/include/uv.h +1556 -0
- data/ext/asyncengine_ext/libuv/src/ares/AUTHORS +37 -0
- data/ext/asyncengine_ext/libuv/src/ares/CHANGES +1218 -0
- data/ext/asyncengine_ext/libuv/src/ares/CMakeLists.txt +22 -0
- data/ext/asyncengine_ext/libuv/src/ares/NEWS +21 -0
- data/ext/asyncengine_ext/libuv/src/ares/README +60 -0
- data/ext/asyncengine_ext/libuv/src/ares/README.cares +13 -0
- data/ext/asyncengine_ext/libuv/src/ares/README.msvc +142 -0
- data/ext/asyncengine_ext/libuv/src/ares/README.node +21 -0
- data/ext/asyncengine_ext/libuv/src/ares/RELEASE-NOTES +26 -0
- data/ext/asyncengine_ext/libuv/src/ares/TODO +23 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares__close_sockets.c +66 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares__get_hostent.c +263 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares__read_line.c +71 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares__timeval.c +111 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_cancel.c +63 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_data.c +190 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_data.h +65 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_destroy.c +105 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_dns.h +90 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_expand_name.c +200 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_expand_string.c +75 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_fds.c +63 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_free_hostent.c +42 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_free_string.c +25 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.c +30 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.h +26 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyaddr.c +301 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyname.c +523 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getnameinfo.c +427 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.c +122 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.h +53 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_getsock.c +72 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_init.c +1809 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_iphlpapi.h +221 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_ipv6.h +78 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.c +142 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.h +42 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_llist.c +86 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_llist.h +42 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_mkquery.c +195 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.c +181 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.h +55 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_options.c +248 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_a_reply.c +263 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_aaaa_reply.c +259 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_mx_reply.c +170 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ns_reply.c +182 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ptr_reply.c +217 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_srv_reply.c +179 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_parse_txt_reply.c +201 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_platform.c +11035 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_platform.h +43 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_private.h +355 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_process.c +1295 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_query.c +183 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_rules.h +144 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_search.c +321 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_send.c +134 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_setup.h +199 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.c +66 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.h +30 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.c +42 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.h +26 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_strerror.c +56 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_timeout.c +80 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_version.c +11 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_writev.c +79 -0
- data/ext/asyncengine_ext/libuv/src/ares/ares_writev.h +36 -0
- data/ext/asyncengine_ext/libuv/src/ares/bitncmp.c +59 -0
- data/ext/asyncengine_ext/libuv/src/ares/bitncmp.h +26 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_cygwin/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_darwin/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_freebsd/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_linux/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_netbsd/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_openbsd/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_sunos/ares_config.h +512 -0
- data/ext/asyncengine_ext/libuv/src/ares/config_win32/ares_config.h +369 -0
- data/ext/asyncengine_ext/libuv/src/ares/get_ver.awk +35 -0
- data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.c +451 -0
- data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.h +31 -0
- data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.c +208 -0
- data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.h +26 -0
- data/ext/asyncengine_ext/libuv/src/ares/nameser.h +203 -0
- data/ext/asyncengine_ext/libuv/src/ares/setup_once.h +504 -0
- data/ext/asyncengine_ext/libuv/src/ares/windows_port.c +22 -0
- data/ext/asyncengine_ext/libuv/src/unix/async.c +58 -0
- data/ext/asyncengine_ext/libuv/src/unix/cares.c +194 -0
- data/ext/asyncengine_ext/libuv/src/unix/check.c +80 -0
- data/ext/asyncengine_ext/libuv/src/unix/core.c +588 -0
- data/ext/asyncengine_ext/libuv/src/unix/cygwin.c +84 -0
- data/ext/asyncengine_ext/libuv/src/unix/darwin.c +341 -0
- data/ext/asyncengine_ext/libuv/src/unix/dl.c +91 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/Changes +63 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/LICENSE +36 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/Makefile.am +15 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/aclocal.m4 +8957 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/autogen.sh +3 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config.h.in +86 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_cygwin.h +80 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_darwin.h +141 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_freebsd.h +81 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_linux.h +94 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_netbsd.h +81 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_openbsd.h +137 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/config_sunos.h +84 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/configure.ac +22 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/demo.c +194 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/ecb.h +370 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/eio.3 +3428 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/eio.c +2593 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/eio.pod +969 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/libeio.m4 +195 -0
- data/ext/asyncengine_ext/libuv/src/unix/eio/xthread.h +164 -0
- data/ext/asyncengine_ext/libuv/src/unix/error.c +98 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/Changes +388 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/LICENSE +36 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.am +18 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.in +771 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/README +58 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/aclocal.m4 +8957 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/autogen.sh +6 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config.guess +1526 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config.h.in +125 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config.sub +1658 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_cygwin.h +123 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_darwin.h +122 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_freebsd.h +120 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_linux.h +141 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_netbsd.h +120 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_openbsd.h +126 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/config_sunos.h +122 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/configure +13037 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/configure.ac +18 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/depcomp +630 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev++.h +816 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev.3 +5311 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev.c +3921 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev.pod +5243 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_epoll.c +266 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_kqueue.c +235 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_poll.c +148 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_port.c +179 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_select.c +310 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_vars.h +203 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_win32.c +153 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ev_wrap.h +196 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/event.c +402 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/event.h +170 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/install-sh +294 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/libev.m4 +39 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/ltmain.sh +8413 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/missing +336 -0
- data/ext/asyncengine_ext/libuv/src/unix/ev/mkinstalldirs +111 -0
- data/ext/asyncengine_ext/libuv/src/unix/freebsd.c +312 -0
- data/ext/asyncengine_ext/libuv/src/unix/fs.c +707 -0
- data/ext/asyncengine_ext/libuv/src/unix/idle.c +79 -0
- data/ext/asyncengine_ext/libuv/src/unix/internal.h +161 -0
- data/ext/asyncengine_ext/libuv/src/unix/kqueue.c +127 -0
- data/ext/asyncengine_ext/libuv/src/unix/linux/core.c +474 -0
- data/ext/asyncengine_ext/libuv/src/unix/linux/inotify.c +211 -0
- data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.c +230 -0
- data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.h +87 -0
- data/ext/asyncengine_ext/libuv/src/unix/loop.c +58 -0
- data/ext/asyncengine_ext/libuv/src/unix/netbsd.c +108 -0
- data/ext/asyncengine_ext/libuv/src/unix/openbsd.c +295 -0
- data/ext/asyncengine_ext/libuv/src/unix/pipe.c +266 -0
- data/ext/asyncengine_ext/libuv/src/unix/prepare.c +79 -0
- data/ext/asyncengine_ext/libuv/src/unix/process.c +369 -0
- data/ext/asyncengine_ext/libuv/src/unix/stream.c +1033 -0
- data/ext/asyncengine_ext/libuv/src/unix/sunos.c +466 -0
- data/ext/asyncengine_ext/libuv/src/unix/tcp.c +327 -0
- data/ext/asyncengine_ext/libuv/src/unix/thread.c +154 -0
- data/ext/asyncengine_ext/libuv/src/unix/timer.c +127 -0
- data/ext/asyncengine_ext/libuv/src/unix/tty.c +146 -0
- data/ext/asyncengine_ext/libuv/src/unix/udp.c +670 -0
- data/ext/asyncengine_ext/libuv/src/unix/uv-eio.c +124 -0
- data/ext/asyncengine_ext/libuv/src/unix/uv-eio.h +13 -0
- data/ext/asyncengine_ext/libuv/src/uv-common.c +354 -0
- data/ext/asyncengine_ext/libuv/src/uv-common.h +87 -0
- data/ext/asyncengine_ext/libuv/src/win/async.c +127 -0
- data/ext/asyncengine_ext/libuv/src/win/cares.c +290 -0
- data/ext/asyncengine_ext/libuv/src/win/core.c +270 -0
- data/ext/asyncengine_ext/libuv/src/win/dl.c +82 -0
- data/ext/asyncengine_ext/libuv/src/win/error.c +132 -0
- data/ext/asyncengine_ext/libuv/src/win/fs-event.c +514 -0
- data/ext/asyncengine_ext/libuv/src/win/fs.c +1576 -0
- data/ext/asyncengine_ext/libuv/src/win/getaddrinfo.c +372 -0
- data/ext/asyncengine_ext/libuv/src/win/handle.c +225 -0
- data/ext/asyncengine_ext/libuv/src/win/internal.h +352 -0
- data/ext/asyncengine_ext/libuv/src/win/loop-watcher.c +131 -0
- data/ext/asyncengine_ext/libuv/src/win/pipe.c +1661 -0
- data/ext/asyncengine_ext/libuv/src/win/process.c +1140 -0
- data/ext/asyncengine_ext/libuv/src/win/req.c +174 -0
- data/ext/asyncengine_ext/libuv/src/win/stream.c +201 -0
- data/ext/asyncengine_ext/libuv/src/win/tcp.c +1282 -0
- data/ext/asyncengine_ext/libuv/src/win/thread.c +332 -0
- data/ext/asyncengine_ext/libuv/src/win/threadpool.c +73 -0
- data/ext/asyncengine_ext/libuv/src/win/timer.c +276 -0
- data/ext/asyncengine_ext/libuv/src/win/tty.c +1795 -0
- data/ext/asyncengine_ext/libuv/src/win/udp.c +709 -0
- data/ext/asyncengine_ext/libuv/src/win/util.c +719 -0
- data/ext/asyncengine_ext/libuv/src/win/winapi.c +117 -0
- data/ext/asyncengine_ext/libuv/src/win/winapi.h +4419 -0
- data/ext/asyncengine_ext/libuv/src/win/winsock.c +470 -0
- data/ext/asyncengine_ext/libuv/src/win/winsock.h +138 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-ares.c +118 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-getaddrinfo.c +94 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-list.h +105 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-ping-pongs.c +213 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-pound.c +324 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-pump.c +462 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-sizes.c +40 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-spawn.c +156 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-tcp-write-batch.c +140 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-thread.c +64 -0
- data/ext/asyncengine_ext/libuv/test/benchmark-udp-packet-storm.c +247 -0
- data/ext/asyncengine_ext/libuv/test/blackhole-server.c +118 -0
- data/ext/asyncengine_ext/libuv/test/dns-server.c +321 -0
- data/ext/asyncengine_ext/libuv/test/echo-server.c +370 -0
- data/ext/asyncengine_ext/libuv/test/fixtures/empty_file +0 -0
- data/ext/asyncengine_ext/libuv/test/fixtures/load_error.node +1 -0
- data/ext/asyncengine_ext/libuv/test/run-benchmarks.c +64 -0
- data/ext/asyncengine_ext/libuv/test/run-tests.c +108 -0
- data/ext/asyncengine_ext/libuv/test/runner-unix.c +315 -0
- data/ext/asyncengine_ext/libuv/test/runner-unix.h +36 -0
- data/ext/asyncengine_ext/libuv/test/runner-win.c +343 -0
- data/ext/asyncengine_ext/libuv/test/runner-win.h +42 -0
- data/ext/asyncengine_ext/libuv/test/runner.c +317 -0
- data/ext/asyncengine_ext/libuv/test/runner.h +159 -0
- data/ext/asyncengine_ext/libuv/test/task.h +117 -0
- data/ext/asyncengine_ext/libuv/test/test-async.c +216 -0
- data/ext/asyncengine_ext/libuv/test/test-callback-stack.c +203 -0
- data/ext/asyncengine_ext/libuv/test/test-connection-fail.c +148 -0
- data/ext/asyncengine_ext/libuv/test/test-counters-init.c +216 -0
- data/ext/asyncengine_ext/libuv/test/test-cwd-and-chdir.c +64 -0
- data/ext/asyncengine_ext/libuv/test/test-delayed-accept.c +197 -0
- data/ext/asyncengine_ext/libuv/test/test-dlerror.c +49 -0
- data/ext/asyncengine_ext/libuv/test/test-eio-overflow.c +90 -0
- data/ext/asyncengine_ext/libuv/test/test-error.c +59 -0
- data/ext/asyncengine_ext/libuv/test/test-fail-always.c +29 -0
- data/ext/asyncengine_ext/libuv/test/test-fs-event.c +442 -0
- data/ext/asyncengine_ext/libuv/test/test-fs.c +1731 -0
- data/ext/asyncengine_ext/libuv/test/test-get-currentexe.c +63 -0
- data/ext/asyncengine_ext/libuv/test/test-get-loadavg.c +36 -0
- data/ext/asyncengine_ext/libuv/test/test-get-memory.c +38 -0
- data/ext/asyncengine_ext/libuv/test/test-getaddrinfo.c +122 -0
- data/ext/asyncengine_ext/libuv/test/test-gethostbyname.c +189 -0
- data/ext/asyncengine_ext/libuv/test/test-getsockname.c +342 -0
- data/ext/asyncengine_ext/libuv/test/test-hrtime.c +51 -0
- data/ext/asyncengine_ext/libuv/test/test-idle.c +81 -0
- data/ext/asyncengine_ext/libuv/test/test-ipc-send-recv.c +209 -0
- data/ext/asyncengine_ext/libuv/test/test-ipc.c +614 -0
- data/ext/asyncengine_ext/libuv/test/test-list.h +371 -0
- data/ext/asyncengine_ext/libuv/test/test-loop-handles.c +359 -0
- data/ext/asyncengine_ext/libuv/test/test-multiple-listen.c +102 -0
- data/ext/asyncengine_ext/libuv/test/test-mutexes.c +63 -0
- data/ext/asyncengine_ext/libuv/test/test-pass-always.c +28 -0
- data/ext/asyncengine_ext/libuv/test/test-ping-pong.c +253 -0
- data/ext/asyncengine_ext/libuv/test/test-pipe-bind-error.c +140 -0
- data/ext/asyncengine_ext/libuv/test/test-pipe-connect-error.c +96 -0
- data/ext/asyncengine_ext/libuv/test/test-platform-output.c +87 -0
- data/ext/asyncengine_ext/libuv/test/test-process-title.c +42 -0
- data/ext/asyncengine_ext/libuv/test/test-ref.c +322 -0
- data/ext/asyncengine_ext/libuv/test/test-run-once.c +44 -0
- data/ext/asyncengine_ext/libuv/test/test-shutdown-close.c +103 -0
- data/ext/asyncengine_ext/libuv/test/test-shutdown-eof.c +183 -0
- data/ext/asyncengine_ext/libuv/test/test-spawn.c +499 -0
- data/ext/asyncengine_ext/libuv/test/test-stdio-over-pipes.c +256 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-bind-error.c +191 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-bind6-error.c +154 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-close.c +129 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-connect-error.c +70 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-connect6-error.c +68 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-flags.c +51 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-write-error.c +168 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-write-to-half-open-connection.c +135 -0
- data/ext/asyncengine_ext/libuv/test/test-tcp-writealot.c +195 -0
- data/ext/asyncengine_ext/libuv/test/test-thread.c +183 -0
- data/ext/asyncengine_ext/libuv/test/test-threadpool.c +57 -0
- data/ext/asyncengine_ext/libuv/test/test-timer-again.c +141 -0
- data/ext/asyncengine_ext/libuv/test/test-timer.c +130 -0
- data/ext/asyncengine_ext/libuv/test/test-tty.c +110 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-dgram-too-big.c +86 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-ipv6.c +156 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-multicast-join.c +139 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-multicast-ttl.c +86 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-options.c +86 -0
- data/ext/asyncengine_ext/libuv/test/test-udp-send-and-recv.c +208 -0
- data/ext/asyncengine_ext/libuv/test/test-util.c +97 -0
- data/ext/asyncengine_ext/libuv/uv.gyp +435 -0
- data/ext/asyncengine_ext/libuv/vcbuild.bat +105 -0
- data/lib/asyncengine/version.rb +3 -0
- data/lib/asyncengine.rb +41 -0
- metadata +384 -0
data/ext/asyncengine_ext/libuv/src/unix/stream.c
@@ -0,0 +1,1033 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <unistd.h>


static void uv__stream_connect(uv_stream_t*);
static void uv__write(uv_stream_t* stream);
static void uv__read(uv_stream_t* stream);


static size_t uv__buf_count(uv_buf_t bufs[], int bufcnt) {
  size_t total = 0;
  int i;

  for (i = 0; i < bufcnt; i++) {
    total += bufs[i].len;
  }

  return total;
}

void uv__stream_init(uv_loop_t* loop,
                     uv_stream_t* stream,
                     uv_handle_type type) {
  uv__handle_init(loop, (uv_handle_t*)stream, type);
  loop->counters.stream_init++;

  stream->alloc_cb = NULL;
  stream->close_cb = NULL;
  stream->connection_cb = NULL;
  stream->connect_req = NULL;
  stream->accepted_fd = -1;
  stream->fd = -1;
  stream->delayed_error = 0;
  stream->blocking = 0;
  ngx_queue_init(&stream->write_queue);
  ngx_queue_init(&stream->write_completed_queue);
  stream->write_queue_size = 0;

  ev_init(&stream->read_watcher, uv__stream_io);
  stream->read_watcher.data = stream;

  ev_init(&stream->write_watcher, uv__stream_io);
  stream->write_watcher.data = stream;

  assert(ngx_queue_empty(&stream->write_queue));
  assert(ngx_queue_empty(&stream->write_completed_queue));
  assert(stream->write_queue_size == 0);
}

int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
  socklen_t yes;

  assert(fd >= 0);
  stream->fd = fd;

  stream->flags |= flags;

  if (stream->type == UV_TCP) {
    /* Reuse the port address if applicable. */
    yes = 1;
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof yes) == -1) {
      uv__set_sys_error(stream->loop, errno);
      return -1;
    }

    if ((stream->flags & UV_TCP_NODELAY) &&
        uv__tcp_nodelay((uv_tcp_t*)stream, 1)) {
      return -1;
    }

    /* TODO Use delay the user passed in. */
    if ((stream->flags & UV_TCP_KEEPALIVE) &&
        uv__tcp_keepalive((uv_tcp_t*)stream, 1, 60)) {
      return -1;
    }
  }

  /* Associate the fd with each ev_io watcher. */
  ev_io_set(&stream->read_watcher, fd, EV_READ);
  ev_io_set(&stream->write_watcher, fd, EV_WRITE);

  /* These should have been set up by uv_tcp_init or uv_pipe_init. */
  assert(stream->read_watcher.cb == uv__stream_io);
  assert(stream->write_watcher.cb == uv__stream_io);

  return 0;
}

void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(stream->flags & UV_CLOSED);

  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_EINTR);
      req->cb(req, -1);
    }
  }

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  if (stream->flags & UV_SHUTTING) {
    uv_shutdown_t* req = stream->shutdown_req;
    if (req && req->cb) {
      uv__set_artificial_error(stream->loop, UV_EINTR);
      req->cb(req, -1);
    }
  }
}

void uv__server_io(EV_P_ ev_io* watcher, int revents) {
  int fd;
  struct sockaddr_storage addr;
  uv_stream_t* stream = watcher->data;

  assert(watcher == &stream->read_watcher ||
         watcher == &stream->write_watcher);
  assert(revents == EV_READ);

  assert(!(stream->flags & UV_CLOSING));

  if (stream->accepted_fd >= 0) {
    ev_io_stop(EV_A, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop, so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd, (struct sockaddr*)&addr, sizeof addr);

    if (fd < 0) {
      if (errno == EAGAIN) {
        /* No problem. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* ignore */
        continue;
      } else {
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb((uv_stream_t*)stream, 0);
      if (stream->accepted_fd >= 0) {
        /* The user hasn't yet called uv_accept(). */
        ev_io_stop(stream->loop->ev, &stream->read_watcher);
        return;
      }
    }
  }
}

int uv_accept(uv_stream_t* server, uv_stream_t* client) {
  uv_stream_t* streamServer;
  uv_stream_t* streamClient;
  int saved_errno;
  int status;

  /* TODO document this */
  assert(server->loop == client->loop);

  saved_errno = errno;
  status = -1;

  streamServer = (uv_stream_t*)server;
  streamClient = (uv_stream_t*)client;

  if (streamServer->accepted_fd < 0) {
    uv__set_sys_error(server->loop, EAGAIN);
    goto out;
  }

  if (uv__stream_open(streamClient, streamServer->accepted_fd,
        UV_READABLE | UV_WRITABLE)) {
    /* TODO handle error */
    close(streamServer->accepted_fd);
    streamServer->accepted_fd = -1;
    goto out;
  }

  ev_io_start(streamServer->loop->ev, &streamServer->read_watcher);
  streamServer->accepted_fd = -1;
  status = 0;

out:
  errno = saved_errno;
  return status;
}

int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
  switch (stream->type) {
    case UV_TCP:
      return uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
    case UV_NAMED_PIPE:
      return uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
    default:
      assert(0);
      return -1;
  }
}

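For orientation, here is a minimal caller-side sketch of the accept path above, written against the signatures visible in this file (uv_listen, uv_accept, and the connection_cb shape invoked by uv__server_io). The uv_tcp_init, uv_ip4_addr, uv_tcp_bind, uv_close and uv_run calls are assumptions about the uv.h this package vendors (libuv of this vintage), and the port number is arbitrary:

#include <stdlib.h>
#include <stdio.h>
#include "uv.h"

static uv_tcp_t server;

/* uv__server_io() above parks the new connection in stream->accepted_fd
 * and invokes this callback; uv_accept() moves that fd to a fresh handle. */
static void on_connection(uv_stream_t* srv, int status) {
  uv_tcp_t* client;

  if (status != 0)
    return;  /* accept failed; uv__server_io already recorded the error */

  client = malloc(sizeof(*client));
  uv_tcp_init(srv->loop, client);

  if (uv_accept(srv, (uv_stream_t*)client) == 0) {
    printf("accepted a connection\n");
    /* a real server would call uv_read_start() here */
  } else {
    uv_close((uv_handle_t*)client, NULL);  /* sketch: leaks client handle */
  }
}

int main(void) {
  /* by-value uv_ip4_addr/uv_tcp_bind usage is an era assumption */
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 7000);

  uv_tcp_init(uv_default_loop(), &server);
  uv_tcp_bind(&server, addr);
  uv_listen((uv_stream_t*)&server, 128, on_connection);
  return uv_run(uv_default_loop());
}

Note how declining to call uv_accept() inside the callback leaves accepted_fd set, which makes uv__server_io() stop the read watcher until uv_accept() is eventually called.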
uv_write_t* uv_write_queue_head(uv_stream_t* stream) {
  ngx_queue_t* q;
  uv_write_t* req;

  if (ngx_queue_empty(&stream->write_queue)) {
    return NULL;
  }

  q = ngx_queue_head(&stream->write_queue);
  if (!q) {
    return NULL;
  }

  req = ngx_queue_data(q, struct uv_write_s, queue);
  assert(req);

  return req;
}

static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;

  assert(!uv_write_queue_head(stream));
  assert(stream->write_queue_size == 0);

  ev_io_stop(stream->loop->ev, &stream->write_watcher);

  /* Shutdown? */
  if ((stream->flags & UV_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_SHUT)) {
    assert(stream->shutdown_req);

    req = stream->shutdown_req;
    stream->shutdown_req = NULL;

    if (shutdown(stream->fd, SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv__set_sys_error(stream->loop, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv__set_sys_error(stream->loop, 0);
      ((uv_handle_t*) stream)->flags |= UV_SHUT;
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}

static size_t uv__write_req_size(uv_write_t* req) {
  size_t size;

  size = uv__buf_count(req->bufs + req->write_index,
                       req->bufcnt - req->write_index);
  assert(req->handle->write_queue_size >= size);

  return size;
}


static void uv__write_req_finish(uv_write_t* req) {
  uv_stream_t* stream = req->handle;

  /* Pop the req off tcp->write_queue. */
  ngx_queue_remove(&req->queue);
  if (req->bufs != req->bufsml) {
    free(req->bufs);
  }
  req->bufs = NULL;

  /* Add it to the write_completed_queue where it will have its
   * callback called in the near future.
   */
  ngx_queue_insert_tail(&stream->write_completed_queue, &req->queue);
  ev_feed_event(stream->loop->ev, &stream->write_watcher, EV_WRITE);
}

/* Process the stream's write queue. Errors are recorded on the request
 * (req->error) and surfaced when uv__write_callbacks() runs the completed
 * requests' callbacks.
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  if (stream->flags & UV_CLOSING) {
    /* Handle was closed this tick. We've received a stale
     * 'is writable' callback from the event loop, ignore.
     */
    return;
  }

start:

  assert(stream->fd >= 0);

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /*
   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /*
   * Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (req->send_handle) {
    struct msghdr msg;
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;
    *(int*) CMSG_DATA(cmsg) = fd_to_send;

    do {
      n = sendmsg(stream->fd, &msg, 0);
    }
    while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(stream->fd, iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(stream->fd, iov, iovcnt);
      }
    }
    while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    if (errno != EAGAIN) {
      /* Error */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    } else if (stream->blocking) {
      /* If this is a blocking stream, try again. */
      goto start;
    }
  } else {
    /* Successful write */

    /* Update the counters. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. */
        if (stream->blocking) {
          /*
           * If we're blocking then we should not be enabling the write
           * watcher - instead we need to try again.
           */
          goto start;
        } else {
          /* Break loop and ensure the watcher is pending. */
          break;
        }

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* Only non-blocking streams should use the write_watcher. */
  assert(!stream->blocking);

  /* We're not done. */
  ev_io_start(stream->loop->ev, &stream->write_watcher);
}

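The retry and accounting logic above distills to a standard POSIX pattern: restart on EINTR, stop on EAGAIN, and advance the iovec base/len on short writes. A self-contained sketch under those assumptions (plain POSIX, no libuv types; the helper name is ours):

#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

/* Write as much of iov[0..iovcnt) as the fd will take without blocking.
 * Returns 0 when everything was written, 1 on EAGAIN (retry when the fd
 * polls writable), -1 on a real error. Mutates iov in place, the same way
 * uv__write() mutates req->bufs. */
static int writev_some(int fd, struct iovec* iov, int iovcnt) {
  while (iovcnt > 0) {
    ssize_t n;

    do
      n = writev(fd, iov, iovcnt);
    while (n == -1 && errno == EINTR);   /* interrupted: retry */

    if (n == -1)
      return (errno == EAGAIN || errno == EWOULDBLOCK) ? 1 : -1;

    /* Consume n bytes from the front of the vector. */
    while (n > 0 && (size_t)n >= iov[0].iov_len) {
      n -= iov[0].iov_len;
      iov++;
      iovcnt--;
    }
    if (n > 0) {  /* partial buffer: bump the base, shrink the length */
      iov[0].iov_base = (char*)iov[0].iov_base + n;
      iov[0].iov_len -= n;
    }
  }
  return 0;
}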
static void uv__write_callbacks(uv_stream_t* stream) {
  int callbacks_made = 0;
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }

    callbacks_made++;
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}

static uv_handle_type uv__handle_type(int fd) {
  struct sockaddr_storage ss;
  socklen_t len;

  memset(&ss, 0, sizeof(ss));
  len = sizeof(ss);

  if (getsockname(fd, (struct sockaddr*)&ss, &len))
    return UV_UNKNOWN_HANDLE;

  switch (ss.ss_family) {
    case AF_UNIX:
      return UV_NAMED_PIPE;
    case AF_INET:
    case AF_INET6:
      return UV_TCP;
  }

  return UV_UNKNOWN_HANDLE;
}

static void uv__read(uv_stream_t* stream) {
  uv_buf_t buf;
  ssize_t nread;
  struct msghdr msg;
  struct cmsghdr* cmsg;
  char cmsg_space[64];
  struct ev_loop* ev = stream->loop->ev;

  /* XXX: Maybe instead of having UV_READING we just test if
   * tcp->read_cb is NULL or not?
   */
  while ((stream->read_cb || stream->read2_cb) &&
         stream->flags & UV_READING) {
    assert(stream->alloc_cb);
    buf = stream->alloc_cb((uv_handle_t*)stream, 64 * 1024);

    assert(buf.len > 0);
    assert(buf.base);
    assert(stream->fd >= 0);

    if (stream->read_cb) {
      do {
        nread = read(stream->fd, buf.base, buf.len);
      }
      while (nread < 0 && errno == EINTR);
    } else {
      assert(stream->read2_cb);
      /* read2_cb uses recvmsg */
      msg.msg_flags = 0;
      msg.msg_iov = (struct iovec*) &buf;
      msg.msg_iovlen = 1;
      msg.msg_name = NULL;
      msg.msg_namelen = 0;
      /* Set up to receive a descriptor even if one isn't in the message */
      msg.msg_controllen = 64;
      msg.msg_control = (void *) cmsg_space;

      do {
        nread = recvmsg(stream->fd, &msg, 0);
      }
      while (nread < 0 && errno == EINTR);
    }

    if (nread < 0) {
      /* Error */
      if (errno == EAGAIN) {
        /* Wait for the next one. */
        if (stream->flags & UV_READING) {
          ev_io_start(ev, &stream->read_watcher);
        }
        uv__set_sys_error(stream->loop, EAGAIN);

        if (stream->read_cb) {
          stream->read_cb(stream, 0, buf);
        } else {
          stream->read2_cb((uv_pipe_t*)stream, 0, buf, UV_UNKNOWN_HANDLE);
        }

        return;
      } else {
        /* Error. User should call uv_close(). */
        uv__set_sys_error(stream->loop, errno);

        if (stream->read_cb) {
          stream->read_cb(stream, -1, buf);
        } else {
          stream->read2_cb((uv_pipe_t*)stream, -1, buf, UV_UNKNOWN_HANDLE);
        }

        assert(!ev_is_active(&stream->read_watcher));
        return;
      }

    } else if (nread == 0) {
      /* EOF */
      uv__set_artificial_error(stream->loop, UV_EOF);
      ev_io_stop(ev, &stream->read_watcher);

      if (stream->read_cb) {
        stream->read_cb(stream, -1, buf);
      } else {
        stream->read2_cb((uv_pipe_t*)stream, -1, buf, UV_UNKNOWN_HANDLE);
      }
      return;
    } else {
      /* Successful read */
      ssize_t buflen = buf.len;

      if (stream->read_cb) {
        stream->read_cb(stream, nread, buf);
      } else {
        assert(stream->read2_cb);

        /*
         * XXX: Some implementations can send multiple file descriptors in a
         * single message. We should be using CMSG_NXTHDR() to walk the
         * chain to get at them all. This would require changing the API to
         * hand these back to the caller, which is a pain.
         */

        for (cmsg = CMSG_FIRSTHDR(&msg);
             msg.msg_controllen > 0 && cmsg != NULL;
             cmsg = CMSG_NXTHDR(&msg, cmsg)) {

          if (cmsg->cmsg_type == SCM_RIGHTS) {
            if (stream->accepted_fd != -1) {
              fprintf(stderr, "(libuv) ignoring extra FD received\n");
            }

            stream->accepted_fd = *(int *) CMSG_DATA(cmsg);

          } else {
            fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
                cmsg->cmsg_type);
          }
        }

        if (stream->accepted_fd >= 0) {
          stream->read2_cb((uv_pipe_t*)stream, nread, buf,
              uv__handle_type(stream->accepted_fd));
        } else {
          stream->read2_cb((uv_pipe_t*)stream, nread, buf, UV_UNKNOWN_HANDLE);
        }
      }

      /* Return if we didn't fill the buffer, there is no more data to read. */
      if (nread < buflen) {
        return;
      }
    }
  }
}

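The recvmsg() branch above and the sendmsg() branch in uv__write() are the two halves of standard SCM_RIGHTS descriptor passing over an AF_UNIX socket. For reference, a self-contained sketch of the send side in plain POSIX (the helper name is ours):

#include <string.h>
#include <sys/socket.h>

/* Send one byte of payload plus a file descriptor over a connected
 * AF_UNIX socket: the same wire format uv__read() unpacks above. */
static int send_fd(int sock, int fd_to_send) {
  struct msghdr msg;
  struct iovec iov;
  struct cmsghdr* cmsg;
  char payload = '.';                      /* must carry >= 1 byte of data */
  char control[CMSG_SPACE(sizeof(int))];

  memset(&msg, 0, sizeof(msg));
  memset(control, 0, sizeof(control));

  iov.iov_base = &payload;
  iov.iov_len = 1;
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;            /* "rights" == file descriptors */
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

  return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}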
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");
  assert(stream->fd >= 0);

  if (!(stream->flags & UV_WRITABLE) ||
      stream->flags & UV_SHUT ||
      stream->flags & UV_CLOSED ||
      stream->flags & UV_CLOSING) {
    uv__set_sys_error(stream->loop, EINVAL);
    return -1;
  }

  /* Initialize request */
  uv__req_init(stream->loop, (uv_req_t*)req);
  req->handle = stream;
  req->cb = cb;

  stream->shutdown_req = req;
  req->type = UV_SHUTDOWN;

  ((uv_handle_t*)stream)->flags |= UV_SHUTTING;


  ev_io_start(stream->loop->ev, &stream->write_watcher);

  return 0;
}

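Note that uv_shutdown() only flags the stream; the actual shutdown(fd, SHUT_WR) happens in uv__drain() once the write queue empties. A minimal caller sketch against the signature above (the callback and helper names are ours):

#include <stdio.h>
#include "uv.h"

static uv_shutdown_t shutdown_req;

static void on_shutdown(uv_shutdown_t* req, int status) {
  /* status is 0 once uv__drain() has issued shutdown(fd, SHUT_WR) */
  printf("write side closed: %d\n", status);
  uv_close((uv_handle_t*)req->handle, NULL);
}

/* Call after the last uv_write(); queued writes still flush first. */
static void half_close(uv_stream_t* stream) {
  uv_shutdown(&shutdown_req, stream, on_shutdown);
}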
void uv__stream_io(EV_P_ ev_io* watcher, int revents) {
  uv_stream_t* stream = watcher->data;

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
         stream->type == UV_TTY);
  assert(watcher == &stream->read_watcher ||
         watcher == &stream->write_watcher);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->connect_req) {
    uv__stream_connect(stream);
  } else {
    assert(revents & (EV_READ | EV_WRITE));
    assert(stream->fd >= 0);

    if (revents & EV_READ) {
      uv__read((uv_stream_t*)stream);
    }

    if (revents & EV_WRITE) {
      uv__write(stream);
      uv__write_callbacks(stream);
    }
  }
}

/**
 * We get called here directly following a call to connect(2).
 * In order to determine whether we've errored out or succeeded, we must
 * call getsockopt.
 */
static void uv__stream_connect(uv_stream_t* stream) {
  int error;
  uv_connect_t* req = stream->connect_req;
  socklen_t errorsize = sizeof(int);

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
  assert(req);

  if (stream->delayed_error) {
    /* To smooth over the differences between unixes, errors that
     * were reported synchronously on the first connect can be delayed
     * until the next tick--which is now.
     */
    error = stream->delayed_error;
    stream->delayed_error = 0;
  } else {
    /* Normal situation: we need to get the socket error from the kernel. */
    assert(stream->fd >= 0);
    getsockopt(stream->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
  }

  if (!error) {
    ev_io_start(stream->loop->ev, &stream->read_watcher);

    /* Successful connection */
    stream->connect_req = NULL;
    if (req->cb) {
      req->cb(req, 0);
    }

  } else if (error == EINPROGRESS) {
    /* Still connecting. */
    return;
  } else {
    /* Error */
    uv__set_sys_error(stream->loop, error);

    stream->connect_req = NULL;
    if (req->cb) {
      req->cb(req, -1);
    }
  }
}

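The getsockopt(SO_ERROR) call is the canonical way to harvest the result of a non-blocking connect(2) once the socket polls writable. Distilled to plain POSIX (the helper name is ours):

#include <errno.h>
#include <sys/socket.h>

/* After a non-blocking connect(2) and a writability notification, returns
 * 0 if the connection succeeded, otherwise the pending socket error. */
static int connect_result(int fd) {
  int error = 0;
  socklen_t len = sizeof(error);

  if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &error, &len) != 0)
    return errno;  /* getsockopt itself failed */

  return error;    /* 0 on success, e.g. ECONNREFUSED on failure */
}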
int uv__connect(uv_connect_t* req, uv_stream_t* stream, struct sockaddr* addr,
    socklen_t addrlen, uv_connect_cb cb) {
  int sockfd;
  int r;

  if (stream->fd <= 0) {
    if ((sockfd = uv__socket(addr->sa_family, SOCK_STREAM, 0)) == -1) {
      uv__set_sys_error(stream->loop, errno);
      return -1;
    }

    if (uv__stream_open(stream, sockfd, UV_READABLE | UV_WRITABLE)) {
      close(sockfd);
      return -2;
    }
  }

  uv__req_init(stream->loop, (uv_req_t*)req);
  req->cb = cb;
  req->handle = stream;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  if (stream->connect_req) {
    uv__set_sys_error(stream->loop, EALREADY);
    return -1;
  }

  if (stream->type != UV_TCP) {
    uv__set_sys_error(stream->loop, ENOTSOCK);
    return -1;
  }

  stream->connect_req = req;

  do {
    r = connect(stream->fd, addr, addrlen);
  }
  while (r == -1 && errno == EINTR);

  stream->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait.
       *
       * XXX: do the same for ECONNABORTED?
       */
      case ECONNREFUSED:
        stream->delayed_error = errno;
        break;

      default:
        uv__set_sys_error(stream->loop, errno);
        return -1;
    }
  }

  assert(stream->write_watcher.data == stream);
  ev_io_start(stream->loop->ev, &stream->write_watcher);

  if (stream->delayed_error) {
    ev_feed_event(stream->loop->ev, &stream->write_watcher, EV_WRITE);
  }

  return 0;
}

int uv_write2(uv_write_t* req, uv_stream_t* stream, uv_buf_t bufs[], int bufcnt,
    uv_stream_t* send_handle, uv_write_cb cb) {
  int empty_queue;

  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
      stream->type == UV_TTY) &&
      "uv_write (unix) does not yet support other types of streams");

  if (stream->fd < 0) {
    uv__set_sys_error(stream->loop, EBADF);
    return -1;
  }

  if (send_handle) {
    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) {
      uv__set_sys_error(stream->loop, EOPNOTSUPP);
      return -1;
    }
  }

  empty_queue = (stream->write_queue_size == 0);

  /* Initialize the req */
  uv__req_init(stream->loop, (uv_req_t*)req);
  req->cb = cb;
  req->handle = stream;
  req->error = 0;
  req->send_handle = send_handle;
  req->type = UV_WRITE;
  ngx_queue_init(&req->queue);

  if (bufcnt <= UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  }
  else {
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;

  /*
   * fprintf(stderr, "cnt: %d bufs: %p bufsml: %p\n", bufcnt, req->bufs, req->bufsml);
   */

  req->write_index = 0;
  stream->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&stream->write_queue, &req->queue);

  assert(!ngx_queue_empty(&stream->write_queue));
  assert(stream->write_watcher.cb == uv__stream_io);
  assert(stream->write_watcher.data == stream);
  assert(stream->write_watcher.fd == stream->fd);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (empty_queue) {
    uv__write(stream);
  } else {
    /*
     * Blocking streams should never have anything in the queue.
     * If this assert fires then somehow the blocking stream isn't being
     * sufficiently flushed in uv__write.
     */
    assert(!stream->blocking);

    ev_io_start(stream->loop->ev, &stream->write_watcher);
  }

  return 0;
}

/* The memory pointed to by the buffers must remain valid until the callback
 * is called. This is not required for the uv_buf_t array itself, which is
 * copied into the request.
 */
int uv_write(uv_write_t* req, uv_stream_t* stream, uv_buf_t bufs[], int bufcnt,
    uv_write_cb cb) {
  return uv_write2(req, stream, bufs, bufcnt, NULL, cb);
}

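A minimal caller sketch for uv_write() as defined above. The callback, buffer setup, and helper name are ours; the uv_buf_t is built by hand rather than via any helper, and uv_last_error() is an assumption about the uv.h of this era:

#include <stdio.h>
#include "uv.h"

static uv_write_t write_req;
static char greeting[] = "hello\n";   /* must stay valid until on_write */

static void on_write(uv_write_t* req, int status) {
  /* status is 0 on success; on failure req->error held the errno and
   * uv_last_error(req->handle->loop) should describe it */
  printf("write completed: %d\n", status);
}

static void say_hello(uv_stream_t* stream) {
  uv_buf_t buf;
  buf.base = greeting;
  buf.len = sizeof(greeting) - 1;     /* exclude the NUL */

  /* One buffer; uv_write() forwards to uv_write2() with no send_handle. */
  uv_write(&write_req, stream, &buf, 1, on_write);
}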
int uv__read_start_common(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb, uv_read2_cb read2_cb) {
  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
      stream->type == UV_TTY);

  if (stream->flags & UV_CLOSING) {
    uv__set_sys_error(stream->loop, EINVAL);
    return -1;
  }

  /* The UV_READING flag is independent of the actual state of the tcp;
   * it just expresses the user's desired state.
   */
  ((uv_handle_t*)stream)->flags |= UV_READING;

  /* TODO: try to do the read inline? */
  /* TODO: keep track of tcp state. If we've gotten an EOF then we should
   * not start the IO watcher.
   */
  assert(stream->fd >= 0);
  assert(alloc_cb);

  stream->read_cb = read_cb;
  stream->read2_cb = read2_cb;
  stream->alloc_cb = alloc_cb;

  /* These should have been set by uv_tcp_init. */
  assert(stream->read_watcher.cb == uv__stream_io);

  ev_io_start(stream->loop->ev, &stream->read_watcher);
  return 0;
}


int uv_read_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb) {
  return uv__read_start_common(stream, alloc_cb, read_cb, NULL);
}


int uv_read2_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
    uv_read2_cb read_cb) {
  return uv__read_start_common(stream, alloc_cb, NULL, read_cb);
}


int uv_read_stop(uv_stream_t* stream) {
  ev_io_stop(stream->loop->ev, &stream->read_watcher);
  stream->flags &= ~UV_READING;
  stream->read_cb = NULL;
  stream->read2_cb = NULL;
  stream->alloc_cb = NULL;
  return 0;
}


int uv_is_readable(uv_stream_t* stream) {
  return stream->flags & UV_READABLE;
}


int uv_is_writable(uv_stream_t* stream) {
  return stream->flags & UV_WRITABLE;
}


void uv__stream_close(uv_stream_t* handle) {
  uv_read_stop(handle);
  ev_io_stop(handle->loop->ev, &handle->write_watcher);

  close(handle->fd);
  handle->fd = -1;

  if (handle->accepted_fd >= 0) {
    close(handle->accepted_fd);
    handle->accepted_fd = -1;
  }

  assert(!ev_is_active(&handle->read_watcher));
  assert(!ev_is_active(&handle->write_watcher));
}
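Finally, a sketch of driving uv_read_start() with the callback shapes this file expects: alloc_cb returns a uv_buf_t by value, and read_cb receives nread of -1 for both error and EOF. The buffer management here is deliberately naive and the names are ours:

#include <stdlib.h>
#include <stdio.h>
#include "uv.h"

/* uv__read() calls this before every read; it must return a buffer. */
static uv_buf_t on_alloc(uv_handle_t* handle, size_t suggested_size) {
  uv_buf_t buf;
  buf.base = malloc(suggested_size);   /* naive: freed again in on_read */
  buf.len = suggested_size;
  return buf;
}

/* nread > 0: data; nread == 0: EAGAIN (ignore); nread == -1: error or EOF. */
static void on_read(uv_stream_t* stream, ssize_t nread, uv_buf_t buf) {
  if (nread > 0)
    fwrite(buf.base, 1, nread, stdout);
  else if (nread == -1)
    uv_close((uv_handle_t*)stream, NULL);  /* UV_EOF or a real error */

  free(buf.base);
}

static void start_reading(uv_stream_t* stream) {
  uv_read_start(stream, on_alloc, on_read);
}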