asyncengine 0.0.1.testing

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (317)
  1. data/README.markdown +0 -0
  2. data/asyncengine.gemspec +26 -0
  3. data/ext/asyncengine_ext/asyncengine_ruby.c +82 -0
  4. data/ext/asyncengine_ext/extconf.rb +47 -0
  5. data/ext/asyncengine_ext/libuv/AUTHORS +45 -0
  6. data/ext/asyncengine_ext/libuv/LICENSE +42 -0
  7. data/ext/asyncengine_ext/libuv/Makefile +119 -0
  8. data/ext/asyncengine_ext/libuv/README.md +88 -0
  9. data/ext/asyncengine_ext/libuv/build/gcc_version.py +20 -0
  10. data/ext/asyncengine_ext/libuv/common.gypi +176 -0
  11. data/ext/asyncengine_ext/libuv/config-mingw.mk +61 -0
  12. data/ext/asyncengine_ext/libuv/config-unix.mk +173 -0
  13. data/ext/asyncengine_ext/libuv/gyp_uv +60 -0
  14. data/ext/asyncengine_ext/libuv/include/ares.h +591 -0
  15. data/ext/asyncengine_ext/libuv/include/ares_version.h +24 -0
  16. data/ext/asyncengine_ext/libuv/include/uv-private/eio.h +403 -0
  17. data/ext/asyncengine_ext/libuv/include/uv-private/ev.h +838 -0
  18. data/ext/asyncengine_ext/libuv/include/uv-private/ngx-queue.h +106 -0
  19. data/ext/asyncengine_ext/libuv/include/uv-private/tree.h +768 -0
  20. data/ext/asyncengine_ext/libuv/include/uv-private/uv-unix.h +256 -0
  21. data/ext/asyncengine_ext/libuv/include/uv-private/uv-win.h +458 -0
  22. data/ext/asyncengine_ext/libuv/include/uv.h +1556 -0
  23. data/ext/asyncengine_ext/libuv/src/ares/AUTHORS +37 -0
  24. data/ext/asyncengine_ext/libuv/src/ares/CHANGES +1218 -0
  25. data/ext/asyncengine_ext/libuv/src/ares/CMakeLists.txt +22 -0
  26. data/ext/asyncengine_ext/libuv/src/ares/NEWS +21 -0
  27. data/ext/asyncengine_ext/libuv/src/ares/README +60 -0
  28. data/ext/asyncengine_ext/libuv/src/ares/README.cares +13 -0
  29. data/ext/asyncengine_ext/libuv/src/ares/README.msvc +142 -0
  30. data/ext/asyncengine_ext/libuv/src/ares/README.node +21 -0
  31. data/ext/asyncengine_ext/libuv/src/ares/RELEASE-NOTES +26 -0
  32. data/ext/asyncengine_ext/libuv/src/ares/TODO +23 -0
  33. data/ext/asyncengine_ext/libuv/src/ares/ares__close_sockets.c +66 -0
  34. data/ext/asyncengine_ext/libuv/src/ares/ares__get_hostent.c +263 -0
  35. data/ext/asyncengine_ext/libuv/src/ares/ares__read_line.c +71 -0
  36. data/ext/asyncengine_ext/libuv/src/ares/ares__timeval.c +111 -0
  37. data/ext/asyncengine_ext/libuv/src/ares/ares_cancel.c +63 -0
  38. data/ext/asyncengine_ext/libuv/src/ares/ares_data.c +190 -0
  39. data/ext/asyncengine_ext/libuv/src/ares/ares_data.h +65 -0
  40. data/ext/asyncengine_ext/libuv/src/ares/ares_destroy.c +105 -0
  41. data/ext/asyncengine_ext/libuv/src/ares/ares_dns.h +90 -0
  42. data/ext/asyncengine_ext/libuv/src/ares/ares_expand_name.c +200 -0
  43. data/ext/asyncengine_ext/libuv/src/ares/ares_expand_string.c +75 -0
  44. data/ext/asyncengine_ext/libuv/src/ares/ares_fds.c +63 -0
  45. data/ext/asyncengine_ext/libuv/src/ares/ares_free_hostent.c +42 -0
  46. data/ext/asyncengine_ext/libuv/src/ares/ares_free_string.c +25 -0
  47. data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.c +30 -0
  48. data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.h +26 -0
  49. data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyaddr.c +301 -0
  50. data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyname.c +523 -0
  51. data/ext/asyncengine_ext/libuv/src/ares/ares_getnameinfo.c +427 -0
  52. data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.c +122 -0
  53. data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.h +53 -0
  54. data/ext/asyncengine_ext/libuv/src/ares/ares_getsock.c +72 -0
  55. data/ext/asyncengine_ext/libuv/src/ares/ares_init.c +1809 -0
  56. data/ext/asyncengine_ext/libuv/src/ares/ares_iphlpapi.h +221 -0
  57. data/ext/asyncengine_ext/libuv/src/ares/ares_ipv6.h +78 -0
  58. data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.c +142 -0
  59. data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.h +42 -0
  60. data/ext/asyncengine_ext/libuv/src/ares/ares_llist.c +86 -0
  61. data/ext/asyncengine_ext/libuv/src/ares/ares_llist.h +42 -0
  62. data/ext/asyncengine_ext/libuv/src/ares/ares_mkquery.c +195 -0
  63. data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.c +181 -0
  64. data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.h +55 -0
  65. data/ext/asyncengine_ext/libuv/src/ares/ares_options.c +248 -0
  66. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_a_reply.c +263 -0
  67. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_aaaa_reply.c +259 -0
  68. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_mx_reply.c +170 -0
  69. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ns_reply.c +182 -0
  70. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ptr_reply.c +217 -0
  71. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_srv_reply.c +179 -0
  72. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_txt_reply.c +201 -0
  73. data/ext/asyncengine_ext/libuv/src/ares/ares_platform.c +11035 -0
  74. data/ext/asyncengine_ext/libuv/src/ares/ares_platform.h +43 -0
  75. data/ext/asyncengine_ext/libuv/src/ares/ares_private.h +355 -0
  76. data/ext/asyncengine_ext/libuv/src/ares/ares_process.c +1295 -0
  77. data/ext/asyncengine_ext/libuv/src/ares/ares_query.c +183 -0
  78. data/ext/asyncengine_ext/libuv/src/ares/ares_rules.h +144 -0
  79. data/ext/asyncengine_ext/libuv/src/ares/ares_search.c +321 -0
  80. data/ext/asyncengine_ext/libuv/src/ares/ares_send.c +134 -0
  81. data/ext/asyncengine_ext/libuv/src/ares/ares_setup.h +199 -0
  82. data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.c +66 -0
  83. data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.h +30 -0
  84. data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.c +42 -0
  85. data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.h +26 -0
  86. data/ext/asyncengine_ext/libuv/src/ares/ares_strerror.c +56 -0
  87. data/ext/asyncengine_ext/libuv/src/ares/ares_timeout.c +80 -0
  88. data/ext/asyncengine_ext/libuv/src/ares/ares_version.c +11 -0
  89. data/ext/asyncengine_ext/libuv/src/ares/ares_writev.c +79 -0
  90. data/ext/asyncengine_ext/libuv/src/ares/ares_writev.h +36 -0
  91. data/ext/asyncengine_ext/libuv/src/ares/bitncmp.c +59 -0
  92. data/ext/asyncengine_ext/libuv/src/ares/bitncmp.h +26 -0
  93. data/ext/asyncengine_ext/libuv/src/ares/config_cygwin/ares_config.h +512 -0
  94. data/ext/asyncengine_ext/libuv/src/ares/config_darwin/ares_config.h +512 -0
  95. data/ext/asyncengine_ext/libuv/src/ares/config_freebsd/ares_config.h +512 -0
  96. data/ext/asyncengine_ext/libuv/src/ares/config_linux/ares_config.h +512 -0
  97. data/ext/asyncengine_ext/libuv/src/ares/config_netbsd/ares_config.h +512 -0
  98. data/ext/asyncengine_ext/libuv/src/ares/config_openbsd/ares_config.h +512 -0
  99. data/ext/asyncengine_ext/libuv/src/ares/config_sunos/ares_config.h +512 -0
  100. data/ext/asyncengine_ext/libuv/src/ares/config_win32/ares_config.h +369 -0
  101. data/ext/asyncengine_ext/libuv/src/ares/get_ver.awk +35 -0
  102. data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.c +451 -0
  103. data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.h +31 -0
  104. data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.c +208 -0
  105. data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.h +26 -0
  106. data/ext/asyncengine_ext/libuv/src/ares/nameser.h +203 -0
  107. data/ext/asyncengine_ext/libuv/src/ares/setup_once.h +504 -0
  108. data/ext/asyncengine_ext/libuv/src/ares/windows_port.c +22 -0
  109. data/ext/asyncengine_ext/libuv/src/unix/async.c +58 -0
  110. data/ext/asyncengine_ext/libuv/src/unix/cares.c +194 -0
  111. data/ext/asyncengine_ext/libuv/src/unix/check.c +80 -0
  112. data/ext/asyncengine_ext/libuv/src/unix/core.c +588 -0
  113. data/ext/asyncengine_ext/libuv/src/unix/cygwin.c +84 -0
  114. data/ext/asyncengine_ext/libuv/src/unix/darwin.c +341 -0
  115. data/ext/asyncengine_ext/libuv/src/unix/dl.c +91 -0
  116. data/ext/asyncengine_ext/libuv/src/unix/eio/Changes +63 -0
  117. data/ext/asyncengine_ext/libuv/src/unix/eio/LICENSE +36 -0
  118. data/ext/asyncengine_ext/libuv/src/unix/eio/Makefile.am +15 -0
  119. data/ext/asyncengine_ext/libuv/src/unix/eio/aclocal.m4 +8957 -0
  120. data/ext/asyncengine_ext/libuv/src/unix/eio/autogen.sh +3 -0
  121. data/ext/asyncengine_ext/libuv/src/unix/eio/config.h.in +86 -0
  122. data/ext/asyncengine_ext/libuv/src/unix/eio/config_cygwin.h +80 -0
  123. data/ext/asyncengine_ext/libuv/src/unix/eio/config_darwin.h +141 -0
  124. data/ext/asyncengine_ext/libuv/src/unix/eio/config_freebsd.h +81 -0
  125. data/ext/asyncengine_ext/libuv/src/unix/eio/config_linux.h +94 -0
  126. data/ext/asyncengine_ext/libuv/src/unix/eio/config_netbsd.h +81 -0
  127. data/ext/asyncengine_ext/libuv/src/unix/eio/config_openbsd.h +137 -0
  128. data/ext/asyncengine_ext/libuv/src/unix/eio/config_sunos.h +84 -0
  129. data/ext/asyncengine_ext/libuv/src/unix/eio/configure.ac +22 -0
  130. data/ext/asyncengine_ext/libuv/src/unix/eio/demo.c +194 -0
  131. data/ext/asyncengine_ext/libuv/src/unix/eio/ecb.h +370 -0
  132. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.3 +3428 -0
  133. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.c +2593 -0
  134. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.pod +969 -0
  135. data/ext/asyncengine_ext/libuv/src/unix/eio/libeio.m4 +195 -0
  136. data/ext/asyncengine_ext/libuv/src/unix/eio/xthread.h +164 -0
  137. data/ext/asyncengine_ext/libuv/src/unix/error.c +98 -0
  138. data/ext/asyncengine_ext/libuv/src/unix/ev/Changes +388 -0
  139. data/ext/asyncengine_ext/libuv/src/unix/ev/LICENSE +36 -0
  140. data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.am +18 -0
  141. data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.in +771 -0
  142. data/ext/asyncengine_ext/libuv/src/unix/ev/README +58 -0
  143. data/ext/asyncengine_ext/libuv/src/unix/ev/aclocal.m4 +8957 -0
  144. data/ext/asyncengine_ext/libuv/src/unix/ev/autogen.sh +6 -0
  145. data/ext/asyncengine_ext/libuv/src/unix/ev/config.guess +1526 -0
  146. data/ext/asyncengine_ext/libuv/src/unix/ev/config.h.in +125 -0
  147. data/ext/asyncengine_ext/libuv/src/unix/ev/config.sub +1658 -0
  148. data/ext/asyncengine_ext/libuv/src/unix/ev/config_cygwin.h +123 -0
  149. data/ext/asyncengine_ext/libuv/src/unix/ev/config_darwin.h +122 -0
  150. data/ext/asyncengine_ext/libuv/src/unix/ev/config_freebsd.h +120 -0
  151. data/ext/asyncengine_ext/libuv/src/unix/ev/config_linux.h +141 -0
  152. data/ext/asyncengine_ext/libuv/src/unix/ev/config_netbsd.h +120 -0
  153. data/ext/asyncengine_ext/libuv/src/unix/ev/config_openbsd.h +126 -0
  154. data/ext/asyncengine_ext/libuv/src/unix/ev/config_sunos.h +122 -0
  155. data/ext/asyncengine_ext/libuv/src/unix/ev/configure +13037 -0
  156. data/ext/asyncengine_ext/libuv/src/unix/ev/configure.ac +18 -0
  157. data/ext/asyncengine_ext/libuv/src/unix/ev/depcomp +630 -0
  158. data/ext/asyncengine_ext/libuv/src/unix/ev/ev++.h +816 -0
  159. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.3 +5311 -0
  160. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.c +3921 -0
  161. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.pod +5243 -0
  162. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_epoll.c +266 -0
  163. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_kqueue.c +235 -0
  164. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_poll.c +148 -0
  165. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_port.c +179 -0
  166. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_select.c +310 -0
  167. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_vars.h +203 -0
  168. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_win32.c +153 -0
  169. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_wrap.h +196 -0
  170. data/ext/asyncengine_ext/libuv/src/unix/ev/event.c +402 -0
  171. data/ext/asyncengine_ext/libuv/src/unix/ev/event.h +170 -0
  172. data/ext/asyncengine_ext/libuv/src/unix/ev/install-sh +294 -0
  173. data/ext/asyncengine_ext/libuv/src/unix/ev/libev.m4 +39 -0
  174. data/ext/asyncengine_ext/libuv/src/unix/ev/ltmain.sh +8413 -0
  175. data/ext/asyncengine_ext/libuv/src/unix/ev/missing +336 -0
  176. data/ext/asyncengine_ext/libuv/src/unix/ev/mkinstalldirs +111 -0
  177. data/ext/asyncengine_ext/libuv/src/unix/freebsd.c +312 -0
  178. data/ext/asyncengine_ext/libuv/src/unix/fs.c +707 -0
  179. data/ext/asyncengine_ext/libuv/src/unix/idle.c +79 -0
  180. data/ext/asyncengine_ext/libuv/src/unix/internal.h +161 -0
  181. data/ext/asyncengine_ext/libuv/src/unix/kqueue.c +127 -0
  182. data/ext/asyncengine_ext/libuv/src/unix/linux/core.c +474 -0
  183. data/ext/asyncengine_ext/libuv/src/unix/linux/inotify.c +211 -0
  184. data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.c +230 -0
  185. data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.h +87 -0
  186. data/ext/asyncengine_ext/libuv/src/unix/loop.c +58 -0
  187. data/ext/asyncengine_ext/libuv/src/unix/netbsd.c +108 -0
  188. data/ext/asyncengine_ext/libuv/src/unix/openbsd.c +295 -0
  189. data/ext/asyncengine_ext/libuv/src/unix/pipe.c +266 -0
  190. data/ext/asyncengine_ext/libuv/src/unix/prepare.c +79 -0
  191. data/ext/asyncengine_ext/libuv/src/unix/process.c +369 -0
  192. data/ext/asyncengine_ext/libuv/src/unix/stream.c +1033 -0
  193. data/ext/asyncengine_ext/libuv/src/unix/sunos.c +466 -0
  194. data/ext/asyncengine_ext/libuv/src/unix/tcp.c +327 -0
  195. data/ext/asyncengine_ext/libuv/src/unix/thread.c +154 -0
  196. data/ext/asyncengine_ext/libuv/src/unix/timer.c +127 -0
  197. data/ext/asyncengine_ext/libuv/src/unix/tty.c +146 -0
  198. data/ext/asyncengine_ext/libuv/src/unix/udp.c +670 -0
  199. data/ext/asyncengine_ext/libuv/src/unix/uv-eio.c +124 -0
  200. data/ext/asyncengine_ext/libuv/src/unix/uv-eio.h +13 -0
  201. data/ext/asyncengine_ext/libuv/src/uv-common.c +354 -0
  202. data/ext/asyncengine_ext/libuv/src/uv-common.h +87 -0
  203. data/ext/asyncengine_ext/libuv/src/win/async.c +127 -0
  204. data/ext/asyncengine_ext/libuv/src/win/cares.c +290 -0
  205. data/ext/asyncengine_ext/libuv/src/win/core.c +270 -0
  206. data/ext/asyncengine_ext/libuv/src/win/dl.c +82 -0
  207. data/ext/asyncengine_ext/libuv/src/win/error.c +132 -0
  208. data/ext/asyncengine_ext/libuv/src/win/fs-event.c +514 -0
  209. data/ext/asyncengine_ext/libuv/src/win/fs.c +1576 -0
  210. data/ext/asyncengine_ext/libuv/src/win/getaddrinfo.c +372 -0
  211. data/ext/asyncengine_ext/libuv/src/win/handle.c +225 -0
  212. data/ext/asyncengine_ext/libuv/src/win/internal.h +352 -0
  213. data/ext/asyncengine_ext/libuv/src/win/loop-watcher.c +131 -0
  214. data/ext/asyncengine_ext/libuv/src/win/pipe.c +1661 -0
  215. data/ext/asyncengine_ext/libuv/src/win/process.c +1140 -0
  216. data/ext/asyncengine_ext/libuv/src/win/req.c +174 -0
  217. data/ext/asyncengine_ext/libuv/src/win/stream.c +201 -0
  218. data/ext/asyncengine_ext/libuv/src/win/tcp.c +1282 -0
  219. data/ext/asyncengine_ext/libuv/src/win/thread.c +332 -0
  220. data/ext/asyncengine_ext/libuv/src/win/threadpool.c +73 -0
  221. data/ext/asyncengine_ext/libuv/src/win/timer.c +276 -0
  222. data/ext/asyncengine_ext/libuv/src/win/tty.c +1795 -0
  223. data/ext/asyncengine_ext/libuv/src/win/udp.c +709 -0
  224. data/ext/asyncengine_ext/libuv/src/win/util.c +719 -0
  225. data/ext/asyncengine_ext/libuv/src/win/winapi.c +117 -0
  226. data/ext/asyncengine_ext/libuv/src/win/winapi.h +4419 -0
  227. data/ext/asyncengine_ext/libuv/src/win/winsock.c +470 -0
  228. data/ext/asyncengine_ext/libuv/src/win/winsock.h +138 -0
  229. data/ext/asyncengine_ext/libuv/test/benchmark-ares.c +118 -0
  230. data/ext/asyncengine_ext/libuv/test/benchmark-getaddrinfo.c +94 -0
  231. data/ext/asyncengine_ext/libuv/test/benchmark-list.h +105 -0
  232. data/ext/asyncengine_ext/libuv/test/benchmark-ping-pongs.c +213 -0
  233. data/ext/asyncengine_ext/libuv/test/benchmark-pound.c +324 -0
  234. data/ext/asyncengine_ext/libuv/test/benchmark-pump.c +462 -0
  235. data/ext/asyncengine_ext/libuv/test/benchmark-sizes.c +40 -0
  236. data/ext/asyncengine_ext/libuv/test/benchmark-spawn.c +156 -0
  237. data/ext/asyncengine_ext/libuv/test/benchmark-tcp-write-batch.c +140 -0
  238. data/ext/asyncengine_ext/libuv/test/benchmark-thread.c +64 -0
  239. data/ext/asyncengine_ext/libuv/test/benchmark-udp-packet-storm.c +247 -0
  240. data/ext/asyncengine_ext/libuv/test/blackhole-server.c +118 -0
  241. data/ext/asyncengine_ext/libuv/test/dns-server.c +321 -0
  242. data/ext/asyncengine_ext/libuv/test/echo-server.c +370 -0
  243. data/ext/asyncengine_ext/libuv/test/fixtures/empty_file +0 -0
  244. data/ext/asyncengine_ext/libuv/test/fixtures/load_error.node +1 -0
  245. data/ext/asyncengine_ext/libuv/test/run-benchmarks.c +64 -0
  246. data/ext/asyncengine_ext/libuv/test/run-tests.c +108 -0
  247. data/ext/asyncengine_ext/libuv/test/runner-unix.c +315 -0
  248. data/ext/asyncengine_ext/libuv/test/runner-unix.h +36 -0
  249. data/ext/asyncengine_ext/libuv/test/runner-win.c +343 -0
  250. data/ext/asyncengine_ext/libuv/test/runner-win.h +42 -0
  251. data/ext/asyncengine_ext/libuv/test/runner.c +317 -0
  252. data/ext/asyncengine_ext/libuv/test/runner.h +159 -0
  253. data/ext/asyncengine_ext/libuv/test/task.h +117 -0
  254. data/ext/asyncengine_ext/libuv/test/test-async.c +216 -0
  255. data/ext/asyncengine_ext/libuv/test/test-callback-stack.c +203 -0
  256. data/ext/asyncengine_ext/libuv/test/test-connection-fail.c +148 -0
  257. data/ext/asyncengine_ext/libuv/test/test-counters-init.c +216 -0
  258. data/ext/asyncengine_ext/libuv/test/test-cwd-and-chdir.c +64 -0
  259. data/ext/asyncengine_ext/libuv/test/test-delayed-accept.c +197 -0
  260. data/ext/asyncengine_ext/libuv/test/test-dlerror.c +49 -0
  261. data/ext/asyncengine_ext/libuv/test/test-eio-overflow.c +90 -0
  262. data/ext/asyncengine_ext/libuv/test/test-error.c +59 -0
  263. data/ext/asyncengine_ext/libuv/test/test-fail-always.c +29 -0
  264. data/ext/asyncengine_ext/libuv/test/test-fs-event.c +442 -0
  265. data/ext/asyncengine_ext/libuv/test/test-fs.c +1731 -0
  266. data/ext/asyncengine_ext/libuv/test/test-get-currentexe.c +63 -0
  267. data/ext/asyncengine_ext/libuv/test/test-get-loadavg.c +36 -0
  268. data/ext/asyncengine_ext/libuv/test/test-get-memory.c +38 -0
  269. data/ext/asyncengine_ext/libuv/test/test-getaddrinfo.c +122 -0
  270. data/ext/asyncengine_ext/libuv/test/test-gethostbyname.c +189 -0
  271. data/ext/asyncengine_ext/libuv/test/test-getsockname.c +342 -0
  272. data/ext/asyncengine_ext/libuv/test/test-hrtime.c +51 -0
  273. data/ext/asyncengine_ext/libuv/test/test-idle.c +81 -0
  274. data/ext/asyncengine_ext/libuv/test/test-ipc-send-recv.c +209 -0
  275. data/ext/asyncengine_ext/libuv/test/test-ipc.c +614 -0
  276. data/ext/asyncengine_ext/libuv/test/test-list.h +371 -0
  277. data/ext/asyncengine_ext/libuv/test/test-loop-handles.c +359 -0
  278. data/ext/asyncengine_ext/libuv/test/test-multiple-listen.c +102 -0
  279. data/ext/asyncengine_ext/libuv/test/test-mutexes.c +63 -0
  280. data/ext/asyncengine_ext/libuv/test/test-pass-always.c +28 -0
  281. data/ext/asyncengine_ext/libuv/test/test-ping-pong.c +253 -0
  282. data/ext/asyncengine_ext/libuv/test/test-pipe-bind-error.c +140 -0
  283. data/ext/asyncengine_ext/libuv/test/test-pipe-connect-error.c +96 -0
  284. data/ext/asyncengine_ext/libuv/test/test-platform-output.c +87 -0
  285. data/ext/asyncengine_ext/libuv/test/test-process-title.c +42 -0
  286. data/ext/asyncengine_ext/libuv/test/test-ref.c +322 -0
  287. data/ext/asyncengine_ext/libuv/test/test-run-once.c +44 -0
  288. data/ext/asyncengine_ext/libuv/test/test-shutdown-close.c +103 -0
  289. data/ext/asyncengine_ext/libuv/test/test-shutdown-eof.c +183 -0
  290. data/ext/asyncengine_ext/libuv/test/test-spawn.c +499 -0
  291. data/ext/asyncengine_ext/libuv/test/test-stdio-over-pipes.c +256 -0
  292. data/ext/asyncengine_ext/libuv/test/test-tcp-bind-error.c +191 -0
  293. data/ext/asyncengine_ext/libuv/test/test-tcp-bind6-error.c +154 -0
  294. data/ext/asyncengine_ext/libuv/test/test-tcp-close.c +129 -0
  295. data/ext/asyncengine_ext/libuv/test/test-tcp-connect-error.c +70 -0
  296. data/ext/asyncengine_ext/libuv/test/test-tcp-connect6-error.c +68 -0
  297. data/ext/asyncengine_ext/libuv/test/test-tcp-flags.c +51 -0
  298. data/ext/asyncengine_ext/libuv/test/test-tcp-write-error.c +168 -0
  299. data/ext/asyncengine_ext/libuv/test/test-tcp-write-to-half-open-connection.c +135 -0
  300. data/ext/asyncengine_ext/libuv/test/test-tcp-writealot.c +195 -0
  301. data/ext/asyncengine_ext/libuv/test/test-thread.c +183 -0
  302. data/ext/asyncengine_ext/libuv/test/test-threadpool.c +57 -0
  303. data/ext/asyncengine_ext/libuv/test/test-timer-again.c +141 -0
  304. data/ext/asyncengine_ext/libuv/test/test-timer.c +130 -0
  305. data/ext/asyncengine_ext/libuv/test/test-tty.c +110 -0
  306. data/ext/asyncengine_ext/libuv/test/test-udp-dgram-too-big.c +86 -0
  307. data/ext/asyncengine_ext/libuv/test/test-udp-ipv6.c +156 -0
  308. data/ext/asyncengine_ext/libuv/test/test-udp-multicast-join.c +139 -0
  309. data/ext/asyncengine_ext/libuv/test/test-udp-multicast-ttl.c +86 -0
  310. data/ext/asyncengine_ext/libuv/test/test-udp-options.c +86 -0
  311. data/ext/asyncengine_ext/libuv/test/test-udp-send-and-recv.c +208 -0
  312. data/ext/asyncengine_ext/libuv/test/test-util.c +97 -0
  313. data/ext/asyncengine_ext/libuv/uv.gyp +435 -0
  314. data/ext/asyncengine_ext/libuv/vcbuild.bat +105 -0
  315. data/lib/asyncengine/version.rb +3 -0
  316. data/lib/asyncengine.rb +41 -0
  317. metadata +384 -0
data/ext/asyncengine_ext/libuv/src/win/tcp.c
@@ -0,0 +1,1282 @@
+ /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a copy
+  * of this software and associated documentation files (the "Software"), to
+  * deal in the Software without restriction, including without limitation the
+  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+  * sell copies of the Software, and to permit persons to whom the Software is
+  * furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice shall be included in
+  * all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+  * IN THE SOFTWARE.
+  */
+
+ #include <assert.h>
+
+ #include "uv.h"
+ #include "../uv-common.h"
+ #include "internal.h"
+
+
+ /*
+  * Threshold of active tcp streams for which to preallocate tcp read buffers.
+  * (Due to node slab allocator performing poorly under this pattern,
+  * the optimization is temporarily disabled (threshold=0). This will be
+  * revisited once node allocator is improved.)
+  */
+ const unsigned int uv_active_tcp_streams_threshold = 0;
+
+ /*
+  * Number of simultaneous pending AcceptEx calls.
+  */
+ const unsigned int uv_simultaneous_server_accepts = 32;
+
+ /* A zero-size buffer for use by uv_tcp_read */
+ static char uv_zero_[] = "";
+
+ static int uv__tcp_nodelay(uv_tcp_t* handle, SOCKET socket, int enable) {
+   if (setsockopt(socket,
+                  IPPROTO_TCP,
+                  TCP_NODELAY,
+                  (const char*)&enable,
+                  sizeof enable) == -1) {
+     uv__set_sys_error(handle->loop, errno);
+     return -1;
+   }
+   return 0;
+ }
+
+
+ static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsigned int delay) {
+   if (setsockopt(socket,
+                  SOL_SOCKET,
+                  SO_KEEPALIVE,
+                  (const char*)&enable,
+                  sizeof enable) == -1) {
+     uv__set_sys_error(handle->loop, errno);
+     return -1;
+   }
+
+   if (enable && setsockopt(socket,
+                            IPPROTO_TCP,
+                            TCP_KEEPALIVE,
+                            (const char*)&delay,
+                            sizeof delay) == -1) {
+     uv__set_sys_error(handle->loop, errno);
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ static int uv_tcp_set_socket(uv_loop_t* loop, uv_tcp_t* handle,
+     SOCKET socket, int imported) {
+   DWORD yes = 1;
+   int non_ifs_lsp;
+
+   assert(handle->socket == INVALID_SOCKET);
+
+   /* Set the socket to nonblocking mode */
+   if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   /* Make the socket non-inheritable */
+   if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
+     uv__set_sys_error(loop, GetLastError());
+     return -1;
+   }
+
+   /* Associate it with the I/O completion port. */
+   /* Use uv_handle_t pointer as completion key. */
+   if (CreateIoCompletionPort((HANDLE)socket,
+                              loop->iocp,
+                              (ULONG_PTR)socket,
+                              0) == NULL) {
+     if (imported) {
+       handle->flags |= UV_HANDLE_EMULATE_IOCP;
+     } else {
+       uv__set_sys_error(loop, GetLastError());
+       return -1;
+     }
+   }
+
+   non_ifs_lsp = (handle->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
+     uv_tcp_non_ifs_lsp_ipv4;
+
+   if (pSetFileCompletionNotificationModes && !non_ifs_lsp) {
+     if (pSetFileCompletionNotificationModes((HANDLE) socket,
+         FILE_SKIP_SET_EVENT_ON_HANDLE |
+         FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
+       handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
+     } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
+       uv__set_sys_error(loop, GetLastError());
+       return -1;
+     }
+   }
+
+   if ((handle->flags & UV_HANDLE_TCP_NODELAY) &&
+       uv__tcp_nodelay(handle, socket, 1)) {
+     return -1;
+   }
+
+   /* TODO: Use stored delay. */
+   if ((handle->flags & UV_HANDLE_TCP_KEEPALIVE) &&
+       uv__tcp_keepalive(handle, socket, 1, 60)) {
+     return -1;
+   }
+
+   handle->socket = socket;
+
+   return 0;
+ }
+
+
+ int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
+   uv_stream_init(loop, (uv_stream_t*)handle);
+
+   handle->accept_reqs = NULL;
+   handle->pending_accepts = NULL;
+   handle->socket = INVALID_SOCKET;
+   handle->type = UV_TCP;
+   handle->reqs_pending = 0;
+   handle->func_acceptex = NULL;
+   handle->func_connectex = NULL;
+   handle->processed_accepts = 0;
+
+   loop->counters.tcp_init++;
+
+   return 0;
+ }
+
+
+ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
+   int status;
+   int sys_error;
+   unsigned int i;
+   uv_tcp_accept_t* req;
+
+   if (handle->flags & UV_HANDLE_CONNECTION &&
+       handle->shutdown_req != NULL &&
+       handle->write_reqs_pending == 0) {
+
+     if (handle->flags & UV_HANDLE_CLOSING) {
+       status = -1;
+       sys_error = WSAEINTR;
+     } else if (shutdown(handle->socket, SD_SEND) != SOCKET_ERROR) {
+       status = 0;
+       handle->flags |= UV_HANDLE_SHUT;
+     } else {
+       status = -1;
+       sys_error = WSAGetLastError();
+     }
+     if (handle->shutdown_req->cb) {
+       if (status == -1) {
+         uv__set_sys_error(loop, sys_error);
+       }
+       handle->shutdown_req->cb(handle->shutdown_req, status);
+     }
+
+     handle->shutdown_req = NULL;
+
+     uv_unref(loop);
+     DECREASE_PENDING_REQ_COUNT(handle);
+     return;
+   }
+
+   if (handle->flags & UV_HANDLE_CLOSING &&
+       handle->reqs_pending == 0) {
+     assert(!(handle->flags & UV_HANDLE_CLOSED));
+     handle->flags |= UV_HANDLE_CLOSED;
+
+     if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
+       closesocket(handle->socket);
+       handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+     }
+
+     if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->accept_reqs) {
+       if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+         for (i = 0; i < uv_simultaneous_server_accepts; i++) {
+           req = &handle->accept_reqs[i];
+           if (req->wait_handle != INVALID_HANDLE_VALUE) {
+             UnregisterWait(req->wait_handle);
+             req->wait_handle = INVALID_HANDLE_VALUE;
+           }
+           if (req->event_handle) {
+             CloseHandle(req->event_handle);
+             req->event_handle = NULL;
+           }
+         }
+       }
+
+       free(handle->accept_reqs);
+       handle->accept_reqs = NULL;
+     }
+
+     if (handle->flags & UV_HANDLE_CONNECTION &&
+         handle->flags & UV_HANDLE_EMULATE_IOCP) {
+       if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+         UnregisterWait(handle->read_req.wait_handle);
+         handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+       }
+       if (handle->read_req.event_handle) {
+         CloseHandle(handle->read_req.event_handle);
+         handle->read_req.event_handle = NULL;
+       }
+     }
+
+     if (handle->close_cb) {
+       handle->close_cb((uv_handle_t*)handle);
+     }
+
+     loop->active_tcp_streams--;
+
+     uv_unref(loop);
+   }
+ }
+
+
+ static int uv__bind(uv_tcp_t* handle,
+                     int domain,
+                     struct sockaddr* addr,
+                     int addrsize) {
+   DWORD err;
+   int r;
+
+   if (handle->socket == INVALID_SOCKET) {
+     SOCKET sock = socket(domain, SOCK_STREAM, 0);
+     if (sock == INVALID_SOCKET) {
+       uv__set_sys_error(handle->loop, WSAGetLastError());
+       return -1;
+     }
+
+     if (uv_tcp_set_socket(handle->loop, handle, sock, 0) == -1) {
+       closesocket(sock);
+       return -1;
+     }
+   }
+
+   r = bind(handle->socket, addr, addrsize);
+
+   if (r == SOCKET_ERROR) {
+     err = WSAGetLastError();
+     if (err == WSAEADDRINUSE) {
+       /* Some errors are not to be reported until connect() or listen() */
+       handle->bind_error = err;
+       handle->flags |= UV_HANDLE_BIND_ERROR;
+     } else {
+       uv__set_sys_error(handle->loop, err);
+       return -1;
+     }
+   }
+
+   handle->flags |= UV_HANDLE_BOUND;
+
+   return 0;
+ }
+
+
+ int uv__tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
+   return uv__bind(handle,
+                   AF_INET,
+                   (struct sockaddr*)&addr,
+                   sizeof(struct sockaddr_in));
+ }
+
+
+ int uv__tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
+   if (uv_allow_ipv6) {
+     handle->flags |= UV_HANDLE_IPV6;
+     return uv__bind(handle,
+                     AF_INET6,
+                     (struct sockaddr*)&addr,
+                     sizeof(struct sockaddr_in6));
+
+   } else {
+     uv__set_sys_error(handle->loop, WSAEAFNOSUPPORT);
+     return -1;
+   }
+ }
+
+
+ static void CALLBACK post_completion(void* context, BOOLEAN timed_out) {
+   uv_tcp_accept_t* req;
+   uv_tcp_t* handle;
+
+   req = (uv_tcp_accept_t*) context;
+   assert(req != NULL);
+   handle = (uv_tcp_t*)req->data;
+   assert(handle != NULL);
+   assert(!timed_out);
+
+   if (!PostQueuedCompletionStatus(handle->loop->iocp,
+                                   req->overlapped.InternalHigh,
+                                   0,
+                                   &req->overlapped)) {
+     uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+   }
+ }
+
+
+ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
+   uv_loop_t* loop = handle->loop;
+   BOOL success;
+   DWORD bytes;
+   SOCKET accept_socket;
+   short family;
+
+   assert(handle->flags & UV_HANDLE_LISTENING);
+   assert(req->accept_socket == INVALID_SOCKET);
+
+   /* choose family and extension function */
+   if (handle->flags & UV_HANDLE_IPV6) {
+     family = AF_INET6;
+   } else {
+     family = AF_INET;
+   }
+
+   /* Open a socket for the accepted connection. */
+   accept_socket = socket(family, SOCK_STREAM, 0);
+   if (accept_socket == INVALID_SOCKET) {
+     SET_REQ_ERROR(req, WSAGetLastError());
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+     handle->reqs_pending++;
+     return;
+   }
+
+   /* Prepare the overlapped structure. */
+   memset(&(req->overlapped), 0, sizeof(req->overlapped));
+   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+     req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+   }
+
+   success = handle->func_acceptex(handle->socket,
+                                   accept_socket,
+                                   (void*)req->accept_buffer,
+                                   0,
+                                   sizeof(struct sockaddr_storage),
+                                   sizeof(struct sockaddr_storage),
+                                   &bytes,
+                                   &req->overlapped);
+
+   if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
+     /* Process the req without IOCP. */
+     req->accept_socket = accept_socket;
+     handle->reqs_pending++;
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+   } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
+     /* The req will be processed with IOCP. */
+     req->accept_socket = accept_socket;
+     handle->reqs_pending++;
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+         req->wait_handle == INVALID_HANDLE_VALUE &&
+         !RegisterWaitForSingleObject(&req->wait_handle,
+           req->overlapped.hEvent, post_completion, (void*) req,
+           INFINITE, WT_EXECUTEINWAITTHREAD)) {
+       SET_REQ_ERROR(req, GetLastError());
+       uv_insert_pending_req(loop, (uv_req_t*)req);
+       handle->reqs_pending++;
+       return;
+     }
+   } else {
+     /* Make this req pending reporting an error. */
+     SET_REQ_ERROR(req, WSAGetLastError());
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+     handle->reqs_pending++;
+     /* Destroy the preallocated client socket. */
+     closesocket(accept_socket);
+     /* Destroy the event handle */
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+       CloseHandle(req->overlapped.hEvent);
+       req->event_handle = NULL;
+     }
+   }
+ }
+
+
+ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
+   uv_read_t* req;
+   uv_buf_t buf;
+   int result;
+   DWORD bytes, flags;
+
+   assert(handle->flags & UV_HANDLE_READING);
+   assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+   req = &handle->read_req;
+   memset(&req->overlapped, 0, sizeof(req->overlapped));
+
+   /*
+    * Preallocate a read buffer if the number of active streams is below
+    * the threshold.
+    */
+   if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
+     handle->flags &= ~UV_HANDLE_ZERO_READ;
+     handle->read_buffer = handle->alloc_cb((uv_handle_t*) handle, 65536);
+     assert(handle->read_buffer.len > 0);
+     buf = handle->read_buffer;
+   } else {
+     handle->flags |= UV_HANDLE_ZERO_READ;
+     buf.base = (char*) &uv_zero_;
+     buf.len = 0;
+   }
+
+   /* Prepare the overlapped structure. */
+   memset(&(req->overlapped), 0, sizeof(req->overlapped));
+   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+     assert(req->event_handle);
+     req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+   }
+
+   flags = 0;
+   result = WSARecv(handle->socket,
+                    (WSABUF*)&buf,
+                    1,
+                    &bytes,
+                    &flags,
+                    &req->overlapped,
+                    NULL);
+
+   if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+     /* Process the req without IOCP. */
+     handle->flags |= UV_HANDLE_READ_PENDING;
+     req->overlapped.InternalHigh = bytes;
+     handle->reqs_pending++;
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+   } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+     /* The req will be processed with IOCP. */
+     handle->flags |= UV_HANDLE_READ_PENDING;
+     handle->reqs_pending++;
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+         req->wait_handle == INVALID_HANDLE_VALUE &&
+         !RegisterWaitForSingleObject(&req->wait_handle,
+           req->overlapped.hEvent, post_completion, (void*) req,
+           INFINITE, WT_EXECUTEINWAITTHREAD)) {
+       SET_REQ_ERROR(req, GetLastError());
+       uv_insert_pending_req(loop, (uv_req_t*)req);
+     }
+   } else {
+     /* Make this req pending reporting an error. */
+     SET_REQ_ERROR(req, WSAGetLastError());
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+     handle->reqs_pending++;
+   }
+ }
+
+
+ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
+   uv_loop_t* loop = handle->loop;
+   unsigned int i, simultaneous_accepts;
+   uv_tcp_accept_t* req;
+
+   assert(backlog > 0);
+
+   if (handle->flags & UV_HANDLE_BIND_ERROR) {
+     uv__set_sys_error(loop, handle->bind_error);
+     return -1;
+   }
+
+   if (!(handle->flags & UV_HANDLE_BOUND) &&
+       uv_tcp_bind(handle, uv_addr_ip4_any_) < 0)
+     return -1;
+
+   if (!handle->func_acceptex) {
+     if(!uv_get_acceptex_function(handle->socket, &handle->func_acceptex)) {
+       uv__set_sys_error(loop, WSAEAFNOSUPPORT);
+       return -1;
+     }
+   }
+
+   if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
+       listen(handle->socket, backlog) == SOCKET_ERROR) {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   handle->flags |= UV_HANDLE_LISTENING;
+   handle->connection_cb = cb;
+
+   simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1
+     : uv_simultaneous_server_accepts;
+
+   if(!handle->accept_reqs) {
+     handle->accept_reqs = (uv_tcp_accept_t*)
+       malloc(simultaneous_accepts * sizeof(uv_tcp_accept_t));
+     if (!handle->accept_reqs) {
+       uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+     }
+
+     for (i = 0; i < simultaneous_accepts; i++) {
+       req = &handle->accept_reqs[i];
+       uv_req_init(loop, (uv_req_t*)req);
+       req->type = UV_ACCEPT;
+       req->accept_socket = INVALID_SOCKET;
+       req->data = handle;
+
+       req->wait_handle = INVALID_HANDLE_VALUE;
+       if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+         req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+         if (!req->event_handle) {
+           uv_fatal_error(GetLastError(), "CreateEvent");
+         }
+       } else {
+         req->event_handle = NULL;
+       }
+
+       uv_tcp_queue_accept(handle, req);
+     }
+   }
+
+   return 0;
+ }
+
+
+ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
+   uv_loop_t* loop = server->loop;
+   int rv = 0;
+
+   uv_tcp_accept_t* req = server->pending_accepts;
+
+   if (!req) {
+     /* No valid connections found, so we error out. */
+     uv__set_sys_error(loop, WSAEWOULDBLOCK);
+     return -1;
+   }
+
+   if (req->accept_socket == INVALID_SOCKET) {
+     uv__set_sys_error(loop, WSAENOTCONN);
+     return -1;
+   }
+
+   if (uv_tcp_set_socket(client->loop, client, req->accept_socket, 0) == -1) {
+     closesocket(req->accept_socket);
+     rv = -1;
+   } else {
+     uv_connection_init((uv_stream_t*) client);
+     /* AcceptEx() implicitly binds the accepted socket. */
+     client->flags |= UV_HANDLE_BOUND;
+   }
+
+   /* Prepare the req to pick up a new connection */
+   server->pending_accepts = req->next_pending;
+   req->next_pending = NULL;
+   req->accept_socket = INVALID_SOCKET;
+
+   if (!(server->flags & UV_HANDLE_CLOSING)) {
+     /* Check if we're in a middle of changing the number of pending accepts. */
+     if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
+       uv_tcp_queue_accept(server, req);
+     } else {
+       /* We better be switching to a single pending accept. */
+       assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
+
+       server->processed_accepts++;
+
+       if (server->processed_accepts >= uv_simultaneous_server_accepts) {
+         server->processed_accepts = 0;
+         /*
+          * All previously queued accept requests are now processed.
+          * We now switch to queueing just a single accept.
+          */
+         uv_tcp_queue_accept(server, &server->accept_reqs[0]);
+         server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
+         server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+       }
+     }
+   }
+
+   loop->active_tcp_streams++;
+
+   return rv;
+ }
+
+
+ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
+     uv_read_cb read_cb) {
+   uv_loop_t* loop = handle->loop;
+
+   if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_READING) {
+     uv__set_sys_error(loop, WSAEALREADY);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_EOF) {
+     uv__set_sys_error(loop, WSAESHUTDOWN);
+     return -1;
+   }
+
+   handle->flags |= UV_HANDLE_READING;
+   handle->read_cb = read_cb;
+   handle->alloc_cb = alloc_cb;
+
+   /* If reading was stopped and then started again, there could still be a */
+   /* read request pending. */
+   if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+         !handle->read_req.event_handle) {
+       handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
+       if (!handle->read_req.event_handle) {
+         uv_fatal_error(GetLastError(), "CreateEvent");
+       }
+     }
+     uv_tcp_queue_read(loop, handle);
+   }
+
+   return 0;
+ }
+
+
+ int uv__tcp_connect(uv_connect_t* req,
+                     uv_tcp_t* handle,
+                     struct sockaddr_in address,
+                     uv_connect_cb cb) {
+   uv_loop_t* loop = handle->loop;
+   int addrsize = sizeof(struct sockaddr_in);
+   BOOL success;
+   DWORD bytes;
+
+   if (handle->flags & UV_HANDLE_BIND_ERROR) {
+     uv__set_sys_error(loop, handle->bind_error);
+     return -1;
+   }
+
+   if (!(handle->flags & UV_HANDLE_BOUND) &&
+       uv_tcp_bind(handle, uv_addr_ip4_any_) < 0)
+     return -1;
+
+   if (!handle->func_connectex) {
+     if(!uv_get_connectex_function(handle->socket, &handle->func_connectex)) {
+       uv__set_sys_error(loop, WSAEAFNOSUPPORT);
+       return -1;
+     }
+   }
+
+   uv_req_init(loop, (uv_req_t*) req);
+   req->type = UV_CONNECT;
+   req->handle = (uv_stream_t*) handle;
+   req->cb = cb;
+   memset(&req->overlapped, 0, sizeof(req->overlapped));
+
+   success = handle->func_connectex(handle->socket,
+                                    (struct sockaddr*) &address,
+                                    addrsize,
+                                    NULL,
+                                    0,
+                                    &bytes,
+                                    &req->overlapped);
+
+   if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
+     /* Process the req without IOCP. */
+     handle->reqs_pending++;
+     uv_ref(loop);
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+   } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
+     /* The req will be processed with IOCP. */
+     handle->reqs_pending++;
+     uv_ref(loop);
+   } else {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ int uv__tcp_connect6(uv_connect_t* req,
+                      uv_tcp_t* handle,
+                      struct sockaddr_in6 address,
+                      uv_connect_cb cb) {
+   uv_loop_t* loop = handle->loop;
+   int addrsize = sizeof(struct sockaddr_in6);
+   BOOL success;
+   DWORD bytes;
+
+   if (!uv_allow_ipv6) {
+     uv__set_sys_error(loop, WSAEAFNOSUPPORT);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_BIND_ERROR) {
+     uv__set_sys_error(loop, handle->bind_error);
+     return -1;
+   }
+
+   if (!(handle->flags & UV_HANDLE_BOUND) &&
+       uv_tcp_bind6(handle, uv_addr_ip6_any_) < 0)
+     return -1;
+
+   if (!handle->func_connectex) {
+     if(!uv_get_connectex_function(handle->socket, &handle->func_connectex)) {
+       uv__set_sys_error(loop, WSAEAFNOSUPPORT);
+       return -1;
+     }
+   }
+
+   uv_req_init(loop, (uv_req_t*) req);
+   req->type = UV_CONNECT;
+   req->handle = (uv_stream_t*) handle;
+   req->cb = cb;
+   memset(&req->overlapped, 0, sizeof(req->overlapped));
+
+   success = handle->func_connectex(handle->socket,
+                                    (struct sockaddr*) &address,
+                                    addrsize,
+                                    NULL,
+                                    0,
+                                    &bytes,
+                                    &req->overlapped);
+
+   if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
+     handle->reqs_pending++;
+     uv_ref(loop);
+     uv_insert_pending_req(loop, (uv_req_t*)req);
+   } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
+     handle->reqs_pending++;
+     uv_ref(loop);
+   } else {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ int uv_tcp_getsockname(uv_tcp_t* handle, struct sockaddr* name,
+     int* namelen) {
+   uv_loop_t* loop = handle->loop;
+   int result;
+
+   if (!(handle->flags & UV_HANDLE_BOUND)) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_BIND_ERROR) {
+     uv__set_sys_error(loop, handle->bind_error);
+     return -1;
+   }
+
+   result = getsockname(handle->socket, name, namelen);
+   if (result != 0) {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
+     int* namelen) {
+   uv_loop_t* loop = handle->loop;
+   int result;
+
+   if (!(handle->flags & UV_HANDLE_BOUND)) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_BIND_ERROR) {
+     uv__set_sys_error(loop, handle->bind_error);
+     return -1;
+   }
+
+   result = getpeername(handle->socket, name, namelen);
+   if (result != 0) {
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
+     uv_buf_t bufs[], int bufcnt, uv_write_cb cb) {
+   int result;
+   DWORD bytes;
+
+   if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_SHUTTING) {
+     uv__set_sys_error(loop, WSAESHUTDOWN);
+     return -1;
+   }
+
+   uv_req_init(loop, (uv_req_t*) req);
+   req->type = UV_WRITE;
+   req->handle = (uv_stream_t*) handle;
+   req->cb = cb;
+   memset(&req->overlapped, 0, sizeof(req->overlapped));
+
+   /* Prepare the overlapped structure. */
+   memset(&(req->overlapped), 0, sizeof(req->overlapped));
+   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+     req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+     if (!req->event_handle) {
+       uv_fatal_error(GetLastError(), "CreateEvent");
+     }
+     req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+   }
+
+   result = WSASend(handle->socket,
+                    (WSABUF*)bufs,
+                    bufcnt,
+                    &bytes,
+                    0,
+                    &req->overlapped,
+                    NULL);
+
+   if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+     /* Request completed immediately. */
+     req->queued_bytes = 0;
+     handle->reqs_pending++;
+     handle->write_reqs_pending++;
+     uv_insert_pending_req(loop, (uv_req_t*) req);
+     uv_ref(loop);
+   } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+     /* Request queued by the kernel. */
+     req->queued_bytes = uv_count_bufs(bufs, bufcnt);
+     handle->reqs_pending++;
+     handle->write_reqs_pending++;
+     handle->write_queue_size += req->queued_bytes;
+     uv_ref(loop);
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+         req->wait_handle == INVALID_HANDLE_VALUE &&
+         !RegisterWaitForSingleObject(&req->wait_handle,
+           req->overlapped.hEvent, post_completion, (void*) req,
+           INFINITE, WT_EXECUTEINWAITTHREAD)) {
+       SET_REQ_ERROR(req, GetLastError());
+       uv_insert_pending_req(loop, (uv_req_t*)req);
+     }
+   } else {
+     /* Send failed due to an error. */
+     uv__set_sys_error(loop, WSAGetLastError());
+     return -1;
+   }
+
+   return 0;
+ }
+
+
+ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
+     uv_req_t* req) {
+   DWORD bytes, flags, err;
+   uv_buf_t buf;
+
+   assert(handle->type == UV_TCP);
+
+   handle->flags &= ~UV_HANDLE_READ_PENDING;
+
+   if (!REQ_SUCCESS(req)) {
+     /* An error occurred doing the read. */
+     if ((handle->flags & UV_HANDLE_READING) ||
+         !(handle->flags & UV_HANDLE_ZERO_READ)) {
+       handle->flags &= ~UV_HANDLE_READING;
+       buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
+             uv_buf_init(NULL, 0) : handle->read_buffer;
+
+       err = GET_REQ_SOCK_ERROR(req);
+
+       if (err == WSAECONNABORTED) {
+         /*
+          * Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
+          */
+         uv__set_error(loop, UV_ECONNRESET, err);
+       } else {
+         uv__set_sys_error(loop, err);
+       }
+
+       handle->read_cb((uv_stream_t*)handle, -1, buf);
+     }
+   } else {
+     if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
+       /* The read was done with a non-zero buffer length. */
+       if (req->overlapped.InternalHigh > 0) {
+         /* Successful read */
+         handle->read_cb((uv_stream_t*)handle,
+                         req->overlapped.InternalHigh,
+                         handle->read_buffer);
+         /* Read again only if bytes == buf.len */
+         if (req->overlapped.InternalHigh < handle->read_buffer.len) {
+           goto done;
+         }
+       } else {
+         /* Connection closed */
+         handle->flags &= ~UV_HANDLE_READING;
+         handle->flags |= UV_HANDLE_EOF;
+         uv__set_error(loop, UV_EOF, ERROR_SUCCESS);
+         buf.base = 0;
+         buf.len = 0;
+         handle->read_cb((uv_stream_t*)handle, -1, handle->read_buffer);
+         goto done;
+       }
+     }
+
+     /* Do nonblocking reads until the buffer is empty */
+     while (handle->flags & UV_HANDLE_READING) {
+       buf = handle->alloc_cb((uv_handle_t*) handle, 65536);
+       assert(buf.len > 0);
+       flags = 0;
+       if (WSARecv(handle->socket,
+                   (WSABUF*)&buf,
+                   1,
+                   &bytes,
+                   &flags,
+                   NULL,
+                   NULL) != SOCKET_ERROR) {
+         if (bytes > 0) {
+           /* Successful read */
+           handle->read_cb((uv_stream_t*)handle, bytes, buf);
+           /* Read again only if bytes == buf.len */
+           if (bytes < buf.len) {
+             break;
+           }
+         } else {
+           /* Connection closed */
+           handle->flags &= ~UV_HANDLE_READING;
+           handle->flags |= UV_HANDLE_EOF;
+           uv__set_error(loop, UV_EOF, ERROR_SUCCESS);
+           handle->read_cb((uv_stream_t*)handle, -1, buf);
+           break;
+         }
+       } else {
+         err = WSAGetLastError();
+         if (err == WSAEWOULDBLOCK) {
+           /* Read buffer was completely empty, report a 0-byte read. */
+           uv__set_sys_error(loop, WSAEWOULDBLOCK);
+           handle->read_cb((uv_stream_t*)handle, 0, buf);
+         } else {
+           if (err == WSAECONNABORTED) {
+             /*
+              * Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
+              */
+             uv__set_error(loop, UV_ECONNRESET, err);
+           } else {
+             /* Ouch! serious error. */
+             uv__set_sys_error(loop, err);
+           }
+           handle->flags &= ~UV_HANDLE_READING;
+           handle->read_cb((uv_stream_t*)handle, -1, buf);
+         }
+         break;
+       }
+     }
+
+ done:
+     /* Post another read if still reading and not closing. */
+     if ((handle->flags & UV_HANDLE_READING) &&
+         !(handle->flags & UV_HANDLE_READ_PENDING)) {
+       uv_tcp_queue_read(loop, handle);
+     }
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
+     uv_write_t* req) {
+   assert(handle->type == UV_TCP);
+
+   assert(handle->write_queue_size >= req->queued_bytes);
+   handle->write_queue_size -= req->queued_bytes;
+
+   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+     if (req->wait_handle != INVALID_HANDLE_VALUE) {
+       UnregisterWait(req->wait_handle);
+       req->wait_handle = INVALID_HANDLE_VALUE;
+     }
+     if (req->event_handle) {
+       CloseHandle(req->event_handle);
+       req->event_handle = NULL;
+     }
+   }
+
+   if (req->cb) {
+     uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
+     ((uv_write_cb)req->cb)(req, loop->last_err.code == UV_OK ? 0 : -1);
+   }
+
+   handle->write_reqs_pending--;
+   if (handle->flags & UV_HANDLE_SHUTTING &&
+       handle->write_reqs_pending == 0) {
+     uv_want_endgame(loop, (uv_handle_t*)handle);
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+   uv_unref(loop);
+ }
+
+
+ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
+     uv_req_t* raw_req) {
+   uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
+
+   assert(handle->type == UV_TCP);
+
+   /* If handle->accepted_socket is not a valid socket, then */
+   /* uv_queue_accept must have failed. This is a serious error. We stop */
+   /* accepting connections and report this error to the connection */
+   /* callback. */
+   if (req->accept_socket == INVALID_SOCKET) {
+     if (handle->flags & UV_HANDLE_LISTENING) {
+       handle->flags &= ~UV_HANDLE_LISTENING;
+       if (handle->connection_cb) {
+         uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
+         handle->connection_cb((uv_stream_t*)handle, -1);
+       }
+     }
+   } else if (REQ_SUCCESS(req) &&
+       setsockopt(req->accept_socket,
+                  SOL_SOCKET,
+                  SO_UPDATE_ACCEPT_CONTEXT,
+                  (char*)&handle->socket,
+                  sizeof(handle->socket)) == 0) {
+     req->next_pending = handle->pending_accepts;
+     handle->pending_accepts = req;
+
+     /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
+     if (handle->connection_cb) {
+       handle->connection_cb((uv_stream_t*)handle, 0);
+     }
+   } else {
+     /* Error related to accepted socket is ignored because the server */
+     /* socket may still be healthy. If the server socket is broken, */
+     /* uv_queue_accept will detect it. */
+     closesocket(req->accept_socket);
+     req->accept_socket = INVALID_SOCKET;
+     if (handle->flags & UV_HANDLE_LISTENING) {
+       uv_tcp_queue_accept(handle, req);
+     }
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
+     uv_connect_t* req) {
+   assert(handle->type == UV_TCP);
+
+   if (req->cb) {
+     if (REQ_SUCCESS(req)) {
+       if (setsockopt(handle->socket,
+                      SOL_SOCKET,
+                      SO_UPDATE_CONNECT_CONTEXT,
+                      NULL,
+                      0) == 0) {
+         uv_connection_init((uv_stream_t*)handle);
+         loop->active_tcp_streams++;
+         ((uv_connect_cb)req->cb)(req, 0);
+       } else {
+         uv__set_sys_error(loop, WSAGetLastError());
+         ((uv_connect_cb)req->cb)(req, -1);
+       }
+     } else {
+       uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
+       ((uv_connect_cb)req->cb)(req, -1);
+     }
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+   uv_unref(loop);
+ }
+
+
+ int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
+     int tcp_connection) {
+   SOCKET socket = WSASocketW(AF_INET,
+                              SOCK_STREAM,
+                              IPPROTO_IP,
+                              socket_protocol_info,
+                              0,
+                              WSA_FLAG_OVERLAPPED);
+
+   if (socket == INVALID_SOCKET) {
+     uv__set_sys_error(tcp->loop, WSAGetLastError());
+     return -1;
+   }
+
+   tcp->flags |= UV_HANDLE_BOUND;
+   tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
+   if (tcp_connection) {
+     uv_connection_init((uv_stream_t*)tcp);
+   }
+
+   if (socket_protocol_info->iAddressFamily == AF_INET6) {
+     tcp->flags |= UV_HANDLE_IPV6;
+   }
+
+   if (uv_tcp_set_socket(tcp->loop, tcp, socket, 1) != 0) {
+     return -1;
+   }
+
+   tcp->loop->active_tcp_streams++;
+   return 0;
+ }
+
+
+ int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
+   if (handle->socket != INVALID_SOCKET &&
+       uv__tcp_nodelay(handle, handle->socket, enable)) {
+     return -1;
+   }
+
+   if (enable) {
+     handle->flags |= UV_HANDLE_TCP_NODELAY;
+   } else {
+     handle->flags &= ~UV_HANDLE_TCP_NODELAY;
+   }
+
+   return 0;
+ }
+
+
+ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
+   if (handle->socket != INVALID_SOCKET &&
+       uv__tcp_keepalive(handle, handle->socket, enable, delay)) {
+     return -1;
+   }
+
+   if (enable) {
+     handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
+   } else {
+     handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
+   }
+
+   /* TODO: Store delay if handle->socket isn't created yet. */
+
+   return 0;
+ }
+
+
+
+ int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
+     LPWSAPROTOCOL_INFOW protocol_info) {
+   if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+     /*
+      * We're about to share the socket with another process. Because
+      * this is a listening socket, we assume that the other process will
+      * be accepting connections on it. So, before sharing the socket
+      * with another process, we call listen here in the parent process.
+      */
+
+     if (!(handle->flags & UV_HANDLE_LISTENING)) {
+       if (!(handle->flags & UV_HANDLE_BOUND)) {
+         uv__set_artificial_error(handle->loop, UV_EINVAL);
+         return -1;
+       }
+       if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
+         uv__set_sys_error(handle->loop, WSAGetLastError());
+         return -1;
+       }
+     }
+   }
+
+   if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) {
+     uv__set_sys_error(handle->loop, WSAGetLastError());
+     return -1;
+   }
+
+   handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
+   return 0;
+ }
+
+
+ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+   if (handle->flags & UV_HANDLE_CONNECTION) {
+     uv__set_artificial_error(handle->loop, UV_EINVAL);
+     return -1;
+   }
+
+   /* Check if we're already in the desired mode. */
+   if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) ||
+       (!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
+     return 0;
+   }
+
+   /* Don't allow switching from single pending accept to many. */
+   if (enable) {
+     uv__set_artificial_error(handle->loop, UV_ENOTSUP);
+     return -1;
+   }
+
+   /* Check if we're in a middle of changing the number of pending accepts. */
+   if (handle->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING) {
+     return 0;
+   }
+
+   handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+
+   /* Flip the changing flag if we have already queued multiple accepts. */
+   if (handle->flags & UV_HANDLE_LISTENING) {
+     handle->flags |= UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
+   }
+
+   return 0;
+ }
+
+
+ void uv_tcp_close(uv_tcp_t* tcp) {
+   int non_ifs_lsp;
+   int close_socket = 1;
+
+   /*
+    * In order for winsock to do a graceful close there must not be
+    * any pending reads.
+    */
+   if (tcp->flags & UV_HANDLE_READ_PENDING) {
+     /* Just do shutdown on non-shared sockets, which ensures graceful close. */
+     if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
+       shutdown(tcp->socket, SD_SEND);
+       tcp->flags |= UV_HANDLE_SHUT;
+     } else {
+       /* Check if we have any non-IFS LSPs stacked on top of TCP */
+       non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
+                     uv_tcp_non_ifs_lsp_ipv4;
+
+       if (!non_ifs_lsp) {
+         /*
+          * Shared socket with no non-IFS LSPs, request to cancel pending I/O.
+          * The socket will be closed inside endgame.
+          */
+         CancelIo((HANDLE)tcp->socket);
+         close_socket = 0;
+       }
+     }
+   }
+
+   tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);
+
+   if (close_socket) {
+     closesocket(tcp->socket);
+     tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+   }
+
+   if (tcp->reqs_pending == 0) {
+     uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
+   }
+ }