asyncengine 0.0.1.testing

Files changed (317)
  1. data/README.markdown +0 -0
  2. data/asyncengine.gemspec +26 -0
  3. data/ext/asyncengine_ext/asyncengine_ruby.c +82 -0
  4. data/ext/asyncengine_ext/extconf.rb +47 -0
  5. data/ext/asyncengine_ext/libuv/AUTHORS +45 -0
  6. data/ext/asyncengine_ext/libuv/LICENSE +42 -0
  7. data/ext/asyncengine_ext/libuv/Makefile +119 -0
  8. data/ext/asyncengine_ext/libuv/README.md +88 -0
  9. data/ext/asyncengine_ext/libuv/build/gcc_version.py +20 -0
  10. data/ext/asyncengine_ext/libuv/common.gypi +176 -0
  11. data/ext/asyncengine_ext/libuv/config-mingw.mk +61 -0
  12. data/ext/asyncengine_ext/libuv/config-unix.mk +173 -0
  13. data/ext/asyncengine_ext/libuv/gyp_uv +60 -0
  14. data/ext/asyncengine_ext/libuv/include/ares.h +591 -0
  15. data/ext/asyncengine_ext/libuv/include/ares_version.h +24 -0
  16. data/ext/asyncengine_ext/libuv/include/uv-private/eio.h +403 -0
  17. data/ext/asyncengine_ext/libuv/include/uv-private/ev.h +838 -0
  18. data/ext/asyncengine_ext/libuv/include/uv-private/ngx-queue.h +106 -0
  19. data/ext/asyncengine_ext/libuv/include/uv-private/tree.h +768 -0
  20. data/ext/asyncengine_ext/libuv/include/uv-private/uv-unix.h +256 -0
  21. data/ext/asyncengine_ext/libuv/include/uv-private/uv-win.h +458 -0
  22. data/ext/asyncengine_ext/libuv/include/uv.h +1556 -0
  23. data/ext/asyncengine_ext/libuv/src/ares/AUTHORS +37 -0
  24. data/ext/asyncengine_ext/libuv/src/ares/CHANGES +1218 -0
  25. data/ext/asyncengine_ext/libuv/src/ares/CMakeLists.txt +22 -0
  26. data/ext/asyncengine_ext/libuv/src/ares/NEWS +21 -0
  27. data/ext/asyncengine_ext/libuv/src/ares/README +60 -0
  28. data/ext/asyncengine_ext/libuv/src/ares/README.cares +13 -0
  29. data/ext/asyncengine_ext/libuv/src/ares/README.msvc +142 -0
  30. data/ext/asyncengine_ext/libuv/src/ares/README.node +21 -0
  31. data/ext/asyncengine_ext/libuv/src/ares/RELEASE-NOTES +26 -0
  32. data/ext/asyncengine_ext/libuv/src/ares/TODO +23 -0
  33. data/ext/asyncengine_ext/libuv/src/ares/ares__close_sockets.c +66 -0
  34. data/ext/asyncengine_ext/libuv/src/ares/ares__get_hostent.c +263 -0
  35. data/ext/asyncengine_ext/libuv/src/ares/ares__read_line.c +71 -0
  36. data/ext/asyncengine_ext/libuv/src/ares/ares__timeval.c +111 -0
  37. data/ext/asyncengine_ext/libuv/src/ares/ares_cancel.c +63 -0
  38. data/ext/asyncengine_ext/libuv/src/ares/ares_data.c +190 -0
  39. data/ext/asyncengine_ext/libuv/src/ares/ares_data.h +65 -0
  40. data/ext/asyncengine_ext/libuv/src/ares/ares_destroy.c +105 -0
  41. data/ext/asyncengine_ext/libuv/src/ares/ares_dns.h +90 -0
  42. data/ext/asyncengine_ext/libuv/src/ares/ares_expand_name.c +200 -0
  43. data/ext/asyncengine_ext/libuv/src/ares/ares_expand_string.c +75 -0
  44. data/ext/asyncengine_ext/libuv/src/ares/ares_fds.c +63 -0
  45. data/ext/asyncengine_ext/libuv/src/ares/ares_free_hostent.c +42 -0
  46. data/ext/asyncengine_ext/libuv/src/ares/ares_free_string.c +25 -0
  47. data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.c +30 -0
  48. data/ext/asyncengine_ext/libuv/src/ares/ares_getenv.h +26 -0
  49. data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyaddr.c +301 -0
  50. data/ext/asyncengine_ext/libuv/src/ares/ares_gethostbyname.c +523 -0
  51. data/ext/asyncengine_ext/libuv/src/ares/ares_getnameinfo.c +427 -0
  52. data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.c +122 -0
  53. data/ext/asyncengine_ext/libuv/src/ares/ares_getopt.h +53 -0
  54. data/ext/asyncengine_ext/libuv/src/ares/ares_getsock.c +72 -0
  55. data/ext/asyncengine_ext/libuv/src/ares/ares_init.c +1809 -0
  56. data/ext/asyncengine_ext/libuv/src/ares/ares_iphlpapi.h +221 -0
  57. data/ext/asyncengine_ext/libuv/src/ares/ares_ipv6.h +78 -0
  58. data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.c +142 -0
  59. data/ext/asyncengine_ext/libuv/src/ares/ares_library_init.h +42 -0
  60. data/ext/asyncengine_ext/libuv/src/ares/ares_llist.c +86 -0
  61. data/ext/asyncengine_ext/libuv/src/ares/ares_llist.h +42 -0
  62. data/ext/asyncengine_ext/libuv/src/ares/ares_mkquery.c +195 -0
  63. data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.c +181 -0
  64. data/ext/asyncengine_ext/libuv/src/ares/ares_nowarn.h +55 -0
  65. data/ext/asyncengine_ext/libuv/src/ares/ares_options.c +248 -0
  66. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_a_reply.c +263 -0
  67. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_aaaa_reply.c +259 -0
  68. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_mx_reply.c +170 -0
  69. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ns_reply.c +182 -0
  70. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_ptr_reply.c +217 -0
  71. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_srv_reply.c +179 -0
  72. data/ext/asyncengine_ext/libuv/src/ares/ares_parse_txt_reply.c +201 -0
  73. data/ext/asyncengine_ext/libuv/src/ares/ares_platform.c +11035 -0
  74. data/ext/asyncengine_ext/libuv/src/ares/ares_platform.h +43 -0
  75. data/ext/asyncengine_ext/libuv/src/ares/ares_private.h +355 -0
  76. data/ext/asyncengine_ext/libuv/src/ares/ares_process.c +1295 -0
  77. data/ext/asyncengine_ext/libuv/src/ares/ares_query.c +183 -0
  78. data/ext/asyncengine_ext/libuv/src/ares/ares_rules.h +144 -0
  79. data/ext/asyncengine_ext/libuv/src/ares/ares_search.c +321 -0
  80. data/ext/asyncengine_ext/libuv/src/ares/ares_send.c +134 -0
  81. data/ext/asyncengine_ext/libuv/src/ares/ares_setup.h +199 -0
  82. data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.c +66 -0
  83. data/ext/asyncengine_ext/libuv/src/ares/ares_strcasecmp.h +30 -0
  84. data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.c +42 -0
  85. data/ext/asyncengine_ext/libuv/src/ares/ares_strdup.h +26 -0
  86. data/ext/asyncengine_ext/libuv/src/ares/ares_strerror.c +56 -0
  87. data/ext/asyncengine_ext/libuv/src/ares/ares_timeout.c +80 -0
  88. data/ext/asyncengine_ext/libuv/src/ares/ares_version.c +11 -0
  89. data/ext/asyncengine_ext/libuv/src/ares/ares_writev.c +79 -0
  90. data/ext/asyncengine_ext/libuv/src/ares/ares_writev.h +36 -0
  91. data/ext/asyncengine_ext/libuv/src/ares/bitncmp.c +59 -0
  92. data/ext/asyncengine_ext/libuv/src/ares/bitncmp.h +26 -0
  93. data/ext/asyncengine_ext/libuv/src/ares/config_cygwin/ares_config.h +512 -0
  94. data/ext/asyncengine_ext/libuv/src/ares/config_darwin/ares_config.h +512 -0
  95. data/ext/asyncengine_ext/libuv/src/ares/config_freebsd/ares_config.h +512 -0
  96. data/ext/asyncengine_ext/libuv/src/ares/config_linux/ares_config.h +512 -0
  97. data/ext/asyncengine_ext/libuv/src/ares/config_netbsd/ares_config.h +512 -0
  98. data/ext/asyncengine_ext/libuv/src/ares/config_openbsd/ares_config.h +512 -0
  99. data/ext/asyncengine_ext/libuv/src/ares/config_sunos/ares_config.h +512 -0
  100. data/ext/asyncengine_ext/libuv/src/ares/config_win32/ares_config.h +369 -0
  101. data/ext/asyncengine_ext/libuv/src/ares/get_ver.awk +35 -0
  102. data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.c +451 -0
  103. data/ext/asyncengine_ext/libuv/src/ares/inet_net_pton.h +31 -0
  104. data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.c +208 -0
  105. data/ext/asyncengine_ext/libuv/src/ares/inet_ntop.h +26 -0
  106. data/ext/asyncengine_ext/libuv/src/ares/nameser.h +203 -0
  107. data/ext/asyncengine_ext/libuv/src/ares/setup_once.h +504 -0
  108. data/ext/asyncengine_ext/libuv/src/ares/windows_port.c +22 -0
  109. data/ext/asyncengine_ext/libuv/src/unix/async.c +58 -0
  110. data/ext/asyncengine_ext/libuv/src/unix/cares.c +194 -0
  111. data/ext/asyncengine_ext/libuv/src/unix/check.c +80 -0
  112. data/ext/asyncengine_ext/libuv/src/unix/core.c +588 -0
  113. data/ext/asyncengine_ext/libuv/src/unix/cygwin.c +84 -0
  114. data/ext/asyncengine_ext/libuv/src/unix/darwin.c +341 -0
  115. data/ext/asyncengine_ext/libuv/src/unix/dl.c +91 -0
  116. data/ext/asyncengine_ext/libuv/src/unix/eio/Changes +63 -0
  117. data/ext/asyncengine_ext/libuv/src/unix/eio/LICENSE +36 -0
  118. data/ext/asyncengine_ext/libuv/src/unix/eio/Makefile.am +15 -0
  119. data/ext/asyncengine_ext/libuv/src/unix/eio/aclocal.m4 +8957 -0
  120. data/ext/asyncengine_ext/libuv/src/unix/eio/autogen.sh +3 -0
  121. data/ext/asyncengine_ext/libuv/src/unix/eio/config.h.in +86 -0
  122. data/ext/asyncengine_ext/libuv/src/unix/eio/config_cygwin.h +80 -0
  123. data/ext/asyncengine_ext/libuv/src/unix/eio/config_darwin.h +141 -0
  124. data/ext/asyncengine_ext/libuv/src/unix/eio/config_freebsd.h +81 -0
  125. data/ext/asyncengine_ext/libuv/src/unix/eio/config_linux.h +94 -0
  126. data/ext/asyncengine_ext/libuv/src/unix/eio/config_netbsd.h +81 -0
  127. data/ext/asyncengine_ext/libuv/src/unix/eio/config_openbsd.h +137 -0
  128. data/ext/asyncengine_ext/libuv/src/unix/eio/config_sunos.h +84 -0
  129. data/ext/asyncengine_ext/libuv/src/unix/eio/configure.ac +22 -0
  130. data/ext/asyncengine_ext/libuv/src/unix/eio/demo.c +194 -0
  131. data/ext/asyncengine_ext/libuv/src/unix/eio/ecb.h +370 -0
  132. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.3 +3428 -0
  133. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.c +2593 -0
  134. data/ext/asyncengine_ext/libuv/src/unix/eio/eio.pod +969 -0
  135. data/ext/asyncengine_ext/libuv/src/unix/eio/libeio.m4 +195 -0
  136. data/ext/asyncengine_ext/libuv/src/unix/eio/xthread.h +164 -0
  137. data/ext/asyncengine_ext/libuv/src/unix/error.c +98 -0
  138. data/ext/asyncengine_ext/libuv/src/unix/ev/Changes +388 -0
  139. data/ext/asyncengine_ext/libuv/src/unix/ev/LICENSE +36 -0
  140. data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.am +18 -0
  141. data/ext/asyncengine_ext/libuv/src/unix/ev/Makefile.in +771 -0
  142. data/ext/asyncengine_ext/libuv/src/unix/ev/README +58 -0
  143. data/ext/asyncengine_ext/libuv/src/unix/ev/aclocal.m4 +8957 -0
  144. data/ext/asyncengine_ext/libuv/src/unix/ev/autogen.sh +6 -0
  145. data/ext/asyncengine_ext/libuv/src/unix/ev/config.guess +1526 -0
  146. data/ext/asyncengine_ext/libuv/src/unix/ev/config.h.in +125 -0
  147. data/ext/asyncengine_ext/libuv/src/unix/ev/config.sub +1658 -0
  148. data/ext/asyncengine_ext/libuv/src/unix/ev/config_cygwin.h +123 -0
  149. data/ext/asyncengine_ext/libuv/src/unix/ev/config_darwin.h +122 -0
  150. data/ext/asyncengine_ext/libuv/src/unix/ev/config_freebsd.h +120 -0
  151. data/ext/asyncengine_ext/libuv/src/unix/ev/config_linux.h +141 -0
  152. data/ext/asyncengine_ext/libuv/src/unix/ev/config_netbsd.h +120 -0
  153. data/ext/asyncengine_ext/libuv/src/unix/ev/config_openbsd.h +126 -0
  154. data/ext/asyncengine_ext/libuv/src/unix/ev/config_sunos.h +122 -0
  155. data/ext/asyncengine_ext/libuv/src/unix/ev/configure +13037 -0
  156. data/ext/asyncengine_ext/libuv/src/unix/ev/configure.ac +18 -0
  157. data/ext/asyncengine_ext/libuv/src/unix/ev/depcomp +630 -0
  158. data/ext/asyncengine_ext/libuv/src/unix/ev/ev++.h +816 -0
  159. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.3 +5311 -0
  160. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.c +3921 -0
  161. data/ext/asyncengine_ext/libuv/src/unix/ev/ev.pod +5243 -0
  162. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_epoll.c +266 -0
  163. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_kqueue.c +235 -0
  164. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_poll.c +148 -0
  165. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_port.c +179 -0
  166. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_select.c +310 -0
  167. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_vars.h +203 -0
  168. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_win32.c +153 -0
  169. data/ext/asyncengine_ext/libuv/src/unix/ev/ev_wrap.h +196 -0
  170. data/ext/asyncengine_ext/libuv/src/unix/ev/event.c +402 -0
  171. data/ext/asyncengine_ext/libuv/src/unix/ev/event.h +170 -0
  172. data/ext/asyncengine_ext/libuv/src/unix/ev/install-sh +294 -0
  173. data/ext/asyncengine_ext/libuv/src/unix/ev/libev.m4 +39 -0
  174. data/ext/asyncengine_ext/libuv/src/unix/ev/ltmain.sh +8413 -0
  175. data/ext/asyncengine_ext/libuv/src/unix/ev/missing +336 -0
  176. data/ext/asyncengine_ext/libuv/src/unix/ev/mkinstalldirs +111 -0
  177. data/ext/asyncengine_ext/libuv/src/unix/freebsd.c +312 -0
  178. data/ext/asyncengine_ext/libuv/src/unix/fs.c +707 -0
  179. data/ext/asyncengine_ext/libuv/src/unix/idle.c +79 -0
  180. data/ext/asyncengine_ext/libuv/src/unix/internal.h +161 -0
  181. data/ext/asyncengine_ext/libuv/src/unix/kqueue.c +127 -0
  182. data/ext/asyncengine_ext/libuv/src/unix/linux/core.c +474 -0
  183. data/ext/asyncengine_ext/libuv/src/unix/linux/inotify.c +211 -0
  184. data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.c +230 -0
  185. data/ext/asyncengine_ext/libuv/src/unix/linux/syscalls.h +87 -0
  186. data/ext/asyncengine_ext/libuv/src/unix/loop.c +58 -0
  187. data/ext/asyncengine_ext/libuv/src/unix/netbsd.c +108 -0
  188. data/ext/asyncengine_ext/libuv/src/unix/openbsd.c +295 -0
  189. data/ext/asyncengine_ext/libuv/src/unix/pipe.c +266 -0
  190. data/ext/asyncengine_ext/libuv/src/unix/prepare.c +79 -0
  191. data/ext/asyncengine_ext/libuv/src/unix/process.c +369 -0
  192. data/ext/asyncengine_ext/libuv/src/unix/stream.c +1033 -0
  193. data/ext/asyncengine_ext/libuv/src/unix/sunos.c +466 -0
  194. data/ext/asyncengine_ext/libuv/src/unix/tcp.c +327 -0
  195. data/ext/asyncengine_ext/libuv/src/unix/thread.c +154 -0
  196. data/ext/asyncengine_ext/libuv/src/unix/timer.c +127 -0
  197. data/ext/asyncengine_ext/libuv/src/unix/tty.c +146 -0
  198. data/ext/asyncengine_ext/libuv/src/unix/udp.c +670 -0
  199. data/ext/asyncengine_ext/libuv/src/unix/uv-eio.c +124 -0
  200. data/ext/asyncengine_ext/libuv/src/unix/uv-eio.h +13 -0
  201. data/ext/asyncengine_ext/libuv/src/uv-common.c +354 -0
  202. data/ext/asyncengine_ext/libuv/src/uv-common.h +87 -0
  203. data/ext/asyncengine_ext/libuv/src/win/async.c +127 -0
  204. data/ext/asyncengine_ext/libuv/src/win/cares.c +290 -0
  205. data/ext/asyncengine_ext/libuv/src/win/core.c +270 -0
  206. data/ext/asyncengine_ext/libuv/src/win/dl.c +82 -0
  207. data/ext/asyncengine_ext/libuv/src/win/error.c +132 -0
  208. data/ext/asyncengine_ext/libuv/src/win/fs-event.c +514 -0
  209. data/ext/asyncengine_ext/libuv/src/win/fs.c +1576 -0
  210. data/ext/asyncengine_ext/libuv/src/win/getaddrinfo.c +372 -0
  211. data/ext/asyncengine_ext/libuv/src/win/handle.c +225 -0
  212. data/ext/asyncengine_ext/libuv/src/win/internal.h +352 -0
  213. data/ext/asyncengine_ext/libuv/src/win/loop-watcher.c +131 -0
  214. data/ext/asyncengine_ext/libuv/src/win/pipe.c +1661 -0
  215. data/ext/asyncengine_ext/libuv/src/win/process.c +1140 -0
  216. data/ext/asyncengine_ext/libuv/src/win/req.c +174 -0
  217. data/ext/asyncengine_ext/libuv/src/win/stream.c +201 -0
  218. data/ext/asyncengine_ext/libuv/src/win/tcp.c +1282 -0
  219. data/ext/asyncengine_ext/libuv/src/win/thread.c +332 -0
  220. data/ext/asyncengine_ext/libuv/src/win/threadpool.c +73 -0
  221. data/ext/asyncengine_ext/libuv/src/win/timer.c +276 -0
  222. data/ext/asyncengine_ext/libuv/src/win/tty.c +1795 -0
  223. data/ext/asyncengine_ext/libuv/src/win/udp.c +709 -0
  224. data/ext/asyncengine_ext/libuv/src/win/util.c +719 -0
  225. data/ext/asyncengine_ext/libuv/src/win/winapi.c +117 -0
  226. data/ext/asyncengine_ext/libuv/src/win/winapi.h +4419 -0
  227. data/ext/asyncengine_ext/libuv/src/win/winsock.c +470 -0
  228. data/ext/asyncengine_ext/libuv/src/win/winsock.h +138 -0
  229. data/ext/asyncengine_ext/libuv/test/benchmark-ares.c +118 -0
  230. data/ext/asyncengine_ext/libuv/test/benchmark-getaddrinfo.c +94 -0
  231. data/ext/asyncengine_ext/libuv/test/benchmark-list.h +105 -0
  232. data/ext/asyncengine_ext/libuv/test/benchmark-ping-pongs.c +213 -0
  233. data/ext/asyncengine_ext/libuv/test/benchmark-pound.c +324 -0
  234. data/ext/asyncengine_ext/libuv/test/benchmark-pump.c +462 -0
  235. data/ext/asyncengine_ext/libuv/test/benchmark-sizes.c +40 -0
  236. data/ext/asyncengine_ext/libuv/test/benchmark-spawn.c +156 -0
  237. data/ext/asyncengine_ext/libuv/test/benchmark-tcp-write-batch.c +140 -0
  238. data/ext/asyncengine_ext/libuv/test/benchmark-thread.c +64 -0
  239. data/ext/asyncengine_ext/libuv/test/benchmark-udp-packet-storm.c +247 -0
  240. data/ext/asyncengine_ext/libuv/test/blackhole-server.c +118 -0
  241. data/ext/asyncengine_ext/libuv/test/dns-server.c +321 -0
  242. data/ext/asyncengine_ext/libuv/test/echo-server.c +370 -0
  243. data/ext/asyncengine_ext/libuv/test/fixtures/empty_file +0 -0
  244. data/ext/asyncengine_ext/libuv/test/fixtures/load_error.node +1 -0
  245. data/ext/asyncengine_ext/libuv/test/run-benchmarks.c +64 -0
  246. data/ext/asyncengine_ext/libuv/test/run-tests.c +108 -0
  247. data/ext/asyncengine_ext/libuv/test/runner-unix.c +315 -0
  248. data/ext/asyncengine_ext/libuv/test/runner-unix.h +36 -0
  249. data/ext/asyncengine_ext/libuv/test/runner-win.c +343 -0
  250. data/ext/asyncengine_ext/libuv/test/runner-win.h +42 -0
  251. data/ext/asyncengine_ext/libuv/test/runner.c +317 -0
  252. data/ext/asyncengine_ext/libuv/test/runner.h +159 -0
  253. data/ext/asyncengine_ext/libuv/test/task.h +117 -0
  254. data/ext/asyncengine_ext/libuv/test/test-async.c +216 -0
  255. data/ext/asyncengine_ext/libuv/test/test-callback-stack.c +203 -0
  256. data/ext/asyncengine_ext/libuv/test/test-connection-fail.c +148 -0
  257. data/ext/asyncengine_ext/libuv/test/test-counters-init.c +216 -0
  258. data/ext/asyncengine_ext/libuv/test/test-cwd-and-chdir.c +64 -0
  259. data/ext/asyncengine_ext/libuv/test/test-delayed-accept.c +197 -0
  260. data/ext/asyncengine_ext/libuv/test/test-dlerror.c +49 -0
  261. data/ext/asyncengine_ext/libuv/test/test-eio-overflow.c +90 -0
  262. data/ext/asyncengine_ext/libuv/test/test-error.c +59 -0
  263. data/ext/asyncengine_ext/libuv/test/test-fail-always.c +29 -0
  264. data/ext/asyncengine_ext/libuv/test/test-fs-event.c +442 -0
  265. data/ext/asyncengine_ext/libuv/test/test-fs.c +1731 -0
  266. data/ext/asyncengine_ext/libuv/test/test-get-currentexe.c +63 -0
  267. data/ext/asyncengine_ext/libuv/test/test-get-loadavg.c +36 -0
  268. data/ext/asyncengine_ext/libuv/test/test-get-memory.c +38 -0
  269. data/ext/asyncengine_ext/libuv/test/test-getaddrinfo.c +122 -0
  270. data/ext/asyncengine_ext/libuv/test/test-gethostbyname.c +189 -0
  271. data/ext/asyncengine_ext/libuv/test/test-getsockname.c +342 -0
  272. data/ext/asyncengine_ext/libuv/test/test-hrtime.c +51 -0
  273. data/ext/asyncengine_ext/libuv/test/test-idle.c +81 -0
  274. data/ext/asyncengine_ext/libuv/test/test-ipc-send-recv.c +209 -0
  275. data/ext/asyncengine_ext/libuv/test/test-ipc.c +614 -0
  276. data/ext/asyncengine_ext/libuv/test/test-list.h +371 -0
  277. data/ext/asyncengine_ext/libuv/test/test-loop-handles.c +359 -0
  278. data/ext/asyncengine_ext/libuv/test/test-multiple-listen.c +102 -0
  279. data/ext/asyncengine_ext/libuv/test/test-mutexes.c +63 -0
  280. data/ext/asyncengine_ext/libuv/test/test-pass-always.c +28 -0
  281. data/ext/asyncengine_ext/libuv/test/test-ping-pong.c +253 -0
  282. data/ext/asyncengine_ext/libuv/test/test-pipe-bind-error.c +140 -0
  283. data/ext/asyncengine_ext/libuv/test/test-pipe-connect-error.c +96 -0
  284. data/ext/asyncengine_ext/libuv/test/test-platform-output.c +87 -0
  285. data/ext/asyncengine_ext/libuv/test/test-process-title.c +42 -0
  286. data/ext/asyncengine_ext/libuv/test/test-ref.c +322 -0
  287. data/ext/asyncengine_ext/libuv/test/test-run-once.c +44 -0
  288. data/ext/asyncengine_ext/libuv/test/test-shutdown-close.c +103 -0
  289. data/ext/asyncengine_ext/libuv/test/test-shutdown-eof.c +183 -0
  290. data/ext/asyncengine_ext/libuv/test/test-spawn.c +499 -0
  291. data/ext/asyncengine_ext/libuv/test/test-stdio-over-pipes.c +256 -0
  292. data/ext/asyncengine_ext/libuv/test/test-tcp-bind-error.c +191 -0
  293. data/ext/asyncengine_ext/libuv/test/test-tcp-bind6-error.c +154 -0
  294. data/ext/asyncengine_ext/libuv/test/test-tcp-close.c +129 -0
  295. data/ext/asyncengine_ext/libuv/test/test-tcp-connect-error.c +70 -0
  296. data/ext/asyncengine_ext/libuv/test/test-tcp-connect6-error.c +68 -0
  297. data/ext/asyncengine_ext/libuv/test/test-tcp-flags.c +51 -0
  298. data/ext/asyncengine_ext/libuv/test/test-tcp-write-error.c +168 -0
  299. data/ext/asyncengine_ext/libuv/test/test-tcp-write-to-half-open-connection.c +135 -0
  300. data/ext/asyncengine_ext/libuv/test/test-tcp-writealot.c +195 -0
  301. data/ext/asyncengine_ext/libuv/test/test-thread.c +183 -0
  302. data/ext/asyncengine_ext/libuv/test/test-threadpool.c +57 -0
  303. data/ext/asyncengine_ext/libuv/test/test-timer-again.c +141 -0
  304. data/ext/asyncengine_ext/libuv/test/test-timer.c +130 -0
  305. data/ext/asyncengine_ext/libuv/test/test-tty.c +110 -0
  306. data/ext/asyncengine_ext/libuv/test/test-udp-dgram-too-big.c +86 -0
  307. data/ext/asyncengine_ext/libuv/test/test-udp-ipv6.c +156 -0
  308. data/ext/asyncengine_ext/libuv/test/test-udp-multicast-join.c +139 -0
  309. data/ext/asyncengine_ext/libuv/test/test-udp-multicast-ttl.c +86 -0
  310. data/ext/asyncengine_ext/libuv/test/test-udp-options.c +86 -0
  311. data/ext/asyncengine_ext/libuv/test/test-udp-send-and-recv.c +208 -0
  312. data/ext/asyncengine_ext/libuv/test/test-util.c +97 -0
  313. data/ext/asyncengine_ext/libuv/uv.gyp +435 -0
  314. data/ext/asyncengine_ext/libuv/vcbuild.bat +105 -0
  315. data/lib/asyncengine/version.rb +3 -0
  316. data/lib/asyncengine.rb +41 -0
  317. metadata +384 -0
data/ext/asyncengine_ext/libuv/src/win/pipe.c
@@ -0,0 +1,1661 @@
+ /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a copy
+  * of this software and associated documentation files (the "Software"), to
+  * deal in the Software without restriction, including without limitation the
+  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+  * sell copies of the Software, and to permit persons to whom the Software is
+  * furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice shall be included in
+  * all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+  * IN THE SOFTWARE.
+  */
+
+ #include <assert.h>
+ #include <io.h>
+ #include <string.h>
+ #include <stdio.h>
+
+ #include "uv.h"
+ #include "../uv-common.h"
+ #include "internal.h"
+
+
+ /* A zero-size buffer for use by uv_pipe_read */
+ static char uv_zero_[] = "";
+
+ /* Null uv_buf_t */
+ static const uv_buf_t uv_null_buf_ = { 0, NULL };
+
+ /* The timeout that the pipe will wait for the remote end to write data */
+ /* when the local end wants to shut it down. */
+ static const int64_t eof_timeout = 50; /* ms */
+
+ static const int default_pending_pipe_instances = 4;
+
+ /* IPC protocol flags. */
+ #define UV_IPC_RAW_DATA 0x0001
+ #define UV_IPC_TCP_SERVER 0x0002
+ #define UV_IPC_TCP_CONNECTION 0x0004
+
+ /* IPC frame header. */
+ typedef struct {
+   int flags;
+   uint64_t raw_data_length;
+ } uv_ipc_frame_header_t;
+
+ /* IPC frame, which contains an imported TCP socket stream. */
+ typedef struct {
+   uv_ipc_frame_header_t header;
+   WSAPROTOCOL_INFOW socket_info;
+ } uv_ipc_frame_uv_stream;
+
+ static void eof_timer_init(uv_pipe_t* pipe);
+ static void eof_timer_start(uv_pipe_t* pipe);
+ static void eof_timer_stop(uv_pipe_t* pipe);
+ static void eof_timer_cb(uv_timer_t* timer, int status);
+ static void eof_timer_destroy(uv_pipe_t* pipe);
+ static void eof_timer_close_cb(uv_handle_t* handle);
+
+
+ static void uv_unique_pipe_name(char* ptr, char* name, size_t size) {
+   _snprintf(name, size, "\\\\.\\pipe\\uv\\%p-%d", ptr, GetCurrentProcessId());
+ }
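+
+ /* For a handle at address 0x0034f2a8 in process 1234 this yields a name
+  * like "\\.\pipe\uv\0034F2A8-1234". Deriving the name from the handle
+  * address and pid makes collisions unlikely; uv_stdio_pipe_server below
+  * handles the remaining case by incrementing the pointer and retrying. */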
+
+
+ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+   uv_stream_init(loop, (uv_stream_t*)handle);
+
+   handle->type = UV_NAMED_PIPE;
+   handle->reqs_pending = 0;
+   handle->handle = INVALID_HANDLE_VALUE;
+   handle->name = NULL;
+   handle->ipc_pid = 0;
+   handle->remaining_ipc_rawdata_bytes = 0;
+   handle->pending_ipc_info.socket_info = NULL;
+   handle->pending_ipc_info.tcp_connection = 0;
+   handle->ipc = ipc;
+   handle->non_overlapped_writes_tail = NULL;
+
+   uv_req_init(loop, (uv_req_t*) &handle->ipc_header_write_req);
+
+   loop->counters.pipe_init++;
+
+   return 0;
+ }
+
+
+ static void uv_pipe_connection_init(uv_pipe_t* handle) {
+   uv_connection_init((uv_stream_t*) handle);
+   handle->read_req.data = handle;
+   handle->eof_timer = NULL;
+ }
+
+
+ static HANDLE open_named_pipe(WCHAR* name, DWORD* duplex_flags) {
+   HANDLE pipeHandle;
+
+   /*
+    * Assume that we have a duplex pipe first, so attempt to
+    * connect with GENERIC_READ | GENERIC_WRITE.
+    */
+   pipeHandle = CreateFileW(name,
+                            GENERIC_READ | GENERIC_WRITE,
+                            0,
+                            NULL,
+                            OPEN_EXISTING,
+                            FILE_FLAG_OVERLAPPED,
+                            NULL);
+   if (pipeHandle != INVALID_HANDLE_VALUE) {
+     *duplex_flags = 0;
+     return pipeHandle;
+   }
+
+   /*
+    * If the pipe is not duplex, CreateFileW fails with
+    * ERROR_ACCESS_DENIED. In that case try to connect
+    * as read-only or write-only.
+    */
+   if (GetLastError() == ERROR_ACCESS_DENIED) {
+     pipeHandle = CreateFileW(name,
+                              GENERIC_READ | FILE_WRITE_ATTRIBUTES,
+                              0,
+                              NULL,
+                              OPEN_EXISTING,
+                              FILE_FLAG_OVERLAPPED,
+                              NULL);
+
+     if (pipeHandle != INVALID_HANDLE_VALUE) {
+       *duplex_flags = UV_HANDLE_SHUTTING;
+       return pipeHandle;
+     }
+   }
+
+   if (GetLastError() == ERROR_ACCESS_DENIED) {
+     pipeHandle = CreateFileW(name,
+                              GENERIC_WRITE | FILE_READ_ATTRIBUTES,
+                              0,
+                              NULL,
+                              OPEN_EXISTING,
+                              FILE_FLAG_OVERLAPPED,
+                              NULL);
+
+     if (pipeHandle != INVALID_HANDLE_VALUE) {
+       *duplex_flags = UV_HANDLE_EOF;
+       return pipeHandle;
+     }
+   }
+
+   return INVALID_HANDLE_VALUE;
+ }
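+
+ /* Summary of the fallback chain above: a duplex open is tried first; if
+  * the server end was created read-only or write-only, CreateFileW fails
+  * with ERROR_ACCESS_DENIED and the open is retried with reduced access
+  * rights. duplex_flags records which half is missing: UV_HANDLE_SHUTTING
+  * for a pipe that cannot be written, UV_HANDLE_EOF for one that cannot
+  * be read. */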
+
+
+ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+                          char* name, size_t nameSize) {
+   HANDLE pipeHandle;
+   int errno;
+   int err;
+   char* ptr = (char*)handle;
+
+   while (TRUE) {
+     uv_unique_pipe_name(ptr, name, nameSize);
+
+     pipeHandle = CreateNamedPipeA(name,
+         access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
+         PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
+         NULL);
+
+     if (pipeHandle != INVALID_HANDLE_VALUE) {
+       /* No name collisions. We're done. */
+       break;
+     }
+
+     errno = GetLastError();
+     if (errno != ERROR_PIPE_BUSY && errno != ERROR_ACCESS_DENIED) {
+       uv__set_sys_error(loop, errno);
+       err = -1;
+       goto done;
+     }
+
+     /* Pipe name collision. Increment the pointer and try again. */
+     ptr++;
+   }
+
+   if (CreateIoCompletionPort(pipeHandle,
+                              loop->iocp,
+                              (ULONG_PTR)handle,
+                              0) == NULL) {
+     uv__set_sys_error(loop, GetLastError());
+     err = -1;
+     goto done;
+   }
+
+   uv_pipe_connection_init(handle);
+   handle->handle = pipeHandle;
+   err = 0;
+
+ done:
+   if (err && pipeHandle != INVALID_HANDLE_VALUE) {
+     CloseHandle(pipeHandle);
+   }
+
+   return err;
+ }
+
+
+ static int uv_set_pipe_handle(uv_loop_t* loop, uv_pipe_t* handle,
+                               HANDLE pipeHandle, DWORD duplex_flags) {
+   NTSTATUS nt_status;
+   IO_STATUS_BLOCK io_status;
+   FILE_MODE_INFORMATION mode_info;
+   DWORD mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT;
+
+   if (!SetNamedPipeHandleState(pipeHandle, &mode, NULL, NULL)) {
+     /* If this returns ERROR_INVALID_PARAMETER we probably opened something */
+     /* that is not a pipe. */
+     if (GetLastError() == ERROR_INVALID_PARAMETER) {
+       SetLastError(WSAENOTSOCK);
+     }
+     return -1;
+   }
+
+   /* Check if the pipe was created with FILE_FLAG_OVERLAPPED. */
+   nt_status = pNtQueryInformationFile(pipeHandle,
+                                       &io_status,
+                                       &mode_info,
+                                       sizeof(mode_info),
+                                       FileModeInformation);
+   if (nt_status != STATUS_SUCCESS) {
+     return -1;
+   }
+
+   if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT ||
+       mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) {
+     /* Non-overlapped pipe. */
+     handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE;
+   } else {
+     /* Overlapped pipe. Try to associate with IOCP. */
+     if (CreateIoCompletionPort(pipeHandle,
+                                loop->iocp,
+                                (ULONG_PTR)handle,
+                                0) == NULL) {
+       handle->flags |= UV_HANDLE_EMULATE_IOCP;
+     }
+   }
+
+   handle->handle = pipeHandle;
+   handle->flags |= duplex_flags;
+
+   return 0;
+ }
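+
+ /* Two degraded modes are detected here. A pipe opened without
+  * FILE_FLAG_OVERLAPPED is flagged UV_HANDLE_NON_OVERLAPPED_PIPE and is
+  * later serviced by blocking ReadFile/WriteFile calls on the thread pool
+  * (see the *_thread_proc functions below). An overlapped pipe that
+  * cannot be associated with the loop's completion port, e.g. because it
+  * already belongs to another port, gets UV_HANDLE_EMULATE_IOCP and has
+  * completions delivered through registered waits instead. */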
+
+
+ static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
+   int errno;
+   uv_loop_t* loop;
+   uv_pipe_t* handle;
+   uv_shutdown_t* req;
+
+   req = (uv_shutdown_t*) parameter;
+   assert(req);
+   handle = (uv_pipe_t*) req->handle;
+   assert(handle);
+   loop = handle->loop;
+   assert(loop);
+
+   FlushFileBuffers(handle->handle);
+
+   /* Post completed */
+   POST_COMPLETION_FOR_REQ(loop, req);
+
+   return 0;
+ }
+
+
+ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
+   unsigned int uv_alloced;
+   DWORD result;
+   uv_shutdown_t* req;
+   NTSTATUS nt_status;
+   IO_STATUS_BLOCK io_status;
+   FILE_PIPE_LOCAL_INFORMATION pipe_info;
+
+   if ((handle->flags & UV_HANDLE_CONNECTION) &&
+       handle->shutdown_req != NULL &&
+       handle->write_reqs_pending == 0) {
+     req = handle->shutdown_req;
+
+     /* Clear the shutdown_req field so we don't go here again. */
+     handle->shutdown_req = NULL;
+
+     if (handle->flags & UV_HANDLE_CLOSING) {
+       /* Already closing. Cancel the shutdown. */
+       if (req->cb) {
+         uv__set_sys_error(loop, WSAEINTR);
+         req->cb(req, -1);
+       }
+       uv_unref(loop);
+       DECREASE_PENDING_REQ_COUNT(handle);
+       return;
+     }
+
+     /* Try to avoid flushing the pipe buffer in the thread pool. */
+     nt_status = pNtQueryInformationFile(handle->handle,
+                                         &io_status,
+                                         &pipe_info,
+                                         sizeof pipe_info,
+                                         FilePipeLocalInformation);
+
+     if (nt_status != STATUS_SUCCESS) {
+       /* Failure */
+       handle->flags &= ~UV_HANDLE_SHUTTING;
+       if (req->cb) {
+         uv__set_sys_error(loop, pRtlNtStatusToDosError(nt_status));
+         req->cb(req, -1);
+       }
+       uv_unref(loop);
+       DECREASE_PENDING_REQ_COUNT(handle);
+       return;
+     }
+
+     if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
+       /* Short-circuit, no need to call FlushFileBuffers. */
+       uv_insert_pending_req(loop, (uv_req_t*) req);
+       return;
+     }
+
+     /* Run FlushFileBuffers in the thread pool. */
+     result = QueueUserWorkItem(pipe_shutdown_thread_proc,
+                                req,
+                                WT_EXECUTELONGFUNCTION);
+     if (result) {
+       return;
+
+     } else {
+       /* Failure. */
+       handle->flags &= ~UV_HANDLE_SHUTTING;
+       if (req->cb) {
+         uv__set_sys_error(loop, GetLastError());
+         req->cb(req, -1);
+       }
+       uv_unref(loop);
+       DECREASE_PENDING_REQ_COUNT(handle);
+       return;
+     }
+   }
+
+   if (handle->flags & UV_HANDLE_CLOSING &&
+       handle->reqs_pending == 0) {
+     assert(!(handle->flags & UV_HANDLE_CLOSED));
+     handle->flags |= UV_HANDLE_CLOSED;
+
+     if (handle->flags & UV_HANDLE_CONNECTION) {
+       if (handle->pending_ipc_info.socket_info) {
+         free(handle->pending_ipc_info.socket_info);
+         handle->pending_ipc_info.socket_info = NULL;
+       }
+
+       if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+         if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+           UnregisterWait(handle->read_req.wait_handle);
+           handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+         }
+         if (handle->read_req.event_handle) {
+           CloseHandle(handle->read_req.event_handle);
+           handle->read_req.event_handle = NULL;
+         }
+       }
+     }
+
+     if (handle->flags & UV_HANDLE_PIPESERVER) {
+       assert(handle->accept_reqs);
+       free(handle->accept_reqs);
+       handle->accept_reqs = NULL;
+     }
+
+     /* Remember the state of this flag because the close callback is */
+     /* allowed to clobber or free the handle's memory */
+     uv_alloced = handle->flags & UV_HANDLE_UV_ALLOCED;
+
+     if (handle->close_cb) {
+       handle->close_cb((uv_handle_t*)handle);
+     }
+
+     if (uv_alloced) {
+       free(handle);
+     }
+
+     uv_unref(loop);
+   }
+ }
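+
+ /* Note the shutdown fast path above: FILE_PIPE_LOCAL_INFORMATION is
+  * queried first, and when WriteQuotaAvailable equals OutboundQuota the
+  * outbound buffer is already empty, so the potentially blocking
+  * FlushFileBuffers call never needs to leave the loop thread. */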
+
+
+ void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
+   handle->pending_instances = count;
+   handle->flags |= UV_HANDLE_PIPESERVER;
+ }
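+
+ /* A minimal sketch of how a server might use this, with illustrative
+  * names; the call must precede uv_pipe_bind, which otherwise falls back
+  * to default_pending_pipe_instances (4):
+  *
+  *   uv_pipe_t server;
+  *   uv_pipe_init(loop, &server, 0);
+  *   uv_pipe_pending_instances(&server, 16);
+  *   uv_pipe_bind(&server, "\\\\.\\pipe\\example");
+  */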
+
+
+ /* Creates a pipe server. */
+ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+   uv_loop_t* loop = handle->loop;
+   int i, errno, nameSize;
+   uv_pipe_accept_t* req;
+
+   if (handle->flags & UV_HANDLE_BOUND) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (!name) {
+     uv__set_sys_error(loop, WSAEINVAL);
+     return -1;
+   }
+
+   if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+     handle->pending_instances = default_pending_pipe_instances;
+   }
+
+   handle->accept_reqs = (uv_pipe_accept_t*)
+       malloc(sizeof(uv_pipe_accept_t) * handle->pending_instances);
+   if (!handle->accept_reqs) {
+     uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+   }
+
+   for (i = 0; i < handle->pending_instances; i++) {
+     req = &handle->accept_reqs[i];
+     uv_req_init(loop, (uv_req_t*) req);
+     req->type = UV_ACCEPT;
+     req->data = handle;
+     req->pipeHandle = INVALID_HANDLE_VALUE;
+     req->next_pending = NULL;
+   }
+
+   /* Convert name to UTF16. */
+   nameSize = uv_utf8_to_utf16(name, NULL, 0) * sizeof(wchar_t);
+   handle->name = (wchar_t*)malloc(nameSize);
+   if (!handle->name) {
+     uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+   }
+
+   if (!uv_utf8_to_utf16(name, handle->name, nameSize / sizeof(wchar_t))) {
+     uv__set_sys_error(loop, GetLastError());
+     return -1;
+   }
+
+   /*
+    * Attempt to create the first pipe with FILE_FLAG_FIRST_PIPE_INSTANCE.
+    * If this fails then there's already a pipe server for the given pipe name.
+    */
+   handle->accept_reqs[0].pipeHandle = CreateNamedPipeW(handle->name,
+       PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED |
+       FILE_FLAG_FIRST_PIPE_INSTANCE,
+       PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+       PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL);
+
+   if (handle->accept_reqs[0].pipeHandle == INVALID_HANDLE_VALUE) {
+     errno = GetLastError();
+     if (errno == ERROR_ACCESS_DENIED) {
+       uv__set_error(loop, UV_EADDRINUSE, errno);
+     } else if (errno == ERROR_PATH_NOT_FOUND || errno == ERROR_INVALID_NAME) {
+       uv__set_error(loop, UV_EACCES, errno);
+     } else {
+       uv__set_sys_error(loop, errno);
+     }
+     goto error;
+   }
+
+   if (uv_set_pipe_handle(loop, handle, handle->accept_reqs[0].pipeHandle, 0)) {
+     uv__set_sys_error(loop, GetLastError());
+     goto error;
+   }
+
+   handle->pending_accepts = NULL;
+   handle->flags |= UV_HANDLE_PIPESERVER;
+   handle->flags |= UV_HANDLE_BOUND;
+
+   return 0;
+
+ error:
+   if (handle->name) {
+     free(handle->name);
+     handle->name = NULL;
+   }
+
+   if (handle->accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE) {
+     CloseHandle(handle->accept_reqs[0].pipeHandle);
+     handle->accept_reqs[0].pipeHandle = INVALID_HANDLE_VALUE;
+   }
+
+   return -1;
+ }
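+
+ /* Only the first pipe instance is created here; the
+  * FILE_FLAG_FIRST_PIPE_INSTANCE flag doubles as the uniqueness check, so
+  * a second server binding the same name fails and the error is mapped to
+  * UV_EADDRINUSE. The remaining instances are created on demand by
+  * uv_pipe_queue_accept once uv_pipe_listen starts accepting. */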
+
+
+ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
+   int errno;
+   uv_loop_t* loop;
+   uv_pipe_t* handle;
+   uv_connect_t* req;
+   HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+   DWORD duplex_flags;
+
+   req = (uv_connect_t*) parameter;
+   assert(req);
+   handle = (uv_pipe_t*) req->handle;
+   assert(handle);
+   loop = handle->loop;
+   assert(loop);
+
+   /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. */
+   /* We wait for the pipe to become available with WaitNamedPipe. */
+   while (WaitNamedPipeW(handle->name, 30000)) {
+     /* The pipe is now available, try to connect. */
+     pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+     if (pipeHandle != INVALID_HANDLE_VALUE) {
+       break;
+     }
+
+     SwitchToThread();
+   }
+
+   if (pipeHandle != INVALID_HANDLE_VALUE &&
+       !uv_set_pipe_handle(loop, handle, pipeHandle, duplex_flags)) {
+     SET_REQ_SUCCESS(req);
+   } else {
+     SET_REQ_ERROR(req, GetLastError());
+   }
+
+   /* Post completed */
+   POST_COMPLETION_FOR_REQ(loop, req);
+
+   return 0;
+ }
+
+
+ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
+     const char* name, uv_connect_cb cb) {
+   uv_loop_t* loop = handle->loop;
+   int errno, nameSize;
+   HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+   DWORD duplex_flags;
+
+   uv_req_init(loop, (uv_req_t*) req);
+   req->type = UV_CONNECT;
+   req->handle = (uv_stream_t*) handle;
+   req->cb = cb;
+
+   /* Convert name to UTF16. */
+   nameSize = uv_utf8_to_utf16(name, NULL, 0) * sizeof(wchar_t);
+   handle->name = (wchar_t*)malloc(nameSize);
+   if (!handle->name) {
+     uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+   }
+
+   if (!uv_utf8_to_utf16(name, handle->name, nameSize / sizeof(wchar_t))) {
+     errno = GetLastError();
+     goto error;
+   }
+
+   pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+   if (pipeHandle == INVALID_HANDLE_VALUE) {
+     if (GetLastError() == ERROR_PIPE_BUSY) {
+       /* Wait for the server to make a pipe instance available. */
+       if (!QueueUserWorkItem(&pipe_connect_thread_proc,
+                              req,
+                              WT_EXECUTELONGFUNCTION)) {
+         errno = GetLastError();
+         goto error;
+       }
+
+       uv_ref(loop);
+       handle->reqs_pending++;
+
+       return;
+     }
+
+     errno = GetLastError();
+     goto error;
+   }
+
+   assert(pipeHandle != INVALID_HANDLE_VALUE);
+
+   if (uv_set_pipe_handle(loop,
+                          (uv_pipe_t*) req->handle,
+                          pipeHandle,
+                          duplex_flags)) {
+     errno = GetLastError();
+     goto error;
+   }
+
+   SET_REQ_SUCCESS(req);
+   uv_insert_pending_req(loop, (uv_req_t*) req);
+   handle->reqs_pending++;
+   uv_ref(loop);
+   return;
+
+ error:
+   if (handle->name) {
+     free(handle->name);
+     handle->name = NULL;
+   }
+
+   if (pipeHandle != INVALID_HANDLE_VALUE) {
+     CloseHandle(pipeHandle);
+   }
+
+   /* Make this req pending reporting an error. */
+   SET_REQ_ERROR(req, errno);
+   uv_insert_pending_req(loop, (uv_req_t*) req);
+   handle->reqs_pending++;
+   uv_ref(loop);
+   return;
+ }
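+
+ /* A minimal client sketch with illustrative callback names; on_alloc and
+  * on_read stand in for whatever the caller provides:
+  *
+  *   static void on_connect(uv_connect_t* req, int status) {
+  *     if (status == 0)
+  *       uv_pipe_read_start((uv_pipe_t*) req->handle, on_alloc, on_read);
+  *   }
+  *
+  *   uv_pipe_t client;
+  *   uv_connect_t req;
+  *   uv_pipe_init(loop, &client, 0);
+  *   uv_pipe_connect(&req, &client, "\\\\.\\pipe\\example", on_connect);
+  *
+  * If every server instance is busy, ERROR_PIPE_BUSY routes the request
+  * to pipe_connect_thread_proc, which blocks in WaitNamedPipeW on the
+  * thread pool instead of stalling the event loop. */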
+
+
+ /* Cleans up uv_pipe_t (server or connection) and all resources associated */
+ /* with it. */
+ void close_pipe(uv_pipe_t* handle, int* status, uv_err_t* err) {
+   int i;
+   HANDLE pipeHandle;
+
+   if (handle->name) {
+     free(handle->name);
+     handle->name = NULL;
+   }
+
+   if (handle->flags & UV_HANDLE_PIPESERVER) {
+     for (i = 0; i < handle->pending_instances; i++) {
+       pipeHandle = handle->accept_reqs[i].pipeHandle;
+       if (pipeHandle != INVALID_HANDLE_VALUE) {
+         CloseHandle(pipeHandle);
+         handle->accept_reqs[i].pipeHandle = INVALID_HANDLE_VALUE;
+       }
+     }
+   }
+
+   if (handle->flags & UV_HANDLE_CONNECTION) {
+     handle->flags |= UV_HANDLE_SHUTTING;
+     eof_timer_destroy(handle);
+   }
+
+   if ((handle->flags & UV_HANDLE_CONNECTION)
+       && handle->handle != INVALID_HANDLE_VALUE) {
+     CloseHandle(handle->handle);
+     handle->handle = INVALID_HANDLE_VALUE;
+   }
+ }
+
+
+ static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_pipe_accept_t* req, BOOL firstInstance) {
+   assert(handle->flags & UV_HANDLE_LISTENING);
+
+   if (!firstInstance) {
+     assert(req->pipeHandle == INVALID_HANDLE_VALUE);
+
+     req->pipeHandle = CreateNamedPipeW(handle->name,
+         PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED,
+         PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+         PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL);
+
+     if (req->pipeHandle == INVALID_HANDLE_VALUE) {
+       SET_REQ_ERROR(req, GetLastError());
+       uv_insert_pending_req(loop, (uv_req_t*) req);
+       handle->reqs_pending++;
+       return;
+     }
+
+     if (uv_set_pipe_handle(loop, handle, req->pipeHandle, 0)) {
+       CloseHandle(req->pipeHandle);
+       req->pipeHandle = INVALID_HANDLE_VALUE;
+       SET_REQ_ERROR(req, GetLastError());
+       uv_insert_pending_req(loop, (uv_req_t*) req);
+       handle->reqs_pending++;
+       return;
+     }
+   }
+
+   assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+
+   /* Prepare the overlapped structure. */
+   memset(&(req->overlapped), 0, sizeof(req->overlapped));
+
+   if (!ConnectNamedPipe(req->pipeHandle, &req->overlapped) &&
+       GetLastError() != ERROR_IO_PENDING) {
+     if (GetLastError() == ERROR_PIPE_CONNECTED) {
+       SET_REQ_SUCCESS(req);
+     } else {
+       CloseHandle(req->pipeHandle);
+       req->pipeHandle = INVALID_HANDLE_VALUE;
+       /* Make this req pending reporting an error. */
+       SET_REQ_ERROR(req, GetLastError());
+     }
+     uv_insert_pending_req(loop, (uv_req_t*) req);
+     handle->reqs_pending++;
+     return;
+   }
+
+   handle->reqs_pending++;
+ }
+
+
+ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
+   uv_loop_t* loop = server->loop;
+   uv_pipe_t* pipe_client;
+   uv_pipe_accept_t* req;
+
+   if (server->ipc) {
+     if (!server->pending_ipc_info.socket_info) {
+       /* No valid pending sockets. */
+       uv__set_sys_error(loop, WSAEWOULDBLOCK);
+       return -1;
+     }
+
+     return uv_tcp_import((uv_tcp_t*)client, server->pending_ipc_info.socket_info,
+                          server->pending_ipc_info.tcp_connection);
+   } else {
+     pipe_client = (uv_pipe_t*)client;
+
+     /* Find a connection instance that has been connected, but not yet */
+     /* accepted. */
+     req = server->pending_accepts;
+
+     if (!req) {
+       /* No valid connections found, so we error out. */
+       uv__set_sys_error(loop, WSAEWOULDBLOCK);
+       return -1;
+     }
+
+     /* Initialize the client handle and copy the pipeHandle to the client */
+     uv_pipe_connection_init(pipe_client);
+     pipe_client->handle = req->pipeHandle;
+
+     /* Prepare the req to pick up a new connection */
+     server->pending_accepts = req->next_pending;
+     req->next_pending = NULL;
+     req->pipeHandle = INVALID_HANDLE_VALUE;
+
+     if (!(server->flags & UV_HANDLE_CLOSING)) {
+       uv_pipe_queue_accept(loop, server, req, FALSE);
+     }
+   }
+
+   return 0;
+ }
+
+
+ /* Starts listening for connections for the given pipe. */
+ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+   uv_loop_t* loop = handle->loop;
+   int i, errno;
+
+   if (!(handle->flags & UV_HANDLE_BOUND)) {
+     uv__set_artificial_error(loop, UV_EINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_LISTENING ||
+       handle->flags & UV_HANDLE_READING) {
+     uv__set_artificial_error(loop, UV_EALREADY);
+     return -1;
+   }
+
+   if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+     uv__set_artificial_error(loop, UV_ENOTSUP);
+     return -1;
+   }
+
+   handle->flags |= UV_HANDLE_LISTENING;
+   handle->connection_cb = cb;
+
+   /* First pipe handle should have already been created in uv_pipe_bind */
+   assert(handle->accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);
+
+   for (i = 0; i < handle->pending_instances; i++) {
+     uv_pipe_queue_accept(loop, handle, &handle->accept_reqs[i], i == 0);
+   }
+
+   return 0;
+ }
+
+
+ static DWORD WINAPI uv_pipe_zero_readfile_thread_proc(void* parameter) {
+   int result;
+   DWORD bytes;
+   uv_read_t* req = (uv_read_t*) parameter;
+   uv_pipe_t* handle = (uv_pipe_t*) req->data;
+   uv_loop_t* loop = handle->loop;
+
+   assert(req != NULL);
+   assert(req->type == UV_READ);
+   assert(handle->type == UV_NAMED_PIPE);
+
+   result = ReadFile(handle->handle,
+                     &uv_zero_,
+                     0,
+                     &bytes,
+                     NULL);
+
+   if (!result) {
+     SET_REQ_ERROR(req, GetLastError());
+   }
+
+   POST_COMPLETION_FOR_REQ(loop, req);
+   return 0;
+ }
+
+
+ static DWORD WINAPI uv_pipe_writefile_thread_proc(void* parameter) {
+   int result;
+   DWORD bytes;
+   uv_write_t* req = (uv_write_t*) parameter;
+   uv_pipe_t* handle = (uv_pipe_t*) req->handle;
+   uv_loop_t* loop = handle->loop;
+
+   assert(req != NULL);
+   assert(req->type == UV_WRITE);
+   assert(handle->type == UV_NAMED_PIPE);
+   assert(req->write_buffer.base);
+
+   result = WriteFile(handle->handle,
+                      req->write_buffer.base,
+                      req->write_buffer.len,
+                      &bytes,
+                      NULL);
+
+   if (!result) {
+     SET_REQ_ERROR(req, GetLastError());
+   }
+
+   POST_COMPLETION_FOR_REQ(loop, req);
+   return 0;
+ }
+
+
+ static void CALLBACK post_completion_read_wait(void* context, BOOLEAN timed_out) {
+   uv_read_t* req;
+   uv_tcp_t* handle;
+
+   req = (uv_read_t*) context;
+   assert(req != NULL);
+   handle = (uv_tcp_t*)req->data;
+   assert(handle != NULL);
+   assert(!timed_out);
+
+   if (!PostQueuedCompletionStatus(handle->loop->iocp,
+                                   req->overlapped.InternalHigh,
+                                   0,
+                                   &req->overlapped)) {
+     uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+   }
+ }
+
+
+ static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out) {
+   uv_write_t* req;
+   uv_tcp_t* handle;
+
+   req = (uv_write_t*) context;
+   assert(req != NULL);
+   handle = (uv_tcp_t*)req->handle;
+   assert(handle != NULL);
+   assert(!timed_out);
+
+   if (!PostQueuedCompletionStatus(handle->loop->iocp,
+                                   req->overlapped.InternalHigh,
+                                   0,
+                                   &req->overlapped)) {
+     uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+   }
+ }
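+
+ /* The two wait callbacks above service handles flagged
+  * UV_HANDLE_EMULATE_IOCP: when an overlapped pipe handle could not be
+  * associated with the loop's completion port, a registered wait on the
+  * request's event handle fires instead, and the callback hand-posts an
+  * equivalent completion packet with PostQueuedCompletionStatus so the
+  * rest of the code can treat both kinds of handle uniformly. */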
+
+
+ static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
+   uv_read_t* req;
+   int result;
+
+   assert(handle->flags & UV_HANDLE_READING);
+   assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+   assert(handle->handle != INVALID_HANDLE_VALUE);
+
+   req = &handle->read_req;
+
+   if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+     if (!QueueUserWorkItem(&uv_pipe_zero_readfile_thread_proc,
+                            req,
+                            WT_EXECUTELONGFUNCTION)) {
+       /* Make this req pending reporting an error. */
+       SET_REQ_ERROR(req, GetLastError());
+       goto error;
+     }
+   } else {
+     memset(&req->overlapped, 0, sizeof(req->overlapped));
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
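+       /* Setting the low-order bit of the event handle prevents the
+        * kernel from queuing a completion packet for this operation to
+        * whatever completion port the handle may already be associated
+        * with; completion is instead reported through the wait that is
+        * registered on the event below (post_completion_read_wait). */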
+       req->overlapped.hEvent = (HANDLE) ((DWORD) req->event_handle | 1);
+     }
+
+     /* Do 0-read */
+     result = ReadFile(handle->handle,
+                       &uv_zero_,
+                       0,
+                       NULL,
+                       &req->overlapped);
+
+     if (!result && GetLastError() != ERROR_IO_PENDING) {
+       /* Make this req pending reporting an error. */
+       SET_REQ_ERROR(req, GetLastError());
+       goto error;
+     }
+
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+       if (!req->event_handle) {
+         req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+         if (!req->event_handle) {
+           uv_fatal_error(GetLastError(), "CreateEvent");
+         }
+       }
+       if (req->wait_handle == INVALID_HANDLE_VALUE) {
+         if (!RegisterWaitForSingleObject(&req->wait_handle,
+             req->overlapped.hEvent, post_completion_read_wait, (void*) req,
+             INFINITE, WT_EXECUTEINWAITTHREAD)) {
+           SET_REQ_ERROR(req, GetLastError());
+           goto error;
+         }
+       }
+     }
+   }
+
+   /* Start the eof timer if there is one */
+   eof_timer_start(handle);
+   handle->flags |= UV_HANDLE_READ_PENDING;
+   handle->reqs_pending++;
+   return;
+
+ error:
+   uv_insert_pending_req(loop, (uv_req_t*)req);
+   handle->flags |= UV_HANDLE_READ_PENDING;
+   handle->reqs_pending++;
+ }
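+
+ /* The reads queued above are zero-byte reads: ReadFile with a length of
+  * zero completes once data arrives in the pipe, without consuming any of
+  * it or requiring a buffer to be held while the pipe is idle. The
+  * payload itself is picked up afterwards in uv_process_pipe_read_req,
+  * which asks the user's alloc_cb for a buffer only at that point. */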
+
+
+ static int uv_pipe_read_start_impl(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
+     uv_read_cb read_cb, uv_read2_cb read2_cb) {
+   uv_loop_t* loop = handle->loop;
+
+   if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+     uv__set_artificial_error(loop, UV_EINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_READING) {
+     uv__set_artificial_error(loop, UV_EALREADY);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_EOF) {
+     uv__set_artificial_error(loop, UV_EOF);
+     return -1;
+   }
+
+   handle->flags |= UV_HANDLE_READING;
+   handle->read_cb = read_cb;
+   handle->read2_cb = read2_cb;
+   handle->alloc_cb = alloc_cb;
+
+   /* If reading was stopped and then started again, there could still be a */
+   /* read request pending. */
+   if (!(handle->flags & UV_HANDLE_READ_PENDING))
+     uv_pipe_queue_read(loop, handle);
+
+   return 0;
+ }
+
+
+ int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
+     uv_read_cb read_cb) {
+   return uv_pipe_read_start_impl(handle, alloc_cb, read_cb, NULL);
+ }
+
+
+ int uv_pipe_read2_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
+     uv_read2_cb read_cb) {
+   return uv_pipe_read_start_impl(handle, alloc_cb, NULL, read_cb);
+ }
+
+
+ static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle,
+     uv_write_t* req) {
+   req->next_req = NULL;
+   if (handle->non_overlapped_writes_tail) {
+     req->next_req =
+         handle->non_overlapped_writes_tail->next_req;
+     handle->non_overlapped_writes_tail->next_req = (uv_req_t*)req;
+     handle->non_overlapped_writes_tail = req;
+   } else {
+     req->next_req = (uv_req_t*)req;
+     handle->non_overlapped_writes_tail = req;
+   }
+ }
+
+
+ static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) {
+   uv_write_t* req;
+
+   if (handle->non_overlapped_writes_tail) {
+     req = (uv_write_t*)handle->non_overlapped_writes_tail->next_req;
+
+     if (req == handle->non_overlapped_writes_tail) {
+       handle->non_overlapped_writes_tail = NULL;
+     } else {
+       handle->non_overlapped_writes_tail->next_req =
+           req->next_req;
+     }
+
+     return req;
+   } else {
+     /* queue empty */
+     return NULL;
+   }
+ }
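+
+ /* Together, these two helpers maintain the pending non-overlapped writes
+  * as a circular singly-linked list reached through a single tail
+  * pointer: tail->next_req is the head, so enqueueing at the tail and
+  * dequeueing from the head are both O(1) without a separate head field
+  * in the handle. */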
+
+
+ static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
+   uv_write_t* req = uv_remove_non_overlapped_write_req(handle);
+   if (req) {
+     if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
+                            req,
+                            WT_EXECUTELONGFUNCTION)) {
+       uv_fatal_error(GetLastError(), "QueueUserWorkItem");
+     }
+   }
+ }
+
+
+ static int uv_pipe_write_impl(uv_loop_t* loop, uv_write_t* req,
+     uv_pipe_t* handle, uv_buf_t bufs[], int bufcnt,
+     uv_stream_t* send_handle, uv_write_cb cb) {
+   int result;
+   uv_tcp_t* tcp_send_handle;
+   uv_write_t* ipc_header_req;
+   uv_ipc_frame_uv_stream ipc_frame;
+
+   if (bufcnt != 1 && (bufcnt != 0 || !send_handle)) {
+     uv__set_artificial_error(loop, UV_ENOTSUP);
+     return -1;
+   }
+
+   /* Only TCP handles are supported for sharing. */
+   if (send_handle && ((send_handle->type != UV_TCP) ||
+       (!(send_handle->flags & UV_HANDLE_BOUND) &&
+        !(send_handle->flags & UV_HANDLE_CONNECTION)))) {
+     uv__set_artificial_error(loop, UV_ENOTSUP);
+     return -1;
+   }
+
+   assert(handle->handle != INVALID_HANDLE_VALUE);
+
+   if (!(handle->flags & UV_HANDLE_CONNECTION)) {
+     uv__set_artificial_error(loop, UV_EINVAL);
+     return -1;
+   }
+
+   if (handle->flags & UV_HANDLE_SHUTTING) {
+     uv__set_artificial_error(loop, UV_EOF);
+     return -1;
+   }
+
+   uv_req_init(loop, (uv_req_t*) req);
+   req->type = UV_WRITE;
+   req->handle = (uv_stream_t*) handle;
+   req->cb = cb;
+   req->ipc_header = 0;
+   req->event_handle = NULL;
+   req->wait_handle = INVALID_HANDLE_VALUE;
+   memset(&req->overlapped, 0, sizeof(req->overlapped));
+
+   if (handle->ipc) {
+     assert(!(handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
+     ipc_frame.header.flags = 0;
+
+     /* Use the IPC framing protocol. */
+     if (send_handle) {
+       tcp_send_handle = (uv_tcp_t*)send_handle;
+
+       if (uv_tcp_duplicate_socket(tcp_send_handle, handle->ipc_pid,
+                                   &ipc_frame.socket_info)) {
+         return -1;
+       }
+       ipc_frame.header.flags |= UV_IPC_TCP_SERVER;
+
+       if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) {
+         ipc_frame.header.flags |= UV_IPC_TCP_CONNECTION;
+       }
+     }
+
+     if (bufcnt == 1) {
+       ipc_frame.header.flags |= UV_IPC_RAW_DATA;
+       ipc_frame.header.raw_data_length = bufs[0].len;
+     }
+
+     /*
+      * Use the provided req if we're only doing a single write.
+      * If we're doing multiple writes, use ipc_header_write_req to do
+      * the first write, and then use the provided req for the second write.
+      */
+     if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
+       ipc_header_req = req;
+     } else {
+       /*
+        * Try to use the preallocated write req if it's available.
+        * Otherwise allocate a new one.
+        */
+       if (handle->ipc_header_write_req.type != UV_WRITE) {
+         ipc_header_req = (uv_write_t*)&handle->ipc_header_write_req;
+       } else {
+         ipc_header_req = (uv_write_t*)malloc(sizeof(uv_write_t));
+         if (!ipc_header_req) {
+           uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+         }
+       }
+
+       uv_req_init(loop, (uv_req_t*) ipc_header_req);
+       ipc_header_req->type = UV_WRITE;
+       ipc_header_req->handle = (uv_stream_t*) handle;
+       ipc_header_req->cb = NULL;
+       ipc_header_req->ipc_header = 1;
+     }
+
+     /* Write the header or the whole frame. */
+     memset(&ipc_header_req->overlapped, 0, sizeof(ipc_header_req->overlapped));
+
+     result = WriteFile(handle->handle,
+                        &ipc_frame,
+                        ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
+                            sizeof(ipc_frame) : sizeof(ipc_frame.header),
+                        NULL,
+                        &ipc_header_req->overlapped);
+     if (!result && GetLastError() != ERROR_IO_PENDING) {
+       uv__set_sys_error(loop, GetLastError());
+       return -1;
+     }
+
+     if (result) {
+       /* Request completed immediately. */
+       ipc_header_req->queued_bytes = 0;
+     } else {
+       /* Request queued by the kernel. */
+       ipc_header_req->queued_bytes = ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
+           sizeof(ipc_frame) : sizeof(ipc_frame.header);
+       handle->write_queue_size += ipc_header_req->queued_bytes;
+     }
+
+     uv_ref(loop);
+     handle->reqs_pending++;
+     handle->write_reqs_pending++;
+
+     /* If we don't have any raw data to write - we're done. */
+     if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
+       return 0;
+     }
+   }
+
+   if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+     req->write_buffer = bufs[0];
+     uv_insert_non_overlapped_write_req(handle, req);
+     if (handle->write_reqs_pending == 0) {
+       uv_queue_non_overlapped_write(handle);
+     }
+
+     /* Request queued by the kernel. */
+     req->queued_bytes = uv_count_bufs(bufs, bufcnt);
+     handle->write_queue_size += req->queued_bytes;
+   } else {
+     result = WriteFile(handle->handle,
+                        bufs[0].base,
+                        bufs[0].len,
+                        NULL,
+                        &req->overlapped);
+
+     if (!result && GetLastError() != ERROR_IO_PENDING) {
+       uv__set_sys_error(loop, GetLastError());
+       return -1;
+     }
+
+     if (result) {
+       /* Request completed immediately. */
+       req->queued_bytes = 0;
+     } else {
+       /* Request queued by the kernel. */
+       req->queued_bytes = uv_count_bufs(bufs, bufcnt);
+       handle->write_queue_size += req->queued_bytes;
+     }
+
+     if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+       req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+       if (!req->event_handle) {
+         uv_fatal_error(GetLastError(), "CreateEvent");
+       }
+       if (!RegisterWaitForSingleObject(&req->wait_handle,
+           req->overlapped.hEvent, post_completion_write_wait, (void*) req,
+           INFINITE, WT_EXECUTEINWAITTHREAD)) {
+         uv__set_sys_error(loop, GetLastError());
+         return -1;
+       }
+     }
+   }
+
+   uv_ref(loop);
+   handle->reqs_pending++;
+   handle->write_reqs_pending++;
+
+   return 0;
+ }
1223
+
1224
+
1225
+ int uv_pipe_write(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
1226
+ uv_buf_t bufs[], int bufcnt, uv_write_cb cb) {
1227
+ return uv_pipe_write_impl(loop, req, handle, bufs, bufcnt, NULL, cb);
1228
+ }
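+ /*
+  * Illustrative caller (hypothetical, not part of this file): applications
+  * normally reach uv_pipe_write through the generic stream API, e.g.
+  *
+  *   uv_buf_t buf = uv_buf_init(data, len);
+  *   uv_write(&req, (uv_stream_t*) &pipe_handle, &buf, 1, write_cb);
+  */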
+
+
+ int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
+     uv_buf_t bufs[], int bufcnt, uv_stream_t* send_handle, uv_write_cb cb) {
+   if (!handle->ipc) {
+     uv__set_artificial_error(loop, UV_EINVAL);
+     return -1;
+   }
+
+   return uv_pipe_write_impl(loop, req, handle, bufs, bufcnt, send_handle, cb);
+ }
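+ /*
+  * Illustrative use (hypothetical sketch): send an accepted TCP connection
+  * to a child process over an ipc pipe via the generic uv_write2(), which
+  * dispatches here for named pipes:
+  *
+  *   uv_buf_t buf = uv_buf_init("x", 1);
+  *   uv_write2(&req, (uv_stream_t*) &ipc_pipe, &buf, 1,
+  *       (uv_stream_t*) &tcp_conn, write2_cb);
+  */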
+
+
+ static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_buf_t buf) {
+   /* If there is an eof timer running, we don't need it any more, */
+   /* so discard it. */
+   eof_timer_destroy(handle);
+
+   handle->flags |= UV_HANDLE_EOF;
+   uv_read_stop((uv_stream_t*) handle);
+
+   uv__set_artificial_error(loop, UV_EOF);
+   if (handle->read2_cb) {
+     handle->read2_cb(handle, -1, uv_null_buf_, UV_UNKNOWN_HANDLE);
+   } else {
+     handle->read_cb((uv_stream_t*) handle, -1, uv_null_buf_);
+   }
+ }
+
+
+ static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
+     uv_buf_t buf) {
+   /* If there is an eof timer running, we don't need it any more, */
+   /* so discard it. */
+   eof_timer_destroy(handle);
+
+   uv_read_stop((uv_stream_t*) handle);
+
+   uv__set_sys_error(loop, error);
+   if (handle->read2_cb) {
+     handle->read2_cb(handle, -1, buf, UV_UNKNOWN_HANDLE);
+   } else {
+     handle->read_cb((uv_stream_t*)handle, -1, buf);
+   }
+ }
+
+
+ static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
+     int error, uv_buf_t buf) {
+   if (error == ERROR_BROKEN_PIPE) {
+     uv_pipe_read_eof(loop, handle, buf);
+   } else {
+     uv_pipe_read_error(loop, handle, error, buf);
+   }
+ }
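+ /* ERROR_BROKEN_PIPE means the other end closed the pipe - the normal */
+ /* shutdown path - so it is surfaced as UV_EOF rather than as a system */
+ /* error. */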
+
+
+ void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_req_t* req) {
+   DWORD bytes, avail;
+   uv_buf_t buf;
+   uv_ipc_frame_uv_stream ipc_frame;
+
+   assert(handle->type == UV_NAMED_PIPE);
+
+   handle->flags &= ~UV_HANDLE_READ_PENDING;
+   eof_timer_stop(handle);
+
+   if (!REQ_SUCCESS(req)) {
+     /* An error occurred doing the 0-read. */
+     if (handle->flags & UV_HANDLE_READING) {
+       uv_pipe_read_error_or_eof(loop,
+                                 handle,
+                                 GET_REQ_ERROR(req),
+                                 uv_null_buf_);
+     }
+   } else {
+     /* Do non-blocking reads until the buffer is empty. */
+     while (handle->flags & UV_HANDLE_READING) {
+       if (!PeekNamedPipe(handle->handle,
+                          NULL,
+                          0,
+                          NULL,
+                          &avail,
+                          NULL)) {
+         uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
+         break;
+       }
+
+       if (avail == 0) {
+         /* There is nothing to read after all. */
+         break;
+       }
+
+       if (handle->ipc) {
+         /* Use the IPC framing protocol to read the incoming data. */
+         if (handle->remaining_ipc_rawdata_bytes == 0) {
+           /* We're reading a new frame. First, read the header. */
+           assert(avail >= sizeof(ipc_frame.header));
+
+           if (!ReadFile(handle->handle,
+                         &ipc_frame.header,
+                         sizeof(ipc_frame.header),
+                         &bytes,
+                         NULL)) {
+             uv_pipe_read_error_or_eof(loop, handle, GetLastError(),
+                 uv_null_buf_);
+             break;
+           }
+
+           assert(bytes == sizeof(ipc_frame.header));
+           assert(ipc_frame.header.flags <= (UV_IPC_TCP_SERVER |
+               UV_IPC_RAW_DATA | UV_IPC_TCP_CONNECTION));
+
+           if (ipc_frame.header.flags & UV_IPC_TCP_SERVER) {
+             assert(avail - sizeof(ipc_frame.header) >=
+                 sizeof(ipc_frame.socket_info));
+
+             /* Read the TCP socket info. */
+             if (!ReadFile(handle->handle,
+                           &ipc_frame.socket_info,
+                           sizeof(ipc_frame) - sizeof(ipc_frame.header),
+                           &bytes,
+                           NULL)) {
+               uv_pipe_read_error_or_eof(loop, handle, GetLastError(),
+                   uv_null_buf_);
+               break;
+             }
+
+             assert(bytes == sizeof(ipc_frame) - sizeof(ipc_frame.header));
+
+             /* Store the pending socket info. */
+             assert(!handle->pending_ipc_info.socket_info);
+             handle->pending_ipc_info.socket_info = (WSAPROTOCOL_INFOW*)
+                 malloc(sizeof(*(handle->pending_ipc_info.socket_info)));
+             if (!handle->pending_ipc_info.socket_info) {
+               uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+             }
+
+             *(handle->pending_ipc_info.socket_info) = ipc_frame.socket_info;
+             handle->pending_ipc_info.tcp_connection =
+                 ipc_frame.header.flags & UV_IPC_TCP_CONNECTION;
+           }
+
+           if (ipc_frame.header.flags & UV_IPC_RAW_DATA) {
+             handle->remaining_ipc_rawdata_bytes =
+                 ipc_frame.header.raw_data_length;
+             continue;
+           }
+         } else {
+           avail = min(avail, (DWORD)handle->remaining_ipc_rawdata_bytes);
+         }
+       }
+
+       buf = handle->alloc_cb((uv_handle_t*) handle, avail);
+       assert(buf.len > 0);
+
+       if (ReadFile(handle->handle,
+                    buf.base,
+                    buf.len,
+                    &bytes,
+                    NULL)) {
+         /* Successful read. */
+         if (handle->ipc) {
+           assert(handle->remaining_ipc_rawdata_bytes >= bytes);
+           handle->remaining_ipc_rawdata_bytes =
+               handle->remaining_ipc_rawdata_bytes - bytes;
+           if (handle->read2_cb) {
+             handle->read2_cb(handle, bytes, buf,
+                 handle->pending_ipc_info.socket_info ? UV_TCP :
+                     UV_UNKNOWN_HANDLE);
+           } else if (handle->read_cb) {
+             handle->read_cb((uv_stream_t*)handle, bytes, buf);
+           }
+
+           if (handle->pending_ipc_info.socket_info) {
+             free(handle->pending_ipc_info.socket_info);
+             handle->pending_ipc_info.socket_info = NULL;
+           }
+         } else {
+           handle->read_cb((uv_stream_t*)handle, bytes, buf);
+         }
+
+         /* Read again only if bytes == buf.len */
+         if (bytes < buf.len) {
+           break;
+         }
+       } else {
+         uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
+         break;
+       }
+     }
+
+     /* Post another 0-read if still reading and not closing. */
+     if ((handle->flags & UV_HANDLE_READING) &&
+         !(handle->flags & UV_HANDLE_READ_PENDING)) {
+       uv_pipe_queue_read(loop, handle);
+     }
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
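+ /* Reads are driven by a zero-byte overlapped ReadFile that acts as a */
+ /* readiness signal: when it completes, PeekNamedPipe reports how much */
+ /* data is buffered and the data is drained with synchronous ReadFile */
+ /* calls above. */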
+
+
+ void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_write_t* req) {
+   assert(handle->type == UV_NAMED_PIPE);
+
+   assert(handle->write_queue_size >= req->queued_bytes);
+   handle->write_queue_size -= req->queued_bytes;
+
+   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+     if (req->wait_handle != INVALID_HANDLE_VALUE) {
+       UnregisterWait(req->wait_handle);
+       req->wait_handle = INVALID_HANDLE_VALUE;
+     }
+     if (req->event_handle) {
+       CloseHandle(req->event_handle);
+       req->event_handle = NULL;
+     }
+   }
+
+   if (req->ipc_header) {
+     if (req == &handle->ipc_header_write_req) {
+       req->type = UV_UNKNOWN_REQ;
+     } else {
+       free(req);
+     }
+   } else {
+     if (req->cb) {
+       if (!REQ_SUCCESS(req)) {
+         uv__set_sys_error(loop, GET_REQ_ERROR(req));
+         ((uv_write_cb)req->cb)(req, -1);
+       } else {
+         ((uv_write_cb)req->cb)(req, 0);
+       }
+     }
+   }
+
+   handle->write_reqs_pending--;
+
+   if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
+       handle->non_overlapped_writes_tail) {
+     assert(handle->write_reqs_pending > 0);
+     uv_queue_non_overlapped_write(handle);
+   }
+
+   if (handle->write_reqs_pending == 0 &&
+       handle->flags & UV_HANDLE_SHUTTING) {
+     uv_want_endgame(loop, (uv_handle_t*)handle);
+   }
+
+   uv_unref(loop);
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_req_t* raw_req) {
+   uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req;
+
+   assert(handle->type == UV_NAMED_PIPE);
+
+   if (REQ_SUCCESS(req)) {
+     assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+     req->next_pending = handle->pending_accepts;
+     handle->pending_accepts = req;
+
+     if (handle->connection_cb) {
+       handle->connection_cb((uv_stream_t*)handle, 0);
+     }
+   } else {
+     if (req->pipeHandle != INVALID_HANDLE_VALUE) {
+       CloseHandle(req->pipeHandle);
+       req->pipeHandle = INVALID_HANDLE_VALUE;
+     }
+     if (!(handle->flags & UV_HANDLE_CLOSING)) {
+       uv_pipe_queue_accept(loop, handle, req, FALSE);
+     }
+   }
+
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_connect_t* req) {
+   assert(handle->type == UV_NAMED_PIPE);
+
+   if (req->cb) {
+     if (REQ_SUCCESS(req)) {
+       uv_pipe_connection_init(handle);
+       ((uv_connect_cb)req->cb)(req, 0);
+     } else {
+       uv__set_sys_error(loop, GET_REQ_ERROR(req));
+       ((uv_connect_cb)req->cb)(req, -1);
+     }
+   }
+
+   uv_unref(loop);
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
+     uv_shutdown_t* req) {
+   assert(handle->type == UV_NAMED_PIPE);
+
+   /* Initialize and optionally start the eof timer. */
+   /* This makes no sense if we've already seen EOF. */
+   if (!(handle->flags & UV_HANDLE_EOF)) {
+     eof_timer_init(handle);
+
+     /* If a read is pending, start the timer right now. */
+     /* Otherwise uv_pipe_queue_read will start it. */
+     if (handle->flags & UV_HANDLE_READ_PENDING) {
+       eof_timer_start(handle);
+     }
+   }
+
+   if (req->cb) {
+     req->cb(req, 0);
+   }
+
+   uv_unref(loop);
+   DECREASE_PENDING_REQ_COUNT(handle);
+ }
+
+
+ static void eof_timer_init(uv_pipe_t* pipe) {
+   int r;
+
+   assert(pipe->eof_timer == NULL);
+   assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+   pipe->eof_timer = (uv_timer_t*) malloc(sizeof *pipe->eof_timer);
+   if (pipe->eof_timer == NULL) {
+     uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
+   }
+
+   r = uv_timer_init(pipe->loop, pipe->eof_timer);
+   assert(r == 0); /* timers can't fail */
+   pipe->eof_timer->data = pipe;
+   uv_unref(pipe->loop);
+ }
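+ /* The eof timer is an internal handle, so uv_unref above keeps it from */
+ /* holding the loop open by itself; eof_timer_destroy takes a matching */
+ /* uv_ref before closing it. */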
+
+
+ static void eof_timer_start(uv_pipe_t* pipe) {
+   assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+   if (pipe->eof_timer != NULL) {
+     uv_timer_start(pipe->eof_timer, eof_timer_cb, eof_timeout, 0);
+   }
+ }
+
+
+ static void eof_timer_stop(uv_pipe_t* pipe) {
+   assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+   if (pipe->eof_timer != NULL) {
+     uv_timer_stop(pipe->eof_timer);
+   }
+ }
+
+
+ static void eof_timer_cb(uv_timer_t* timer, int status) {
+   uv_pipe_t* pipe = (uv_pipe_t*) timer->data;
+   uv_loop_t* loop = timer->loop;
+
+   assert(status == 0); /* timers can't fail */
+   assert(pipe->type == UV_NAMED_PIPE);
+
+   /* This should always be true, since we start the timer only */
+   /* in uv_pipe_queue_read after successfully calling ReadFile, */
+   /* or in uv_process_pipe_shutdown_req if a read is pending, */
+   /* and we always immediately stop the timer in */
+   /* uv_process_pipe_read_req. */
+   assert(pipe->flags & UV_HANDLE_READ_PENDING);
+
+   /* If there are many packets coming off the iocp then the timer callback */
+   /* may run before the read request has come off the queue. Therefore we */
+   /* check here whether the read request has already completed but has yet */
+   /* to be processed. */
+   if ((pipe->flags & UV_HANDLE_READ_PENDING) &&
+       HasOverlappedIoCompleted(&pipe->read_req.overlapped)) {
+     return;
+   }
+
+   /* Force the pipe closed. */
+   CloseHandle(pipe->handle);
+   pipe->handle = INVALID_HANDLE_VALUE;
+
+   /* Stop reading, so the pending read that is going to fail will */
+   /* not be reported to the user. */
+   uv_read_stop((uv_stream_t*) pipe);
+
+   /* Report the eof and update flags. This will get reported even if the */
+   /* user stopped reading in the meantime. TODO: is that okay? */
+   uv_pipe_read_eof(loop, pipe, uv_null_buf_);
+ }
+
+
+ static void eof_timer_destroy(uv_pipe_t* pipe) {
+   assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+   if (pipe->eof_timer) {
+     uv_ref(pipe->loop);
+     uv_close((uv_handle_t*) pipe->eof_timer, eof_timer_close_cb);
+     pipe->eof_timer = NULL;
+   }
+ }
+
+
+ static void eof_timer_close_cb(uv_handle_t* handle) {
+   assert(handle->type == UV_TIMER);
+   free(handle);
+ }
+
+
+ void uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
+   HANDLE os_handle = (HANDLE)_get_osfhandle(file);
+
+   if (os_handle == INVALID_HANDLE_VALUE ||
+       uv_set_pipe_handle(pipe->loop, pipe, os_handle, 0) == -1) {
+     return;
+   }
+
+   uv_pipe_connection_init(pipe);
+   pipe->handle = os_handle;
+
+   if (pipe->ipc) {
+     assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
+     pipe->ipc_pid = uv_parent_pid();
+     assert(pipe->ipc_pid != -1);
+   }
+ }
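+ /*
+  * Illustrative use (hypothetical, not part of this file): adopt an
+  * inherited CRT file descriptor, e.g. an ipc channel set up by a parent
+  * process, as a pipe handle:
+  *
+  *   uv_pipe_t channel;
+  *   uv_pipe_init(uv_default_loop(), &channel, 1);
+  *   uv_pipe_open(&channel, 0);
+  */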