asyncengine 0.0.1.testing1 → 0.0.2.alpha1

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (251)
  1. data/README.markdown +3 -0
  2. data/Rakefile +38 -0
  3. data/asyncengine.gemspec +8 -4
  4. data/ext/asyncengine/ae_call_from_other_thread.c +106 -0
  5. data/ext/asyncengine/ae_call_from_other_thread.h +12 -0
  6. data/ext/asyncengine/ae_handle_common.c +193 -48
  7. data/ext/asyncengine/ae_handle_common.h +40 -13
  8. data/ext/asyncengine/ae_ip_utils.c +246 -0
  9. data/ext/asyncengine/ae_ip_utils.h +25 -0
  10. data/ext/asyncengine/ae_next_tick.c +81 -21
  11. data/ext/asyncengine/ae_next_tick.h +4 -2
  12. data/ext/asyncengine/ae_resolver.c +156 -0
  13. data/ext/asyncengine/ae_resolver.h +10 -0
  14. data/ext/asyncengine/ae_tcp.c +908 -0
  15. data/ext/asyncengine/ae_tcp.h +20 -0
  16. data/ext/asyncengine/ae_timer.c +355 -81
  17. data/ext/asyncengine/ae_timer.h +11 -4
  18. data/ext/asyncengine/ae_udp.c +579 -13
  19. data/ext/asyncengine/ae_udp.h +15 -2
  20. data/ext/asyncengine/ae_utils.c +192 -0
  21. data/ext/asyncengine/ae_utils.h +16 -0
  22. data/ext/asyncengine/asyncengine_ruby.c +469 -26
  23. data/ext/asyncengine/asyncengine_ruby.h +49 -11
  24. data/ext/asyncengine/debug.h +68 -0
  25. data/ext/asyncengine/extconf.rb +26 -2
  26. data/ext/asyncengine/ip_parser.c +5954 -0
  27. data/ext/asyncengine/ip_parser.h +16 -0
  28. data/ext/asyncengine/libuv/AUTHORS +16 -0
  29. data/ext/asyncengine/libuv/common.gypi +4 -4
  30. data/ext/asyncengine/libuv/config-mingw.mk +6 -6
  31. data/ext/asyncengine/libuv/config-unix.mk +13 -13
  32. data/ext/asyncengine/libuv/gyp_uv +5 -1
  33. data/ext/asyncengine/libuv/ibc_tests/exec_test.sh +8 -0
  34. data/ext/asyncengine/libuv/ibc_tests/uv_shutdown_write_issue.c +171 -0
  35. data/ext/asyncengine/libuv/ibc_tests/uv_tcp_close_while_connecting.c +102 -0
  36. data/ext/asyncengine/libuv/include/uv-private/ngx-queue.h +3 -1
  37. data/ext/asyncengine/libuv/include/uv-private/uv-unix.h +103 -50
  38. data/ext/asyncengine/libuv/include/uv-private/uv-win.h +76 -24
  39. data/ext/asyncengine/libuv/include/uv.h +353 -88
  40. data/ext/asyncengine/libuv/src/ares/ares__close_sockets.o +0 -0
  41. data/ext/asyncengine/libuv/src/ares/ares__get_hostent.o +0 -0
  42. data/ext/asyncengine/libuv/src/ares/ares__read_line.o +0 -0
  43. data/ext/asyncengine/libuv/src/ares/ares__timeval.o +0 -0
  44. data/ext/asyncengine/libuv/src/ares/ares_cancel.o +0 -0
  45. data/ext/asyncengine/libuv/src/ares/ares_data.o +0 -0
  46. data/ext/asyncengine/libuv/src/ares/ares_destroy.o +0 -0
  47. data/ext/asyncengine/libuv/src/ares/ares_expand_name.o +0 -0
  48. data/ext/asyncengine/libuv/src/ares/ares_expand_string.o +0 -0
  49. data/ext/asyncengine/libuv/src/ares/ares_fds.o +0 -0
  50. data/ext/asyncengine/libuv/src/ares/ares_free_hostent.o +0 -0
  51. data/ext/asyncengine/libuv/src/ares/ares_free_string.o +0 -0
  52. data/ext/asyncengine/libuv/src/ares/ares_gethostbyaddr.o +0 -0
  53. data/ext/asyncengine/libuv/src/ares/ares_gethostbyname.o +0 -0
  54. data/ext/asyncengine/libuv/src/ares/ares_getnameinfo.o +0 -0
  55. data/ext/asyncengine/libuv/src/ares/ares_getopt.o +0 -0
  56. data/ext/asyncengine/libuv/src/ares/ares_getsock.o +0 -0
  57. data/ext/asyncengine/libuv/src/ares/ares_init.o +0 -0
  58. data/ext/asyncengine/libuv/src/ares/ares_library_init.o +0 -0
  59. data/ext/asyncengine/libuv/src/ares/ares_llist.o +0 -0
  60. data/ext/asyncengine/libuv/src/ares/ares_mkquery.o +0 -0
  61. data/ext/asyncengine/libuv/src/ares/ares_nowarn.o +0 -0
  62. data/ext/asyncengine/libuv/src/ares/ares_options.o +0 -0
  63. data/ext/asyncengine/libuv/src/ares/ares_parse_a_reply.o +0 -0
  64. data/ext/asyncengine/libuv/src/ares/ares_parse_aaaa_reply.o +0 -0
  65. data/ext/asyncengine/libuv/src/ares/ares_parse_mx_reply.o +0 -0
  66. data/ext/asyncengine/libuv/src/ares/ares_parse_ns_reply.o +0 -0
  67. data/ext/asyncengine/libuv/src/ares/ares_parse_ptr_reply.o +0 -0
  68. data/ext/asyncengine/libuv/src/ares/ares_parse_srv_reply.o +0 -0
  69. data/ext/asyncengine/libuv/src/ares/ares_parse_txt_reply.o +0 -0
  70. data/ext/asyncengine/libuv/src/ares/ares_process.o +0 -0
  71. data/ext/asyncengine/libuv/src/ares/ares_query.o +0 -0
  72. data/ext/asyncengine/libuv/src/ares/ares_search.o +0 -0
  73. data/ext/asyncengine/libuv/src/ares/ares_send.o +0 -0
  74. data/ext/asyncengine/libuv/src/ares/ares_strcasecmp.o +0 -0
  75. data/ext/asyncengine/libuv/src/ares/ares_strdup.o +0 -0
  76. data/ext/asyncengine/libuv/src/ares/ares_strerror.o +0 -0
  77. data/ext/asyncengine/libuv/src/ares/ares_timeout.o +0 -0
  78. data/ext/asyncengine/libuv/src/ares/ares_version.o +0 -0
  79. data/ext/asyncengine/libuv/src/ares/ares_writev.o +0 -0
  80. data/ext/asyncengine/libuv/src/ares/bitncmp.o +0 -0
  81. data/ext/asyncengine/libuv/src/ares/inet_net_pton.o +0 -0
  82. data/ext/asyncengine/libuv/src/ares/inet_ntop.o +0 -0
  83. data/ext/asyncengine/libuv/src/cares.c +225 -0
  84. data/ext/asyncengine/libuv/src/cares.o +0 -0
  85. data/ext/asyncengine/libuv/src/fs-poll.c +237 -0
  86. data/ext/asyncengine/libuv/src/fs-poll.o +0 -0
  87. data/ext/asyncengine/libuv/src/unix/async.c +78 -17
  88. data/ext/asyncengine/libuv/src/unix/async.o +0 -0
  89. data/ext/asyncengine/libuv/src/unix/core.c +305 -213
  90. data/ext/asyncengine/libuv/src/unix/core.o +0 -0
  91. data/ext/asyncengine/libuv/src/unix/cygwin.c +1 -1
  92. data/ext/asyncengine/libuv/src/unix/darwin.c +2 -1
  93. data/ext/asyncengine/libuv/src/unix/dl.c +36 -44
  94. data/ext/asyncengine/libuv/src/unix/dl.o +0 -0
  95. data/ext/asyncengine/libuv/src/unix/eio/eio.o +0 -0
  96. data/ext/asyncengine/libuv/src/unix/error.c +6 -0
  97. data/ext/asyncengine/libuv/src/unix/error.o +0 -0
  98. data/ext/asyncengine/libuv/src/unix/ev/ev.c +8 -4
  99. data/ext/asyncengine/libuv/src/unix/ev/ev.o +0 -0
  100. data/ext/asyncengine/libuv/src/unix/freebsd.c +1 -1
  101. data/ext/asyncengine/libuv/src/unix/fs.c +25 -33
  102. data/ext/asyncengine/libuv/src/unix/fs.o +0 -0
  103. data/ext/asyncengine/libuv/src/unix/internal.h +50 -31
  104. data/ext/asyncengine/libuv/src/unix/kqueue.c +2 -7
  105. data/ext/asyncengine/libuv/src/unix/linux/core.o +0 -0
  106. data/ext/asyncengine/libuv/src/unix/linux/inotify.c +12 -14
  107. data/ext/asyncengine/libuv/src/unix/linux/inotify.o +0 -0
  108. data/ext/asyncengine/libuv/src/unix/linux/{core.c → linux-core.c} +1 -1
  109. data/ext/asyncengine/libuv/src/unix/linux/linux-core.o +0 -0
  110. data/ext/asyncengine/libuv/src/unix/linux/syscalls.c +147 -1
  111. data/ext/asyncengine/libuv/src/unix/linux/syscalls.h +39 -2
  112. data/ext/asyncengine/libuv/src/unix/linux/syscalls.o +0 -0
  113. data/ext/asyncengine/libuv/src/unix/loop-watcher.c +63 -0
  114. data/ext/asyncengine/libuv/src/unix/loop-watcher.o +0 -0
  115. data/ext/asyncengine/libuv/src/unix/loop.c +29 -6
  116. data/ext/asyncengine/libuv/src/unix/loop.o +0 -0
  117. data/ext/asyncengine/libuv/src/unix/netbsd.c +1 -1
  118. data/ext/asyncengine/libuv/src/unix/openbsd.c +1 -1
  119. data/ext/asyncengine/libuv/src/unix/pipe.c +31 -36
  120. data/ext/asyncengine/libuv/src/unix/pipe.o +0 -0
  121. data/ext/asyncengine/libuv/src/unix/poll.c +116 -0
  122. data/ext/asyncengine/libuv/src/unix/poll.o +0 -0
  123. data/ext/asyncengine/libuv/src/unix/process.c +193 -115
  124. data/ext/asyncengine/libuv/src/unix/process.o +0 -0
  125. data/ext/asyncengine/libuv/src/unix/stream.c +146 -153
  126. data/ext/asyncengine/libuv/src/unix/stream.o +0 -0
  127. data/ext/asyncengine/libuv/src/unix/sunos.c +45 -36
  128. data/ext/asyncengine/libuv/src/unix/tcp.c +6 -5
  129. data/ext/asyncengine/libuv/src/unix/tcp.o +0 -0
  130. data/ext/asyncengine/libuv/src/unix/thread.c +82 -25
  131. data/ext/asyncengine/libuv/src/unix/thread.o +0 -0
  132. data/ext/asyncengine/libuv/src/unix/timer.c +69 -58
  133. data/ext/asyncengine/libuv/src/unix/timer.o +0 -0
  134. data/ext/asyncengine/libuv/src/unix/tty.c +3 -3
  135. data/ext/asyncengine/libuv/src/unix/tty.o +0 -0
  136. data/ext/asyncengine/libuv/src/unix/udp.c +57 -66
  137. data/ext/asyncengine/libuv/src/unix/udp.o +0 -0
  138. data/ext/asyncengine/libuv/src/unix/uv-eio.c +33 -50
  139. data/ext/asyncengine/libuv/src/unix/uv-eio.o +0 -0
  140. data/ext/asyncengine/libuv/src/uv-common.c +68 -38
  141. data/ext/asyncengine/libuv/src/uv-common.h +104 -20
  142. data/ext/asyncengine/libuv/src/uv-common.o +0 -0
  143. data/ext/asyncengine/libuv/src/win/async.c +20 -17
  144. data/ext/asyncengine/libuv/src/win/core.c +44 -31
  145. data/ext/asyncengine/libuv/src/win/dl.c +40 -36
  146. data/ext/asyncengine/libuv/src/win/error.c +21 -1
  147. data/ext/asyncengine/libuv/src/win/fs-event.c +19 -21
  148. data/ext/asyncengine/libuv/src/win/fs.c +541 -189
  149. data/ext/asyncengine/libuv/src/win/getaddrinfo.c +56 -63
  150. data/ext/asyncengine/libuv/src/win/handle-inl.h +145 -0
  151. data/ext/asyncengine/libuv/src/win/handle.c +26 -101
  152. data/ext/asyncengine/libuv/src/win/internal.h +92 -107
  153. data/ext/asyncengine/libuv/src/win/loop-watcher.c +6 -14
  154. data/ext/asyncengine/libuv/src/win/pipe.c +78 -64
  155. data/ext/asyncengine/libuv/src/win/poll.c +618 -0
  156. data/ext/asyncengine/libuv/src/win/process-stdio.c +479 -0
  157. data/ext/asyncengine/libuv/src/win/process.c +147 -274
  158. data/ext/asyncengine/libuv/src/win/req-inl.h +225 -0
  159. data/ext/asyncengine/libuv/src/win/req.c +0 -149
  160. data/ext/asyncengine/libuv/src/{unix/check.c → win/stream-inl.h} +31 -42
  161. data/ext/asyncengine/libuv/src/win/stream.c +9 -43
  162. data/ext/asyncengine/libuv/src/win/tcp.c +200 -82
  163. data/ext/asyncengine/libuv/src/win/thread.c +42 -2
  164. data/ext/asyncengine/libuv/src/win/threadpool.c +3 -2
  165. data/ext/asyncengine/libuv/src/win/timer.c +13 -63
  166. data/ext/asyncengine/libuv/src/win/tty.c +26 -20
  167. data/ext/asyncengine/libuv/src/win/udp.c +26 -17
  168. data/ext/asyncengine/libuv/src/win/util.c +312 -167
  169. data/ext/asyncengine/libuv/src/win/winapi.c +16 -1
  170. data/ext/asyncengine/libuv/src/win/winapi.h +33 -9
  171. data/ext/asyncengine/libuv/src/win/winsock.c +88 -1
  172. data/ext/asyncengine/libuv/src/win/winsock.h +36 -3
  173. data/ext/asyncengine/libuv/test/benchmark-ares.c +16 -17
  174. data/ext/asyncengine/libuv/test/benchmark-fs-stat.c +164 -0
  175. data/ext/asyncengine/libuv/test/benchmark-list.h +9 -0
  176. data/ext/asyncengine/libuv/{src/unix/prepare.c → test/benchmark-loop-count.c} +42 -33
  177. data/ext/asyncengine/libuv/test/benchmark-million-timers.c +65 -0
  178. data/ext/asyncengine/libuv/test/benchmark-pound.c +1 -1
  179. data/ext/asyncengine/libuv/test/benchmark-sizes.c +2 -0
  180. data/ext/asyncengine/libuv/test/benchmark-spawn.c +7 -1
  181. data/ext/asyncengine/libuv/test/benchmark-udp-packet-storm.c +1 -1
  182. data/ext/asyncengine/libuv/test/echo-server.c +8 -0
  183. data/ext/asyncengine/libuv/test/run-tests.c +30 -0
  184. data/ext/asyncengine/libuv/test/runner-unix.c +6 -26
  185. data/ext/asyncengine/libuv/test/runner-win.c +5 -63
  186. data/ext/asyncengine/libuv/test/runner.c +10 -1
  187. data/ext/asyncengine/libuv/test/task.h +0 -8
  188. data/ext/asyncengine/libuv/test/test-async.c +43 -141
  189. data/ext/asyncengine/libuv/test/test-callback-order.c +76 -0
  190. data/ext/asyncengine/libuv/test/test-counters-init.c +2 -3
  191. data/ext/asyncengine/libuv/test/test-dlerror.c +17 -8
  192. data/ext/asyncengine/libuv/test/test-fs-event.c +31 -39
  193. data/ext/asyncengine/libuv/test/test-fs-poll.c +146 -0
  194. data/ext/asyncengine/libuv/test/test-fs.c +114 -2
  195. data/ext/asyncengine/libuv/test/test-gethostbyname.c +8 -8
  196. data/ext/asyncengine/libuv/test/test-hrtime.c +18 -15
  197. data/ext/asyncengine/libuv/test/test-ipc.c +8 -2
  198. data/ext/asyncengine/libuv/test/test-list.h +59 -9
  199. data/ext/asyncengine/libuv/test/test-loop-handles.c +2 -25
  200. data/ext/asyncengine/libuv/{src/unix/idle.c → test/test-poll-close.c} +37 -39
  201. data/ext/asyncengine/libuv/test/test-poll.c +573 -0
  202. data/ext/asyncengine/libuv/test/test-ref.c +79 -63
  203. data/ext/asyncengine/libuv/test/test-run-once.c +15 -11
  204. data/ext/asyncengine/libuv/test/test-semaphore.c +111 -0
  205. data/ext/asyncengine/libuv/test/test-spawn.c +368 -20
  206. data/ext/asyncengine/libuv/test/test-stdio-over-pipes.c +25 -35
  207. data/ext/asyncengine/libuv/test/test-tcp-close-while-connecting.c +80 -0
  208. data/ext/asyncengine/libuv/test/test-tcp-close.c +1 -1
  209. data/ext/asyncengine/libuv/test/test-tcp-connect-error-after-write.c +95 -0
  210. data/ext/asyncengine/libuv/test/test-tcp-connect-timeout.c +85 -0
  211. data/ext/asyncengine/libuv/test/test-tcp-shutdown-after-write.c +131 -0
  212. data/ext/asyncengine/libuv/test/test-tcp-write-error.c +2 -2
  213. data/ext/asyncengine/libuv/test/test-tcp-writealot.c +29 -54
  214. data/ext/asyncengine/libuv/test/test-timer-again.c +1 -1
  215. data/ext/asyncengine/libuv/test/test-timer.c +23 -1
  216. data/ext/asyncengine/libuv/test/test-udp-options.c +1 -1
  217. data/ext/asyncengine/libuv/test/{test-eio-overflow.c → test-walk-handles.c} +31 -44
  218. data/ext/asyncengine/libuv/uv.gyp +26 -9
  219. data/ext/asyncengine/rb_utilities.c +54 -0
  220. data/ext/asyncengine/rb_utilities.h +63 -0
  221. data/lib/asyncengine.rb +45 -38
  222. data/lib/asyncengine/asyncengine_ext.so +0 -0
  223. data/lib/asyncengine/debug.rb +37 -0
  224. data/lib/asyncengine/handle.rb +9 -0
  225. data/lib/asyncengine/tcp.rb +28 -0
  226. data/lib/asyncengine/timer.rb +18 -28
  227. data/lib/asyncengine/udp.rb +29 -0
  228. data/lib/asyncengine/utils.rb +32 -0
  229. data/lib/asyncengine/uv_error.rb +17 -0
  230. data/lib/asyncengine/version.rb +9 -1
  231. data/test/ae_test_helper.rb +62 -0
  232. data/test/test_basic.rb +169 -0
  233. data/test/test_call_from_other_thread.rb +55 -0
  234. data/test/test_error.rb +92 -0
  235. data/test/test_ip_utils.rb +44 -0
  236. data/test/test_next_tick.rb +37 -0
  237. data/test/test_resolver.rb +51 -0
  238. data/test/test_threads.rb +69 -0
  239. data/test/test_timer.rb +95 -0
  240. data/test/test_udp.rb +216 -0
  241. data/test/test_utils.rb +49 -0
  242. metadata +84 -57
  243. data/ext/asyncengine/libuv/mkmf.log +0 -24
  244. data/ext/asyncengine/libuv/src/unix/cares.c +0 -194
  245. data/ext/asyncengine/libuv/src/unix/cares.o +0 -0
  246. data/ext/asyncengine/libuv/src/unix/check.o +0 -0
  247. data/ext/asyncengine/libuv/src/unix/idle.o +0 -0
  248. data/ext/asyncengine/libuv/src/unix/prepare.o +0 -0
  249. data/ext/asyncengine/libuv/src/win/cares.c +0 -290
  250. data/lib/asyncengine/errors.rb +0 -5
  251. data/lib/asyncengine/next_tick.rb +0 -24
data/ext/asyncengine/libuv/src/win/poll.c (new file)
@@ -0,0 +1,618 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
+  {0xe70f1aa0, 0xab8b, 0x11cf,
+      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
+  {0xf9eab0c0, 0x26d4, 0x11d0,
+      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
+  {0x9fc48064, 0x7298, 0x43e4,
+      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
+};
+
+typedef struct uv_single_fd_set_s {
+  unsigned int fd_count;
+  SOCKET fd_array[1];
+} uv_single_fd_set_t;
+
+
+static OVERLAPPED overlapped_dummy_;
+static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
+
+
+static void uv__init_overlapped_dummy(void) {
+  HANDLE event;
+
+  event = CreateEvent(NULL, TRUE, TRUE, NULL);
+  if (event == NULL)
+    uv_fatal_error(GetLastError(), "CreateEvent");
+
+  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
+  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
+}
+
+
+static OVERLAPPED* uv__get_overlapped_dummy() {
+  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
+  return &overlapped_dummy_;
+}
+
+
+static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  uv_req_t* req;
+  AFD_POLL_INFO* afd_poll_info;
+  DWORD result;
+
+  /* Find a yet unsubmitted req to submit. */
+  if (handle->submitted_events_1 == 0) {
+    req = &handle->poll_req_1;
+    afd_poll_info = &handle->afd_poll_info_1;
+    handle->submitted_events_1 = handle->events;
+    handle->mask_events_1 = 0;
+    handle->mask_events_2 = handle->events;
+  } else if (handle->submitted_events_2 == 0) {
+    req = &handle->poll_req_2;
+    afd_poll_info = &handle->afd_poll_info_2;
+    handle->submitted_events_2 = handle->events;
+    handle->mask_events_1 = handle->events;
+    handle->mask_events_2 = 0;
+  } else {
+    assert(0);
+  }
+
+  /* Setting Exclusive to TRUE makes the other poll request return if there */
+  /* is any. */
+  afd_poll_info->Exclusive = TRUE;
+  afd_poll_info->NumberOfHandles = 1;
+  afd_poll_info->Timeout.QuadPart = INT64_MAX;
+  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
+  afd_poll_info->Handles[0].Status = 0;
+  afd_poll_info->Handles[0].Events = 0;
+
+  if (handle->events & UV_READABLE) {
+    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
+        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
+  }
+  if (handle->events & UV_WRITABLE) {
+    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
+  }
+
+  memset(&req->overlapped, 0, sizeof req->overlapped);
+
+  result = uv_msafd_poll((SOCKET) handle->peer_socket,
+                         afd_poll_info,
+                         &req->overlapped);
+  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
+    /* Queue this req, reporting an error. */
+    SET_REQ_ERROR(req, WSAGetLastError());
+    uv_insert_pending_req(loop, req);
+  }
+}
+
+
+static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  AFD_POLL_INFO afd_poll_info;
+  DWORD result;
+
+  afd_poll_info.Exclusive = TRUE;
+  afd_poll_info.NumberOfHandles = 1;
+  afd_poll_info.Timeout.QuadPart = INT64_MAX;
+  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
+  afd_poll_info.Handles[0].Status = 0;
+  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
+
+  result = uv_msafd_poll(handle->socket,
+                         &afd_poll_info,
+                         uv__get_overlapped_dummy());
+
+  if (result == SOCKET_ERROR) {
+    DWORD error = WSAGetLastError();
+    if (error != WSA_IO_PENDING) {
+      uv__set_sys_error(loop, WSAGetLastError());
+      return -1;
+    }
+  }
+
+  return 0;
+}
+
+
+static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+    uv_req_t* req) {
+  unsigned char mask_events;
+  AFD_POLL_INFO* afd_poll_info;
+
+  if (req == &handle->poll_req_1) {
+    afd_poll_info = &handle->afd_poll_info_1;
+    handle->submitted_events_1 = 0;
+    mask_events = handle->mask_events_1;
+  } else if (req == &handle->poll_req_2) {
+    afd_poll_info = &handle->afd_poll_info_2;
+    handle->submitted_events_2 = 0;
+    mask_events = handle->mask_events_2;
+  } else {
+    assert(0);
+  }
+
+  /* Report an error unless the select was just interrupted. */
+  if (!REQ_SUCCESS(req)) {
+    DWORD error = GET_REQ_SOCK_ERROR(req);
+    if (error != WSAEINTR && handle->events != 0) {
+      handle->events = 0; /* Stop the watcher */
+      uv__set_sys_error(loop, error);
+      handle->poll_cb(handle, -1, 0);
+    }
+
+  } else if (afd_poll_info->NumberOfHandles >= 1) {
+    unsigned char events = 0;
+
+    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
+        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
+      events |= UV_READABLE;
+    }
+    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
+        AFD_POLL_CONNECT_FAIL)) != 0) {
+      events |= UV_WRITABLE;
+    }
+
+    events &= handle->events & ~mask_events;
+
+    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
+      /* Stop polling. */
+      handle->events = 0;
+      uv__handle_stop(handle);
+    }
+
+    if (events != 0) {
+      handle->poll_cb(handle, 0, events);
+    }
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__fast_poll_submit_poll_req(loop, handle);
+  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
+             handle->submitted_events_1 == 0 &&
+             handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
+  assert(handle->type == UV_POLL);
+  assert(!(handle->flags & UV_HANDLE_CLOSING));
+  assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
+
+  handle->events = events;
+
+  if (handle->events != 0) {
+    uv__handle_start(handle);
+  } else {
+    uv__handle_stop(handle);
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__fast_poll_submit_poll_req(handle->loop, handle);
+  }
+
+  return 0;
+}
+
+
+static void uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  handle->events = 0;
+  uv__handle_start(handle);
+
+  if (handle->submitted_events_1 == 0 &&
+      handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  } else {
+    /* Cancel outstanding poll requests by executing another, unique poll */
+    /* request that forces the outstanding ones to return. */
+    uv__fast_poll_cancel_poll_req(loop, handle);
+  }
+}
+
+
+static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
+    WSAPROTOCOL_INFOW* protocol_info) {
+  SOCKET sock = 0;
+
+  sock = WSASocketW(protocol_info->iAddressFamily,
+                    protocol_info->iSocketType,
+                    protocol_info->iProtocol,
+                    protocol_info,
+                    0,
+                    WSA_FLAG_OVERLAPPED);
+  if (sock == INVALID_SOCKET) {
+    return INVALID_SOCKET;
+  }
+
+  if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
+    goto error;
+  };
+
+  if (CreateIoCompletionPort((HANDLE) sock,
+                             iocp,
+                             (ULONG_PTR) sock,
+                             0) == NULL) {
+    goto error;
+  }
+
+  return sock;
+
+ error:
+  closesocket(sock);
+  return INVALID_SOCKET;
+}
+
+
+static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
+    WSAPROTOCOL_INFOW* protocol_info) {
+  int index, i;
+  SOCKET peer_socket;
+
+  index = -1;
+  for (i = 0; i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
+    if (memcmp((void*) &protocol_info->ProviderId,
+               (void*) &uv_msafd_provider_ids[i],
+               sizeof protocol_info->ProviderId) == 0) {
+      index = i;
+    }
+  }
+
+  /* Check if the protocol uses an msafd socket. */
+  if (index < 0) {
+    return INVALID_SOCKET;
+  }
+
+  /* If we didn't (try) to create a peer socket yet, try to make one. Don't */
+  /* try again if the peer socket creation failed earlier for the same */
+  /* protocol. */
+  peer_socket = loop->poll_peer_sockets[index];
+  if (peer_socket == 0) {
+    peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
+    loop->poll_peer_sockets[index] = peer_socket;
+  }
+
+  return peer_socket;
+}
+
+
+static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
+  uv_req_t* req = (uv_req_t*) arg;
+  uv_poll_t* handle = (uv_poll_t*) req->data;
+  unsigned char events, reported_events;
+  int r;
+  uv_single_fd_set_t rfds, wfds, efds;
+  struct timeval timeout;
+
+  assert(handle->type == UV_POLL);
+  assert(req->type == UV_POLL_REQ);
+
+  if (req == &handle->poll_req_1) {
+    events = handle->submitted_events_1;
+  } else if (req == &handle->poll_req_2) {
+    events = handle->submitted_events_2;
+  } else {
+    assert(0);
+  }
+
+  if (handle->events & UV_READABLE) {
+    rfds.fd_count = 1;
+    rfds.fd_array[0] = handle->socket;
+  } else {
+    rfds.fd_count = 0;
+  }
+
+  if (handle->events & UV_WRITABLE) {
+    wfds.fd_count = 1;
+    wfds.fd_array[0] = handle->socket;
+    efds.fd_count = 1;
+    efds.fd_array[0] = handle->socket;
+  } else {
+    wfds.fd_count = 0;
+    efds.fd_count = 0;
+  }
+
+  /* Make the select() time out after 3 minutes. If select() hangs because */
+  /* the user closed the socket, we will at least not hang indefinitely. */
+  timeout.tv_sec = 3 * 60;
+  timeout.tv_usec = 0;
+
+  r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
+  if (r == SOCKET_ERROR) {
+    /* Queue this req, reporting an error. */
+    SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
+    POST_COMPLETION_FOR_REQ(handle->loop, req);
+    return 0;
+  }
+
+  reported_events = 0;
+
+  if (r > 0) {
+    if (rfds.fd_count > 0) {
+      assert(rfds.fd_count == 1);
+      assert(rfds.fd_array[0] == handle->socket);
+      reported_events |= UV_READABLE;
+    }
+
+    if (wfds.fd_count > 0) {
+      assert(wfds.fd_count == 1);
+      assert(wfds.fd_array[0] == handle->socket);
+      reported_events |= UV_WRITABLE;
+    } else if (efds.fd_count > 0) {
+      assert(efds.fd_count == 1);
+      assert(efds.fd_array[0] == handle->socket);
+      reported_events |= UV_WRITABLE;
+    }
+  }
+
+  SET_REQ_SUCCESS(req);
+  req->overlapped.InternalHigh = (DWORD) reported_events;
+  POST_COMPLETION_FOR_REQ(handle->loop, req);
+
+  return 0;
+}
+
+
+static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  uv_req_t* req;
+
+  /* Find a yet unsubmitted req to submit. */
+  if (handle->submitted_events_1 == 0) {
+    req = &handle->poll_req_1;
+    handle->submitted_events_1 = handle->events;
+    handle->mask_events_1 = 0;
+    handle->mask_events_2 = handle->events;
+  } else if (handle->submitted_events_2 == 0) {
+    req = &handle->poll_req_2;
+    handle->submitted_events_2 = handle->events;
+    handle->mask_events_1 = handle->events;
+    handle->mask_events_2 = 0;
+  } else {
+    assert(0);
+  }
+
+  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
+                         (void*) req,
+                         WT_EXECUTELONGFUNCTION)) {
+    /* Make this req pending, reporting an error. */
+    SET_REQ_ERROR(req, GetLastError());
+    uv_insert_pending_req(loop, req);
+  }
+}
+
+
+
+static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+    uv_req_t* req) {
+  unsigned char mask_events;
+  if (req == &handle->poll_req_1) {
+    handle->submitted_events_1 = 0;
+    mask_events = handle->mask_events_1;
+  } else if (req == &handle->poll_req_2) {
+    handle->submitted_events_2 = 0;
+    mask_events = handle->mask_events_2;
+  } else {
+    assert(0);
+  }
+
+  if (!REQ_SUCCESS(req)) {
+    /* Error. */
+    if (handle->events != 0) {
+      handle->events = 0; /* Stop the watcher */
+      uv__set_sys_error(loop, GET_REQ_ERROR(req));
+      handle->poll_cb(handle, -1, 0);
+    }
+  } else {
+    /* Got some events. */
+    int events = req->overlapped.InternalHigh & handle->events & ~mask_events;
+    if (events != 0) {
+      handle->poll_cb(handle, 0, events);
+    }
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__slow_poll_submit_poll_req(loop, handle);
+  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
+             handle->submitted_events_1 == 0 &&
+             handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
+  assert(handle->type == UV_POLL);
+  assert(!(handle->flags & UV_HANDLE_CLOSING));
+  assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
+
+  handle->events = events;
+
+  if (handle->events != 0) {
+    uv__handle_start(handle);
+  } else {
+    uv__handle_stop(handle);
+  }
+
+  if ((handle->events &
+      ~(handle->submitted_events_1 | handle->submitted_events_2)) != 0) {
+    uv__slow_poll_submit_poll_req(handle->loop, handle);
+  }
+
+  return 0;
+}
+
+
+static void uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  handle->events = 0;
+  uv__handle_start(handle);
+
+  if (handle->submitted_events_1 == 0 &&
+      handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+  return uv_poll_init_socket(loop, handle, (SOCKET) _get_osfhandle(fd));
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+    uv_os_sock_t socket) {
+  WSAPROTOCOL_INFOW protocol_info;
+  int len;
+  SOCKET peer_socket, base_socket;
+  DWORD bytes;
+
+  /* Try to obtain a base handle for the socket. This increases this chances */
+  /* that we find an AFD handle and are able to use the fast poll mechanism. */
+  /* This will always fail on windows XP/2k3, since they don't support the */
+  /* SIO_BASE_HANDLE ioctl. */
+#ifndef NDEBUG
+  base_socket = INVALID_SOCKET;
+#endif
+
+  if (WSAIoctl(socket,
+               SIO_BASE_HANDLE,
+               NULL,
+               0,
+               &base_socket,
+               sizeof base_socket,
+               &bytes,
+               NULL,
+               NULL) == 0) {
+    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
+    socket = base_socket;
+  }
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+  handle->socket = socket;
+  handle->events = 0;
+
+  /* Obtain protocol information about the socket. */
+  len = sizeof protocol_info;
+  if (getsockopt(socket,
+                 SOL_SOCKET,
+                 SO_PROTOCOL_INFOW,
+                 (char*) &protocol_info,
+                 &len) != 0) {
+    uv__set_sys_error(loop, WSAGetLastError());
+    return -1;
+  }
+
+  /* Get the peer socket that is needed to enable fast poll. If the returned */
+  /* value is NULL, the protocol is not implemented by MSAFD and we'll have */
+  /* to use slow mode. */
+  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
+
+  if (peer_socket != INVALID_SOCKET) {
+    /* Initialize fast poll specific fields. */
+    handle->peer_socket = peer_socket;
+  } else {
+    /* Initialize slow poll specific fields. */
+    handle->flags |= UV_HANDLE_POLL_SLOW;
+  }
+
+  /* Intialize 2 poll reqs. */
+  handle->submitted_events_1 = 0;
+  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1));
+  handle->poll_req_1.type = UV_POLL_REQ;
+  handle->poll_req_1.data = handle;
+
+  handle->submitted_events_2 = 0;
+  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_2));
+  handle->poll_req_2.type = UV_POLL_REQ;
+  handle->poll_req_2.data = handle;
+
+  loop->counters.poll_init++;
+
+  return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    if (uv__fast_poll_set(handle->loop, handle, events) < 0)
+      return -1;
+  } else {
+    if (uv__slow_poll_set(handle->loop, handle, events) < 0)
+      return -1;
+  }
+
+  handle->poll_cb = cb;
+
+  return 0;
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    return uv__fast_poll_set(handle->loop, handle, 0);
+  } else {
+    return uv__slow_poll_set(handle->loop, handle, 0);
+  }
+}
+
+
+void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    uv__fast_poll_process_poll_req(loop, handle, req);
+  } else {
+    uv__slow_poll_process_poll_req(loop, handle, req);
+  }
+}
+
+
+void uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    uv__fast_poll_close(loop, handle);
+  } else {
+    uv__slow_poll_close(loop, handle);
+  }
+}
+
+
+void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
+  assert(handle->flags & UV_HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+  assert(handle->submitted_events_1 == 0);
+  assert(handle->submitted_events_2 == 0);
+
+  uv__handle_stop(handle);
+  uv__handle_close(handle);
+}