noderb 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +19 -0
- data/README.md +25 -0
- data/ext/noderb_extension/extconf.rb +11 -0
- data/ext/noderb_extension/libuv/AUTHORS +11 -0
- data/ext/noderb_extension/libuv/LICENSE +48 -0
- data/ext/noderb_extension/libuv/Makefile +119 -0
- data/ext/noderb_extension/libuv/README +45 -0
- data/ext/noderb_extension/libuv/build/all.gyp +254 -0
- data/ext/noderb_extension/libuv/build/common.gypi +13 -0
- data/ext/noderb_extension/libuv/build/gyp_uv +43 -0
- data/ext/noderb_extension/libuv/config-mingw.mk +67 -0
- data/ext/noderb_extension/libuv/config-unix.mk +121 -0
- data/ext/noderb_extension/libuv/create-msvs-files.bat +14 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ANNOUNCE +482 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/BUGS +141 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/Bmakefile +268 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/CONTRIBUTORS +140 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/COPYING +150 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/COPYING.LIB +504 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ChangeLog +5194 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/FAQ +451 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/GNUmakefile +593 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/MAINTAINERS +4 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/Makefile +516 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/NEWS +1245 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/Nmakefile +24 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/Nmakefile.tests +260 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/PROGRESS +4 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README +601 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README.Borland +57 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README.CV +3036 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README.NONPORTABLE +783 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README.Watcom +62 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/README.WinCE +6 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/TODO +7 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/WinCE-PORT +222 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/attr.c +53 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/autostatic.c +69 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/barrier.c +47 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/build/all.gyp +207 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/builddmc.bat +9 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/cancel.c +44 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/cleanup.c +148 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/condvar.c +50 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/config.h +153 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/context.h +74 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/create.c +308 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/dll.c +92 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/errno.c +94 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/exit.c +44 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/fork.c +39 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/global.c +107 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/implement.h +944 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/misc.c +50 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/mutex.c +62 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/need_errno.h +145 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/nonportable.c +47 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/private.c +54 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.c +66 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.dsp +142 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.dsw +29 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.h +1368 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_destroy.c +79 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getdetachstate.c +86 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getinheritsched.c +51 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getschedparam.c +52 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getschedpolicy.c +61 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getscope.c +54 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getstackaddr.c +97 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getstacksize.c +100 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_init.c +117 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setdetachstate.c +91 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setinheritsched.c +57 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setschedparam.c +63 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setschedpolicy.c +55 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setscope.c +62 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setstackaddr.c +97 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setstacksize.c +110 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_destroy.c +103 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_init.c +69 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_wait.c +104 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_destroy.c +83 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_getpshared.c +95 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_init.c +85 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_setpshared.c +119 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cancel.c +189 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_destroy.c +253 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_init.c +167 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_signal.c +231 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_wait.c +567 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_destroy.c +86 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_getpshared.c +97 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_init.c +87 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_setpshared.c +117 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_delay_np.c +172 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_detach.c +136 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_equal.c +76 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_exit.c +106 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getconcurrency.c +45 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getschedparam.c +75 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getspecific.c +87 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getunique_np.c +47 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getw32threadhandle_np.c +65 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_join.c +157 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_key_create.c +108 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_key_delete.c +125 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_kill.c +105 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_consistent.c +187 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_destroy.c +148 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_init.c +130 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_lock.c +269 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_timedlock.c +324 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_trylock.c +154 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_unlock.c +175 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_destroy.c +83 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getkind_np.c +44 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getpshared.c +95 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getrobust.c +113 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_gettype.c +56 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_init.c +86 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setkind_np.c +44 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setpshared.c +119 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setrobust.c +119 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_settype.c +143 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_num_processors_np.c +56 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_once.c +79 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_destroy.c +143 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_init.c +109 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_rdlock.c +102 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_timedrdlock.c +109 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_timedwrlock.c +139 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_tryrdlock.c +102 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_trywrlock.c +122 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_unlock.c +93 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_wrlock.c +133 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_destroy.c +84 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_getpshared.c +97 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_init.c +83 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_setpshared.c +120 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_self.c +141 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setcancelstate.c +125 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setcanceltype.c +126 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setconcurrency.c +53 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setschedparam.c +123 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setspecific.c +167 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_destroy.c +111 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_init.c +123 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_lock.c +80 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_trylock.c +77 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_unlock.c +71 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_testcancel.c +103 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_timechange_handler_np.c +108 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_win32_attach_detach_np.c +258 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_MCS_lock.c +278 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_callUserDestroyRoutines.c +232 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_calloc.c +56 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_cond_check_need_init.c +78 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_getprocessors.c +91 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_is_attr.c +47 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_mutex_check_need_init.c +92 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_new.c +94 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_processInitialize.c +92 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_processTerminate.c +105 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_relmillisecs.c +132 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_reuse.c +151 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_rwlock_cancelwrwait.c +50 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_rwlock_check_need_init.c +77 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_semwait.c +135 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_spinlock_check_need_init.c +78 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_threadDestroy.c +79 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_threadStart.c +357 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_throw.c +189 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_timespec.c +83 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_tkAssocCreate.c +118 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_tkAssocDestroy.c +114 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/rwlock.c +51 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched.c +53 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched.h +183 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched_get_priority_max.c +134 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched_get_priority_min.c +135 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched_getscheduler.c +71 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched_setscheduler.c +83 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sched_yield.c +71 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_close.c +58 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_destroy.c +144 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_getvalue.c +110 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_init.c +169 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_open.c +58 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_post.c +128 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_post_multiple.c +142 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_timedwait.c +238 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_trywait.c +117 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_unlink.c +58 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sem_wait.c +187 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/semaphore.c +69 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/semaphore.h +169 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/signal.c +179 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/spin.c +46 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/sync.c +43 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/tsd.c +44 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/version.rc +388 -0
- data/ext/noderb_extension/libuv/deps/pthread-win32/w32_CancelableWait.c +161 -0
- data/ext/noderb_extension/libuv/doc/desired-api.md +159 -0
- data/ext/noderb_extension/libuv/doc/iocp-links.html +574 -0
- data/ext/noderb_extension/libuv/include/ares.h +582 -0
- data/ext/noderb_extension/libuv/include/ares_version.h +24 -0
- data/ext/noderb_extension/libuv/include/eio.h +376 -0
- data/ext/noderb_extension/libuv/include/ev.h +835 -0
- data/ext/noderb_extension/libuv/include/ngx-queue.h +102 -0
- data/ext/noderb_extension/libuv/include/tree.h +762 -0
- data/ext/noderb_extension/libuv/include/uv-unix.h +138 -0
- data/ext/noderb_extension/libuv/include/uv-win.h +187 -0
- data/ext/noderb_extension/libuv/include/uv.h +635 -0
- data/ext/noderb_extension/libuv/src/ares/AUTHORS +37 -0
- data/ext/noderb_extension/libuv/src/ares/CHANGES +1198 -0
- data/ext/noderb_extension/libuv/src/ares/CMakeLists.txt +22 -0
- data/ext/noderb_extension/libuv/src/ares/NEWS +21 -0
- data/ext/noderb_extension/libuv/src/ares/README +60 -0
- data/ext/noderb_extension/libuv/src/ares/README.cares +13 -0
- data/ext/noderb_extension/libuv/src/ares/README.msvc +118 -0
- data/ext/noderb_extension/libuv/src/ares/README.node +21 -0
- data/ext/noderb_extension/libuv/src/ares/RELEASE-NOTES +25 -0
- data/ext/noderb_extension/libuv/src/ares/TODO +23 -0
- data/ext/noderb_extension/libuv/src/ares/ares__close_sockets.c +66 -0
- data/ext/noderb_extension/libuv/src/ares/ares__get_hostent.c +263 -0
- data/ext/noderb_extension/libuv/src/ares/ares__read_line.c +71 -0
- data/ext/noderb_extension/libuv/src/ares/ares__timeval.c +111 -0
- data/ext/noderb_extension/libuv/src/ares/ares_cancel.c +63 -0
- data/ext/noderb_extension/libuv/src/ares/ares_data.c +190 -0
- data/ext/noderb_extension/libuv/src/ares/ares_data.h +65 -0
- data/ext/noderb_extension/libuv/src/ares/ares_destroy.c +105 -0
- data/ext/noderb_extension/libuv/src/ares/ares_dns.h +90 -0
- data/ext/noderb_extension/libuv/src/ares/ares_expand_name.c +193 -0
- data/ext/noderb_extension/libuv/src/ares/ares_expand_string.c +75 -0
- data/ext/noderb_extension/libuv/src/ares/ares_fds.c +62 -0
- data/ext/noderb_extension/libuv/src/ares/ares_free_hostent.c +39 -0
- data/ext/noderb_extension/libuv/src/ares/ares_free_string.c +25 -0
- data/ext/noderb_extension/libuv/src/ares/ares_gethostbyaddr.c +292 -0
- data/ext/noderb_extension/libuv/src/ares/ares_gethostbyname.c +515 -0
- data/ext/noderb_extension/libuv/src/ares/ares_getnameinfo.c +426 -0
- data/ext/noderb_extension/libuv/src/ares/ares_getopt.c +122 -0
- data/ext/noderb_extension/libuv/src/ares/ares_getopt.h +53 -0
- data/ext/noderb_extension/libuv/src/ares/ares_getsock.c +72 -0
- data/ext/noderb_extension/libuv/src/ares/ares_init.c +1665 -0
- data/ext/noderb_extension/libuv/src/ares/ares_ipv6.h +78 -0
- data/ext/noderb_extension/libuv/src/ares/ares_library_init.c +132 -0
- data/ext/noderb_extension/libuv/src/ares/ares_library_init.h +39 -0
- data/ext/noderb_extension/libuv/src/ares/ares_llist.c +86 -0
- data/ext/noderb_extension/libuv/src/ares/ares_llist.h +42 -0
- data/ext/noderb_extension/libuv/src/ares/ares_mkquery.c +195 -0
- data/ext/noderb_extension/libuv/src/ares/ares_nowarn.c +59 -0
- data/ext/noderb_extension/libuv/src/ares/ares_nowarn.h +24 -0
- data/ext/noderb_extension/libuv/src/ares/ares_options.c +253 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_a_reply.c +260 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_aaaa_reply.c +256 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_mx_reply.c +170 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_ns_reply.c +182 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_ptr_reply.c +208 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_srv_reply.c +179 -0
- data/ext/noderb_extension/libuv/src/ares/ares_parse_txt_reply.c +201 -0
- data/ext/noderb_extension/libuv/src/ares/ares_private.h +351 -0
- data/ext/noderb_extension/libuv/src/ares/ares_process.c +1296 -0
- data/ext/noderb_extension/libuv/src/ares/ares_query.c +183 -0
- data/ext/noderb_extension/libuv/src/ares/ares_rules.h +144 -0
- data/ext/noderb_extension/libuv/src/ares/ares_search.c +322 -0
- data/ext/noderb_extension/libuv/src/ares/ares_send.c +134 -0
- data/ext/noderb_extension/libuv/src/ares/ares_setup.h +191 -0
- data/ext/noderb_extension/libuv/src/ares/ares_strcasecmp.c +66 -0
- data/ext/noderb_extension/libuv/src/ares/ares_strcasecmp.h +30 -0
- data/ext/noderb_extension/libuv/src/ares/ares_strdup.c +42 -0
- data/ext/noderb_extension/libuv/src/ares/ares_strdup.h +26 -0
- data/ext/noderb_extension/libuv/src/ares/ares_strerror.c +56 -0
- data/ext/noderb_extension/libuv/src/ares/ares_timeout.c +80 -0
- data/ext/noderb_extension/libuv/src/ares/ares_version.c +11 -0
- data/ext/noderb_extension/libuv/src/ares/ares_writev.c +79 -0
- data/ext/noderb_extension/libuv/src/ares/ares_writev.h +36 -0
- data/ext/noderb_extension/libuv/src/ares/bitncmp.c +59 -0
- data/ext/noderb_extension/libuv/src/ares/bitncmp.h +26 -0
- data/ext/noderb_extension/libuv/src/ares/config_cygwin/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_darwin/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_freebsd/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_linux/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_openbsd/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_sunos/ares_config.h +510 -0
- data/ext/noderb_extension/libuv/src/ares/config_win32/ares_config.h +369 -0
- data/ext/noderb_extension/libuv/src/ares/get_ver.awk +35 -0
- data/ext/noderb_extension/libuv/src/ares/inet_net_pton.c +450 -0
- data/ext/noderb_extension/libuv/src/ares/inet_net_pton.h +31 -0
- data/ext/noderb_extension/libuv/src/ares/inet_ntop.c +232 -0
- data/ext/noderb_extension/libuv/src/ares/inet_ntop.h +27 -0
- data/ext/noderb_extension/libuv/src/ares/nameser.h +193 -0
- data/ext/noderb_extension/libuv/src/ares/setup_once.h +488 -0
- data/ext/noderb_extension/libuv/src/ares/windows_port.c +22 -0
- data/ext/noderb_extension/libuv/src/eio/Changes +63 -0
- data/ext/noderb_extension/libuv/src/eio/LICENSE +36 -0
- data/ext/noderb_extension/libuv/src/eio/Makefile.am +15 -0
- data/ext/noderb_extension/libuv/src/eio/aclocal.m4 +8957 -0
- data/ext/noderb_extension/libuv/src/eio/autogen.sh +3 -0
- data/ext/noderb_extension/libuv/src/eio/config.h.in +86 -0
- data/ext/noderb_extension/libuv/src/eio/config_cygwin.h +77 -0
- data/ext/noderb_extension/libuv/src/eio/config_darwin.h +137 -0
- data/ext/noderb_extension/libuv/src/eio/config_freebsd.h +78 -0
- data/ext/noderb_extension/libuv/src/eio/config_linux.h +101 -0
- data/ext/noderb_extension/libuv/src/eio/config_sunos.h +81 -0
- data/ext/noderb_extension/libuv/src/eio/configure.ac +22 -0
- data/ext/noderb_extension/libuv/src/eio/demo.c +194 -0
- data/ext/noderb_extension/libuv/src/eio/ecb.h +370 -0
- data/ext/noderb_extension/libuv/src/eio/eio.3 +3428 -0
- data/ext/noderb_extension/libuv/src/eio/eio.c +2562 -0
- data/ext/noderb_extension/libuv/src/eio/eio.pod +969 -0
- data/ext/noderb_extension/libuv/src/eio/libeio.m4 +195 -0
- data/ext/noderb_extension/libuv/src/eio/xthread.h +164 -0
- data/ext/noderb_extension/libuv/src/ev/Changes +388 -0
- data/ext/noderb_extension/libuv/src/ev/LICENSE +36 -0
- data/ext/noderb_extension/libuv/src/ev/Makefile.am +18 -0
- data/ext/noderb_extension/libuv/src/ev/Makefile.in +771 -0
- data/ext/noderb_extension/libuv/src/ev/README +58 -0
- data/ext/noderb_extension/libuv/src/ev/aclocal.m4 +8957 -0
- data/ext/noderb_extension/libuv/src/ev/autogen.sh +6 -0
- data/ext/noderb_extension/libuv/src/ev/config.guess +1526 -0
- data/ext/noderb_extension/libuv/src/ev/config.h.in +125 -0
- data/ext/noderb_extension/libuv/src/ev/config.sub +1658 -0
- data/ext/noderb_extension/libuv/src/ev/config_cygwin.h +123 -0
- data/ext/noderb_extension/libuv/src/ev/config_darwin.h +122 -0
- data/ext/noderb_extension/libuv/src/ev/config_freebsd.h +120 -0
- data/ext/noderb_extension/libuv/src/ev/config_linux.h +130 -0
- data/ext/noderb_extension/libuv/src/ev/config_sunos.h +122 -0
- data/ext/noderb_extension/libuv/src/ev/configure +13037 -0
- data/ext/noderb_extension/libuv/src/ev/configure.ac +18 -0
- data/ext/noderb_extension/libuv/src/ev/depcomp +630 -0
- data/ext/noderb_extension/libuv/src/ev/ev++.h +816 -0
- data/ext/noderb_extension/libuv/src/ev/ev.3 +5311 -0
- data/ext/noderb_extension/libuv/src/ev/ev.c +3913 -0
- data/ext/noderb_extension/libuv/src/ev/ev.pod +5243 -0
- data/ext/noderb_extension/libuv/src/ev/ev_epoll.c +266 -0
- data/ext/noderb_extension/libuv/src/ev/ev_kqueue.c +198 -0
- data/ext/noderb_extension/libuv/src/ev/ev_poll.c +148 -0
- data/ext/noderb_extension/libuv/src/ev/ev_port.c +179 -0
- data/ext/noderb_extension/libuv/src/ev/ev_select.c +310 -0
- data/ext/noderb_extension/libuv/src/ev/ev_vars.h +203 -0
- data/ext/noderb_extension/libuv/src/ev/ev_win32.c +153 -0
- data/ext/noderb_extension/libuv/src/ev/ev_wrap.h +196 -0
- data/ext/noderb_extension/libuv/src/ev/event.c +402 -0
- data/ext/noderb_extension/libuv/src/ev/event.h +170 -0
- data/ext/noderb_extension/libuv/src/ev/install-sh +294 -0
- data/ext/noderb_extension/libuv/src/ev/libev.m4 +39 -0
- data/ext/noderb_extension/libuv/src/ev/ltmain.sh +8413 -0
- data/ext/noderb_extension/libuv/src/ev/missing +336 -0
- data/ext/noderb_extension/libuv/src/ev/mkinstalldirs +111 -0
- data/ext/noderb_extension/libuv/src/uv-common.c +172 -0
- data/ext/noderb_extension/libuv/src/uv-common.h +53 -0
- data/ext/noderb_extension/libuv/src/uv-cygwin.c +52 -0
- data/ext/noderb_extension/libuv/src/uv-darwin.c +64 -0
- data/ext/noderb_extension/libuv/src/uv-eio.c +113 -0
- data/ext/noderb_extension/libuv/src/uv-eio.h +13 -0
- data/ext/noderb_extension/libuv/src/uv-freebsd.c +65 -0
- data/ext/noderb_extension/libuv/src/uv-linux.c +51 -0
- data/ext/noderb_extension/libuv/src/uv-sunos.c +60 -0
- data/ext/noderb_extension/libuv/src/uv-unix.c +2408 -0
- data/ext/noderb_extension/libuv/src/win/async.c +129 -0
- data/ext/noderb_extension/libuv/src/win/cares.c +304 -0
- data/ext/noderb_extension/libuv/src/win/core.c +155 -0
- data/ext/noderb_extension/libuv/src/win/error.c +140 -0
- data/ext/noderb_extension/libuv/src/win/getaddrinfo.c +341 -0
- data/ext/noderb_extension/libuv/src/win/handle.c +176 -0
- data/ext/noderb_extension/libuv/src/win/internal.h +237 -0
- data/ext/noderb_extension/libuv/src/win/loop-watcher.c +128 -0
- data/ext/noderb_extension/libuv/src/win/pipe.c +828 -0
- data/ext/noderb_extension/libuv/src/win/process.c +936 -0
- data/ext/noderb_extension/libuv/src/win/req.c +141 -0
- data/ext/noderb_extension/libuv/src/win/stdio.c +75 -0
- data/ext/noderb_extension/libuv/src/win/stream.c +149 -0
- data/ext/noderb_extension/libuv/src/win/tcp.c +895 -0
- data/ext/noderb_extension/libuv/src/win/timer.c +269 -0
- data/ext/noderb_extension/libuv/src/win/util.c +82 -0
- data/ext/noderb_extension/libuv/test/benchmark-ares.c +117 -0
- data/ext/noderb_extension/libuv/test/benchmark-getaddrinfo.c +90 -0
- data/ext/noderb_extension/libuv/test/benchmark-list.h +77 -0
- data/ext/noderb_extension/libuv/test/benchmark-ping-pongs.c +210 -0
- data/ext/noderb_extension/libuv/test/benchmark-pound.c +237 -0
- data/ext/noderb_extension/libuv/test/benchmark-pump.c +459 -0
- data/ext/noderb_extension/libuv/test/benchmark-sizes.c +39 -0
- data/ext/noderb_extension/libuv/test/benchmark-spawn.c +154 -0
- data/ext/noderb_extension/libuv/test/dns-server.c +323 -0
- data/ext/noderb_extension/libuv/test/echo-server.c +299 -0
- data/ext/noderb_extension/libuv/test/run-benchmarks.c +64 -0
- data/ext/noderb_extension/libuv/test/run-tests.c +82 -0
- data/ext/noderb_extension/libuv/test/runner-unix.c +335 -0
- data/ext/noderb_extension/libuv/test/runner-unix.h +36 -0
- data/ext/noderb_extension/libuv/test/runner-win.c +343 -0
- data/ext/noderb_extension/libuv/test/runner-win.h +42 -0
- data/ext/noderb_extension/libuv/test/runner.c +311 -0
- data/ext/noderb_extension/libuv/test/runner.h +155 -0
- data/ext/noderb_extension/libuv/test/task.h +111 -0
- data/ext/noderb_extension/libuv/test/test-async.c +218 -0
- data/ext/noderb_extension/libuv/test/test-callback-stack.c +205 -0
- data/ext/noderb_extension/libuv/test/test-connection-fail.c +149 -0
- data/ext/noderb_extension/libuv/test/test-delayed-accept.c +198 -0
- data/ext/noderb_extension/libuv/test/test-fail-always.c +29 -0
- data/ext/noderb_extension/libuv/test/test-get-currentexe.c +53 -0
- data/ext/noderb_extension/libuv/test/test-getaddrinfo.c +110 -0
- data/ext/noderb_extension/libuv/test/test-gethostbyname.c +192 -0
- data/ext/noderb_extension/libuv/test/test-getsockname.c +196 -0
- data/ext/noderb_extension/libuv/test/test-hrtime.c +51 -0
- data/ext/noderb_extension/libuv/test/test-idle.c +83 -0
- data/ext/noderb_extension/libuv/test/test-list.h +165 -0
- data/ext/noderb_extension/libuv/test/test-loop-handles.c +361 -0
- data/ext/noderb_extension/libuv/test/test-pass-always.c +28 -0
- data/ext/noderb_extension/libuv/test/test-ping-pong.c +256 -0
- data/ext/noderb_extension/libuv/test/test-pipe-bind-error.c +148 -0
- data/ext/noderb_extension/libuv/test/test-ref.c +91 -0
- data/ext/noderb_extension/libuv/test/test-shutdown-eof.c +183 -0
- data/ext/noderb_extension/libuv/test/test-spawn.c +345 -0
- data/ext/noderb_extension/libuv/test/test-tcp-bind-error.c +204 -0
- data/ext/noderb_extension/libuv/test/test-tcp-bind6-error.c +164 -0
- data/ext/noderb_extension/libuv/test/test-tcp-writealot.c +198 -0
- data/ext/noderb_extension/libuv/test/test-timer-again.c +141 -0
- data/ext/noderb_extension/libuv/test/test-timer.c +134 -0
- data/ext/noderb_extension/noderb.c +340 -0
- data/ext/noderb_extension/noderb.h +2 -0
- data/lib/noderb/connection.rb +21 -0
- data/lib/noderb/process.rb +17 -0
- data/lib/noderb.rb +25 -0
- metadata +470 -0
@@ -0,0 +1,2408 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* O_CLOEXEC, accept4(), etc. */
+#endif
+
+#include "uv.h"
+#include "uv-common.h"
+#include "uv-eio.h"
+
+#include <stddef.h> /* NULL */
+#include <stdio.h> /* printf */
+#include <stdlib.h>
+#include <string.h> /* strerror */
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <limits.h> /* PATH_MAX */
+#include <sys/uio.h> /* writev */
+#include <poll.h>
+
+#ifdef __linux__
+#include <linux/version.h>
+/* pipe2() requires linux >= 2.6.27 and glibc >= 2.9 */
+#define HAVE_PIPE2 \
+  defined(LINUX_VERSION_CODE) && defined(__GLIBC_PREREQ) && LINUX_VERSION_CODE >= 0x2061B && __GLIBC_PREREQ(2, 9))
+#endif
+
+#ifdef __sun
+# include <sys/types.h>
+# include <sys/wait.h>
+#endif
+
+#if defined(__APPLE__)
+#include <mach-o/dyld.h> /* _NSGetExecutablePath */
+#endif
+
+#if defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#include <sys/wait.h>
+#endif
+
+
+# ifdef __APPLE__
+# include <crt_externs.h>
+# define environ (*_NSGetEnviron())
+# else
+extern char **environ;
+# endif
+
+static uv_err_t last_err;
+
+struct uv_ares_data_s {
+  ares_channel channel;
+  /*
+   * While the channel is active this timer is called once per second to be sure
+   * that we're always calling ares_process. See the warning above the
+   * definition of ares_timeout().
+   */
+  ev_timer timer;
+};
+
+static struct uv_ares_data_s ares_data;
+
+void uv__req_init(uv_req_t*);
+void uv__next(EV_P_ ev_idle* watcher, int revents);
+static int uv__stream_open(uv_stream_t*, int fd, int flags);
+static void uv__finish_close(uv_handle_t* handle);
+static uv_err_t uv_err_new(uv_handle_t* handle, int sys_error);
+
+static int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
+static int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+static int uv_pipe_cleanup(uv_pipe_t* handle);
+static uv_write_t* uv__write(uv_stream_t* stream);
+static void uv__read(uv_stream_t* stream);
+static void uv__stream_connect(uv_stream_t*);
+static void uv__stream_io(EV_P_ ev_io* watcher, int revents);
+static void uv__pipe_accept(EV_P_ ev_io* watcher, int revents);
+
+#ifndef __GNUC__
+#define __attribute__(a)
+#endif
+
+/* Unused on systems that support O_CLOEXEC, SOCK_CLOEXEC, etc. */
+static int uv__cloexec(int fd, int set) __attribute__((unused));
+static int uv__nonblock(int fd, int set) __attribute__((unused));
+
+static int uv__socket(int domain, int type, int protocol);
+static int uv__accept(int sockfd, struct sockaddr* saddr, socklen_t len);
+static int uv__close(int fd);
+
+size_t uv__strlcpy(char* dst, const char* src, size_t size);
+
+
+/* flags */
+enum {
+  UV_CLOSING = 0x00000001, /* uv_close() called but not finished. */
+  UV_CLOSED = 0x00000002, /* close(2) finished. */
+  UV_READING = 0x00000004, /* uv_read_start() called. */
+  UV_SHUTTING = 0x00000008, /* uv_shutdown() called but not complete. */
+  UV_SHUT = 0x00000010, /* Write side closed. */
+  UV_READABLE = 0x00000020, /* The stream is readable */
+  UV_WRITABLE = 0x00000040 /* The stream is writable */
+};
+
+
+/* TODO Share this code with Windows. */
+/* TODO Expose callback to user to handle fatal error like V8 does. */
+static void uv_fatal_error(const int errorno, const char* syscall) {
+  char* buf = NULL;
+  const char* errmsg;
+
+  if (buf) {
+    errmsg = buf;
+  } else {
+    errmsg = "Unknown error";
+  }
+
+  if (syscall) {
+    fprintf(stderr, "\nlibuv fatal error. %s: (%d) %s\n", syscall, errorno,
+        errmsg);
+  } else {
+    fprintf(stderr, "\nlibuv fatal error. (%d) %s\n", errorno, errmsg);
+  }
+
+  abort();
+}
+
+
+uv_err_t uv_last_error() {
+  return last_err;
+}
+
+
+char* uv_strerror(uv_err_t err) {
+  return strerror(err.sys_errno_);
+}
+
+
+static uv_err_code uv_translate_sys_error(int sys_errno) {
+  switch (sys_errno) {
+    case 0: return UV_OK;
+    case EACCES: return UV_EACCESS;
+    case EBADF: return UV_EBADF;
+    case EAGAIN: return UV_EAGAIN;
+    case ECONNRESET: return UV_ECONNRESET;
+    case EFAULT: return UV_EFAULT;
+    case EMFILE: return UV_EMFILE;
+    case EINVAL: return UV_EINVAL;
+    case ECONNREFUSED: return UV_ECONNREFUSED;
+    case EADDRINUSE: return UV_EADDRINUSE;
+    case EADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
+    default: return UV_UNKNOWN;
+  }
+}
+
+
+static uv_err_t uv_err_new_artificial(uv_handle_t* handle, int code) {
+  uv_err_t err;
+  err.sys_errno_ = 0;
+  err.code = code;
+  last_err = err;
+  return err;
+}
+
+
+static uv_err_t uv_err_new(uv_handle_t* handle, int sys_error) {
+  uv_err_t err;
+  err.sys_errno_ = sys_error;
+  err.code = uv_translate_sys_error(sys_error);
+  last_err = err;
+  return err;
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
+  uv_tcp_t* tcp;
+  uv_pipe_t* pipe;
+  uv_async_t* async;
+  uv_timer_t* timer;
+  uv_process_t* process;
+
+  handle->close_cb = close_cb;
+
+  switch (handle->type) {
+    case UV_TCP:
+      tcp = (uv_tcp_t*) handle;
+      uv_read_stop((uv_stream_t*)tcp);
+      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
+      break;
+
+    case UV_PREPARE:
+      uv_prepare_stop((uv_prepare_t*) handle);
+      break;
+
+    case UV_CHECK:
+      uv_check_stop((uv_check_t*) handle);
+      break;
+
+    case UV_IDLE:
+      uv_idle_stop((uv_idle_t*) handle);
+      break;
+
+    case UV_ASYNC:
+      async = (uv_async_t*)handle;
+      ev_async_stop(EV_DEFAULT_ &async->async_watcher);
+      ev_ref(EV_DEFAULT_UC);
+      break;
+
+    case UV_TIMER:
+      timer = (uv_timer_t*)handle;
+      if (ev_is_active(&timer->timer_watcher)) {
+        ev_ref(EV_DEFAULT_UC);
+      }
+      ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
+      break;
+
+    case UV_NAMED_PIPE:
+      pipe = (uv_pipe_t*)handle;
+      uv_pipe_cleanup(pipe);
+      uv_read_stop((uv_stream_t*)handle);
+      ev_io_stop(EV_DEFAULT_ &pipe->write_watcher);
+      break;
+
+    case UV_PROCESS:
+      process = (uv_process_t*)handle;
+      ev_child_stop(EV_DEFAULT_UC_ &process->child_watcher);
+      break;
+
+    default:
+      assert(0);
+  }
+
+  handle->flags |= UV_CLOSING;
+
+  /* This is used to call the on_close callback in the next loop. */
+  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
+  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
+  assert(ev_is_pending(&handle->next_watcher));
+}
+
+
+void uv_init() {
+  /* Initialize the default ev loop. */
+#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060
+  ev_default_loop(EVBACKEND_KQUEUE);
+#else
+  ev_default_loop(EVFLAG_AUTO);
+#endif
+}
+
+
+int uv_run() {
+  ev_run(EV_DEFAULT_ 0);
+  return 0;
+}
+
+
+static void uv__handle_init(uv_handle_t* handle, uv_handle_type type) {
+  uv_counters()->handle_init++;
+
+  handle->type = type;
+  handle->flags = 0;
+
+  ev_init(&handle->next_watcher, uv__next);
+  handle->next_watcher.data = handle;
+
+  /* Ref the loop until this handle is closed. See uv__finish_close. */
+  ev_ref(EV_DEFAULT_UC);
+}
+
+
+int uv_tcp_init(uv_tcp_t* tcp) {
+  uv__handle_init((uv_handle_t*)tcp, UV_TCP);
+  uv_counters()->tcp_init++;
+
+  tcp->alloc_cb = NULL;
+  tcp->connect_req = NULL;
+  tcp->accepted_fd = -1;
+  tcp->fd = -1;
+  tcp->delayed_error = 0;
+  ngx_queue_init(&tcp->write_queue);
+  ngx_queue_init(&tcp->write_completed_queue);
+  tcp->write_queue_size = 0;
+
+  ev_init(&tcp->read_watcher, uv__stream_io);
+  tcp->read_watcher.data = tcp;
+
+  ev_init(&tcp->write_watcher, uv__stream_io);
+  tcp->write_watcher.data = tcp;
+
+  assert(ngx_queue_empty(&tcp->write_queue));
+  assert(ngx_queue_empty(&tcp->write_completed_queue));
+  assert(tcp->write_queue_size == 0);
+
+  return 0;
+}
+
+
+static int uv__bind(uv_tcp_t* tcp, int domain, struct sockaddr* addr,
+    int addrsize) {
+  int saved_errno;
+  int status;
+  int fd;
+
+  saved_errno = errno;
+  status = -1;
+
+  if (tcp->fd <= 0) {
+    if ((fd = uv__socket(domain, SOCK_STREAM, 0)) == -1) {
+      uv_err_new((uv_handle_t*)tcp, errno);
+      goto out;
+    }
+
+    if (uv__stream_open((uv_stream_t*)tcp, fd, UV_READABLE | UV_WRITABLE)) {
+      status = -2;
+      uv__close(fd);
+      goto out;
+    }
+  }
+
+  assert(tcp->fd >= 0);
+
+  tcp->delayed_error = 0;
+  if (bind(tcp->fd, addr, addrsize) == -1) {
+    if (errno == EADDRINUSE) {
+      tcp->delayed_error = errno;
+    } else {
+      uv_err_new((uv_handle_t*)tcp, errno);
+      goto out;
+    }
+  }
+  status = 0;
+
+out:
+  errno = saved_errno;
+  return status;
+}
+
+
+int uv_tcp_bind(uv_tcp_t* tcp, struct sockaddr_in addr) {
+  if (addr.sin_family != AF_INET) {
+    uv_err_new((uv_handle_t*)tcp, EFAULT);
+    return -1;
+  }
+
+  return uv__bind(tcp, AF_INET, (struct sockaddr*)&addr,
+      sizeof(struct sockaddr_in));
+}
+
+
+int uv_tcp_bind6(uv_tcp_t* tcp, struct sockaddr_in6 addr) {
+  if (addr.sin6_family != AF_INET6) {
+    uv_err_new((uv_handle_t*)tcp, EFAULT);
+    return -1;
+  }
+
+  return uv__bind(tcp, AF_INET6, (struct sockaddr*)&addr,
+      sizeof(struct sockaddr_in6));
+}
+
+
+static int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
+  socklen_t yes;
+
+  assert(fd >= 0);
+  stream->fd = fd;
+
+  ((uv_handle_t*)stream)->flags |= flags;
+
+  /* Reuse the port address if applicable. */
+  yes = 1;
+  if (stream->type == UV_TCP
+      && setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) {
+    uv_err_new((uv_handle_t*)stream, errno);
+    return -1;
+  }
+
+  /* Associate the fd with each ev_io watcher. */
+  ev_io_set(&stream->read_watcher, fd, EV_READ);
+  ev_io_set(&stream->write_watcher, fd, EV_WRITE);
+
+  /* These should have been set up by uv_tcp_init or uv_pipe_init. */
+  assert(stream->read_watcher.cb == uv__stream_io);
+  assert(stream->write_watcher.cb == uv__stream_io);
+
+  return 0;
+}
+
+
+void uv__server_io(EV_P_ ev_io* watcher, int revents) {
+  int fd;
+  struct sockaddr_storage addr;
+  uv_stream_t* stream = watcher->data;
+
+  assert(watcher == &stream->read_watcher ||
+         watcher == &stream->write_watcher);
+  assert(revents == EV_READ);
+
+  assert(!(((uv_handle_t*)stream)->flags & UV_CLOSING));
+
+  if (stream->accepted_fd >= 0) {
+    ev_io_stop(EV_DEFAULT_ &stream->read_watcher);
+    return;
+  }
+
+  while (1) {
+    assert(stream->accepted_fd < 0);
+    fd = uv__accept(stream->fd, (struct sockaddr*)&addr, sizeof addr);
+
+    if (fd < 0) {
+      if (errno == EAGAIN) {
+        /* No problem. */
+        return;
+      } else if (errno == EMFILE) {
+        /* TODO special trick. unlock reserved socket, accept, close. */
+        return;
+      } else {
+        uv_err_new((uv_handle_t*)stream, errno);
+        stream->connection_cb((uv_stream_t*)stream, -1);
+      }
+
+    } else {
+      stream->accepted_fd = fd;
+      stream->connection_cb((uv_stream_t*)stream, 0);
+      if (stream->accepted_fd >= 0) {
+        /* The user hasn't yet accepted called uv_accept() */
+        ev_io_stop(EV_DEFAULT_ &stream->read_watcher);
+        return;
+      }
+    }
+  }
+}
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+  uv_stream_t* streamServer;
+  uv_stream_t* streamClient;
+  int saved_errno;
+  int status;
+
+  saved_errno = errno;
+  status = -1;
+
+  streamServer = (uv_stream_t*)server;
+  streamClient = (uv_stream_t*)client;
+
+  if (streamServer->accepted_fd < 0) {
+    uv_err_new((uv_handle_t*)server, EAGAIN);
+    goto out;
+  }
+
+  if (uv__stream_open(streamClient, streamServer->accepted_fd,
+        UV_READABLE | UV_WRITABLE)) {
+    /* TODO handle error */
+    streamServer->accepted_fd = -1;
+    uv__close(streamServer->accepted_fd);
+    goto out;
+  }
+
+  ev_io_start(EV_DEFAULT_ &streamServer->read_watcher);
+  streamServer->accepted_fd = -1;
+  status = 0;
+
+out:
+  errno = saved_errno;
+  return status;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+  switch (stream->type) {
+    case UV_TCP:
+      return uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+    case UV_NAMED_PIPE:
+      return uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+    default:
+      assert(0);
+      return -1;
+  }
+}
+
+
+static int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+  int r;
+  int fd;
+
+  if (tcp->delayed_error) {
+    uv_err_new((uv_handle_t*)tcp, tcp->delayed_error);
+    return -1;
+  }
+
+  if (tcp->fd <= 0) {
+    if ((fd = uv__socket(AF_INET, SOCK_STREAM, 0)) == -1) {
+      uv_err_new((uv_handle_t*)tcp, errno);
+      return -1;
+    }
+
+    if (uv__stream_open((uv_stream_t*)tcp, fd, UV_READABLE)) {
+      uv__close(fd);
+      return -1;
+    }
+  }
+
+  assert(tcp->fd >= 0);
+
+  r = listen(tcp->fd, backlog);
+  if (r < 0) {
+    uv_err_new((uv_handle_t*)tcp, errno);
+    return -1;
+  }
+
+  tcp->connection_cb = cb;
+
+  /* Start listening for connections. */
+  ev_io_set(&tcp->read_watcher, tcp->fd, EV_READ);
+  ev_set_cb(&tcp->read_watcher, uv__server_io);
+  ev_io_start(EV_DEFAULT_ &tcp->read_watcher);
+
+  return 0;
+}
+
+
+void uv__finish_close(uv_handle_t* handle) {
+  assert(handle->flags & UV_CLOSING);
+  assert(!(handle->flags & UV_CLOSED));
+  handle->flags |= UV_CLOSED;
+
+  switch (handle->type) {
+    case UV_PREPARE:
+      assert(!ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher));
+      break;
+
+    case UV_CHECK:
+      assert(!ev_is_active(&((uv_check_t*)handle)->check_watcher));
+      break;
+
+    case UV_IDLE:
+      assert(!ev_is_active(&((uv_idle_t*)handle)->idle_watcher));
+      break;
+
+    case UV_ASYNC:
+      assert(!ev_is_active(&((uv_async_t*)handle)->async_watcher));
+      break;
+
+    case UV_TIMER:
+      assert(!ev_is_active(&((uv_timer_t*)handle)->timer_watcher));
+      break;
+
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+    {
+      uv_stream_t* stream;
+
+      stream = (uv_stream_t*)handle;
+
+      assert(!ev_is_active(&stream->read_watcher));
+      assert(!ev_is_active(&stream->write_watcher));
+
+      uv__close(stream->fd);
+      stream->fd = -1;
+
+      if (stream->accepted_fd >= 0) {
+        uv__close(stream->accepted_fd);
+        stream->accepted_fd = -1;
+      }
+      break;
+    }
+
+    case UV_PROCESS:
+      assert(!ev_is_active(&((uv_process_t*)handle)->child_watcher));
+      break;
+
+    default:
+      assert(0);
+      break;
+  }
+
+  ev_idle_stop(EV_DEFAULT_ &handle->next_watcher);
+
+  if (handle->close_cb) {
+    handle->close_cb(handle);
+  }
+
+  ev_unref(EV_DEFAULT_UC);
+}
+
+
+uv_write_t* uv_write_queue_head(uv_stream_t* stream) {
+  ngx_queue_t* q;
+  uv_write_t* req;
+
+  if (ngx_queue_empty(&stream->write_queue)) {
+    return NULL;
+  }
+
+  q = ngx_queue_head(&stream->write_queue);
+  if (!q) {
+    return NULL;
+  }
+
+  req = ngx_queue_data(q, struct uv_write_s, queue);
+  assert(req);
+
+  return req;
+}
+
+
+void uv__next(EV_P_ ev_idle* watcher, int revents) {
+  uv_handle_t* handle = watcher->data;
+  assert(watcher == &handle->next_watcher);
+  assert(revents == EV_IDLE);
+
+  /* For now this function is only to handle the closing event, but we might
+   * put more stuff here later.
+   */
+  assert(handle->flags & UV_CLOSING);
+  uv__finish_close(handle);
+}
+
+
+static void uv__drain(uv_stream_t* stream) {
+  uv_shutdown_t* req;
+
+  assert(!uv_write_queue_head(stream));
+  assert(stream->write_queue_size == 0);
+
+  ev_io_stop(EV_DEFAULT_ &stream->write_watcher);
+
+  /* Shutdown? */
+  if ((((uv_handle_t*)stream)->flags & UV_SHUTTING) &&
+      !(((uv_handle_t*)stream)->flags & UV_CLOSING) &&
+      !(((uv_handle_t*)stream)->flags & UV_SHUT)) {
+    assert(stream->shutdown_req);
+
+    req = stream->shutdown_req;
+
+    if (shutdown(stream->fd, SHUT_WR)) {
+      /* Error. Report it. User should call uv_close(). */
+      uv_err_new((uv_handle_t*)stream, errno);
+      if (req->cb) {
+        req->cb(req, -1);
+      }
+    } else {
+      uv_err_new((uv_handle_t*)stream, 0);
+      ((uv_handle_t*) stream)->flags |= UV_SHUT;
+      if (req->cb) {
+        req->cb(req, 0);
+      }
+    }
+  }
+}
+
+
+/* On success returns NULL. On error returns a pointer to the write request
+ * which had the error.
+ */
+static uv_write_t* uv__write(uv_stream_t* stream) {
+  uv_write_t* req;
+  struct iovec* iov;
+  int iovcnt;
+  ssize_t n;
+
+  assert(stream->fd >= 0);
+
+  /* TODO: should probably while(1) here until EAGAIN */
+
+  /* Get the request at the head of the queue. */
+  req = uv_write_queue_head(stream);
+  if (!req) {
+    assert(stream->write_queue_size == 0);
+    return NULL;
+  }
+
+  assert(req->handle == stream);
+
+  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
+   * because Windows's WSABUF is not an iovec.
+   */
+  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
+  iov = (struct iovec*) &(req->bufs[req->write_index]);
+  iovcnt = req->bufcnt - req->write_index;
+
+  /* Now do the actual writev. Note that we've been updating the pointers
+   * inside the iov each time we write. So there is no need to offset it.
+   */
+
+  do {
+    if (iovcnt == 1) {
+      n = write(stream->fd, iov[0].iov_base, iov[0].iov_len);
+    } else {
+      n = writev(stream->fd, iov, iovcnt);
+    }
+  }
+  while (n == -1 && errno == EINTR);
+
+  if (n < 0) {
+    if (errno != EAGAIN) {
+      /* Error */
+      uv_err_new((uv_handle_t*)stream, errno);
+      return req;
+    }
+  } else {
+    /* Successful write */
+
+    /* Update the counters. */
+    while (n >= 0) {
+      uv_buf_t* buf = &(req->bufs[req->write_index]);
+      size_t len = buf->len;
+
+      assert(req->write_index < req->bufcnt);
+
+      if ((size_t)n < len) {
+        buf->base += n;
+        buf->len -= n;
+        stream->write_queue_size -= n;
+        n = 0;
+
+        /* There is more to write. Break and ensure the watcher is pending. */
+        break;
+
+      } else {
+        /* Finished writing the buf at index req->write_index. */
+        req->write_index++;
+
+        assert((size_t)n >= len);
+        n -= len;
+
+        assert(stream->write_queue_size >= len);
+        stream->write_queue_size -= len;
+
+        if (req->write_index == req->bufcnt) {
+          /* Then we're done! */
+          assert(n == 0);
+
+          /* Pop the req off tcp->write_queue. */
+          ngx_queue_remove(&req->queue);
+          if (req->bufs != req->bufsml) {
+            free(req->bufs);
+          }
+          req->bufs = NULL;
+
+          /* Add it to the write_completed_queue where it will have its
+           * callback called in the near future.
+           * TODO: start trying to write the next request.
+           */
+          ngx_queue_insert_tail(&stream->write_completed_queue, &req->queue);
+          ev_feed_event(EV_DEFAULT_ &stream->write_watcher, EV_WRITE);
+          return NULL;
+        }
+      }
+    }
+  }
+
+  /* Either we've counted n down to zero or we've got EAGAIN. */
+  assert(n == 0 || n == -1);
+
+  /* We're not done. */
+  ev_io_start(EV_DEFAULT_ &stream->write_watcher);
+
+  return NULL;
+}
+
+
+static void uv__write_callbacks(uv_stream_t* stream) {
+  int callbacks_made = 0;
+  ngx_queue_t* q;
+  uv_write_t* req;
+
+  while (!ngx_queue_empty(&stream->write_completed_queue)) {
+    /* Pop a req off write_completed_queue. */
+    q = ngx_queue_head(&stream->write_completed_queue);
+    assert(q);
+    req = ngx_queue_data(q, struct uv_write_s, queue);
+    ngx_queue_remove(q);
+
+    /* NOTE: call callback AFTER freeing the request data. */
+    if (req->cb) {
+      req->cb(req, 0);
+    }
+
+    callbacks_made++;
+  }
+
+  assert(ngx_queue_empty(&stream->write_completed_queue));
+
+  /* Write queue drained. */
+  if (!uv_write_queue_head(stream)) {
+    uv__drain(stream);
+  }
+}
|
|
817
|
+
|
|
818
|
+
|
|
819
|
+
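/* Illustrative sketch (editor's addition, not part of the original file):
 * the same partial-write bookkeeping uv__write performs above, shown on a
 * plain iovec array. After a short writev(), the first unfinished buffer is
 * advanced in place and fully written buffers are dropped. The helper name
 * is hypothetical.
 */
#include <sys/uio.h>
#include <stddef.h>

static void example_advance_iov(struct iovec** piov, int* piovcnt, size_t n) {
  while (n > 0 && *piovcnt > 0) {
    struct iovec* iov = *piov;
    if (n < iov->iov_len) {
      /* Partially written buffer: shift its window and stop. */
      iov->iov_base = (char*) iov->iov_base + n;
      iov->iov_len -= n;
      n = 0;
    } else {
      /* This buffer is done; move on to the next one. */
      n -= iov->iov_len;
      (*piov)++;
      (*piovcnt)--;
    }
  }
}
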
static void uv__read(uv_stream_t* stream) {
  uv_buf_t buf;
  struct iovec* iov;
  ssize_t nread;

  /* XXX: Maybe instead of having UV_READING we just test if
   * tcp->read_cb is NULL or not?
   */
  while (stream->read_cb && ((uv_handle_t*)stream)->flags & UV_READING) {
    assert(stream->alloc_cb);
    buf = stream->alloc_cb(stream, 64 * 1024);

    assert(buf.len > 0);
    assert(buf.base);

    iov = (struct iovec*) &buf;

    do {
      nread = read(stream->fd, buf.base, buf.len);
    }
    while (nread == -1 && errno == EINTR);

    if (nread < 0) {
      /* Error */
      if (errno == EAGAIN) {
        /* Wait for the next one. */
        if (((uv_handle_t*)stream)->flags & UV_READING) {
          ev_io_start(EV_DEFAULT_UC_ &stream->read_watcher);
        }
        uv_err_new((uv_handle_t*)stream, EAGAIN);
        stream->read_cb(stream, 0, buf);
        return;
      } else {
        /* Error. User should call uv_close(). */
        uv_err_new((uv_handle_t*)stream, errno);
        stream->read_cb(stream, -1, buf);
        assert(!ev_is_active(&stream->read_watcher));
        return;
      }
    } else if (nread == 0) {
      /* EOF */
      uv_err_new_artificial((uv_handle_t*)stream, UV_EOF);
      ev_io_stop(EV_DEFAULT_UC_ &stream->read_watcher);
      stream->read_cb(stream, -1, buf);
      return;
    } else {
      /* Successful read */
      stream->read_cb(stream, nread, buf);
    }
  }
}


int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");
  assert(stream->fd >= 0);

  if (!(((uv_handle_t*)stream)->flags & UV_WRITABLE) ||
      ((uv_handle_t*)stream)->flags & UV_SHUT ||
      ((uv_handle_t*)stream)->flags & UV_CLOSED ||
      ((uv_handle_t*)stream)->flags & UV_CLOSING) {
    uv_err_new((uv_handle_t*)stream, EINVAL);
    return -1;
  }

  /* Initialize request */
  uv__req_init((uv_req_t*)req);
  req->handle = stream;
  req->cb = cb;

  stream->shutdown_req = req;
  req->type = UV_SHUTDOWN;

  ((uv_handle_t*)stream)->flags |= UV_SHUTTING;


  ev_io_start(EV_DEFAULT_UC_ &stream->write_watcher);

  return 0;
}

static void uv__stream_io(EV_P_ ev_io* watcher, int revents) {
  uv_stream_t* stream = watcher->data;

  assert(stream->type == UV_TCP ||
         stream->type == UV_NAMED_PIPE);
  assert(watcher == &stream->read_watcher ||
         watcher == &stream->write_watcher);
  assert(!(((uv_handle_t*)stream)->flags & UV_CLOSING));

  if (stream->connect_req) {
    uv__stream_connect(stream);
  } else {
    assert(revents & (EV_READ | EV_WRITE));
    assert(stream->fd >= 0);

    if (revents & EV_READ) {
      uv__read((uv_stream_t*)stream);
    }

    if (revents & EV_WRITE) {
      uv_write_t* req = uv__write(stream);
      if (req) {
        /* Error. Notify the user. */
        if (req->cb) {
          req->cb(req, -1);
        }
      } else {
        uv__write_callbacks(stream);
      }
    }
  }
}


/**
 * We get called here from directly following a call to connect(2).
 * In order to determine if we've errored out or succeeded, we must call
 * getsockopt.
 */
static void uv__stream_connect(uv_stream_t* stream) {
  int error;
  uv_connect_t* req = stream->connect_req;
  socklen_t errorsize = sizeof(int);

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
  assert(req);

  if (stream->delayed_error) {
    /* To smooth over the differences between unixes, errors that
     * were reported synchronously on the first connect can be delayed
     * until the next tick--which is now.
     */
    error = stream->delayed_error;
    stream->delayed_error = 0;
  } else {
    /* Normal situation: we need to get the socket error from the kernel. */
    assert(stream->fd >= 0);
    getsockopt(stream->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
  }

  if (!error) {
    ev_io_start(EV_DEFAULT_ &stream->read_watcher);

    /* Successful connection */
    stream->connect_req = NULL;
    if (req->cb) {
      req->cb(req, 0);
    }

  } else if (error == EINPROGRESS) {
    /* Still connecting. */
    return;
  } else {
    /* Error */
    uv_err_new((uv_handle_t*)stream, error);

    stream->connect_req = NULL;
    if (req->cb) {
      req->cb(req, -1);
    }
  }
}

static int uv__connect(uv_connect_t* req,
                       uv_stream_t* stream,
                       struct sockaddr* addr,
                       socklen_t addrlen,
                       uv_connect_cb cb) {

  int sockfd;
  int r;

  if (stream->fd <= 0) {
    if ((sockfd = uv__socket(addr->sa_family, SOCK_STREAM, 0)) == -1) {

    }

    if (sockfd < 0) {
      uv_err_new((uv_handle_t*)stream, errno);
      return -1;
    }

    if (uv__stream_open(stream, sockfd, UV_READABLE | UV_WRITABLE)) {
      uv__close(sockfd);
      return -2;
    }
  }

  uv__req_init((uv_req_t*)req);
  req->cb = cb;
  req->handle = stream;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  if (stream->connect_req) {
    uv_err_new((uv_handle_t*)stream, EALREADY);
    return -1;
  }

  if (stream->type != UV_TCP) {
    uv_err_new((uv_handle_t*)stream, ENOTSOCK);
    return -1;
  }

  stream->connect_req = req;

  do {
    r = connect(stream->fd, addr, addrlen);
  }
  while (r == -1 && errno == EINTR);

  stream->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait.
       */
      case ECONNREFUSED:
        stream->delayed_error = errno;
        break;

      default:
        uv_err_new((uv_handle_t*)stream, errno);
        return -1;
    }
  }

  assert(stream->write_watcher.data == stream);
  ev_io_start(EV_DEFAULT_ &stream->write_watcher);

  if (stream->delayed_error) {
    ev_feed_event(EV_DEFAULT_ &stream->write_watcher, EV_WRITE);
  }

  return 0;
}

int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   struct sockaddr_in address,
                   uv_connect_cb cb) {
  int saved_errno;
  int status;

  saved_errno = errno;
  status = -1;

  if (handle->type != UV_TCP) {
    uv_err_new((uv_handle_t*)handle, EINVAL);
    goto out;
  }

  if (address.sin_family != AF_INET) {
    uv_err_new((uv_handle_t*)handle, EINVAL);
    goto out;
  }

  status = uv__connect(req,
                       (uv_stream_t*)handle,
                       (struct sockaddr*)&address,
                       sizeof address,
                       cb);

out:
  errno = saved_errno;
  return status;
}


int uv_tcp_connect6(uv_connect_t* req,
                    uv_tcp_t* handle,
                    struct sockaddr_in6 address,
                    uv_connect_cb cb) {
  int saved_errno;
  int status;

  saved_errno = errno;
  status = -1;

  if (handle->type != UV_TCP) {
    uv_err_new((uv_handle_t*)handle, EINVAL);
    goto out;
  }

  if (address.sin6_family != AF_INET6) {
    uv_err_new((uv_handle_t*)handle, EINVAL);
    goto out;
  }

  status = uv__connect(req,
                       (uv_stream_t*)handle,
                       (struct sockaddr*)&address,
                       sizeof address,
                       cb);

out:
  errno = saved_errno;
  return status;
}


int uv_getsockname(uv_tcp_t* handle, struct sockaddr* name, int* namelen) {
  socklen_t socklen;
  int saved_errno;

  /* Don't clobber errno. */
  saved_errno = errno;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t)*namelen;

  if (getsockname(handle->fd, name, &socklen) == -1) {
    uv_err_new((uv_handle_t*)handle, errno);
  } else {
    *namelen = (int)socklen;
  }

  errno = saved_errno;
  return 0;
}

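/* Illustrative sketch (editor's addition, not part of the original file):
 * a minimal caller of uv_tcp_connect as defined above. It assumes a uv_tcp_t
 * already set up by the library's TCP init routine (uv_tcp_init, declared in
 * uv.h); the address is built with plain BSD socket calls, and the example_*
 * names are hypothetical.
 */
#include <arpa/inet.h>
#include <string.h>

static void example_on_connect(uv_connect_t* req, int status) {
  /* status is 0 on success, -1 on error (see uv__stream_connect above). */
  (void) req;
  (void) status;
}

static int example_connect(uv_tcp_t* tcp, uv_connect_t* req) {
  struct sockaddr_in addr;
  memset(&addr, 0, sizeof addr);
  addr.sin_family = AF_INET;
  addr.sin_port = htons(80);
  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
  return uv_tcp_connect(req, tcp, addr, example_on_connect);
}
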
static size_t uv__buf_count(uv_buf_t bufs[], int bufcnt) {
  size_t total = 0;
  int i;

  for (i = 0; i < bufcnt; i++) {
    total += bufs[i].len;
  }

  return total;
}


/* The buffers to be written must remain valid until the callback is called.
 * This is not required for the uv_buf_t array.
 */
int uv_write(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[], int bufcnt,
    uv_write_cb cb) {
  uv_stream_t* stream;
  int empty_queue;

  stream = (uv_stream_t*)handle;

  /* Initialize the req */
  uv__req_init((uv_req_t*) req);
  req->cb = cb;
  req->handle = handle;
  ngx_queue_init(&req->queue);

  assert((handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
      && "uv_write (unix) does not yet support other types of streams");

  empty_queue = (stream->write_queue_size == 0);

  if (stream->fd < 0) {
    uv_err_new((uv_handle_t*)stream, EBADF);
    return -1;
  }

  ngx_queue_init(&req->queue);
  req->type = UV_WRITE;


  if (bufcnt < UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  }
  else {
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;

  /*
   * fprintf(stderr, "cnt: %d bufs: %p bufsml: %p\n", bufcnt, req->bufs, req->bufsml);
   */

  req->write_index = 0;
  stream->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&stream->write_queue, &req->queue);

  assert(!ngx_queue_empty(&stream->write_queue));
  assert(stream->write_watcher.cb == uv__stream_io);
  assert(stream->write_watcher.data == stream);
  assert(stream->write_watcher.fd == stream->fd);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (empty_queue) {
    if (uv__write(stream)) {
      /* Error. uv_last_error has been set. */
      return -1;
    }
  }

  /* If the queue is now empty - we've flushed the request already. That
   * means we need to make the callback. The callback can only be done on a
   * fresh stack so we feed the event loop in order to service it.
   */
  if (ngx_queue_empty(&stream->write_queue)) {
    ev_feed_event(EV_DEFAULT_ &stream->write_watcher, EV_WRITE);
  } else {
    /* Otherwise there is data to write - so we should wait for the file
     * descriptor to become writable.
     */
    ev_io_start(EV_DEFAULT_ &stream->write_watcher);
  }

  return 0;
}

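/* Illustrative sketch (editor's addition, not part of the original file):
 * queueing a write with uv_write. Per the comment above, the bytes pointed
 * to by each uv_buf_t must stay valid until the callback runs; the uv_buf_t
 * array itself may live on the stack. The example_* names are hypothetical.
 */
static char example_payload[] = "hello\n";

static void example_on_write(uv_write_t* req, int status) {
  /* status is 0 when the request completed, -1 on a write error. */
  (void) req;
  (void) status;
}

static int example_write(uv_stream_t* stream, uv_write_t* req) {
  uv_buf_t bufs[1];
  bufs[0].base = example_payload;
  bufs[0].len = sizeof(example_payload) - 1;
  return uv_write(req, stream, bufs, 1, example_on_write);
}
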
void uv_ref() {
  ev_ref(EV_DEFAULT_UC);
}


void uv_unref() {
  ev_unref(EV_DEFAULT_UC);
}


void uv_update_time() {
  ev_now_update(EV_DEFAULT_UC);
}


int64_t uv_now() {
  return (int64_t)(ev_now(EV_DEFAULT_UC) * 1000);
}

int uv_read_start(uv_stream_t* stream, uv_alloc_cb alloc_cb, uv_read_cb read_cb) {
  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);

  /* The UV_READING flag is independent of the state of the tcp - it just
   * expresses the desired state of the user.
   */
  ((uv_handle_t*)stream)->flags |= UV_READING;

  /* TODO: try to do the read inline? */
  /* TODO: keep track of tcp state. If we've gotten a EOF then we should
   * not start the IO watcher.
   */
  assert(stream->fd >= 0);
  assert(alloc_cb);

  stream->read_cb = read_cb;
  stream->alloc_cb = alloc_cb;

  /* These should have been set by uv_tcp_init. */
  assert(stream->read_watcher.cb == uv__stream_io);

  ev_io_start(EV_DEFAULT_UC_ &stream->read_watcher);
  return 0;
}


int uv_read_stop(uv_stream_t* stream) {
  uv_tcp_t* tcp = (uv_tcp_t*)stream;

  ((uv_handle_t*)tcp)->flags &= ~UV_READING;

  ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher);
  tcp->read_cb = NULL;
  tcp->alloc_cb = NULL;
  return 0;
}


void uv__req_init(uv_req_t* req) {
  uv_counters()->req_init++;
  req->type = UV_UNKNOWN_REQ;
  req->data = NULL;
}

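/* Illustrative sketch (editor's addition, not part of the original file):
 * the callback pair expected by uv_read_start. The allocator returns a
 * uv_buf_t for each read; read_cb receives nread > 0 for data, 0 for an
 * EAGAIN-style "try again", and -1 for error or EOF (see uv__read above).
 * The example_* names are hypothetical.
 */
#include <stdlib.h>

static uv_buf_t example_alloc(uv_stream_t* stream, size_t suggested_size) {
  uv_buf_t buf;
  (void) stream;
  buf.base = malloc(suggested_size);
  buf.len = suggested_size;
  return buf;
}

static void example_on_read(uv_stream_t* stream, ssize_t nread, uv_buf_t buf) {
  (void) stream;
  if (nread > 0) {
    /* Consume buf.base[0..nread) here. */
  }
  free(buf.base);
}

/* Start reading: uv_read_start(stream, example_alloc, example_on_read); */
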
static void uv__prepare(EV_P_ ev_prepare* w, int revents) {
  uv_prepare_t* prepare = w->data;

  if (prepare->prepare_cb) {
    prepare->prepare_cb(prepare, 0);
  }
}


int uv_prepare_init(uv_prepare_t* prepare) {
  uv__handle_init((uv_handle_t*)prepare, UV_PREPARE);
  uv_counters()->prepare_init++;

  ev_prepare_init(&prepare->prepare_watcher, uv__prepare);
  prepare->prepare_watcher.data = prepare;

  prepare->prepare_cb = NULL;

  return 0;
}


int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb) {
  int was_active = ev_is_active(&prepare->prepare_watcher);

  prepare->prepare_cb = cb;

  ev_prepare_start(EV_DEFAULT_UC_ &prepare->prepare_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}


int uv_prepare_stop(uv_prepare_t* prepare) {
  int was_active = ev_is_active(&prepare->prepare_watcher);

  ev_prepare_stop(EV_DEFAULT_UC_ &prepare->prepare_watcher);

  if (was_active) {
    ev_ref(EV_DEFAULT_UC);
  }
  return 0;
}



static void uv__check(EV_P_ ev_check* w, int revents) {
  uv_check_t* check = w->data;

  if (check->check_cb) {
    check->check_cb(check, 0);
  }
}


int uv_check_init(uv_check_t* check) {
  uv__handle_init((uv_handle_t*)check, UV_CHECK);
  uv_counters()->check_init++;

  ev_check_init(&check->check_watcher, uv__check);
  check->check_watcher.data = check;

  check->check_cb = NULL;

  return 0;
}


int uv_check_start(uv_check_t* check, uv_check_cb cb) {
  int was_active = ev_is_active(&check->check_watcher);

  check->check_cb = cb;

  ev_check_start(EV_DEFAULT_UC_ &check->check_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}


int uv_check_stop(uv_check_t* check) {
  int was_active = ev_is_active(&check->check_watcher);

  ev_check_stop(EV_DEFAULT_UC_ &check->check_watcher);

  if (was_active) {
    ev_ref(EV_DEFAULT_UC);
  }

  return 0;
}


static void uv__idle(EV_P_ ev_idle* w, int revents) {
  uv_idle_t* idle = (uv_idle_t*)(w->data);

  if (idle->idle_cb) {
    idle->idle_cb(idle, 0);
  }
}



int uv_idle_init(uv_idle_t* idle) {
  uv__handle_init((uv_handle_t*)idle, UV_IDLE);
  uv_counters()->idle_init++;

  ev_idle_init(&idle->idle_watcher, uv__idle);
  idle->idle_watcher.data = idle;

  idle->idle_cb = NULL;

  return 0;
}


int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) {
  int was_active = ev_is_active(&idle->idle_watcher);

  idle->idle_cb = cb;
  ev_idle_start(EV_DEFAULT_UC_ &idle->idle_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}


int uv_idle_stop(uv_idle_t* idle) {
  int was_active = ev_is_active(&idle->idle_watcher);

  ev_idle_stop(EV_DEFAULT_UC_ &idle->idle_watcher);

  if (was_active) {
    ev_ref(EV_DEFAULT_UC);
  }

  return 0;
}


int uv_is_active(uv_handle_t* handle) {
  switch (handle->type) {
    case UV_TIMER:
      return ev_is_active(&((uv_timer_t*)handle)->timer_watcher);

    case UV_PREPARE:
      return ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher);

    case UV_CHECK:
      return ev_is_active(&((uv_check_t*)handle)->check_watcher);

    case UV_IDLE:
      return ev_is_active(&((uv_idle_t*)handle)->idle_watcher);

    default:
      return 1;
  }
}

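/* Illustrative sketch (editor's addition, not part of the original file):
 * driving a uv_idle_t with the init/start/stop trio above. The callback is
 * invoked as (handle, 0) on every loop iteration while the watcher is
 * active; stopping it from inside the callback is a common pattern. The
 * example_* names are hypothetical.
 */
static void example_on_idle(uv_idle_t* idle, int status) {
  (void) status;
  /* Do a small slice of background work, then stop when finished. */
  uv_idle_stop(idle);
}

static void example_idle_setup(uv_idle_t* idle) {
  uv_idle_init(idle);
  uv_idle_start(idle, example_on_idle);
}
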
static void uv__async(EV_P_ ev_async* w, int revents) {
  uv_async_t* async = w->data;

  if (async->async_cb) {
    async->async_cb(async, 0);
  }
}


int uv_async_init(uv_async_t* async, uv_async_cb async_cb) {
  uv__handle_init((uv_handle_t*)async, UV_ASYNC);
  uv_counters()->async_init++;

  ev_async_init(&async->async_watcher, uv__async);
  async->async_watcher.data = async;

  async->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(EV_DEFAULT_UC_ &async->async_watcher);
  ev_unref(EV_DEFAULT_UC);

  return 0;
}


int uv_async_send(uv_async_t* async) {
  ev_async_send(EV_DEFAULT_UC_ &async->async_watcher);
  return 0;
}

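/* Illustrative sketch (editor's addition, not part of the original file):
 * waking the loop from another thread with uv_async_send, which simply
 * forwards to libev's thread-safe ev_async_send above. The pthread worker
 * and the example_* names are assumptions made for demonstration only.
 */
#include <pthread.h>

static void example_on_async(uv_async_t* async, int status) {
  (void) async;
  (void) status;
  /* Runs on the event-loop thread after uv_async_send(). */
}

static void* example_worker(void* arg) {
  uv_async_t* async = arg;
  /* ... produce a result somewhere ... */
  uv_async_send(async);
  return NULL;
}

static void example_async_setup(uv_async_t* async) {
  pthread_t tid;
  uv_async_init(async, example_on_async);
  pthread_create(&tid, NULL, example_worker, async);
}
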
static void uv__timer_cb(EV_P_ ev_timer* w, int revents) {
  uv_timer_t* timer = w->data;

  if (!ev_is_active(w)) {
    ev_ref(EV_DEFAULT_UC);
  }

  if (timer->timer_cb) {
    timer->timer_cb(timer, 0);
  }
}


int uv_timer_init(uv_timer_t* timer) {
  uv__handle_init((uv_handle_t*)timer, UV_TIMER);
  uv_counters()->timer_init++;

  ev_init(&timer->timer_watcher, uv__timer_cb);
  timer->timer_watcher.data = timer;

  return 0;
}


int uv_timer_start(uv_timer_t* timer, uv_timer_cb cb, int64_t timeout,
    int64_t repeat) {
  if (ev_is_active(&timer->timer_watcher)) {
    return -1;
  }

  timer->timer_cb = cb;
  ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0);
  ev_timer_start(EV_DEFAULT_UC_ &timer->timer_watcher);
  ev_unref(EV_DEFAULT_UC);
  return 0;
}


int uv_timer_stop(uv_timer_t* timer) {
  if (ev_is_active(&timer->timer_watcher)) {
    ev_ref(EV_DEFAULT_UC);
  }

  ev_timer_stop(EV_DEFAULT_UC_ &timer->timer_watcher);
  return 0;
}


int uv_timer_again(uv_timer_t* timer) {
  if (!ev_is_active(&timer->timer_watcher)) {
    uv_err_new((uv_handle_t*)timer, EINVAL);
    return -1;
  }

  ev_timer_again(EV_DEFAULT_UC_ &timer->timer_watcher);
  return 0;
}

void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat) {
  assert(timer->type == UV_TIMER);
  timer->timer_watcher.repeat = repeat / 1000.0;
}

int64_t uv_timer_get_repeat(uv_timer_t* timer) {
  assert(timer->type == UV_TIMER);
  return (int64_t)(1000 * timer->timer_watcher.repeat);
}

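/* Illustrative sketch (editor's addition, not part of the original file):
 * timeout and repeat are given in milliseconds and converted to libev's
 * seconds above (timeout / 1000.0). This arms a 500 ms one-shot timer; the
 * example_* names are hypothetical.
 */
static void example_on_timer(uv_timer_t* timer, int status) {
  (void) status;
  uv_timer_stop(timer);
}

static void example_timer_setup(uv_timer_t* timer) {
  uv_timer_init(timer);
  uv_timer_start(timer, example_on_timer, 500, 0);  /* 500 ms, no repeat */
}
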
/*
 * This is called once per second by ares_data.timer. It is used to
 * constantly callback into c-ares for possibly processing timeouts.
 */
static void uv__ares_timeout(EV_P_ struct ev_timer* watcher, int revents) {
  assert(watcher == &ares_data.timer);
  assert(revents == EV_TIMER);
  assert(!uv_ares_handles_empty());
  ares_process_fd(ares_data.channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}


static void uv__ares_io(EV_P_ struct ev_io* watcher, int revents) {
  /* Reset the idle timer */
  ev_timer_again(EV_A_ &ares_data.timer);

  /* Process DNS responses */
  ares_process_fd(ares_data.channel,
      revents & EV_READ ? watcher->fd : ARES_SOCKET_BAD,
      revents & EV_WRITE ? watcher->fd : ARES_SOCKET_BAD);
}


/* Allocates and returns a new uv_ares_task_t */
static uv_ares_task_t* uv__ares_task_create(int fd) {
  uv_ares_task_t* h = malloc(sizeof(uv_ares_task_t));

  if (h == NULL) {
    uv_fatal_error(ENOMEM, "malloc");
  }

  h->sock = fd;

  ev_io_init(&h->read_watcher, uv__ares_io, fd, EV_READ);
  ev_io_init(&h->write_watcher, uv__ares_io, fd, EV_WRITE);

  h->read_watcher.data = h;
  h->write_watcher.data = h;

  return h;
}


/* Callback from ares when socket operation is started */
static void uv__ares_sockstate_cb(void* data, ares_socket_t sock,
    int read, int write) {
  uv_ares_task_t* h = uv_find_ares_handle(sock);

  if (read || write) {
    if (!h) {
      /* New socket */

      /* If this is the first socket then start the timer. */
      if (!ev_is_active(&ares_data.timer)) {
        assert(uv_ares_handles_empty());
        ev_timer_again(EV_DEFAULT_UC_ &ares_data.timer);
      }

      h = uv__ares_task_create(sock);
      uv_add_ares_handle(h);
    }

    if (read) {
      ev_io_start(EV_DEFAULT_UC_ &h->read_watcher);
    } else {
      ev_io_stop(EV_DEFAULT_UC_ &h->read_watcher);
    }

    if (write) {
      ev_io_start(EV_DEFAULT_UC_ &h->write_watcher);
    } else {
      ev_io_stop(EV_DEFAULT_UC_ &h->write_watcher);
    }

  } else {
    /*
     * read == 0 and write == 0 this is c-ares's way of notifying us that
     * the socket is now closed. We must free the data associated with
     * socket.
     */
    assert(h && "When an ares socket is closed we should have a handle for it");

    ev_io_stop(EV_DEFAULT_UC_ &h->read_watcher);
    ev_io_stop(EV_DEFAULT_UC_ &h->write_watcher);

    uv_remove_ares_handle(h);
    free(h);

    if (uv_ares_handles_empty()) {
      ev_timer_stop(EV_DEFAULT_UC_ &ares_data.timer);
    }
  }
}


/* c-ares integration initialize and terminate */
/* TODO: share this with windows? */
int uv_ares_init_options(ares_channel *channelptr,
    struct ares_options *options,
    int optmask) {
  int rc;

  /* only allow single init at a time */
  if (ares_data.channel != NULL) {
    uv_err_new_artificial(NULL, UV_EALREADY);
    return -1;
  }

  /* set our callback as an option */
  options->sock_state_cb = uv__ares_sockstate_cb;
  options->sock_state_cb_data = &ares_data;
  optmask |= ARES_OPT_SOCK_STATE_CB;

  /* We do the call to ares_init_option for caller. */
  rc = ares_init_options(channelptr, options, optmask);

  /* if success, save channel */
  if (rc == ARES_SUCCESS) {
    ares_data.channel = *channelptr;
  }

  /*
   * Initialize the timeout timer. The timer won't be started until the
   * first socket is opened.
   */
  ev_init(&ares_data.timer, uv__ares_timeout);
  ares_data.timer.repeat = 1.0;

  return rc;
}


/* TODO share this with windows? */
void uv_ares_destroy(ares_channel channel) {
  /* only allow destroy if did init */
  if (ares_data.channel != NULL) {
    ev_timer_stop(EV_DEFAULT_UC_ &ares_data.timer);
    ares_destroy(channel);
    ares_data.channel = NULL;
  }
}

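/* Illustrative sketch (editor's addition, not part of the original file):
 * handing a c-ares channel to the integration above. The sock_state_cb
 * option is filled in by uv_ares_init_options itself; the caller supplies
 * only whatever other ares options it needs. The example_* names are
 * hypothetical.
 */
#include <string.h>

static ares_channel example_channel;

static int example_ares_setup(void) {
  struct ares_options options;
  int rc;
  memset(&options, 0, sizeof options);
  rc = uv_ares_init_options(&example_channel, &options, 0);
  /* ... issue queries on example_channel via the c-ares API ... */
  return rc;
}

static void example_ares_teardown(void) {
  uv_ares_destroy(example_channel);
}
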
static int uv_getaddrinfo_done(eio_req* req) {
  uv_getaddrinfo_t* handle = req->data;

  uv_unref();

  free(handle->hints);
  free(handle->service);
  free(handle->hostname);

  if (handle->retcode != 0) {
    /* TODO how to display gai error strings? */
    uv_err_new(NULL, handle->retcode);
  }

  handle->cb(handle, handle->retcode, handle->res);

  freeaddrinfo(handle->res);
  handle->res = NULL;

  return 0;
}


static void getaddrinfo_thread_proc(eio_req *req) {
  uv_getaddrinfo_t* handle = req->data;

  handle->retcode = getaddrinfo(handle->hostname,
      handle->service,
      handle->hints,
      &handle->res);
}


/* stub implementation of uv_getaddrinfo */
int uv_getaddrinfo(uv_getaddrinfo_t* handle,
    uv_getaddrinfo_cb cb,
    const char* hostname,
    const char* service,
    const struct addrinfo* hints) {
  eio_req* req;
  uv_eio_init();

  if (handle == NULL || cb == NULL ||
      (hostname == NULL && service == NULL)) {
    uv_err_new_artificial(NULL, UV_EINVAL);
    return -1;
  }

  memset(handle, 0, sizeof(uv_getaddrinfo_t));

  /* TODO don't alloc so much. */

  if (hints) {
    handle->hints = malloc(sizeof(struct addrinfo));
    memcpy(&handle->hints, hints, sizeof(struct addrinfo));
  }

  /* TODO security! check lengths, check return values. */

  handle->cb = cb;
  handle->hostname = hostname ? strdup(hostname) : NULL;
  handle->service = service ? strdup(service) : NULL;

  /* TODO check handle->hostname == NULL */
  /* TODO check handle->service == NULL */

  uv_ref();

  req = eio_custom(getaddrinfo_thread_proc, EIO_PRI_DEFAULT,
      uv_getaddrinfo_done, handle);
  assert(req);
  assert(req->data == handle);

  return 0;
}

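/* Illustrative sketch (editor's addition, not part of the original file):
 * resolving a host with the eio-based uv_getaddrinfo above. The callback
 * receives the getaddrinfo() return code and the result list, which
 * uv_getaddrinfo_done frees right after the callback returns, so the
 * pointer must not be kept. The example_* names are hypothetical.
 */
#include <netdb.h>

static void example_on_resolved(uv_getaddrinfo_t* handle, int status,
    struct addrinfo* res) {
  (void) handle;
  if (status == 0) {
    /* Inspect res here; do not keep the pointer after returning. */
    (void) res;
  }
}

static int example_resolve(uv_getaddrinfo_t* handle) {
  return uv_getaddrinfo(handle, example_on_resolved, "localhost", "80", NULL);
}
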
int uv_pipe_init(uv_pipe_t* handle) {
  memset(handle, 0, sizeof *handle);

  uv__handle_init((uv_handle_t*)handle, UV_NAMED_PIPE);
  uv_counters()->pipe_init++;

  handle->type = UV_NAMED_PIPE;
  handle->pipe_fname = NULL; /* Only set by listener. */

  ev_init(&handle->write_watcher, uv__stream_io);
  ev_init(&handle->read_watcher, uv__stream_io);
  handle->write_watcher.data = handle;
  handle->read_watcher.data = handle;
  handle->accepted_fd = -1;
  handle->fd = -1;

  ngx_queue_init(&handle->write_completed_queue);
  ngx_queue_init(&handle->write_queue);

  return 0;
}


int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
  struct sockaddr_un sun;
  const char* pipe_fname;
  int saved_errno;
  int locked;
  int sockfd;
  int status;
  int bound;

  saved_errno = errno;
  pipe_fname = NULL;
  sockfd = -1;
  status = -1;
  bound = 0;

  /* Already bound? */
  if (handle->fd >= 0) {
    uv_err_new_artificial((uv_handle_t*)handle, UV_EINVAL);
    goto out;
  }

  /* Make a copy of the file name, it outlives this function's scope. */
  if ((pipe_fname = strdup(name)) == NULL) {
    uv_err_new((uv_handle_t*)handle, ENOMEM);
    goto out;
  }

  /* We've got a copy, don't touch the original any more. */
  name = NULL;

  if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
    uv_err_new((uv_handle_t*)handle, errno);
    goto out;
  }

  memset(&sun, 0, sizeof sun);
  uv__strlcpy(sun.sun_path, pipe_fname, sizeof(sun.sun_path));
  sun.sun_family = AF_UNIX;

  if (bind(sockfd, (struct sockaddr*)&sun, sizeof sun) == -1) {
    /* On EADDRINUSE:
     *
     * We hold the file lock so there is no other process listening
     * on the socket. Ergo, it's stale - remove it.
     *
     * This assumes that the other process uses locking too
     * but that's a good enough assumption for now.
     */
    if (errno != EADDRINUSE
        || unlink(pipe_fname) == -1
        || bind(sockfd, (struct sockaddr*)&sun, sizeof sun) == -1) {
      /* Convert ENOENT to EACCES for compatibility with Windows. */
      uv_err_new((uv_handle_t*)handle, (errno == ENOENT) ? EACCES : errno);
      goto out;
    }
  }
  bound = 1;

  /* Success. */
  handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
  handle->fd = sockfd;
  status = 0;

out:
  /* Clean up on error. */
  if (status) {
    if (bound) {
      /* unlink() before close() to avoid races. */
      assert(pipe_fname != NULL);
      unlink(pipe_fname);
    }
    uv__close(sockfd);

    free((void*)pipe_fname);
  }

  errno = saved_errno;
  return status;
}


static int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
  int saved_errno;
  int status;

  saved_errno = errno;
  status = -1;

  if (handle->fd == -1) {
    uv_err_new_artificial((uv_handle_t*)handle, UV_EINVAL);
    goto out;
  }
  assert(handle->fd >= 0);

  if ((status = listen(handle->fd, backlog)) == -1) {
    uv_err_new((uv_handle_t*)handle, errno);
  } else {
    handle->connection_cb = cb;
    ev_io_init(&handle->read_watcher, uv__pipe_accept, handle->fd, EV_READ);
    ev_io_start(EV_DEFAULT_ &handle->read_watcher);
  }

out:
  errno = saved_errno;
  return status;
}


static int uv_pipe_cleanup(uv_pipe_t* handle) {
  int saved_errno;
  int status;

  saved_errno = errno;
  status = -1;

  if (handle->pipe_fname) {
    /*
     * Unlink the file system entity before closing the file descriptor.
     * Doing it the other way around introduces a race where our process
     * unlinks a socket with the same name that's just been created by
     * another thread or process.
     *
     * This is less of an issue now that we attach a file lock
     * to the socket but it's still a best practice.
     */
    unlink(handle->pipe_fname);
    free((void*)handle->pipe_fname);
  }

  errno = saved_errno;
  return status;
}


int uv_pipe_connect(uv_connect_t* req,
    uv_pipe_t* handle,
    const char* name,
    uv_connect_cb cb) {
  struct sockaddr_un sun;
  int saved_errno;
  int sockfd;
  int status;
  int r;

  saved_errno = errno;
  sockfd = -1;
  status = -1;

  if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
    uv_err_new((uv_handle_t*)handle, errno);
    goto out;
  }

  memset(&sun, 0, sizeof sun);
  uv__strlcpy(sun.sun_path, name, sizeof(sun.sun_path));
  sun.sun_family = AF_UNIX;

  /* We don't check for EINPROGRESS. Think about it: the socket
   * is either there or not.
   */
  do {
    r = connect(sockfd, (struct sockaddr*)&sun, sizeof sun);
  }
  while (r == -1 && errno == EINTR);

  if (r == -1) {
    uv_err_new((uv_handle_t*)handle, errno);
    uv__close(sockfd);
    goto out;
  }

  handle->fd = sockfd;
  ev_io_init(&handle->read_watcher, uv__stream_io, sockfd, EV_READ);
  ev_io_init(&handle->write_watcher, uv__stream_io, sockfd, EV_WRITE);
  ev_io_start(EV_DEFAULT_ &handle->read_watcher);
  ev_io_start(EV_DEFAULT_ &handle->write_watcher);

  status = 0;

out:
  handle->delayed_error = status; /* Passed to callback. */
  handle->connect_req = req;
  req->handle = (uv_stream_t*)handle;
  req->type = UV_CONNECT;
  req->cb = cb;
  ngx_queue_init(&req->queue);

  /* Run callback on next tick. */
  ev_feed_event(EV_DEFAULT_ &handle->read_watcher, EV_CUSTOM);
  assert(ev_is_pending(&handle->read_watcher));

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors.
   */
  errno = saved_errno;
  return 0;
}


/* TODO merge with uv__server_io()? */
static void uv__pipe_accept(EV_P_ ev_io* watcher, int revents) {
  struct sockaddr_un sun;
  uv_pipe_t* pipe;
  int saved_errno;
  int sockfd;

  saved_errno = errno;
  pipe = watcher->data;

  assert(pipe->type == UV_NAMED_PIPE);
  assert(pipe->pipe_fname != NULL);

  sockfd = uv__accept(pipe->fd, (struct sockaddr *)&sun, sizeof sun);
  if (sockfd == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK) {
      assert(0 && "EAGAIN on uv__accept(pipefd)");
    } else {
      uv_err_new((uv_handle_t*)pipe, errno);
    }
  } else {
    pipe->accepted_fd = sockfd;
    pipe->connection_cb((uv_stream_t*)pipe, 0);
    if (pipe->accepted_fd == sockfd) {
      /* The user hasn't yet called uv_accept() */
      ev_io_stop(EV_DEFAULT_ &pipe->read_watcher);
    }
  }

  errno = saved_errno;
}

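/* Illustrative sketch (editor's addition, not part of the original file):
 * connecting to a Unix-domain socket path with the pipe API above. Per the
 * comment in uv_pipe_connect, the function always returns 0 and errors are
 * delivered through the connect callback's status on the next tick. The
 * socket path and example_* names are hypothetical.
 */
static void example_on_pipe_connect(uv_connect_t* req, int status) {
  (void) req;
  (void) status;  /* 0 on success, -1 if connect() failed. */
}

static int example_pipe_connect(uv_pipe_t* pipe_handle, uv_connect_t* req) {
  uv_pipe_init(pipe_handle);
  return uv_pipe_connect(req, pipe_handle, "/tmp/example.sock",
      example_on_pipe_connect);
}
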
/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
static int uv__socket(int domain, int type, int protocol) {
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  return socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
#else
  int sockfd;

  if ((sockfd = socket(domain, type, protocol)) == -1) {
    return -1;
  }

  if (uv__nonblock(sockfd, 1) == -1 || uv__cloexec(sockfd, 1) == -1) {
    uv__close(sockfd);
    return -1;
  }

  return sockfd;
#endif
}


static int uv__accept(int sockfd, struct sockaddr* saddr, socklen_t slen) {
  int peerfd;

  do {
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
    peerfd = accept4(sockfd, saddr, &slen, SOCK_NONBLOCK | SOCK_CLOEXEC);
#else
    if ((peerfd = accept(sockfd, saddr, &slen)) != -1) {
      if (uv__cloexec(peerfd, 1) == -1 || uv__nonblock(peerfd, 1) == -1) {
        uv__close(peerfd);
        return -1;
      }
    }
#endif
  }
  while (peerfd == -1 && errno == EINTR);

  return peerfd;
}


static int uv__close(int fd) {
  int status;

  /*
   * Retry on EINTR. You may think this is academic but on linux
   * and probably other Unices too, close(2) is interruptible.
   * Failing to handle EINTR is a common source of fd leaks.
   */
  do {
    status = close(fd);
  }
  while (status == -1 && errno == EINTR);

  return status;
}


static int uv__nonblock(int fd, int set) {
  int flags;

  if ((flags = fcntl(fd, F_GETFL)) == -1) {
    return -1;
  }

  if (set) {
    flags |= O_NONBLOCK;
  } else {
    flags &= ~O_NONBLOCK;
  }

  if (fcntl(fd, F_SETFL, flags) == -1) {
    return -1;
  }

  return 0;
}


static int uv__cloexec(int fd, int set) {
  int flags;

  if ((flags = fcntl(fd, F_GETFD)) == -1) {
    return -1;
  }

  if (set) {
    flags |= FD_CLOEXEC;
  } else {
    flags &= ~FD_CLOEXEC;
  }

  if (fcntl(fd, F_SETFD, flags) == -1) {
    return -1;
  }

  return 0;
}


/* TODO move to uv-common.c? */
size_t uv__strlcpy(char* dst, const char* src, size_t size) {
  const char *org;

  if (size == 0) {
    return 0;
  }

  org = src;
  while (size > 1) {
    if ((*dst++ = *src++) == '\0') {
      return org - src;
    }
  }
  *dst = '\0';

  return src - org;
}

uv_stream_t* uv_std_handle(uv_std_type type) {
  assert(0 && "implement me");
  return NULL;
}


static void uv__chld(EV_P_ ev_child* watcher, int revents) {
  int status = watcher->rstatus;
  int exit_status = 0;
  int term_signal = 0;
  uv_process_t *process = watcher->data;

  assert(&process->child_watcher == watcher);
  assert(revents & EV_CHILD);

  ev_child_stop(EV_A_ &process->child_watcher);

  if (WIFEXITED(status)) {
    exit_status = WEXITSTATUS(status);
  }

  if (WIFSIGNALED(status)) {
    term_signal = WTERMSIG(status);
  }

  if (process->exit_cb) {
    process->exit_cb(process, exit_status, term_signal);
  }
}


int uv_spawn(uv_process_t* process, uv_process_options_t options) {
  /*
   * Save environ in the case that we get it clobbered
   * by the child process.
   */
  char** save_our_env = environ;
  int stdin_pipe[2] = { -1, -1 };
  int stdout_pipe[2] = { -1, -1 };
  int stderr_pipe[2] = { -1, -1 };
  int signal_pipe[2] = { -1, -1 };
  struct pollfd pfd;
  int status;
  pid_t pid;

  uv__handle_init((uv_handle_t*)process, UV_PROCESS);
  uv_counters()->process_init++;

  process->exit_cb = options.exit_cb;

  if (options.stdin_stream) {
    if (options.stdin_stream->type != UV_NAMED_PIPE) {
      errno = EINVAL;
      goto error;
    }

    if (pipe(stdin_pipe) < 0) {
      goto error;
    }
  }

  if (options.stdout_stream) {
    if (options.stdout_stream->type != UV_NAMED_PIPE) {
      errno = EINVAL;
      goto error;
    }

    if (pipe(stdout_pipe) < 0) {
      goto error;
    }
  }

  if (options.stderr_stream) {
    if (options.stderr_stream->type != UV_NAMED_PIPE) {
      errno = EINVAL;
      goto error;
    }

    if (pipe(stderr_pipe) < 0) {
      goto error;
    }
  }

  /* This pipe is used by the parent to wait until
   * the child has called `execve()`. We need this
   * to avoid the following race condition:
   *
   *   if ((pid = fork()) > 0) {
   *     kill(pid, SIGTERM);
   *   }
   *   else if (pid == 0) {
   *     execve("/bin/cat", argp, envp);
   *   }
   *
   * The parent sends a signal immediately after forking.
   * Since the child may not have called `execve()` yet,
   * there is no telling what process receives the signal,
   * our fork or /bin/cat.
   *
   * To avoid ambiguity, we create a pipe with both ends
   * marked close-on-exec. Then, after the call to `fork()`,
   * the parent polls the read end until it sees POLLHUP.
   */
#ifdef HAVE_PIPE2
  if (pipe2(signal_pipe, O_CLOEXEC | O_NONBLOCK) < 0) {
    goto error;
  }
#else
  if (pipe(signal_pipe) < 0) {
    goto error;
  }
  uv__cloexec(signal_pipe[0], 1);
  uv__cloexec(signal_pipe[1], 1);
  uv__nonblock(signal_pipe[0], 1);
  uv__nonblock(signal_pipe[1], 1);
#endif

  pid = fork();

  if (pid == -1) {
    uv__close(signal_pipe[0]);
    uv__close(signal_pipe[1]);
    environ = save_our_env;
    goto error;
  }

  if (pid == 0) {
    if (stdin_pipe[0] >= 0) {
      uv__close(stdin_pipe[1]);
      dup2(stdin_pipe[0], STDIN_FILENO);
    }

    if (stdout_pipe[1] >= 0) {
      uv__close(stdout_pipe[0]);
      dup2(stdout_pipe[1], STDOUT_FILENO);
    }

    if (stderr_pipe[1] >= 0) {
      uv__close(stderr_pipe[0]);
      dup2(stderr_pipe[1], STDERR_FILENO);
    }

    if (options.cwd && chdir(options.cwd)) {
      perror("chdir()");
      _exit(127);
    }

    environ = options.env;

    execvp(options.file, options.args);
    perror("execvp()");
    _exit(127);
    /* Execution never reaches here. */
  }

  /* Parent. */

  /* Restore environment. */
  environ = save_our_env;

  /* POLLHUP signals child has exited or execve()'d. */
  uv__close(signal_pipe[1]);
  do {
    pfd.fd = signal_pipe[0];
    pfd.events = POLLIN|POLLHUP;
    pfd.revents = 0;
    errno = 0, status = poll(&pfd, 1, -1);
  }
  while (status == -1 && (errno == EINTR || errno == ENOMEM));

  uv__close(signal_pipe[0]);

  assert((status == 1)
      && "poll() on pipe read end failed");
  assert((pfd.revents & POLLHUP) == POLLHUP
      && "no POLLHUP on pipe read end");

  process->pid = pid;

  ev_child_init(&process->child_watcher, uv__chld, pid, 0);
  ev_child_start(EV_DEFAULT_UC_ &process->child_watcher);
  process->child_watcher.data = process;

  if (stdin_pipe[1] >= 0) {
    assert(options.stdin_stream);
    assert(stdin_pipe[0] >= 0);
    uv__close(stdin_pipe[0]);
    uv__nonblock(stdin_pipe[1], 1);
    uv__stream_open((uv_stream_t*)options.stdin_stream, stdin_pipe[1],
        UV_WRITABLE);
  }

  if (stdout_pipe[0] >= 0) {
    assert(options.stdout_stream);
    assert(stdout_pipe[1] >= 0);
    uv__close(stdout_pipe[1]);
    uv__nonblock(stdout_pipe[0], 1);
    uv__stream_open((uv_stream_t*)options.stdout_stream, stdout_pipe[0],
        UV_READABLE);
  }

  if (stderr_pipe[0] >= 0) {
    assert(options.stderr_stream);
    assert(stderr_pipe[1] >= 0);
    uv__close(stderr_pipe[1]);
    uv__nonblock(stderr_pipe[0], 1);
    uv__stream_open((uv_stream_t*)options.stderr_stream, stderr_pipe[0],
        UV_READABLE);
  }

  return 0;

error:
  uv_err_new((uv_handle_t*)process, errno);
  uv__close(stdin_pipe[0]);
  uv__close(stdin_pipe[1]);
  uv__close(stdout_pipe[0]);
  uv__close(stdout_pipe[1]);
  uv__close(stderr_pipe[0]);
  uv__close(stderr_pipe[1]);
  return -1;
}


int uv_process_kill(uv_process_t* process, int signum) {
  int r = kill(process->pid, signum);

  if (r) {
    uv_err_new((uv_handle_t*)process, errno);
    return -1;
  } else {
    return 0;
  }
}
