noderb 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (424)
  1. data/LICENSE +19 -0
  2. data/README.md +25 -0
  3. data/ext/noderb_extension/extconf.rb +11 -0
  4. data/ext/noderb_extension/libuv/AUTHORS +11 -0
  5. data/ext/noderb_extension/libuv/LICENSE +48 -0
  6. data/ext/noderb_extension/libuv/Makefile +119 -0
  7. data/ext/noderb_extension/libuv/README +45 -0
  8. data/ext/noderb_extension/libuv/build/all.gyp +254 -0
  9. data/ext/noderb_extension/libuv/build/common.gypi +13 -0
  10. data/ext/noderb_extension/libuv/build/gyp_uv +43 -0
  11. data/ext/noderb_extension/libuv/config-mingw.mk +67 -0
  12. data/ext/noderb_extension/libuv/config-unix.mk +121 -0
  13. data/ext/noderb_extension/libuv/create-msvs-files.bat +14 -0
  14. data/ext/noderb_extension/libuv/deps/pthread-win32/ANNOUNCE +482 -0
  15. data/ext/noderb_extension/libuv/deps/pthread-win32/BUGS +141 -0
  16. data/ext/noderb_extension/libuv/deps/pthread-win32/Bmakefile +268 -0
  17. data/ext/noderb_extension/libuv/deps/pthread-win32/CONTRIBUTORS +140 -0
  18. data/ext/noderb_extension/libuv/deps/pthread-win32/COPYING +150 -0
  19. data/ext/noderb_extension/libuv/deps/pthread-win32/COPYING.LIB +504 -0
  20. data/ext/noderb_extension/libuv/deps/pthread-win32/ChangeLog +5194 -0
  21. data/ext/noderb_extension/libuv/deps/pthread-win32/FAQ +451 -0
  22. data/ext/noderb_extension/libuv/deps/pthread-win32/GNUmakefile +593 -0
  23. data/ext/noderb_extension/libuv/deps/pthread-win32/MAINTAINERS +4 -0
  24. data/ext/noderb_extension/libuv/deps/pthread-win32/Makefile +516 -0
  25. data/ext/noderb_extension/libuv/deps/pthread-win32/NEWS +1245 -0
  26. data/ext/noderb_extension/libuv/deps/pthread-win32/Nmakefile +24 -0
  27. data/ext/noderb_extension/libuv/deps/pthread-win32/Nmakefile.tests +260 -0
  28. data/ext/noderb_extension/libuv/deps/pthread-win32/PROGRESS +4 -0
  29. data/ext/noderb_extension/libuv/deps/pthread-win32/README +601 -0
  30. data/ext/noderb_extension/libuv/deps/pthread-win32/README.Borland +57 -0
  31. data/ext/noderb_extension/libuv/deps/pthread-win32/README.CV +3036 -0
  32. data/ext/noderb_extension/libuv/deps/pthread-win32/README.NONPORTABLE +783 -0
  33. data/ext/noderb_extension/libuv/deps/pthread-win32/README.Watcom +62 -0
  34. data/ext/noderb_extension/libuv/deps/pthread-win32/README.WinCE +6 -0
  35. data/ext/noderb_extension/libuv/deps/pthread-win32/TODO +7 -0
  36. data/ext/noderb_extension/libuv/deps/pthread-win32/WinCE-PORT +222 -0
  37. data/ext/noderb_extension/libuv/deps/pthread-win32/attr.c +53 -0
  38. data/ext/noderb_extension/libuv/deps/pthread-win32/autostatic.c +69 -0
  39. data/ext/noderb_extension/libuv/deps/pthread-win32/barrier.c +47 -0
  40. data/ext/noderb_extension/libuv/deps/pthread-win32/build/all.gyp +207 -0
  41. data/ext/noderb_extension/libuv/deps/pthread-win32/builddmc.bat +9 -0
  42. data/ext/noderb_extension/libuv/deps/pthread-win32/cancel.c +44 -0
  43. data/ext/noderb_extension/libuv/deps/pthread-win32/cleanup.c +148 -0
  44. data/ext/noderb_extension/libuv/deps/pthread-win32/condvar.c +50 -0
  45. data/ext/noderb_extension/libuv/deps/pthread-win32/config.h +153 -0
  46. data/ext/noderb_extension/libuv/deps/pthread-win32/context.h +74 -0
  47. data/ext/noderb_extension/libuv/deps/pthread-win32/create.c +308 -0
  48. data/ext/noderb_extension/libuv/deps/pthread-win32/dll.c +92 -0
  49. data/ext/noderb_extension/libuv/deps/pthread-win32/errno.c +94 -0
  50. data/ext/noderb_extension/libuv/deps/pthread-win32/exit.c +44 -0
  51. data/ext/noderb_extension/libuv/deps/pthread-win32/fork.c +39 -0
  52. data/ext/noderb_extension/libuv/deps/pthread-win32/global.c +107 -0
  53. data/ext/noderb_extension/libuv/deps/pthread-win32/implement.h +944 -0
  54. data/ext/noderb_extension/libuv/deps/pthread-win32/misc.c +50 -0
  55. data/ext/noderb_extension/libuv/deps/pthread-win32/mutex.c +62 -0
  56. data/ext/noderb_extension/libuv/deps/pthread-win32/need_errno.h +145 -0
  57. data/ext/noderb_extension/libuv/deps/pthread-win32/nonportable.c +47 -0
  58. data/ext/noderb_extension/libuv/deps/pthread-win32/private.c +54 -0
  59. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.c +66 -0
  60. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.dsp +142 -0
  61. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.dsw +29 -0
  62. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread.h +1368 -0
  63. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_destroy.c +79 -0
  64. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getdetachstate.c +86 -0
  65. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getinheritsched.c +51 -0
  66. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getschedparam.c +52 -0
  67. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getschedpolicy.c +61 -0
  68. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getscope.c +54 -0
  69. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getstackaddr.c +97 -0
  70. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_getstacksize.c +100 -0
  71. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_init.c +117 -0
  72. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setdetachstate.c +91 -0
  73. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setinheritsched.c +57 -0
  74. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setschedparam.c +63 -0
  75. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setschedpolicy.c +55 -0
  76. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setscope.c +62 -0
  77. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setstackaddr.c +97 -0
  78. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_attr_setstacksize.c +110 -0
  79. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_destroy.c +103 -0
  80. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_init.c +69 -0
  81. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrier_wait.c +104 -0
  82. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_destroy.c +83 -0
  83. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_getpshared.c +95 -0
  84. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_init.c +85 -0
  85. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_barrierattr_setpshared.c +119 -0
  86. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cancel.c +189 -0
  87. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_destroy.c +253 -0
  88. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_init.c +167 -0
  89. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_signal.c +231 -0
  90. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_cond_wait.c +567 -0
  91. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_destroy.c +86 -0
  92. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_getpshared.c +97 -0
  93. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_init.c +87 -0
  94. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_condattr_setpshared.c +117 -0
  95. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_delay_np.c +172 -0
  96. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_detach.c +136 -0
  97. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_equal.c +76 -0
  98. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_exit.c +106 -0
  99. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getconcurrency.c +45 -0
  100. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getschedparam.c +75 -0
  101. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getspecific.c +87 -0
  102. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getunique_np.c +47 -0
  103. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_getw32threadhandle_np.c +65 -0
  104. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_join.c +157 -0
  105. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_key_create.c +108 -0
  106. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_key_delete.c +125 -0
  107. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_kill.c +105 -0
  108. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_consistent.c +187 -0
  109. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_destroy.c +148 -0
  110. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_init.c +130 -0
  111. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_lock.c +269 -0
  112. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_timedlock.c +324 -0
  113. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_trylock.c +154 -0
  114. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutex_unlock.c +175 -0
  115. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_destroy.c +83 -0
  116. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getkind_np.c +44 -0
  117. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getpshared.c +95 -0
  118. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_getrobust.c +113 -0
  119. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_gettype.c +56 -0
  120. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_init.c +86 -0
  121. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setkind_np.c +44 -0
  122. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setpshared.c +119 -0
  123. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_setrobust.c +119 -0
  124. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_mutexattr_settype.c +143 -0
  125. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_num_processors_np.c +56 -0
  126. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_once.c +79 -0
  127. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_destroy.c +143 -0
  128. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_init.c +109 -0
  129. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_rdlock.c +102 -0
  130. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_timedrdlock.c +109 -0
  131. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_timedwrlock.c +139 -0
  132. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_tryrdlock.c +102 -0
  133. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_trywrlock.c +122 -0
  134. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_unlock.c +93 -0
  135. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlock_wrlock.c +133 -0
  136. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_destroy.c +84 -0
  137. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_getpshared.c +97 -0
  138. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_init.c +83 -0
  139. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_rwlockattr_setpshared.c +120 -0
  140. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_self.c +141 -0
  141. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setcancelstate.c +125 -0
  142. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setcanceltype.c +126 -0
  143. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setconcurrency.c +53 -0
  144. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setschedparam.c +123 -0
  145. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_setspecific.c +167 -0
  146. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_destroy.c +111 -0
  147. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_init.c +123 -0
  148. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_lock.c +80 -0
  149. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_trylock.c +77 -0
  150. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_spin_unlock.c +71 -0
  151. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_testcancel.c +103 -0
  152. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_timechange_handler_np.c +108 -0
  153. data/ext/noderb_extension/libuv/deps/pthread-win32/pthread_win32_attach_detach_np.c +258 -0
  154. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_MCS_lock.c +278 -0
  155. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_callUserDestroyRoutines.c +232 -0
  156. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_calloc.c +56 -0
  157. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_cond_check_need_init.c +78 -0
  158. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_getprocessors.c +91 -0
  159. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_is_attr.c +47 -0
  160. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_mutex_check_need_init.c +92 -0
  161. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_new.c +94 -0
  162. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_processInitialize.c +92 -0
  163. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_processTerminate.c +105 -0
  164. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_relmillisecs.c +132 -0
  165. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_reuse.c +151 -0
  166. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_rwlock_cancelwrwait.c +50 -0
  167. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_rwlock_check_need_init.c +77 -0
  168. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_semwait.c +135 -0
  169. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_spinlock_check_need_init.c +78 -0
  170. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_threadDestroy.c +79 -0
  171. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_threadStart.c +357 -0
  172. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_throw.c +189 -0
  173. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_timespec.c +83 -0
  174. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_tkAssocCreate.c +118 -0
  175. data/ext/noderb_extension/libuv/deps/pthread-win32/ptw32_tkAssocDestroy.c +114 -0
  176. data/ext/noderb_extension/libuv/deps/pthread-win32/rwlock.c +51 -0
  177. data/ext/noderb_extension/libuv/deps/pthread-win32/sched.c +53 -0
  178. data/ext/noderb_extension/libuv/deps/pthread-win32/sched.h +183 -0
  179. data/ext/noderb_extension/libuv/deps/pthread-win32/sched_get_priority_max.c +134 -0
  180. data/ext/noderb_extension/libuv/deps/pthread-win32/sched_get_priority_min.c +135 -0
  181. data/ext/noderb_extension/libuv/deps/pthread-win32/sched_getscheduler.c +71 -0
  182. data/ext/noderb_extension/libuv/deps/pthread-win32/sched_setscheduler.c +83 -0
  183. data/ext/noderb_extension/libuv/deps/pthread-win32/sched_yield.c +71 -0
  184. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_close.c +58 -0
  185. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_destroy.c +144 -0
  186. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_getvalue.c +110 -0
  187. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_init.c +169 -0
  188. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_open.c +58 -0
  189. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_post.c +128 -0
  190. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_post_multiple.c +142 -0
  191. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_timedwait.c +238 -0
  192. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_trywait.c +117 -0
  193. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_unlink.c +58 -0
  194. data/ext/noderb_extension/libuv/deps/pthread-win32/sem_wait.c +187 -0
  195. data/ext/noderb_extension/libuv/deps/pthread-win32/semaphore.c +69 -0
  196. data/ext/noderb_extension/libuv/deps/pthread-win32/semaphore.h +169 -0
  197. data/ext/noderb_extension/libuv/deps/pthread-win32/signal.c +179 -0
  198. data/ext/noderb_extension/libuv/deps/pthread-win32/spin.c +46 -0
  199. data/ext/noderb_extension/libuv/deps/pthread-win32/sync.c +43 -0
  200. data/ext/noderb_extension/libuv/deps/pthread-win32/tsd.c +44 -0
  201. data/ext/noderb_extension/libuv/deps/pthread-win32/version.rc +388 -0
  202. data/ext/noderb_extension/libuv/deps/pthread-win32/w32_CancelableWait.c +161 -0
  203. data/ext/noderb_extension/libuv/doc/desired-api.md +159 -0
  204. data/ext/noderb_extension/libuv/doc/iocp-links.html +574 -0
  205. data/ext/noderb_extension/libuv/include/ares.h +582 -0
  206. data/ext/noderb_extension/libuv/include/ares_version.h +24 -0
  207. data/ext/noderb_extension/libuv/include/eio.h +376 -0
  208. data/ext/noderb_extension/libuv/include/ev.h +835 -0
  209. data/ext/noderb_extension/libuv/include/ngx-queue.h +102 -0
  210. data/ext/noderb_extension/libuv/include/tree.h +762 -0
  211. data/ext/noderb_extension/libuv/include/uv-unix.h +138 -0
  212. data/ext/noderb_extension/libuv/include/uv-win.h +187 -0
  213. data/ext/noderb_extension/libuv/include/uv.h +635 -0
  214. data/ext/noderb_extension/libuv/src/ares/AUTHORS +37 -0
  215. data/ext/noderb_extension/libuv/src/ares/CHANGES +1198 -0
  216. data/ext/noderb_extension/libuv/src/ares/CMakeLists.txt +22 -0
  217. data/ext/noderb_extension/libuv/src/ares/NEWS +21 -0
  218. data/ext/noderb_extension/libuv/src/ares/README +60 -0
  219. data/ext/noderb_extension/libuv/src/ares/README.cares +13 -0
  220. data/ext/noderb_extension/libuv/src/ares/README.msvc +118 -0
  221. data/ext/noderb_extension/libuv/src/ares/README.node +21 -0
  222. data/ext/noderb_extension/libuv/src/ares/RELEASE-NOTES +25 -0
  223. data/ext/noderb_extension/libuv/src/ares/TODO +23 -0
  224. data/ext/noderb_extension/libuv/src/ares/ares__close_sockets.c +66 -0
  225. data/ext/noderb_extension/libuv/src/ares/ares__get_hostent.c +263 -0
  226. data/ext/noderb_extension/libuv/src/ares/ares__read_line.c +71 -0
  227. data/ext/noderb_extension/libuv/src/ares/ares__timeval.c +111 -0
  228. data/ext/noderb_extension/libuv/src/ares/ares_cancel.c +63 -0
  229. data/ext/noderb_extension/libuv/src/ares/ares_data.c +190 -0
  230. data/ext/noderb_extension/libuv/src/ares/ares_data.h +65 -0
  231. data/ext/noderb_extension/libuv/src/ares/ares_destroy.c +105 -0
  232. data/ext/noderb_extension/libuv/src/ares/ares_dns.h +90 -0
  233. data/ext/noderb_extension/libuv/src/ares/ares_expand_name.c +193 -0
  234. data/ext/noderb_extension/libuv/src/ares/ares_expand_string.c +75 -0
  235. data/ext/noderb_extension/libuv/src/ares/ares_fds.c +62 -0
  236. data/ext/noderb_extension/libuv/src/ares/ares_free_hostent.c +39 -0
  237. data/ext/noderb_extension/libuv/src/ares/ares_free_string.c +25 -0
  238. data/ext/noderb_extension/libuv/src/ares/ares_gethostbyaddr.c +292 -0
  239. data/ext/noderb_extension/libuv/src/ares/ares_gethostbyname.c +515 -0
  240. data/ext/noderb_extension/libuv/src/ares/ares_getnameinfo.c +426 -0
  241. data/ext/noderb_extension/libuv/src/ares/ares_getopt.c +122 -0
  242. data/ext/noderb_extension/libuv/src/ares/ares_getopt.h +53 -0
  243. data/ext/noderb_extension/libuv/src/ares/ares_getsock.c +72 -0
  244. data/ext/noderb_extension/libuv/src/ares/ares_init.c +1665 -0
  245. data/ext/noderb_extension/libuv/src/ares/ares_ipv6.h +78 -0
  246. data/ext/noderb_extension/libuv/src/ares/ares_library_init.c +132 -0
  247. data/ext/noderb_extension/libuv/src/ares/ares_library_init.h +39 -0
  248. data/ext/noderb_extension/libuv/src/ares/ares_llist.c +86 -0
  249. data/ext/noderb_extension/libuv/src/ares/ares_llist.h +42 -0
  250. data/ext/noderb_extension/libuv/src/ares/ares_mkquery.c +195 -0
  251. data/ext/noderb_extension/libuv/src/ares/ares_nowarn.c +59 -0
  252. data/ext/noderb_extension/libuv/src/ares/ares_nowarn.h +24 -0
  253. data/ext/noderb_extension/libuv/src/ares/ares_options.c +253 -0
  254. data/ext/noderb_extension/libuv/src/ares/ares_parse_a_reply.c +260 -0
  255. data/ext/noderb_extension/libuv/src/ares/ares_parse_aaaa_reply.c +256 -0
  256. data/ext/noderb_extension/libuv/src/ares/ares_parse_mx_reply.c +170 -0
  257. data/ext/noderb_extension/libuv/src/ares/ares_parse_ns_reply.c +182 -0
  258. data/ext/noderb_extension/libuv/src/ares/ares_parse_ptr_reply.c +208 -0
  259. data/ext/noderb_extension/libuv/src/ares/ares_parse_srv_reply.c +179 -0
  260. data/ext/noderb_extension/libuv/src/ares/ares_parse_txt_reply.c +201 -0
  261. data/ext/noderb_extension/libuv/src/ares/ares_private.h +351 -0
  262. data/ext/noderb_extension/libuv/src/ares/ares_process.c +1296 -0
  263. data/ext/noderb_extension/libuv/src/ares/ares_query.c +183 -0
  264. data/ext/noderb_extension/libuv/src/ares/ares_rules.h +144 -0
  265. data/ext/noderb_extension/libuv/src/ares/ares_search.c +322 -0
  266. data/ext/noderb_extension/libuv/src/ares/ares_send.c +134 -0
  267. data/ext/noderb_extension/libuv/src/ares/ares_setup.h +191 -0
  268. data/ext/noderb_extension/libuv/src/ares/ares_strcasecmp.c +66 -0
  269. data/ext/noderb_extension/libuv/src/ares/ares_strcasecmp.h +30 -0
  270. data/ext/noderb_extension/libuv/src/ares/ares_strdup.c +42 -0
  271. data/ext/noderb_extension/libuv/src/ares/ares_strdup.h +26 -0
  272. data/ext/noderb_extension/libuv/src/ares/ares_strerror.c +56 -0
  273. data/ext/noderb_extension/libuv/src/ares/ares_timeout.c +80 -0
  274. data/ext/noderb_extension/libuv/src/ares/ares_version.c +11 -0
  275. data/ext/noderb_extension/libuv/src/ares/ares_writev.c +79 -0
  276. data/ext/noderb_extension/libuv/src/ares/ares_writev.h +36 -0
  277. data/ext/noderb_extension/libuv/src/ares/bitncmp.c +59 -0
  278. data/ext/noderb_extension/libuv/src/ares/bitncmp.h +26 -0
  279. data/ext/noderb_extension/libuv/src/ares/config_cygwin/ares_config.h +510 -0
  280. data/ext/noderb_extension/libuv/src/ares/config_darwin/ares_config.h +510 -0
  281. data/ext/noderb_extension/libuv/src/ares/config_freebsd/ares_config.h +510 -0
  282. data/ext/noderb_extension/libuv/src/ares/config_linux/ares_config.h +510 -0
  283. data/ext/noderb_extension/libuv/src/ares/config_openbsd/ares_config.h +510 -0
  284. data/ext/noderb_extension/libuv/src/ares/config_sunos/ares_config.h +510 -0
  285. data/ext/noderb_extension/libuv/src/ares/config_win32/ares_config.h +369 -0
  286. data/ext/noderb_extension/libuv/src/ares/get_ver.awk +35 -0
  287. data/ext/noderb_extension/libuv/src/ares/inet_net_pton.c +450 -0
  288. data/ext/noderb_extension/libuv/src/ares/inet_net_pton.h +31 -0
  289. data/ext/noderb_extension/libuv/src/ares/inet_ntop.c +232 -0
  290. data/ext/noderb_extension/libuv/src/ares/inet_ntop.h +27 -0
  291. data/ext/noderb_extension/libuv/src/ares/nameser.h +193 -0
  292. data/ext/noderb_extension/libuv/src/ares/setup_once.h +488 -0
  293. data/ext/noderb_extension/libuv/src/ares/windows_port.c +22 -0
  294. data/ext/noderb_extension/libuv/src/eio/Changes +63 -0
  295. data/ext/noderb_extension/libuv/src/eio/LICENSE +36 -0
  296. data/ext/noderb_extension/libuv/src/eio/Makefile.am +15 -0
  297. data/ext/noderb_extension/libuv/src/eio/aclocal.m4 +8957 -0
  298. data/ext/noderb_extension/libuv/src/eio/autogen.sh +3 -0
  299. data/ext/noderb_extension/libuv/src/eio/config.h.in +86 -0
  300. data/ext/noderb_extension/libuv/src/eio/config_cygwin.h +77 -0
  301. data/ext/noderb_extension/libuv/src/eio/config_darwin.h +137 -0
  302. data/ext/noderb_extension/libuv/src/eio/config_freebsd.h +78 -0
  303. data/ext/noderb_extension/libuv/src/eio/config_linux.h +101 -0
  304. data/ext/noderb_extension/libuv/src/eio/config_sunos.h +81 -0
  305. data/ext/noderb_extension/libuv/src/eio/configure.ac +22 -0
  306. data/ext/noderb_extension/libuv/src/eio/demo.c +194 -0
  307. data/ext/noderb_extension/libuv/src/eio/ecb.h +370 -0
  308. data/ext/noderb_extension/libuv/src/eio/eio.3 +3428 -0
  309. data/ext/noderb_extension/libuv/src/eio/eio.c +2562 -0
  310. data/ext/noderb_extension/libuv/src/eio/eio.pod +969 -0
  311. data/ext/noderb_extension/libuv/src/eio/libeio.m4 +195 -0
  312. data/ext/noderb_extension/libuv/src/eio/xthread.h +164 -0
  313. data/ext/noderb_extension/libuv/src/ev/Changes +388 -0
  314. data/ext/noderb_extension/libuv/src/ev/LICENSE +36 -0
  315. data/ext/noderb_extension/libuv/src/ev/Makefile.am +18 -0
  316. data/ext/noderb_extension/libuv/src/ev/Makefile.in +771 -0
  317. data/ext/noderb_extension/libuv/src/ev/README +58 -0
  318. data/ext/noderb_extension/libuv/src/ev/aclocal.m4 +8957 -0
  319. data/ext/noderb_extension/libuv/src/ev/autogen.sh +6 -0
  320. data/ext/noderb_extension/libuv/src/ev/config.guess +1526 -0
  321. data/ext/noderb_extension/libuv/src/ev/config.h.in +125 -0
  322. data/ext/noderb_extension/libuv/src/ev/config.sub +1658 -0
  323. data/ext/noderb_extension/libuv/src/ev/config_cygwin.h +123 -0
  324. data/ext/noderb_extension/libuv/src/ev/config_darwin.h +122 -0
  325. data/ext/noderb_extension/libuv/src/ev/config_freebsd.h +120 -0
  326. data/ext/noderb_extension/libuv/src/ev/config_linux.h +130 -0
  327. data/ext/noderb_extension/libuv/src/ev/config_sunos.h +122 -0
  328. data/ext/noderb_extension/libuv/src/ev/configure +13037 -0
  329. data/ext/noderb_extension/libuv/src/ev/configure.ac +18 -0
  330. data/ext/noderb_extension/libuv/src/ev/depcomp +630 -0
  331. data/ext/noderb_extension/libuv/src/ev/ev++.h +816 -0
  332. data/ext/noderb_extension/libuv/src/ev/ev.3 +5311 -0
  333. data/ext/noderb_extension/libuv/src/ev/ev.c +3913 -0
  334. data/ext/noderb_extension/libuv/src/ev/ev.pod +5243 -0
  335. data/ext/noderb_extension/libuv/src/ev/ev_epoll.c +266 -0
  336. data/ext/noderb_extension/libuv/src/ev/ev_kqueue.c +198 -0
  337. data/ext/noderb_extension/libuv/src/ev/ev_poll.c +148 -0
  338. data/ext/noderb_extension/libuv/src/ev/ev_port.c +179 -0
  339. data/ext/noderb_extension/libuv/src/ev/ev_select.c +310 -0
  340. data/ext/noderb_extension/libuv/src/ev/ev_vars.h +203 -0
  341. data/ext/noderb_extension/libuv/src/ev/ev_win32.c +153 -0
  342. data/ext/noderb_extension/libuv/src/ev/ev_wrap.h +196 -0
  343. data/ext/noderb_extension/libuv/src/ev/event.c +402 -0
  344. data/ext/noderb_extension/libuv/src/ev/event.h +170 -0
  345. data/ext/noderb_extension/libuv/src/ev/install-sh +294 -0
  346. data/ext/noderb_extension/libuv/src/ev/libev.m4 +39 -0
  347. data/ext/noderb_extension/libuv/src/ev/ltmain.sh +8413 -0
  348. data/ext/noderb_extension/libuv/src/ev/missing +336 -0
  349. data/ext/noderb_extension/libuv/src/ev/mkinstalldirs +111 -0
  350. data/ext/noderb_extension/libuv/src/uv-common.c +172 -0
  351. data/ext/noderb_extension/libuv/src/uv-common.h +53 -0
  352. data/ext/noderb_extension/libuv/src/uv-cygwin.c +52 -0
  353. data/ext/noderb_extension/libuv/src/uv-darwin.c +64 -0
  354. data/ext/noderb_extension/libuv/src/uv-eio.c +113 -0
  355. data/ext/noderb_extension/libuv/src/uv-eio.h +13 -0
  356. data/ext/noderb_extension/libuv/src/uv-freebsd.c +65 -0
  357. data/ext/noderb_extension/libuv/src/uv-linux.c +51 -0
  358. data/ext/noderb_extension/libuv/src/uv-sunos.c +60 -0
  359. data/ext/noderb_extension/libuv/src/uv-unix.c +2408 -0
  360. data/ext/noderb_extension/libuv/src/win/async.c +129 -0
  361. data/ext/noderb_extension/libuv/src/win/cares.c +304 -0
  362. data/ext/noderb_extension/libuv/src/win/core.c +155 -0
  363. data/ext/noderb_extension/libuv/src/win/error.c +140 -0
  364. data/ext/noderb_extension/libuv/src/win/getaddrinfo.c +341 -0
  365. data/ext/noderb_extension/libuv/src/win/handle.c +176 -0
  366. data/ext/noderb_extension/libuv/src/win/internal.h +237 -0
  367. data/ext/noderb_extension/libuv/src/win/loop-watcher.c +128 -0
  368. data/ext/noderb_extension/libuv/src/win/pipe.c +828 -0
  369. data/ext/noderb_extension/libuv/src/win/process.c +936 -0
  370. data/ext/noderb_extension/libuv/src/win/req.c +141 -0
  371. data/ext/noderb_extension/libuv/src/win/stdio.c +75 -0
  372. data/ext/noderb_extension/libuv/src/win/stream.c +149 -0
  373. data/ext/noderb_extension/libuv/src/win/tcp.c +895 -0
  374. data/ext/noderb_extension/libuv/src/win/timer.c +269 -0
  375. data/ext/noderb_extension/libuv/src/win/util.c +82 -0
  376. data/ext/noderb_extension/libuv/test/benchmark-ares.c +117 -0
  377. data/ext/noderb_extension/libuv/test/benchmark-getaddrinfo.c +90 -0
  378. data/ext/noderb_extension/libuv/test/benchmark-list.h +77 -0
  379. data/ext/noderb_extension/libuv/test/benchmark-ping-pongs.c +210 -0
  380. data/ext/noderb_extension/libuv/test/benchmark-pound.c +237 -0
  381. data/ext/noderb_extension/libuv/test/benchmark-pump.c +459 -0
  382. data/ext/noderb_extension/libuv/test/benchmark-sizes.c +39 -0
  383. data/ext/noderb_extension/libuv/test/benchmark-spawn.c +154 -0
  384. data/ext/noderb_extension/libuv/test/dns-server.c +323 -0
  385. data/ext/noderb_extension/libuv/test/echo-server.c +299 -0
  386. data/ext/noderb_extension/libuv/test/run-benchmarks.c +64 -0
  387. data/ext/noderb_extension/libuv/test/run-tests.c +82 -0
  388. data/ext/noderb_extension/libuv/test/runner-unix.c +335 -0
  389. data/ext/noderb_extension/libuv/test/runner-unix.h +36 -0
  390. data/ext/noderb_extension/libuv/test/runner-win.c +343 -0
  391. data/ext/noderb_extension/libuv/test/runner-win.h +42 -0
  392. data/ext/noderb_extension/libuv/test/runner.c +311 -0
  393. data/ext/noderb_extension/libuv/test/runner.h +155 -0
  394. data/ext/noderb_extension/libuv/test/task.h +111 -0
  395. data/ext/noderb_extension/libuv/test/test-async.c +218 -0
  396. data/ext/noderb_extension/libuv/test/test-callback-stack.c +205 -0
  397. data/ext/noderb_extension/libuv/test/test-connection-fail.c +149 -0
  398. data/ext/noderb_extension/libuv/test/test-delayed-accept.c +198 -0
  399. data/ext/noderb_extension/libuv/test/test-fail-always.c +29 -0
  400. data/ext/noderb_extension/libuv/test/test-get-currentexe.c +53 -0
  401. data/ext/noderb_extension/libuv/test/test-getaddrinfo.c +110 -0
  402. data/ext/noderb_extension/libuv/test/test-gethostbyname.c +192 -0
  403. data/ext/noderb_extension/libuv/test/test-getsockname.c +196 -0
  404. data/ext/noderb_extension/libuv/test/test-hrtime.c +51 -0
  405. data/ext/noderb_extension/libuv/test/test-idle.c +83 -0
  406. data/ext/noderb_extension/libuv/test/test-list.h +165 -0
  407. data/ext/noderb_extension/libuv/test/test-loop-handles.c +361 -0
  408. data/ext/noderb_extension/libuv/test/test-pass-always.c +28 -0
  409. data/ext/noderb_extension/libuv/test/test-ping-pong.c +256 -0
  410. data/ext/noderb_extension/libuv/test/test-pipe-bind-error.c +148 -0
  411. data/ext/noderb_extension/libuv/test/test-ref.c +91 -0
  412. data/ext/noderb_extension/libuv/test/test-shutdown-eof.c +183 -0
  413. data/ext/noderb_extension/libuv/test/test-spawn.c +345 -0
  414. data/ext/noderb_extension/libuv/test/test-tcp-bind-error.c +204 -0
  415. data/ext/noderb_extension/libuv/test/test-tcp-bind6-error.c +164 -0
  416. data/ext/noderb_extension/libuv/test/test-tcp-writealot.c +198 -0
  417. data/ext/noderb_extension/libuv/test/test-timer-again.c +141 -0
  418. data/ext/noderb_extension/libuv/test/test-timer.c +134 -0
  419. data/ext/noderb_extension/noderb.c +340 -0
  420. data/ext/noderb_extension/noderb.h +2 -0
  421. data/lib/noderb/connection.rb +21 -0
  422. data/lib/noderb/process.rb +17 -0
  423. data/lib/noderb.rb +25 -0
  424. metadata +470 -0
data/ext/noderb_extension/libuv/deps/pthread-win32/README.CV
@@ -0,0 +1,3036 @@
+ README.CV -- Condition Variables
+ --------------------------------
+
+ The original implementation of condition variables in
+ pthreads-win32 was based on a discussion paper:
+
+ "Strategies for Implementing POSIX Condition Variables
+ on Win32": http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+
+ The changes suggested below were made on Feb 6 2001. This
+ file is included in the package for the benefit of anyone
+ interested in understanding the pthreads-win32 implementation
+ of condition variables and the (sometimes subtle) issues that
+ it attempts to resolve.
+
+ Thanks go to the individuals whose names appear throughout
+ the following text.
+
+ Ross Johnson
+
+ --------------------------------------------------------------------
+
+ fyi.. (more detailed problem description/demos + possible fix/patch)
+
+ regards,
+ alexander.
+
+
+ Alexander Terekhov
+ 31.01.2001 17:43
+
+ To: ace-bugs@cs.wustl.edu
+ cc:
+ From: Alexander Terekhov/Germany/IBM@IBMDE
+ Subject: Implementation of POSIX CVs: spur.wakeups/lost
+          signals/deadlocks/unfairness
+
+
+
+ ACE VERSION:
+
+   5.1.12 (pthread-win32 snapshot 2000-12-29)
+
+ HOST MACHINE and OPERATING SYSTEM:
+
+   IBM IntelliStation Z Pro, 2 x XEON 1GHz, Win2K
+
+ TARGET MACHINE and OPERATING SYSTEM, if different from HOST:
+ COMPILER NAME AND VERSION (AND PATCHLEVEL):
+
+   Microsoft Visual C++ 6.0
+
+ AREA/CLASS/EXAMPLE AFFECTED:
+
+   Implementation of POSIX condition variables - OS.cpp/.h
+
+ DOES THE PROBLEM AFFECT:
+
+   EXECUTION? YES!
+
+ SYNOPSIS:
+
+   a) spurious wakeups (minor problem)
+   b) lost signals
+   c) broadcast deadlock
+   d) unfairness (minor problem)
+
+ DESCRIPTION:
+
+ Please see attached copy of discussion thread
+ from comp.programming.threads for more details on
+ some reported problems. (i've also posted a "fyi"
+ message to ace-users a week or two ago but
+ unfortunately did not get any response so far).
+
+ It seems that current implementation suffers from
+ two essential problems:
+
+ 1) cond.waiters_count does not accurately reflect
+    number of waiters blocked on semaphore - w/o
+    proper synchronisation that could result (in the
+    time window when counter is not accurate)
+    in spurious wakeups organised by subsequent
+    _signals and _broadcasts.
+
+ 2) Always having (with no e.g. copy_and_clear/..)
+    the same queue in use (semaphore+counter)
+    neither signal nor broadcast provide 'atomic'
+    behaviour with respect to other threads/subsequent
+    calls to signal/broadcast/wait.
+
+ Each problem and combination of both could produce
+ various nasty things:
+
+ a) spurious wakeups (minor problem)
+
+    it is possible that waiter(s) which was already
+    unblocked even so is still counted as blocked
+    waiter. signal and broadcast will release
+    semaphore which will produce a spurious wakeup
+    for a 'real' waiter coming later.
+
+ b) lost signals
+
+    signalling thread ends up consuming its own
+    signal. please see demo/discussion below.
+
+ c) broadcast deadlock
+
+    last_waiter processing code does not correctly
+    handle the case with multiple threads
+    waiting for the end of broadcast.
+    please see demo/discussion below.
+
+ d) unfairness (minor problem)
+
+    without SignalObjectAndWait some waiter(s)
+    may end up consuming broadcasted signals
+    multiple times (spurious wakeups) because waiter
+    thread(s) can be preempted before they call
+    semaphore wait (but after count++ and mtx.unlock).
+
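For reference, the design being criticized here (shared at the time by pthreads-win32 and ACE) boils down to a waiter counter plus a single semaphore. The following compressed C sketch shows the shape of that design rather than the actual sources; names are illustrative, and the waiter-count decrement on wakeup is omitted for brevity:

  #include <windows.h>

  typedef struct {
      long             waiters_count; /* problem 1: may count threads that   */
                                      /* are not actually blocked yet        */
      CRITICAL_SECTION count_lock;    /* guards waiters_count                */
      HANDLE           sema;          /* problem 2: one shared queue for all */
                                      /* signals and broadcasts              */
  } cv_t;

  void cv_wait(cv_t *cv, HANDLE external_mutex) {
      EnterCriticalSection(&cv->count_lock);
      cv->waiters_count++;                       /* counted as a waiter...   */
      LeaveCriticalSection(&cv->count_lock);
      ReleaseMutex(external_mutex);              /* ...but the release and   */
      WaitForSingleObject(cv->sema, INFINITE);   /* the wait are not atomic  */
      WaitForSingleObject(external_mutex, INFINITE); /* re-acquire           */
  }

  void cv_signal(cv_t *cv) {
      EnterCriticalSection(&cv->count_lock);
      int have_waiters = (cv->waiters_count > 0);
      LeaveCriticalSection(&cv->count_lock);
      if (have_waiters)
          ReleaseSemaphore(cv->sema, 1, NULL);   /* may be consumed by any   */
                                                 /* thread reaching the wait,*/
                                                 /* including the signaller  */
  }

Problems a) through d) above are all consequences of the two windows marked in the comments.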
+ REPEAT BY:
+
+   See below... run the problem demo programs (tennis.cpp and
+   tennisb.cpp) a number of times concurrently (on a multiprocessor)
+   and in multiple sessions, or just add a couple of "Sleep"s
+   as described in the attached copy of the discussion thread
+   from comp.programming.threads.
+
+ SAMPLE FIX/WORKAROUND:
+
+   See attached patch to pthread-win32.. well, I can not
+   claim that it is completely bug free but at least my
+   test and the tests provided by pthreads-win32 seem to work.
+   Perhaps that will help.
+
+ regards,
+ alexander.
+
+
+ >> Forum: comp.programming.threads
+ >> Thread: pthread_cond_* implementation questions
+ .
+ .
+ .
+ David Schwartz <davids@webmaster.com> wrote:
+
+ > terekhov@my-deja.com wrote:
+ >
+ >> BTW, could you please also share your view on other perceived
+ >> "problems" such as nested broadcast deadlock, spurious wakeups
+ >> and (the latest one) lost signals??
+ >
+ >I'm not sure what you mean. The standard allows an implementation
+ >to do almost whatever it likes. In fact, you could implement
+ >pthread_cond_wait by releasing the mutex, sleeping a random
+ >amount of time, and then reacquiring the mutex. Of course,
+ >this would be a pretty poor implementation, but any code that
+ >didn't work under that implementation wouldn't be strictly
+ >compliant.
+
+ The implementation you suggested is indeed a correct
+ one (yes, now I see it :). However it requires from
+ signal/broadcast nothing more than to "{ return 0; }"
+ That is not the case for the pthread-win32 and ACE
+ implementations. I do think that these implementations
+ (basically the same implementation) have some serious
+ problems with wait/signal/broadcast calls. I am looking
+ for help to clarify whether these problems are real
+ or not. I think that I can demonstrate what I mean
+ using one or two small sample programs.
+ .
+ .
+ .
+ ==========
+ tennis.cpp
+ ==========
+
+ #include "ace/Synch.h"
+ #include "ace/Thread.h"
+
+ enum GAME_STATE {
+
+   START_GAME,
+   PLAYER_A,          // Player A plays the ball
+   PLAYER_B,          // Player B plays the ball
+   GAME_OVER,
+   ONE_PLAYER_GONE,
+   BOTH_PLAYERS_GONE
+
+ };
+
+ enum GAME_STATE eGameState;
+ ACE_Mutex* pmtxGameStateLock;
+ ACE_Condition< ACE_Mutex >* pcndGameStateChange;
+
+ void*
+ playerA(
+   void* pParm
+ )
+ {
+
+   // For access to game state variable
+   pmtxGameStateLock->acquire();
+
+   // Play loop
+   while ( eGameState < GAME_OVER ) {
+
+     // Play the ball
+     cout << endl << "PLAYER-A" << endl;
+
+     // Now it's PLAYER-B's turn
+     eGameState = PLAYER_B;
+
+     // Signal to PLAYER-B that now it is his turn
+     pcndGameStateChange->signal();
+
+     // Wait until PLAYER-B finishes playing the ball
+     do {
+
+       pcndGameStateChange->wait();
+
+       if ( PLAYER_B == eGameState )
+         cout << endl << "----PLAYER-A: SPURIOUS WAKEUP!!!" << endl;
+
+     } while ( PLAYER_B == eGameState );
+
+   }
+
+   // PLAYER-A gone
+   eGameState = (GAME_STATE)(eGameState+1);
+   cout << endl << "PLAYER-A GONE" << endl;
+
+   // No more access to state variable needed
+   pmtxGameStateLock->release();
+
+   // Signal PLAYER-A gone event
+   pcndGameStateChange->broadcast();
+
+   return 0;
+
+ }
+
+ void*
+ playerB(
+   void* pParm
+ )
+ {
+
+   // For access to game state variable
+   pmtxGameStateLock->acquire();
+
+   // Play loop
+   while ( eGameState < GAME_OVER ) {
+
+     // Play the ball
+     cout << endl << "PLAYER-B" << endl;
+
+     // Now it's PLAYER-A's turn
+     eGameState = PLAYER_A;
+
+     // Signal to PLAYER-A that now it is his turn
+     pcndGameStateChange->signal();
+
+     // Wait until PLAYER-A finishes playing the ball
+     do {
+
+       pcndGameStateChange->wait();
+
+       if ( PLAYER_A == eGameState )
+         cout << endl << "----PLAYER-B: SPURIOUS WAKEUP!!!" << endl;
+
+     } while ( PLAYER_A == eGameState );
+
+   }
+
+   // PLAYER-B gone
+   eGameState = (GAME_STATE)(eGameState+1);
+   cout << endl << "PLAYER-B GONE" << endl;
+
+   // No more access to state variable needed
+   pmtxGameStateLock->release();
+
+   // Signal PLAYER-B gone event
+   pcndGameStateChange->broadcast();
+
+   return 0;
+
+ }
+
+
+ int
+ main (int, ACE_TCHAR *[])
+ {
+
+   pmtxGameStateLock = new ACE_Mutex();
+   pcndGameStateChange = new ACE_Condition< ACE_Mutex >( *pmtxGameStateLock );
+
+   // Set initial state
+   eGameState = START_GAME;
+
+   // Create players
+   ACE_Thread::spawn( playerA );
+   ACE_Thread::spawn( playerB );
+
+   // Give them 5 sec. to play
+   Sleep( 5000 );//sleep( 5 );
+
+   // Set game over state
+   pmtxGameStateLock->acquire();
+   eGameState = GAME_OVER;
+
+   // Let them know
+   pcndGameStateChange->broadcast();
+
+   // Wait for players to stop
+   do {
+
+     pcndGameStateChange->wait();
+
+   } while ( eGameState < BOTH_PLAYERS_GONE );
+
+   // Cleanup
+   cout << endl << "GAME OVER" << endl;
+   pmtxGameStateLock->release();
+   delete pcndGameStateChange;
+   delete pmtxGameStateLock;
+
+   return 0;
+
+ }
+
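For readers without ACE at hand: the wrappers map one-to-one onto POSIX calls (acquire -> pthread_mutex_lock, wait -> pthread_cond_wait, signal -> pthread_cond_signal, broadcast -> pthread_cond_broadcast), so playerA's loop corresponds roughly to the following raw-pthreads sketch (hypothetical translation, error handling omitted):

  #include <pthread.h>
  #include <stdio.h>

  /* plain-pthreads counterparts of the globals above */
  static pthread_mutex_t mtxGameStateLock   = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  cndGameStateChange = PTHREAD_COND_INITIALIZER;

  void* playerA_posix(void* pParm) {
      pthread_mutex_lock(&mtxGameStateLock);
      while (eGameState < GAME_OVER) {
          printf("\nPLAYER-A\n");
          eGameState = PLAYER_B;
          pthread_cond_signal(&cndGameStateChange);
          do {  /* predicate loop: tolerate spurious wakeups */
              pthread_cond_wait(&cndGameStateChange, &mtxGameStateLock);
          } while (eGameState == PLAYER_B);
      }
      eGameState = (enum GAME_STATE)(eGameState + 1); /* PLAYER-A gone */
      pthread_mutex_unlock(&mtxGameStateLock);
      pthread_cond_broadcast(&cndGameStateChange);
      return 0;
  }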
+ ===========
+ tennisb.cpp
+ ===========
+ #include "ace/Synch.h"
+ #include "ace/Thread.h"
+
+ enum GAME_STATE {
+
+   START_GAME,
+   PLAYER_A,          // Player A plays the ball
+   PLAYER_B,          // Player B plays the ball
+   GAME_OVER,
+   ONE_PLAYER_GONE,
+   BOTH_PLAYERS_GONE
+
+ };
+
+ enum GAME_STATE eGameState;
+ ACE_Mutex* pmtxGameStateLock;
+ ACE_Condition< ACE_Mutex >* pcndGameStateChange;
+
+ void*
+ playerA(
+   void* pParm
+ )
+ {
+
+   // For access to game state variable
+   pmtxGameStateLock->acquire();
+
+   // Play loop
+   while ( eGameState < GAME_OVER ) {
+
+     // Play the ball
+     cout << endl << "PLAYER-A" << endl;
+
+     // Now it's PLAYER-B's turn
+     eGameState = PLAYER_B;
+
+     // Signal to PLAYER-B that now it is his turn
+     pcndGameStateChange->broadcast();
+
+     // Wait until PLAYER-B finishes playing the ball
+     do {
+
+       pcndGameStateChange->wait();
+
+       if ( PLAYER_B == eGameState )
+         cout << endl << "----PLAYER-A: SPURIOUS WAKEUP!!!" << endl;
+
+     } while ( PLAYER_B == eGameState );
+
+   }
+
+   // PLAYER-A gone
+   eGameState = (GAME_STATE)(eGameState+1);
+   cout << endl << "PLAYER-A GONE" << endl;
+
+   // No more access to state variable needed
+   pmtxGameStateLock->release();
+
+   // Signal PLAYER-A gone event
+   pcndGameStateChange->broadcast();
+
+   return 0;
+
+ }
+
+ void*
+ playerB(
+   void* pParm
+ )
+ {
+
+   // For access to game state variable
+   pmtxGameStateLock->acquire();
+
+   // Play loop
+   while ( eGameState < GAME_OVER ) {
+
+     // Play the ball
+     cout << endl << "PLAYER-B" << endl;
+
+     // Now it's PLAYER-A's turn
+     eGameState = PLAYER_A;
+
+     // Signal to PLAYER-A that now it is his turn
+     pcndGameStateChange->broadcast();
+
+     // Wait until PLAYER-A finishes playing the ball
+     do {
+
+       pcndGameStateChange->wait();
+
+       if ( PLAYER_A == eGameState )
+         cout << endl << "----PLAYER-B: SPURIOUS WAKEUP!!!" << endl;
+
+     } while ( PLAYER_A == eGameState );
+
+   }
+
+   // PLAYER-B gone
+   eGameState = (GAME_STATE)(eGameState+1);
+   cout << endl << "PLAYER-B GONE" << endl;
+
+   // No more access to state variable needed
+   pmtxGameStateLock->release();
+
+   // Signal PLAYER-B gone event
+   pcndGameStateChange->broadcast();
+
+   return 0;
+
+ }
+
+
+ int
+ main (int, ACE_TCHAR *[])
+ {
+
+   pmtxGameStateLock = new ACE_Mutex();
+   pcndGameStateChange = new ACE_Condition< ACE_Mutex >( *pmtxGameStateLock );
+
+   // Set initial state
+   eGameState = START_GAME;
+
+   // Create players
+   ACE_Thread::spawn( playerA );
+   ACE_Thread::spawn( playerB );
+
+   // Give them 5 sec. to play
+   Sleep( 5000 );//sleep( 5 );
+
+   // Make some noise
+   pmtxGameStateLock->acquire();
+   cout << endl << "---Noise ON..." << endl;
+   pmtxGameStateLock->release();
+   for ( int i = 0; i < 100000; i++ )
+     pcndGameStateChange->broadcast();
+   cout << endl << "---Noise OFF" << endl;
+
+   // Set game over state
+   pmtxGameStateLock->acquire();
+   eGameState = GAME_OVER;
+   cout << endl << "---Stopping the game..." << endl;
+
+   // Let them know
+   pcndGameStateChange->broadcast();
+
+   // Wait for players to stop
+   do {
+
+     pcndGameStateChange->wait();
+
+   } while ( eGameState < BOTH_PLAYERS_GONE );
+
+   // Cleanup
+   cout << endl << "GAME OVER" << endl;
+   pmtxGameStateLock->release();
+   delete pcndGameStateChange;
+   delete pmtxGameStateLock;
+
+   return 0;
+
+ }
+ .
+ .
+ .
+ David Schwartz <davids@webmaster.com> wrote:
+ >> > It's compliant
+ >>
+ >> That is really good.
+ >
+ >> Tomorrow (I have to go urgently now) I will try to
+ >> demonstrate the lost-signal "problem" of the current
+ >> pthread-win32 and ACE (variant w/o SignalObjectAndWait)
+ >> implementations: players suddenly start to drop their balls :-)
+ >> (with no change in source code).
+ >
+ >Signals aren't lost, they're going to the main thread,
+ >which isn't coded correctly to handle them. Try this:
+ >
+ >  // Wait for players to stop
+ >  do {
+ >
+ >    pthread_cond_wait( &cndGameStateChange,&mtxGameStateLock );
+ >    printf("Main thread stole a signal\n");
+ >
+ >  } while ( eGameState < BOTH_PLAYERS_GONE );
+ >
+ >I bet every time you think a signal is lost, you'll see that printf.
+ >The signal isn't lost, it was stolen by another thread.
+
+ well, you can probably lose your bet.. it was indeed stolen
+ by "another" thread but not the one you seem to think of.
+
+ I think that what actually happens is the following:
+
+ H:\SA\UXX\pt\PTHREADS\TESTS>tennis3.exe
+
+ PLAYER-A
+
+ PLAYER-B
+
+ ----PLAYER-B: SPURIOUS WAKEUP!!!
+
+ PLAYER-A GONE
+
+ PLAYER-B GONE
+
+ GAME OVER
+
+ H:\SA\UXX\pt\PTHREADS\TESTS>
+
+ here you can see that PLAYER-B after playing his first
+ ball (which came via signal from PLAYER-A) just dropped
+ it down. What happened is that his signal to player A
+ was consumed as spurious wakeup by himself (player B).
+
+ The implementation has a problem:
+
+ ================
+ waiting threads:
+ ================
+
+ { /** Critical Section
+
+   inc cond.waiters_count
+
+ }
+
+ /*
+ /* Atomic only if using Win32 SignalObjectAndWait
+ /*
+ cond.mtx.release
+
+ /*** ^^-- A THREAD WHICH DID SIGNAL MAY ACQUIRE THE MUTEX,
+ /***      GO INTO WAIT ON THE SAME CONDITION AND OVERTAKE
+ /***      ORIGINAL WAITER(S) CONSUMING ITS OWN SIGNAL!
+
+ cond.sem.wait
+
+ Player-A after playing game's initial ball went into
+ wait (called _wait) but was pre-empted before reaching
+ wait semaphore. He was counted as waiter but was not
+ actually waiting/blocked yet.
+
+ ===============
+ signal threads:
+ ===============
+
+ { /** Critical Section
+
+   waiters_count = cond.waiters_count
+
+ }
+
+ if ( waiters_count != 0 )
+
+   sem.post 1
+
+ endif
+
+ Player-B after he received signal/ball from Player A
+ called _signal. The _signal did see that there was
+ one waiter blocked on the condition (Player-A) and
+ released the semaphore.. (but it did not unblock
+ Player-A because he was not actually blocked).
+ Player-B thread continued its execution, called _wait,
+ was counted as second waiter BUT was allowed to slip
+ through opened semaphore gate (which was opened for
+ Player-B) and received his own signal. Player B remained
+ blocked followed by Player A. Deadlock happened which
+ lasted until main thread came in and said game over.
+
+ It seems to me that the implementation fails to
+ correctly implement the following statement
+ from the specification:
+
+ http://www.opengroup.org/onlinepubs/007908799/xsh/pthread_cond_wait.html
+
+ "These functions atomically release mutex and cause
+ the calling thread to block on the condition variable
+ cond; atomically here means "atomically with respect
+ to access by another thread to the mutex and then the
+ condition variable". That is, if another thread is
+ able to acquire the mutex after the about-to-block
+ thread has released it, then a subsequent call to
+ pthread_cond_signal() or pthread_cond_broadcast()
+ in that thread behaves as if it were issued after
+ the about-to-block thread has blocked."
+
+ Question: Am I right?
+
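In code form, the guarantee being quoted is exactly what makes the usual hand-off pattern safe. A minimal POSIX sketch of both sides (illustrative names, error checking omitted):

  #include <pthread.h>

  pthread_mutex_t m     = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t  cv    = PTHREAD_COND_INITIALIZER;
  int             ready = 0;

  /* waiting side */
  void consumer(void) {
      pthread_mutex_lock(&m);
      while (!ready)                  /* predicate loop absorbs spurious wakeups */
          pthread_cond_wait(&cv, &m); /* must release m and block atomically     */
      pthread_mutex_unlock(&m);
  }

  /* signalling side */
  void producer(void) {
      pthread_mutex_lock(&m);         /* taken after the waiter released it...   */
      ready = 1;
      pthread_cond_signal(&cv);       /* ...so this must behave as if the waiter
                                       * were already blocked; this is the
                                       * guarantee the tennis demo shows broken  */
      pthread_mutex_unlock(&m);
  }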
+ (I produced the program output above by simply
+ adding 'Sleep( 1 )':
+
+ ================
+ waiting threads:
+ ================
+
+ { /** Critical Section
+
+   inc cond.waiters_count
+
+ }
+
+ /*
+ /* Atomic only if using Win32 SignalObjectAndWait
+ /*
+ cond.mtx.release
+
+ Sleep( 1 ); // Win32
+
+ /*** ^^-- A THREAD WHICH DID SIGNAL MAY ACQUIRE THE MUTEX,
+ /***      GO INTO WAIT ON THE SAME CONDITION AND OVERTAKE
+ /***      ORIGINAL WAITER(S) CONSUMING ITS OWN SIGNAL!
+
+ cond.sem.wait
+
+ to the source code of pthread-win32 implementation:
+
+ http://sources.redhat.com/cgi-bin/cvsweb.cgi/pthreads/condvar.c?rev=1.36&content-type=text/x-cvsweb-markup&cvsroot=pthreads-win32
+
+
+ /*
+  * We keep the lock held just long enough to increment the count of
+  * waiters by one (above).
+  * Note that we can't keep it held across the
+  * call to sem_wait since that will deadlock other calls
+  * to pthread_cond_signal
+  */
+ cleanup_args.mutexPtr = mutex;
+ cleanup_args.cv = cv;
+ cleanup_args.resultPtr = &result;
+
+ pthread_cleanup_push (ptw32_cond_wait_cleanup, (void *) &cleanup_args);
+
+ if ((result = pthread_mutex_unlock (mutex)) == 0)
+   {
+     Sleep( 1 ); // @AT
+
+     /*
+      * Wait to be awakened by
+      *        pthread_cond_signal, or
+      *        pthread_cond_broadcast, or
+      *        a timeout
+      *
+      * Note:
+      *        ptw32_sem_timedwait is a cancelation point,
+      *        hence providing the
+      *        mechanism for making pthread_cond_wait a cancelation
+      *        point. We use the cleanup mechanism to ensure we
+      *        re-lock the mutex and decrement the waiters count
+      *        if we are canceled.
+      */
+     if (ptw32_sem_timedwait (&(cv->sema), abstime) == -1) {
+       result = errno;
+     }
+   }
+
+ pthread_cleanup_pop (1); /* Always cleanup */
+
+
+ BTW, on my system (2 CPUs) I can manage to get
+ signals lost even without any source code modification
+ if I run the tennis program many times in different
+ shell sessions.
+ .
+ .
+ .
+ David Schwartz <davids@webmaster.com> wrote:
+ >terekhov@my-deja.com wrote:
+ >
+ >> well, it might be that the program is in fact buggy.
+ >> but you did not show me any bug.
+ >
+ >You're right. I was close but not dead on. I was correct, however,
+ >that the code is buggy because it uses 'pthread_cond_signal' even
+ >though not any thread waiting on the condition variable can do the
+ >job. I was wrong in which thread could be waiting on the cv but
+ >unable to do the job.
+
+ Okay, let's change 'pthread_cond_signal' to 'pthread_cond_broadcast'
+ but also add some noise from main() right before declaring the game
+ to be over (I need it in order to demonstrate another problem of
+ the pthread-win32/ACE implementations - broadcast deadlock)...
+ .
+ .
+ .
+ It is my understanding of POSIX conditions
+ that on a correct implementation added noise
+ in the form of unnecessary broadcasts from main
+ should not break the tennis program. The
+ only 'side effect' of added noise on a correct
+ implementation would be 'spurious wakeups' of
+ players (in fact they are not spurious,
+ players just see them as spurious) unblocked,
+ not by another player but by main before
+ another player had a chance to acquire the
+ mutex and change the game state variable:
+ .
+ .
+ .
+
+ PLAYER-B
+
+ PLAYER-A
+
+ ---Noise ON...
+
+ PLAYER-B
+
+ PLAYER-A
+
+ .
+ .
+ .
+
+ PLAYER-B
+
+ PLAYER-A
+
+ ----PLAYER-A: SPURIOUS WAKEUP!!!
+
+ PLAYER-B
+
+ PLAYER-A
+
+ ---Noise OFF
+
+ PLAYER-B
+
+ ---Stopping the game...
+
+ PLAYER-A GONE
+
+ PLAYER-B GONE
+
+ GAME OVER
+
+ H:\SA\UXX\pt\PTHREADS\TESTS>
+
+ On pthread-win32/ACE implementations the
+ program could stall:
+
+ .
+ .
+ .
+
+ PLAYER-A
+
+ PLAYER-B
+
+ PLAYER-A
+
+ PLAYER-B
+
+ PLAYER-A
+
+ PLAYER-B
+
+ PLAYER-A
+
+ PLAYER-B
+
+ ---Noise ON...
+
+ PLAYER-A
+
+ ---Noise OFF
+ ^C
+ H:\SA\UXX\pt\PTHREADS\TESTS>
+
+
+ The implementation has problems:
+
+ ================
+ waiting threads:
+ ================
+
+ { /** Critical Section
+
+   inc cond.waiters_count
+
+ }
+
+ /*
+ /* Atomic only if using Win32 SignalObjectAndWait
+ /*
+ cond.mtx.release
+ cond.sem.wait
+
+ /*** ^^-- WAITER CAN BE PREEMPTED AFTER BEING UNBLOCKED...
+
+ { /** Critical Section
+
+   dec cond.waiters_count
+
+   /*** ^^- ...AND BEFORE DECREMENTING THE COUNT (1)
+
+   last_waiter = ( cond.was_broadcast &&
+                   cond.waiters_count == 0 )
+
+   if ( last_waiter )
+
+     cond.was_broadcast = FALSE
+
+   endif
+
+ }
+
+ if ( last_waiter )
+
+   /*
+   /* Atomic only if using Win32 SignalObjectAndWait
+   /*
+   cond.auto_reset_event_or_sem.post /* Event for Win32
+   cond.mtx.acquire
+
+   /*** ^^-- ...AND BEFORE CALL TO mtx.acquire (2)
+
+   /*** ^^-- NESTED BROADCASTS RESULT IN A DEADLOCK
+
+
+ else
+
+   cond.mtx.acquire
+
+   /*** ^^-- ...AND BEFORE CALL TO mtx.acquire (3)
+
+ endif
+
+
+ ==================
+ broadcast threads:
+ ==================
+
+ { /** Critical Section
+
+   waiters_count = cond.waiters_count
+
+   if ( waiters_count != 0 )
+
+     cond.was_broadcast = TRUE
+
+   endif
+
+ }
+
+ if ( waiters_count != 0 )
+
+   cond.sem.post waiters_count
+
+   /*** ^^^^^--- SPURIOUS WAKEUPS DUE TO (1)
+
+   cond.auto_reset_event_or_sem.wait /* Event for Win32
+
+   /*** ^^^^^--- DEADLOCK FOR FURTHER BROADCASTS IF THEY
+                 HAPPEN TO GO INTO WAIT WHILE PREVIOUS
+                 BROADCAST IS STILL IN PROGRESS/WAITING
+
+ endif
+
+ a) cond.waiters_count does not accurately reflect
+    the number of waiters blocked on the semaphore - that could
+    result (in the time window when the counter is not accurate)
+    in spurious wakeups organised by subsequent _signals
+    and _broadcasts. From a standards-compliance point of view
+    that is OK, but it could be a real problem from a
+    performance/efficiency point of view.
+
+ b) If a subsequent broadcast happens to go into wait on
+    cond.auto_reset_event_or_sem before the previous
+    broadcast was unblocked from cond.auto_reset_event_or_sem
+    by its last waiter, one of the two blocked threads will
+    remain blocked, because the last_waiter processing code
+    fails to unblock both threads.
+
+ In the situation with tennisb.cpp, Player-B was put
+ in a deadlock by the noise (broadcast) coming from the main
+ thread. And since Player-B holds the game state
+ mutex when it calls broadcast, the whole program
+ stalled: Player-A was deadlocked on the mutex, and the
+ main thread, after finishing producing the noise,
+ was deadlocked on the mutex too (it needed the mutex
+ to declare the game over).
+
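The recurring "Atomic only if using Win32 SignalObjectAndWait" remarks refer to the NT 4.0+ API that signals one kernel object and waits on another in a single atomic step. A sketch of the waiting side built on it, reusing the hypothetical cv_t from the earlier sketch (note the restriction this brings: the external mutex must be a kernel HANDLE mutex, not a CRITICAL_SECTION):

  #include <windows.h>   /* needs _WIN32_WINNT >= 0x0400 */

  void cv_wait_atomic(cv_t *cv, HANDLE external_mutex) {
      EnterCriticalSection(&cv->count_lock);
      cv->waiters_count++;
      LeaveCriticalSection(&cv->count_lock);
      /* Release the mutex and block on the semaphore as one step,
       * closing the window in which a signalling thread could slip
       * past this waiter and consume its own signal. */
      SignalObjectAndWait(external_mutex, cv->sema, INFINITE, FALSE);
      WaitForSingleObject(external_mutex, INFINITE);  /* re-acquire */
  }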
+ (I produced the program output above by simply
+ adding 'Sleep( 1 )':
+
+ ==================
+ broadcast threads:
+ ==================
+
+ { /** Critical Section
+
+   waiters_count = cond.waiters_count
+
+   if ( waiters_count != 0 )
+
+     cond.was_broadcast = TRUE
+
+   endif
+
+ }
+
+ if ( waiters_count != 0 )
+
+   Sleep( 1 ); //Win32
+
+   cond.sem.post waiters_count
+
+   /*** ^^^^^--- SPURIOUS WAKEUPS DUE TO (1)
+
+   cond.auto_reset_event_or_sem.wait /* Event for Win32
+
+   /*** ^^^^^--- DEADLOCK FOR FURTHER BROADCASTS IF THEY
+                 HAPPEN TO GO INTO WAIT WHILE PREVIOUS
+                 BROADCAST IS STILL IN PROGRESS/WAITING
+
+ endif
+
+ to the source code of pthread-win32 implementation:
+
+ http://sources.redhat.com/cgi-bin/cvsweb.cgi/pthreads/condvar.c?rev=1.36&content-type=text/x-cvsweb-markup&cvsroot=pthreads-win32
+
969
+ if (wereWaiters)
970
+ {
971
+ /*
972
+ * Wake up all waiters
973
+ */
974
+
975
+ Sleep( 1 ); //@AT
976
+
977
+ #ifdef NEED_SEM
978
+
979
+ result = (ptw32_increase_semaphore( &cv->sema, cv->waiters )
980
+ ? 0
981
+ : EINVAL);
982
+
983
+ #else /* NEED_SEM */
984
+
985
+ result = (ReleaseSemaphore( cv->sema, cv->waiters, NULL )
986
+ ? 0
987
+ : EINVAL);
988
+
989
+ #endif /* NEED_SEM */
990
+
991
+ }
992
+
993
+ (void) pthread_mutex_unlock(&(cv->waitersLock));
994
+
995
+ if (wereWaiters && result == 0)
996
+ {
997
+ /*
998
+ * Wait for all the awakened threads to acquire their part of
999
+ * the counting semaphore
1000
+ */
1001
+
1002
+ if (WaitForSingleObject (cv->waitersDone, INFINITE)
1003
+ == WAIT_OBJECT_0)
1004
+ {
1005
+ result = 0;
1006
+ }
1007
+ else
1008
+ {
1009
+ result = EINVAL;
1010
+ }
1011
+
1012
+ }
1013
+
1014
+ return (result);
1015
+
1016
+ }
1017
+
1018
+ BTW, on my system (2 CPUs) I can manage to get
1019
+ the program stalled even without any source code
1020
+ modification if I run the tennisb program many
1021
+ times in different shell sessions.
1022
+
1023
+ ===================
1024
+ pthread-win32 patch
1025
+ ===================
1026
+ struct pthread_cond_t_ {
1027
+ long nWaitersBlocked; /* Number of threads blocked
1028
+ */
1029
+ long nWaitersUnblocked; /* Number of threads unblocked
1030
+ */
1031
+ long nWaitersToUnblock; /* Number of threads to unblock
1032
+ */
1033
+ sem_t semBlockQueue; /* Queue up threads waiting for the
1034
+ */
1035
+ /* condition to become signalled
1036
+ */
1037
+ sem_t semBlockLock; /* Semaphore that guards access to
1038
+ */
1039
+ /* | waiters blocked count/block queue
1040
+ */
1041
+ /* +-> Mandatory Sync.LEVEL-1
1042
+ */
1043
+ pthread_mutex_t mtxUnblockLock; /* Mutex that guards access to
1044
+ */
1045
+ /* | waiters (to)unblock(ed) counts
1046
+ */
1047
+ /* +-> Optional* Sync.LEVEL-2
1048
+ */
1049
+ }; /* Opt*) for _timedwait and
1050
+ cancellation*/
1051
+
1052
+ int
1053
+ pthread_cond_init (pthread_cond_t * cond, const pthread_condattr_t * attr)
1054
+ {
+ int result = EAGAIN;
1055
+ pthread_cond_t cv = NULL;
1056
+
1057
+ if (cond == NULL)
1058
+ {
1059
+ return EINVAL;
1060
+ }
1061
+
1062
+ if ((attr != NULL && *attr != NULL) &&
1063
+ ((*attr)->pshared == PTHREAD_PROCESS_SHARED))
1064
+ {
1065
+ /*
1066
+ * Creating condition variable that can be shared between
1067
+ * processes.
1068
+ */
1069
+ result = ENOSYS;
1070
+
1071
+ goto FAIL0;
1072
+ }
1073
+
1074
+ cv = (pthread_cond_t) calloc (1, sizeof (*cv));
1075
+
1076
+ if (cv == NULL)
1077
+ {
1078
+ result = ENOMEM;
1079
+ goto FAIL0;
1080
+ }
1081
+
1082
+ cv->nWaitersBlocked = 0;
1083
+ cv->nWaitersUnblocked = 0;
1084
+ cv->nWaitersToUnblock = 0;
1085
+
1086
+ if (sem_init (&(cv->semBlockLock), 0, 1) != 0)
1087
+ {
1088
+ goto FAIL0;
1089
+ }
1090
+
1091
+ if (sem_init (&(cv->semBlockQueue), 0, 0) != 0)
1092
+ {
1093
+ goto FAIL1;
1094
+ }
1095
+
1096
+ if (pthread_mutex_init (&(cv->mtxUnblockLock), 0) != 0)
1097
+ {
1098
+ goto FAIL2;
1099
+ }
1100
+
1101
+
1102
+ result = 0;
1103
+
1104
+ goto DONE;
1105
+
1106
+ /*
1107
+ * -------------
1108
+ * Failed...
1109
+ * -------------
1110
+ */
1111
+ FAIL2:
1112
+ (void) sem_destroy (&(cv->semBlockQueue));
1113
+
1114
+ FAIL1:
1115
+ (void) sem_destroy (&(cv->semBlockLock));
1116
+
1117
+ FAIL0:
1118
+ DONE:
1119
+ *cond = cv;
1120
+
1121
+ return (result);
1122
+
1123
+ } /* pthread_cond_init */
1124
+
1125
+ int
1126
+ pthread_cond_destroy (pthread_cond_t * cond)
1127
+ {
1128
+ int result = 0;
1129
+ pthread_cond_t cv;
1130
+
1131
+ /*
1132
+ * Assuming any race condition here is harmless.
1133
+ */
1134
+ if (cond == NULL
1135
+ || *cond == NULL)
1136
+ {
1137
+ return EINVAL;
1138
+ }
1139
+
1140
+ if (*cond != (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
1141
+ {
1142
+ cv = *cond;
1143
+
1144
+ /*
1145
+ * Synchronize access to waiters blocked count (LEVEL-1)
1146
+ */
1147
+ if (sem_wait(&(cv->semBlockLock)) != 0)
1148
+ {
1149
+ return errno;
1150
+ }
1151
+
1152
+ /*
1153
+ * Synchronize access to waiters (to)unblock(ed) counts (LEVEL-2)
1154
+ */
1155
+ if ((result = pthread_mutex_lock(&(cv->mtxUnblockLock))) != 0)
1156
+ {
1157
+ (void) sem_post(&(cv->semBlockLock));
1158
+ return result;
1159
+ }
1160
+
1161
+ /*
1162
+ * Check whether cv is still busy (still has waiters blocked)
1163
+ */
1164
+ if (cv->nWaitersBlocked - cv->nWaitersUnblocked > 0)
1165
+ {
1166
+ (void) sem_post(&(cv->semBlockLock));
1167
+ (void) pthread_mutex_unlock(&(cv->mtxUnblockLock));
1168
+ return EBUSY;
1169
+ }
1170
+
1171
+ /*
1172
+ * Now it is safe to destroy
1173
+ */
1174
+ (void) sem_destroy (&(cv->semBlockLock));
1175
+ (void) sem_destroy (&(cv->semBlockQueue));
1176
+ (void) pthread_mutex_unlock (&(cv->mtxUnblockLock));
1177
+ (void) pthread_mutex_destroy (&(cv->mtxUnblockLock));
1178
+
1179
+ free(cv);
1180
+ *cond = NULL;
1181
+ }
1182
+ else
1183
+ {
1184
+ /*
1185
+ * See notes in ptw32_cond_check_need_init() above also.
1186
+ */
1187
+ EnterCriticalSection(&ptw32_cond_test_init_lock);
1188
+
1189
+ /*
1190
+ * Check again.
1191
+ */
1192
+ if (*cond == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
1193
+ {
1194
+ /*
1195
+ * This is all we need to do to destroy a statically
1196
+ * initialised cond that has not yet been used (initialised).
1197
+ * If we get to here, another thread
1198
+ * waiting to initialise this cond will get an EINVAL.
1199
+ */
1200
+ *cond = NULL;
1201
+ }
1202
+ else
1203
+ {
1204
+ /*
1205
+ * The cv has been initialised while we were waiting
1206
+ * so assume it's in use.
1207
+ */
1208
+ result = EBUSY;
1209
+ }
1210
+
1211
+ LeaveCriticalSection(&ptw32_cond_test_init_lock);
1212
+ }
1213
+
1214
+ return (result);
1215
+ }
1216
+
1217
+ /*
1218
+ * Arguments for cond_wait_cleanup, since we can only pass a
1219
+ * single void * to it.
1220
+ */
1221
+ typedef struct {
1222
+ pthread_mutex_t * mutexPtr;
1223
+ pthread_cond_t cv;
1224
+ int * resultPtr;
1225
+ } ptw32_cond_wait_cleanup_args_t;
1226
+
1227
+ static void
1228
+ ptw32_cond_wait_cleanup(void * args)
1229
+ {
1230
+ ptw32_cond_wait_cleanup_args_t * cleanup_args =
1231
+ (ptw32_cond_wait_cleanup_args_t *) args;
1232
+ pthread_cond_t cv = cleanup_args->cv;
1233
+ int * resultPtr = cleanup_args->resultPtr;
1234
+ int eLastSignal; /* enum: 1=yes 0=no -1=cancelled/timedout w/o signal(s)
1235
+ */
1236
+ int result;
1237
+
1238
+ /*
1239
+ * Whether we got here as a result of signal/broadcast or because of
1240
+ * timeout on wait or thread cancellation we indicate that we are no
1241
+ * longer waiting. The waiter is responsible for adjusting waiters
1242
+ * (to)unblock(ed) counts (protected by unblock lock).
1243
+ * Unblock lock/Sync.LEVEL-2 supports _timedwait and cancellation.
1244
+ */
1245
+ if ((result = pthread_mutex_lock(&(cv->mtxUnblockLock))) != 0)
1246
+ {
1247
+ *resultPtr = result;
1248
+ return;
1249
+ }
1250
+
1251
+ cv->nWaitersUnblocked++;
1252
+
1253
+ eLastSignal = (cv->nWaitersToUnblock == 0) ?
1254
+ -1 : (--cv->nWaitersToUnblock == 0);
1255
+
1256
+ /*
1257
+ * No more LEVEL-2 access to waiters (to)unblock(ed) counts needed
1258
+ */
1259
+ if ((result = pthread_mutex_unlock(&(cv->mtxUnblockLock))) != 0)
1260
+ {
1261
+ *resultPtr = result;
1262
+ return;
1263
+ }
1264
+
1265
+ /*
1266
+ * If last signal...
1267
+ */
1268
+ if (eLastSignal == 1)
1269
+ {
1270
+ /*
1271
+ * ...it means that we have end of 'atomic' signal/broadcast
1272
+ */
1273
+ if (sem_post(&(cv->semBlockLock)) != 0)
1274
+ {
1275
+ *resultPtr = errno;
1276
+ return;
1277
+ }
1278
+ }
1279
+ /*
1280
+ * If not last signal and not timed out/cancelled wait w/o signal...
1281
+ */
1282
+ else if (eLastSignal == 0)
1283
+ {
1284
+ /*
1285
+ * ...it means that next waiter can go through semaphore
1286
+ */
1287
+ if (sem_post(&(cv->semBlockQueue)) != 0)
1288
+ {
1289
+ *resultPtr = errno;
1290
+ return;
1291
+ }
1292
+ }
1293
+
1294
+ /*
1295
+ * XSH: Upon successful return, the mutex has been locked and is owned
1296
+ * by the calling thread
1297
+ */
1298
+ if ((result = pthread_mutex_lock(cleanup_args->mutexPtr)) != 0)
1299
+ {
1300
+ *resultPtr = result;
1301
+ }
1302
+
1303
+ } /* ptw32_cond_wait_cleanup */
1304
+
1305
+ static int
1306
+ ptw32_cond_timedwait (pthread_cond_t * cond,
1307
+ pthread_mutex_t * mutex,
1308
+ const struct timespec *abstime)
1309
+ {
1310
+ int result = 0;
1311
+ pthread_cond_t cv;
1312
+ ptw32_cond_wait_cleanup_args_t cleanup_args;
1313
+
1314
+ if (cond == NULL || *cond == NULL)
1315
+ {
1316
+ return EINVAL;
1317
+ }
1318
+
1319
+ /*
1320
+ * We do a quick check to see if we need to do more work
1321
+ * to initialise a static condition variable. We check
1322
+ * again inside the guarded section of ptw32_cond_check_need_init()
1323
+ * to avoid race conditions.
1324
+ */
1325
+ if (*cond == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
1326
+ {
1327
+ result = ptw32_cond_check_need_init(cond);
1328
+ }
1329
+
1330
+ if (result != 0 && result != EBUSY)
1331
+ {
1332
+ return result;
1333
+ }
1334
+
1335
+ cv = *cond;
1336
+
1337
+ /*
1338
+ * Synchronize access to waiters blocked count (LEVEL-1)
1339
+ */
1340
+ if (sem_wait(&(cv->semBlockLock)) != 0)
1341
+ {
1342
+ return errno;
1343
+ }
1344
+
1345
+ cv->nWaitersBlocked++;
1346
+
1347
+ /*
1348
+ * That's it. Counted means waiting, no more access needed
1349
+ */
1350
+ if (sem_post(&(cv->semBlockLock)) != 0)
1351
+ {
1352
+ return errno;
1353
+ }
1354
+
1355
+ /*
1356
+ * Setup this waiter cleanup handler
1357
+ */
1358
+ cleanup_args.mutexPtr = mutex;
1359
+ cleanup_args.cv = cv;
1360
+ cleanup_args.resultPtr = &result;
1361
+
1362
+ pthread_cleanup_push (ptw32_cond_wait_cleanup, (void *) &cleanup_args);
1363
+
1364
+ /*
1365
+ * Now we can release 'mutex' and...
1366
+ */
1367
+ if ((result = pthread_mutex_unlock (mutex)) == 0)
1368
+ {
1369
+
1370
+ /*
1371
+ * ...wait to be awakened by
1372
+ * pthread_cond_signal, or
1373
+ * pthread_cond_broadcast, or
1374
+ * timeout, or
1375
+ * thread cancellation
1376
+ *
1377
+ * Note:
1378
+ *
1379
+ * ptw32_sem_timedwait is a cancellation point,
1380
+ * hence providing the mechanism for making
1381
+ * pthread_cond_wait a cancellation point.
1382
+ * We use the cleanup mechanism to ensure we
1383
+ * re-lock the mutex and adjust (to)unblock(ed) waiters
1384
+ * counts if we are cancelled, timed out or signalled.
1385
+ */
1386
+ if (ptw32_sem_timedwait (&(cv->semBlockQueue), abstime) != 0)
1387
+ {
1388
+ result = errno;
1389
+ }
1390
+ }
1391
+
1392
+ /*
1393
+ * Always cleanup
1394
+ */
1395
+ pthread_cleanup_pop (1);
1396
+
1397
+
1398
+ /*
1399
+ * "result" can be modified by the cleanup handler.
1400
+ */
1401
+ return (result);
1402
+
1403
+ } /* ptw32_cond_timedwait */
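+ (A side note on pthread_cleanup_push/pthread_cleanup_pop as used above:
+ they are typically macros that open and close a block, so they must
+ appear as a matched pair in the same lexical scope. A minimal,
+ self-contained sketch - the handler and its message are illustrative
+ only:)
+
+ #include <pthread.h>
+ #include <stdio.h>
+
+ static void on_cleanup(void *arg)
+ {
+     /* Runs on cancellation between push and pop, or unconditionally
+      * here because pop is called with a non-zero argument. */
+     printf("cleanup: %s\n", (const char *) arg);
+ }
+
+ static void *worker(void *arg)
+ {
+     (void) arg;
+     pthread_cleanup_push(on_cleanup, "adjusting waiter counts");
+     /* ... a cancellation point such as a condition wait goes here ... */
+     pthread_cleanup_pop(1);   /* 1 => run the handler even if not cancelled */
+     return NULL;
+ }
+
+ int main(void)
+ {
+     pthread_t t;
+     pthread_create(&t, NULL, worker, NULL);
+     pthread_join(t, NULL);
+     return 0;
+ }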
1404
+
1405
+
1406
+ static int
1407
+ ptw32_cond_unblock (pthread_cond_t * cond,
1408
+ int unblockAll)
1409
+ {
1410
+ int result;
1411
+ pthread_cond_t cv;
1412
+
1413
+ if (cond == NULL || *cond == NULL)
1414
+ {
1415
+ return EINVAL;
1416
+ }
1417
+
1418
+ cv = *cond;
1419
+
1420
+ /*
1421
+ * No-op if the CV is static and hasn't been initialised yet.
1422
+ * Assuming that any race condition is harmless.
1423
+ */
1424
+ if (cv == (pthread_cond_t) PTW32_OBJECT_AUTO_INIT)
1425
+ {
1426
+ return 0;
1427
+ }
1428
+
1429
+ /*
1430
+ * Synchronize access to waiters blocked count (LEVEL-1)
1431
+ */
1432
+ if (sem_wait(&(cv->semBlockLock)) != 0)
1433
+ {
1434
+ return errno;
1435
+ }
1436
+
1437
+ /*
1438
+ * Synchronize access to waiters (to)unblock(ed) counts (LEVEL-2)
1439
+ * This sync.level supports _timedwait and cancellation
1440
+ */
1441
+ if ((result = pthread_mutex_lock(&(cv->mtxUnblockLock))) != 0)
1442
+ {
1443
+ return result;
1444
+ }
1445
+
1446
+ /*
1447
+ * Adjust waiters blocked and unblocked counts (collect garbage)
1448
+ */
1449
+ if (cv->nWaitersUnblocked != 0)
1450
+ {
1451
+ cv->nWaitersBlocked -= cv->nWaitersUnblocked;
1452
+ cv->nWaitersUnblocked = 0;
1453
+ }
1454
+
1455
+ /*
1456
+ * If (after adjustment) there are still some waiters blocked counted...
1457
+ */
1458
+ if ( cv->nWaitersBlocked > 0)
1459
+ {
1460
+ /*
1461
+ * We will unblock first waiter and leave semBlockLock/LEVEL-1 locked
1462
+ * LEVEL-1 access is left disabled until last signal/unblock completes
1464
+ */
1465
+ cv->nWaitersToUnblock = (unblockAll) ? cv->nWaitersBlocked : 1;
1466
+
1467
+ /*
1468
+ * No more LEVEL-2 access to waiters (to)unblock(ed) counts needed
1469
+ * This sync.level supports _timedwait and cancellation
1470
+ */
1471
+ if ((result = pthread_mutex_unlock(&(cv->mtxUnblockLock))) != 0)
1472
+ {
1473
+ return result;
1474
+ }
1475
+
1476
+
1477
+ /*
1478
+ * Now, with LEVEL-2 lock released, let first waiter go through semaphore
1480
+ */
1481
+ if (sem_post(&(cv->semBlockQueue)) != 0)
1482
+ {
1483
+ return errno;
1484
+ }
1485
+ }
1486
+ /*
1487
+ * No waiter blocked - no more LEVEL-1 access to blocked count needed...
1488
+ */
1489
+ else if (sem_post(&(cv->semBlockLock)) != 0)
1490
+ {
1491
+ return errno;
1492
+ }
1493
+ /*
1494
+ * ...and no more LEVEL-2 access to waiters (to)unblock(ed) counts needed too
1496
+ * This sync.level supports _timedwait and cancellation
1497
+ */
1498
+ else
1499
+ {
1500
+ result = pthread_mutex_unlock(&(cv->mtxUnblockLock));
1501
+ }
1502
+
1503
+ return(result);
1504
+
1505
+ } /* ptw32_cond_unblock */
1506
+
1507
+ int
1508
+ pthread_cond_wait (pthread_cond_t * cond,
1509
+ pthread_mutex_t * mutex)
1510
+ {
1511
+ /* The NULL abstime arg means INFINITE waiting. */
1512
+ return(ptw32_cond_timedwait(cond, mutex, NULL));
1513
+ } /* pthread_cond_wait */
1514
+
1515
+
1516
+ int
1517
+ pthread_cond_timedwait (pthread_cond_t * cond,
1518
+ pthread_mutex_t * mutex,
1519
+ const struct timespec *abstime)
1520
+ {
1521
+ if (abstime == NULL)
1522
+ {
1523
+ return EINVAL;
1524
+ }
1525
+
1526
+ return(ptw32_cond_timedwait(cond, mutex, abstime));
1527
+ } /* pthread_cond_timedwait */
1528
+
1529
+
1530
+ int
1531
+ pthread_cond_signal (pthread_cond_t * cond)
1532
+ {
1533
+ /* The '0'(FALSE) unblockAll arg means unblock ONE waiter. */
1534
+ return(ptw32_cond_unblock(cond, 0));
1535
+ } /* pthread_cond_signal */
1536
+
1537
+ int
1538
+ pthread_cond_broadcast (pthread_cond_t * cond)
1539
+ {
1540
+ /* The '1'(TRUE) unblockAll arg means unblock ALL waiters. */
1541
+ return(ptw32_cond_unblock(cond, 1));
1542
+ } /* pthread_cond_broadcast */
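+ (Worth keeping in mind while reading the discussion that follows: at the
+ application level, spurious and stolen wakeups are harmless as long as
+ the predicate is re-tested in a loop around the wait. A minimal usage
+ sketch with illustrative names, not taken from the patch:)
+
+ #include <pthread.h>
+
+ static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+ static pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER;
+ static int items = 0;
+
+ void consume_one(void)
+ {
+     pthread_mutex_lock(&mtx);
+     while (items == 0)          /* re-check: tolerates spurious wakeups */
+         pthread_cond_wait(&cv, &mtx);
+     items--;                    /* predicate holds and mutex is owned   */
+     pthread_mutex_unlock(&mtx);
+ }
+
+ void produce_one(void)
+ {
+     pthread_mutex_lock(&mtx);   /* signalling under the mutex makes     */
+     items++;                    /* current waiters 'visible' per POSIX  */
+     pthread_cond_signal(&cv);
+     pthread_mutex_unlock(&mtx);
+ }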
1543
+
1544
+
1545
+
1546
+
1547
+ TEREKHOV@de.ibm.com on 17.01.2001 01:00:57
1548
+
1549
+ Please respond to TEREKHOV@de.ibm.com
1550
+
1551
+ To: pthreads-win32@sourceware.cygnus.com
1552
+ cc: schmidt@uci.edu
1553
+ Subject: win32 conditions: sem+counter+event = broadcast_deadlock +
1554
+ spur.wakeup/unfairness/incorrectness ??
1555
+
1556
+
1557
+
1558
+
1559
+
1560
+
1561
+
1562
+ Hi,
1563
+
1564
+ Problem 1: broadcast_deadlock
1565
+
1566
+ It seems that the current implementation does not provide "atomic"
1567
+ broadcasts. That may lead to "nested" broadcasts... and it seems
1568
+ that the nested case is not handled correctly -> producing a broadcast
1569
+ DEADLOCK as a result.
1570
+
1571
+ Scenario:
1572
+
1573
+ N (>1) waiting threads W1..N are blocked (in _wait) on condition's
1574
+ semaphore.
1575
+
1576
+ Thread B1 calls pthread_cond_broadcast, which results in "releasing" N
1577
+ W threads via incrementing semaphore counter by N (stored in
1578
+ cv->waiters) BUT cv->waiters counter does not change!! The caller
1579
+ thread B1 remains blocked on cv->waitersDone event (auto-reset!!) BUT
1580
+ condition is not protected from starting another broadcast (when called
1581
+ on another thread) while still waiting for the "old" broadcast to
1582
+ complete on thread B1.
1583
+
1584
+ M (>=0, <N) W threads are fast enough to go thru their _wait call and
1585
+ decrement cv->waiters counter.
1586
+
1587
+ L (N-M) "late" waiter W threads are a) still blocked/not returned from
1588
+ their semaphore wait call or b) were preempted after sem_wait but before
1589
+ lock( &cv->waitersLock ) or c) are blocked on cv->waitersLock.
1590
+
1591
+ cv->waiters is still > 0 (= L).
1592
+
1593
+ Another thread B2 (or some W thread from M group) calls
1594
+ pthread_cond_broadcast and gains access to counter... neither a) nor b)
1595
+ prevent thread B2 in pthread_cond_broadcast from gaining access to
1596
+ counter and starting another broadcast ( for c) - it depends on
1597
+ cv->waitersLock scheduling rules: FIFO=OK, PRTY=PROBLEM,... )
1598
+
1599
+ That call to pthread_cond_broadcast (on thread B2) will result in
1600
+ incrementing semaphore by cv->waiters (=L) which is INCORRECT (all
1601
+ W1..N were in fact already released by thread B1) and waiting on
1602
+ _auto-reset_ event cv->waitersDone which is DEADLY WRONG (produces a
1603
+ deadlock)...
1604
+
1605
+ All late W1..L threads now have a chance to complete their _wait call.
1606
+ Last W_L thread sets the auto-reset event cv->waitersDone, which will
1607
+ release either B1 or B2 leaving one of B threads in a deadlock.
1608
+
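+ (The core of Problem 1 is that both broadcasters end up blocked on the
+ same auto-reset event, which one SetEvent can release only once. A
+ stripped-down Win32 sketch of just that failure mode - illustrative
+ code, not the pthreads-win32 sources:)
+
+ #include <windows.h>
+ #include <stdio.h>
+
+ static HANDLE hevDone;                      /* auto-reset, like waitersDone */
+
+ static DWORD WINAPI broadcaster(LPVOID arg)
+ {
+     WaitForSingleObject(hevDone, INFINITE); /* B1 and B2 both wait here     */
+     printf("broadcaster %d released\n", (int)(INT_PTR) arg);
+     return 0;
+ }
+
+ int main(void)
+ {
+     HANDLE t[2];
+     hevDone = CreateEvent(NULL, FALSE /* auto-reset */, FALSE, NULL);
+     t[0] = CreateThread(NULL, 0, broadcaster, (LPVOID) 1, 0, NULL);
+     t[1] = CreateThread(NULL, 0, broadcaster, (LPVOID) 2, 0, NULL);
+     Sleep(100);
+     SetEvent(hevDone);     /* the 'last waiter' fires exactly once, so     */
+                            /* one broadcaster returns; the other is stuck  */
+     WaitForMultipleObjects(2, t, TRUE, 2000);  /* times out - the deadlock */
+     return 0;
+ }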
1609
+ Problem 2: spur.wakeup/unfairness/incorrectness
1610
+
1611
+ It seems that:
1612
+
1613
+ a) because of the same problem with counter which does not reflect the
1614
+ actual number of NOT RELEASED waiters, the signal call may increment
1615
+ a semaphore counter w/o having a waiter blocked on it. That will result
1616
+ in (best case) spurious wake ups - performance degradation due to
1617
+ unnecessary context switches and predicate re-checks, and (in the worst case)
1618
+ an unfairness/incorrectness problem - see b)
1619
+
1620
+ b) neither signal nor broadcast prevents other threads - "new waiters"
1621
+ (and in the case of signal, the caller thread as well) from going into
1622
+ _wait and overtaking "old" waiters (already released but still not returned
1623
+ from sem_wait on condition's semaphore). Win semaphore just [API DOC]:
1624
+ "Maintains a count between zero and some maximum value, limiting the number
1625
+ of threads that are simultaneously accessing a shared resource." Calling
1626
+ ReleaseSemaphore does not imply (at least not documented) that on return
1627
+ from ReleaseSemaphore all waiters will in fact become released (returned
1628
+ from their Wait... call) and/or that new waiters calling Wait... afterwards
1629
+ will be given lower priority. It is NOT documented to be an atomic release
1630
+ of waiters... And even if it were, there is still a problem with a thread
1632
+ being preempted after Wait on semaphore and before Wait on cv->waitersLock
1633
+ and scheduling rules for cv->waitersLock itself
1634
+ (??WaitForMultipleObjects??)
1635
+ That may result in unfairness/incorrectness problem as described
1636
+ for SetEvent impl. in "Strategies for Implementing POSIX Condition Variables
1638
+ on Win32": http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
1639
+
1640
+ Unfairness -- The semantics of the POSIX pthread_cond_broadcast function is
1641
+ to wake up all threads currently blocked in wait calls on the condition
1642
+ variable. The awakened threads then compete for the external_mutex. To ensure
1644
+ fairness, all of these threads should be released from their
1645
+ pthread_cond_wait calls and allowed to recheck their condition expressions
1646
+ before other threads can successfully complete a wait on the condition
1647
+ variable.
1648
+
1649
+ Unfortunately, the SetEvent implementation above does not guarantee that all
1651
+ threads sleeping on the condition variable when cond_broadcast is called will
1653
+ acquire the external_mutex and check their condition expressions. Although
1654
+ the Pthreads specification does not mandate this degree of fairness, the
1655
+ lack of fairness can cause starvation.
1656
+
1657
+ To illustrate the unfairness problem, imagine there are 2 threads, C1 and C2,
1659
+ that are blocked in pthread_cond_wait on condition variable not_empty_ that
1660
+ is guarding a thread-safe message queue. Another thread, P1 then places two
1661
+ messages onto the queue and calls pthread_cond_broadcast. If C1 returns from
1663
+ pthread_cond_wait, dequeues and processes the message, and immediately waits
1665
+ again then it and only it may end up acquiring both messages. Thus, C2 will
1666
+ never get a chance to dequeue a message and run.
1667
+
1668
+ The following illustrates the sequence of events:
1669
+
1670
+ 1. Thread C1 attempts to dequeue and waits on CV non_empty_
1671
+ 2. Thread C2 attempts to dequeue and waits on CV non_empty_
1672
+ 3. Thread P1 enqueues 2 messages and broadcasts to CV not_empty_
1673
+ 4. Thread P1 exits
1674
+ 5. Thread C1 wakes up from CV not_empty_, dequeues a message and runs
1675
+ 6. Thread C1 waits again on CV not_empty_, immediately dequeues the 2nd
1676
+ message and runs
1677
+ 7. Thread C1 exits
1678
+ 8. Thread C2 is the only thread left and blocks forever since
1679
+ not_empty_ will never be signaled
1680
+
1681
+ Depending on the algorithm being implemented, this lack of fairness may yield
1683
+ concurrent programs that have subtle bugs. Of course, application developers
1685
+ should not rely on the fairness semantics of pthread_cond_broadcast. However,
1687
+ there are many cases where fair implementations of condition variables can
1688
+ simplify application code.
1689
+
1690
+ Incorrectness -- A variation on the unfairness problem described above occurs
1692
+ when a third consumer thread, C3, is allowed to slip through even though it
1693
+ was not waiting on condition variable not_empty_ when a broadcast occurred.
1694
+
1695
+ To illustrate this, we will use the same scenario as above: 2 threads, C1 and
1697
+ C2, are blocked dequeuing messages from the message queue. Another thread, P1
1699
+ then places two messages onto the queue and calls pthread_cond_broadcast. C1
1701
+ returns from pthread_cond_wait, dequeues and processes the message. At this
1702
+ time, C3 acquires the external_mutex, calls pthread_cond_wait and waits on
1703
+ the events in WaitForMultipleObjects. Since C2 has not had a chance to run
1704
+ yet, the BROADCAST event is still signaled. C3 then returns from
1705
+ WaitForMultipleObjects, and dequeues and processes the message in the queue.
1707
+ Thus, C2 will never get a chance to dequeue a message and run.
1708
+
1709
+ The following illustrates the sequence of events:
1710
+
1711
+ 1. Thread C1 attempts to dequeue and waits on CV non_empty_
1712
+ 2. Thread C2 attempts to dequeue and waits on CV non_empty_
1713
+ 3. Thread P1 enqueues 2 messages and broadcasts to CV not_empty_
1714
+ 4. Thread P1 exits
1715
+ 5. Thread C1 wakes up from CV not_empty_, dequeues a message and runs
1716
+ 6. Thread C1 exits
1717
+ 7. Thread C3 waits on CV not_empty_, immediately dequeues the 2nd
1718
+ message and runs
1719
+ 8. Thread C3 exits
1720
+ 9. Thread C2 is the only thread left and blocks forever since
1721
+ not_empty_ will never be signaled
1722
+
1723
+ In the above case, a thread that was not waiting on the condition variable
1724
+ when a broadcast occurred was allowed to proceed. This leads to incorrect
1725
+ semantics for a condition variable.
1726
+
1727
+
1728
+ COMMENTS???
1729
+
1730
+ regards,
1731
+ alexander.
1732
+
1733
+ -----------------------------------------------------------------------------
1734
+
1735
+ Subject: RE: FYI/comp.programming.threads/Re: pthread_cond_*
1736
+ implementation questions
1737
+ Date: Wed, 21 Feb 2001 11:54:47 +0100
1738
+ From: TEREKHOV@de.ibm.com
1739
+ To: lthomas@arbitrade.com
1740
+ CC: rpj@ise.canberra.edu.au, Thomas Pfaff <tpfaff@gmx.net>,
1741
+ Nanbor Wang <nanbor@cs.wustl.edu>
1742
+
1743
+ Hi Louis,
1744
+
1745
+ generation number 8..
1746
+
1747
+ had some time to revisit timeouts/spurious wakeup problem..
1748
+ found some bugs (in 7.b/c/d) and something to improve
1749
+ (7a - using IPC semaphores, but it should speed up the Win32
1750
+ version as well).
1751
+
1752
+ regards,
1753
+ alexander.
1754
+
1755
+ ---------- Algorithm 8a / IMPL_SEM,UNBLOCK_STRATEGY == UNBLOCK_ALL ------
1756
+ given:
1757
+ semBlockLock - bin.semaphore
1758
+ semBlockQueue - semaphore
1759
+ mtxExternal - mutex or CS
1760
+ mtxUnblockLock - mutex or CS
1761
+ nWaitersGone - int
1762
+ nWaitersBlocked - int
1763
+ nWaitersToUnblock - int
1764
+
1765
+ wait( timeout ) {
1766
+
1767
+ [auto: register int result ] // error checking omitted
1768
+ [auto: register int nSignalsWasLeft ]
1769
+ [auto: register int nWaitersWasGone ]
1770
+
1771
+ sem_wait( semBlockLock );
1772
+ nWaitersBlocked++;
1773
+ sem_post( semBlockLock );
1774
+
1775
+ unlock( mtxExternal );
1776
+ bTimedOut = sem_wait( semBlockQueue,timeout );
1777
+
1778
+ lock( mtxUnblockLock );
1779
+ if ( 0 != (nSignalsWasLeft = nWaitersToUnblock) ) {
1780
+ if ( bTimedOut ) { // timeout (or canceled)
1781
+ if ( 0 != nWaitersBlocked ) {
1782
+ nWaitersBlocked--;
1783
+ }
1784
+ else {
1785
+ nWaitersGone++; // count spurious wakeups
1786
+ }
1787
+ }
1788
+ if ( 0 == --nWaitersToUnblock ) {
1789
+ if ( 0 != nWaitersBlocked ) {
1790
+ sem_post( semBlockLock ); // open the gate
1791
+ nSignalsWasLeft = 0; // do not open the gate below again
1793
+ }
1794
+ else if ( 0 != (nWaitersWasGone = nWaitersGone) ) {
1795
+ nWaitersGone = 0;
1796
+ }
1797
+ }
1798
+ }
1799
+ else if ( INT_MAX/2 == ++nWaitersGone ) { // timeout/canceled or spurious semaphore :-)
1801
+ sem_wait( semBlockLock );
1802
+ nWaitersBlocked -= nWaitersGone; // something is going on here - test of timeouts? :-)
1804
+ sem_post( semBlockLock );
1805
+ nWaitersGone = 0;
1806
+ }
1807
+ unlock( mtxUnblockLock );
1808
+
1809
+ if ( 1 == nSignalsWasLeft ) {
1810
+ if ( 0 != nWaitersWasGone ) {
1811
+ // sem_adjust( -nWaitersWasGone );
1812
+ while ( nWaitersWasGone-- ) {
1813
+ sem_wait( semBlockLock ); // better now than spurious later
1815
+ }
1816
+ }
1817
+ sem_post( semBlockLock ); // open the gate
1818
+ }
1819
+
1820
+ lock( mtxExternal );
1821
+
1822
+ return ( bTimedOut ) ? ETIMEDOUT : 0;
1823
+ }
1824
+
1825
+ signal(bAll) {
1826
+
1827
+ [auto: register int result ]
1828
+ [auto: register int nSignalsToIssue]
1829
+
1830
+ lock( mtxUnblockLock );
1831
+
1832
+ if ( 0 != nWaitersToUnblock ) { // the gate is closed!!!
1833
+ if ( 0 == nWaitersBlocked ) { // NO-OP
1834
+ return unlock( mtxUnblockLock );
1835
+ }
1836
+ if (bAll) {
1837
+ nWaitersToUnblock += nSignalsToIssue=nWaitersBlocked;
1838
+ nWaitersBlocked = 0;
1839
+ }
1840
+ else {
1841
+ nSignalsToIssue = 1;
1842
+ nWaitersToUnblock++;
1843
+ nWaitersBlocked--;
1844
+ }
1845
+ }
1846
+ else if ( nWaitersBlocked > nWaitersGone ) { // HARMLESS RACE CONDITION!
1847
+ sem_wait( semBlockLock ); // close the gate
1848
+ if ( 0 != nWaitersGone ) {
1849
+ nWaitersBlocked -= nWaitersGone;
1850
+ nWaitersGone = 0;
1851
+ }
1852
+ if (bAll) {
1853
+ nSignalsToIssue = nWaitersToUnblock = nWaitersBlocked;
1854
+ nWaitersBlocked = 0;
1855
+ }
1856
+ else {
1857
+ nSignalsToIssue = nWaitersToUnblock = 1;
1858
+ nWaitersBlocked--;
1859
+ }
1860
+ }
1861
+ else { // NO-OP
1862
+ return unlock( mtxUnblockLock );
1863
+ }
1864
+
1865
+ unlock( mtxUnblockLock );
1866
+ sem_post( semBlockQueue,nSignalsToIssue );
1867
+ return result;
1868
+ }
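+ (For readers who prefer compilable code, here is the signal/broadcast
+ path of Algorithm 8a transcribed into C with POSIX semaphores; the
+ struct mirrors the 'given:' list and, as in the pseudocode, error
+ checking is omitted. The type and function names here are illustrative,
+ not part of any released implementation:)
+
+ #include <pthread.h>
+ #include <semaphore.h>
+
+ typedef struct {
+     sem_t semBlockLock;              /* binary semaphore: the gate    */
+     sem_t semBlockQueue;             /* counting semaphore: the queue */
+     pthread_mutex_t mtxUnblockLock;
+     int nWaitersGone;
+     int nWaitersBlocked;
+     int nWaitersToUnblock;
+ } cv8a_t;
+
+ static int cv8a_signal(cv8a_t *cv, int bAll)
+ {
+     int nSignalsToIssue = 0;
+
+     pthread_mutex_lock(&cv->mtxUnblockLock);
+
+     if (cv->nWaitersToUnblock != 0) {          /* the gate is closed */
+         if (cv->nWaitersBlocked == 0)          /* NO-OP              */
+             return pthread_mutex_unlock(&cv->mtxUnblockLock);
+         if (bAll) {
+             cv->nWaitersToUnblock += (nSignalsToIssue = cv->nWaitersBlocked);
+             cv->nWaitersBlocked = 0;
+         } else {
+             nSignalsToIssue = 1;
+             cv->nWaitersToUnblock++;
+             cv->nWaitersBlocked--;
+         }
+     } else if (cv->nWaitersBlocked > cv->nWaitersGone) {
+         sem_wait(&cv->semBlockLock);           /* close the gate     */
+         if (cv->nWaitersGone != 0) {
+             cv->nWaitersBlocked -= cv->nWaitersGone;
+             cv->nWaitersGone = 0;
+         }
+         if (bAll) {
+             nSignalsToIssue = cv->nWaitersToUnblock = cv->nWaitersBlocked;
+             cv->nWaitersBlocked = 0;
+         } else {
+             nSignalsToIssue = cv->nWaitersToUnblock = 1;
+             cv->nWaitersBlocked--;
+         }
+     } else {                                   /* NO-OP              */
+         return pthread_mutex_unlock(&cv->mtxUnblockLock);
+     }
+
+     pthread_mutex_unlock(&cv->mtxUnblockLock);
+     while (nSignalsToIssue-- > 0)  /* POSIX sem_post has no count argument */
+         sem_post(&cv->semBlockQueue);
+     return 0;
+ }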
1869
+
1870
+ ---------- Algorithm 8b / IMPL_SEM,UNBLOCK_STRATEGY == UNBLOCK_ONEBYONE ------
1872
+ given:
1873
+ semBlockLock - bin.semaphore
1874
+ semBlockQueue - bin.semaphore
1875
+ mtxExternal - mutex or CS
1876
+ mtxUnblockLock - mutex or CS
1877
+ nWaitersGone - int
1878
+ nWaitersBlocked - int
1879
+ nWaitersToUnblock - int
1880
+
1881
+ wait( timeout ) {
1882
+
1883
+ [auto: register int result ] // error checking omitted
1884
+ [auto: register int nWaitersWasGone ]
1885
+ [auto: register int nSignalsWasLeft ]
1886
+
1887
+ sem_wait( semBlockLock );
1888
+ nWaitersBlocked++;
1889
+ sem_post( semBlockLock );
1890
+
1891
+ unlock( mtxExternal );
1892
+ bTimedOut = sem_wait( semBlockQueue,timeout );
1893
+
1894
+ lock( mtxUnblockLock );
1895
+ if ( 0 != (nSignalsWasLeft = nWaitersToUnblock) ) {
1896
+ if ( bTimedOut ) { // timeout (or canceled)
1897
+ if ( 0 != nWaitersBlocked ) {
1898
+ nWaitersBlocked--;
1899
+ nSignalsWasLeft = 0; // do not unblock next waiter below (already unblocked)
1901
+ }
1902
+ else {
1903
+ nWaitersGone = 1; // spurious wakeup pending!!
1904
+ }
1905
+ }
1906
+ if ( 0 == --nWaitersToUnblock ) {
1907
+ if ( 0 != nWaitersBlocked ) {
1908
+ sem_post( semBlockLock ); // open the gate
1909
+ nSignalsWasLeft = 0; // do not open the gate below again
1911
+ }
1912
+ else if ( 0 != (nWaitersWasGone = nWaitersGone) ) {
1913
+ nWaitersGone = 0;
1914
+ }
1915
+ }
1916
+ }
1917
+ else if ( INT_MAX/2 == ++nWaitersGone ) { // timeout/canceled or spurious semaphore :-)
1919
+ sem_wait( semBlockLock );
1920
+ nWaitersBlocked -= nWaitersGone; // something is going on here - test of timeouts? :-)
1922
+ sem_post( semBlockLock );
1923
+ nWaitersGone = 0;
1924
+ }
1925
+ unlock( mtxUnblockLock );
1926
+
1927
+ if ( 1 == nSignalsWasLeft ) {
1928
+ if ( 0 != nWaitersWasGone ) {
1929
+ // sem_adjust( -1 );
1930
+ sem_wait( semBlockQueue ); // better now than spurious later
1932
+ }
1933
+ sem_post( semBlockLock ); // open the gate
1934
+ }
1935
+ else if ( 0 != nSignalsWasLeft ) {
1936
+ sem_post( semBlockQueue ); // unblock next waiter
1937
+ }
1938
+
1939
+ lock( mtxExternal );
1940
+
1941
+ return ( bTimedOut ) ? ETIMEDOUT : 0;
1942
+ }
1943
+
1944
+ signal(bAll) {
1945
+
1946
+ [auto: register int result ]
1947
+
1948
+ lock( mtxUnblockLock );
1949
+
1950
+ if ( 0 != nWaitersToUnblock ) { // the gate is closed!!!
1951
+ if ( 0 == nWaitersBlocked ) { // NO-OP
1952
+ return unlock( mtxUnblockLock );
1953
+ }
1954
+ if (bAll) {
1955
+ nWaitersToUnblock += nWaitersBlocked;
1956
+ nWaitersBlocked = 0;
1957
+ }
1958
+ else {
1959
+ nWaitersToUnblock++;
1960
+ nWaitersBlocked--;
1961
+ }
1962
+ unlock( mtxUnblockLock );
1963
+ }
1964
+ else if ( nWaitersBlocked > nWaitersGone ) { // HARMLESS RACE CONDITION!
1965
+ sem_wait( semBlockLock ); // close the gate
1966
+ if ( 0 != nWaitersGone ) {
1967
+ nWaitersBlocked -= nWaitersGone;
1968
+ nWaitersGone = 0;
1969
+ }
1970
+ if (bAll) {
1971
+ nWaitersToUnblock = nWaitersBlocked;
1972
+ nWaitersBlocked = 0;
1973
+ }
1974
+ else {
1975
+ nWaitersToUnblock = 1;
1976
+ nWaitersBlocked--;
1977
+ }
1978
+ unlock( mtxUnblockLock );
1979
+ sem_post( semBlockQueue );
1980
+ }
1981
+ else { // NO-OP
1982
+ unlock( mtxUnblockLock );
1983
+ }
1984
+
1985
+ return result;
1986
+ }
1987
+
1988
+ ---------- Algorithm 8c / IMPL_EVENT,UNBLOCK_STRATEGY == UNBLOCK_ONEBYONE ---------
1990
+ given:
1991
+ hevBlockLock - auto-reset event
1992
+ hevBlockQueue - auto-reset event
1993
+ mtxExternal - mutex or CS
1994
+ mtxUnblockLock - mutex or CS
1995
+ nWaitersGone - int
1996
+ nWaitersBlocked - int
1997
+ nWaitersToUnblock - int
1998
+
1999
+ wait( timeout ) {
2000
+
2001
+ [auto: register int result ] // error checking omitted
2002
+ [auto: register int nSignalsWasLeft ]
2003
+ [auto: register int nWaitersWasGone ]
2004
+
2005
+ wait( hevBlockLock,INFINITE );
2006
+ nWaitersBlocked++;
2007
+ set_event( hevBlockLock );
2008
+
2009
+ unlock( mtxExternal );
2010
+ bTimedOut = wait( hevBlockQueue,timeout );
2011
+
2012
+ lock( mtxUnblockLock );
2013
+ if ( 0 != (nSignalsWasLeft = nWaitersToUnblock) ) {
2014
+ if ( bTimedOut ) { // timeout (or canceled)
2015
+ if ( 0 != nWaitersBlocked ) {
2016
+ nWaitersBlocked--;
2017
+ nSignalsWasLeft = 0; // do not unblock next waiter below (already unblocked)
2019
+ }
2020
+ else {
2021
+ nWaitersGone = 1; // spurious wakeup pending!!
2022
+ }
2023
+ }
2024
+ if ( 0 == --nWaitersToUnblock ) {
2025
+ if ( 0 != nWaitersBlocked ) {
2026
+ set_event( hevBlockLock ); // open the gate
2027
+ nSignalsWasLeft = 0; // do not open the gate below again
2029
+ }
2030
+ else if ( 0 != (nWaitersWasGone = nWaitersGone) ) {
2031
+ nWaitersGone = 0;
2032
+ }
2033
+ }
2034
+ }
2035
+ else if ( INT_MAX/2 == ++nWaitersGone ) { // timeout/canceled or spurious event :-)
2037
+ wait( hevBlockLock,INFINITE );
2038
+ nWaitersBlocked -= nWaitersGone; // something is going on here - test of timeouts? :-)
2040
+ set_event( hevBlockLock );
2041
+ nWaitersGone = 0;
2042
+ }
2043
+ unlock( mtxUnblockLock );
2044
+
2045
+ if ( 1 == nSignalsWasLeft ) {
2046
+ if ( 0 != nWaitersWasGone ) {
2047
+ reset_event( hevBlockQueue ); // better now than spurious later
2049
+ }
2050
+ set_event( hevBlockLock ); // open the gate
2051
+ }
2052
+ else if ( 0 != nSignalsWasLeft ) {
2053
+ set_event( hevBlockQueue ); // unblock next waiter
2054
+ }
2055
+
2056
+ lock( mtxExternal );
2057
+
2058
+ return ( bTimedOut ) ? ETIMEDOUT : 0;
2059
+ }
2060
+
2061
+ signal(bAll) {
2062
+
2063
+ [auto: register int result ]
2064
+
2065
+ lock( mtxUnblockLock );
2066
+
2067
+ if ( 0 != nWaitersToUnblock ) { // the gate is closed!!!
2068
+ if ( 0 == nWaitersBlocked ) { // NO-OP
2069
+ return unlock( mtxUnblockLock );
2070
+ }
2071
+ if (bAll) {
2072
+ nWaitersToUnblock += nWaitersBlocked;
2073
+ nWaitersBlocked = 0;
2074
+ }
2075
+ else {
2076
+ nWaitersToUnblock++;
2077
+ nWaitersBlocked--;
2078
+ }
2079
+ unlock( mtxUnblockLock );
2080
+ }
2081
+ else if ( nWaitersBlocked > nWaitersGone ) { // HARMLESS RACE CONDITION!
2082
+ wait( hevBlockLock,INFINITE ); // close the gate
2083
+ if ( 0 != nWaitersGone ) {
2084
+ nWaitersBlocked -= nWaitersGone;
2085
+ nWaitersGone = 0;
2086
+ }
2087
+ if (bAll) {
2088
+ nWaitersToUnblock = nWaitersBlocked;
2089
+ nWaitersBlocked = 0;
2090
+ }
2091
+ else {
2092
+ nWaitersToUnblock = 1;
2093
+ nWaitersBlocked--;
2094
+ }
2095
+ unlock( mtxUnblockLock );
2096
+ set_event( hevBlockQueue );
2097
+ }
2098
+ else { // NO-OP
2099
+ unlock( mtxUnblockLock );
2100
+ }
2101
+
2102
+ return result;
2103
+ }
2104
+
2105
+ ---------- Algorithm 8d / IMPL_EVENT,UNBLOCK_STRATEGY == UNBLOCK_ALL ------
2106
+ given:
2107
+ hevBlockLock - auto-reset event
2108
+ hevBlockQueueS - auto-reset event // for signals
2109
+ hevBlockQueueB - manual-reset event // for broadcasts
2110
+ mtxExternal - mutex or CS
2111
+ mtxUnblockLock - mutex or CS
2112
+ eBroadcast - int // 0: no broadcast, 1: broadcast, 2: broadcast after signal(s)
2114
+ nWaitersGone - int
2115
+ nWaitersBlocked - int
2116
+ nWaitersToUnblock - int
2117
+
2118
+ wait( timeout ) {
2119
+
2120
+ [auto: register int result ] // error checking omitted
2121
+ [auto: register int eWasBroadcast ]
2122
+ [auto: register int nSignalsWasLeft ]
2123
+ [auto: register int nWaitersWasGone ]
2124
+
2125
+ wait( hevBlockLock,INFINITE );
2126
+ nWaitersBlocked++;
2127
+ set_event( hevBlockLock );
2128
+
2129
+ unlock( mtxExternal );
2130
+ bTimedOut = waitformultiple( hevBlockQueueS,hevBlockQueueB,timeout,ONE );
2131
+
2132
+ lock( mtxUnblockLock );
2133
+ if ( 0 != (nSignalsWasLeft = nWaitersToUnblock) ) {
2134
+ if ( bTimedOut ) { // timeout (or canceled)
2135
+ if ( 0 != nWaitersBlocked ) {
2136
+ nWaitersBlocked--;
2137
+ nSignalsWasLeft = 0; // do not unblock next waiter below (already unblocked)
2139
+ }
2140
+ else if ( 1 != eBroadcast ) {
2141
+ nWaitersGone = 1;
2142
+ }
2143
+ }
2144
+ if ( 0 == --nWaitersToUnblock ) {
2145
+ if ( 0 != nWaitersBlocked ) {
2146
+ set_event( hevBlockLock ); // open the gate
2147
+ nSignalsWasLeft = 0; // do not open the gate below again
2149
+ }
2150
+ else {
2151
+ if ( 0 != (eWasBroadcast = eBroadcast) ) {
2152
+ eBroadcast = 0;
2153
+ }
2154
+ if ( 0 != (nWaitersWasGone = nWaitersGone) ) {
2155
+ nWaitersGone = 0;
2156
+ }
2157
+ }
2158
+ }
2159
+ else if ( 0 != eBroadcast ) {
2160
+ nSignalsWasLeft = 0; // do not unblock next waiter below (already unblocked)
2162
+ }
2163
+ }
2164
+ else if ( INT_MAX/2 == ++nWaitersGone ) { // timeout/canceled or spurious event :-)
2166
+ wait( hevBlockLock,INFINITE );
2167
+ nWaitersBlocked -= nWaitersGone; // something is going on here - test of timeouts? :-)
2169
+ set_event( hevBlockLock );
2170
+ nWaitersGone = 0;
2171
+ }
2172
+ unlock( mtxUnblockLock );
2173
+
2174
+ if ( 1 == nSignalsWasLeft ) {
2175
+ if ( 0 != eWasBroadcast ) {
2176
+ reset_event( hevBlockQueueB );
2177
+ }
2178
+ if ( 0 != nWaitersWasGone ) {
2179
+ reset_event( hevBlockQueueS ); // better now than spurious later
2181
+ }
2182
+ set_event( hevBlockLock ); // open the gate
2183
+ }
2184
+ else if ( 0 != nSignalsWasLeft ) {
2185
+ set_event( hevBlockQueueS ); // unblock next waiter
2186
+ }
2187
+
2188
+ lock( mtxExternal );
2189
+
2190
+ return ( bTimedOut ) ? ETIMEDOUT : 0;
2191
+ }
2192
+
2193
+ signal(bAll) {
2194
+
2195
+ [auto: register int result ]
2196
+ [auto: register HANDLE hevBlockQueue ]
2197
+
2198
+ lock( mtxUnblockLock );
2199
+
2200
+ if ( 0 != nWaitersToUnblock ) { // the gate is closed!!!
2201
+ if ( 0 == nWaitersBlocked ) { // NO-OP
2202
+ return unlock( mtxUnblockLock );
2203
+ }
2204
+ if (bAll) {
2205
+ nWaitersToUnblock += nWaitersBlocked;
2206
+ nWaitersBlocked = 0;
2207
+ eBroadcast = 2;
2208
+ hevBlockQueue = hevBlockQueueB;
2209
+ }
2210
+ else {
2211
+ nWaitersToUnblock++;
2212
+ nWaitersBlocked--;
2213
+ return unlock( mtxUnblockLock );
2214
+ }
2215
+ }
2216
+ else if ( nWaitersBlocked > nWaitersGone ) { // HARMLESS RACE CONDITION!
2217
+ wait( hevBlockLock,INFINITE ); // close the gate
2218
+ if ( 0 != nWaitersGone ) {
2219
+ nWaitersBlocked -= nWaitersGone;
2220
+ nWaitersGone = 0;
2221
+ }
2222
+ if (bAll) {
2223
+ nWaitersToUnblock = nWaitersBlocked;
2224
+ nWaitersBlocked = 0;
2225
+ eBroadcast = 1;
2226
+ hevBlockQueue = hevBlockQueueB;
2227
+ }
2228
+ else {
2229
+ nWaitersToUnblock = 1;
2230
+ nWaitersBlocked--;
2231
+ hevBlockQueue = hevBlockQueueS;
2232
+ }
2233
+ }
2234
+ else { // NO-OP
2235
+ return unlock( mtxUnblockLock );
2236
+ }
2237
+
2238
+ unlock( mtxUnblockLock );
2239
+ set_event( hevBlockQueue );
2240
+ return result;
2241
+ }
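+ (Algorithms 8c/8d hinge on the difference between the two Win32 event
+ flavours: hevBlockQueueS is auto-reset, hevBlockQueueB is manual-reset.
+ A minimal sketch of the distinction, with illustrative handle names:)
+
+ #include <windows.h>
+
+ void event_flavours(void)
+ {
+     /* Auto-reset: one successful wait consumes the signal, so each
+      * set_event releases at most one waiter (the signal queue). */
+     HANDLE hevOne = CreateEvent(NULL, FALSE /* bManualReset */,
+                                 FALSE /* initial state */, NULL);
+
+     /* Manual-reset: stays signalled until ResetEvent, so every thread
+      * waiting in the meantime gets through (the broadcast queue). */
+     HANDLE hevAll = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+     SetEvent(hevOne);           /* releases a single waiter             */
+     SetEvent(hevAll);           /* releases all current waiters...      */
+     ResetEvent(hevAll);         /* ...until reset, as wait() does in 8d */
+
+     CloseHandle(hevOne);
+     CloseHandle(hevAll);
+ }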
2242
+ ---------------------- Forwarded by Alexander Terekhov/Germany/IBM on
2243
+ 02/21/2001 09:13 AM ---------------------------
2244
+
2245
+ Alexander Terekhov
2246
+ 02/20/2001 04:33 PM
2247
+
2248
+ To: Louis Thomas <lthomas@arbitrade.com>
2249
+ cc:
2250
+
2251
+ From: Alexander Terekhov/Germany/IBM@IBMDE
2252
+ Subject: RE: FYI/comp.programming.threads/Re: pthread_cond_* implementation questions
2254
+ Importance: Normal
2255
+
2256
+ >Sorry, gotta take a break and work on something else for a while.
2257
+ >Real work
2258
+ >calls, unfortunately. I'll get back to you in two or three days.
2259
+
2260
+ ok. no problem. here is some more stuff for pauses you might have
2261
+ in between :)
2262
+
2263
+ ---------- Algorithm 7d / IMPL_EVENT,UNBLOCK_STRATEGY == UNBLOCK_ALL ------
2264
+ given:
2265
+ hevBlockLock - auto-reset event
2266
+ hevBlockQueueS - auto-reset event // for signals
2267
+ hevBlockQueueB - manual-reset event // for broadcasts
2268
+ mtxExternal - mutex or CS
2269
+ mtxUnblockLock - mutex or CS
2270
+ bBroadcast - int
2271
+ nWaitersGone - int
2272
+ nWaitersBlocked - int
2273
+ nWaitersToUnblock - int
2274
+
2275
+ wait( timeout ) {
2276
+
2277
+ [auto: register int result ] // error checking omitted
2278
+ [auto: register int bWasBroadcast ]
2279
+ [auto: register int nSignalsWasLeft ]
2280
+
2281
+ wait( hevBlockLock,INFINITE );
2282
+ nWaitersBlocked++;
2283
+ set_event( hevBlockLock );
2284
+
2285
+ unlock( mtxExternal );
2286
+ bTimedOut = waitformultiple( hevBlockQueueS,hevBlockQueueB,timeout,ONE );
2287
+
2288
+ lock( mtxUnblockLock );
2289
+ if ( 0 != (nSignalsWasLeft = nWaitersToUnblock) ) {
2290
+ if ( bTimedOut ) { // timeout (or canceled)
2291
+ if ( 0 != nWaitersBlocked ) {
2292
+ nWaitersBlocked--;
2293
+ nSignalsWasLeft = 0; // do not unblock next waiter below (already unblocked)
2295
+ }
2296
+ else if ( !bBroadcast ) {
2297
+ wait( hevBlockQueueS,INFINITE ); // better now than spurious later
2299
+ }
2300
+ }
2301
+ if ( 0 == --nWaitersToUnblock ) {
2302
+ if ( 0 != nWaitersBlocked ) {
2303
+ if ( bBroadcast ) {
2304
+ reset_event( hevBlockQueueB );
2305
+ bBroadcast = false;
2306
+ }
2307
+ set_event( hevBlockLock ); // open the gate
2308
+ nSignalsWasLeft = 0; // do not open the gate below again
2310
+ }
2311
+ else if ( false != (bWasBroadcast = bBroadcast) ) {
2312
+ bBroadcast = false;
2313
+ }
2314
+ }
2315
+ else {
2316
+ bWasBroadcast = bBroadcast;
2317
+ }
2318
+ }
2319
+ else if ( INT_MAX/2 == ++nWaitersGone ) { // timeout/canceled or spurious event :-)
2321
+ wait( hevBlockLock,INFINITE );
2322
+ nWaitersBlocked -= nWaitersGone; // something is going on here - test of timeouts? :-)
2324
+ set_event( hevBlockLock );
2325
+ nWaitersGone = 0;
2326
+ }
2327
+ unlock( mtxUnblockLock );
2328
+
2329
+ if ( 1 == nSignalsWasLeft ) {
2330
+ if ( bWasBroadcast ) {
2331
+ reset_event( hevBlockQueueB );
2332
+ }
2333
+ set_event( hevBlockLock ); // open the gate
2334
+ }
2335
+ else if ( 0 != nSignalsWasLeft && !bWasBroadcast ) {
2336
+ set_event( hevBlockQueueS ); // unblock next waiter
2337
+ }
2338
+
2339
+ lock( mtxExternal );
2340
+
2341
+ return ( bTimedOut ) ? ETIMEDOUT : 0;
2342
+ }
2343
+
2344
+ signal(bAll) {
2345
+
2346
+ [auto: register int result ]
2347
+ [auto: register HANDLE hevBlockQueue ]
2348
+
2349
+ lock( mtxUnblockLock );
2350
+
2351
+ if ( 0 != nWaitersToUnblock ) { // the gate is closed!!!
2352
+ if ( 0 == nWaitersBlocked ) { // NO-OP
2353
+ return unlock( mtxUnblockLock );
2354
+ }
2355
+ if (bAll) {
2356
+ nWaitersToUnblock += nWaitersBlocked;
2357
+ nWaitersBlocked = 0;
2358
+ bBroadcast = true;
2359
+ hevBlockQueue = hevBlockQueueB;
2360
+ }
2361
+ else {
2362
+ nWaitersToUnblock++;
2363
+ nWaitersBlocked--;
2364
+ return unlock( mtxUnblockLock );
2365
+ }
2366
+ }
2367
+ else if ( nWaitersBlocked > nWaitersGone ) { // HARMLESS RACE CONDITION!
2368
+ wait( hevBlockLock,INFINITE ); // close the gate
2369
+ if ( 0 != nWaitersGone ) {
2370
+ nWaitersBlocked -= nWaitersGone;
2371
+ nWaitersGone = 0;
2372
+ }
2373
+ if (bAll) {
2374
+ nWaitersToUnblock = nWaitersBlocked;
2375
+ nWaitersBlocked = 0;
2376
+ bBroadcast = true;
2377
+ hevBlockQueue = hevBlockQueueB;
2378
+ }
2379
+ else {
2380
+ nWaitersToUnblock = 1;
2381
+ nWaitersBlocked--;
2382
+ hevBlockQueue = hevBlockQueueS;
2383
+ }
2384
+ }
2385
+ else { // NO-OP
2386
+ return unlock( mtxUnblockLock );
2387
+ }
2388
+
2389
+ unlock( mtxUnblockLock );
2390
+ set_event( hevBlockQueue );
2391
+ return result;
2392
+ }
2393
+
2394
+
2395
+ ----------------------------------------------------------------------------
2396
+
2397
+ Subject: RE: FYI/comp.programming.threads/Re: pthread_cond_* implementation questions
2399
+ Date: Mon, 26 Feb 2001 22:20:12 -0600
2400
+ From: Louis Thomas <lthomas@arbitrade.com>
2401
+ To: "'TEREKHOV@de.ibm.com'" <TEREKHOV@de.ibm.com>
2402
+ CC: rpj@ise.canberra.edu.au, Thomas Pfaff <tpfaff@gmx.net>,
2403
+ Nanbor Wang
2404
+ <nanbor@cs.wustl.edu>
2405
+
2406
+ Sorry all. Busy week.
2407
+
2408
+ > this insures the fairness
2409
+ > which POSIX does not (e.g. two subsequent broadcasts - the gate does
2410
+ insure
2411
+ > that first wave waiters will start the race for the mutex before waiters
2412
+ > from the second wave - Linux pthreads process/unblock both waves
2413
+ > concurrently...)
2414
+
2415
+ I'm not sure how we are any more fair about this than Linux. We certainly
2416
+ don't guarantee that the threads released by the first broadcast will get
2417
+ the external mutex before the threads of the second wave. In fact, it is
2418
+ possible that those threads will never get the external mutex if there is
2419
+ enough contention for it.
2420
+
2421
+ > e.g. i was thinking about implementation with a pool of
2422
+ > N semaphores/counters [...]
2423
+
2424
+ I considered that too. The problem is as you mentioned in a). You really
2425
+ need to assign threads to semaphores once you know how you want to wake them
2426
+ up, not when they first begin waiting, which is the only time you can assign
2427
+ them.
2428
+
2429
+ > well, i am not quite sure that i've fully understood your scenario,
2430
+
2431
+ Hmm. Well, I think it's an important example, so I'll try again. First, we
2432
+ have thread A which we KNOW is waiting on a condition. As soon as it becomes
2433
+ unblocked for any reason, we will know because it will set a flag. Since the
2434
+ flag is not set, we are 100% confident that thread A is waiting on the
2435
+ condition. We have another thread, thread B, which has acquired the mutex
2436
+ and is about to wait on the condition. Thus it is pretty clear that at any
2437
+ point, either just A is waiting, or A and B are waiting. Now thread C comes
2438
+ along. C is about to do a broadcast on the condition. A broadcast is
2439
+ guaranteed to unblock all threads currently waiting on a condition, right?
2440
+ Again, we said that either just A is waiting, or A and B are both waiting.
2441
+ So, when C does its broadcast, depending upon whether B has started waiting
2442
+ or not, thread C will unblock A or unblock A and B. Either way, C must
2443
+ unblock A, right?
2444
+
2445
+ Now, you said anything that happens is correct so long as a) "a signal is
2446
+ not lost between unlocking the mutex and waiting on the condition" and b) "a
2447
+ thread must not steal a signal it sent", correct? Requirement b) is easy to
2448
+ satisfy: in this scenario, thread C will never wait on the condition, so it
2449
+ won't steal any signals. Requirement a) is not hard either. The only way we
2450
+ could fail to meet requirement a) in this scenario is if thread B was
2451
+ started waiting but didn't wake up because a signal was lost. This will not
2452
+ happen.
2453
+
2454
+ Now, here is what happens. Assume thread C beats thread B. Thread C looks to
2455
+ see how many threads are waiting on the condition. Thread C sees just one
2456
+ thread, thread A, waiting. It does a broadcast waking up just one thread
2457
+ because just one thread is waiting. Next, before A can become unblocked,
2458
+ thread B begins waiting. Now there are two threads waiting, but only one
2459
+ will be unblocked. Suppose B wins. B will become unblocked. A will not
2460
+ become unblocked, because C only unblocked one thread (sema_post cond, 1).
2461
+ So at the end, B finishes and A remains blocked.
2462
+
2463
+ We have met both of your requirements, so by your rules, this is an
2464
+ acceptable outcome. However, I think that the spec says this is an
2465
+ unacceptable outcome! We know for certain that A was waiting and that C did
2466
+ a broadcast, but A did not become unblocked! Yet, the spec says that a
2467
+ broadcast wakes up all waiting threads. This did not happen. Do you agree
2468
+ that this shows your rules are not strict enough?
2469
+
2470
+ > and what about N2? :) this one does allow almost everything.
2471
+
2472
+ Don't get me started about rule #2. I'll NEVER advocate an algorithm that
2473
+ uses rule 2 as an excuse to suck!
2474
+
2475
+ > but it is done (decrement)under mutex protection - this is not a subject
2476
+ > of a race condition.
2477
+
2478
+ You are correct. My mistake.
2479
+
2480
+ > i would remove "_bTimedOut=false".. after all, it was a real timeout..
2481
+
2482
+ I disagree. A thread that can't successfully retract its waiter status can't
2483
+ really have timed out. If a thread can't return without executing extra code
2484
+ to deal with the fact that someone tried to unblock it, I think it is a poor
2485
+ idea to pretend we
2486
+ didn't realize someone was trying to signal us. After all, a signal is more
2487
+ important than a time out.
2488
+
2489
+ > when nSignaled != 0, it is possible to update nWaiters (--) and do not
2490
+ > touch nGone
2491
+
2492
+ I realize this, but I was thinking that writing it the other way saves
2493
+ another if statement.
2494
+
2495
+ > adjust only if nGone != 0 and save one cache memory write - probably much
2496
+ slower than 'if'
2497
+
2498
+ Hmm. You are probably right.
2499
+
2500
+ > well, in a strange (e.g. timeout test) program you may (theoretically)
2501
+ > have an overflow of nWaiters/nGone counters (with waiters repeatedly
2502
+ timing
2503
+ > out and no signals at all).
2504
+
2505
+ Also true. Not only that, but you also have the possibility that one could
2506
+ overflow the number of waiters as well! However, considering the limit you
2507
+ have chosen for nWaitersGone, I suppose it is unlikely that anyone would be
2508
+ able to get INT_MAX/2 threads waiting on a single condition. :)
2509
+
2510
+ Analysis of 8a:
2511
+
2512
+ It looks correct to me.
2513
+
2514
+ What are IPC semaphores?
2515
+
2516
+ In the line where you state, "else if ( nWaitersBlocked > nWaitersGone ) {
2517
+ // HARMLESS RACE CONDITION!" there is no race condition for nWaitersGone
2518
+ because nWaitersGone is never modified without holding mtxUnblockLock. You
2519
+ are correct that there is a harmless race on nWaitersBlocked, which can
2520
+ increase and make the condition become true just after we check it. If this
2521
+ happens, we interpret it as the wait starting after the signal.
2522
+
2523
+ I like your optimization of this. You could improve Alg. 6 as follows:
2524
+ ---------- Algorithm 6b ----------
2525
+ signal(bAll) {
2526
+ _nSig=0
2527
+ lock counters
2528
+ // this is safe because nWaiting can only be decremented by a thread that
2529
+ // owns counters and nGone can only be changed by a thread that owns counters.
2531
+ if (nWaiting>nGone) {
2532
+ if (0==nSignaled) {
2533
+ sema_wait gate // close gate if not already closed
2534
+ }
2535
+ if (nGone>0) {
2536
+ nWaiting-=nGone
2537
+ nGone=0
2538
+ }
2539
+ _nSig=bAll?nWaiting:1
2540
+ nSignaled+=_nSig
2541
+ nWaiting-=_nSig
2542
+ }
2543
+ unlock counters
2544
+ if (0!=_nSig) {
2545
+ sema_post queue, _nSig
2546
+ }
2547
+ }
2548
+ ---------- ---------- ----------
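+ (The 'sema_post queue, _nSig' step above posts the queue semaphore _nSig
+ times at once. POSIX sem_post takes no count, so a portable rendering
+ loops; Win32 can use ReleaseSemaphore( hsem, n, NULL ) in one call. An
+ illustrative helper:)
+
+ #include <semaphore.h>
+
+ /* Post 'n' units to a POSIX semaphore: the portable spelling of
+  * 'sema_post queue, n'. */
+ static void sema_post_n(sem_t *queue, int n)
+ {
+     while (n-- > 0)
+         sem_post(queue);
+ }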
2549
+ I guess this wouldn't apply to Alg 8a because nWaitersGone changes meanings
2550
+ depending upon whether the gate is open or closed.
2551
+
2552
+ In the loop "while ( nWaitersWasGone-- ) {" you do a sema_wait on
2553
+ semBlockLock. Perhaps waiting on semBlockQueue would be a better idea.
2554
+
2555
+ What have you gained by making the last thread to be signaled do the waits
2556
+ for all the timed out threads, besides added complexity? It took me a long
2557
+ time to figure out what your objective was with this, to realize you were
2558
+ using nWaitersGone to mean two different things, and to verify that you
2559
+ hadn't introduced any bug by doing this. Even now I'm not 100% sure.
2560
+
2561
+ What has all this playing about with nWaitersGone really gained us besides a
2562
+ lot of complexity (it is much harder to verify that this solution is
2563
+ correct), execution overhead (we now have a lot more if statements to
2564
+ evaluate), and space overhead (more space for the extra code, and another
2565
+ integer in our data)? We did manage to save a lock/unlock pair in an
2566
+ uncommon case (when a time out occurs) at the above mentioned expenses in
2567
+ the common cases.
2568
+
2569
+ As for 8b, c, and d, they look ok though I haven't studied them thoroughly.
2570
+ What would you use them for?
2571
+
2572
+ Later,
2573
+ -Louis! :)
2574
+
2575
+ -----------------------------------------------------------------------------
2576
+
2577
+ Subject: RE: FYI/comp.programming.threads/Re: pthread_cond_* implementation questions
2579
+ Date: Tue, 27 Feb 2001 15:51:28 +0100
2580
+ From: TEREKHOV@de.ibm.com
2581
+ To: Louis Thomas <lthomas@arbitrade.com>
2582
+ CC: rpj@ise.canberra.edu.au, Thomas Pfaff <tpfaff@gmx.net>,
2583
+ Nanbor Wang <nanbor@cs.wustl.edu>
2584
+
2585
+ Hi Louis,
2586
+
2587
+ >> that first wave waiters will start the race for the mutex before waiters
2588
+ >> from the second wave - Linux pthreads process/unblock both waves
2589
+ >> concurrently...)
2590
+ >
2591
+ >I'm not sure how we are any more fair about this than Linux. We certainly
2592
+ >don't guarantee that the threads released by the first broadcast will get
2593
+ >the external mutex before the threads of the second wave. In fact, it is
2594
+ >possible that those threads will never get the external mutex if there is
2595
+ >enough contention for it.
2596
+
2597
+ correct. but gate is nevertheless more fair than Linux because of the
2598
+ barrier it establishes between two races (1st and 2nd wave waiters) for
2599
+ the mutex which under 'normal' circumstances (e.g. all threads of equal
2600
+ priorities,..) will 'probably' result in fair behaviour with respect to
2601
+ mutex ownership.
2602
+
2603
+ >> well, i am not quite sure that i've fully understood your scenario,
2604
+ >
2605
+ >Hmm. Well, it think it's an important example, so I'll try again. ...
2606
+
2607
+ ok. now i seem to understand this example. well, now it seems to me
2608
+ that the only meaningful rule is just:
2609
+
2610
+ a) "a signal is not lost between unlocking the mutex and waiting on the
2611
+ condition"
2612
+
2613
+ and that the rule
2614
+
2615
+ b) "a thread must not steal a signal it sent"
2616
+
2617
+ is not needed at all because a thread which violates b) also violates a).
2618
+
2619
+ i'll try to explain..
2620
+
2621
+ i think that the most important thing is how POSIX defines waiter's
2622
+ visibility:
2623
+
2624
+ "if another thread is able to acquire the mutex after the about-to-block
2625
+ thread has released it, then a subsequent call to pthread_cond_signal() or
2627
+ pthread_cond_broadcast() in that thread behaves as if it were issued after
2628
+ the about-to-block thread has blocked. "
2629
+
2630
+ my understanding is the following:
2631
+
2632
+ 1) there are no guarantees whatsoever with respect to whether signal/broadcast
2634
+ will actually unblock any 'waiter' if it is done w/o acquiring the mutex first
2636
+ (note that a thread may release it before signal/broadcast - it does not
2637
+ matter).
2638
+
2639
+ 2) it is guaranteed that waiters become 'visible' - eligible for unblock - as
2640
+ soon as the signalling thread acquires the mutex (but not before!!)
2642
+
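+ (expressed in code, 1) and 2) are why the textbook pattern updates the
+ predicate and signals while holding the mutex; a minimal sketch with
+ illustrative names:)
+
+ #include <pthread.h>
+
+ pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+ pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER;
+ int ready = 0;
+
+ void make_ready(void)
+ {
+     pthread_mutex_lock(&mtx);    /* every waiter that released mtx before */
+     ready = 1;                   /* this point is now 'visible' to us...  */
+     pthread_cond_broadcast(&cv); /* ...so this call must unblock them     */
+     pthread_mutex_unlock(&mtx);
+ }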
2643
+ so..
2644
+
2645
+ >So, when C does its broadcast, depending upon whether B has started waiting
2647
+ >or not, thread C will unblock A or unblock A and B. Either way, C must
2648
+ >unblock A, right?
2649
+
2650
+ right. but only if C did acquire the mutex prior to broadcast (it may
2651
+ release it before broadcast as well).
2652
+
2653
+ the implementation will violate the waiters-visibility rule (the signal will
2654
+ become lost) if C does not unblock A.
2656
+
2657
+ >Now, here is what happens. Assume thread C beats thread B. Thread C looks to
2659
+ >see how many threads are waiting on the condition. Thread C sees just one
2660
+ >thread, thread A, waiting. It does a broadcast waking up just one thread
2661
+ >because just one thread is waiting. Next, before A can become unblocked,
2662
+ >thread B begins waiting. Now there are two threads waiting, but only one
2663
+ >will be unblocked. Suppose B wins. B will become unblocked. A will not
2664
+ >become unblocked, because C only unblocked one thread (sema_post cond, 1).
2665
+ >So at the end, B finishes and A remains blocked.
2666
+
2667
+ thread C did acquire the mutex ("Thread C sees just one thread, thread A,
2668
+ waiting"). beginning from that moment it is guaranteed that subsequent
2669
+ broadcast will unblock A. Otherwise we will have a lost signal with respect
2670
+ to A. I do think that it does not matter whether the signal was physically
2671
+ (completely) lost or was just stolen by another thread (B) - in both cases
2672
+ it was simply lost with respect to A.
2673
+
2674
+ >..Do you agree that this shows your rules are not strict enough?
2675
+
2676
+ probably the opposite.. :-) i think that it shows that the only meaningful
2677
+ rule is
2678
+
2679
+ a) "a signal is not lost between unlocking the mutex and waiting on the
2680
+ condition"
2681
+
2682
+ with clarification of waiters visibility as defined by POSIX above.
2683
+
2684
+ >> i would remove "_bTimedOut=false".. after all, it was a real timeout..
+ >
+ >I disagree. A thread that can't successfully retract its waiter status
+ >can't really have timed out. If a thread can't return without executing
+ >extra code to deal with the fact that someone tried to unblock it, I
+ >think it is a poor idea to pretend we didn't realize someone was trying
+ >to signal us. After all, a signal is more important than a time out.
+
+ a) POSIX does allow a timed-out thread to consume a signal (a cancelled
+ one is not allowed to).
+ b) the ETIMEDOUT status just says: "The time specified by abstime to
+ pthread_cond_timedwait() has passed."
+ c) it seems to me that hiding timeouts would violate "The
+ pthread_cond_timedwait() function is the same as pthread_cond_wait()
+ except that an error is returned if the absolute time specified by
+ abstime passes (that is, system time equals or exceeds abstime) before
+ the condition cond is signaled or broadcasted", because the abs. time did
+ really pass before cond was signaled (the waiter was released via the
+ semaphore). however, if it really matters, i could imagine that we can
+ save an abs. time of signal/broadcast and compare it with the timeout
+ after unblock to find out whether it was a 'real' timeout or not. absent
+ this check i do think that hiding timeouts would result in a technical
+ violation of the specification.. but i think that this check is not
+ important and we can simply trust the timeout error code provided by wait,
+ since we are not trying to make a 'hard' realtime implementation.
+
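+ (either way, a conforming caller already has to cope with both outcomes;
+ a minimal sketch - illustrative names only - of the canonical
+ pthread_cond_timedwait() loop, which re-checks the predicate even on
+ ETIMEDOUT so that a signal consumed 'at the same time as' a timeout is
+ never acted on incorrectly:)
+
+     #include <errno.h>
+     #include <pthread.h>
+     #include <time.h>
+
+     /* wait until *flag becomes nonzero or deadline passes; returns 0 on
+        success, ETIMEDOUT if the predicate never held */
+     int wait_flag(pthread_mutex_t *mtx, pthread_cond_t *cv,
+                   int *flag, const struct timespec *deadline) {
+         int rc = 0;
+         pthread_mutex_lock(mtx);
+         while (!*flag && rc != ETIMEDOUT)
+             rc = pthread_cond_timedwait(cv, mtx, deadline);
+         rc = *flag ? 0 : ETIMEDOUT;  /* predicate wins over the error code */
+         pthread_mutex_unlock(mtx);
+         return rc;
+     }
+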
2721
+ >What are IPC semaphores?
+
+ <sys/sem.h>
+ int semctl(int, int, int, ...);
+ int semget(key_t, int, int);
+ int semop(int, struct sembuf *, size_t);
+
+ they support adjustment of the semaphore counter (semvalue)
+ in one single call - imagine Win32 ReleaseSemaphore( hsem, -N )
+
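+ (a minimal sketch of that one-call adjustment with SysV IPC semaphores;
+ error handling is omitted and the helper name is illustrative:)
+
+     #include <sys/types.h>
+     #include <sys/ipc.h>
+     #include <sys/sem.h>
+
+     /* adjust semaphore 0 of set 'semid' by 'n' in a single kernel call:
+        n > 0 adds n units at once; n < 0 blocks until the value is at
+        least -n, then subtracts -n atomically */
+     int sem_adjust(int semid, short n) {
+         struct sembuf op = { 0 /* sem_num */, n /* sem_op */, 0 /* sem_flg */ };
+         return semop(semid, &op, 1);
+     }
+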
2731
+ >In the line where you state, "else if ( nWaitersBlocked > nWaitersGone ) {
+ >// HARMLESS RACE CONDITION!" there is no race condition for nWaitersGone
+ >because nWaitersGone is never modified without holding mtxUnblockLock. You
+ >are correct that there is a harmless race on nWaitersBlocked, which can
+ >increase and make the condition become true just after we check it. If
+ >this happens, we interpret it as the wait starting after the signal.
+
+ well, the reason why i've asked on comp.programming.threads whether this
+ race condition is harmless or not is that in order to be harmless it
+ should not violate the waiter visibility rule (see above). fortunately, we
+ increment the counter under protection of the external mutex.. so that any
+ (signalling) thread which acquires the mutex next should see the updated
+ counter (in signal), according to POSIX memory visibility rules for
+ mutexes (memory barriers). but i am not so sure how it actually works on
+ Win32/INTEL, which does not explicitly define any memory visibility
+ rules :(
+
2750
+ >I like your optimization of this. You could improve Alg. 6 as follows:
+ >---------- Algorithm 6b ----------
+ >signal(bAll) {
+ >  _nSig=0
+ >  lock counters
+ >  // this is safe because nWaiting can only be decremented by a thread
+ >  // that owns counters and nGone can only be changed by a thread that
+ >  // owns counters.
+ >  if (nWaiting>nGone) {
+ >    if (0==nSignaled) {
+ >      sema_wait gate // close gate if not already closed
+ >    }
+ >    if (nGone>0) {
+ >      nWaiting-=nGone
+ >      nGone=0
+ >    }
+ >    _nSig=bAll?nWaiting:1
+ >    nSignaled+=_nSig
+ >    nWaiting-=_nSig
+ >  }
+ >  unlock counters
+ >  if (0!=_nSig) {
+ >    sema_post queue, _nSig
+ >  }
+ >}
+ >---------- ---------- ----------
+ >I guess this wouldn't apply to Alg 8a because nWaitersGone changes
+ >meanings depending upon whether the gate is open or closed.
+
+ agree.
+
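+ (for readers following along, a compilable rendering of that pseudocode -
+ a sketch only, assuming 'counters' is a pthread mutex and 'gate'/'queue'
+ are POSIX semaphores; this is not the pthreads-win32 source:)
+
+     #include <pthread.h>
+     #include <semaphore.h>
+
+     typedef struct {
+         pthread_mutex_t counters;  /* protects the three counts below */
+         int nWaiting, nGone, nSignaled;
+         sem_t gate;                /* admits new waiters when open    */
+         sem_t queue;               /* waiters block here              */
+     } cond6b_t;
+
+     void cond6b_signal(cond6b_t *cv, int bAll) {
+         int nSig = 0;
+         pthread_mutex_lock(&cv->counters);
+         if (cv->nWaiting > cv->nGone) {
+             if (cv->nSignaled == 0)
+                 sem_wait(&cv->gate);   /* close gate if not already closed */
+             if (cv->nGone > 0) {
+                 cv->nWaiting -= cv->nGone;
+                 cv->nGone = 0;
+             }
+             nSig = bAll ? cv->nWaiting : 1;
+             cv->nSignaled += nSig;
+             cv->nWaiting  -= nSig;
+         }
+         pthread_mutex_unlock(&cv->counters);
+         while (nSig-- > 0)
+             sem_post(&cv->queue);      /* sema_post queue, _nSig */
+     }
+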
2783
+ >In the loop "while ( nWaitersWasGone-- ) {" you do a sema_wait on
+ >semBlockLock. Perhaps waiting on semBlockQueue would be a better idea.
+
+ you are correct. my mistake.
+
2788
+ >What have you gained by making the last thread to be signaled do the waits
+ >for all the timed out threads, besides added complexity? It took me a long
+ >time to figure out what your objective was with this, to realize you were
+ >using nWaitersGone to mean two different things, and to verify that you
+ >hadn't introduced any bug by doing this. Even now I'm not 100% sure.
+ >
+ >What has all this playing about with nWaitersGone really gained us besides
+ >a lot of complexity (it is much harder to verify that this solution is
+ >correct), execution overhead (we now have a lot more if statements to
+ >evaluate), and space overhead (more space for the extra code, and another
+ >integer in our data)? We did manage to save a lock/unlock pair in an
+ >uncommon case (when a time out occurs) at the above mentioned expenses in
+ >the common cases.
+
+ well, please consider the following:
+
+ 1) with multiple waiters unblocked (but some timed out) the trick with the
+ counter seems to ensure a potentially higher level of concurrency by not
+ delaying most of the unblocked waiters for semaphore cleanup - only the
+ last one will be delayed, but all others would already contend for/
+ acquire/release the external mutex - the critical section protected by
+ mtxUnblockLock is made smaller (an increment plus a couple of IFs is
+ faster than a system/kernel call), which i think is good in general.
+ however, you are right, this is done at the expense of 'normal' waiters..
+
+ 2) some semaphore APIs (e.g. POSIX IPC sems) do allow adjusting the
+ semaphore counter in one call => fewer system/kernel calls.. imagine:
+
+ if ( 1 == nSignalsWasLeft ) {
+     if ( 0 != nWaitersWasGone ) {
+         ReleaseSemaphore( semBlockQueue, -nWaitersWasGone ); // better now than spurious later
+     }
+     sem_post( semBlockLock ); // open the gate
+ }
+
+ 3) even on win32 a single thread doing multiple cleanup calls (to wait)
+ will probably result in faster execution (because of processor caching)
+ than multiple threads each doing a single call to wait.
+
2830
+ >As for 8b, c, and d, they look ok though I haven't studied them
+ >thoroughly. What would you use them for?
+
+ 8b) for semaphores which do not allow unblocking multiple waiters
+ in a single call to post/release (e.g. POSIX realtime semaphores -
+ <semaphore.h>)
+
+ 8c/8d) for WinCE prior to 3.0 (WinCE 3.0 does have semaphores)
+
+ ok. so, which one is the 'final' algorithm(s) which we should use in
+ pthreads-win32??
+
+ regards,
+ alexander.
+
2846
+ ----------------------------------------------------------------------------
+
+ Louis Thomas <lthomas@arbitrade.com> on 02/27/2001 05:20:12 AM
+
+ Please respond to Louis Thomas <lthomas@arbitrade.com>
+
+ To: Alexander Terekhov/Germany/IBM@IBMDE
+ cc: rpj@ise.canberra.edu.au, Thomas Pfaff <tpfaff@gmx.net>, Nanbor Wang
+ <nanbor@cs.wustl.edu>
+ Subject: RE: FYI/comp.programming.threads/Re: pthread_cond_* implementation
+ questions
+
+ Sorry all. Busy week.
+
2860
+ > this ensures the fairness
+ > which POSIX does not (e.g. two subsequent broadcasts - the gate does
+ > ensure that first wave waiters will start the race for the mutex before
+ > waiters from the second wave - Linux pthreads process/unblock both waves
+ > concurrently...)
+
+ I'm not sure how we are any more fair about this than Linux. We certainly
+ don't guarantee that the threads released by the first broadcast will get
+ the external mutex before the threads of the second wave. In fact, it is
+ possible that those threads will never get the external mutex if there is
+ enough contention for it.
+
+ > e.g. i was thinking about implementation with a pool of
+ > N semaphores/counters [...]
+
+ I considered that too. The problem is as you mentioned in a). You really
+ need to assign threads to semaphores once you know how you want to wake
+ them up, not when they first begin waiting, which is the only time you can
+ assign them.
+
2882
+ > well, i am not quite sure that i've fully understood your scenario,
+
+ Hmm. Well, I think it's an important example, so I'll try again. First, we
+ have thread A which we KNOW is waiting on a condition. As soon as it
+ becomes unblocked for any reason, we will know because it will set a flag.
+ Since the flag is not set, we are 100% confident that thread A is waiting
+ on the condition. We have another thread, thread B, which has acquired the
+ mutex and is about to wait on the condition. Thus it is pretty clear that
+ at any point, either just A is waiting, or A and B are waiting. Now thread
+ C comes along. C is about to do a broadcast on the condition. A broadcast
+ is guaranteed to unblock all threads currently waiting on a condition,
+ right? Again, we said that either just A is waiting, or A and B are both
+ waiting. So, when C does its broadcast, depending upon whether B has
+ started waiting or not, thread C will unblock A or unblock A and B. Either
+ way, C must unblock A, right?
+
+ Now, you said anything that happens is correct so long as a) "a signal is
+ not lost between unlocking the mutex and waiting on the condition" and b)
+ "a thread must not steal a signal it sent", correct? Requirement b) is
+ easy to satisfy: in this scenario, thread C will never wait on the
+ condition, so it won't steal any signals. Requirement a) is not hard
+ either. The only way we could fail to meet requirement a) in this scenario
+ is if thread B had started waiting but didn't wake up because a signal was
+ lost. This will not happen.
+
+ Now, here is what happens. Assume thread C beats thread B. Thread C looks
+ to see how many threads are waiting on the condition. Thread C sees just
+ one thread, thread A, waiting. It does a broadcast waking up just one
+ thread because just one thread is waiting. Next, before A can become
+ unblocked, thread B begins waiting. Now there are two threads waiting, but
+ only one will be unblocked. Suppose B wins. B will become unblocked. A
+ will not become unblocked, because C only unblocked one thread (sema_post
+ cond, 1). So at the end, B finishes and A remains blocked.
+
+ We have met both of your requirements, so by your rules, this is an
+ acceptable outcome. However, I think that the spec says this is an
+ unacceptable outcome! We know for certain that A was waiting and that C
+ did a broadcast, but A did not become unblocked! Yet, the spec says that a
+ broadcast wakes up all waiting threads. This did not happen. Do you agree
+ that this shows your rules are not strict enough?
+
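+ (a minimal sketch of the naive counting scheme this scenario defeats -
+ illustrative names, not the pthreads-win32 code. the window between C
+ reading nWaiting and A actually consuming its post is exactly where B can
+ slip in and steal the wakeup:)
+
+     #include <pthread.h>
+     #include <semaphore.h>
+
+     typedef struct {
+         pthread_mutex_t lock;      /* protects nWaiting   */
+         int             nWaiting;
+         sem_t           queue;     /* waiters block here  */
+     } naive_cond_t;
+
+     void naive_wait(naive_cond_t *cv, pthread_mutex_t *extern_mutex) {
+         pthread_mutex_lock(&cv->lock);
+         cv->nWaiting++;
+         pthread_mutex_unlock(&cv->lock);
+         pthread_mutex_unlock(extern_mutex);
+         sem_wait(&cv->queue);      /* B can arrive here after C counted */
+         pthread_mutex_lock(extern_mutex);
+     }
+
+     void naive_broadcast(naive_cond_t *cv) {
+         pthread_mutex_lock(&cv->lock);
+         int n = cv->nWaiting;      /* C sees only A waiting ...        */
+         cv->nWaiting = 0;
+         pthread_mutex_unlock(&cv->lock);
+         while (n-- > 0)
+             sem_post(&cv->queue);  /* ... and B may consume this post  */
+     }
+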
2928
+ > and what about N2? :) this one does allow almost everything.
+
+ Don't get me started about rule #2. I'll NEVER advocate an algorithm that
+ uses rule 2 as an excuse to suck!
+
+ > but it is done (decrement) under mutex protection - this is not a subject
+ > of a race condition.
+
+ You are correct. My mistake.
+
2938
+ > i would remove "_bTimedOut=false".. after all, it was a real timeout..
+
+ I disagree. A thread that can't successfully retract its waiter status
+ can't really have timed out. If a thread can't return without executing
+ extra code to deal with the fact that someone tried to unblock it, I think
+ it is a poor idea to pretend we didn't realize someone was trying to
+ signal us. After all, a signal is more important than a time out.
+
+ > when nSignaled != 0, it is possible to update nWaiters (--) and do not
+ > touch nGone
+
+ I realize this, but I was thinking that writing it the other way saves
+ another if statement.
+
+ > adjust only if nGone != 0 and save one cache memory write - probably much
+ > slower than 'if'
+
+ Hmm. You are probably right.
+
+ > well, in a strange (e.g. timeout test) program you may (theoretically)
+ > have an overflow of nWaiters/nGone counters (with waiters repeatedly
+ > timing out and no signals at all).
+
+ Also true. Not only that, but you also have the possibility that one could
+ overflow the number of waiters as well! However, considering the limit you
+ have chosen for nWaitersGone, I suppose it is unlikely that anyone would
+ be able to get INT_MAX/2 threads waiting on a single condition. :)
+
2971
+ Analysis of 8a:
+
+ It looks correct to me.
+
+ What are IPC semaphores?
+
+ In the line where you state, "else if ( nWaitersBlocked > nWaitersGone ) {
+ // HARMLESS RACE CONDITION!" there is no race condition for nWaitersGone
+ because nWaitersGone is never modified without holding mtxUnblockLock. You
+ are correct that there is a harmless race on nWaitersBlocked, which can
+ increase and make the condition become true just after we check it. If this
+ happens, we interpret it as the wait starting after the signal.
+
2984
+ I like your optimization of this. You could improve Alg. 6 as follows:
+ ---------- Algorithm 6b ----------
+ signal(bAll) {
+   _nSig=0
+   lock counters
+   // this is safe because nWaiting can only be decremented by a thread that
+   // owns counters and nGone can only be changed by a thread that owns
+   // counters.
+   if (nWaiting>nGone) {
+     if (0==nSignaled) {
+       sema_wait gate // close gate if not already closed
+     }
+     if (nGone>0) {
+       nWaiting-=nGone
+       nGone=0
+     }
+     _nSig=bAll?nWaiting:1
+     nSignaled+=_nSig
+     nWaiting-=_nSig
+   }
+   unlock counters
+   if (0!=_nSig) {
+     sema_post queue, _nSig
+   }
+ }
+ ---------- ---------- ----------
+ I guess this wouldn't apply to Alg 8a because nWaitersGone changes meanings
+ depending upon whether the gate is open or closed.
+
3013
+ In the loop "while ( nWaitersWasGone-- ) {" you do a sema_wait on
+ semBlockLock. Perhaps waiting on semBlockQueue would be a better idea.
+
+ What have you gained by making the last thread to be signaled do the waits
+ for all the timed out threads, besides added complexity? It took me a long
+ time to figure out what your objective was with this, to realize you were
+ using nWaitersGone to mean two different things, and to verify that you
+ hadn't introduced any bug by doing this. Even now I'm not 100% sure.
+
+ What has all this playing about with nWaitersGone really gained us besides
+ a lot of complexity (it is much harder to verify that this solution is
+ correct), execution overhead (we now have a lot more if statements to
+ evaluate), and space overhead (more space for the extra code, and another
+ integer in our data)? We did manage to save a lock/unlock pair in an
+ uncommon case (when a time out occurs) at the above mentioned expenses in
+ the common cases.
+
+ As for 8b, c, and d, they look ok though I haven't studied them thoroughly.
+ What would you use them for?
+
+ Later,
+ -Louis! :)
+