re2 1.20.0 → 1.20.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1492)
  1. package/.github/actions/linux-alpine-node-16/entrypoint.sh +2 -0
  2. package/.github/actions/linux-alpine-node-18/entrypoint.sh +3 -1
  3. package/.github/actions/linux-alpine-node-20/entrypoint.sh +3 -1
  4. package/.github/actions/linux-node-12/entrypoint.sh +2 -0
  5. package/.github/actions/linux-node-18/entrypoint.sh +3 -1
  6. package/.github/actions/linux-node-20/entrypoint.sh +3 -1
  7. package/README.md +2 -0
  8. package/binding.gyp +1 -0
  9. package/package.json +2 -2
  10. package/vendor/abseil-cpp/.clang-format +4 -0
  11. package/vendor/abseil-cpp/.github/ISSUE_TEMPLATE/00-bug_report.yml +53 -0
  12. package/vendor/abseil-cpp/.github/ISSUE_TEMPLATE/config.yml +5 -0
  13. package/vendor/abseil-cpp/ABSEIL_ISSUE_TEMPLATE.md +22 -0
  14. package/vendor/abseil-cpp/AUTHORS +6 -0
  15. package/vendor/abseil-cpp/BUILD.bazel +25 -0
  16. package/vendor/abseil-cpp/CMake/AbseilDll.cmake +809 -0
  17. package/vendor/abseil-cpp/CMake/AbseilHelpers.cmake +466 -0
  18. package/vendor/abseil-cpp/CMake/Googletest/CMakeLists.txt.in +14 -0
  19. package/vendor/abseil-cpp/CMake/Googletest/DownloadGTest.cmake +41 -0
  20. package/vendor/abseil-cpp/CMake/README.md +188 -0
  21. package/vendor/abseil-cpp/CMake/abslConfig.cmake.in +8 -0
  22. package/vendor/abseil-cpp/CMake/install_test_project/CMakeLists.txt +25 -0
  23. package/vendor/abseil-cpp/CMake/install_test_project/simple.cc +32 -0
  24. package/vendor/abseil-cpp/CMake/install_test_project/test.sh +112 -0
  25. package/vendor/abseil-cpp/CMakeLists.txt +239 -0
  26. package/vendor/abseil-cpp/CONTRIBUTING.md +141 -0
  27. package/vendor/abseil-cpp/FAQ.md +167 -0
  28. package/vendor/abseil-cpp/LICENSE +203 -0
  29. package/vendor/abseil-cpp/README.md +163 -0
  30. package/vendor/abseil-cpp/UPGRADES.md +17 -0
  31. package/vendor/abseil-cpp/WORKSPACE +62 -0
  32. package/vendor/abseil-cpp/absl/BUILD.bazel +125 -0
  33. package/vendor/abseil-cpp/absl/CMakeLists.txt +44 -0
  34. package/vendor/abseil-cpp/absl/abseil.podspec.gen.py +229 -0
  35. package/vendor/abseil-cpp/absl/algorithm/BUILD.bazel +92 -0
  36. package/vendor/abseil-cpp/absl/algorithm/CMakeLists.txt +70 -0
  37. package/vendor/abseil-cpp/absl/algorithm/algorithm.h +159 -0
  38. package/vendor/abseil-cpp/absl/algorithm/algorithm_test.cc +191 -0
  39. package/vendor/abseil-cpp/absl/algorithm/container.h +1773 -0
  40. package/vendor/abseil-cpp/absl/algorithm/container_test.cc +1127 -0
  41. package/vendor/abseil-cpp/absl/algorithm/equal_benchmark.cc +126 -0
  42. package/vendor/abseil-cpp/absl/base/BUILD.bazel +783 -0
  43. package/vendor/abseil-cpp/absl/base/CMakeLists.txt +685 -0
  44. package/vendor/abseil-cpp/absl/base/attributes.h +782 -0
  45. package/vendor/abseil-cpp/absl/base/bit_cast_test.cc +109 -0
  46. package/vendor/abseil-cpp/absl/base/call_once.h +219 -0
  47. package/vendor/abseil-cpp/absl/base/call_once_test.cc +107 -0
  48. package/vendor/abseil-cpp/absl/base/casts.h +180 -0
  49. package/vendor/abseil-cpp/absl/base/config.h +954 -0
  50. package/vendor/abseil-cpp/absl/base/config_test.cc +60 -0
  51. package/vendor/abseil-cpp/absl/base/const_init.h +76 -0
  52. package/vendor/abseil-cpp/absl/base/dynamic_annotations.h +471 -0
  53. package/vendor/abseil-cpp/absl/base/exception_safety_testing_test.cc +959 -0
  54. package/vendor/abseil-cpp/absl/base/inline_variable_test.cc +64 -0
  55. package/vendor/abseil-cpp/absl/base/inline_variable_test_a.cc +27 -0
  56. package/vendor/abseil-cpp/absl/base/inline_variable_test_b.cc +27 -0
  57. package/vendor/abseil-cpp/absl/base/internal/atomic_hook.h +200 -0
  58. package/vendor/abseil-cpp/absl/base/internal/atomic_hook_test.cc +97 -0
  59. package/vendor/abseil-cpp/absl/base/internal/atomic_hook_test_helper.cc +32 -0
  60. package/vendor/abseil-cpp/absl/base/internal/atomic_hook_test_helper.h +34 -0
  61. package/vendor/abseil-cpp/absl/base/internal/cmake_thread_test.cc +22 -0
  62. package/vendor/abseil-cpp/absl/base/internal/cycleclock.cc +77 -0
  63. package/vendor/abseil-cpp/absl/base/internal/cycleclock.h +144 -0
  64. package/vendor/abseil-cpp/absl/base/internal/cycleclock_config.h +55 -0
  65. package/vendor/abseil-cpp/absl/base/internal/direct_mmap.h +170 -0
  66. package/vendor/abseil-cpp/absl/base/internal/dynamic_annotations.h +398 -0
  67. package/vendor/abseil-cpp/absl/base/internal/endian.h +282 -0
  68. package/vendor/abseil-cpp/absl/base/internal/endian_test.cc +263 -0
  69. package/vendor/abseil-cpp/absl/base/internal/errno_saver.h +43 -0
  70. package/vendor/abseil-cpp/absl/base/internal/errno_saver_test.cc +45 -0
  71. package/vendor/abseil-cpp/absl/base/internal/exception_safety_testing.cc +79 -0
  72. package/vendor/abseil-cpp/absl/base/internal/exception_safety_testing.h +1109 -0
  73. package/vendor/abseil-cpp/absl/base/internal/exception_testing.h +42 -0
  74. package/vendor/abseil-cpp/absl/base/internal/fast_type_id.h +50 -0
  75. package/vendor/abseil-cpp/absl/base/internal/fast_type_id_test.cc +123 -0
  76. package/vendor/abseil-cpp/absl/base/internal/hide_ptr.h +51 -0
  77. package/vendor/abseil-cpp/absl/base/internal/identity.h +37 -0
  78. package/vendor/abseil-cpp/absl/base/internal/inline_variable.h +107 -0
  79. package/vendor/abseil-cpp/absl/base/internal/inline_variable_testing.h +46 -0
  80. package/vendor/abseil-cpp/absl/base/internal/invoke.h +241 -0
  81. package/vendor/abseil-cpp/absl/base/internal/low_level_alloc.cc +620 -0
  82. package/vendor/abseil-cpp/absl/base/internal/low_level_alloc.h +126 -0
  83. package/vendor/abseil-cpp/absl/base/internal/low_level_alloc_test.cc +180 -0
  84. package/vendor/abseil-cpp/absl/base/internal/low_level_scheduling.h +134 -0
  85. package/vendor/abseil-cpp/absl/base/internal/per_thread_tls.h +52 -0
  86. package/vendor/abseil-cpp/absl/base/internal/prefetch.h +138 -0
  87. package/vendor/abseil-cpp/absl/base/internal/prefetch_test.cc +43 -0
  88. package/vendor/abseil-cpp/absl/base/internal/pretty_function.h +33 -0
  89. package/vendor/abseil-cpp/absl/base/internal/raw_logging.cc +253 -0
  90. package/vendor/abseil-cpp/absl/base/internal/raw_logging.h +195 -0
  91. package/vendor/abseil-cpp/absl/base/internal/scheduling_mode.h +58 -0
  92. package/vendor/abseil-cpp/absl/base/internal/scoped_set_env.cc +81 -0
  93. package/vendor/abseil-cpp/absl/base/internal/scoped_set_env.h +45 -0
  94. package/vendor/abseil-cpp/absl/base/internal/scoped_set_env_test.cc +99 -0
  95. package/vendor/abseil-cpp/absl/base/internal/spinlock.cc +232 -0
  96. package/vendor/abseil-cpp/absl/base/internal/spinlock.h +252 -0
  97. package/vendor/abseil-cpp/absl/base/internal/spinlock_akaros.inc +35 -0
  98. package/vendor/abseil-cpp/absl/base/internal/spinlock_benchmark.cc +52 -0
  99. package/vendor/abseil-cpp/absl/base/internal/spinlock_linux.inc +71 -0
  100. package/vendor/abseil-cpp/absl/base/internal/spinlock_posix.inc +46 -0
  101. package/vendor/abseil-cpp/absl/base/internal/spinlock_wait.cc +81 -0
  102. package/vendor/abseil-cpp/absl/base/internal/spinlock_wait.h +95 -0
  103. package/vendor/abseil-cpp/absl/base/internal/spinlock_win32.inc +40 -0
  104. package/vendor/abseil-cpp/absl/base/internal/strerror.cc +88 -0
  105. package/vendor/abseil-cpp/absl/base/internal/strerror.h +39 -0
  106. package/vendor/abseil-cpp/absl/base/internal/strerror_benchmark.cc +29 -0
  107. package/vendor/abseil-cpp/absl/base/internal/strerror_test.cc +88 -0
  108. package/vendor/abseil-cpp/absl/base/internal/sysinfo.cc +511 -0
  109. package/vendor/abseil-cpp/absl/base/internal/sysinfo.h +74 -0
  110. package/vendor/abseil-cpp/absl/base/internal/sysinfo_test.cc +88 -0
  111. package/vendor/abseil-cpp/absl/base/internal/thread_annotations.h +280 -0
  112. package/vendor/abseil-cpp/absl/base/internal/thread_identity.cc +156 -0
  113. package/vendor/abseil-cpp/absl/base/internal/thread_identity.h +269 -0
  114. package/vendor/abseil-cpp/absl/base/internal/thread_identity_benchmark.cc +38 -0
  115. package/vendor/abseil-cpp/absl/base/internal/thread_identity_test.cc +129 -0
  116. package/vendor/abseil-cpp/absl/base/internal/throw_delegate.cc +212 -0
  117. package/vendor/abseil-cpp/absl/base/internal/throw_delegate.h +75 -0
  118. package/vendor/abseil-cpp/absl/base/internal/tsan_mutex_interface.h +68 -0
  119. package/vendor/abseil-cpp/absl/base/internal/unaligned_access.h +82 -0
  120. package/vendor/abseil-cpp/absl/base/internal/unique_small_name_test.cc +77 -0
  121. package/vendor/abseil-cpp/absl/base/internal/unscaledcycleclock.cc +153 -0
  122. package/vendor/abseil-cpp/absl/base/internal/unscaledcycleclock.h +96 -0
  123. package/vendor/abseil-cpp/absl/base/internal/unscaledcycleclock_config.h +62 -0
  124. package/vendor/abseil-cpp/absl/base/invoke_test.cc +331 -0
  125. package/vendor/abseil-cpp/absl/base/log_severity.cc +55 -0
  126. package/vendor/abseil-cpp/absl/base/log_severity.h +172 -0
  127. package/vendor/abseil-cpp/absl/base/log_severity_test.cc +245 -0
  128. package/vendor/abseil-cpp/absl/base/macros.h +141 -0
  129. package/vendor/abseil-cpp/absl/base/optimization.h +304 -0
  130. package/vendor/abseil-cpp/absl/base/optimization_test.cc +129 -0
  131. package/vendor/abseil-cpp/absl/base/options.h +232 -0
  132. package/vendor/abseil-cpp/absl/base/policy_checks.h +113 -0
  133. package/vendor/abseil-cpp/absl/base/port.h +25 -0
  134. package/vendor/abseil-cpp/absl/base/raw_logging_test.cc +79 -0
  135. package/vendor/abseil-cpp/absl/base/spinlock_test_common.cc +272 -0
  136. package/vendor/abseil-cpp/absl/base/thread_annotations.h +335 -0
  137. package/vendor/abseil-cpp/absl/base/throw_delegate_test.cc +175 -0
  138. package/vendor/abseil-cpp/absl/cleanup/BUILD.bazel +65 -0
  139. package/vendor/abseil-cpp/absl/cleanup/CMakeLists.txt +56 -0
  140. package/vendor/abseil-cpp/absl/cleanup/cleanup.h +140 -0
  141. package/vendor/abseil-cpp/absl/cleanup/cleanup_test.cc +311 -0
  142. package/vendor/abseil-cpp/absl/cleanup/internal/cleanup.h +100 -0
  143. package/vendor/abseil-cpp/absl/container/BUILD.bazel +1031 -0
  144. package/vendor/abseil-cpp/absl/container/CMakeLists.txt +967 -0
  145. package/vendor/abseil-cpp/absl/container/btree_benchmark.cc +764 -0
  146. package/vendor/abseil-cpp/absl/container/btree_map.h +885 -0
  147. package/vendor/abseil-cpp/absl/container/btree_set.h +821 -0
  148. package/vendor/abseil-cpp/absl/container/btree_test.cc +3470 -0
  149. package/vendor/abseil-cpp/absl/container/btree_test.h +166 -0
  150. package/vendor/abseil-cpp/absl/container/fixed_array.h +531 -0
  151. package/vendor/abseil-cpp/absl/container/fixed_array_benchmark.cc +67 -0
  152. package/vendor/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc +201 -0
  153. package/vendor/abseil-cpp/absl/container/fixed_array_test.cc +837 -0
  154. package/vendor/abseil-cpp/absl/container/flat_hash_map.h +613 -0
  155. package/vendor/abseil-cpp/absl/container/flat_hash_map_test.cc +325 -0
  156. package/vendor/abseil-cpp/absl/container/flat_hash_set.h +503 -0
  157. package/vendor/abseil-cpp/absl/container/flat_hash_set_test.cc +178 -0
  158. package/vendor/abseil-cpp/absl/container/inlined_vector.h +914 -0
  159. package/vendor/abseil-cpp/absl/container/inlined_vector_benchmark.cc +829 -0
  160. package/vendor/abseil-cpp/absl/container/inlined_vector_exception_safety_test.cc +508 -0
  161. package/vendor/abseil-cpp/absl/container/inlined_vector_test.cc +2060 -0
  162. package/vendor/abseil-cpp/absl/container/internal/btree.h +2982 -0
  163. package/vendor/abseil-cpp/absl/container/internal/btree_container.h +713 -0
  164. package/vendor/abseil-cpp/absl/container/internal/common.h +207 -0
  165. package/vendor/abseil-cpp/absl/container/internal/common_policy_traits.h +132 -0
  166. package/vendor/abseil-cpp/absl/container/internal/common_policy_traits_test.cc +120 -0
  167. package/vendor/abseil-cpp/absl/container/internal/compressed_tuple.h +290 -0
  168. package/vendor/abseil-cpp/absl/container/internal/compressed_tuple_test.cc +419 -0
  169. package/vendor/abseil-cpp/absl/container/internal/container_memory.h +454 -0
  170. package/vendor/abseil-cpp/absl/container/internal/container_memory_test.cc +257 -0
  171. package/vendor/abseil-cpp/absl/container/internal/counting_allocator.h +122 -0
  172. package/vendor/abseil-cpp/absl/container/internal/hash_function_defaults.h +163 -0
  173. package/vendor/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc +383 -0
  174. package/vendor/abseil-cpp/absl/container/internal/hash_generator_testing.cc +76 -0
  175. package/vendor/abseil-cpp/absl/container/internal/hash_generator_testing.h +182 -0
  176. package/vendor/abseil-cpp/absl/container/internal/hash_policy_testing.h +184 -0
  177. package/vendor/abseil-cpp/absl/container/internal/hash_policy_testing_test.cc +45 -0
  178. package/vendor/abseil-cpp/absl/container/internal/hash_policy_traits.h +157 -0
  179. package/vendor/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc +80 -0
  180. package/vendor/abseil-cpp/absl/container/internal/hashtable_debug.h +110 -0
  181. package/vendor/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h +85 -0
  182. package/vendor/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +283 -0
  183. package/vendor/abseil-cpp/absl/container/internal/hashtablez_sampler.h +267 -0
  184. package/vendor/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +31 -0
  185. package/vendor/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc +428 -0
  186. package/vendor/abseil-cpp/absl/container/internal/inlined_vector.h +1031 -0
  187. package/vendor/abseil-cpp/absl/container/internal/layout.h +743 -0
  188. package/vendor/abseil-cpp/absl/container/internal/layout_benchmark.cc +122 -0
  189. package/vendor/abseil-cpp/absl/container/internal/layout_test.cc +1641 -0
  190. package/vendor/abseil-cpp/absl/container/internal/node_slot_policy.h +92 -0
  191. package/vendor/abseil-cpp/absl/container/internal/node_slot_policy_test.cc +69 -0
  192. package/vendor/abseil-cpp/absl/container/internal/raw_hash_map.h +198 -0
  193. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set.cc +222 -0
  194. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set.h +2685 -0
  195. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc +505 -0
  196. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc +544 -0
  197. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc +590 -0
  198. package/vendor/abseil-cpp/absl/container/internal/raw_hash_set_test.cc +2324 -0
  199. package/vendor/abseil-cpp/absl/container/internal/test_instance_tracker.cc +29 -0
  200. package/vendor/abseil-cpp/absl/container/internal/test_instance_tracker.h +274 -0
  201. package/vendor/abseil-cpp/absl/container/internal/test_instance_tracker_test.cc +184 -0
  202. package/vendor/abseil-cpp/absl/container/internal/tracked.h +83 -0
  203. package/vendor/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h +494 -0
  204. package/vendor/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h +117 -0
  205. package/vendor/abseil-cpp/absl/container/internal/unordered_map_members_test.h +87 -0
  206. package/vendor/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h +352 -0
  207. package/vendor/abseil-cpp/absl/container/internal/unordered_map_test.cc +50 -0
  208. package/vendor/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h +496 -0
  209. package/vendor/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h +91 -0
  210. package/vendor/abseil-cpp/absl/container/internal/unordered_set_members_test.h +86 -0
  211. package/vendor/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h +221 -0
  212. package/vendor/abseil-cpp/absl/container/internal/unordered_set_test.cc +41 -0
  213. package/vendor/abseil-cpp/absl/container/node_hash_map.h +604 -0
  214. package/vendor/abseil-cpp/absl/container/node_hash_map_test.cc +286 -0
  215. package/vendor/abseil-cpp/absl/container/node_hash_set.h +500 -0
  216. package/vendor/abseil-cpp/absl/container/node_hash_set_test.cc +143 -0
  217. package/vendor/abseil-cpp/absl/container/sample_element_size_test.cc +114 -0
  218. package/vendor/abseil-cpp/absl/copts/AbseilConfigureCopts.cmake +96 -0
  219. package/vendor/abseil-cpp/absl/copts/GENERATED_AbseilCopts.cmake +229 -0
  220. package/vendor/abseil-cpp/absl/copts/GENERATED_copts.bzl +230 -0
  221. package/vendor/abseil-cpp/absl/copts/configure_copts.bzl +82 -0
  222. package/vendor/abseil-cpp/absl/copts/copts.py +191 -0
  223. package/vendor/abseil-cpp/absl/copts/generate_copts.py +109 -0
  224. package/vendor/abseil-cpp/absl/crc/BUILD.bazel +210 -0
  225. package/vendor/abseil-cpp/absl/crc/CMakeLists.txt +176 -0
  226. package/vendor/abseil-cpp/absl/crc/crc32c.cc +99 -0
  227. package/vendor/abseil-cpp/absl/crc/crc32c.h +183 -0
  228. package/vendor/abseil-cpp/absl/crc/crc32c_benchmark.cc +183 -0
  229. package/vendor/abseil-cpp/absl/crc/crc32c_test.cc +194 -0
  230. package/vendor/abseil-cpp/absl/crc/internal/cpu_detect.cc +256 -0
  231. package/vendor/abseil-cpp/absl/crc/internal/cpu_detect.h +57 -0
  232. package/vendor/abseil-cpp/absl/crc/internal/crc.cc +468 -0
  233. package/vendor/abseil-cpp/absl/crc/internal/crc.h +91 -0
  234. package/vendor/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h +269 -0
  235. package/vendor/abseil-cpp/absl/crc/internal/crc32c.h +39 -0
  236. package/vendor/abseil-cpp/absl/crc/internal/crc32c_inline.h +72 -0
  237. package/vendor/abseil-cpp/absl/crc/internal/crc_cord_state.cc +130 -0
  238. package/vendor/abseil-cpp/absl/crc/internal/crc_cord_state.h +159 -0
  239. package/vendor/abseil-cpp/absl/crc/internal/crc_cord_state_test.cc +124 -0
  240. package/vendor/abseil-cpp/absl/crc/internal/crc_internal.h +179 -0
  241. package/vendor/abseil-cpp/absl/crc/internal/crc_memcpy.h +119 -0
  242. package/vendor/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc +75 -0
  243. package/vendor/abseil-cpp/absl/crc/internal/crc_memcpy_test.cc +169 -0
  244. package/vendor/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc +434 -0
  245. package/vendor/abseil-cpp/absl/crc/internal/crc_non_temporal_memcpy.cc +93 -0
  246. package/vendor/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc +725 -0
  247. package/vendor/abseil-cpp/absl/crc/internal/non_temporal_arm_intrinsics.h +79 -0
  248. package/vendor/abseil-cpp/absl/crc/internal/non_temporal_memcpy.h +180 -0
  249. package/vendor/abseil-cpp/absl/crc/internal/non_temporal_memcpy_test.cc +88 -0
  250. package/vendor/abseil-cpp/absl/debugging/BUILD.bazel +322 -0
  251. package/vendor/abseil-cpp/absl/debugging/CMakeLists.txt +294 -0
  252. package/vendor/abseil-cpp/absl/debugging/failure_signal_handler.cc +389 -0
  253. package/vendor/abseil-cpp/absl/debugging/failure_signal_handler.h +121 -0
  254. package/vendor/abseil-cpp/absl/debugging/failure_signal_handler_test.cc +165 -0
  255. package/vendor/abseil-cpp/absl/debugging/internal/address_is_readable.cc +96 -0
  256. package/vendor/abseil-cpp/absl/debugging/internal/address_is_readable.h +32 -0
  257. package/vendor/abseil-cpp/absl/debugging/internal/demangle.cc +1988 -0
  258. package/vendor/abseil-cpp/absl/debugging/internal/demangle.h +71 -0
  259. package/vendor/abseil-cpp/absl/debugging/internal/demangle_test.cc +243 -0
  260. package/vendor/abseil-cpp/absl/debugging/internal/elf_mem_image.cc +386 -0
  261. package/vendor/abseil-cpp/absl/debugging/internal/elf_mem_image.h +139 -0
  262. package/vendor/abseil-cpp/absl/debugging/internal/examine_stack.cc +317 -0
  263. package/vendor/abseil-cpp/absl/debugging/internal/examine_stack.h +64 -0
  264. package/vendor/abseil-cpp/absl/debugging/internal/stack_consumption.cc +185 -0
  265. package/vendor/abseil-cpp/absl/debugging/internal/stack_consumption.h +50 -0
  266. package/vendor/abseil-cpp/absl/debugging/internal/stack_consumption_test.cc +50 -0
  267. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +206 -0
  268. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc +139 -0
  269. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_config.h +88 -0
  270. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc +110 -0
  271. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc +108 -0
  272. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +258 -0
  273. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc +191 -0
  274. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc +24 -0
  275. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc +94 -0
  276. package/vendor/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +394 -0
  277. package/vendor/abseil-cpp/absl/debugging/internal/symbolize.h +153 -0
  278. package/vendor/abseil-cpp/absl/debugging/internal/vdso_support.cc +205 -0
  279. package/vendor/abseil-cpp/absl/debugging/internal/vdso_support.h +158 -0
  280. package/vendor/abseil-cpp/absl/debugging/leak_check.cc +73 -0
  281. package/vendor/abseil-cpp/absl/debugging/leak_check.h +150 -0
  282. package/vendor/abseil-cpp/absl/debugging/leak_check_fail_test.cc +41 -0
  283. package/vendor/abseil-cpp/absl/debugging/leak_check_test.cc +41 -0
  284. package/vendor/abseil-cpp/absl/debugging/stacktrace.cc +142 -0
  285. package/vendor/abseil-cpp/absl/debugging/stacktrace.h +231 -0
  286. package/vendor/abseil-cpp/absl/debugging/stacktrace_benchmark.cc +55 -0
  287. package/vendor/abseil-cpp/absl/debugging/stacktrace_test.cc +47 -0
  288. package/vendor/abseil-cpp/absl/debugging/symbolize.cc +43 -0
  289. package/vendor/abseil-cpp/absl/debugging/symbolize.h +99 -0
  290. package/vendor/abseil-cpp/absl/debugging/symbolize_darwin.inc +102 -0
  291. package/vendor/abseil-cpp/absl/debugging/symbolize_elf.inc +1637 -0
  292. package/vendor/abseil-cpp/absl/debugging/symbolize_emscripten.inc +72 -0
  293. package/vendor/abseil-cpp/absl/debugging/symbolize_test.cc +612 -0
  294. package/vendor/abseil-cpp/absl/debugging/symbolize_unimplemented.inc +40 -0
  295. package/vendor/abseil-cpp/absl/debugging/symbolize_win32.inc +82 -0
  296. package/vendor/abseil-cpp/absl/flags/BUILD.bazel +583 -0
  297. package/vendor/abseil-cpp/absl/flags/CMakeLists.txt +471 -0
  298. package/vendor/abseil-cpp/absl/flags/commandlineflag.cc +34 -0
  299. package/vendor/abseil-cpp/absl/flags/commandlineflag.h +200 -0
  300. package/vendor/abseil-cpp/absl/flags/commandlineflag_test.cc +231 -0
  301. package/vendor/abseil-cpp/absl/flags/config.h +68 -0
  302. package/vendor/abseil-cpp/absl/flags/config_test.cc +61 -0
  303. package/vendor/abseil-cpp/absl/flags/declare.h +73 -0
  304. package/vendor/abseil-cpp/absl/flags/flag.cc +38 -0
  305. package/vendor/abseil-cpp/absl/flags/flag.h +310 -0
  306. package/vendor/abseil-cpp/absl/flags/flag_benchmark.cc +251 -0
  307. package/vendor/abseil-cpp/absl/flags/flag_benchmark.lds +13 -0
  308. package/vendor/abseil-cpp/absl/flags/flag_test.cc +1166 -0
  309. package/vendor/abseil-cpp/absl/flags/flag_test_defs.cc +24 -0
  310. package/vendor/abseil-cpp/absl/flags/internal/commandlineflag.cc +26 -0
  311. package/vendor/abseil-cpp/absl/flags/internal/commandlineflag.h +68 -0
  312. package/vendor/abseil-cpp/absl/flags/internal/flag.cc +615 -0
  313. package/vendor/abseil-cpp/absl/flags/internal/flag.h +800 -0
  314. package/vendor/abseil-cpp/absl/flags/internal/flag_msvc.inc +116 -0
  315. package/vendor/abseil-cpp/absl/flags/internal/parse.h +63 -0
  316. package/vendor/abseil-cpp/absl/flags/internal/path_util.h +62 -0
  317. package/vendor/abseil-cpp/absl/flags/internal/path_util_test.cc +46 -0
  318. package/vendor/abseil-cpp/absl/flags/internal/private_handle_accessor.cc +65 -0
  319. package/vendor/abseil-cpp/absl/flags/internal/private_handle_accessor.h +61 -0
  320. package/vendor/abseil-cpp/absl/flags/internal/program_name.cc +60 -0
  321. package/vendor/abseil-cpp/absl/flags/internal/program_name.h +50 -0
  322. package/vendor/abseil-cpp/absl/flags/internal/program_name_test.cc +61 -0
  323. package/vendor/abseil-cpp/absl/flags/internal/registry.h +97 -0
  324. package/vendor/abseil-cpp/absl/flags/internal/sequence_lock.h +187 -0
  325. package/vendor/abseil-cpp/absl/flags/internal/sequence_lock_test.cc +169 -0
  326. package/vendor/abseil-cpp/absl/flags/internal/usage.cc +526 -0
  327. package/vendor/abseil-cpp/absl/flags/internal/usage.h +104 -0
  328. package/vendor/abseil-cpp/absl/flags/internal/usage_test.cc +504 -0
  329. package/vendor/abseil-cpp/absl/flags/marshalling.cc +241 -0
  330. package/vendor/abseil-cpp/absl/flags/marshalling.h +356 -0
  331. package/vendor/abseil-cpp/absl/flags/marshalling_test.cc +1070 -0
  332. package/vendor/abseil-cpp/absl/flags/parse.cc +890 -0
  333. package/vendor/abseil-cpp/absl/flags/parse.h +60 -0
  334. package/vendor/abseil-cpp/absl/flags/parse_test.cc +1004 -0
  335. package/vendor/abseil-cpp/absl/flags/reflection.cc +354 -0
  336. package/vendor/abseil-cpp/absl/flags/reflection.h +90 -0
  337. package/vendor/abseil-cpp/absl/flags/reflection_test.cc +265 -0
  338. package/vendor/abseil-cpp/absl/flags/usage.cc +65 -0
  339. package/vendor/abseil-cpp/absl/flags/usage.h +43 -0
  340. package/vendor/abseil-cpp/absl/flags/usage_config.cc +165 -0
  341. package/vendor/abseil-cpp/absl/flags/usage_config.h +135 -0
  342. package/vendor/abseil-cpp/absl/flags/usage_config_test.cc +205 -0
  343. package/vendor/abseil-cpp/absl/functional/BUILD.bazel +128 -0
  344. package/vendor/abseil-cpp/absl/functional/CMakeLists.txt +109 -0
  345. package/vendor/abseil-cpp/absl/functional/any_invocable.h +316 -0
  346. package/vendor/abseil-cpp/absl/functional/any_invocable_test.cc +1719 -0
  347. package/vendor/abseil-cpp/absl/functional/bind_front.h +193 -0
  348. package/vendor/abseil-cpp/absl/functional/bind_front_test.cc +231 -0
  349. package/vendor/abseil-cpp/absl/functional/function_ref.h +143 -0
  350. package/vendor/abseil-cpp/absl/functional/function_ref_test.cc +258 -0
  351. package/vendor/abseil-cpp/absl/functional/function_type_benchmark.cc +176 -0
  352. package/vendor/abseil-cpp/absl/functional/internal/any_invocable.h +878 -0
  353. package/vendor/abseil-cpp/absl/functional/internal/front_binder.h +95 -0
  354. package/vendor/abseil-cpp/absl/functional/internal/function_ref.h +106 -0
  355. package/vendor/abseil-cpp/absl/hash/BUILD.bazel +176 -0
  356. package/vendor/abseil-cpp/absl/hash/CMakeLists.txt +160 -0
  357. package/vendor/abseil-cpp/absl/hash/hash.h +421 -0
  358. package/vendor/abseil-cpp/absl/hash/hash_benchmark.cc +323 -0
  359. package/vendor/abseil-cpp/absl/hash/hash_test.cc +1277 -0
  360. package/vendor/abseil-cpp/absl/hash/hash_testing.h +378 -0
  361. package/vendor/abseil-cpp/absl/hash/internal/city.cc +349 -0
  362. package/vendor/abseil-cpp/absl/hash/internal/city.h +78 -0
  363. package/vendor/abseil-cpp/absl/hash/internal/city_test.cc +597 -0
  364. package/vendor/abseil-cpp/absl/hash/internal/hash.cc +69 -0
  365. package/vendor/abseil-cpp/absl/hash/internal/hash.h +1305 -0
  366. package/vendor/abseil-cpp/absl/hash/internal/low_level_hash.cc +112 -0
  367. package/vendor/abseil-cpp/absl/hash/internal/low_level_hash.h +50 -0
  368. package/vendor/abseil-cpp/absl/hash/internal/low_level_hash_test.cc +532 -0
  369. package/vendor/abseil-cpp/absl/hash/internal/print_hash_of.cc +23 -0
  370. package/vendor/abseil-cpp/absl/hash/internal/spy_hash_state.h +266 -0
  371. package/vendor/abseil-cpp/absl/log/BUILD.bazel +596 -0
  372. package/vendor/abseil-cpp/absl/log/CMakeLists.txt +1042 -0
  373. package/vendor/abseil-cpp/absl/log/absl_check.h +105 -0
  374. package/vendor/abseil-cpp/absl/log/absl_check_test.cc +58 -0
  375. package/vendor/abseil-cpp/absl/log/absl_log.h +94 -0
  376. package/vendor/abseil-cpp/absl/log/absl_log_basic_test.cc +21 -0
  377. package/vendor/abseil-cpp/absl/log/check.h +183 -0
  378. package/vendor/abseil-cpp/absl/log/check_test.cc +58 -0
  379. package/vendor/abseil-cpp/absl/log/check_test_impl.h +528 -0
  380. package/vendor/abseil-cpp/absl/log/die_if_null.cc +32 -0
  381. package/vendor/abseil-cpp/absl/log/die_if_null.h +76 -0
  382. package/vendor/abseil-cpp/absl/log/die_if_null_test.cc +107 -0
  383. package/vendor/abseil-cpp/absl/log/flags.cc +112 -0
  384. package/vendor/abseil-cpp/absl/log/flags.h +43 -0
  385. package/vendor/abseil-cpp/absl/log/flags_test.cc +184 -0
  386. package/vendor/abseil-cpp/absl/log/globals.cc +148 -0
  387. package/vendor/abseil-cpp/absl/log/globals.h +165 -0
  388. package/vendor/abseil-cpp/absl/log/globals_test.cc +91 -0
  389. package/vendor/abseil-cpp/absl/log/initialize.cc +34 -0
  390. package/vendor/abseil-cpp/absl/log/initialize.h +45 -0
  391. package/vendor/abseil-cpp/absl/log/internal/BUILD.bazel +383 -0
  392. package/vendor/abseil-cpp/absl/log/internal/append_truncated.h +47 -0
  393. package/vendor/abseil-cpp/absl/log/internal/check_impl.h +150 -0
  394. package/vendor/abseil-cpp/absl/log/internal/check_op.cc +118 -0
  395. package/vendor/abseil-cpp/absl/log/internal/check_op.h +392 -0
  396. package/vendor/abseil-cpp/absl/log/internal/conditions.cc +83 -0
  397. package/vendor/abseil-cpp/absl/log/internal/conditions.h +222 -0
  398. package/vendor/abseil-cpp/absl/log/internal/config.h +45 -0
  399. package/vendor/abseil-cpp/absl/log/internal/flags.h +53 -0
  400. package/vendor/abseil-cpp/absl/log/internal/globals.cc +125 -0
  401. package/vendor/abseil-cpp/absl/log/internal/globals.h +101 -0
  402. package/vendor/abseil-cpp/absl/log/internal/log_format.cc +203 -0
  403. package/vendor/abseil-cpp/absl/log/internal/log_format.h +78 -0
  404. package/vendor/abseil-cpp/absl/log/internal/log_impl.h +212 -0
  405. package/vendor/abseil-cpp/absl/log/internal/log_message.cc +618 -0
  406. package/vendor/abseil-cpp/absl/log/internal/log_message.h +355 -0
  407. package/vendor/abseil-cpp/absl/log/internal/log_sink_set.cc +296 -0
  408. package/vendor/abseil-cpp/absl/log/internal/log_sink_set.h +54 -0
  409. package/vendor/abseil-cpp/absl/log/internal/nullguard.cc +35 -0
  410. package/vendor/abseil-cpp/absl/log/internal/nullguard.h +88 -0
  411. package/vendor/abseil-cpp/absl/log/internal/nullstream.h +134 -0
  412. package/vendor/abseil-cpp/absl/log/internal/proto.cc +220 -0
  413. package/vendor/abseil-cpp/absl/log/internal/proto.h +288 -0
  414. package/vendor/abseil-cpp/absl/log/internal/stderr_log_sink_test.cc +105 -0
  415. package/vendor/abseil-cpp/absl/log/internal/strip.h +71 -0
  416. package/vendor/abseil-cpp/absl/log/internal/structured.h +58 -0
  417. package/vendor/abseil-cpp/absl/log/internal/test_actions.cc +75 -0
  418. package/vendor/abseil-cpp/absl/log/internal/test_actions.h +90 -0
  419. package/vendor/abseil-cpp/absl/log/internal/test_helpers.cc +82 -0
  420. package/vendor/abseil-cpp/absl/log/internal/test_helpers.h +71 -0
  421. package/vendor/abseil-cpp/absl/log/internal/test_matchers.cc +217 -0
  422. package/vendor/abseil-cpp/absl/log/internal/test_matchers.h +94 -0
  423. package/vendor/abseil-cpp/absl/log/internal/voidify.h +44 -0
  424. package/vendor/abseil-cpp/absl/log/log.h +308 -0
  425. package/vendor/abseil-cpp/absl/log/log_basic_test.cc +21 -0
  426. package/vendor/abseil-cpp/absl/log/log_basic_test_impl.h +455 -0
  427. package/vendor/abseil-cpp/absl/log/log_benchmark.cc +97 -0
  428. package/vendor/abseil-cpp/absl/log/log_entry.cc +29 -0
  429. package/vendor/abseil-cpp/absl/log/log_entry.h +220 -0
  430. package/vendor/abseil-cpp/absl/log/log_entry_test.cc +468 -0
  431. package/vendor/abseil-cpp/absl/log/log_format_test.cc +1872 -0
  432. package/vendor/abseil-cpp/absl/log/log_macro_hygiene_test.cc +187 -0
  433. package/vendor/abseil-cpp/absl/log/log_modifier_methods_test.cc +233 -0
  434. package/vendor/abseil-cpp/absl/log/log_sink.cc +23 -0
  435. package/vendor/abseil-cpp/absl/log/log_sink.h +64 -0
  436. package/vendor/abseil-cpp/absl/log/log_sink_registry.h +61 -0
  437. package/vendor/abseil-cpp/absl/log/log_sink_test.cc +419 -0
  438. package/vendor/abseil-cpp/absl/log/log_streamer.h +171 -0
  439. package/vendor/abseil-cpp/absl/log/log_streamer_test.cc +365 -0
  440. package/vendor/abseil-cpp/absl/log/scoped_mock_log.cc +86 -0
  441. package/vendor/abseil-cpp/absl/log/scoped_mock_log.h +194 -0
  442. package/vendor/abseil-cpp/absl/log/scoped_mock_log_test.cc +290 -0
  443. package/vendor/abseil-cpp/absl/log/stripping_test.cc +340 -0
  444. package/vendor/abseil-cpp/absl/log/structured.h +70 -0
  445. package/vendor/abseil-cpp/absl/log/structured_test.cc +63 -0
  446. package/vendor/abseil-cpp/absl/memory/BUILD.bazel +52 -0
  447. package/vendor/abseil-cpp/absl/memory/CMakeLists.txt +41 -0
  448. package/vendor/abseil-cpp/absl/memory/memory.h +278 -0
  449. package/vendor/abseil-cpp/absl/memory/memory_test.cc +222 -0
  450. package/vendor/abseil-cpp/absl/meta/BUILD.bazel +50 -0
  451. package/vendor/abseil-cpp/absl/meta/CMakeLists.txt +53 -0
  452. package/vendor/abseil-cpp/absl/meta/type_traits.h +889 -0
  453. package/vendor/abseil-cpp/absl/meta/type_traits_test.cc +1448 -0
  454. package/vendor/abseil-cpp/absl/numeric/BUILD.bazel +127 -0
  455. package/vendor/abseil-cpp/absl/numeric/CMakeLists.txt +99 -0
  456. package/vendor/abseil-cpp/absl/numeric/bits.h +177 -0
  457. package/vendor/abseil-cpp/absl/numeric/bits_benchmark.cc +73 -0
  458. package/vendor/abseil-cpp/absl/numeric/bits_test.cc +573 -0
  459. package/vendor/abseil-cpp/absl/numeric/int128.cc +387 -0
  460. package/vendor/abseil-cpp/absl/numeric/int128.h +1165 -0
  461. package/vendor/abseil-cpp/absl/numeric/int128_benchmark.cc +282 -0
  462. package/vendor/abseil-cpp/absl/numeric/int128_have_intrinsic.inc +296 -0
  463. package/vendor/abseil-cpp/absl/numeric/int128_no_intrinsic.inc +311 -0
  464. package/vendor/abseil-cpp/absl/numeric/int128_stream_test.cc +1393 -0
  465. package/vendor/abseil-cpp/absl/numeric/int128_test.cc +1261 -0
  466. package/vendor/abseil-cpp/absl/numeric/internal/bits.h +358 -0
  467. package/vendor/abseil-cpp/absl/numeric/internal/representation.h +55 -0
  468. package/vendor/abseil-cpp/absl/profiling/BUILD.bazel +129 -0
  469. package/vendor/abseil-cpp/absl/profiling/CMakeLists.txt +93 -0
  470. package/vendor/abseil-cpp/absl/profiling/internal/exponential_biased.cc +93 -0
  471. package/vendor/abseil-cpp/absl/profiling/internal/exponential_biased.h +130 -0
  472. package/vendor/abseil-cpp/absl/profiling/internal/exponential_biased_test.cc +203 -0
  473. package/vendor/abseil-cpp/absl/profiling/internal/periodic_sampler.cc +53 -0
  474. package/vendor/abseil-cpp/absl/profiling/internal/periodic_sampler.h +211 -0
  475. package/vendor/abseil-cpp/absl/profiling/internal/periodic_sampler_benchmark.cc +79 -0
  476. package/vendor/abseil-cpp/absl/profiling/internal/periodic_sampler_test.cc +177 -0
  477. package/vendor/abseil-cpp/absl/profiling/internal/sample_recorder.h +253 -0
  478. package/vendor/abseil-cpp/absl/profiling/internal/sample_recorder_test.cc +184 -0
  479. package/vendor/abseil-cpp/absl/random/BUILD.bazel +517 -0
  480. package/vendor/abseil-cpp/absl/random/CMakeLists.txt +1216 -0
  481. package/vendor/abseil-cpp/absl/random/benchmarks.cc +383 -0
  482. package/vendor/abseil-cpp/absl/random/bernoulli_distribution.h +200 -0
  483. package/vendor/abseil-cpp/absl/random/bernoulli_distribution_test.cc +217 -0
  484. package/vendor/abseil-cpp/absl/random/beta_distribution.h +427 -0
  485. package/vendor/abseil-cpp/absl/random/beta_distribution_test.cc +620 -0
  486. package/vendor/abseil-cpp/absl/random/bit_gen_ref.h +185 -0
  487. package/vendor/abseil-cpp/absl/random/bit_gen_ref_test.cc +102 -0
  488. package/vendor/abseil-cpp/absl/random/discrete_distribution.cc +98 -0
  489. package/vendor/abseil-cpp/absl/random/discrete_distribution.h +247 -0
  490. package/vendor/abseil-cpp/absl/random/discrete_distribution_test.cc +251 -0
  491. package/vendor/abseil-cpp/absl/random/distributions.h +452 -0
  492. package/vendor/abseil-cpp/absl/random/distributions_test.cc +466 -0
  493. package/vendor/abseil-cpp/absl/random/examples_test.cc +99 -0
  494. package/vendor/abseil-cpp/absl/random/exponential_distribution.h +165 -0
  495. package/vendor/abseil-cpp/absl/random/exponential_distribution_test.cc +428 -0
  496. package/vendor/abseil-cpp/absl/random/gaussian_distribution.cc +104 -0
  497. package/vendor/abseil-cpp/absl/random/gaussian_distribution.h +275 -0
  498. package/vendor/abseil-cpp/absl/random/gaussian_distribution_test.cc +563 -0
  499. package/vendor/abseil-cpp/absl/random/generators_test.cc +185 -0
  500. package/vendor/abseil-cpp/absl/random/internal/BUILD.bazel +753 -0
  501. package/vendor/abseil-cpp/absl/random/internal/chi_square.cc +233 -0
  502. package/vendor/abseil-cpp/absl/random/internal/chi_square.h +89 -0
  503. package/vendor/abseil-cpp/absl/random/internal/chi_square_test.cc +365 -0
  504. package/vendor/abseil-cpp/absl/random/internal/distribution_caller.h +95 -0
  505. package/vendor/abseil-cpp/absl/random/internal/distribution_test_util.cc +418 -0
  506. package/vendor/abseil-cpp/absl/random/internal/distribution_test_util.h +113 -0
  507. package/vendor/abseil-cpp/absl/random/internal/distribution_test_util_test.cc +193 -0
  508. package/vendor/abseil-cpp/absl/random/internal/explicit_seed_seq.h +92 -0
  509. package/vendor/abseil-cpp/absl/random/internal/explicit_seed_seq_test.cc +237 -0
  510. package/vendor/abseil-cpp/absl/random/internal/fast_uniform_bits.h +270 -0
  511. package/vendor/abseil-cpp/absl/random/internal/fast_uniform_bits_test.cc +336 -0
  512. package/vendor/abseil-cpp/absl/random/internal/fastmath.h +57 -0
  513. package/vendor/abseil-cpp/absl/random/internal/fastmath_test.cc +97 -0
  514. package/vendor/abseil-cpp/absl/random/internal/gaussian_distribution_gentables.cc +143 -0
  515. package/vendor/abseil-cpp/absl/random/internal/generate_real.h +144 -0
  516. package/vendor/abseil-cpp/absl/random/internal/generate_real_test.cc +496 -0
  517. package/vendor/abseil-cpp/absl/random/internal/iostream_state_saver.h +245 -0
  518. package/vendor/abseil-cpp/absl/random/internal/iostream_state_saver_test.cc +372 -0
  519. package/vendor/abseil-cpp/absl/random/internal/mock_helpers.h +135 -0
  520. package/vendor/abseil-cpp/absl/random/internal/mock_overload_set.h +100 -0
  521. package/vendor/abseil-cpp/absl/random/internal/nanobenchmark.cc +804 -0
  522. package/vendor/abseil-cpp/absl/random/internal/nanobenchmark.h +172 -0
  523. package/vendor/abseil-cpp/absl/random/internal/nanobenchmark_test.cc +77 -0
  524. package/vendor/abseil-cpp/absl/random/internal/nonsecure_base.h +161 -0
  525. package/vendor/abseil-cpp/absl/random/internal/nonsecure_base_test.cc +227 -0
  526. package/vendor/abseil-cpp/absl/random/internal/pcg_engine.h +287 -0
  527. package/vendor/abseil-cpp/absl/random/internal/pcg_engine_test.cc +638 -0
  528. package/vendor/abseil-cpp/absl/random/internal/platform.h +171 -0
  529. package/vendor/abseil-cpp/absl/random/internal/pool_urbg.cc +253 -0
  530. package/vendor/abseil-cpp/absl/random/internal/pool_urbg.h +131 -0
  531. package/vendor/abseil-cpp/absl/random/internal/pool_urbg_test.cc +182 -0
  532. package/vendor/abseil-cpp/absl/random/internal/randen.cc +91 -0
  533. package/vendor/abseil-cpp/absl/random/internal/randen.h +96 -0
  534. package/vendor/abseil-cpp/absl/random/internal/randen_benchmarks.cc +174 -0
  535. package/vendor/abseil-cpp/absl/random/internal/randen_detect.cc +225 -0
  536. package/vendor/abseil-cpp/absl/random/internal/randen_detect.h +33 -0
  537. package/vendor/abseil-cpp/absl/random/internal/randen_engine.h +264 -0
  538. package/vendor/abseil-cpp/absl/random/internal/randen_engine_test.cc +656 -0
  539. package/vendor/abseil-cpp/absl/random/internal/randen_hwaes.cc +526 -0
  540. package/vendor/abseil-cpp/absl/random/internal/randen_hwaes.h +50 -0
  541. package/vendor/abseil-cpp/absl/random/internal/randen_hwaes_test.cc +99 -0
  542. package/vendor/abseil-cpp/absl/random/internal/randen_round_keys.cc +462 -0
  543. package/vendor/abseil-cpp/absl/random/internal/randen_slow.cc +471 -0
  544. package/vendor/abseil-cpp/absl/random/internal/randen_slow.h +40 -0
  545. package/vendor/abseil-cpp/absl/random/internal/randen_slow_test.cc +61 -0
  546. package/vendor/abseil-cpp/absl/random/internal/randen_test.cc +75 -0
  547. package/vendor/abseil-cpp/absl/random/internal/randen_traits.h +88 -0
  548. package/vendor/abseil-cpp/absl/random/internal/salted_seed_seq.h +165 -0
  549. package/vendor/abseil-cpp/absl/random/internal/salted_seed_seq_test.cc +168 -0
  550. package/vendor/abseil-cpp/absl/random/internal/seed_material.cc +267 -0
  551. package/vendor/abseil-cpp/absl/random/internal/seed_material.h +104 -0
  552. package/vendor/abseil-cpp/absl/random/internal/seed_material_test.cc +202 -0
  553. package/vendor/abseil-cpp/absl/random/internal/sequence_urbg.h +60 -0
  554. package/vendor/abseil-cpp/absl/random/internal/traits.h +149 -0
  555. package/vendor/abseil-cpp/absl/random/internal/traits_test.cc +126 -0
  556. package/vendor/abseil-cpp/absl/random/internal/uniform_helper.h +244 -0
  557. package/vendor/abseil-cpp/absl/random/internal/uniform_helper_test.cc +279 -0
  558. package/vendor/abseil-cpp/absl/random/internal/wide_multiply.h +96 -0
  559. package/vendor/abseil-cpp/absl/random/internal/wide_multiply_test.cc +119 -0
  560. package/vendor/abseil-cpp/absl/random/log_uniform_int_distribution.h +256 -0
  561. package/vendor/abseil-cpp/absl/random/log_uniform_int_distribution_test.cc +280 -0
  562. package/vendor/abseil-cpp/absl/random/mock_distributions.h +266 -0
  563. package/vendor/abseil-cpp/absl/random/mock_distributions_test.cc +72 -0
  564. package/vendor/abseil-cpp/absl/random/mocking_bit_gen.h +240 -0
  565. package/vendor/abseil-cpp/absl/random/mocking_bit_gen_test.cc +394 -0
  566. package/vendor/abseil-cpp/absl/random/poisson_distribution.h +261 -0
  567. package/vendor/abseil-cpp/absl/random/poisson_distribution_test.cc +573 -0
  568. package/vendor/abseil-cpp/absl/random/random.h +189 -0
  569. package/vendor/abseil-cpp/absl/random/seed_gen_exception.cc +46 -0
  570. package/vendor/abseil-cpp/absl/random/seed_gen_exception.h +55 -0
  571. package/vendor/abseil-cpp/absl/random/seed_sequences.cc +29 -0
  572. package/vendor/abseil-cpp/absl/random/seed_sequences.h +111 -0
  573. package/vendor/abseil-cpp/absl/random/seed_sequences_test.cc +126 -0
  574. package/vendor/abseil-cpp/absl/random/uniform_int_distribution.h +275 -0
  575. package/vendor/abseil-cpp/absl/random/uniform_int_distribution_test.cc +260 -0
  576. package/vendor/abseil-cpp/absl/random/uniform_real_distribution.h +202 -0
  577. package/vendor/abseil-cpp/absl/random/uniform_real_distribution_test.cc +395 -0
  578. package/vendor/abseil-cpp/absl/random/zipf_distribution.h +272 -0
  579. package/vendor/abseil-cpp/absl/random/zipf_distribution_test.cc +427 -0
  580. package/vendor/abseil-cpp/absl/status/BUILD.bazel +109 -0
  581. package/vendor/abseil-cpp/absl/status/CMakeLists.txt +91 -0
  582. package/vendor/abseil-cpp/absl/status/internal/status_internal.h +87 -0
  583. package/vendor/abseil-cpp/absl/status/internal/statusor_internal.h +396 -0
  584. package/vendor/abseil-cpp/absl/status/status.cc +623 -0
  585. package/vendor/abseil-cpp/absl/status/status.h +892 -0
  586. package/vendor/abseil-cpp/absl/status/status_payload_printer.cc +38 -0
  587. package/vendor/abseil-cpp/absl/status/status_payload_printer.h +51 -0
  588. package/vendor/abseil-cpp/absl/status/status_test.cc +509 -0
  589. package/vendor/abseil-cpp/absl/status/statusor.cc +103 -0
  590. package/vendor/abseil-cpp/absl/status/statusor.h +776 -0
  591. package/vendor/abseil-cpp/absl/status/statusor_test.cc +1847 -0
  592. package/vendor/abseil-cpp/absl/strings/BUILD.bazel +1319 -0
  593. package/vendor/abseil-cpp/absl/strings/CMakeLists.txt +1119 -0
  594. package/vendor/abseil-cpp/absl/strings/ascii.cc +200 -0
  595. package/vendor/abseil-cpp/absl/strings/ascii.h +242 -0
  596. package/vendor/abseil-cpp/absl/strings/ascii_benchmark.cc +120 -0
  597. package/vendor/abseil-cpp/absl/strings/ascii_test.cc +357 -0
  598. package/vendor/abseil-cpp/absl/strings/atod_manual_test.cc +193 -0
  599. package/vendor/abseil-cpp/absl/strings/charconv.cc +1422 -0
  600. package/vendor/abseil-cpp/absl/strings/charconv.h +120 -0
  601. package/vendor/abseil-cpp/absl/strings/charconv_benchmark.cc +204 -0
  602. package/vendor/abseil-cpp/absl/strings/charconv_test.cc +782 -0
  603. package/vendor/abseil-cpp/absl/strings/cord.cc +1380 -0
  604. package/vendor/abseil-cpp/absl/strings/cord.h +1633 -0
  605. package/vendor/abseil-cpp/absl/strings/cord_analysis.cc +188 -0
  606. package/vendor/abseil-cpp/absl/strings/cord_analysis.h +44 -0
  607. package/vendor/abseil-cpp/absl/strings/cord_buffer.cc +30 -0
  608. package/vendor/abseil-cpp/absl/strings/cord_buffer.h +575 -0
  609. package/vendor/abseil-cpp/absl/strings/cord_buffer_test.cc +320 -0
  610. package/vendor/abseil-cpp/absl/strings/cord_ring_reader_test.cc +180 -0
  611. package/vendor/abseil-cpp/absl/strings/cord_ring_test.cc +1454 -0
  612. package/vendor/abseil-cpp/absl/strings/cord_test.cc +3125 -0
  613. package/vendor/abseil-cpp/absl/strings/cord_test_helpers.h +122 -0
  614. package/vendor/abseil-cpp/absl/strings/cordz_test.cc +466 -0
  615. package/vendor/abseil-cpp/absl/strings/cordz_test_helpers.h +151 -0
  616. package/vendor/abseil-cpp/absl/strings/escaping.cc +960 -0
  617. package/vendor/abseil-cpp/absl/strings/escaping.h +169 -0
  618. package/vendor/abseil-cpp/absl/strings/escaping_benchmark.cc +94 -0
  619. package/vendor/abseil-cpp/absl/strings/escaping_test.cc +706 -0
  620. package/vendor/abseil-cpp/absl/strings/internal/char_map.h +158 -0
  621. package/vendor/abseil-cpp/absl/strings/internal/char_map_benchmark.cc +61 -0
  622. package/vendor/abseil-cpp/absl/strings/internal/char_map_test.cc +172 -0
  623. package/vendor/abseil-cpp/absl/strings/internal/charconv_bigint.cc +359 -0
  624. package/vendor/abseil-cpp/absl/strings/internal/charconv_bigint.h +423 -0
  625. package/vendor/abseil-cpp/absl/strings/internal/charconv_bigint_test.cc +260 -0
  626. package/vendor/abseil-cpp/absl/strings/internal/charconv_parse.cc +504 -0
  627. package/vendor/abseil-cpp/absl/strings/internal/charconv_parse.h +99 -0
  628. package/vendor/abseil-cpp/absl/strings/internal/charconv_parse_test.cc +357 -0
  629. package/vendor/abseil-cpp/absl/strings/internal/cord_data_edge.h +63 -0
  630. package/vendor/abseil-cpp/absl/strings/internal/cord_data_edge_test.cc +130 -0
  631. package/vendor/abseil-cpp/absl/strings/internal/cord_internal.cc +77 -0
  632. package/vendor/abseil-cpp/absl/strings/internal/cord_internal.h +915 -0
  633. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree.cc +1232 -0
  634. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree.h +936 -0
  635. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc +187 -0
  636. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.h +267 -0
  637. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator_test.cc +346 -0
  638. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc +69 -0
  639. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.h +212 -0
  640. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_reader_test.cc +293 -0
  641. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_btree_test.cc +1568 -0
  642. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_consume.cc +62 -0
  643. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_consume.h +50 -0
  644. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_crc.cc +56 -0
  645. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_crc.h +103 -0
  646. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_crc_test.cc +130 -0
  647. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_flat.h +187 -0
  648. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_ring.cc +773 -0
  649. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_ring.h +607 -0
  650. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h +118 -0
  651. package/vendor/abseil-cpp/absl/strings/internal/cord_rep_test_util.h +205 -0
  652. package/vendor/abseil-cpp/absl/strings/internal/cordz_functions.cc +96 -0
  653. package/vendor/abseil-cpp/absl/strings/internal/cordz_functions.h +77 -0
  654. package/vendor/abseil-cpp/absl/strings/internal/cordz_functions_test.cc +149 -0
  655. package/vendor/abseil-cpp/absl/strings/internal/cordz_handle.cc +139 -0
  656. package/vendor/abseil-cpp/absl/strings/internal/cordz_handle.h +131 -0
  657. package/vendor/abseil-cpp/absl/strings/internal/cordz_handle_test.cc +265 -0
  658. package/vendor/abseil-cpp/absl/strings/internal/cordz_info.cc +421 -0
  659. package/vendor/abseil-cpp/absl/strings/internal/cordz_info.h +298 -0
  660. package/vendor/abseil-cpp/absl/strings/internal/cordz_info_statistics_test.cc +557 -0
  661. package/vendor/abseil-cpp/absl/strings/internal/cordz_info_test.cc +342 -0
  662. package/vendor/abseil-cpp/absl/strings/internal/cordz_sample_token.cc +64 -0
  663. package/vendor/abseil-cpp/absl/strings/internal/cordz_sample_token.h +97 -0
  664. package/vendor/abseil-cpp/absl/strings/internal/cordz_sample_token_test.cc +208 -0
  665. package/vendor/abseil-cpp/absl/strings/internal/cordz_statistics.h +88 -0
  666. package/vendor/abseil-cpp/absl/strings/internal/cordz_update_scope.h +71 -0
  667. package/vendor/abseil-cpp/absl/strings/internal/cordz_update_scope_test.cc +49 -0
  668. package/vendor/abseil-cpp/absl/strings/internal/cordz_update_tracker.h +123 -0
  669. package/vendor/abseil-cpp/absl/strings/internal/cordz_update_tracker_test.cc +147 -0
  670. package/vendor/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.cc +93 -0
  671. package/vendor/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance.h +34 -0
  672. package/vendor/abseil-cpp/absl/strings/internal/damerau_levenshtein_distance_test.cc +99 -0
  673. package/vendor/abseil-cpp/absl/strings/internal/escaping.cc +183 -0
  674. package/vendor/abseil-cpp/absl/strings/internal/escaping.h +56 -0
  675. package/vendor/abseil-cpp/absl/strings/internal/escaping_test_common.h +133 -0
  676. package/vendor/abseil-cpp/absl/strings/internal/has_absl_stringify.h +55 -0
  677. package/vendor/abseil-cpp/absl/strings/internal/memutil.cc +115 -0
  678. package/vendor/abseil-cpp/absl/strings/internal/memutil.h +148 -0
  679. package/vendor/abseil-cpp/absl/strings/internal/memutil_benchmark.cc +323 -0
  680. package/vendor/abseil-cpp/absl/strings/internal/memutil_test.cc +179 -0
  681. package/vendor/abseil-cpp/absl/strings/internal/numbers_test_common.h +184 -0
  682. package/vendor/abseil-cpp/absl/strings/internal/ostringstream.cc +43 -0
  683. package/vendor/abseil-cpp/absl/strings/internal/ostringstream.h +114 -0
  684. package/vendor/abseil-cpp/absl/strings/internal/ostringstream_benchmark.cc +106 -0
  685. package/vendor/abseil-cpp/absl/strings/internal/ostringstream_test.cc +131 -0
  686. package/vendor/abseil-cpp/absl/strings/internal/pow10_helper.cc +122 -0
  687. package/vendor/abseil-cpp/absl/strings/internal/pow10_helper.h +40 -0
  688. package/vendor/abseil-cpp/absl/strings/internal/pow10_helper_test.cc +122 -0
  689. package/vendor/abseil-cpp/absl/strings/internal/resize_uninitialized.h +119 -0
  690. package/vendor/abseil-cpp/absl/strings/internal/resize_uninitialized_test.cc +133 -0
  691. package/vendor/abseil-cpp/absl/strings/internal/stl_type_traits.h +248 -0
  692. package/vendor/abseil-cpp/absl/strings/internal/str_format/arg.cc +555 -0
  693. package/vendor/abseil-cpp/absl/strings/internal/str_format/arg.h +623 -0
  694. package/vendor/abseil-cpp/absl/strings/internal/str_format/arg_test.cc +130 -0
  695. package/vendor/abseil-cpp/absl/strings/internal/str_format/bind.cc +259 -0
  696. package/vendor/abseil-cpp/absl/strings/internal/str_format/bind.h +249 -0
  697. package/vendor/abseil-cpp/absl/strings/internal/str_format/bind_test.cc +157 -0
  698. package/vendor/abseil-cpp/absl/strings/internal/str_format/checker.h +100 -0
  699. package/vendor/abseil-cpp/absl/strings/internal/str_format/checker_test.cc +176 -0
  700. package/vendor/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h +351 -0
  701. package/vendor/abseil-cpp/absl/strings/internal/str_format/convert_test.cc +1257 -0
  702. package/vendor/abseil-cpp/absl/strings/internal/str_format/extension.cc +75 -0
  703. package/vendor/abseil-cpp/absl/strings/internal/str_format/extension.h +450 -0
  704. package/vendor/abseil-cpp/absl/strings/internal/str_format/extension_test.cc +109 -0
  705. package/vendor/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc +1457 -0
  706. package/vendor/abseil-cpp/absl/strings/internal/str_format/float_conversion.h +37 -0
  707. package/vendor/abseil-cpp/absl/strings/internal/str_format/output.cc +72 -0
  708. package/vendor/abseil-cpp/absl/strings/internal/str_format/output.h +97 -0
  709. package/vendor/abseil-cpp/absl/strings/internal/str_format/output_test.cc +79 -0
  710. package/vendor/abseil-cpp/absl/strings/internal/str_format/parser.cc +140 -0
  711. package/vendor/abseil-cpp/absl/strings/internal/str_format/parser.h +268 -0
  712. package/vendor/abseil-cpp/absl/strings/internal/str_format/parser_test.cc +438 -0
  713. package/vendor/abseil-cpp/absl/strings/internal/str_join_internal.h +317 -0
  714. package/vendor/abseil-cpp/absl/strings/internal/str_split_internal.h +431 -0
  715. package/vendor/abseil-cpp/absl/strings/internal/string_constant.h +72 -0
  716. package/vendor/abseil-cpp/absl/strings/internal/string_constant_test.cc +60 -0
  717. package/vendor/abseil-cpp/absl/strings/internal/stringify_sink.cc +28 -0
  718. package/vendor/abseil-cpp/absl/strings/internal/stringify_sink.h +57 -0
  719. package/vendor/abseil-cpp/absl/strings/internal/utf8.cc +53 -0
  720. package/vendor/abseil-cpp/absl/strings/internal/utf8.h +50 -0
  721. package/vendor/abseil-cpp/absl/strings/internal/utf8_test.cc +66 -0
  722. package/vendor/abseil-cpp/absl/strings/match.cc +43 -0
  723. package/vendor/abseil-cpp/absl/strings/match.h +100 -0
  724. package/vendor/abseil-cpp/absl/strings/match_test.cc +127 -0
  725. package/vendor/abseil-cpp/absl/strings/numbers.cc +1096 -0
  726. package/vendor/abseil-cpp/absl/strings/numbers.h +303 -0
  727. package/vendor/abseil-cpp/absl/strings/numbers_benchmark.cc +286 -0
  728. package/vendor/abseil-cpp/absl/strings/numbers_test.cc +1722 -0
  729. package/vendor/abseil-cpp/absl/strings/str_cat.cc +249 -0
  730. package/vendor/abseil-cpp/absl/strings/str_cat.h +462 -0
  731. package/vendor/abseil-cpp/absl/strings/str_cat_benchmark.cc +187 -0
  732. package/vendor/abseil-cpp/absl/strings/str_cat_test.cc +665 -0
  733. package/vendor/abseil-cpp/absl/strings/str_format.h +874 -0
  734. package/vendor/abseil-cpp/absl/strings/str_format_test.cc +1212 -0
  735. package/vendor/abseil-cpp/absl/strings/str_join.h +287 -0
  736. package/vendor/abseil-cpp/absl/strings/str_join_benchmark.cc +97 -0
  737. package/vendor/abseil-cpp/absl/strings/str_join_test.cc +608 -0
  738. package/vendor/abseil-cpp/absl/strings/str_replace.cc +82 -0
  739. package/vendor/abseil-cpp/absl/strings/str_replace.h +219 -0
  740. package/vendor/abseil-cpp/absl/strings/str_replace_benchmark.cc +122 -0
  741. package/vendor/abseil-cpp/absl/strings/str_replace_test.cc +341 -0
  742. package/vendor/abseil-cpp/absl/strings/str_split.cc +139 -0
  743. package/vendor/abseil-cpp/absl/strings/str_split.h +547 -0
  744. package/vendor/abseil-cpp/absl/strings/str_split_benchmark.cc +180 -0
  745. package/vendor/abseil-cpp/absl/strings/str_split_test.cc +981 -0
  746. package/vendor/abseil-cpp/absl/strings/string_view.cc +219 -0
  747. package/vendor/abseil-cpp/absl/strings/string_view.h +704 -0
  748. package/vendor/abseil-cpp/absl/strings/string_view_benchmark.cc +381 -0
  749. package/vendor/abseil-cpp/absl/strings/string_view_test.cc +1308 -0
  750. package/vendor/abseil-cpp/absl/strings/strip.h +93 -0
  751. package/vendor/abseil-cpp/absl/strings/strip_test.cc +198 -0
  752. package/vendor/abseil-cpp/absl/strings/substitute.cc +174 -0
  753. package/vendor/abseil-cpp/absl/strings/substitute.h +755 -0
  754. package/vendor/abseil-cpp/absl/strings/substitute_test.cc +285 -0
  755. package/vendor/abseil-cpp/absl/synchronization/BUILD.bazel +333 -0
  756. package/vendor/abseil-cpp/absl/synchronization/CMakeLists.txt +235 -0
  757. package/vendor/abseil-cpp/absl/synchronization/barrier.cc +52 -0
  758. package/vendor/abseil-cpp/absl/synchronization/barrier.h +79 -0
  759. package/vendor/abseil-cpp/absl/synchronization/barrier_test.cc +75 -0
  760. package/vendor/abseil-cpp/absl/synchronization/blocking_counter.cc +67 -0
  761. package/vendor/abseil-cpp/absl/synchronization/blocking_counter.h +101 -0
  762. package/vendor/abseil-cpp/absl/synchronization/blocking_counter_benchmark.cc +83 -0
  763. package/vendor/abseil-cpp/absl/synchronization/blocking_counter_test.cc +80 -0
  764. package/vendor/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc +143 -0
  765. package/vendor/abseil-cpp/absl/synchronization/internal/create_thread_identity.h +56 -0
  766. package/vendor/abseil-cpp/absl/synchronization/internal/futex.h +157 -0
  767. package/vendor/abseil-cpp/absl/synchronization/internal/graphcycles.cc +704 -0
  768. package/vendor/abseil-cpp/absl/synchronization/internal/graphcycles.h +141 -0
  769. package/vendor/abseil-cpp/absl/synchronization/internal/graphcycles_benchmark.cc +44 -0
  770. package/vendor/abseil-cpp/absl/synchronization/internal/graphcycles_test.cc +464 -0
  771. package/vendor/abseil-cpp/absl/synchronization/internal/kernel_timeout.h +170 -0
  772. package/vendor/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc +102 -0
  773. package/vendor/abseil-cpp/absl/synchronization/internal/per_thread_sem.h +110 -0
  774. package/vendor/abseil-cpp/absl/synchronization/internal/per_thread_sem_test.cc +190 -0
  775. package/vendor/abseil-cpp/absl/synchronization/internal/thread_pool.h +96 -0
  776. package/vendor/abseil-cpp/absl/synchronization/internal/waiter.cc +403 -0
  777. package/vendor/abseil-cpp/absl/synchronization/internal/waiter.h +161 -0
  778. package/vendor/abseil-cpp/absl/synchronization/lifetime_test.cc +181 -0
  779. package/vendor/abseil-cpp/absl/synchronization/mutex.cc +2835 -0
  780. package/vendor/abseil-cpp/absl/synchronization/mutex.h +1129 -0
  781. package/vendor/abseil-cpp/absl/synchronization/mutex_benchmark.cc +310 -0
  782. package/vendor/abseil-cpp/absl/synchronization/mutex_method_pointer_test.cc +138 -0
  783. package/vendor/abseil-cpp/absl/synchronization/mutex_test.cc +1733 -0
  784. package/vendor/abseil-cpp/absl/synchronization/notification.cc +77 -0
  785. package/vendor/abseil-cpp/absl/synchronization/notification.h +123 -0
  786. package/vendor/abseil-cpp/absl/synchronization/notification_test.cc +133 -0
  787. package/vendor/abseil-cpp/absl/time/BUILD.bazel +148 -0
  788. package/vendor/abseil-cpp/absl/time/CMakeLists.txt +140 -0
  789. package/vendor/abseil-cpp/absl/time/civil_time.cc +199 -0
  790. package/vendor/abseil-cpp/absl/time/civil_time.h +563 -0
  791. package/vendor/abseil-cpp/absl/time/civil_time_benchmark.cc +127 -0
  792. package/vendor/abseil-cpp/absl/time/civil_time_test.cc +1243 -0
  793. package/vendor/abseil-cpp/absl/time/clock.cc +591 -0
  794. package/vendor/abseil-cpp/absl/time/clock.h +74 -0
  795. package/vendor/abseil-cpp/absl/time/clock_benchmark.cc +74 -0
  796. package/vendor/abseil-cpp/absl/time/clock_test.cc +122 -0
  797. package/vendor/abseil-cpp/absl/time/duration.cc +955 -0
  798. package/vendor/abseil-cpp/absl/time/duration_benchmark.cc +444 -0
  799. package/vendor/abseil-cpp/absl/time/duration_test.cc +1856 -0
  800. package/vendor/abseil-cpp/absl/time/flag_test.cc +147 -0
  801. package/vendor/abseil-cpp/absl/time/format.cc +161 -0
  802. package/vendor/abseil-cpp/absl/time/format_benchmark.cc +64 -0
  803. package/vendor/abseil-cpp/absl/time/format_test.cc +441 -0
  804. package/vendor/abseil-cpp/absl/time/internal/cctz/BUILD.bazel +159 -0
  805. package/vendor/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time.h +332 -0
  806. package/vendor/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +632 -0
  807. package/vendor/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h +459 -0
  808. package/vendor/abseil-cpp/absl/time/internal/cctz/include/cctz/zone_info_source.h +102 -0
  809. package/vendor/abseil-cpp/absl/time/internal/cctz/src/cctz_benchmark.cc +1033 -0
  810. package/vendor/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc +94 -0
  811. package/vendor/abseil-cpp/absl/time/internal/cctz/src/civil_time_test.cc +1066 -0
  812. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc +140 -0
  813. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.h +52 -0
  814. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc +1029 -0
  815. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_format_test.cc +1688 -0
  816. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc +45 -0
  817. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h +77 -0
  818. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc +113 -0
  819. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h +93 -0
  820. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc +1048 -0
  821. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h +137 -0
  822. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc +315 -0
  823. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h +55 -0
  824. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc +237 -0
  825. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup_test.cc +1504 -0
  826. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc +159 -0
  827. package/vendor/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h +132 -0
  828. package/vendor/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +122 -0
  829. package/vendor/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc +116 -0
  830. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/README.zoneinfo +38 -0
  831. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/version +1 -0
  832. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan +0 -0
  833. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra +0 -0
  834. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa +0 -0
  835. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers +0 -0
  836. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara +0 -0
  837. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera +0 -0
  838. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako +0 -0
  839. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui +0 -0
  840. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul +0 -0
  841. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau +0 -0
  842. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre +0 -0
  843. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville +0 -0
  844. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura +0 -0
  845. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo +0 -0
  846. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca +0 -0
  847. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta +0 -0
  848. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry +0 -0
  849. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar +0 -0
  850. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam +0 -0
  851. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti +0 -0
  852. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala +0 -0
  853. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun +0 -0
  854. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown +0 -0
  855. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone +0 -0
  856. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare +0 -0
  857. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg +0 -0
  858. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba +0 -0
  859. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala +0 -0
  860. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum +0 -0
  861. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali +0 -0
  862. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa +0 -0
  863. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos +0 -0
  864. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville +0 -0
  865. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome +0 -0
  866. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda +0 -0
  867. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi +0 -0
  868. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka +0 -0
  869. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo +0 -0
  870. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo +0 -0
  871. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru +0 -0
  872. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane +0 -0
  873. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu +0 -0
  874. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia +0 -0
  875. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi +0 -0
  876. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena +0 -0
  877. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey +0 -0
  878. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott +0 -0
  879. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou +0 -0
  880. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo +0 -0
  881. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome +0 -0
  882. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu +0 -0
  883. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli +0 -0
  884. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis +0 -0
  885. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek +0 -0
  886. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Adak +0 -0
  887. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage +0 -0
  888. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla +0 -0
  889. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua +0 -0
  890. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina +0 -0
  891. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires +0 -0
  892. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca +0 -0
  893. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia +0 -0
  894. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba +0 -0
  895. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy +0 -0
  896. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja +0 -0
  897. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza +0 -0
  898. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos +0 -0
  899. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta +0 -0
  900. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan +0 -0
  901. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis +0 -0
  902. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman +0 -0
  903. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia +0 -0
  904. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba +0 -0
  905. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion +0 -0
  906. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan +0 -0
  907. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Atka +0 -0
  908. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia +0 -0
  909. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas +0 -0
  910. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados +0 -0
  911. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belem +0 -0
  912. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Belize +0 -0
  913. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon +0 -0
  914. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista +0 -0
  915. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota +0 -0
  916. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Boise +0 -0
  917. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires +0 -0
  918. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay +0 -0
  919. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande +0 -0
  920. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun +0 -0
  921. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas +0 -0
  922. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca +0 -0
  923. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne +0 -0
  924. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman +0 -0
  925. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago +0 -0
  926. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua +0 -0
  927. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ciudad_Juarez +0 -0
  928. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour +0 -0
  929. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba +0 -0
  930. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica +0 -0
  931. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Creston +0 -0
  932. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba +0 -0
  933. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao +0 -0
  934. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn +0 -0
  935. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson +0 -0
  936. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek +0 -0
  937. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Denver +0 -0
  938. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit +0 -0
  939. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica +0 -0
  940. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton +0 -0
  941. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe +0 -0
  942. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador +0 -0
  943. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada +0 -0
  944. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson +0 -0
  945. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne +0 -0
  946. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza +0 -0
  947. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay +0 -0
  948. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab +0 -0
  949. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay +0 -0
  950. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk +0 -0
  951. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada +0 -0
  952. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe +0 -0
  953. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala +0 -0
  954. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil +0 -0
  955. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana +0 -0
  956. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax +0 -0
  957. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Havana +0 -0
  958. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo +0 -0
  959. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis +0 -0
  960. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox +0 -0
  961. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo +0 -0
  962. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg +0 -0
  963. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City +0 -0
  964. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay +0 -0
  965. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes +0 -0
  966. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac +0 -0
  967. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis +0 -0
  968. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik +0 -0
  969. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit +0 -0
  970. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica +0 -0
  971. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy +0 -0
  972. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau +0 -0
  973. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville +0 -0
  974. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello +0 -0
  975. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN +0 -0
  976. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk +0 -0
  977. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz +0 -0
  978. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lima +0 -0
  979. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles +0 -0
  980. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville +0 -0
  981. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes +0 -0
  982. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio +0 -0
  983. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Managua +0 -0
  984. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus +0 -0
  985. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot +0 -0
  986. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique +0 -0
  987. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros +0 -0
  988. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan +0 -0
  989. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza +0 -0
  990. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee +0 -0
  991. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Merida +0 -0
  992. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla +0 -0
  993. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City +0 -0
  994. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon +0 -0
  995. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton +0 -0
  996. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey +0 -0
  997. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo +0 -0
  998. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal +0 -0
  999. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat +0 -0
  1000. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau +0 -0
  1001. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/New_York +0 -0
  1002. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon +0 -0
  1003. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nome +0 -0
  1004. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha +0 -0
  1005. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah +0 -0
  1006. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center +0 -0
  1007. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem +0 -0
  1008. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk +0 -0
  1009. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga +0 -0
  1010. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Panama +0 -0
  1011. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung +0 -0
  1012. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo +0 -0
  1013. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix +0 -0
  1014. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince +0 -0
  1015. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain +0 -0
  1016. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre +0 -0
  1017. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho +0 -0
  1018. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico +0 -0
  1019. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas +0 -0
  1020. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River +0 -0
  1021. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet +0 -0
  1022. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Recife +0 -0
  1023. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Regina +0 -0
  1024. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute +0 -0
  1025. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco +0 -0
  1026. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario +0 -0
  1027. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel +0 -0
  1028. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem +0 -0
  1029. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago +0 -0
  1030. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo +0 -0
  1031. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo +0 -0
  1032. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund +0 -0
  1033. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock +0 -0
  1034. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka +0 -0
  1035. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy +0 -0
  1036. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns +0 -0
  1037. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts +0 -0
  1038. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia +0 -0
  1039. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas +0 -0
  1040. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent +0 -0
  1041. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current +0 -0
  1042. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa +0 -0
  1043. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thule +0 -0
  1044. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay +0 -0
  1045. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana +0 -0
  1046. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto +0 -0
  1047. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola +0 -0
  1048. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver +0 -0
  1049. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin +0 -0
  1050. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse +0 -0
  1051. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg +0 -0
  1052. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat +0 -0
  1053. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife +0 -0
  1054. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey +0 -0
  1055. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis +0 -0
  1056. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville +0 -0
  1057. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie +0 -0
  1058. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson +0 -0
  1059. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo +0 -0
  1060. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer +0 -0
  1061. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera +0 -0
  1062. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole +0 -0
  1063. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa +0 -0
  1064. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll +0 -0
  1065. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok +0 -0
  1066. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen +0 -0
  1067. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden +0 -0
  1068. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty +0 -0
  1069. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman +0 -0
  1070. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr +0 -0
  1071. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau +0 -0
  1072. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe +0 -0
  1073. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat +0 -0
  1074. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad +0 -0
  1075. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau +0 -0
  1076. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad +0 -0
  1077. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain +0 -0
  1078. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku +0 -0
  1079. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok +0 -0
  1080. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul +0 -0
  1081. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut +0 -0
  1082. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek +0 -0
  1083. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei +0 -0
  1084. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta +0 -0
  1085. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita +0 -0
  1086. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan +0 -0
  1087. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing +0 -0
  1088. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking +0 -0
  1089. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo +0 -0
  1090. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca +0 -0
  1091. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus +0 -0
  1092. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka +0 -0
  1093. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili +0 -0
  1094. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai +0 -0
  1095. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe +0 -0
  1096. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta +0 -0
  1097. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza +0 -0
  1098. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin +0 -0
  1099. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron +0 -0
  1100. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh +0 -0
  1101. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong +0 -0
  1102. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd +0 -0
  1103. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk +0 -0
  1104. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul +0 -0
  1105. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta +0 -0
  1106. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura +0 -0
  1107. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem +0 -0
  1108. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul +0 -0
  1109. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka +0 -0
  1110. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi +0 -0
  1111. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar +0 -0
  1112. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu +0 -0
  1113. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu +0 -0
  1114. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga +0 -0
  1115. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata +0 -0
  1116. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk +0 -0
  1117. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur +0 -0
  1118. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching +0 -0
  1119. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait +0 -0
  1120. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao +0 -0
  1121. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau +0 -0
  1122. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan +0 -0
  1123. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar +0 -0
  1124. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila +0 -0
  1125. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat +0 -0
  1126. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia +0 -0
  1127. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk +0 -0
  1128. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk +0 -0
  1129. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk +0 -0
  1130. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral +0 -0
  1131. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh +0 -0
  1132. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak +0 -0
  1133. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang +0 -0
  1134. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar +0 -0
  1135. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay +0 -0
  1136. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda +0 -0
  1137. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon +0 -0
  1138. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh +0 -0
  1139. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon +0 -0
  1140. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin +0 -0
  1141. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand +0 -0
  1142. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul +0 -0
  1143. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai +0 -0
  1144. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore +0 -0
  1145. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk +0 -0
  1146. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei +0 -0
  1147. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent +0 -0
  1148. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi +0 -0
  1149. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran +0 -0
  1150. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv +0 -0
  1151. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu +0 -0
  1152. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu +0 -0
  1153. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo +0 -0
  1154. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk +0 -0
  1155. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang +0 -0
  1156. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar +0 -0
  1157. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator +0 -0
  1158. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi +0 -0
  1159. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera +0 -0
  1160. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane +0 -0
  1161. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok +0 -0
  1162. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk +0 -0
  1163. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon +0 -0
  1164. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg +0 -0
  1165. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan +0 -0
  1166. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores +0 -0
  1167. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda +0 -0
  1168. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary +0 -0
  1169. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde +0 -0
  1170. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe +0 -0
  1171. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe +0 -0
  1172. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen +0 -0
  1173. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira +0 -0
  1174. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik +0 -0
  1175. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia +0 -0
  1176. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena +0 -0
  1177. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley +0 -0
  1178. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT +0 -0
  1179. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide +0 -0
  1180. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane +0 -0
  1181. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill +0 -0
  1182. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra +0 -0
  1183. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie +0 -0
  1184. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin +0 -0
  1185. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla +0 -0
  1186. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart +0 -0
  1187. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI +0 -0
  1188. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman +0 -0
  1189. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe +0 -0
  1190. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne +0 -0
  1191. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW +0 -0
  1192. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/North +0 -0
  1193. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth +0 -0
  1194. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland +0 -0
  1195. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/South +0 -0
  1196. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney +0 -0
  1197. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania +0 -0
  1198. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria +0 -0
  1199. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/West +0 -0
  1200. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna +0 -0
  1201. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre +0 -0
  1202. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha +0 -0
  1203. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East +0 -0
  1204. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West +0 -0
  1205. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CET +0 -0
  1206. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT +0 -0
  1207. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic +0 -0
  1208. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central +0 -0
  1209. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern +0 -0
  1210. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain +0 -0
  1211. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland +0 -0
  1212. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific +0 -0
  1213. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan +0 -0
  1214. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon +0 -0
  1215. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental +0 -0
  1216. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland +0 -0
  1217. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Cuba +0 -0
  1218. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EET +0 -0
  1219. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST +0 -0
  1220. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT +0 -0
  1221. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Egypt +0 -0
  1222. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Eire +0 -0
  1223. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT +0 -0
  1224. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0 +0 -0
  1225. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1 +0 -0
  1226. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10 +0 -0
  1227. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11 +0 -0
  1228. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12 +0 -0
  1229. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2 +0 -0
  1230. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3 +0 -0
  1231. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4 +0 -0
  1232. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5 +0 -0
  1233. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6 +0 -0
  1234. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7 +0 -0
  1235. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8 +0 -0
  1236. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9 +0 -0
  1237. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0 +0 -0
  1238. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1 +0 -0
  1239. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10 +0 -0
  1240. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11 +0 -0
  1241. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12 +0 -0
  1242. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13 +0 -0
  1243. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14 +0 -0
  1244. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2 +0 -0
  1245. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3 +0 -0
  1246. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4 +0 -0
  1247. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5 +0 -0
  1248. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6 +0 -0
  1249. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7 +0 -0
  1250. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8 +0 -0
  1251. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9 +0 -0
  1252. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0 +0 -0
  1253. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich +0 -0
  1254. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT +0 -0
  1255. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC +0 -0
  1256. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal +0 -0
  1257. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu +0 -0
  1258. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam +0 -0
  1259. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra +0 -0
  1260. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan +0 -0
  1261. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens +0 -0
  1262. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast +0 -0
  1263. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade +0 -0
  1264. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin +0 -0
  1265. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava +0 -0
  1266. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels +0 -0
  1267. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest +0 -0
  1268. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest +0 -0
  1269. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen +0 -0
  1270. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau +0 -0
  1271. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen +0 -0
  1272. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin +0 -0
  1273. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar +0 -0
  1274. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey +0 -0
  1275. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki +0 -0
  1276. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man +0 -0
  1277. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul +0 -0
  1278. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey +0 -0
  1279. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad +0 -0
  1280. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev +0 -0
  1281. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov +0 -0
  1282. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv +0 -0
  1283. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon +0 -0
  1284. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana +0 -0
  1285. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/London +0 -0
  1286. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg +0 -0
  1287. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid +0 -0
  1288. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta +0 -0
  1289. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn +0 -0
  1290. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk +0 -0
  1291. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco +0 -0
  1292. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow +0 -0
  1293. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia +0 -0
  1294. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo +0 -0
  1295. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris +0 -0
  1296. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica +0 -0
  1297. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague +0 -0
  1298. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga +0 -0
  1299. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome +0 -0
  1300. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara +0 -0
  1301. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino +0 -0
  1302. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo +0 -0
  1303. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov +0 -0
  1304. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol +0 -0
  1305. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje +0 -0
  1306. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia +0 -0
  1307. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm +0 -0
  1308. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn +0 -0
  1309. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane +0 -0
  1310. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol +0 -0
  1311. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk +0 -0
  1312. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod +0 -0
  1313. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz +0 -0
  1314. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican +0 -0
  1315. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna +0 -0
  1316. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius +0 -0
  1317. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd +0 -0
  1318. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw +0 -0
  1319. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb +0 -0
  1320. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye +0 -0
  1321. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich +0 -0
  1322. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Factory +0 -0
  1323. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB +0 -0
  1324. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire +0 -0
  1325. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT +0 -0
  1326. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT+0 +0 -0
  1327. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT-0 +0 -0
  1328. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/GMT0 +0 -0
  1329. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Greenwich +0 -0
  1330. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/HST +0 -0
  1331. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Hongkong +0 -0
  1332. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iceland +0 -0
  1333. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo +0 -0
  1334. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos +0 -0
  1335. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas +0 -0
  1336. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos +0 -0
  1337. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro +0 -0
  1338. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen +0 -0
  1339. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe +0 -0
  1340. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives +0 -0
  1341. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius +0 -0
  1342. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte +0 -0
  1343. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion +0 -0
  1344. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Iran +0 -0
  1345. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Israel +0 -0
  1346. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Jamaica +0 -0
  1347. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Japan +0 -0
  1348. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein +0 -0
  1349. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Libya +0 -0
  1350. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MET +0 -0
  1351. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST +0 -0
  1352. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT +0 -0
  1353. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte +0 -0
  1354. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur +0 -0
  1355. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General +0 -0
  1356. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ +0 -0
  1357. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT +0 -0
  1358. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Navajo +0 -0
  1359. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PRC +0 -0
  1360. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT +0 -0
  1361. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia +0 -0
  1362. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland +0 -0
  1363. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville +0 -0
  1364. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham +0 -0
  1365. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk +0 -0
  1366. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter +0 -0
  1367. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate +0 -0
  1368. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury +0 -0
  1369. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo +0 -0
  1370. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji +0 -0
  1371. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti +0 -0
  1372. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos +0 -0
  1373. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier +0 -0
  1374. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal +0 -0
  1375. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam +0 -0
  1376. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu +0 -0
  1377. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston +0 -0
  1378. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton +0 -0
  1379. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati +0 -0
  1380. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae +0 -0
  1381. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein +0 -0
  1382. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro +0 -0
  1383. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas +0 -0
  1384. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway +0 -0
  1385. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru +0 -0
  1386. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue +0 -0
  1387. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk +0 -0
  1388. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea +0 -0
  1389. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago +0 -0
  1390. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau +0 -0
  1391. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn +0 -0
  1392. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei +0 -0
  1393. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape +0 -0
  1394. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby +0 -0
  1395. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga +0 -0
  1396. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan +0 -0
  1397. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa +0 -0
  1398. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti +0 -0
  1399. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa +0 -0
  1400. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu +0 -0
  1401. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk +0 -0
  1402. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake +0 -0
  1403. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis +0 -0
  1404. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap +0 -0
  1405. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Poland +0 -0
  1406. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Portugal +0 -0
  1407. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROC +0 -0
  1408. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/ROK +0 -0
  1409. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Singapore +0 -0
  1410. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Turkey +0 -0
  1411. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UCT +0 -0
  1412. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska +0 -0
  1413. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian +0 -0
  1414. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona +0 -0
  1415. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Central +0 -0
  1416. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana +0 -0
  1417. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern +0 -0
  1418. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii +0 -0
  1419. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke +0 -0
  1420. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan +0 -0
  1421. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain +0 -0
  1422. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific +0 -0
  1423. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa +0 -0
  1424. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/UTC +0 -0
  1425. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Universal +0 -0
  1426. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/W-SU +0 -0
  1427. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/WET +0 -0
  1428. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/Zulu +0 -0
  1429. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab +274 -0
  1430. package/vendor/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab +372 -0
  1431. package/vendor/abseil-cpp/absl/time/internal/get_current_time_chrono.inc +31 -0
  1432. package/vendor/abseil-cpp/absl/time/internal/get_current_time_posix.inc +24 -0
  1433. package/vendor/abseil-cpp/absl/time/internal/test_util.cc +32 -0
  1434. package/vendor/abseil-cpp/absl/time/internal/test_util.h +33 -0
  1435. package/vendor/abseil-cpp/absl/time/time.cc +500 -0
  1436. package/vendor/abseil-cpp/absl/time/time.h +1715 -0
  1437. package/vendor/abseil-cpp/absl/time/time_benchmark.cc +321 -0
  1438. package/vendor/abseil-cpp/absl/time/time_test.cc +1290 -0
  1439. package/vendor/abseil-cpp/absl/time/time_zone_test.cc +97 -0
  1440. package/vendor/abseil-cpp/absl/types/BUILD.bazel +336 -0
  1441. package/vendor/abseil-cpp/absl/types/CMakeLists.txt +371 -0
  1442. package/vendor/abseil-cpp/absl/types/any.h +517 -0
  1443. package/vendor/abseil-cpp/absl/types/any_exception_safety_test.cc +173 -0
  1444. package/vendor/abseil-cpp/absl/types/any_test.cc +778 -0
  1445. package/vendor/abseil-cpp/absl/types/bad_any_cast.cc +46 -0
  1446. package/vendor/abseil-cpp/absl/types/bad_any_cast.h +75 -0
  1447. package/vendor/abseil-cpp/absl/types/bad_optional_access.cc +48 -0
  1448. package/vendor/abseil-cpp/absl/types/bad_optional_access.h +78 -0
  1449. package/vendor/abseil-cpp/absl/types/bad_variant_access.cc +64 -0
  1450. package/vendor/abseil-cpp/absl/types/bad_variant_access.h +82 -0
  1451. package/vendor/abseil-cpp/absl/types/compare.h +599 -0
  1452. package/vendor/abseil-cpp/absl/types/compare_test.cc +389 -0
  1453. package/vendor/abseil-cpp/absl/types/internal/conformance_aliases.h +447 -0
  1454. package/vendor/abseil-cpp/absl/types/internal/conformance_archetype.h +978 -0
  1455. package/vendor/abseil-cpp/absl/types/internal/conformance_profile.h +933 -0
  1456. package/vendor/abseil-cpp/absl/types/internal/conformance_testing.h +1386 -0
  1457. package/vendor/abseil-cpp/absl/types/internal/conformance_testing_helpers.h +391 -0
  1458. package/vendor/abseil-cpp/absl/types/internal/conformance_testing_test.cc +1556 -0
  1459. package/vendor/abseil-cpp/absl/types/internal/optional.h +404 -0
  1460. package/vendor/abseil-cpp/absl/types/internal/parentheses.h +34 -0
  1461. package/vendor/abseil-cpp/absl/types/internal/span.h +139 -0
  1462. package/vendor/abseil-cpp/absl/types/internal/transform_args.h +246 -0
  1463. package/vendor/abseil-cpp/absl/types/internal/variant.h +1634 -0
  1464. package/vendor/abseil-cpp/absl/types/optional.h +779 -0
  1465. package/vendor/abseil-cpp/absl/types/optional_exception_safety_test.cc +292 -0
  1466. package/vendor/abseil-cpp/absl/types/optional_test.cc +1675 -0
  1467. package/vendor/abseil-cpp/absl/types/span.h +749 -0
  1468. package/vendor/abseil-cpp/absl/types/span_test.cc +848 -0
  1469. package/vendor/abseil-cpp/absl/types/variant.h +866 -0
  1470. package/vendor/abseil-cpp/absl/types/variant_benchmark.cc +222 -0
  1471. package/vendor/abseil-cpp/absl/types/variant_exception_safety_test.cc +532 -0
  1472. package/vendor/abseil-cpp/absl/types/variant_test.cc +2718 -0
  1473. package/vendor/abseil-cpp/absl/utility/BUILD.bazel +54 -0
  1474. package/vendor/abseil-cpp/absl/utility/CMakeLists.txt +44 -0
  1475. package/vendor/abseil-cpp/absl/utility/utility.h +350 -0
  1476. package/vendor/abseil-cpp/absl/utility/utility_test.cc +376 -0
  1477. package/vendor/abseil-cpp/ci/absl_alternate_options.h +29 -0
  1478. package/vendor/abseil-cpp/ci/cmake_common.sh +25 -0
  1479. package/vendor/abseil-cpp/ci/cmake_install_test.sh +58 -0
  1480. package/vendor/abseil-cpp/ci/linux_clang-latest_libcxx_asan_bazel.sh +102 -0
  1481. package/vendor/abseil-cpp/ci/linux_clang-latest_libcxx_bazel.sh +100 -0
  1482. package/vendor/abseil-cpp/ci/linux_clang-latest_libcxx_tsan_bazel.sh +97 -0
  1483. package/vendor/abseil-cpp/ci/linux_clang-latest_libstdcxx_bazel.sh +95 -0
  1484. package/vendor/abseil-cpp/ci/linux_docker_containers.sh +21 -0
  1485. package/vendor/abseil-cpp/ci/linux_gcc-floor_libstdcxx_bazel.sh +92 -0
  1486. package/vendor/abseil-cpp/ci/linux_gcc-latest_libstdcxx_bazel.sh +98 -0
  1487. package/vendor/abseil-cpp/ci/linux_gcc-latest_libstdcxx_cmake.sh +66 -0
  1488. package/vendor/abseil-cpp/ci/linux_gcc_alpine_cmake.sh +65 -0
  1489. package/vendor/abseil-cpp/ci/macos_xcode_bazel.sh +65 -0
  1490. package/vendor/abseil-cpp/ci/macos_xcode_cmake.sh +57 -0
  1491. package/vendor/abseil-cpp/conanfile.py +51 -0
  1492. package/vendor/abseil-cpp/create_lts.py +133 -0
@@ -0,0 +1,2835 @@
1
+ // Copyright 2017 The Abseil Authors.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // https://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ #include "absl/synchronization/mutex.h"
16
+
17
+ #ifdef _WIN32
18
+ #include <windows.h>
19
+ #ifdef ERROR
20
+ #undef ERROR
21
+ #endif
22
+ #else
23
+ #include <fcntl.h>
24
+ #include <pthread.h>
25
+ #include <sched.h>
26
+ #include <sys/time.h>
27
+ #endif
28
+
29
+ #include <assert.h>
30
+ #include <errno.h>
31
+ #include <stdio.h>
32
+ #include <stdlib.h>
33
+ #include <string.h>
34
+ #include <time.h>
35
+
36
+ #include <algorithm>
37
+ #include <atomic>
38
+ #include <cinttypes>
39
+ #include <cstddef>
40
+ #include <cstring>
41
+ #include <iterator>
42
+ #include <thread> // NOLINT(build/c++11)
43
+
44
+ #include "absl/base/attributes.h"
45
+ #include "absl/base/call_once.h"
46
+ #include "absl/base/config.h"
47
+ #include "absl/base/dynamic_annotations.h"
48
+ #include "absl/base/internal/atomic_hook.h"
49
+ #include "absl/base/internal/cycleclock.h"
50
+ #include "absl/base/internal/hide_ptr.h"
51
+ #include "absl/base/internal/low_level_alloc.h"
52
+ #include "absl/base/internal/raw_logging.h"
53
+ #include "absl/base/internal/spinlock.h"
54
+ #include "absl/base/internal/sysinfo.h"
55
+ #include "absl/base/internal/thread_identity.h"
56
+ #include "absl/base/internal/tsan_mutex_interface.h"
57
+ #include "absl/base/optimization.h"
58
+ #include "absl/base/port.h"
59
+ #include "absl/debugging/stacktrace.h"
60
+ #include "absl/debugging/symbolize.h"
61
+ #include "absl/synchronization/internal/graphcycles.h"
62
+ #include "absl/synchronization/internal/per_thread_sem.h"
63
+ #include "absl/time/time.h"
64
+
65
+ using absl::base_internal::CurrentThreadIdentityIfPresent;
66
+ using absl::base_internal::PerThreadSynch;
67
+ using absl::base_internal::SchedulingGuard;
68
+ using absl::base_internal::ThreadIdentity;
69
+ using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
70
+ using absl::synchronization_internal::GraphCycles;
71
+ using absl::synchronization_internal::GraphId;
72
+ using absl::synchronization_internal::InvalidGraphId;
73
+ using absl::synchronization_internal::KernelTimeout;
74
+ using absl::synchronization_internal::PerThreadSem;
75
+
76
+ extern "C" {
77
+ ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
78
+ std::this_thread::yield();
79
+ }
80
+ } // extern "C"
81
+
82
+ namespace absl {
83
+ ABSL_NAMESPACE_BEGIN
84
+
85
namespace {

// Under ThreadSanitizer (which has its own deadlock detector) default to
// ignoring cycles; otherwise abort the process when a cycle is found.
#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
#endif

// Runtime-selectable deadlock-detection mode (see
// SetMutexDeadlockDetectionMode) and global invariant-checking switch (see
// EnableMutexInvariantDebugging).
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
    kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);

// Atomic hooks holding user-registered callbacks; populated by the
// Register* functions below.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
    submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
    const char *msg, const void *obj, int64_t wait_cycles)>
    mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
    cond_var_tracer;
// Symbolizer used when formatting stack traces for event logging; defaults
// to absl::Symbolize.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
    bool (*)(const void *pc, char *out, int out_size)>
    symbolizer(absl::Symbolize);

}  // namespace
111
+
112
+ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
113
+ bool locking, bool trylock,
114
+ bool read_lock);
115
+
116
// Installs `fn` as the mutex profiling callback; it receives the wait time
// (in cycles) reported via the submit_profile_data hook.
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
}

// Installs `fn` as the mutex tracing callback; it receives an event message,
// the mutex address, and the wait time in cycles.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
                                    int64_t wait_cycles)) {
  mutex_tracer.Store(fn);
}

// Installs `fn` as the condition-variable tracing callback; it receives an
// event message and the CondVar address.
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
  cond_var_tracer.Store(fn);
}

// Installs `fn` as the symbolizer used for event-log stack traces, replacing
// the default absl::Symbolize.
void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
  symbolizer.Store(fn);
}
132
+
133
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
enum DelayMode { AGGRESSIVE, GENTLE };

// Process-wide Mutex tunables, computed once on first use and aligned to a
// cache line to avoid false sharing.
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
  absl::once_flag once;
  // 1500 on multiprocessors, 0 on uniprocessors (set in GetMutexGlobals()).
  int spinloop_iterations = 0;
  // Spin budgets indexed by DelayMode (AGGRESSIVE/GENTLE).
  int32_t mutex_sleep_spins[2] = {};
  // How long MutexDelay() sleeps once spinning/yielding has been exhausted.
  absl::Duration mutex_sleep_time;
};

// Measures the wall-clock cost of a single yield call; used below to size
// the uniprocessor sleep interval.
absl::Duration MeasureTimeToYield() {
  absl::Time before = absl::Now();
  ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
  return absl::Now() - before;
}

// Returns the lazily-initialized MutexGlobals singleton.
const MutexGlobals &GetMutexGlobals() {
  ABSL_CONST_INIT static MutexGlobals data;
  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
    const int num_cpus = absl::base_internal::NumCPUs();
    data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
    // If this a uniprocessor, only yield/sleep.
    // Real-time threads are often unable to yield, so the sleep time needs
    // to be long enough to keep the calling thread asleep until scheduling
    // happens.
    // If this is multiprocessor, allow spinning. If the mode is
    // aggressive then spin many times before yielding. If the mode is
    // gentle then spin only a few times before yielding. Aggressive spinning
    // is used to ensure that an Unlock() call, which must get the spin lock
    // for any thread to make progress gets it without undue delay.
    if (num_cpus > 1) {
      data.mutex_sleep_spins[AGGRESSIVE] = 5000;
      data.mutex_sleep_spins[GENTLE] = 250;
      data.mutex_sleep_time = absl::Microseconds(10);
    } else {
      data.mutex_sleep_spins[AGGRESSIVE] = 0;
      data.mutex_sleep_spins[GENTLE] = 0;
      // Clamp 5x the measured yield cost into [10us, 1ms].
      data.mutex_sleep_time = MeasureTimeToYield() * 5;
      data.mutex_sleep_time =
          std::min(data.mutex_sleep_time, absl::Milliseconds(1));
      data.mutex_sleep_time =
          std::max(data.mutex_sleep_time, absl::Microseconds(10));
    }
  });
  return data;
}
}  // namespace
182
+
183
namespace synchronization_internal {
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
// The returned value should be used as `c` for the next call to `MutexDelay`.
// Progression: spin while under the mode's spin budget, yield exactly once
// when the budget is reached, then sleep and reset the counter.
int MutexDelay(int32_t c, int mode) {
  const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
  const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
  if (c < limit) {
    // Spin.
    c++;
  } else {
    // Yielding/sleeping may reschedule us; tell TSan we are diverting out
    // of the mutex implementation while we do so.
    SchedulingGuard::ScopedEnable enable_rescheduling;
    ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
    if (c == limit) {
      // Yield once.
      ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
      c++;
    } else {
      // Then wait.
      absl::SleepFor(sleep_time);
      c = 0;
    }
    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
  }
  return c;
}
}  // namespace synchronization_internal
209
+
210
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0
// before making any change.
// This is used to set flags in mutex and condition variable words.
static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
                          intptr_t wait_until_clear) {
  for (;;) {
    intptr_t v = pv->load(std::memory_order_relaxed);
    if ((v & bits) == bits) {
      return;  // All requested bits are already set.
    }
    if ((v & wait_until_clear) != 0) {
      continue;  // Spin until the wait bits are clear.
    }
    if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                  std::memory_order_relaxed)) {
      return;  // Successfully published the new bits.
    }
    // CAS lost a race (or failed spuriously); reload and retry.
  }
}
226
+
227
+ // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
228
+ // "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
229
+ // before making any change.
230
+ // This is used to unset flags in mutex and condition variable words.
231
+ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
232
+ intptr_t wait_until_clear) {
233
+ intptr_t v;
234
+ do {
235
+ v = pv->load(std::memory_order_relaxed);
236
+ } while ((v & bits) != 0 &&
237
+ ((v & wait_until_clear) != 0 ||
238
+ !pv->compare_exchange_weak(v, v & ~bits,
239
+ std::memory_order_release,
240
+ std::memory_order_relaxed)));
241
+ }
242
+
243
+ //------------------------------------------------------------------
244
+
245
// Data for doing deadlock detection.
// Spinlock guarding the lock-acquisition graph below; SCHEDULE_KERNEL_ONLY
// because it may be taken from deep inside the Mutex implementation.
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

// Graph used to detect deadlocks.
// (Pointer is null until initialized elsewhere in this file.)
ABSL_CONST_INIT static GraphCycles *deadlock_graph
    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
252
+
253
+ //------------------------------------------------------------------
254
+ // An event mechanism for debugging mutex use.
255
+ // It also allows mutexes to be given names for those who can't handle
256
+ // addresses, and instead like to give their data structures names like
257
+ // "Henry", "Fido", or "Rupert IV, King of Yondavia".
258
+
259
namespace {  // to prevent name pollution
enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
             // NOTE: order must match the event_properties table below,
             // which is indexed by these values.
  // Mutex events
  SYNCH_EV_TRYLOCK_SUCCESS,
  SYNCH_EV_TRYLOCK_FAILED,
  SYNCH_EV_READERTRYLOCK_SUCCESS,
  SYNCH_EV_READERTRYLOCK_FAILED,
  SYNCH_EV_LOCK,
  SYNCH_EV_LOCK_RETURNING,
  SYNCH_EV_READERLOCK,
  SYNCH_EV_READERLOCK_RETURNING,
  SYNCH_EV_UNLOCK,
  SYNCH_EV_READERUNLOCK,

  // CondVar events
  SYNCH_EV_WAIT,
  SYNCH_EV_WAIT_RETURNING,
  SYNCH_EV_SIGNAL,
  SYNCH_EV_SIGNALALL,
};

enum {                    // Event flags
  SYNCH_F_R = 0x01,       // reader event
  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock

  SYNCH_F_LCK_W = SYNCH_F_LCK,
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
};
}  // anonymous namespace

// Properties of the events, indexed by the SYNCH_EV_* values above.
static const struct {
  int flags;
  const char *msg;
} event_properties[] = {
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
    {0, "TryLock failed "},
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
    {0, "ReaderTryLock failed "},
    {0, "Lock blocking "},
    {SYNCH_F_LCK_W, "Lock returning "},
    {0, "ReaderLock blocking "},
    {SYNCH_F_LCK_R, "ReaderLock returning "},
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
    {0, "Wait on "},
    {0, "Wait unblocked "},
    {0, "Signal on "},
    {0, "SignalAll on "},
};

// Guards the synch_event hash table below and the refcount/next fields of
// its entries.
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
318
+
319
// Debug metadata attached to a Mutex or CondVar: optional name, logging
// flag, and invariant callback.  Entries live in a trivial open-hashing
// table keyed by (hidden) object address.
static struct SynchEvent {  // this is a trivial hash table for the events
  // struct is freed when refcount reaches 0
  int refcount ABSL_GUARDED_BY(synch_event_mu);

  // buckets have linear, 0-terminated chains
  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);

  // Constant after initialization
  uintptr_t masked_addr;  // object at this address is called "name"

  // No explicit synchronization used.  Instead we assume that the
  // client who enables/disables invariants/logging on a Mutex does so
  // while the Mutex is not being concurrently accessed by others.
  void (*invariant)(void *arg);  // called on each event
  void *arg;                     // first arg to (*invariant)()
  bool log;                      // logging turned on

  // Constant after initialization
  char name[1];  // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
339
+
340
+ // Ensure that the object at "addr" has a SynchEvent struct associated with it,
341
+ // set "bits" in the word there (waiting until lockbit is clear before doing
342
+ // so), and return a refcounted reference that will remain valid until
343
+ // UnrefSynchEvent() is called. If a new SynchEvent is allocated,
344
+ // the string name is copied into it.
345
+ // When used with a mutex, the caller should also ensure that kMuEvent
346
+ // is set in the mutex word, and similarly for condition variables and kCVEvent.
347
+ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
348
+ const char *name, intptr_t bits,
349
+ intptr_t lockbit) {
350
+ uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
351
+ SynchEvent *e;
352
+ // first look for existing SynchEvent struct..
353
+ synch_event_mu.Lock();
354
+ for (e = synch_event[h];
355
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
356
+ e = e->next) {
357
+ }
358
+ if (e == nullptr) { // no SynchEvent struct found; make one.
359
+ if (name == nullptr) {
360
+ name = "";
361
+ }
362
+ size_t l = strlen(name);
363
+ e = reinterpret_cast<SynchEvent *>(
364
+ base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
365
+ e->refcount = 2; // one for return value, one for linked list
366
+ e->masked_addr = base_internal::HidePtr(addr);
367
+ e->invariant = nullptr;
368
+ e->arg = nullptr;
369
+ e->log = false;
370
+ strcpy(e->name, name); // NOLINT(runtime/printf)
371
+ e->next = synch_event[h];
372
+ AtomicSetBits(addr, bits, lockbit);
373
+ synch_event[h] = e;
374
+ } else {
375
+ e->refcount++; // for return value
376
+ }
377
+ synch_event_mu.Unlock();
378
+ return e;
379
+ }
380
+
381
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
// Storage was obtained from LowLevelAlloc in EnsureSynchEvent().
static void DeleteSynchEvent(SynchEvent *e) {
  base_internal::LowLevelAlloc::Free(e);
}
385
+
386
+ // Decrement the reference count of *e, or do nothing if e==null.
387
+ static void UnrefSynchEvent(SynchEvent *e) {
388
+ if (e != nullptr) {
389
+ synch_event_mu.Lock();
390
+ bool del = (--(e->refcount) == 0);
391
+ synch_event_mu.Unlock();
392
+ if (del) {
393
+ DeleteSynchEvent(e);
394
+ }
395
+ }
396
+ }
397
+
398
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
                             intptr_t lockbit) {
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
  SynchEvent **pe;
  SynchEvent *e;
  synch_event_mu.Lock();
  // Walk the bucket chain, keeping a pointer-to-link so we can unlink.
  for (pe = &synch_event[h];
       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       pe = &e->next) {
  }
  bool del = false;
  if (e != nullptr) {
    // Unlink from the chain and drop the list's reference.
    *pe = e->next;
    del = (--(e->refcount) == 0);
  }
  AtomicClearBits(addr, bits, lockbit);
  synch_event_mu.Unlock();
  if (del) {
    // Freed outside the spinlock; the entry is no longer reachable.
    DeleteSynchEvent(e);
  }
}
422
+
423
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
// called.  Returns null when no event struct is associated with "addr".
static SynchEvent *GetSynchEvent(const void *addr) {
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
  SynchEvent *e;
  synch_event_mu.Lock();
  for (e = synch_event[h];
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       e = e->next) {
  }
  if (e != nullptr) {
    e->refcount++;  // reference for the caller; dropped via UnrefSynchEvent
  }
  synch_event_mu.Unlock();
  return e;
}
440
+
441
+ // Called when an event "ev" occurs on a Mutex of CondVar "obj"
442
+ // if event recording is on
443
+ static void PostSynchEvent(void *obj, int ev) {
444
+ SynchEvent *e = GetSynchEvent(obj);
445
+ // logging is on if event recording is on and either there's no event struct,
446
+ // or it explicitly says to log
447
+ if (e == nullptr || e->log) {
448
+ void *pcs[40];
449
+ int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
450
+ // A buffer with enough space for the ASCII for all the PCs, even on a
451
+ // 64-bit machine.
452
+ char buffer[ABSL_ARRAYSIZE(pcs) * 24];
453
+ int pos = snprintf(buffer, sizeof (buffer), " @");
454
+ for (int i = 0; i != n; i++) {
455
+ int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
456
+ " %p", pcs[i]);
457
+ if (b < 0 ||
458
+ static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
459
+ break;
460
+ }
461
+ pos += b;
462
+ }
463
+ ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
464
+ (e == nullptr ? "" : e->name), buffer);
465
+ }
466
+ const int flags = event_properties[ev].flags;
467
+ if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
468
+ // Calling the invariant as is causes problems under ThreadSanitizer.
469
+ // We are currently inside of Mutex Lock/Unlock and are ignoring all
470
+ // memory accesses and synchronization. If the invariant transitively
471
+ // synchronizes something else and we ignore the synchronization, we will
472
+ // get false positive race reports later.
473
+ // Reuse EvalConditionAnnotated to properly call into user code.
474
+ struct local {
475
+ static bool pred(SynchEvent *ev) {
476
+ (*ev->invariant)(ev->arg);
477
+ return false;
478
+ }
479
+ };
480
+ Condition cond(&local::pred, e);
481
+ Mutex *mu = static_cast<Mutex *>(obj);
482
+ const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
483
+ const bool trylock = (flags & SYNCH_F_TRY) != 0;
484
+ const bool read_lock = (flags & SYNCH_F_R) != 0;
485
+ EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
486
+ }
487
+ UnrefSynchEvent(e);
488
+ }
489
+
490
+ //------------------------------------------------------------------
491
+
492
// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
// whether it has a timeout, the condition, exclusive/shared, and whether a
// condition variable wait has an associated Mutex (as opposed to another
// type of lock).  It also points to the PerThreadSynch struct of its thread.
// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
//
// This structure is held on the stack rather than directly in
// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
// while waiting on one Mutex, the implementation calls a client callback
// (such as a Condition function) that acquires another Mutex. We don't
// strictly need to allow this, but programmers become confused if we do not
// allow them to use functions such a LOG() within Condition functions.  The
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
                  PerThreadSynch *thread_arg,
                  std::atomic<intptr_t> *cv_word_arg)
      : how(how_arg),
        cond(cond_arg),
        timeout(timeout_arg),
        cvmu(cvmu_arg),
        thread(thread_arg),
        cv_word(cv_word_arg),
        contention_start_cycles(base_internal::CycleClock::Now()),
        should_submit_contention_data(false) {}

  const Mutex::MuHow how;  // How this thread needs to wait.
  const Condition *cond;   // The condition that this thread is waiting for.
                           // In Mutex, this field is set to zero if a timeout
                           // expires.
  KernelTimeout timeout;  // timeout expiry---absolute time
                          // In Mutex, this field is set to zero if a timeout
                          // expires.
  Mutex *const cvmu;             // used for transfer from cond var to mutex
  PerThreadSynch *const thread;  // thread that is waiting

  // If not null, thread should be enqueued on the CondVar whose state
  // word is cv_word instead of queueing normally on the Mutex.
  std::atomic<intptr_t> *cv_word;

  int64_t contention_start_cycles;  // Time (in cycles) when this thread started
                                    // to contend for the mutex.
  // Whether this wait should be reported to the contention profiler
  // (set elsewhere in the Mutex implementation — see submit_profile_data).
  bool should_submit_contention_data;
};

// Per-thread record of the locks currently held, maintained for deadlock
// detection.
struct SynchLocksHeld {
  int n;          // number of valid entries in locks[]
  bool overflow;  // true iff we overflowed the array at some point
  struct {
    Mutex *mu;      // lock acquired
    int32_t count;  // times acquired
    GraphId id;     // deadlock_graph id of acquired lock
  } locks[40];
  // If a thread overfills the array during deadlock detection, we
  // continue, discarding information as needed.  If no overflow has
  // taken place, we can provide more error checking, such as
  // detecting when a thread releases a lock it does not hold.
};
552
+
553
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
static PerThreadSynch *const kPerThreadSynchNull =
    reinterpret_cast<PerThreadSynch *>(1);

// Allocates an empty SynchLocksHeld record from the low-level allocator.
static SynchLocksHeld *LocksHeldAlloc() {
  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
      base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
  ret->n = 0;
  ret->overflow = false;
  return ret;
}

// Return the PerThreadSynch-struct for this thread.
static PerThreadSynch *Synch_GetPerThread() {
  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
  return &identity->per_thread_synch;
}

// As Synch_GetPerThread(), but bracketed with
// ABSL_TSAN_MUTEX_{PRE,POST}_DIVERT(mu, 0) when a mutex is supplied.
static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
  if (mu) {
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  }
  PerThreadSynch *w = Synch_GetPerThread();
  if (mu) {
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
  }
  return w;
}

// Returns this thread's SynchLocksHeld record, allocating it on first use.
static SynchLocksHeld *Synch_GetAllLocks() {
  PerThreadSynch *s = Synch_GetPerThread();
  if (s->all_locks == nullptr) {
    s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
  }
  return s->all_locks;
}
590
+
591
+ // Post on "w"'s associated PerThreadSem.
592
+ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
593
+ if (mu) {
594
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
595
+ // We miss synchronization around passing PerThreadSynch between threads
596
+ // since it happens inside of the Mutex code, so we need to ignore all
597
+ // accesses to the object.
598
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
599
+ PerThreadSem::Post(w->thread_identity());
600
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
601
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
602
+ } else {
603
+ PerThreadSem::Post(w->thread_identity());
604
+ }
605
+ }
606
+
607
+ // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
608
+ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
609
+ if (mu) {
610
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
611
+ }
612
+ assert(w == Synch_GetPerThread());
613
+ static_cast<void>(w);
614
+ bool res = PerThreadSem::Wait(t);
615
+ if (mu) {
616
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
617
+ }
618
+ return res;
619
+ }
620
+
621
// We're in a fatal signal handler that hopes to use Mutex and to get
// lucky by not deadlocking.  We try to improve its chances of success
// by effectively disabling some of the consistency checks.  This will
// prevent certain ABSL_RAW_CHECK() statements from being triggered when
// re-rentry is detected.  The ABSL_RAW_CHECK() statements are those in the
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
  // Fix the per-thread state only if it exists.
  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
  if (identity != nullptr) {
    identity->per_thread_synch.suppress_fatal_errors = true;
  }
  // Don't do deadlock detection when we are already failing.
  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
                                 std::memory_order_release);
}
637
+
638
+ // --------------------------time support
639
+
640
// Return the current time plus the timeout.  Use the same clock as
// PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
// such a choice when a deadline is given directly.
static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
#ifndef _WIN32
  // gettimeofday() matches the clock used by the semaphore wait path here.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return absl::TimeFromTimeval(tv) + timeout;
#else
  return absl::Now() + timeout;
#endif
}
652
+
653
// --------------------------Mutexes

// In the layout below, the msb of the bottom byte is currently unused.  Also,
// the following constraints were considered in choosing the layout:
//  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
//    0xcd) are illegal: reader and writer lock both held.
//  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
//    bit-twiddling trick in Mutex::Unlock().
//  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
//    to enable the bit-twiddling trick in CheckForMutexCorruption().
static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock
static const intptr_t kMuDesig = 0x0002L;   // there's a designated waker
static const intptr_t kMuWait = 0x0004L;    // threads are waiting
static const intptr_t kMuWriter = 0x0008L;  // a writer holds the lock
static const intptr_t kMuEvent = 0x0010L;   // record this mutex's events
// INVARIANT1:  there's a thread that was blocked on the mutex, is
// no longer, yet has not yet acquired the mutex.  If there's a
// designated waker, all threads can avoid taking the slow path in
// unlock because the designated waker will subsequently acquire
// the lock and wake someone.  To maintain INVARIANT1 the bit is
// set when a thread is unblocked(INV1a), and threads that were
// unblocked reset the bit when they either acquire or re-block
// (INV1b).
static const intptr_t kMuWrWait = 0x0020L;  // runnable writer is waiting
                                            // for a reader
static const intptr_t kMuSpin = 0x0040L;    // spinlock protects wait list
static const intptr_t kMuLow = 0x00ffL;     // mask all mutex bits
static const intptr_t kMuHigh = ~kMuLow;    // mask pointer/reader count

// Hack to make constant values available to gdb pretty printer
enum {
  kGdbMuSpin = kMuSpin,
  kGdbMuEvent = kMuEvent,
  kGdbMuWait = kMuWait,
  kGdbMuWriter = kMuWriter,
  kGdbMuDesig = kMuDesig,
  kGdbMuWrWait = kMuWrWait,
  kGdbMuReader = kMuReader,
  kGdbMuLow = kMuLow,
};

// kMuWrWait implies kMuWait.
// kMuReader and kMuWriter are mutually exclusive.
// If kMuReader is zero, there are no readers.
// Otherwise, if kMuWait is zero, the high order bits contain a count of the
// number of readers.  Otherwise, the reader count is held in
// PerThreadSynch::readers of the most recently queued waiter, again in the
// bits above kMuLow.
static const intptr_t kMuOne = 0x0100;  // a count of one reader

// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)

// Waiter pointers share the mutex word with the flag bits above, so waiter
// alignment must leave all of kMuLow free.
static_assert(PerThreadSynch::kAlignment > kMuLow,
              "PerThreadSynch::kAlignment must be greater than kMuLow");
709
+
710
// This struct contains various bitmasks to be used in
// acquiring and releasing a mutex in a particular mode.
struct MuHowS {
  // if all the bits in fast_need_zero are zero, the lock can be acquired by
  // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff
  // this is the designated waker.
  intptr_t fast_need_zero;
  intptr_t fast_or;
  intptr_t fast_add;

  intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)

  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
                                // zero a reader can acquire a read share by
                                // setting the reader bit and incrementing
                                // the reader count (in last waiter since
                                // we're now slow-path).  kMuWrWait be may
                                // be ignored if we already waited once.
};

static const MuHowS kSharedS = {
    // shared or read lock
    kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
    kMuReader,                        // fast_or
    kMuOne,                           // fast_add
    kMuWriter | kMuWait,              // slow_need_zero
    kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
};
static const MuHowS kExclusiveS = {
    // exclusive or write lock
    kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
    kMuWriter,                         // fast_or
    0,                                 // fast_add
    kMuWriter | kMuReader,             // slow_need_zero
    ~static_cast<intptr_t>(0),         // slow_inc_need_zero
};
static const Mutex::MuHow kShared = &kSharedS;        // shared lock
static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
748
+
749
// Mirrors NDEBUG as a constant so debug-only branches (e.g. in ~Mutex)
// compile away in release builds.
#ifdef NDEBUG
static constexpr bool kDebugMode = false;
#else
static constexpr bool kDebugMode = true;
#endif

#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Maps our lock mode onto TSan's mutex annotation flags (read lock vs.
// exclusive lock).
static unsigned TsanFlags(Mutex::MuHow how) {
  return how == kShared ? __tsan_mutex_read_lock : 0;
}
#endif

// Always false here; ~Mutex consults it before forgetting event state.
static bool DebugOnlyIsExiting() {
  return false;
}
764
+
765
// Destructor: releases debug bookkeeping (the SynchEvent if event recording
// was on; deadlock info in debug builds) and notifies TSan.
Mutex::~Mutex() {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
    ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
  }
  if (kDebugMode) {
    this->ForgetDeadlockInfo();
  }
  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
775
+
776
// Associates `name` with this mutex for event logging and turns logging on.
void Mutex::EnableDebugLog(const char *name) {
  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
  e->log = true;
  UnrefSynchEvent(e);
}

// Globally enables/disables checking of invariants registered via
// Mutex::EnableInvariantDebugging.
void EnableMutexInvariantDebugging(bool enabled) {
  synch_check_invariants.store(enabled, std::memory_order_release);
}

// Registers `invariant(arg)` to be run on this mutex's lock/unlock events
// (see PostSynchEvent).  A no-op unless invariant checking is globally
// enabled and `invariant` is non-null.
void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
                                     void *arg) {
  if (synch_check_invariants.load(std::memory_order_acquire) &&
      invariant != nullptr) {
    SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
    e->invariant = invariant;
    e->arg = arg;
    UnrefSynchEvent(e);
  }
}

// Selects what the deadlock detector does when it finds a cycle.
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
  synch_deadlock_detection.store(mode, std::memory_order_release);
}
800
+
801
// Return true iff threads x and y are part of the same equivalence
// class of waiters. An equivalence class is defined as the set of
// waiters with the same condition, type of lock, and thread priority.
//
// Requires that x and y be waiting on the same Mutex queue.
static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
         Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
810
+
811
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer. (The pointer occupies the high bits, kMuHigh; the low
// bits hold mutex state flags.)
static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
}
816
+
817
+ // The next several routines maintain the per-thread next and skip fields
818
+ // used in the Mutex waiter queue.
819
+ // The queue is a circular singly-linked list, of which the "head" is the
820
// last element, and head->next is the first element.
821
+ // The skip field has the invariant:
822
+ // For thread x, x->skip is one of:
823
+ // - invalid (iff x is not in a Mutex wait queue),
824
+ // - null, or
825
+ // - a pointer to a distinct thread waiting later in the same Mutex queue
826
+ // such that all threads in [x, x->skip] have the same condition, priority
827
+ // and lock type (MuEquivalentWaiter() is true for all pairs in [x,
828
+ // x->skip]).
829
+ // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
830
+ //
831
+ // By the spec of MuEquivalentWaiter(), it is not necessary when removing the
832
// first runnable thread y from the front of a Mutex queue to adjust the skip
833
+ // field of another thread x because if x->skip==y, x->skip must (have) become
834
+ // invalid before y is removed. The function TryRemove can remove a specified
835
+ // thread from an arbitrary position in the queue whether runnable or not, so
836
+ // it fixes up skip fields that would otherwise be left dangling.
837
+ // The statement
838
+ // if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
839
+ // maintains the invariant provided x is not the last waiter in a Mutex queue
840
+ // The statement
841
+ // if (x->skip != null) { x->skip = x->skip->skip; }
842
+ // maintains the invariant.
843
+
844
+ // Returns the last thread y in a mutex waiter queue such that all threads in
845
+ // [x, y] inclusive share the same condition. Sets skip fields of some threads
846
+ // in that range to optimize future evaluation of Skip() on x values in
847
+ // the range. Requires thread x is in a mutex waiter queue.
848
+ // The locking is unusual. Skip() is called under these conditions:
849
+ // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
850
+ // - Mutex is held in call from UnlockSlow() by last unlocker, with
851
+ // maybe_unlocking == true
852
+ // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
853
+ // UnlockSlow()) and TryRemove()
854
+ // These cases are mutually exclusive, so Skip() never runs concurrently
855
+ // with itself on the same Mutex. The skip chain is used in these other places
856
+ // that cannot occur concurrently:
857
+ // - FixSkip() (from TryRemove()) - spinlock and Mutex are held)
858
+ // - Dequeue() (with spinlock and Mutex held)
859
+ // - UnlockSlow() (with spinlock and Mutex held)
860
+ // A more complex case is Enqueue()
861
+ // - Enqueue() (with spinlock held and maybe_unlocking == false)
862
+ // This is the first case in which Skip is called, above.
863
+ // - Enqueue() (without spinlock held; but queue is empty and being freshly
864
+ // formed)
865
+ // - Enqueue() (with spinlock held and maybe_unlocking == true)
866
+ // The first case has mutual exclusion, and the second isolation through
867
+ // working on an otherwise unreachable data structure.
868
+ // In the last case, Enqueue() is required to change no skip/next pointers
869
+ // except those in the added node and the former "head" node. This implies
870
+ // that the new node is added after head, and so must be the new head or the
871
+ // new front of the queue.
872
// Advance along x's skip chain to its last member, path-compressing the
// chain as a side effect (see the invariant discussion above). Returns x
// itself when x->skip == nullptr.
static PerThreadSynch *Skip(PerThreadSynch *x) {
  PerThreadSynch *x0 = nullptr;
  PerThreadSynch *x1 = x;
  PerThreadSynch *x2 = x->skip;
  if (x2 != nullptr) {
    // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
    // such that x1 == x0->skip && x2 == x1->skip
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;  // short-circuit skip from x0 to x2
    }
    x->skip = x1;  // short-circuit skip from x to result
  }
  return x1;
}
886
+
887
// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
  if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
    if (to_be_removed->skip != nullptr) {
      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
      ancestor->skip = ancestor->next;             // can skip one past ancestor
    } else {
      ancestor->skip = nullptr;  // can't skip at all
    }
  }
}

// Forward declaration; used by Enqueue() when the waiter carries a cv_word.
static void CondVarEnqueue(SynchWaitParams *waitp);
904
+
905
+ // Enqueue thread "waitp->thread" on a waiter queue.
906
+ // Called with mutex spinlock held if head != nullptr
907
+ // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
908
+ // idempotent; it alters no state associated with the existing (empty)
909
+ // queue.
910
+ //
911
+ // If waitp->cv_word == nullptr, queue the thread at either the front or
912
+ // the end (according to its priority) of the circular mutex waiter queue whose
913
+ // head is "head", and return the new head. mu is the previous mutex state,
914
+ // which contains the reader count (perhaps adjusted for the operation in
915
+ // progress) if the list was empty and a read lock held, and the holder hint if
916
+ // the list was empty and a write lock held. (flags & kMuIsCond) indicates
917
+ // whether this thread was transferred from a CondVar or is waiting for a
918
+ // non-trivial condition. In this case, Enqueue() never returns nullptr
919
+ //
920
+ // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
921
+ // returned. This mechanism is used by CondVar to queue a thread on the
922
+ // condition variable queue instead of the mutex queue in implementing Wait().
923
+ // In this case, Enqueue() can return nullptr (if head==nullptr).
924
// Enqueue waitp->thread on the waiter queue whose head is "head" (see the
// contract in the comment block above). Maintains the skip-chain invariant
// and, where pthread scheduling info is available, priority-FIFO ordering.
static PerThreadSynch *Enqueue(PerThreadSynch *head,
                               SynchWaitParams *waitp, intptr_t mu, int flags) {
  // If we have been given a cv_word, call CondVarEnqueue() and return
  // the previous head of the Mutex waiter queue.
  if (waitp->cv_word != nullptr) {
    CondVarEnqueue(waitp);
    return head;
  }

  PerThreadSynch *s = waitp->thread;
  ABSL_RAW_CHECK(
      s->waitp == nullptr ||    // normal case
          s->waitp == waitp ||  // Fer()---transfer from condition variable
          s->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  s->waitp = waitp;
  s->skip = nullptr;   // maintain skip invariant (see above)
  s->may_skip = true;  // always true on entering queue
  s->wake = false;     // not being woken
  s->cond_waiter = ((flags & kMuIsCond) != 0);
  if (head == nullptr) {         // s is the only waiter
    s->next = s;                 // it's the only entry in the cycle
    s->readers = mu;             // reader count is from mu word
    s->maybe_unlocking = false;  // no one is searching an empty list
    head = s;                    // s is new head
  } else {
    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
    int64_t now_cycles = base_internal::CycleClock::Now();
    if (s->next_priority_read_cycles < now_cycles) {
      // Every so often, update our idea of the thread's priority.
      // pthread_getschedparam() is 5% of the block/wakeup time;
      // base_internal::CycleClock::Now() is 0.5%.
      int policy;
      struct sched_param param;
      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
      if (err != 0) {
        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
      } else {
        s->priority = param.sched_priority;
        // Re-read at most roughly once per second (one CycleClock period).
        s->next_priority_read_cycles =
            now_cycles +
            static_cast<int64_t>(base_internal::CycleClock::Frequency());
      }
    }
    if (s->priority > head->priority) {  // s's priority is above head's
      // try to put s in priority-fifo order, or failing that at the front.
      if (!head->maybe_unlocking) {
        // No unlocker can be scanning the queue, so we can insert into the
        // middle of the queue.
        //
        // Within a skip chain, all waiters have the same priority, so we can
        // skip forward through the chains until we find one with a lower
        // priority than the waiter to be enqueued.
        PerThreadSynch *advance_to = head;  // next value of enqueue_after
        do {
          enqueue_after = advance_to;
          // (side-effect: optimizes skip chain)
          advance_to = Skip(enqueue_after->next);
        } while (s->priority <= advance_to->priority);
        // termination guaranteed because s->priority > head->priority
        // and head is the end of a skip chain
      } else if (waitp->how == kExclusive &&
                 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
        // An unlocker could be scanning the queue, but we know it will recheck
        // the queue front for writers that have no condition, which is what s
        // is, so an insert at front is safe.
        enqueue_after = head;  // add after head, at front
      }
    }
#endif
    if (enqueue_after != nullptr) {
      s->next = enqueue_after->next;
      enqueue_after->next = s;

      // enqueue_after can be: head, Skip(...), or cur.
      // The first two imply enqueue_after->skip == nullptr, and
      // the last is used only if MuEquivalentWaiter(s, cur).
      // We require this because clearing enqueue_after->skip
      // is impossible; enqueue_after's predecessors might also
      // incorrectly skip over s if we were to allow other
      // insertion points.
      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
                         MuEquivalentWaiter(enqueue_after, s),
                     "Mutex Enqueue failure");

      if (enqueue_after != head && enqueue_after->may_skip &&
          MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
        // enqueue_after can skip to its new successor, s
        enqueue_after->skip = enqueue_after->next;
      }
      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
        s->skip = s->next;                   // s may skip to its successor
      }
    } else {  // enqueue not done any other way, so
              // we're inserting s at the back
      // s will become new head; copy data from head into it
      s->next = head->next;  // add s after head
      head->next = s;
      s->readers = head->readers;  // reader count is from previous head
      s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
      if (head->may_skip && MuEquivalentWaiter(head, s)) {
        // head now has successor; may skip
        head->skip = s;
      }
      head = s;  // s is new head
    }
  }
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
  return head;
}
1035
+
1036
// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
// whose last element is head. The new head element is returned, or null
// if the list is made empty.
// Dequeue is called with both spinlock and Mutex held.
static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
  PerThreadSynch *w = pw->next;
  pw->next = w->next;  // snip w out of list
  if (head == w) {     // we removed the head
    head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
    // pw can skip to its new successor
    if (pw->next->skip !=
        nullptr) {  // either skip to its successors skip target
      pw->skip = pw->next->skip;
    } else {  // or to pw's successor
      pw->skip = pw->next;
    }
  }
  return head;
}
1056
+
1057
// Traverse the elements [ pw->next, h] of the circular list whose last element
// is head.
// Remove all elements with wake==true and place them in the
// singly-linked list wake_list in the order found. Assumes that
// there is only one such element if the element has how == kExclusive.
// Return the new head.
static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
                                          PerThreadSynch *pw,
                                          PerThreadSynch **wake_tail) {
  PerThreadSynch *orig_h = head;
  PerThreadSynch *w = pw->next;
  bool skipped = false;
  do {
    if (w->wake) {  // remove this element
      ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
      // we're removing pw's successor so either pw->skip is zero or we should
      // already have removed pw since if pw->skip!=null, pw has the same
      // condition as w.
      head = Dequeue(head, pw);
      w->next = *wake_tail;               // keep list terminated
      *wake_tail = w;                     // add w to wake_list;
      wake_tail = &w->next;               // next addition to end
      if (w->waitp->how == kExclusive) {  // wake at most 1 writer
        break;
      }
    } else {         // not waking this one; skip
      pw = Skip(w);  // skip as much as possible
      skipped = true;
    }
    w = pw->next;
    // We want to stop processing after we've considered the original head,
    // orig_h. We can't test for w==orig_h in the loop because w may skip over
    // it; we are guaranteed only that w's predecessor will not skip over
    // orig_h. When we've considered orig_h, either we've processed it and
    // removed it (so orig_h != head), or we considered it and skipped it (so
    // skipped==true && pw == head because skipping from head always skips by
    // just one, leaving pw pointing at head). So we want to
    // continue the loop with the negation of that expression.
  } while (orig_h == head && (pw != head || !skipped));
  return head;
}
1098
+
1099
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // acquire spinlock & lock; proceed only if there are waiters and the mutex
  // is not held or being spun on by anyone else
  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
      mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
    PerThreadSynch *h = GetPerThreadSynch(v);
    if (h != nullptr) {
      PerThreadSynch *pw = h;  // pw is w's predecessor
      PerThreadSynch *w;
      if ((w = pw->next) != s) {  // search for thread,
        do {                      // processing at least one element
          // If the current element isn't equivalent to the waiter to be
          // removed, we can skip the entire chain.
          if (!MuEquivalentWaiter(s, w)) {
            pw = Skip(w);  // so skip all that won't match
            // we don't have to worry about dangling skip fields
            // in the threads we skipped; none can point to s
            // because they are in a different equivalence class.
          } else {          // seeking same condition
            FixSkip(w, s);  // fix up any skip pointer from w to s
            pw = w;
          }
          // don't search further if we found the thread, or we're about to
          // process the first thread again.
        } while ((w = pw->next) != s && pw != h);
      }
      if (w == s) {  // found thread; remove it
        // pw->skip may be non-zero here; the loop above ensured that
        // no ancestor of s can skip to s, so removal is safe anyway.
        h = Dequeue(h, pw);
        s->next = nullptr;
        s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
      }
    }
    intptr_t nv;
    do {  // release spinlock and lock
      v = mu_.load(std::memory_order_relaxed);
      nv = v & (kMuDesig | kMuEvent);
      if (h != nullptr) {
        nv |= kMuWait | reinterpret_cast<intptr_t>(h);
        h->readers = 0;              // we hold writer lock
        h->maybe_unlocking = false;  // finished unlocking
      }
    } while (!mu_.compare_exchange_weak(v, nv,
                                        std::memory_order_release,
                                        std::memory_order_relaxed));
  }
}
1152
+
1153
// Wait until thread "s", which must be the current thread, is removed from
// this mutex's waiter queue. If "s->waitp->timeout" has a timeout and it
// expires while "s" is still queued, remove "s" from the queue ourselves
// (spinning until the removal succeeds) and clear the timeout/condition so
// later wakeups treat "s" as an unconditional waiter.
void Mutex::Block(PerThreadSynch *s) {
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
      // After a timeout, we go into a spin loop until we remove ourselves
      // from the queue, or someone else removes us. We can't be sure to be
      // able to remove ourselves in a single lock acquisition because this
      // mutex may be held, and the holder has the right to read the centre
      // of the waiter queue without holding the spinlock.
      this->TryRemove(s);
      int c = 0;
      while (s->next != nullptr) {
        c = synchronization_internal::MutexDelay(c, GENTLE);
        this->TryRemove(s);
      }
      if (kDebugMode) {
        // This ensures that we test the case that TryRemove() is called when s
        // is not on the queue.
        this->TryRemove(s);
      }
      s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied
      s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
    }
  }
  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
                 "detected illegal recursion in Mutex code");
  s->waitp = nullptr;
}
1185
+
1186
// Wake thread w (mark it kAvailable and post its semaphore), and return the
// next thread in the wake list. w->next is cleared before the release store
// so the woken thread sees itself off the queue.
PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
  PerThreadSynch *next = w->next;
  w->next = nullptr;
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  IncrementSynchSem(this, w);

  return next;
}
1195
+
1196
// Return the deadlock-graph node id for mu, lazily constructing the graph
// itself (via LowLevelAlloc placement-new) on first use.
// Requires deadlock_graph_mu to be held.
static GraphId GetGraphIdLocked(Mutex *mu)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
  if (!deadlock_graph) {  // (re)create the deadlock graph.
    deadlock_graph =
        new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
            GraphCycles;
  }
  return deadlock_graph->GetId(mu);
}
1205
+
1206
// Locking wrapper around GetGraphIdLocked().
static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
  deadlock_graph_mu.Lock();
  GraphId id = GetGraphIdLocked(mu);
  deadlock_graph_mu.Unlock();
  return id;
}
1212
+
1213
// Record a lock acquisition. This is used in debug mode for deadlock
// detection. The held_locks pointer points to the relevant data
// structure for each case. If the lock is already recorded, its count is
// incremented; otherwise it is appended (or "overflow" is set when the
// fixed-size array is full).
static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
  int n = held_locks->n;
  int i = 0;
  while (i != n && held_locks->locks[i].id != id) {
    i++;
  }
  if (i == n) {
    if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
      held_locks->overflow = true;  // lost some data
    } else {                        // we have room for lock
      held_locks->locks[i].mu = mu;
      held_locks->locks[i].count = 1;
      held_locks->locks[i].id = id;
      held_locks->n = n + 1;
    }
  } else {
    held_locks->locks[i].count++;
  }
}
1235
+
1236
// Record a lock release. Each call to LockEnter(mu, id, x) should be
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
// The event is not processed when it is not needed, i.e. when deadlock
// detection is disabled.
static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
  int n = held_locks->n;
  int i = 0;
  while (i != n && held_locks->locks[i].id != id) {
    i++;
  }
  if (i == n) {
    if (!held_locks->overflow) {
      // The deadlock id may have been reassigned after ForgetDeadlockInfo,
      // but in that case mu should still be present.
      i = 0;
      while (i != n && held_locks->locks[i].mu != mu) {
        i++;
      }
      if (i == n) {  // mu missing means releasing unheld lock
        SynchEvent *mu_events = GetSynchEvent(mu);
        ABSL_RAW_LOG(FATAL,
                     "thread releasing lock it does not hold: %p %s; "
                     ,
                     static_cast<void *>(mu),
                     mu_events == nullptr ? "" : mu_events->name);
      }
    }
  } else if (held_locks->locks[i].count == 1) {
    // Last reference: remove by swapping in the final entry.
    held_locks->n = n - 1;
    held_locks->locks[i] = held_locks->locks[n - 1];
    held_locks->locks[n - 1].id = InvalidGraphId();
    held_locks->locks[n - 1].mu =
        nullptr;  // clear mu to please the leak detector.
  } else {
    assert(held_locks->locks[i].count > 0);
    held_locks->locks[i].count--;
  }
}
1274
+
1275
// Call LockEnter() if in debug mode and deadlock detection is enabled.
// (This overload looks up the graph id itself; see the two-arg overload for
// callers that already have it.)
static inline void DebugOnlyLockEnter(Mutex *mu) {
  if (kDebugMode) {
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
        OnDeadlockCycle::kIgnore) {
      LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
    }
  }
}
1284
+
1285
// Call LockEnter() if in debug mode and deadlock detection is enabled.
// Overload for callers that already hold the mutex's graph id (avoids a
// GetGraphId() lookup and its lock acquisition).
static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
  if (kDebugMode) {
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
        OnDeadlockCycle::kIgnore) {
      LockEnter(mu, id, Synch_GetAllLocks());
    }
  }
}
1294
+
1295
// Call LockLeave() if in debug mode and deadlock detection is enabled.
static inline void DebugOnlyLockLeave(Mutex *mu) {
  if (kDebugMode) {
    if (synch_deadlock_detection.load(std::memory_order_acquire) !=
        OnDeadlockCycle::kIgnore) {
      LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
    }
  }
}
1304
+
1305
// Format the n program counters in pcs into buf (at most maxlen bytes,
// always NUL-terminated by snprintf) and return buf. When symbolize is true,
// each frame is rendered on its own line with its symbol name (empty if the
// symbolizer fails); otherwise frames are space-separated raw addresses.
static char *StackString(void **pcs, int n, char *buf, int maxlen,
                         bool symbolize) {
  static const int kSymLen = 200;
  char sym[kSymLen];
  int len = 0;
  for (int i = 0; i != n; i++) {
    if (len >= maxlen)
      return buf;
    size_t count = static_cast<size_t>(maxlen - len);
    if (symbolize) {
      if (!symbolizer(pcs[i], sym, kSymLen)) {
        sym[0] = '\0';
      }
      snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
               sym);
    } else {
      snprintf(buf + len, count, " %p", pcs[i]);
    }
    len += strlen(&buf[len]);
  }
  return buf;
}
1327
+
1328
// Capture the current call stack (skipping 2 internal frames, up to 40
// frames) and format it into buf via StackString().
static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
  void *pcs[40];
  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
                     maxlen, symbolize);
}
1333
+
1334
namespace {
enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
                                    // a path this long would be remarkable
// Buffers required to report a deadlock.
// We do not allocate them on stack to avoid large stack frame.
struct DeadlockReportBuffers {
  char buf[6100];
  GraphId path[kMaxDeadlockPathLen];
};

// RAII owner for a DeadlockReportBuffers allocated with LowLevelAlloc
// (not the normal heap); freed on scope exit.
struct ScopedDeadlockReportBuffers {
  ScopedDeadlockReportBuffers() {
    b = reinterpret_cast<DeadlockReportBuffers *>(
        base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
  }
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
  DeadlockReportBuffers *b;
};

// Helper to pass to GraphCycles::UpdateStackTrace.
int GetStack(void** stack, int max_depth) {
  return absl::GetStackTrace(stack, max_depth, 3);
}
}  // anonymous namespace
1358
+
1359
// Called in debug mode when a thread is about to acquire a lock in a way that
// may block. Records the acquires-before edges from every lock this thread
// already holds to mu, and reports (or aborts on, per the configured mode)
// any cycle that results. Returns mu's graph id.
static GraphId DeadlockCheck(Mutex *mu) {
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
      OnDeadlockCycle::kIgnore) {
    return InvalidGraphId();
  }

  SynchLocksHeld *all_locks = Synch_GetAllLocks();

  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
  const GraphId mu_id = GetGraphIdLocked(mu);

  if (all_locks->n == 0) {
    // There are no other locks held. Return now so that we don't need to
    // call GetSynchEvent(). This way we do not record the stack trace
    // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
    // it can't always be the first lock acquired by a thread.
    return mu_id;
  }

  // We prefer to keep stack traces that show a thread holding and acquiring
  // as many locks as possible. This increases the chances that a given edge
  // in the acquires-before graph will be represented in the stack traces
  // recorded for the locks.
  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);

  // For each other mutex already held by this thread:
  for (int i = 0; i != all_locks->n; i++) {
    const GraphId other_node_id = all_locks->locks[i].id;
    const Mutex *other =
        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
    if (other == nullptr) {
      // Ignore stale lock
      continue;
    }

    // Add the acquired-before edge to the graph.
    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
      // Edge insertion failed: a cycle exists. Build and emit the report.
      ScopedDeadlockReportBuffers scoped_buffers;
      DeadlockReportBuffers *b = scoped_buffers.b;
      static int number_of_reported_deadlocks = 0;
      number_of_reported_deadlocks++;
      // Symbolize only 2 first deadlock report to avoid huge slowdowns.
      bool symbolize = number_of_reported_deadlocks <= 2;
      ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
                   CurrentStackString(b->buf, sizeof (b->buf), symbolize));
      size_t len = 0;
      for (int j = 0; j != all_locks->n; j++) {
        void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
        if (pr != nullptr) {
          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
          len += strlen(&b->buf[len]);
        }
      }
      ABSL_RAW_LOG(ERROR,
                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
                   "historical lock ordering graph has been observed",
                   static_cast<void *>(mu), b->buf);
      ABSL_RAW_LOG(ERROR, "Cycle: ");
      int path_len = deadlock_graph->FindPath(
          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
      for (int j = 0; j != path_len; j++) {
        GraphId id = b->path[j];
        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
        if (path_mu == nullptr) continue;
        void** stack;
        int depth = deadlock_graph->GetStackTrace(id, &stack);
        snprintf(b->buf, sizeof(b->buf),
                 "mutex@%p stack: ", static_cast<void *>(path_mu));
        StackString(stack, depth, b->buf + strlen(b->buf),
                    static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
                    symbolize);
        ABSL_RAW_LOG(ERROR, "%s", b->buf);
      }
      if (synch_deadlock_detection.load(std::memory_order_acquire) ==
          OnDeadlockCycle::kAbort) {
        deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler
        ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
        return mu_id;
      }
      break;  // report at most one potential deadlock per acquisition
    }
  }

  return mu_id;
}
1446
+
1447
// Invoke DeadlockCheck() iff we're in debug mode and
// deadlock checking has been enabled.
static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                        OnDeadlockCycle::kIgnore) {
    return DeadlockCheck(mu);
  } else {
    return InvalidGraphId();
  }
}
1457
+
1458
// Remove this mutex's node from the deadlock graph (no-op when deadlock
// detection is off or in release builds). Called from ~Mutex().
void Mutex::ForgetDeadlockInfo() {
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                        OnDeadlockCycle::kIgnore) {
    deadlock_graph_mu.Lock();
    if (deadlock_graph != nullptr) {
      deadlock_graph->RemoveNode(this);
    }
    deadlock_graph_mu.Unlock();
  }
}
1468
+
1469
// Raw-log FATAL if this thread's held-locks record contains this mutex.
void Mutex::AssertNotHeld() const {
  // We have the data to allow this check only if in debug mode and deadlock
  // detection is enabled.
  if (kDebugMode &&
      (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
      synch_deadlock_detection.load(std::memory_order_acquire) !=
          OnDeadlockCycle::kIgnore) {
    GraphId id = GetGraphId(const_cast<Mutex *>(this));
    SynchLocksHeld *locks = Synch_GetAllLocks();
    for (int i = 0; i != locks->n; i++) {
      if (locks->locks[i].id == id) {
        SynchEvent *mu_events = GetSynchEvent(this);
        ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
                     static_cast<const void *>(this),
                     (mu_events == nullptr ? "" : mu_events->name));
      }
    }
  }
}
1488
+
1489
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
// Gives up at once if a reader holds the lock or event tracing is enabled.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
  int c = GetMutexGlobals().spinloop_iterations;
  do {  // do/while somewhat faster on AMD
    intptr_t v = mu->load(std::memory_order_relaxed);
    if ((v & (kMuReader|kMuEvent)) != 0) {
      return false;                       // a reader or tracing -> give up
    } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
               mu->compare_exchange_strong(v, kMuWriter | v,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
      return true;
    }
  } while (--c > 0);
  return false;
}
1506
+
1507
// Acquire the mutex exclusively: fast-path CAS, then bounded spinning, then
// the slow path; bracketed by TSan annotations and debug bookkeeping.
void Mutex::Lock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
  GraphId id = DebugOnlyDeadlockCheck(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // try fast acquire, then spin loop
  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
      !mu_.compare_exchange_strong(v, kMuWriter | v,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
    // try spin acquire, then slow loop
    if (!TryAcquireWithSpinning(&this->mu_)) {
      this->LockSlow(kExclusive, nullptr, 0);
    }
  }
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
1524
+
1525
// Acquire the mutex in shared mode: fast-path CAS adding one reader
// (kMuOne), falling back to the slow path on contention, waiters, or tracing.
void Mutex::ReaderLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
  GraphId id = DebugOnlyDeadlockCheck(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // try fast acquire, then slow loop
  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
      !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
    this->LockSlow(kShared, nullptr, 0);
  }
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
1539
+
1540
// Acquire the mutex exclusively, blocking until cond is true; always takes
// the slow path since a condition must be evaluated.
void Mutex::LockWhen(const Condition &cond) {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
  GraphId id = DebugOnlyDeadlockCheck(this);
  this->LockSlow(kExclusive, &cond, 0);
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
1547
+
1548
+ bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1549
+ return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1550
+ }
1551
+
1552
+ bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1553
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1554
+ GraphId id = DebugOnlyDeadlockCheck(this);
1555
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
1556
+ KernelTimeout(deadline), 0);
1557
+ DebugOnlyLockEnter(this, id);
1558
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1559
+ return res;
1560
+ }
1561
+
1562
+ void Mutex::ReaderLockWhen(const Condition &cond) {
1563
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1564
+ GraphId id = DebugOnlyDeadlockCheck(this);
1565
+ this->LockSlow(kShared, &cond, 0);
1566
+ DebugOnlyLockEnter(this, id);
1567
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1568
+ }
1569
+
1570
+ bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1571
+ absl::Duration timeout) {
1572
+ return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1573
+ }
1574
+
1575
+ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1576
+ absl::Time deadline) {
1577
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1578
+ GraphId id = DebugOnlyDeadlockCheck(this);
1579
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1580
+ DebugOnlyLockEnter(this, id);
1581
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1582
+ return res;
1583
+ }
1584
+
1585
+ void Mutex::Await(const Condition &cond) {
1586
+ if (cond.Eval()) { // condition already true; nothing to do
1587
+ if (kDebugMode) {
1588
+ this->AssertReaderHeld();
1589
+ }
1590
+ } else { // normal case
1591
+ ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1592
+ "condition untrue on return from Await");
1593
+ }
1594
+ }
1595
+
1596
+ bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1597
+ return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1598
+ }
1599
+
1600
+ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1601
+ if (cond.Eval()) { // condition already true; nothing to do
1602
+ if (kDebugMode) {
1603
+ this->AssertReaderHeld();
1604
+ }
1605
+ return true;
1606
+ }
1607
+
1608
+ KernelTimeout t{deadline};
1609
+ bool res = this->AwaitCommon(cond, t);
1610
+ ABSL_RAW_CHECK(res || t.has_timeout(),
1611
+ "condition untrue on return from Await");
1612
+ return res;
1613
+ }
1614
+
1615
// Common implementation of Await()/AwaitWithDeadline(): releases the lock
// (in whichever mode it is currently held), blocks until cond is true or the
// timeout expires, then reacquires in the same mode. Returns whether cond was
// true on return. The surrounding TSAN annotations mark the unlock/relock pair
// so the race detector models the temporary release.
bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
  this->AssertReaderHeld();
  // Detect the mode we hold the lock in from the mutex word itself.
  MuHow how =
      (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
  SynchWaitParams waitp(
      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
      nullptr /*no cv_word*/);
  // We are guaranteed to block at least once, so the designated-waker bit
  // must be cleared on reacquisition.
  int flags = kMuHasBlocked;
  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
    flags |= kMuIsCond;
  }
  this->UnlockSlow(&waitp);   // enqueue ourselves and release the lock
  this->Block(waitp.thread);  // sleep until woken or timed out
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
  this->LockSlowLoop(&waitp, flags);
  bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
  return res;
}
1637
+
1638
// Attempts to acquire *this exclusively without blocking.
// Returns true and holds the lock on success; returns false (without
// blocking) if the lock is unavailable. When event tracing is enabled
// (kMuEvent set), a second CAS path records success/failure events.
bool Mutex::TryLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
      mu_.compare_exchange_strong(v, kMuWriter | v,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
    DebugOnlyLockEnter(this);
    ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
    return true;
  }
  if ((v & kMuEvent) != 0) {              // we're recording events
    if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
        mu_.compare_exchange_strong(
            v, (kExclusive->fast_or | v) + kExclusive->fast_add,
            std::memory_order_acquire, std::memory_order_relaxed)) {
      DebugOnlyLockEnter(this);
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
      ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
      return true;
    } else {
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
    }
  }
  ABSL_TSAN_MUTEX_POST_LOCK(
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
  return false;
}
1666
+
1667
// Attempts to acquire *this in shared (reader) mode without blocking.
// Returns true and holds a read lock on success; false otherwise. Retries the
// CAS a bounded number of times because the mutex word may change under us
// (e.g. as other readers come and go) without the lock being unavailable.
bool Mutex::ReaderTryLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this,
                           __tsan_mutex_read_lock | __tsan_mutex_try_lock);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // The while-loops (here and below) iterate only if the mutex word keeps
  // changing (typically because the reader count changes) under the CAS. We
  // limit the number of attempts to avoid having to think about livelock.
  int loop_limit = 5;
  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
    if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      DebugOnlyLockEnter(this);
      ABSL_TSAN_MUTEX_POST_LOCK(
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
      return true;
    }
    loop_limit--;
    v = mu_.load(std::memory_order_relaxed);
  }
  if ((v & kMuEvent) != 0) {   // we're recording events
    // Same acquisition attempt, but through the slower event-aware check so
    // that success/failure can be reported via PostSynchEvent().
    loop_limit = 5;
    while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
      if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
        DebugOnlyLockEnter(this);
        PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
        ABSL_TSAN_MUTEX_POST_LOCK(
            this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
        return true;
      }
      loop_limit--;
      v = mu_.load(std::memory_order_relaxed);
    }
    if ((v & kMuEvent) != 0) {
      PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
    }
  }
  ABSL_TSAN_MUTEX_POST_LOCK(this,
                            __tsan_mutex_read_lock | __tsan_mutex_try_lock |
                                __tsan_mutex_try_lock_failed,
                            0);
  return false;
}
1712
+
1713
// Releases *this, which must be held exclusively by the calling thread.
// Fast path: a single CAS clears kMuWriter (and kMuWrWait) when no waiter
// needs to be woken; otherwise UnlockSlow() performs the wakeup.
void Mutex::Unlock() {
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
  DebugOnlyLockLeave(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);

  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
    ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
                 static_cast<unsigned>(v));
  }

  // should_try_cas is whether we'll try a compare-and-swap immediately.
  // NOTE: optimized out when kDebugMode is false.
  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
                         (v & (kMuWait | kMuDesig)) != kMuWait);
  // But, we can use an alternate computation of it, that compilers
  // currently don't find on their own. When that changes, this function
  // can be simplified.
  intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
  intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
  // Claim: "x == 0 && y > 0" is equal to should_try_cas.
  // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
  // all possible non-zero values for x exceed all possible values for y.
  // Therefore, (x == 0 && y > 0) == (x < y).
  if (kDebugMode && should_try_cas != (x < y)) {
    // Debug-only cross-check that the bit-trick form agrees with the
    // straightforward boolean above.
    // We would usually use PRIdPTR here, but is not correctly implemented
    // within the android toolchain.
    ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
                 static_cast<long long>(v), static_cast<long long>(x),
                 static_cast<long long>(y));
  }
  if (x < y &&
      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
    // fast writer release (writer with no waiters or with designated waker)
  } else {
    this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
  }
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}
1753
+
1754
+ // Requires v to represent a reader-locked state.
1755
+ static bool ExactlyOneReader(intptr_t v) {
1756
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
1757
+ assert((v & kMuHigh) != 0);
1758
+ // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1759
+ // on some architectures the following generates slightly smaller code.
1760
+ // It may be faster too.
1761
+ constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1762
+ return (v & kMuMultipleWaitersMask) == 0;
1763
+ }
1764
+
1765
// Releases a shared (reader) hold on *this.
// Fast path: when there are no waiters and no event tracing, a single CAS
// decrements the reader count (and clears kMuReader if we were the last
// reader); otherwise UnlockSlow() handles wakeups.
void Mutex::ReaderUnlock() {
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
  DebugOnlyLockLeave(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  assert((v & (kMuWriter|kMuReader)) == kMuReader);
  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
    // fast reader release (reader with no waiters)
    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
    if (mu_.compare_exchange_strong(v, v - clear,
                                    std::memory_order_release,
                                    std::memory_order_relaxed)) {
      ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
      return;
    }
  }
  this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
}
1783
+
1784
+ // Clears the designated waker flag in the mutex if this thread has blocked, and
1785
+ // therefore may be the designated waker.
1786
+ static intptr_t ClearDesignatedWakerMask(int flag) {
1787
+ assert(flag >= 0);
1788
+ assert(flag <= 1);
1789
+ switch (flag) {
1790
+ case 0: // not blocked
1791
+ return ~static_cast<intptr_t>(0);
1792
+ case 1: // blocked; turn off the designated waker bit
1793
+ return ~static_cast<intptr_t>(kMuDesig);
1794
+ }
1795
+ ABSL_UNREACHABLE();
1796
+ }
1797
+
1798
+ // Conditionally ignores the existence of waiting writers if a reader that has
1799
+ // already blocked once wakes up.
1800
+ static intptr_t IgnoreWaitingWritersMask(int flag) {
1801
+ assert(flag >= 0);
1802
+ assert(flag <= 1);
1803
+ switch (flag) {
1804
+ case 0: // not blocked
1805
+ return ~static_cast<intptr_t>(0);
1806
+ case 1: // blocked; pretend there are no waiting writers
1807
+ return ~static_cast<intptr_t>(kMuWrWait);
1808
+ }
1809
+ ABSL_UNREACHABLE();
1810
+ }
1811
+
1812
+ // Internal version of LockWhen(). See LockSlowWithDeadline()
1813
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1814
+ int flags) {
1815
+ ABSL_RAW_CHECK(
1816
+ this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1817
+ "condition untrue on return from LockSlow");
1818
+ }
1819
+
1820
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
// "locking" selects which annotation dance to perform; "trylock" and
// "read_lock" choose the TSAN flag bits. The ordering of the PRE/POST
// annotations below is deliberate and must not be changed.
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
                                          bool locking, bool trylock,
                                          bool read_lock) {
  // Delicate annotation dance.
  // We are currently inside of read/write lock/unlock operation.
  // All memory accesses are ignored inside of mutex operations + for unlock
  // operation tsan considers that we've already released the mutex.
  bool res = false;
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
  const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
  if (locking) {
    // For lock we pretend that we have finished the operation,
    // evaluate the predicate, then unlock the mutex and start locking it again
    // to match the annotation at the end of outer lock operation.
    // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
    // will think the lock acquisition is recursive which will trigger
    // deadlock detector.
    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
    res = cond->Eval();
    // There is no "try" version of Unlock, so use flags instead of tryflags.
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
  } else {
    // Similarly, for unlock we pretend that we have unlocked the mutex,
    // lock the mutex, evaluate the predicate, and start unlocking it again
    // to match the annotation at the end of outer unlock operation.
    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
    res = cond->Eval();
    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
  }
  // Prevent unused param warnings in non-TSAN builds.
  static_cast<void>(mu);
  static_cast<void>(trylock);
  static_cast<void>(read_lock);
  return res;
}
1862
+
1863
// Compute cond->Eval() hiding it from race detectors.
// We are hiding it because inside of UnlockSlow we can evaluate a predicate
// that was just added by a concurrent Lock operation; Lock adds the predicate
// to the internal Mutex list without actually acquiring the Mutex
// (it only acquires the internal spinlock, which is rightfully invisible for
// tsan). As the result there is no tsan-visible synchronization between the
// addition and this thread. So if we would enable race detection here,
// it would race with the predicate initialization.
static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
  // Memory accesses are already ignored inside of lock/unlock operations,
  // but synchronization operations are also ignored. When we evaluate the
  // predicate we must ignore only memory accesses but not synchronization,
  // because missed synchronization can lead to false reports later.
  // So we "divert" (which un-ignores both memory accesses and synchronization)
  // and then separately turn on ignores of memory accesses.
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  bool res = cond->Eval();
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
  static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
  return res;
}
1886
+
1887
// Internal equivalent of *LockWhenWithDeadline(), where
// "t" represents the absolute timeout; !t.has_timeout() means "forever".
// "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
// In flags, bits are ored together:
// - kMuHasBlocked indicates that the client has already blocked on the call so
//   the designated waker bit must be cleared and waiting writers should not
//   obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
//   Await, LockWhen) so contention profiling should be suppressed.
// Returns whether cond (if any) was true on return.
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
                                 KernelTimeout t, int flags) {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  bool unlock = false;
  if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
      mu_.compare_exchange_strong(
          v,
          (how->fast_or |
           (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
              how->fast_add,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    if (cond == nullptr ||
        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
      return true;
    }
    // Acquired the lock but the condition is false: we must release and wait.
    unlock = true;
  }
  SynchWaitParams waitp(
      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
      nullptr /*no cv_word*/);
  if (!Condition::GuaranteedEqual(cond, nullptr)) {
    flags |= kMuIsCond;
  }
  if (unlock) {
    this->UnlockSlow(&waitp);
    this->Block(waitp.thread);
    flags |= kMuHasBlocked;
  }
  this->LockSlowLoop(&waitp, flags);
  return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
         cond == nullptr ||
         EvalConditionAnnotated(cond, this, true, false, how == kShared);
}
1929
+
1930
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
// the printf-style argument list. The format string must be a literal.
// Arguments after the first are not evaluated unless the condition is true.
// The do/while(0) wrapper makes the macro usable as a single statement
// (e.g. in an unbraced if/else).
#define RAW_CHECK_FMT(cond, ...)                                   \
  do {                                                             \
    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
    }                                                              \
  } while (0)
1939
+
1940
// Crashes (via RAW_CHECK_FMT) if the mutex word v is in a state that should
// be impossible, tagging the message with label ("Lock"/"Unlock").
static void CheckForMutexCorruption(intptr_t v, const char* label) {
  // Test for either of two situations that should not occur in v:
  //   kMuWriter and kMuReader
  //   kMuWrWait and !kMuWait
  const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
  // By flipping that bit, we can now test for:
  //   kMuWriter and kMuReader in w
  //   kMuWrWait and kMuWait in w
  // We've chosen these two pairs of values to be so that they will overlap,
  // respectively, when the word is left shifted by three. This allows us to
  // save a branch in the common (correct) case of them not being coincident.
  static_assert(kMuReader << 3 == kMuWriter, "must match");
  static_assert(kMuWait << 3 == kMuWrWait, "must match");
  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
  // One of the two corruptions holds; the checks below identify which.
  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
                "%s: Mutex corrupt: both reader and writer lock held: %p",
                label, reinterpret_cast<void *>(v));
  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
                "%s: Mutex corrupt: waiting writer with no waiters: %p",
                label, reinterpret_cast<void *>(v));
  assert(false);
}
1962
+
1963
// Slow-path acquisition loop shared by all lock paths. Repeatedly attempts to
// acquire in the mode given by waitp->how; when direct acquisition is not
// possible it enqueues this thread on the waiter list (protected by the
// kMuSpin spinlock embedded in the mutex word) and blocks. Returns only once
// the lock is held and waitp->cond (if any) is true.
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  int c = 0;  // spin/delay counter for MutexDelay()
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this,
        waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  for (;;) {
    v = mu_.load(std::memory_order_relaxed);
    CheckForMutexCorruption(v, "Lock");
    if ((v & waitp->how->slow_need_zero) == 0) {
      // Nothing blocks a direct acquisition in this mode: try the CAS,
      // clearing the designated-waker bit if we have already blocked.
      if (mu_.compare_exchange_strong(
              v,
              (waitp->how->fast_or |
               (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
                  waitp->how->fast_add,
              std::memory_order_acquire, std::memory_order_relaxed)) {
        if (waitp->cond == nullptr ||
            EvalConditionAnnotated(waitp->cond, this, true, false,
                                   waitp->how == kShared)) {
          break;  // we timed out, or condition true, so return
        }
        this->UnlockSlow(waitp);  // got lock but condition false
        this->Block(waitp->thread);
        flags |= kMuHasBlocked;
        c = 0;
      }
    } else {  // need to access waiter list
      bool dowait = false;
      if ((v & (kMuSpin|kMuWait)) == 0) {  // no waiters
        // This thread tries to become the one and only waiter.
        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
        intptr_t nv =
            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
            kMuWait;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          nv |= kMuWrWait;  // a writer is now waiting behind readers
        }
        if (mu_.compare_exchange_strong(
                v, reinterpret_cast<intptr_t>(new_h) | nv,
                std::memory_order_release, std::memory_order_relaxed)) {
          dowait = true;
        } else {  // attempted Enqueue() failed
          // zero out the waitp field set by Enqueue()
          waitp->thread->waitp = nullptr;
        }
      } else if ((v & waitp->how->slow_inc_need_zero &
                  IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
        // This is a reader that needs to increment the reader count,
        // but the count is currently held in the last waiter.
        if (mu_.compare_exchange_strong(
                v,
                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
                    kMuSpin | kMuReader,
                std::memory_order_acquire, std::memory_order_relaxed)) {
          PerThreadSynch *h = GetPerThreadSynch(v);
          h->readers += kMuOne;  // inc reader count in waiter
          do {                   // release spinlock
            v = mu_.load(std::memory_order_relaxed);
          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
          if (waitp->cond == nullptr ||
              EvalConditionAnnotated(waitp->cond, this, true, false,
                                     waitp->how == kShared)) {
            break;  // we timed out, or condition true, so return
          }
          this->UnlockSlow(waitp);  // got lock but condition false
          this->Block(waitp->thread);
          flags |= kMuHasBlocked;
          c = 0;
        }
      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
                 mu_.compare_exchange_strong(
                     v,
                     (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
                         kMuSpin | kMuWait,
                     std::memory_order_acquire, std::memory_order_relaxed)) {
        PerThreadSynch *h = GetPerThreadSynch(v);
        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
        intptr_t wr_wait = 0;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          wr_wait = kMuWrWait;  // give priority to a waiting writer
        }
        do {  // release spinlock
          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
                   reinterpret_cast<intptr_t>(new_h),
            std::memory_order_release, std::memory_order_relaxed));
        dowait = true;
      }
      if (dowait) {
        this->Block(waitp->thread);  // wait until removed from list or timeout
        flags |= kMuHasBlocked;
        c = 0;
      }
    }
    ABSL_RAW_CHECK(
        waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
        "detected illegal recursion into Mutex code");
    // delay, then try again
    c = synchronization_internal::MutexDelay(c, GENTLE);
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this,
                   waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
                                             SYNCH_EV_READERLOCK_RETURNING);
  }
}
2082
+
2083
+ // Unlock this mutex, which is held by the current thread.
2084
+ // If waitp is non-zero, it must be the wait parameters for the current thread
2085
+ // which holds the lock but is not runnable because its condition is false
2086
+ // or it is in the process of blocking on a condition variable; it must requeue
2087
+ // itself on the mutex/condvar to wait for its condition to become true.
2088
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2089
+ SchedulingGuard::ScopedDisable disable_rescheduling;
2090
+ intptr_t v = mu_.load(std::memory_order_relaxed);
2091
+ this->AssertReaderHeld();
2092
+ CheckForMutexCorruption(v, "Unlock");
2093
+ if ((v & kMuEvent) != 0) {
2094
+ PostSynchEvent(this,
2095
+ (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2096
+ }
2097
+ int c = 0;
2098
+ // the waiter under consideration to wake, or zero
2099
+ PerThreadSynch *w = nullptr;
2100
+ // the predecessor to w or zero
2101
+ PerThreadSynch *pw = nullptr;
2102
+ // head of the list searched previously, or zero
2103
+ PerThreadSynch *old_h = nullptr;
2104
+ // a condition that's known to be false.
2105
+ const Condition *known_false = nullptr;
2106
+ PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
2107
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2108
+ // later writer could have acquired the lock
2109
+ // (starvation avoidance)
2110
+ ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2111
+ waitp->thread->suppress_fatal_errors,
2112
+ "detected illegal recursion into Mutex code");
2113
+ // This loop finds threads wake_list to wakeup if any, and removes them from
2114
+ // the list of waiters. In addition, it places waitp.thread on the queue of
2115
+ // waiters if waitp is non-zero.
2116
+ for (;;) {
2117
+ v = mu_.load(std::memory_order_relaxed);
2118
+ if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2119
+ waitp == nullptr) {
2120
+ // fast writer release (writer with no waiters or with designated waker)
2121
+ if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2122
+ std::memory_order_release,
2123
+ std::memory_order_relaxed)) {
2124
+ return;
2125
+ }
2126
+ } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2127
+ // fast reader release (reader with no waiters)
2128
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2129
+ if (mu_.compare_exchange_strong(v, v - clear,
2130
+ std::memory_order_release,
2131
+ std::memory_order_relaxed)) {
2132
+ return;
2133
+ }
2134
+ } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
2135
+ mu_.compare_exchange_strong(v, v | kMuSpin,
2136
+ std::memory_order_acquire,
2137
+ std::memory_order_relaxed)) {
2138
+ if ((v & kMuWait) == 0) { // no one to wake
2139
+ intptr_t nv;
2140
+ bool do_enqueue = true; // always Enqueue() the first time
2141
+ ABSL_RAW_CHECK(waitp != nullptr,
2142
+ "UnlockSlow is confused"); // about to sleep
2143
+ do { // must loop to release spinlock as reader count may change
2144
+ v = mu_.load(std::memory_order_relaxed);
2145
+ // decrement reader count if there are readers
2146
+ intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
2147
+ PerThreadSynch *new_h = nullptr;
2148
+ if (do_enqueue) {
2149
+ // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2150
+ // we must not retry here. The initial attempt will always have
2151
+ // succeeded, further attempts would enqueue us against *this due to
2152
+ // Fer() handling.
2153
+ do_enqueue = (waitp->cv_word == nullptr);
2154
+ new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2155
+ }
2156
+ intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
2157
+ if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
2158
+ clear = kMuWrWait | kMuReader; // clear read bit
2159
+ }
2160
+ nv = (v & kMuLow & ~clear & ~kMuSpin);
2161
+ if (new_h != nullptr) {
2162
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2163
+ } else { // new_h could be nullptr if we queued ourselves on a
2164
+ // CondVar
2165
+ // In that case, we must place the reader count back in the mutex
2166
+ // word, as Enqueue() did not store it in the new waiter.
2167
+ nv |= new_readers & kMuHigh;
2168
+ }
2169
+ // release spinlock & our lock; retry if reader-count changed
2170
+ // (writer count cannot change since we hold lock)
2171
+ } while (!mu_.compare_exchange_weak(v, nv,
2172
+ std::memory_order_release,
2173
+ std::memory_order_relaxed));
2174
+ break;
2175
+ }
2176
+
2177
+ // There are waiters.
2178
+ // Set h to the head of the circular waiter list.
2179
+ PerThreadSynch *h = GetPerThreadSynch(v);
2180
+ if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2181
+ // a reader but not the last
2182
+ h->readers -= kMuOne; // release our lock
2183
+ intptr_t nv = v; // normally just release spinlock
2184
+ if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2185
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2186
+ ABSL_RAW_CHECK(new_h != nullptr,
2187
+ "waiters disappeared during Enqueue()!");
2188
+ nv &= kMuLow;
2189
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2190
+ }
2191
+ mu_.store(nv, std::memory_order_release); // release spinlock
2192
+ // can release with a store because there were waiters
2193
+ break;
2194
+ }
2195
+
2196
+ // Either we didn't search before, or we marked the queue
2197
+ // as "maybe_unlocking" and no one else should have changed it.
2198
+ ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2199
+ "Mutex queue changed beneath us");
2200
+
2201
+ // The lock is becoming free, and there's a waiter
2202
+ if (old_h != nullptr &&
2203
+ !old_h->may_skip) { // we used old_h as a terminator
2204
+ old_h->may_skip = true; // allow old_h to skip once more
2205
+ ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2206
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2207
+ old_h->skip = old_h->next; // old_h not head & can skip to successor
2208
+ }
2209
+ }
2210
+ if (h->next->waitp->how == kExclusive &&
2211
+ Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2212
+ // easy case: writer with no condition; no need to search
2213
+ pw = h; // wake w, the successor of h (=pw)
2214
+ w = h->next;
2215
+ w->wake = true;
2216
+ // We are waking up a writer. This writer may be racing against
2217
+ // an already awake reader for the lock. We want the
2218
+ // writer to usually win this race,
2219
+ // because if it doesn't, we can potentially keep taking a reader
2220
+ // perpetually and writers will starve. Worse than
2221
+ // that, this can also starve other readers if kMuWrWait gets set
2222
+ // later.
2223
+ wr_wait = kMuWrWait;
2224
+ } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2225
+ // we found a waiter w to wake on a previous iteration and either it's
2226
+ // a writer, or we've searched the entire list so we have all the
2227
+ // readers.
2228
+ if (pw == nullptr) { // if w's predecessor is unknown, it must be h
2229
+ pw = h;
2230
+ }
2231
+ } else {
2232
+ // At this point we don't know all the waiters to wake, and the first
2233
+ // waiter has a condition or is a reader. We avoid searching over
2234
+ // waiters we've searched on previous iterations by starting at
2235
+ // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2236
+ if (old_h == h) { // we've searched before, and nothing's new
2237
+ // so there's no one to wake.
2238
+ intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2239
+ h->readers = 0;
2240
+ h->maybe_unlocking = false; // finished unlocking
2241
+ if (waitp != nullptr) { // we must queue ourselves and sleep
2242
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2243
+ nv &= kMuLow;
2244
+ if (new_h != nullptr) {
2245
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2246
+ } // else new_h could be nullptr if we queued ourselves on a
2247
+ // CondVar
2248
+ }
2249
+ // release spinlock & lock
2250
+ // can release with a store because there were waiters
2251
+ mu_.store(nv, std::memory_order_release);
2252
+ break;
2253
+ }
2254
+
2255
+ // set up to walk the list
2256
+ PerThreadSynch *w_walk; // current waiter during list walk
2257
+ PerThreadSynch *pw_walk; // previous waiter during list walk
2258
+ if (old_h != nullptr) { // we've searched up to old_h before
2259
+ pw_walk = old_h;
2260
+ w_walk = old_h->next;
2261
+ } else { // no prior search, start at beginning
2262
+ pw_walk =
2263
+ nullptr; // h->next's predecessor may change; don't record it
2264
+ w_walk = h->next;
2265
+ }
2266
+
2267
+ h->may_skip = false; // ensure we never skip past h in future searches
2268
+ // even if other waiters are queued after it.
2269
+ ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2270
+
2271
+ h->maybe_unlocking = true; // we're about to scan the waiter list
2272
+ // without the spinlock held.
2273
+ // Enqueue must be conservative about
2274
+ // priority queuing.
2275
+
2276
+ // We must release the spinlock to evaluate the conditions.
2277
+ mu_.store(v, std::memory_order_release); // release just spinlock
2278
+ // can release with a store because there were waiters
2279
+
2280
+ // h is the last waiter queued, and w_walk the first unsearched waiter.
2281
+ // Without the spinlock, the locations mu_ and h->next may now change
2282
+ // underneath us, but since we hold the lock itself, the only legal
2283
+ // change is to add waiters between h and w_walk. Therefore, it's safe
2284
+ // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2285
+ // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2286
+
2287
+ old_h = h; // remember we searched to here
2288
+
2289
+ // Walk the path upto and including h looking for waiters we can wake.
2290
+ while (pw_walk != h) {
2291
+ w_walk->wake = false;
2292
+ if (w_walk->waitp->cond ==
2293
+ nullptr || // no condition => vacuously true OR
2294
+ (w_walk->waitp->cond != known_false &&
2295
+ // this thread's condition is not known false, AND
2296
+ // is in fact true
2297
+ EvalConditionIgnored(this, w_walk->waitp->cond))) {
2298
+ if (w == nullptr) {
2299
+ w_walk->wake = true; // can wake this waiter
2300
+ w = w_walk;
2301
+ pw = pw_walk;
2302
+ if (w_walk->waitp->how == kExclusive) {
2303
+ wr_wait = kMuWrWait;
2304
+ break; // bail if waking this writer
2305
+ }
2306
+ } else if (w_walk->waitp->how == kShared) { // wake if a reader
2307
+ w_walk->wake = true;
2308
+ } else { // writer with true condition
2309
+ wr_wait = kMuWrWait;
2310
+ }
2311
+ } else { // can't wake; condition false
2312
+ known_false = w_walk->waitp->cond; // remember last false condition
2313
+ }
2314
+ if (w_walk->wake) { // we're waking reader w_walk
2315
+ pw_walk = w_walk; // don't skip similar waiters
2316
+ } else { // not waking; skip as much as possible
2317
+ pw_walk = Skip(w_walk);
2318
+ }
2319
+ // If pw_walk == h, then load of pw_walk->next can race with
2320
+ // concurrent write in Enqueue(). However, at the same time
2321
+ // we do not need to do the load, because we will bail out
2322
+ // from the loop anyway.
2323
+ if (pw_walk != h) {
2324
+ w_walk = pw_walk->next;
2325
+ }
2326
+ }
2327
+
2328
+ continue; // restart for(;;)-loop to wakeup w or to find more waiters
2329
+ }
2330
+ ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2331
+ // The first (and perhaps only) waiter we've chosen to wake is w, whose
2332
+ // predecessor is pw. If w is a reader, we must wake all the other
2333
+ // waiters with wake==true as well. We may also need to queue
2334
+ // ourselves if waitp != null. The spinlock and the lock are still
2335
+ // held.
2336
+
2337
+ // This traverses the list in [ pw->next, h ], where h is the head,
2338
+ // removing all elements with wake==true and placing them in the
2339
+ // singly-linked list wake_list. Returns the new head.
2340
+ h = DequeueAllWakeable(h, pw, &wake_list);
2341
+
2342
+ intptr_t nv = (v & kMuEvent) | kMuDesig;
2343
+ // assume no waiters left,
2344
+ // set kMuDesig for INV1a
2345
+
2346
+ if (waitp != nullptr) { // we must queue ourselves and sleep
2347
+ h = Enqueue(h, waitp, v, kMuIsCond);
2348
+ // h is new last waiter; could be null if we queued ourselves on a
2349
+ // CondVar
2350
+ }
2351
+
2352
+ ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2353
+ "unexpected empty wake list");
2354
+
2355
+ if (h != nullptr) { // there are waiters left
2356
+ h->readers = 0;
2357
+ h->maybe_unlocking = false; // finished unlocking
2358
+ nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2359
+ }
2360
+
2361
+ // release both spinlock & lock
2362
+ // can release with a store because there were waiters
2363
+ mu_.store(nv, std::memory_order_release);
2364
+ break; // out of for(;;)-loop
2365
+ }
2366
+ // aggressive here; no one can proceed till we do
2367
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2368
+ } // end of for(;;)-loop
2369
+
2370
+ if (wake_list != kPerThreadSynchNull) {
2371
+ int64_t total_wait_cycles = 0;
2372
+ int64_t max_wait_cycles = 0;
2373
+ int64_t now = base_internal::CycleClock::Now();
2374
+ do {
2375
+ // Profile lock contention events only if the waiter was trying to acquire
2376
+ // the lock, not waiting on a condition variable or Condition.
2377
+ if (!wake_list->cond_waiter) {
2378
+ int64_t cycles_waited =
2379
+ (now - wake_list->waitp->contention_start_cycles);
2380
+ total_wait_cycles += cycles_waited;
2381
+ if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
2382
+ wake_list->waitp->contention_start_cycles = now;
2383
+ wake_list->waitp->should_submit_contention_data = true;
2384
+ }
2385
+ wake_list = Wakeup(wake_list); // wake waiters
2386
+ } while (wake_list != kPerThreadSynchNull);
2387
+ if (total_wait_cycles > 0) {
2388
+ mutex_tracer("slow release", this, total_wait_cycles);
2389
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2390
+ submit_profile_data(total_wait_cycles);
2391
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2392
+ }
2393
+ }
2394
+ }
2395
+
2396
+ // Used by CondVar implementation to reacquire mutex after waking from
2397
+ // condition variable. This routine is used instead of Lock() because the
2398
+ // waiting thread may have been moved from the condition variable queue to the
2399
+ // mutex queue without a wakeup, by Trans(). In that case, when the thread is
2400
+ // finally woken, the woken thread will believe it has been woken from the
2401
+ // condition variable (i.e. its PC will be in when in the CondVar code), when
2402
+ // in fact it has just been woken from the mutex. Thus, it must enter the slow
2403
+ // path of the mutex in the same state as if it had just woken from the mutex.
2404
+ // That is, it must ensure to clear kMuDesig (INV1b).
2405
+ void Mutex::Trans(MuHow how) {
2406
+ this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2407
+ }
2408
+
2409
// Used by CondVar implementation to effectively wake thread w from the
// condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
// Precondition (checked below): w is an unconditional, untimed waiter with
// no pending CondVar queueing, so transferring it to the mutex queue is safe.
void Mutex::Fer(PerThreadSynch *w) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  int c = 0;  // spin/backoff iteration counter for MutexDelay
  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
                 "Mutex::Fer while waiting on Condition");
  ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
                 "Mutex::Fer while in timed wait");
  ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
                 "Mutex::Fer with pending CondVar queueing");
  // Retry loop: each iteration re-reads the mutex word and attempts either a
  // direct wakeup or a queue transfer via compare-exchange.
  for (;;) {
    intptr_t v = mu_.load(std::memory_order_relaxed);
    // Note: must not queue if the mutex is unlocked (nobody will wake it).
    // For example, we can have only kMuWait (conditional) or maybe
    // kMuWait|kMuWrWait.
    // conflicting != 0 implies that the waking thread cannot currently take
    // the mutex, which in turn implies that someone else has it and can wake
    // us if we queue.
    const intptr_t conflicting =
        kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
    if ((v & conflicting) == 0) {
      // Mutex appears acquirable in the mode w wants: wake w directly.
      // kAvailable is published with release semantics so w observes the
      // writes above (next cleared) when it sees the state change.
      w->next = nullptr;
      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
      IncrementSynchSem(this, w);
      return;
    } else {
      if ((v & (kMuSpin|kMuWait)) == 0) {       // no waiters
        // This thread tries to become the one and only waiter.
        PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
        ABSL_RAW_CHECK(new_h != nullptr,
                       "Enqueue failed");  // we must queue ourselves
        // Publish the new waiter list head together with kMuWait; on CAS
        // failure, retry the whole loop with a fresh v.
        if (mu_.compare_exchange_strong(
                v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
                std::memory_order_release, std::memory_order_relaxed)) {
          return;
        }
      } else if ((v & kMuSpin) == 0 &&
                 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
        // Acquired the queue spinlock; append w to the existing waiter list.
        PerThreadSynch *h = GetPerThreadSynch(v);
        PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
        ABSL_RAW_CHECK(new_h != nullptr,
                       "Enqueue failed");  // we must queue ourselves
        // Release the spinlock and install the new head; loop until the CAS
        // succeeds against concurrent low-bit changes.
        do {
          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            v,
            (v & kMuLow & ~kMuSpin) | kMuWait |
                reinterpret_cast<intptr_t>(new_h),
            std::memory_order_release, std::memory_order_relaxed));
        return;
      }
    }
    // Contention: back off before retrying.
    c = synchronization_internal::MutexDelay(c, GENTLE);
  }
}
2467
+
2468
+ void Mutex::AssertHeld() const {
2469
+ if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2470
+ SynchEvent *e = GetSynchEvent(this);
2471
+ ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2472
+ static_cast<const void *>(this),
2473
+ (e == nullptr ? "" : e->name));
2474
+ }
2475
+ }
2476
+
2477
+ void Mutex::AssertReaderHeld() const {
2478
+ if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2479
+ SynchEvent *e = GetSynchEvent(this);
2480
+ ABSL_RAW_LOG(
2481
+ FATAL, "thread should hold at least a read lock on Mutex %p %s",
2482
+ static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2483
+ }
2484
+ }
2485
+
2486
// -------------------------------- condition variables
// The CondVar word cv_ packs a pointer to the waiter list (a circular list of
// PerThreadSynch, pointer stored in the high bits) together with the two flag
// bits below. The static_assert guarantees PerThreadSynch alignment leaves
// those low bits free.
static const intptr_t kCvSpin = 0x0001L;  // spinlock protects waiter list
static const intptr_t kCvEvent = 0x0002L;  // record events

static const intptr_t kCvLow = 0x0003L;  // low order bits of CV (mask)

// Hack to make constant values available to gdb pretty printer
enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };

static_assert(PerThreadSynch::kAlignment > kCvLow,
              "PerThreadSynch::kAlignment must be greater than kCvLow");
2497
+
2498
+ void CondVar::EnableDebugLog(const char *name) {
2499
+ SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2500
+ e->log = true;
2501
+ UnrefSynchEvent(e);
2502
+ }
2503
+
2504
+ CondVar::~CondVar() {
2505
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2506
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2507
+ }
2508
+ }
2509
+
2510
+
2511
// Remove thread s from the list of waiters on this condition variable.
// Called on timeout (see WaitCommon); takes the cv_ spinlock, unlinks s from
// the circular waiter list if it is still there, and marks s available.
void CondVar::Remove(PerThreadSynch *s) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  intptr_t v;
  int c = 0;  // spin/backoff iteration counter for MutexDelay
  for (v = cv_.load(std::memory_order_relaxed);;
       v = cv_.load(std::memory_order_relaxed)) {
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
        cv_.compare_exchange_strong(v, v | kCvSpin,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      // Spinlock held; h is the list head (pointer bits of the cv word).
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      if (h != nullptr) {
        // Walk the circular list looking for the node whose next is s;
        // stop if we wrap back to h without finding it (s already removed).
        PerThreadSynch *w = h;
        while (w->next != s && w->next != h) {  // search for thread
          w = w->next;
        }
        if (w->next == s) {  // found thread; remove it
          w->next = s->next;
          if (h == s) {
            // s was the head; w becomes the new head, or the list empties
            // if s was the only element (w == s).
            h = (w == s) ? nullptr : w;
          }
          s->next = nullptr;
          // Release-store so s, once it observes kAvailable, also sees the
          // unlinking writes above.
          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
        }
      }
      // release spinlock (keep the event flag, install possibly-new head)
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                std::memory_order_release);
      return;
    } else {
      // try again after a delay
      c = synchronization_internal::MutexDelay(c, GENTLE);
    }
  }
}
2547
+
2548
// Queue thread waitp->thread on condition variable word cv_word using
// wait parameters waitp.
// We split this into a separate routine, rather than simply doing it as part
// of WaitCommon(). If we were to queue ourselves on the condition variable
// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
// the logging code, or via a Condition function) and might potentially attempt
// to block this thread. That would be a problem if the thread were already on
// a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
// the unlock code to call CondVarEnqueue() to queue the thread on the condition
// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
static void CondVarEnqueue(SynchWaitParams *waitp) {
  // This thread might be transferred to the Mutex queue by Fer() when
  // we are woken. To make sure that is what happens, Enqueue() doesn't
  // call CondVarEnqueue() again but instead uses its normal code. We
  // must do this before we queue ourselves so that cv_word will be null
  // when seen by the dequeuer, who may wish immediately to requeue
  // this thread on another queue.
  std::atomic<intptr_t> *cv_word = waitp->cv_word;
  waitp->cv_word = nullptr;

  // Acquire the cv spinlock (low bit of the cv word), backing off with
  // MutexDelay under contention.
  intptr_t v = cv_word->load(std::memory_order_relaxed);
  int c = 0;
  while ((v & kCvSpin) != 0 ||  // acquire spinlock
         !cv_word->compare_exchange_weak(v, v | kCvSpin,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
    c = synchronization_internal::MutexDelay(c, GENTLE);
    v = cv_word->load(std::memory_order_relaxed);
  }
  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
  waitp->thread->waitp = waitp;  // prepare ourselves for waiting
  // h is the current list head; the list is circular, with h->next the
  // oldest waiter.
  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
  if (h == nullptr) {  // add this thread to waiter list
    waitp->thread->next = waitp->thread;  // singleton circular list
  } else {
    // Insert just after the head; the store below makes this thread the
    // new head.
    waitp->thread->next = h->next;
    h->next = waitp->thread;
  }
  waitp->thread->state.store(PerThreadSynch::kQueued,
                             std::memory_order_relaxed);
  // Publish the new head and release the spinlock in one store (kCvSpin
  // bit dropped, kCvEvent preserved).
  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
                 std::memory_order_release);
}
2593
+
2594
// Common body of Wait/WaitWithTimeout/WaitWithDeadline: release *mutex,
// block on this condition variable until woken or until t expires, then
// reacquire *mutex in the same mode (read or write) in which it was held
// on entry. Returns true iff the wait timed out.
bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
  bool rc = false;          // return value; true iff we timed-out

  // Deduce how the caller holds the mutex so it can be reacquired in the
  // same mode: kMuWriter set => exclusive, otherwise shared.
  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
  ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));

  // maybe trace this call
  intptr_t v = cv_.load(std::memory_order_relaxed);
  cond_var_tracer("Wait", this);
  if ((v & kCvEvent) != 0) {
    PostSynchEvent(this, SYNCH_EV_WAIT);
  }

  // Release mu and wait on condition variable.
  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
                        Synch_GetPerThreadAnnotated(mutex), &cv_);
  // UnlockSlow() will call CondVarEnqueue() just before releasing the
  // Mutex, thus queuing this thread on the condition variable. See
  // CondVarEnqueue() for the reasons.
  mutex->UnlockSlow(&waitp);

  // wait for signal: loop until our PerThreadSynch leaves the kQueued state
  // (set by CondVarEnqueue, cleared by Remove/Wakeup/Fer paths).
  while (waitp.thread->state.load(std::memory_order_acquire) ==
         PerThreadSynch::kQueued) {
    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
      // DecrementSynchSem returned due to timeout.
      // Now we will either (1) remove ourselves from the wait list in Remove
      // below, in which case Remove will set thread.state = kAvailable and
      // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
      // has removed us concurrently and is calling Wakeup, which will set
      // thread.state = kAvailable and post to the semaphore.
      // It's important to reset the timeout for the case (2) because otherwise
      // we can live-lock in this loop since DecrementSynchSem will always
      // return immediately due to timeout, but Signal/SignalAll is not
      // necessary set thread.state = kAvailable yet (and is not scheduled
      // due to thread priorities or other scheduler artifacts).
      // Note this could also be resolved if Signal/SignalAll would set
      // thread.state = kAvailable while holding the wait list spin lock.
      // But this can't be easily done for SignalAll since it grabs the whole
      // wait list with a single compare-exchange and does not really grab
      // the spin lock.
      t = KernelTimeout::Never();
      this->Remove(waitp.thread);
      rc = true;
    }
  }

  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
  waitp.thread->waitp = nullptr;  // cleanup

  // maybe trace this call (note: v was sampled before the wait; the event
  // flag is sticky once set, so this mirrors the pre-wait check)
  cond_var_tracer("Unwait", this);
  if ((v & kCvEvent) != 0) {
    PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
  }

  // From synchronization point of view Wait is unlock of the mutex followed
  // by lock of the mutex. We've annotated start of unlock in the beginning
  // of the function. Now, finish unlock and annotate lock of the mutex.
  // (Trans is effectively lock).
  ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
  ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
  mutex->Trans(mutex_how);  // Reacquire mutex
  ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
  return rc;
}
2661
+
2662
+ bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2663
+ return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2664
+ }
2665
+
2666
+ bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2667
+ return WaitCommon(mu, KernelTimeout(deadline));
2668
+ }
2669
+
2670
+ void CondVar::Wait(Mutex *mu) {
2671
+ WaitCommon(mu, KernelTimeout::Never());
2672
+ }
2673
+
2674
// Wake thread w
// If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
void CondVar::Wakeup(PerThreadSynch *w) {
  if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
    // The waiting thread only needs to observe "w->state == kAvailable" to be
    // released, we must cache "cvmu" before clearing "next".
    // (Once kAvailable is published below, w may run and its waitp may be
    // reused, so no field of w is touched afterwards.)
    Mutex *mu = w->waitp->cvmu;
    w->next = nullptr;
    w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
    Mutex::IncrementSynchSem(mu, w);
  } else {
    // Transfer w to the associated mutex's queue instead of waking it; it
    // will be woken when the mutex becomes available.
    w->waitp->cvmu->Fer(w);
  }
}
2690
+
2691
// Wake at most one waiter on this condition variable: pop the oldest waiter
// (head->next of the circular list) under the cv_ spinlock and wake it.
// A no-op if the cv word is 0 (no waiters, no events recorded).
void CondVar::Signal() {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
  intptr_t v;
  int c = 0;  // spin/backoff iteration counter for MutexDelay
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
        cv_.compare_exchange_strong(v, v | kCvSpin,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      PerThreadSynch *w = nullptr;
      if (h != nullptr) {  // remove first waiter (h->next is the oldest)
        w = h->next;
        if (w == h) {
          h = nullptr;  // w was the only waiter; list is now empty
        } else {
          h->next = w->next;
        }
      }
      // release spinlock (keep event flag, install possibly-new head)
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                std::memory_order_release);
      if (w != nullptr) {
        // Wakeup happens outside the spinlock; w is already unlinked.
        CondVar::Wakeup(w);  // wake waiter, if there was one
        cond_var_tracer("Signal wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNAL);
      }
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
      return;
    } else {
      // spinlock busy; try again after a delay
      c = synchronization_internal::MutexDelay(c, GENTLE);
    }
  }
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
2730
+
2731
// Wake all waiters on this condition variable: detach the entire waiter
// list with a single compare-exchange, then wake each thread in turn.
// A no-op if the cv word is 0 (no waiters, no events recorded).
void CondVar::SignalAll () {
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
  intptr_t v;
  int c = 0;  // spin/backoff iteration counter for MutexDelay
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    // empty the list if spinlock free
    // We do this by simply setting the list to empty using
    // compare and swap. We then have the entire list in our hands,
    // which cannot be changing since we grabbed it while no one
    // held the lock.
    if ((v & kCvSpin) == 0 &&
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      if (h != nullptr) {
        // Walk the detached circular list starting at the oldest waiter
        // (h->next), waking each one; stop after waking h itself.
        PerThreadSynch *w;
        PerThreadSynch *n = h->next;
        do {  // for every thread, wake it up
          w = n;
          n = n->next;  // read next before Wakeup may invalidate w
          CondVar::Wakeup(w);
        } while (w != h);
        cond_var_tracer("SignalAll wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
      }
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
      return;
    } else {
      // try again after a delay
      c = synchronization_internal::MutexDelay(c, GENTLE);
    }
  }
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
2768
+
2769
+ void ReleasableMutexLock::Release() {
2770
+ ABSL_RAW_CHECK(this->mu_ != nullptr,
2771
+ "ReleasableMutexLock::Release may only be called once");
2772
+ this->mu_->Unlock();
2773
+ this->mu_ = nullptr;
2774
+ }
2775
+
2776
+ #ifdef ABSL_HAVE_THREAD_SANITIZER
2777
+ extern "C" void __tsan_read1(void *addr);
2778
+ #else
2779
+ #define __tsan_read1(addr) // do nothing if TSan not enabled
2780
+ #endif
2781
+
2782
+ // A function that just returns its argument, dereferenced
2783
+ static bool Dereference(void *arg) {
2784
+ // ThreadSanitizer does not instrument this file for memory accesses.
2785
+ // This function dereferences a user variable that can participate
2786
+ // in a data race, so we need to manually tell TSan about this memory access.
2787
+ __tsan_read1(arg);
2788
+ return *(static_cast<bool *>(arg));
2789
+ }
2790
+
2791
// The always-true condition; distinguished by a null eval_ (see Eval()).
ABSL_CONST_INIT const Condition Condition::kTrue;
2792
+
2793
// Build a Condition from a bool(void*) function and its argument. The
// function pointer is stored, type-erased, in callback_ via StoreCallback;
// CallVoidPtrFunction recovers and invokes it.
Condition::Condition(bool (*func)(void *), void *arg)
    : eval_(&CallVoidPtrFunction),
      arg_(arg) {
  // Guard the type-erasing copy: callback_ must be able to hold the pointer.
  static_assert(sizeof(&func) <= sizeof(callback_),
                "An overlarge function pointer passed to Condition.");
  StoreCallback(func);
}
2800
+
2801
+ bool Condition::CallVoidPtrFunction(const Condition *c) {
2802
+ using FunctionPointer = bool (*)(void *);
2803
+ FunctionPointer function_pointer;
2804
+ std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
2805
+ return (*function_pointer)(c->arg_);
2806
+ }
2807
+
2808
// Build a Condition that is true whenever *cond is true, implemented by
// storing the Dereference helper as the type-erased callback.
Condition::Condition(const bool *cond)
    : eval_(CallVoidPtrFunction),
      // const_cast is safe since Dereference does not modify arg
      arg_(const_cast<bool *>(cond)) {
  using FunctionPointer = bool (*)(void *);
  const FunctionPointer dereference = Dereference;
  StoreCallback(dereference);
}
2816
+
2817
+ bool Condition::Eval() const {
2818
+ // eval_ == null for kTrue
2819
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
2820
+ }
2821
+
2822
+ bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2823
+ // kTrue logic.
2824
+ if (a == nullptr || a->eval_ == nullptr) {
2825
+ return b == nullptr || b->eval_ == nullptr;
2826
+ } else if (b == nullptr || b->eval_ == nullptr) {
2827
+ return false;
2828
+ }
2829
+ // Check equality of the representative fields.
2830
+ return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
2831
+ !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
2832
+ }
2833
+
2834
+ ABSL_NAMESPACE_END
2835
+ } // namespace absl