grpc 1.57.0.pre1 → 1.58.0.pre1

Files changed (403)
  1. checksums.yaml +4 -4
  2. data/Makefile +534 -284
  3. data/include/grpc/event_engine/event_engine.h +0 -1
  4. data/include/grpc/event_engine/memory_allocator.h +2 -2
  5. data/include/grpc/impl/channel_arg_names.h +371 -0
  6. data/include/grpc/impl/grpc_types.h +1 -353
  7. data/include/grpc/module.modulemap +1 -0
  8. data/src/core/ext/filters/backend_metrics/backend_metric_filter.cc +1 -1
  9. data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +2 -1
  10. data/src/core/ext/filters/client_channel/client_channel.cc +7 -3
  11. data/src/core/ext/filters/client_channel/http_proxy.cc +1 -1
  12. data/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc +10 -5
  13. data/src/core/ext/filters/client_channel/lb_policy/address_filtering.h +7 -5
  14. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +1 -0
  15. data/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc +5 -3
  16. data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +4 -2
  17. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +115 -109
  18. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.h +0 -5
  19. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +7 -2
  20. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +2 -1
  21. data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +1 -0
  22. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +1 -1
  23. data/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc +11 -3
  24. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +6 -1
  25. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +8 -5
  26. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +1 -1
  27. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +102 -11
  28. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +9 -4
  29. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +4 -1
  30. data/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_plugin.cc +6 -0
  31. data/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc +41 -14
  32. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +1 -1
  33. data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +3 -2
  34. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +1 -1
  35. data/src/core/ext/filters/client_channel/retry_filter.h +1 -0
  36. data/src/core/ext/filters/client_channel/retry_service_config.cc +1 -1
  37. data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +1 -1
  38. data/src/core/ext/filters/client_channel/subchannel.cc +9 -5
  39. data/src/core/ext/filters/client_channel/subchannel.h +8 -2
  40. data/src/core/ext/filters/deadline/deadline_filter.cc +1 -1
  41. data/src/core/ext/filters/http/client/http_client_filter.cc +1 -0
  42. data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
  43. data/src/core/ext/filters/http/message_compress/compression_filter.cc +1 -0
  44. data/src/core/ext/filters/http/server/http_server_filter.cc +1 -1
  45. data/src/core/ext/filters/message_size/message_size_filter.cc +1 -0
  46. data/src/core/ext/filters/rbac/rbac_service_config_parser.cc +4 -7
  47. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +1 -0
  48. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +8 -12
  49. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +357 -358
  50. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +5 -18
  51. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +63 -4
  52. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +7 -2
  53. data/src/core/ext/transport/chttp2/transport/internal.h +30 -57
  54. data/src/core/ext/transport/chttp2/transport/parsing.cc +16 -7
  55. data/src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc +80 -0
  56. data/src/core/ext/transport/chttp2/transport/ping_abuse_policy.h +55 -0
  57. data/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc +98 -0
  58. data/src/core/ext/transport/chttp2/transport/ping_rate_policy.h +73 -0
  59. data/src/core/ext/transport/chttp2/transport/writing.cc +81 -89
  60. data/src/core/ext/transport/inproc/inproc_transport.cc +1 -0
  61. data/src/core/ext/xds/xds_client_grpc.cc +1 -0
  62. data/src/core/ext/xds/xds_http_fault_filter.cc +1 -2
  63. data/src/core/ext/xds/xds_http_fault_filter.h +1 -2
  64. data/src/core/ext/xds/xds_http_filters.h +2 -4
  65. data/src/core/ext/xds/xds_http_rbac_filter.cc +3 -8
  66. data/src/core/ext/xds/xds_http_rbac_filter.h +1 -2
  67. data/src/core/ext/xds/xds_http_stateful_session_filter.cc +1 -2
  68. data/src/core/ext/xds/xds_http_stateful_session_filter.h +1 -2
  69. data/src/core/ext/xds/xds_lb_policy_registry.cc +3 -6
  70. data/src/core/ext/xds/xds_routing.cc +2 -2
  71. data/src/core/ext/xds/xds_transport_grpc.cc +1 -0
  72. data/src/core/lib/avl/avl.h +10 -173
  73. data/src/core/lib/channel/call_tracer.cc +289 -0
  74. data/src/core/lib/channel/call_tracer.h +35 -0
  75. data/src/core/lib/channel/channel_args.cc +84 -79
  76. data/src/core/lib/channel/channel_args.h +29 -17
  77. data/src/core/lib/channel/connected_channel.cc +0 -1
  78. data/src/core/lib/channel/promise_based_filter.cc +4 -1
  79. data/src/core/lib/compression/compression_internal.cc +8 -4
  80. data/src/core/lib/debug/stats_data.cc +93 -21
  81. data/src/core/lib/debug/stats_data.h +41 -0
  82. data/src/core/lib/event_engine/ares_resolver.cc +712 -0
  83. data/src/core/lib/event_engine/ares_resolver.h +150 -0
  84. data/src/core/lib/event_engine/cf_engine/cf_engine.cc +9 -3
  85. data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc +2 -2
  86. data/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc +229 -0
  87. data/src/core/lib/event_engine/cf_engine/dns_service_resolver.h +117 -0
  88. data/src/core/lib/event_engine/forkable.cc +15 -1
  89. data/src/core/lib/event_engine/forkable.h +15 -0
  90. data/src/core/lib/event_engine/grpc_polled_fd.h +73 -0
  91. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +25 -3
  92. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +10 -1
  93. data/src/core/lib/event_engine/posix_engine/grpc_polled_fd_posix.h +197 -0
  94. data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +3 -3
  95. data/src/core/lib/event_engine/posix_engine/posix_engine.cc +47 -1
  96. data/src/core/lib/event_engine/posix_engine/posix_engine.h +12 -1
  97. data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc +1 -0
  98. data/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc +2 -0
  99. data/src/core/lib/event_engine/thread_pool/thread_count.cc +58 -0
  100. data/src/core/lib/event_engine/thread_pool/thread_count.h +161 -0
  101. data/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc +7 -0
  102. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc +86 -111
  103. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h +15 -61
  104. data/src/core/lib/event_engine/windows/windows_endpoint.cc +3 -4
  105. data/src/core/lib/experiments/config.cc +14 -0
  106. data/src/core/lib/experiments/experiments.cc +141 -304
  107. data/src/core/lib/experiments/experiments.h +16 -17
  108. data/src/core/lib/gprpp/ref_counted.h +3 -1
  109. data/src/core/lib/gprpp/ref_counted_string.cc +44 -0
  110. data/src/core/lib/gprpp/ref_counted_string.h +146 -0
  111. data/src/core/lib/gprpp/time.h +2 -2
  112. data/src/core/lib/gprpp/work_serializer.cc +36 -0
  113. data/src/core/lib/gprpp/work_serializer.h +5 -0
  114. data/src/core/lib/http/httpcli_security_connector.cc +1 -0
  115. data/src/core/lib/iomgr/buffer_list.cc +2 -0
  116. data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +7 -22
  117. data/src/core/lib/iomgr/tcp_posix.cc +3 -3
  118. data/src/core/lib/promise/detail/basic_seq.h +1 -372
  119. data/src/core/lib/promise/detail/seq_state.h +2076 -0
  120. data/src/core/lib/promise/seq.h +19 -2
  121. data/src/core/lib/promise/sleep.h +5 -10
  122. data/src/core/lib/promise/try_seq.h +34 -2
  123. data/src/core/lib/resource_quota/api.cc +1 -0
  124. data/src/core/lib/resource_quota/arena.cc +2 -0
  125. data/src/core/lib/resource_quota/arena.h +42 -8
  126. data/src/core/lib/resource_quota/memory_quota.cc +0 -1
  127. data/src/core/lib/resource_quota/resource_quota.h +1 -0
  128. data/src/core/lib/security/authorization/authorization_policy_provider.h +1 -1
  129. data/src/core/lib/security/authorization/rbac_policy.h +1 -1
  130. data/src/core/lib/security/credentials/external/aws_request_signer.cc +8 -0
  131. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +1 -0
  132. data/src/core/lib/security/credentials/jwt/json_token.cc +17 -0
  133. data/src/core/lib/security/credentials/jwt/json_token.h +4 -0
  134. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +42 -0
  135. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +1 -0
  136. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +4 -0
  137. data/src/core/lib/security/credentials/tls/tls_credentials.cc +1 -0
  138. data/src/core/lib/security/credentials/xds/xds_credentials.cc +1 -0
  139. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +1 -0
  140. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +1 -0
  141. data/src/core/lib/security/security_connector/ssl_utils.cc +1 -0
  142. data/src/core/lib/security/transport/client_auth_filter.cc +8 -5
  143. data/src/core/lib/security/transport/security_handshaker.cc +1 -0
  144. data/src/core/lib/security/transport/server_auth_filter.cc +2 -0
  145. data/src/core/lib/surface/call.cc +32 -8
  146. data/src/core/lib/surface/channel.cc +1 -0
  147. data/src/core/lib/surface/completion_queue.cc +10 -0
  148. data/src/core/lib/surface/init.cc +1 -0
  149. data/src/core/lib/surface/server.cc +67 -64
  150. data/src/core/lib/surface/server.h +1 -15
  151. data/src/core/lib/surface/version.cc +2 -2
  152. data/src/core/tsi/alts/crypt/aes_gcm.cc +27 -2
  153. data/src/core/tsi/ssl_transport_security.cc +11 -0
  154. data/src/ruby/ext/grpc/rb_channel.c +1 -53
  155. data/src/ruby/lib/grpc/generic/active_call.rb +9 -14
  156. data/src/ruby/lib/grpc/version.rb +1 -1
  157. data/src/ruby/pb/test/client.rb +16 -0
  158. data/src/ruby/spec/generic/rpc_server_spec.rb +3 -3
  159. data/third_party/abseil-cpp/absl/algorithm/container.h +3 -2
  160. data/third_party/abseil-cpp/absl/base/attributes.h +58 -5
  161. data/third_party/abseil-cpp/absl/base/call_once.h +1 -1
  162. data/third_party/abseil-cpp/absl/base/casts.h +8 -8
  163. data/third_party/abseil-cpp/absl/base/config.h +89 -106
  164. data/third_party/abseil-cpp/absl/base/dynamic_annotations.h +26 -1
  165. data/third_party/abseil-cpp/absl/base/internal/direct_mmap.h +2 -2
  166. data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc +50 -39
  167. data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h +2 -1
  168. data/third_party/abseil-cpp/absl/base/internal/prefetch.h +17 -18
  169. data/third_party/abseil-cpp/absl/base/internal/raw_logging.cc +32 -3
  170. data/third_party/abseil-cpp/absl/base/internal/raw_logging.h +24 -4
  171. data/third_party/abseil-cpp/absl/base/internal/sysinfo.cc +31 -73
  172. data/third_party/abseil-cpp/absl/base/internal/thread_identity.cc +9 -8
  173. data/third_party/abseil-cpp/absl/base/internal/thread_identity.h +11 -11
  174. data/third_party/abseil-cpp/absl/base/internal/throw_delegate.cc +23 -32
  175. data/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc +2 -3
  176. data/third_party/abseil-cpp/absl/base/options.h +1 -1
  177. data/third_party/abseil-cpp/absl/base/policy_checks.h +3 -3
  178. data/third_party/abseil-cpp/absl/base/prefetch.h +198 -0
  179. data/third_party/abseil-cpp/absl/container/fixed_array.h +54 -29
  180. data/third_party/abseil-cpp/absl/container/flat_hash_map.h +5 -1
  181. data/third_party/abseil-cpp/absl/container/flat_hash_set.h +6 -2
  182. data/third_party/abseil-cpp/absl/container/inlined_vector.h +167 -79
  183. data/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h +1 -1
  184. data/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h +3 -21
  185. data/third_party/abseil-cpp/absl/container/internal/container_memory.h +1 -1
  186. data/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h +46 -0
  187. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +2 -0
  188. data/third_party/abseil-cpp/absl/container/internal/inlined_vector.h +85 -26
  189. data/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h +35 -18
  190. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +70 -29
  191. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +437 -236
  192. data/third_party/abseil-cpp/absl/crc/crc32c.h +8 -1
  193. data/third_party/abseil-cpp/absl/crc/internal/cpu_detect.cc +14 -8
  194. data/third_party/abseil-cpp/absl/crc/internal/crc.cc +4 -35
  195. data/third_party/abseil-cpp/absl/crc/internal/crc.h +2 -10
  196. data/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h +1 -1
  197. data/third_party/abseil-cpp/absl/crc/internal/crc_cord_state.cc +1 -1
  198. data/third_party/abseil-cpp/absl/crc/internal/crc_cord_state.h +4 -4
  199. data/third_party/abseil-cpp/absl/crc/internal/crc_internal.h +8 -10
  200. data/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_x86_64.cc +17 -19
  201. data/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc +8 -8
  202. data/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h +2 -1
  203. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +59 -23
  204. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +1 -1
  205. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +1 -1
  206. data/third_party/abseil-cpp/absl/debugging/internal/symbolize.h +1 -1
  207. data/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +43 -19
  208. data/third_party/abseil-cpp/absl/debugging/symbolize_emscripten.inc +3 -0
  209. data/third_party/abseil-cpp/absl/flags/commandlineflag.h +1 -1
  210. data/third_party/abseil-cpp/absl/flags/internal/commandlineflag.cc +1 -1
  211. data/third_party/abseil-cpp/absl/flags/internal/flag.cc +2 -2
  212. data/third_party/abseil-cpp/absl/flags/internal/flag.h +16 -15
  213. data/third_party/abseil-cpp/absl/flags/internal/flag_msvc.inc +1 -1
  214. data/third_party/abseil-cpp/absl/flags/marshalling.cc +43 -2
  215. data/third_party/abseil-cpp/absl/flags/marshalling.h +5 -0
  216. data/third_party/abseil-cpp/absl/functional/any_invocable.h +9 -1
  217. data/third_party/abseil-cpp/absl/functional/bind_front.h +1 -1
  218. data/third_party/abseil-cpp/absl/functional/function_ref.h +3 -3
  219. data/third_party/abseil-cpp/absl/functional/internal/any_invocable.h +37 -24
  220. data/third_party/abseil-cpp/absl/functional/internal/function_ref.h +19 -9
  221. data/third_party/abseil-cpp/absl/hash/hash.h +7 -4
  222. data/third_party/abseil-cpp/absl/hash/internal/hash.h +38 -15
  223. data/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc +6 -0
  224. data/third_party/abseil-cpp/absl/meta/type_traits.h +48 -373
  225. data/third_party/abseil-cpp/absl/numeric/bits.h +4 -4
  226. data/third_party/abseil-cpp/absl/numeric/int128.cc +20 -8
  227. data/third_party/abseil-cpp/absl/numeric/int128.h +36 -39
  228. data/third_party/abseil-cpp/absl/numeric/int128_have_intrinsic.inc +0 -3
  229. data/third_party/abseil-cpp/absl/numeric/int128_no_intrinsic.inc +47 -30
  230. data/third_party/abseil-cpp/absl/random/internal/fast_uniform_bits.h +4 -3
  231. data/third_party/abseil-cpp/absl/random/internal/generate_real.h +1 -1
  232. data/third_party/abseil-cpp/absl/random/internal/platform.h +1 -1
  233. data/third_party/abseil-cpp/absl/random/internal/randen_detect.cc +4 -0
  234. data/third_party/abseil-cpp/absl/random/internal/randen_engine.h +1 -1
  235. data/third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc +1 -1
  236. data/third_party/abseil-cpp/absl/random/internal/uniform_helper.h +1 -1
  237. data/third_party/abseil-cpp/absl/status/internal/status_internal.h +4 -0
  238. data/third_party/abseil-cpp/absl/status/internal/statusor_internal.h +12 -24
  239. data/third_party/abseil-cpp/absl/status/status.cc +11 -7
  240. data/third_party/abseil-cpp/absl/status/status.h +11 -2
  241. data/third_party/abseil-cpp/absl/status/statusor.h +22 -8
  242. data/third_party/abseil-cpp/absl/strings/ascii.cc +54 -6
  243. data/third_party/abseil-cpp/absl/strings/charconv.cc +21 -4
  244. data/third_party/abseil-cpp/absl/strings/charconv.h +2 -2
  245. data/third_party/abseil-cpp/absl/strings/cord.cc +1 -2
  246. data/third_party/abseil-cpp/absl/strings/cord.h +32 -5
  247. data/third_party/abseil-cpp/absl/strings/cord_analysis.cc +23 -1
  248. data/third_party/abseil-cpp/absl/strings/cord_analysis.h +18 -0
  249. data/third_party/abseil-cpp/absl/strings/cord_buffer.h +2 -5
  250. data/third_party/abseil-cpp/absl/strings/escaping.cc +10 -32
  251. data/third_party/abseil-cpp/absl/strings/escaping.h +1 -1
  252. data/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc +2 -4
  253. data/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.h +3 -3
  254. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc +0 -1
  255. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +15 -13
  256. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc +13 -4
  257. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.h +8 -0
  258. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc +5 -3
  259. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.h +4 -7
  260. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h +8 -0
  261. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h +2 -2
  262. data/third_party/abseil-cpp/absl/strings/internal/cordz_handle.cc +46 -20
  263. data/third_party/abseil-cpp/absl/strings/internal/cordz_handle.h +1 -34
  264. data/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc +2 -1
  265. data/third_party/abseil-cpp/absl/strings/internal/escaping.cc +23 -0
  266. data/third_party/abseil-cpp/absl/strings/internal/escaping.h +1 -0
  267. data/third_party/abseil-cpp/absl/strings/internal/memutil.cc +2 -77
  268. data/third_party/abseil-cpp/absl/strings/internal/memutil.h +4 -112
  269. data/third_party/abseil-cpp/absl/strings/internal/stl_type_traits.h +1 -1
  270. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +10 -31
  271. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +8 -8
  272. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h +5 -20
  273. data/third_party/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h +1 -0
  274. data/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h +1 -1
  275. data/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc +9 -9
  276. data/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h +56 -6
  277. data/third_party/abseil-cpp/absl/strings/match.cc +87 -0
  278. data/third_party/abseil-cpp/absl/strings/match.h +19 -0
  279. data/third_party/abseil-cpp/absl/strings/numbers.cc +154 -122
  280. data/third_party/abseil-cpp/absl/strings/numbers.h +1 -6
  281. data/third_party/abseil-cpp/absl/strings/str_cat.cc +7 -50
  282. data/third_party/abseil-cpp/absl/strings/str_cat.h +83 -15
  283. data/third_party/abseil-cpp/absl/strings/str_format.h +6 -3
  284. data/third_party/abseil-cpp/absl/strings/str_split.cc +9 -6
  285. data/third_party/abseil-cpp/absl/strings/string_view.cc +26 -4
  286. data/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc +5 -0
  287. data/third_party/abseil-cpp/absl/synchronization/internal/futex.h +63 -43
  288. data/third_party/abseil-cpp/absl/synchronization/internal/futex_waiter.cc +111 -0
  289. data/third_party/abseil-cpp/absl/synchronization/internal/futex_waiter.h +63 -0
  290. data/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc +11 -7
  291. data/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc +225 -0
  292. data/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h +122 -114
  293. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc +12 -8
  294. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h +10 -1
  295. data/third_party/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc +167 -0
  296. data/third_party/abseil-cpp/absl/synchronization/internal/pthread_waiter.h +60 -0
  297. data/third_party/abseil-cpp/absl/synchronization/internal/sem_waiter.cc +122 -0
  298. data/third_party/abseil-cpp/absl/synchronization/internal/sem_waiter.h +65 -0
  299. data/third_party/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc +91 -0
  300. data/third_party/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.h +56 -0
  301. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.h +19 -113
  302. data/third_party/abseil-cpp/absl/synchronization/internal/waiter_base.cc +42 -0
  303. data/third_party/abseil-cpp/absl/synchronization/internal/waiter_base.h +90 -0
  304. data/third_party/abseil-cpp/absl/synchronization/internal/win32_waiter.cc +151 -0
  305. data/third_party/abseil-cpp/absl/synchronization/internal/win32_waiter.h +70 -0
  306. data/third_party/abseil-cpp/absl/synchronization/mutex.cc +407 -411
  307. data/third_party/abseil-cpp/absl/synchronization/mutex.h +152 -118
  308. data/third_party/abseil-cpp/absl/time/clock.cc +6 -7
  309. data/third_party/abseil-cpp/absl/time/duration.cc +24 -26
  310. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h +1 -0
  311. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc +1 -1
  312. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc +3 -3
  313. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc +8 -6
  314. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h +6 -3
  315. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc +4 -2
  316. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h +4 -0
  317. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc +322 -295
  318. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h +8 -17
  319. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc +51 -33
  320. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h +7 -2
  321. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc +128 -2
  322. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h +1 -1
  323. data/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +5 -1
  324. data/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc +34 -34
  325. data/third_party/abseil-cpp/absl/time/time.cc +9 -2
  326. data/third_party/abseil-cpp/absl/time/time.h +115 -15
  327. data/third_party/abseil-cpp/absl/types/internal/optional.h +0 -52
  328. data/third_party/abseil-cpp/absl/types/internal/span.h +2 -2
  329. data/third_party/abseil-cpp/absl/types/internal/variant.h +2 -2
  330. data/third_party/abseil-cpp/absl/types/optional.h +15 -13
  331. data/third_party/abseil-cpp/absl/types/span.h +1 -2
  332. data/third_party/boringssl-with-bazel/err_data.c +15 -14
  333. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c +7 -3
  334. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c +7 -7
  335. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c +7 -3
  336. data/third_party/boringssl-with-bazel/src/crypto/bio/connect.c +5 -5
  337. data/third_party/boringssl-with-bazel/src/crypto/bio/errno.c +92 -0
  338. data/third_party/boringssl-with-bazel/src/crypto/bio/fd.c +4 -48
  339. data/third_party/boringssl-with-bazel/src/crypto/bio/file.c +11 -6
  340. data/third_party/boringssl-with-bazel/src/crypto/bio/internal.h +16 -6
  341. data/third_party/boringssl-with-bazel/src/crypto/bio/socket.c +2 -2
  342. data/third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c +9 -0
  343. data/third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c +1 -1
  344. data/third_party/boringssl-with-bazel/src/crypto/conf/conf.c +3 -7
  345. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_apple.c +0 -2
  346. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_fuchsia.c +0 -1
  347. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_linux.c +0 -2
  348. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_openbsd.c +0 -1
  349. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_sysreg.c +93 -0
  350. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_win.c +1 -1
  351. data/third_party/boringssl-with-bazel/src/crypto/cpu_arm_freebsd.c +0 -1
  352. data/third_party/boringssl-with-bazel/src/crypto/cpu_arm_linux.c +0 -2
  353. data/third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c +4 -0
  354. data/third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c +5 -0
  355. data/third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c +9 -14
  356. data/third_party/boringssl-with-bazel/src/crypto/ec_extra/internal.h +4 -6
  357. data/third_party/boringssl-with-bazel/src/crypto/err/err.c +10 -11
  358. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/dh/check.c +37 -8
  359. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/dh/dh.c +38 -19
  360. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/dh/internal.h +7 -0
  361. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/self_check/self_check.c +39 -16
  362. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/service_indicator/service_indicator.c +4 -7
  363. data/third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c +7 -0
  364. data/third_party/boringssl-with-bazel/src/crypto/internal.h +13 -21
  365. data/third_party/boringssl-with-bazel/src/crypto/obj/obj.c +6 -23
  366. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/getentropy.c +4 -0
  367. data/third_party/boringssl-with-bazel/src/crypto/stack/stack.c +5 -0
  368. data/third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c +1 -6
  369. data/third_party/boringssl-with-bazel/src/crypto/x509/by_file.c +0 -3
  370. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c +1 -3
  371. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c +2 -3
  372. data/third_party/boringssl-with-bazel/src/include/openssl/base.h +9 -1
  373. data/third_party/boringssl-with-bazel/src/include/openssl/bio.h +11 -1
  374. data/third_party/boringssl-with-bazel/src/include/openssl/dh.h +4 -1
  375. data/third_party/boringssl-with-bazel/src/include/openssl/ec.h +7 -9
  376. data/third_party/boringssl-with-bazel/src/include/openssl/hpke.h +6 -2
  377. data/third_party/boringssl-with-bazel/src/include/openssl/span.h +22 -20
  378. data/third_party/boringssl-with-bazel/src/include/openssl/ssl.h +4 -0
  379. data/third_party/boringssl-with-bazel/src/include/openssl/stack.h +20 -12
  380. data/third_party/boringssl-with-bazel/src/include/openssl/target.h +50 -3
  381. data/third_party/boringssl-with-bazel/src/include/openssl/x509.h +0 -4
  382. data/third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc +2 -2
  383. data/third_party/boringssl-with-bazel/src/ssl/ssl_file.cc +4 -4
  384. data/third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc +2 -0
  385. metadata +39 -31
  386. data/src/core/lib/promise/detail/basic_join.h +0 -197
  387. data/src/core/lib/promise/detail/switch.h +0 -1455
  388. data/src/core/lib/promise/try_join.h +0 -82
  389. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc +0 -403
  390. data/third_party/boringssl-with-bazel/src/crypto/cpu_aarch64_freebsd.c +0 -62
  391. data/third_party/boringssl-with-bazel/src/crypto/cpu_arm.c +0 -38
  392. data/third_party/boringssl-with-bazel/src/crypto/cpu_arm_openbsd.c +0 -31
  393. data/third_party/re2/util/benchmark.h +0 -156
  394. data/third_party/re2/util/flags.h +0 -26
  395. data/third_party/re2/util/malloc_counter.h +0 -19
  396. data/third_party/re2/util/pcre.cc +0 -1025
  397. data/third_party/re2/util/pcre.h +0 -681
  398. data/third_party/re2/util/test.h +0 -50
  399. data/third_party/upb/upb/mini_table.h +0 -36
  400. data/third_party/zlib/gzclose.c +0 -25
  401. data/third_party/zlib/gzlib.c +0 -639
  402. data/third_party/zlib/gzread.c +0 -650
  403. data/third_party/zlib/gzwrite.c +0 -677
data/third_party/abseil-cpp/absl/synchronization/mutex.cc
@@ -35,10 +35,9 @@

  #include <algorithm>
  #include <atomic>
- #include <cinttypes>
  #include <cstddef>
+ #include <cstdlib>
  #include <cstring>
- #include <iterator>
  #include <thread> // NOLINT(build/c++11)

  #include "absl/base/attributes.h"
@@ -55,7 +54,6 @@
  #include "absl/base/internal/thread_identity.h"
  #include "absl/base/internal/tsan_mutex_interface.h"
  #include "absl/base/optimization.h"
- #include "absl/base/port.h"
  #include "absl/debugging/stacktrace.h"
  #include "absl/debugging/symbolize.h"
  #include "absl/synchronization/internal/graphcycles.h"
@@ -63,6 +61,7 @@
  #include "absl/time/time.h"

  using absl::base_internal::CurrentThreadIdentityIfPresent;
+ using absl::base_internal::CycleClock;
  using absl::base_internal::PerThreadSynch;
  using absl::base_internal::SchedulingGuard;
  using absl::base_internal::ThreadIdentity;
@@ -98,18 +97,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
  absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
  submit_profile_data;
  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
- const char *msg, const void *obj, int64_t wait_cycles)>
+ const char* msg, const void* obj, int64_t wait_cycles)>
  mutex_tracer;
  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
- cond_var_tracer;
- ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(absl::Symbolize);
+ absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+ cond_var_tracer;

  } // namespace

- static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
  bool locking, bool trylock,
  bool read_lock);

@@ -117,19 +113,15 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
  }

- void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
  int64_t wait_cycles)) {
  mutex_tracer.Store(fn);
  }

- void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+ void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
  cond_var_tracer.Store(fn);
  }

- void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
- }
-
  namespace {
  // Represents the strategy for spin and yield.
  // See the comment in GetMutexGlobals() for more information.
@@ -148,25 +140,24 @@ absl::Duration MeasureTimeToYield() {
  return absl::Now() - before;
  }

- const MutexGlobals &GetMutexGlobals() {
+ const MutexGlobals& GetMutexGlobals() {
  ABSL_CONST_INIT static MutexGlobals data;
  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep.
- // Real-time threads are often unable to yield, so the sleep time needs
- // to be long enough to keep the calling thread asleep until scheduling
- // happens.
- // If this is multiprocessor, allow spinning. If the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
+ if (absl::base_internal::NumCPUs() > 1) {
+ // If this is multiprocessor, allow spinning. If the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+ // for any thread to make progress gets it without undue delay.
+ data.spinloop_iterations = 1500;
  data.mutex_sleep_spins[AGGRESSIVE] = 5000;
  data.mutex_sleep_spins[GENTLE] = 250;
  data.mutex_sleep_time = absl::Microseconds(10);
  } else {
+ // If this a uniprocessor, only yield/sleep. Real-time threads are often
+ // unable to yield, so the sleep time needs to be long enough to keep
+ // the calling thread asleep until scheduling happens.
+ data.spinloop_iterations = 0;
  data.mutex_sleep_spins[AGGRESSIVE] = 0;
  data.mutex_sleep_spins[GENTLE] = 0;
  data.mutex_sleep_time = MeasureTimeToYield() * 5;
@@ -219,8 +210,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
  v = pv->load(std::memory_order_relaxed);
  } while ((v & bits) != bits &&
  ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
  std::memory_order_relaxed)));
  }

@@ -235,8 +225,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
  v = pv->load(std::memory_order_relaxed);
  } while ((v & bits) != 0 &&
  ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
  std::memory_order_relaxed)));
  }

@@ -247,7 +236,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
  absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

  // Graph used to detect deadlocks.
- ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ ABSL_CONST_INIT static GraphCycles* deadlock_graph
  ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);

  //------------------------------------------------------------------
@@ -291,7 +280,7 @@ enum { // Event flags
  // Properties of the events.
  static const struct {
  int flags;
- const char *msg;
+ const char* msg;
  } event_properties[] = {
  {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
  {0, "TryLock failed "},
@@ -316,12 +305,12 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
  // Can't be too small, as it's used for deadlock detection information.
  static constexpr uint32_t kNSynchEvent = 1031;

- static struct SynchEvent { // this is a trivial hash table for the events
+ static struct SynchEvent { // this is a trivial hash table for the events
  // struct is freed when refcount reaches 0
  int refcount ABSL_GUARDED_BY(synch_event_mu);

  // buckets have linear, 0-terminated chains
- SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+ SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);

  // Constant after initialization
  uintptr_t masked_addr; // object at this address is called "name"
@@ -329,13 +318,13 @@ static struct SynchEvent { // this is a trivial hash table for the events
  // No explicit synchronization used. Instead we assume that the
  // client who enables/disables invariants/logging on a Mutex does so
  // while the Mutex is not being concurrently accessed by others.
- void (*invariant)(void *arg); // called on each event
- void *arg; // first arg to (*invariant)()
- bool log; // logging turned on
+ void (*invariant)(void* arg); // called on each event
+ void* arg; // first arg to (*invariant)()
+ bool log; // logging turned on

  // Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
- } * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+ char name[1]; // actually longer---NUL-terminated string
+ }* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);

  // Ensure that the object at "addr" has a SynchEvent struct associated with it,
  // set "bits" in the word there (waiting until lockbit is clear before doing
@@ -344,11 +333,11 @@ static struct SynchEvent { // this is a trivial hash table for the events
  // the string name is copied into it.
  // When used with a mutex, the caller should also ensure that kMuEvent
  // is set in the mutex word, and similarly for condition variables and kCVEvent.
- static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
- const char *name, intptr_t bits,
+ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+ const char* name, intptr_t bits,
  intptr_t lockbit) {
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
  // first look for existing SynchEvent struct..
  synch_event_mu.Lock();
  for (e = synch_event[h];
@@ -360,9 +349,9 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
  name = "";
  }
  size_t l = strlen(name);
- e = reinterpret_cast<SynchEvent *>(
+ e = reinterpret_cast<SynchEvent*>(
  base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
- e->refcount = 2; // one for return value, one for linked list
+ e->refcount = 2; // one for return value, one for linked list
  e->masked_addr = base_internal::HidePtr(addr);
  e->invariant = nullptr;
  e->arg = nullptr;
@@ -372,19 +361,19 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
  AtomicSetBits(addr, bits, lockbit);
  synch_event[h] = e;
  } else {
- e->refcount++; // for return value
+ e->refcount++; // for return value
  }
  synch_event_mu.Unlock();
  return e;
  }

  // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
- static void DeleteSynchEvent(SynchEvent *e) {
+ static void DeleteSynchEvent(SynchEvent* e) {
  base_internal::LowLevelAlloc::Free(e);
  }

  // Decrement the reference count of *e, or do nothing if e==null.
- static void UnrefSynchEvent(SynchEvent *e) {
+ static void UnrefSynchEvent(SynchEvent* e) {
  if (e != nullptr) {
  synch_event_mu.Lock();
  bool del = (--(e->refcount) == 0);
@@ -398,11 +387,11 @@ static void UnrefSynchEvent(SynchEvent *e) {
  // Forget the mapping from the object (Mutex or CondVar) at address addr
  // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
  // is clear before doing so).
- static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+ static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
  intptr_t lockbit) {
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent **pe;
- SynchEvent *e;
+ SynchEvent** pe;
+ SynchEvent* e;
  synch_event_mu.Lock();
  for (pe = &synch_event[h];
  (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -423,9 +412,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
  // Return a refcounted reference to the SynchEvent of the object at address
  // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
  // called.
- static SynchEvent *GetSynchEvent(const void *addr) {
+ static SynchEvent* GetSynchEvent(const void* addr) {
  uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
  synch_event_mu.Lock();
  for (e = synch_event[h];
  e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -440,17 +429,17 @@ static SynchEvent *GetSynchEvent(const void *addr) {

  // Called when an event "ev" occurs on a Mutex of CondVar "obj"
  // if event recording is on
- static void PostSynchEvent(void *obj, int ev) {
- SynchEvent *e = GetSynchEvent(obj);
+ static void PostSynchEvent(void* obj, int ev) {
+ SynchEvent* e = GetSynchEvent(obj);
  // logging is on if event recording is on and either there's no event struct,
  // or it explicitly says to log
  if (e == nullptr || e->log) {
- void *pcs[40];
+ void* pcs[40];
  int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
  // A buffer with enough space for the ASCII for all the PCs, even on a
  // 64-bit machine.
  char buffer[ABSL_ARRAYSIZE(pcs) * 24];
- int pos = snprintf(buffer, sizeof (buffer), " @");
+ int pos = snprintf(buffer, sizeof(buffer), " @");
  for (int i = 0; i != n; i++) {
  int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
  " %p", pcs[i]);
@@ -472,13 +461,13 @@ static void PostSynchEvent(void *obj, int ev) {
  // get false positive race reports later.
  // Reuse EvalConditionAnnotated to properly call into user code.
  struct local {
- static bool pred(SynchEvent *ev) {
+ static bool pred(SynchEvent* ev) {
  (*ev->invariant)(ev->arg);
  return false;
  }
  };
  Condition cond(&local::pred, e);
- Mutex *mu = static_cast<Mutex *>(obj);
+ Mutex* mu = static_cast<Mutex*>(obj);
  const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
  const bool trylock = (flags & SYNCH_F_TRY) != 0;
  const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -504,32 +493,32 @@ static void PostSynchEvent(void *obj, int ev) {
  // PerThreadSynch struct points at the most recent SynchWaitParams struct when
  // the thread is on a Mutex's waiter queue.
  struct SynchWaitParams {
- SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
- KernelTimeout timeout_arg, Mutex *cvmu_arg,
- PerThreadSynch *thread_arg,
- std::atomic<intptr_t> *cv_word_arg)
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+ KernelTimeout timeout_arg, Mutex* cvmu_arg,
+ PerThreadSynch* thread_arg,
+ std::atomic<intptr_t>* cv_word_arg)
  : how(how_arg),
  cond(cond_arg),
  timeout(timeout_arg),
  cvmu(cvmu_arg),
  thread(thread_arg),
  cv_word(cv_word_arg),
- contention_start_cycles(base_internal::CycleClock::Now()),
+ contention_start_cycles(CycleClock::Now()),
  should_submit_contention_data(false) {}

  const Mutex::MuHow how; // How this thread needs to wait.
- const Condition *cond; // The condition that this thread is waiting for.
- // In Mutex, this field is set to zero if a timeout
- // expires.
+ const Condition* cond; // The condition that this thread is waiting for.
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
  KernelTimeout timeout; // timeout expiry---absolute time
  // In Mutex, this field is set to zero if a timeout
  // expires.
- Mutex *const cvmu; // used for transfer from cond var to mutex
- PerThreadSynch *const thread; // thread that is waiting
+ Mutex* const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch* const thread; // thread that is waiting

  // If not null, thread should be enqueued on the CondVar whose state
  // word is cv_word instead of queueing normally on the Mutex.
- std::atomic<intptr_t> *cv_word;
+ std::atomic<intptr_t>* cv_word;

  int64_t contention_start_cycles; // Time (in cycles) when this thread started
  // to contend for the mutex.
@@ -537,12 +526,12 @@ struct SynchWaitParams {
  };

  struct SynchLocksHeld {
- int n; // number of valid entries in locks[]
- bool overflow; // true iff we overflowed the array at some point
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
  struct {
- Mutex *mu; // lock acquired
- int32_t count; // times acquired
- GraphId id; // deadlock_graph id of acquired lock
+ Mutex* mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
  } locks[40];
  // If a thread overfills the array during deadlock detection, we
  // continue, discarding information as needed. If no overflow has
@@ -552,11 +541,11 @@ struct SynchLocksHeld {

  // A sentinel value in lists that is not 0.
  // A 0 value is used to mean "not on a list".
- static PerThreadSynch *const kPerThreadSynchNull =
- reinterpret_cast<PerThreadSynch *>(1);
+ static PerThreadSynch* const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch*>(1);

- static SynchLocksHeld *LocksHeldAlloc() {
- SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+ static SynchLocksHeld* LocksHeldAlloc() {
+ SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
  base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
  ret->n = 0;
  ret->overflow = false;
@@ -564,24 +553,24 @@ static SynchLocksHeld *LocksHeldAlloc() {
  }

  // Return the PerThreadSynch-struct for this thread.
- static PerThreadSynch *Synch_GetPerThread() {
- ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+ static PerThreadSynch* Synch_GetPerThread() {
+ ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
  return &identity->per_thread_synch;
  }

- static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+ static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
  if (mu) {
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  }
- PerThreadSynch *w = Synch_GetPerThread();
+ PerThreadSynch* w = Synch_GetPerThread();
  if (mu) {
  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
  }
  return w;
  }

- static SynchLocksHeld *Synch_GetAllLocks() {
- PerThreadSynch *s = Synch_GetPerThread();
+ static SynchLocksHeld* Synch_GetAllLocks() {
+ PerThreadSynch* s = Synch_GetPerThread();
  if (s->all_locks == nullptr) {
  s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
  }
@@ -589,7 +578,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
  }

  // Post on "w"'s associated PerThreadSem.
- void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+ void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
  if (mu) {
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  // We miss synchronization around passing PerThreadSynch between threads
@@ -605,7 +594,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
  }

  // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
- bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+ bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
  if (mu) {
  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  }
@@ -626,7 +615,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
  // Mutex code checking that the "waitp" field has not been reused.
  void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
  // Fix the per-thread state only if it exists.
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
  if (identity != nullptr) {
  identity->per_thread_synch.suppress_fatal_errors = true;
  }
@@ -635,21 +624,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
  std::memory_order_release);
  }

- // --------------------------time support
-
- // Return the current time plus the timeout. Use the same clock as
- // PerThreadSem::Wait() for consistency. Unfortunately, we don't have
- // such a choice when a deadline is given directly.
- static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
- #ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
- #else
- return absl::Now() + timeout;
- #endif
- }
-
  // --------------------------Mutexes

  // In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -660,24 +634,29 @@ static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
  // bit-twiddling trick in Mutex::Unlock().
  // o kMuWriter / kMuReader == kMuWrWait / kMuWait,
  // to enable the bit-twiddling trick in CheckForMutexCorruption().
- static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
- static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
- static const intptr_t kMuWait = 0x0004L; // threads are waiting
- static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
- static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+ static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+ // There's a designated waker.
  // INVARIANT1: there's a thread that was blocked on the mutex, is
  // no longer, yet has not yet acquired the mutex. If there's a
  // designated waker, all threads can avoid taking the slow path in
  // unlock because the designated waker will subsequently acquire
  // the lock and wake someone. To maintain INVARIANT1 the bit is
  // set when a thread is unblocked(INV1a), and threads that were
- // unblocked reset the bit when they either acquire or re-block
- // (INV1b).
- static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
- static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
- static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
- static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+ // unblocked reset the bit when they either acquire or re-block (INV1b).
+ static const intptr_t kMuDesig = 0x0002L;
+ static const intptr_t kMuWait = 0x0004L; // threads are waiting
+ static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+ static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+ // Runnable writer is waiting for a reader.
+ // If set, new readers will not lock the mutex to avoid writer starvation.
+ // Note: if a reader has higher priority than the writer, it will still lock
+ // the mutex ahead of the waiting writer, but in a very inefficient manner:
+ // the reader will first queue itself and block, but then the last unlocking
+ // reader will wake it.
+ static const intptr_t kMuWrWait = 0x0020L;
+ static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+ static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+ static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count

  // Hack to make constant values available to gdb pretty printer
  enum {
@@ -773,8 +752,8 @@ Mutex::~Mutex() {
  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
  }

- void Mutex::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+ void Mutex::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
  e->log = true;
  UnrefSynchEvent(e);
  }
@@ -783,11 +762,10 @@ void EnableMutexInvariantDebugging(bool enabled) {
  synch_check_invariants.store(enabled, std::memory_order_release);
  }

- void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
- void *arg) {
+ void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
  if (synch_check_invariants.load(std::memory_order_acquire) &&
  invariant != nullptr) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
  e->invariant = invariant;
  e->arg = arg;
  UnrefSynchEvent(e);
@@ -803,15 +781,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
  // waiters with the same condition, type of lock, and thread priority.
  //
  // Requires that x and y be waiting on the same Mutex queue.
- static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
  Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
  }

  // Given the contents of a mutex word containing a PerThreadSynch pointer,
  // return the pointer.
- static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
- return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+ static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
  }

  // The next several routines maintain the per-thread next and skip fields
@@ -869,17 +847,17 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
  // except those in the added node and the former "head" node. This implies
  // that the new node is added after head, and so must be the new head or the
  // new front of the queue.
- static PerThreadSynch *Skip(PerThreadSynch *x) {
- PerThreadSynch *x0 = nullptr;
- PerThreadSynch *x1 = x;
- PerThreadSynch *x2 = x->skip;
+ static PerThreadSynch* Skip(PerThreadSynch* x) {
+ PerThreadSynch* x0 = nullptr;
+ PerThreadSynch* x1 = x;
+ PerThreadSynch* x2 = x->skip;
  if (x2 != nullptr) {
  // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
  // such that x1 == x0->skip && x2 == x1->skip
  while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
- x0->skip = x2; // short-circuit skip from x0 to x2
+ x0->skip = x2; // short-circuit skip from x0 to x2
  }
- x->skip = x1; // short-circuit skip from x to result
+ x->skip = x1; // short-circuit skip from x to result
  }
  return x1;
  }
@@ -888,7 +866,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) {
  // The latter is going to be removed out of order, because of a timeout.
  // Check whether "ancestor" has a skip field pointing to "to_be_removed",
  // and fix it if it does.
- static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+ static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
  if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
  if (to_be_removed->skip != nullptr) {
  ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
@@ -900,7 +878,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
  }
  }

- static void CondVarEnqueue(SynchWaitParams *waitp);
+ static void CondVarEnqueue(SynchWaitParams* waitp);

  // Enqueue thread "waitp->thread" on a waiter queue.
  // Called with mutex spinlock held if head != nullptr
@@ -921,8 +899,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp);
  // returned. This mechanism is used by CondVar to queue a thread on the
  // condition variable queue instead of the mutex queue in implementing Wait().
  // In this case, Enqueue() can return nullptr (if head==nullptr).
- static PerThreadSynch *Enqueue(PerThreadSynch *head,
- SynchWaitParams *waitp, intptr_t mu, int flags) {
+ static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+ intptr_t mu, int flags) {
  // If we have been given a cv_word, call CondVarEnqueue() and return
  // the previous head of the Mutex waiter queue.
  if (waitp->cv_word != nullptr) {
@@ -930,42 +908,43 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
  return head;
  }

- PerThreadSynch *s = waitp->thread;
+ PerThreadSynch* s = waitp->thread;
  ABSL_RAW_CHECK(
  s->waitp == nullptr || // normal case
  s->waitp == waitp || // Fer()---transfer from condition variable
  s->suppress_fatal_errors,
  "detected illegal recursion into Mutex code");
  s->waitp = waitp;
- s->skip = nullptr; // maintain skip invariant (see above)
- s->may_skip = true; // always true on entering queue
- s->wake = false; // not being woken
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
  s->cond_waiter = ((flags & kMuIsCond) != 0);
+ #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+ }
+ }
+ #endif
  if (head == nullptr) { // s is the only waiter
  s->next = s; // it's the only entry in the cycle
946
942
  s->readers = mu; // reader count is from mu word
947
943
  s->maybe_unlocking = false; // no one is searching an empty list
948
944
  head = s; // s is new head
949
945
  } else {
950
- PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
946
+ PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
951
947
  #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
952
- int64_t now_cycles = base_internal::CycleClock::Now();
953
- if (s->next_priority_read_cycles < now_cycles) {
954
- // Every so often, update our idea of the thread's priority.
955
- // pthread_getschedparam() is 5% of the block/wakeup time;
956
- // base_internal::CycleClock::Now() is 0.5%.
957
- int policy;
958
- struct sched_param param;
959
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
960
- if (err != 0) {
961
- ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
962
- } else {
963
- s->priority = param.sched_priority;
964
- s->next_priority_read_cycles =
965
- now_cycles +
966
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
967
- }
968
- }
969
948
  if (s->priority > head->priority) { // s's priority is above head's
970
949
  // try to put s in priority-fifo order, or failing that at the front.
971
950
  if (!head->maybe_unlocking) {
@@ -975,20 +954,20 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
975
954
  // Within a skip chain, all waiters have the same priority, so we can
976
955
  // skip forward through the chains until we find one with a lower
977
956
  // priority than the waiter to be enqueued.
978
- PerThreadSynch *advance_to = head; // next value of enqueue_after
957
+ PerThreadSynch* advance_to = head; // next value of enqueue_after
979
958
  do {
980
959
  enqueue_after = advance_to;
981
960
  // (side-effect: optimizes skip chain)
982
961
  advance_to = Skip(enqueue_after->next);
983
962
  } while (s->priority <= advance_to->priority);
984
- // termination guaranteed because s->priority > head->priority
985
- // and head is the end of a skip chain
963
+ // termination guaranteed because s->priority > head->priority
964
+ // and head is the end of a skip chain
986
965
  } else if (waitp->how == kExclusive &&
987
966
  Condition::GuaranteedEqual(waitp->cond, nullptr)) {
988
967
  // An unlocker could be scanning the queue, but we know it will recheck
989
968
  // the queue front for writers that have no condition, which is what s
990
969
  // is, so an insert at front is safe.
991
- enqueue_after = head; // add after head, at front
970
+ enqueue_after = head; // add after head, at front
992
971
  }
993
972
  }
994
973
  #endif
@@ -1013,12 +992,12 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
1013
992
  enqueue_after->skip = enqueue_after->next;
1014
993
  }
1015
994
  if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
1016
- s->skip = s->next; // s may skip to its successor
995
+ s->skip = s->next; // s may skip to its successor
1017
996
  }
1018
- } else { // enqueue not done any other way, so
1019
- // we're inserting s at the back
997
+ } else { // enqueue not done any other way, so
998
+ // we're inserting s at the back
1020
999
  // s will become new head; copy data from head into it
1021
- s->next = head->next; // add s after head
1000
+ s->next = head->next; // add s after head
1022
1001
  head->next = s;
1023
1002
  s->readers = head->readers; // reader count is from previous head
1024
1003
  s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
@@ -1037,17 +1016,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
1037
1016
  // whose last element is head. The new head element is returned, or null
1038
1017
  // if the list is made empty.
1039
1018
  // Dequeue is called with both spinlock and Mutex held.
1040
- static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
1041
- PerThreadSynch *w = pw->next;
1042
- pw->next = w->next; // snip w out of list
1043
- if (head == w) { // we removed the head
1019
+ static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
1020
+ PerThreadSynch* w = pw->next;
1021
+ pw->next = w->next; // snip w out of list
1022
+ if (head == w) { // we removed the head
1044
1023
  head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
1045
1024
  } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1046
1025
  // pw can skip to its new successor
1047
1026
  if (pw->next->skip !=
1048
1027
  nullptr) { // either skip to its successors skip target
1049
1028
  pw->skip = pw->next->skip;
1050
- } else { // or to pw's successor
1029
+ } else { // or to pw's successor
1051
1030
  pw->skip = pw->next;
1052
1031
  }
1053
1032
  }
@@ -1060,27 +1039,27 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
1060
1039
  // singly-linked list wake_list in the order found. Assumes that
1061
1040
  // there is only one such element if the element has how == kExclusive.
1062
1041
  // Return the new head.
1063
- static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1064
- PerThreadSynch *pw,
1065
- PerThreadSynch **wake_tail) {
1066
- PerThreadSynch *orig_h = head;
1067
- PerThreadSynch *w = pw->next;
1042
+ static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
1043
+ PerThreadSynch* pw,
1044
+ PerThreadSynch** wake_tail) {
1045
+ PerThreadSynch* orig_h = head;
1046
+ PerThreadSynch* w = pw->next;
1068
1047
  bool skipped = false;
1069
1048
  do {
1070
- if (w->wake) { // remove this element
1049
+ if (w->wake) { // remove this element
1071
1050
  ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1072
1051
  // we're removing pw's successor so either pw->skip is zero or we should
1073
1052
  // already have removed pw since if pw->skip!=null, pw has the same
1074
1053
  // condition as w.
1075
1054
  head = Dequeue(head, pw);
1076
- w->next = *wake_tail; // keep list terminated
1077
- *wake_tail = w; // add w to wake_list;
1078
- wake_tail = &w->next; // next addition to end
1055
+ w->next = *wake_tail; // keep list terminated
1056
+ *wake_tail = w; // add w to wake_list;
1057
+ wake_tail = &w->next; // next addition to end
1079
1058
  if (w->waitp->how == kExclusive) { // wake at most 1 writer
1080
1059
  break;
1081
1060
  }
1082
- } else { // not waking this one; skip
1083
- pw = Skip(w); // skip as much as possible
1061
+ } else { // not waking this one; skip
1062
+ pw = Skip(w); // skip as much as possible
1084
1063
  skipped = true;
1085
1064
  }
1086
1065
  w = pw->next;
@@ -1098,7 +1077,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1098
1077
 
1099
1078
  // Try to remove thread s from the list of waiters on this mutex.
1100
1079
  // Does nothing if s is not on the waiter list.
1101
- void Mutex::TryRemove(PerThreadSynch *s) {
1080
+ void Mutex::TryRemove(PerThreadSynch* s) {
1102
1081
  SchedulingGuard::ScopedDisable disable_rescheduling;
1103
1082
  intptr_t v = mu_.load(std::memory_order_relaxed);
1104
1083
  // acquire spinlock & lock
@@ -1106,16 +1085,16 @@ void Mutex::TryRemove(PerThreadSynch *s) {
1106
1085
  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1107
1086
  std::memory_order_acquire,
1108
1087
  std::memory_order_relaxed)) {
1109
- PerThreadSynch *h = GetPerThreadSynch(v);
1088
+ PerThreadSynch* h = GetPerThreadSynch(v);
1110
1089
  if (h != nullptr) {
1111
- PerThreadSynch *pw = h; // pw is w's predecessor
1112
- PerThreadSynch *w;
1090
+ PerThreadSynch* pw = h; // pw is w's predecessor
1091
+ PerThreadSynch* w;
1113
1092
  if ((w = pw->next) != s) { // search for thread,
1114
1093
  do { // processing at least one element
1115
1094
  // If the current element isn't equivalent to the waiter to be
1116
1095
  // removed, we can skip the entire chain.
1117
1096
  if (!MuEquivalentWaiter(s, w)) {
1118
- pw = Skip(w); // so skip all that won't match
1097
+ pw = Skip(w); // so skip all that won't match
1119
1098
  // we don't have to worry about dangling skip fields
1120
1099
  // in the threads we skipped; none can point to s
1121
1100
  // because they are in a different equivalence class.
@@ -1127,7 +1106,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
1127
1106
  // process the first thread again.
1128
1107
  } while ((w = pw->next) != s && pw != h);
1129
1108
  }
1130
- if (w == s) { // found thread; remove it
1109
+ if (w == s) { // found thread; remove it
1131
1110
  // pw->skip may be non-zero here; the loop above ensured that
1132
1111
  // no ancestor of s can skip to s, so removal is safe anyway.
1133
1112
  h = Dequeue(h, pw);
@@ -1136,16 +1115,15 @@ void Mutex::TryRemove(PerThreadSynch *s) {
1136
1115
  }
1137
1116
  }
1138
1117
  intptr_t nv;
1139
- do { // release spinlock and lock
1118
+ do { // release spinlock and lock
1140
1119
  v = mu_.load(std::memory_order_relaxed);
1141
1120
  nv = v & (kMuDesig | kMuEvent);
1142
1121
  if (h != nullptr) {
1143
1122
  nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1144
- h->readers = 0; // we hold writer lock
1123
+ h->readers = 0; // we hold writer lock
1145
1124
  h->maybe_unlocking = false; // finished unlocking
1146
1125
  }
1147
- } while (!mu_.compare_exchange_weak(v, nv,
1148
- std::memory_order_release,
1126
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
1149
1127
  std::memory_order_relaxed));
1150
1128
  }
1151
1129
  }
@@ -1155,7 +1133,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
1155
1133
  // if the wait extends past the absolute time specified, even if "s" is still
1156
1134
  // on the mutex queue. In this case, remove "s" from the queue and return
1157
1135
  // true, otherwise return false.
1158
- void Mutex::Block(PerThreadSynch *s) {
1136
+ void Mutex::Block(PerThreadSynch* s) {
1159
1137
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1160
1138
  if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1161
1139
  // After a timeout, we go into a spin loop until we remove ourselves
@@ -1174,7 +1152,7 @@ void Mutex::Block(PerThreadSynch *s) {
1174
1152
  // is not on the queue.
1175
1153
  this->TryRemove(s);
1176
1154
  }
1177
- s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1155
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1178
1156
  s->waitp->cond = nullptr; // condition no longer relevant for wakeups
1179
1157
  }
1180
1158
  }
@@ -1184,8 +1162,8 @@ void Mutex::Block(PerThreadSynch *s) {
1184
1162
  }
1185
1163
 
1186
1164
  // Wake thread w, and return the next thread in the list.
1187
- PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1188
- PerThreadSynch *next = w->next;
1165
+ PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
1166
+ PerThreadSynch* next = w->next;
1189
1167
  w->next = nullptr;
1190
1168
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1191
1169
  IncrementSynchSem(this, w);
@@ -1193,7 +1171,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1193
1171
  return next;
1194
1172
  }
1195
1173
 
1196
- static GraphId GetGraphIdLocked(Mutex *mu)
1174
+ static GraphId GetGraphIdLocked(Mutex* mu)
1197
1175
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1198
1176
  if (!deadlock_graph) { // (re)create the deadlock graph.
1199
1177
  deadlock_graph =
@@ -1203,7 +1181,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
1203
1181
  return deadlock_graph->GetId(mu);
1204
1182
  }
1205
1183
 
1206
- static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1184
+ static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1207
1185
  deadlock_graph_mu.Lock();
1208
1186
  GraphId id = GetGraphIdLocked(mu);
1209
1187
  deadlock_graph_mu.Unlock();
@@ -1213,7 +1191,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1213
1191
  // Record a lock acquisition. This is used in debug mode for deadlock
1214
1192
  // detection. The held_locks pointer points to the relevant data
1215
1193
  // structure for each case.
1216
- static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1194
+ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1217
1195
  int n = held_locks->n;
1218
1196
  int i = 0;
1219
1197
  while (i != n && held_locks->locks[i].id != id) {
@@ -1237,7 +1215,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1237
1215
  // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1238
1216
  // It does not process the event if is not needed when deadlock detection is
1239
1217
  // disabled.
1240
- static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1218
+ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1241
1219
  int n = held_locks->n;
1242
1220
  int i = 0;
1243
1221
  while (i != n && held_locks->locks[i].id != id) {
@@ -1252,11 +1230,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1252
1230
  i++;
1253
1231
  }
1254
1232
  if (i == n) { // mu missing means releasing unheld lock
1255
- SynchEvent *mu_events = GetSynchEvent(mu);
1233
+ SynchEvent* mu_events = GetSynchEvent(mu);
1256
1234
  ABSL_RAW_LOG(FATAL,
1257
1235
  "thread releasing lock it does not hold: %p %s; "
1258
1236
  ,
1259
- static_cast<void *>(mu),
1237
+ static_cast<void*>(mu),
1260
1238
  mu_events == nullptr ? "" : mu_events->name);
1261
1239
  }
1262
1240
  }
@@ -1273,7 +1251,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1273
1251
  }
1274
1252
 
1275
1253
  // Call LockEnter() if in debug mode and deadlock detection is enabled.
1276
- static inline void DebugOnlyLockEnter(Mutex *mu) {
1254
+ static inline void DebugOnlyLockEnter(Mutex* mu) {
1277
1255
  if (kDebugMode) {
1278
1256
  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1279
1257
  OnDeadlockCycle::kIgnore) {
@@ -1283,7 +1261,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) {
1283
1261
  }
1284
1262
 
1285
1263
  // Call LockEnter() if in debug mode and deadlock detection is enabled.
1286
- static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1264
+ static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
1287
1265
  if (kDebugMode) {
1288
1266
  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1289
1267
  OnDeadlockCycle::kIgnore) {
@@ -1293,7 +1271,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1293
1271
  }
1294
1272
 
1295
1273
  // Call LockLeave() if in debug mode and deadlock detection is enabled.
1296
- static inline void DebugOnlyLockLeave(Mutex *mu) {
1274
+ static inline void DebugOnlyLockLeave(Mutex* mu) {
1297
1275
  if (kDebugMode) {
1298
1276
  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1299
1277
  OnDeadlockCycle::kIgnore) {
@@ -1302,9 +1280,9 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
1302
1280
  }
1303
1281
  }
1304
1282
 
1305
- static char *StackString(void **pcs, int n, char *buf, int maxlen,
1283
+ static char* StackString(void** pcs, int n, char* buf, int maxlen,
1306
1284
  bool symbolize) {
1307
- static const int kSymLen = 200;
1285
+ static constexpr int kSymLen = 200;
1308
1286
  char sym[kSymLen];
1309
1287
  int len = 0;
1310
1288
  for (int i = 0; i != n; i++) {
@@ -1312,7 +1290,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
1312
1290
  return buf;
1313
1291
  size_t count = static_cast<size_t>(maxlen - len);
1314
1292
  if (symbolize) {
1315
- if (!symbolizer(pcs[i], sym, kSymLen)) {
1293
+ if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
1316
1294
  sym[0] = '\0';
1317
1295
  }
1318
1296
  snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
@@ -1325,15 +1303,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
1325
1303
  return buf;
1326
1304
  }
1327
1305
 
1328
- static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1329
- void *pcs[40];
1306
+ static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
1307
+ void* pcs[40];
1330
1308
  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1331
1309
  maxlen, symbolize);
1332
1310
  }
1333
1311
 
1334
1312
  namespace {
1335
- enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
1336
- // a path this long would be remarkable
1313
+ enum {
1314
+ kMaxDeadlockPathLen = 10
1315
+ }; // maximum length of a deadlock cycle;
1316
+ // a path this long would be remarkable
1337
1317
  // Buffers required to report a deadlock.
1338
1318
  // We do not allocate them on stack to avoid large stack frame.
1339
1319
  struct DeadlockReportBuffers {
@@ -1343,11 +1323,11 @@ struct DeadlockReportBuffers {
1343
1323
 
1344
1324
  struct ScopedDeadlockReportBuffers {
1345
1325
  ScopedDeadlockReportBuffers() {
1346
- b = reinterpret_cast<DeadlockReportBuffers *>(
1326
+ b = reinterpret_cast<DeadlockReportBuffers*>(
1347
1327
  base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1348
1328
  }
1349
1329
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1350
- DeadlockReportBuffers *b;
1330
+ DeadlockReportBuffers* b;
1351
1331
  };
1352
1332
 
1353
1333
  // Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1358,13 +1338,13 @@ int GetStack(void** stack, int max_depth) {
1358
1338
 
1359
1339
  // Called in debug mode when a thread is about to acquire a lock in a way that
1360
1340
  // may block.
1361
- static GraphId DeadlockCheck(Mutex *mu) {
1341
+ static GraphId DeadlockCheck(Mutex* mu) {
1362
1342
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1363
1343
  OnDeadlockCycle::kIgnore) {
1364
1344
  return InvalidGraphId();
1365
1345
  }
1366
1346
 
1367
- SynchLocksHeld *all_locks = Synch_GetAllLocks();
1347
+ SynchLocksHeld* all_locks = Synch_GetAllLocks();
1368
1348
 
1369
1349
  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1370
1350
  const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1386,8 +1366,8 @@ static GraphId DeadlockCheck(Mutex *mu) {
1386
1366
  // For each other mutex already held by this thread:
1387
1367
  for (int i = 0; i != all_locks->n; i++) {
1388
1368
  const GraphId other_node_id = all_locks->locks[i].id;
1389
- const Mutex *other =
1390
- static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1369
+ const Mutex* other =
1370
+ static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
1391
1371
  if (other == nullptr) {
1392
1372
  // Ignore stale lock
1393
1373
  continue;
@@ -1396,7 +1376,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
1396
1376
  // Add the acquired-before edge to the graph.
1397
1377
  if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1398
1378
  ScopedDeadlockReportBuffers scoped_buffers;
1399
- DeadlockReportBuffers *b = scoped_buffers.b;
1379
+ DeadlockReportBuffers* b = scoped_buffers.b;
1400
1380
  static int number_of_reported_deadlocks = 0;
1401
1381
  number_of_reported_deadlocks++;
1402
1382
  // Symbolize only 2 first deadlock report to avoid huge slowdowns.
@@ -1407,37 +1387,40 @@ static GraphId DeadlockCheck(Mutex *mu) {
  for (int j = 0; j != all_locks->n; j++) {
  void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
  if (pr != nullptr) {
- snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
  len += strlen(&b->buf[len]);
  }
  }
  ABSL_RAW_LOG(ERROR,
  "Acquiring absl::Mutex %p while holding %s; a cycle in the "
  "historical lock ordering graph has been observed",
- static_cast<void *>(mu), b->buf);
+ static_cast<void*>(mu), b->buf);
  ABSL_RAW_LOG(ERROR, "Cycle: ");
- int path_len = deadlock_graph->FindPath(
- mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+ ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
  GraphId id = b->path[j];
- Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
  if (path_mu == nullptr) continue;
  void** stack;
  int depth = deadlock_graph->GetStackTrace(id, &stack);
  snprintf(b->buf, sizeof(b->buf),
- "mutex@%p stack: ", static_cast<void *>(path_mu));
+ "mutex@%p stack: ", static_cast<void*>(path_mu));
  StackString(stack, depth, b->buf + strlen(b->buf),
  static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
  symbolize);
  ABSL_RAW_LOG(ERROR, "%s", b->buf);
  }
+ if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+ ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
  OnDeadlockCycle::kAbort) {
  deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
  ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
  return mu_id;
  }
- break; // report at most one potential deadlock per acquisition
+ break; // report at most one potential deadlock per acquisition
  }
  }
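Not part of the diff: the report generated above is gated by the global deadlock-detection mode; a minimal sketch of selecting it (kAbort is chosen here only as an example).

#include "absl/synchronization/mutex.h"

void CrashOnPotentialDeadlock() {
  // kIgnore disables the check, kReport logs the cycle (as in the hunk above),
  // and kAbort logs it and then terminates the process.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}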
 
@@ -1446,7 +1429,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
1446
1429
 
1447
1430
  // Invoke DeadlockCheck() iff we're in debug mode and
1448
1431
  // deadlock checking has been enabled.
1449
- static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1432
+ static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
1450
1433
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1451
1434
  OnDeadlockCycle::kIgnore) {
1452
1435
  return DeadlockCheck(mu);
@@ -1473,13 +1456,13 @@ void Mutex::AssertNotHeld() const {
1473
1456
  (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1474
1457
  synch_deadlock_detection.load(std::memory_order_acquire) !=
1475
1458
  OnDeadlockCycle::kIgnore) {
1476
- GraphId id = GetGraphId(const_cast<Mutex *>(this));
1477
- SynchLocksHeld *locks = Synch_GetAllLocks();
1459
+ GraphId id = GetGraphId(const_cast<Mutex*>(this));
1460
+ SynchLocksHeld* locks = Synch_GetAllLocks();
1478
1461
  for (int i = 0; i != locks->n; i++) {
1479
1462
  if (locks->locks[i].id == id) {
1480
- SynchEvent *mu_events = GetSynchEvent(this);
1463
+ SynchEvent* mu_events = GetSynchEvent(this);
1481
1464
  ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1482
- static_cast<const void *>(this),
1465
+ static_cast<const void*>(this),
1483
1466
  (mu_events == nullptr ? "" : mu_events->name));
1484
1467
  }
1485
1468
  }
@@ -1492,8 +1475,8 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1492
1475
  int c = GetMutexGlobals().spinloop_iterations;
1493
1476
  do { // do/while somewhat faster on AMD
1494
1477
  intptr_t v = mu->load(std::memory_order_relaxed);
1495
- if ((v & (kMuReader|kMuEvent)) != 0) {
1496
- return false; // a reader or tracing -> give up
1478
+ if ((v & (kMuReader | kMuEvent)) != 0) {
1479
+ return false; // a reader or tracing -> give up
1497
1480
  } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
1498
1481
  mu->compare_exchange_strong(v, kMuWriter | v,
1499
1482
  std::memory_order_acquire,
@@ -1510,8 +1493,7 @@ void Mutex::Lock() {
1510
1493
  intptr_t v = mu_.load(std::memory_order_relaxed);
1511
1494
  // try fast acquire, then spin loop
1512
1495
  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1513
- !mu_.compare_exchange_strong(v, kMuWriter | v,
1514
- std::memory_order_acquire,
1496
+ !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
1515
1497
  std::memory_order_relaxed)) {
1516
1498
  // try spin acquire, then slow loop
1517
1499
  if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1537,7 +1519,7 @@ void Mutex::ReaderLock() {
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
  }

- void Mutex::LockWhen(const Condition &cond) {
+ void Mutex::LockWhen(const Condition& cond) {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
  GraphId id = DebugOnlyDeadlockCheck(this);
  this->LockSlow(kExclusive, &cond, 0);
@@ -1545,21 +1527,26 @@ void Mutex::LockWhen(const Condition &cond) {
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

- bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
  }

- bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+ bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
  GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kExclusive, &cond,
- KernelTimeout(deadline), 0);
+ bool res =
+ LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  return res;
  }
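This hunk makes LockWhenWithTimeout run the timed slow path directly instead of first converting the timeout to a deadline; the caller-facing signature is unchanged. Not part of the diff: an illustrative caller, with an assumed Queue type.

#include <deque>

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

struct Queue {
  absl::Mutex mu;
  std::deque<int> items;  // guarded by mu
};

// Waits up to `wait` for an item; pops it into *out on success.
bool PopWithTimeout(Queue* q, absl::Duration wait, int* out) {
  auto not_empty = [](std::deque<int>* d) { return !d->empty(); };
  // LockWhenWithTimeout always returns with the mutex held; the bool reports
  // whether the condition became true or the timeout expired.
  bool ok =
      q->mu.LockWhenWithTimeout(absl::Condition(+not_empty, &q->items), wait);
  if (ok) {
    *out = q->items.front();
    q->items.pop_front();
  }
  q->mu.Unlock();
  return ok;
}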

- void Mutex::ReaderLockWhen(const Condition &cond) {
+ void Mutex::ReaderLockWhen(const Condition& cond) {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
  GraphId id = DebugOnlyDeadlockCheck(this);
  this->LockSlow(kShared, &cond, 0);
@@ -1567,12 +1554,17 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
  }

- bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+ bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
  absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
  }

- bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+ bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
  absl::Time deadline) {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
  GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1582,23 +1574,34 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
  return res;
  }

- void Mutex::Await(const Condition &cond) {
- if (cond.Eval()) { // condition already true; nothing to do
+ void Mutex::Await(const Condition& cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
  if (kDebugMode) {
  this->AssertReaderHeld();
  }
- } else { // normal case
+ } else { // normal case
  ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
  "condition untrue on return from Await");
  }
  }

- bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+ bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
  }

- bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
- if (cond.Eval()) { // condition already true; nothing to do
+ bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
  if (kDebugMode) {
  this->AssertReaderHeld();
  }
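AwaitWithTimeout above now evaluates the condition and performs the timed wait itself (AwaitCommon with a relative KernelTimeout) rather than delegating to AwaitWithDeadline. Not part of the diff: an illustrative caller that waits on a flag while already holding the lock.

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

struct Flag {
  absl::Mutex mu;
  bool ready = false;  // guarded by mu
};

// Waits up to `wait` for `ready` to become true.
bool WaitUntilReady(Flag* f, absl::Duration wait) {
  absl::MutexLock lock(&f->mu);
  // AwaitWithTimeout releases and reacquires f->mu internally; it returns
  // with the lock held and reports whether the condition became true.
  return f->mu.AwaitWithTimeout(absl::Condition(&f->ready), wait);
}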
@@ -1612,14 +1615,14 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1612
1615
  return res;
1613
1616
  }
1614
1617
 
1615
- bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1618
+ bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
1616
1619
  this->AssertReaderHeld();
1617
1620
  MuHow how =
1618
1621
  (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1619
1622
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1620
- SynchWaitParams waitp(
1621
- how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1622
- nullptr /*no cv_word*/);
1623
+ SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
1624
+ Synch_GetPerThreadAnnotated(this),
1625
+ nullptr /*no cv_word*/);
1623
1626
  int flags = kMuHasBlocked;
1624
1627
  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1625
1628
  flags |= kMuIsCond;
@@ -1639,14 +1642,13 @@ bool Mutex::TryLock() {
1639
1642
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1640
1643
  intptr_t v = mu_.load(std::memory_order_relaxed);
1641
1644
  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
1642
- mu_.compare_exchange_strong(v, kMuWriter | v,
1643
- std::memory_order_acquire,
1645
+ mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
1644
1646
  std::memory_order_relaxed)) {
1645
1647
  DebugOnlyLockEnter(this);
1646
1648
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1647
1649
  return true;
1648
1650
  }
1649
- if ((v & kMuEvent) != 0) { // we're recording events
1651
+ if ((v & kMuEvent) != 0) { // we're recording events
1650
1652
  if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
1651
1653
  mu_.compare_exchange_strong(
1652
1654
  v, (kExclusive->fast_or | v) + kExclusive->fast_add,
@@ -1672,7 +1674,7 @@ bool Mutex::ReaderTryLock() {
1672
1674
  // changing (typically because the reader count changes) under the CAS. We
1673
1675
  // limit the number of attempts to avoid having to think about livelock.
1674
1676
  int loop_limit = 5;
1675
- while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1677
+ while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
1676
1678
  if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1677
1679
  std::memory_order_acquire,
1678
1680
  std::memory_order_relaxed)) {
@@ -1684,7 +1686,7 @@ bool Mutex::ReaderTryLock() {
1684
1686
  loop_limit--;
1685
1687
  v = mu_.load(std::memory_order_relaxed);
1686
1688
  }
1687
- if ((v & kMuEvent) != 0) { // we're recording events
1689
+ if ((v & kMuEvent) != 0) { // we're recording events
1688
1690
  loop_limit = 5;
1689
1691
  while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1690
1692
  if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1723,7 +1725,7 @@ void Mutex::Unlock() {
1723
1725
  // should_try_cas is whether we'll try a compare-and-swap immediately.
1724
1726
  // NOTE: optimized out when kDebugMode is false.
1725
1727
  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1726
- (v & (kMuWait | kMuDesig)) != kMuWait);
1728
+ (v & (kMuWait | kMuDesig)) != kMuWait);
1727
1729
  // But, we can use an alternate computation of it, that compilers
1728
1730
  // currently don't find on their own. When that changes, this function
1729
1731
  // can be simplified.
@@ -1740,10 +1742,9 @@ void Mutex::Unlock() {
1740
1742
  static_cast<long long>(v), static_cast<long long>(x),
1741
1743
  static_cast<long long>(y));
1742
1744
  }
1743
- if (x < y &&
1744
- mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1745
- std::memory_order_release,
1746
- std::memory_order_relaxed)) {
1745
+ if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1746
+ std::memory_order_release,
1747
+ std::memory_order_relaxed)) {
1747
1748
  // fast writer release (writer with no waiters or with designated waker)
1748
1749
  } else {
1749
1750
  this->UnlockSlow(nullptr /*no waitp*/); // take slow path
@@ -1753,7 +1754,7 @@ void Mutex::Unlock() {
1753
1754
 
1754
1755
  // Requires v to represent a reader-locked state.
1755
1756
  static bool ExactlyOneReader(intptr_t v) {
1756
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
1757
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
1757
1758
  assert((v & kMuHigh) != 0);
1758
1759
  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1759
1760
  // on some architectures the following generates slightly smaller code.
@@ -1766,12 +1767,11 @@ void Mutex::ReaderUnlock() {
1766
1767
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1767
1768
  DebugOnlyLockLeave(this);
1768
1769
  intptr_t v = mu_.load(std::memory_order_relaxed);
1769
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
1770
- if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1770
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
1771
+ if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
1771
1772
  // fast reader release (reader with no waiters)
1772
- intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1773
- if (mu_.compare_exchange_strong(v, v - clear,
1774
- std::memory_order_release,
1773
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
1774
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
1775
1775
  std::memory_order_relaxed)) {
1776
1776
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1777
1777
  return;
@@ -1810,7 +1810,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
1810
1810
  }
1811
1811
 
1812
1812
  // Internal version of LockWhen(). See LockSlowWithDeadline()
1813
- ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1813
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
1814
1814
  int flags) {
1815
1815
  ABSL_RAW_CHECK(
1816
1816
  this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1818,7 +1818,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1818
1818
  }
1819
1819
 
1820
1820
  // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1821
- static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1821
+ static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
1822
1822
  bool locking, bool trylock,
1823
1823
  bool read_lock) {
1824
1824
  // Delicate annotation dance.
@@ -1868,7 +1868,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1868
1868
  // tsan). As the result there is no tsan-visible synchronization between the
1869
1869
  // addition and this thread. So if we would enable race detection here,
1870
1870
  // it would race with the predicate initialization.
1871
- static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1871
+ static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
1872
1872
  // Memory accesses are already ignored inside of lock/unlock operations,
1873
1873
  // but synchronization operations are also ignored. When we evaluate the
1874
1874
  // predicate we must ignore only memory accesses but not synchronization,
@@ -1893,7 +1893,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1893
1893
  // obstruct this call
1894
1894
  // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1895
1895
  // Await, LockWhen) so contention profiling should be suppressed.
1896
- bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1896
+ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
1897
1897
  KernelTimeout t, int flags) {
1898
1898
  intptr_t v = mu_.load(std::memory_order_relaxed);
1899
1899
  bool unlock = false;
@@ -1910,9 +1910,9 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1910
1910
  }
1911
1911
  unlock = true;
1912
1912
  }
1913
- SynchWaitParams waitp(
1914
- how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1915
- nullptr /*no cv_word*/);
1913
+ SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
1914
+ Synch_GetPerThreadAnnotated(this),
1915
+ nullptr /*no cv_word*/);
1916
1916
  if (!Condition::GuaranteedEqual(cond, nullptr)) {
1917
1917
  flags |= kMuIsCond;
1918
1918
  }
@@ -1953,20 +1953,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
1953
1953
  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1954
1954
  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1955
1955
  "%s: Mutex corrupt: both reader and writer lock held: %p",
1956
- label, reinterpret_cast<void *>(v));
1956
+ label, reinterpret_cast<void*>(v));
1957
1957
  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1958
- "%s: Mutex corrupt: waiting writer with no waiters: %p",
1959
- label, reinterpret_cast<void *>(v));
1958
+ "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
1959
+ reinterpret_cast<void*>(v));
1960
1960
  assert(false);
1961
1961
  }
1962
1962
 
1963
- void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1963
+ void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
1964
1964
  SchedulingGuard::ScopedDisable disable_rescheduling;
1965
1965
  int c = 0;
1966
1966
  intptr_t v = mu_.load(std::memory_order_relaxed);
1967
1967
  if ((v & kMuEvent) != 0) {
1968
- PostSynchEvent(this,
1969
- waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1968
+ PostSynchEvent(
1969
+ this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
1970
1970
  }
1971
1971
  ABSL_RAW_CHECK(
1972
1972
  waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -1991,11 +1991,11 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1991
1991
  flags |= kMuHasBlocked;
1992
1992
  c = 0;
1993
1993
  }
1994
- } else { // need to access waiter list
1994
+ } else { // need to access waiter list
1995
1995
  bool dowait = false;
1996
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
1996
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
1997
1997
  // This thread tries to become the one and only waiter.
1998
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1998
+ PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
1999
1999
  intptr_t nv =
2000
2000
  (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
2001
2001
  kMuWait;
@@ -2007,7 +2007,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2007
2007
  v, reinterpret_cast<intptr_t>(new_h) | nv,
2008
2008
  std::memory_order_release, std::memory_order_relaxed)) {
2009
2009
  dowait = true;
2010
- } else { // attempted Enqueue() failed
2010
+ } else { // attempted Enqueue() failed
2011
2011
  // zero out the waitp field set by Enqueue()
2012
2012
  waitp->thread->waitp = nullptr;
2013
2013
  }
@@ -2020,9 +2020,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2020
2020
  (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2021
2021
  kMuSpin | kMuReader,
2022
2022
  std::memory_order_acquire, std::memory_order_relaxed)) {
2023
- PerThreadSynch *h = GetPerThreadSynch(v);
2024
- h->readers += kMuOne; // inc reader count in waiter
2025
- do { // release spinlock
2023
+ PerThreadSynch* h = GetPerThreadSynch(v);
2024
+ h->readers += kMuOne; // inc reader count in waiter
2025
+ do { // release spinlock
2026
2026
  v = mu_.load(std::memory_order_relaxed);
2027
2027
  } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
2028
2028
  std::memory_order_release,
@@ -2032,7 +2032,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2032
2032
  waitp->how == kShared)) {
2033
2033
  break; // we timed out, or condition true, so return
2034
2034
  }
2035
- this->UnlockSlow(waitp); // got lock but condition false
2035
+ this->UnlockSlow(waitp); // got lock but condition false
2036
2036
  this->Block(waitp->thread);
2037
2037
  flags |= kMuHasBlocked;
2038
2038
  c = 0;
@@ -2043,18 +2043,19 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2043
2043
  (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2044
2044
  kMuSpin | kMuWait,
2045
2045
  std::memory_order_acquire, std::memory_order_relaxed)) {
2046
- PerThreadSynch *h = GetPerThreadSynch(v);
2047
- PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
2046
+ PerThreadSynch* h = GetPerThreadSynch(v);
2047
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
2048
2048
  intptr_t wr_wait = 0;
2049
2049
  ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2050
2050
  if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2051
- wr_wait = kMuWrWait; // give priority to a waiting writer
2051
+ wr_wait = kMuWrWait; // give priority to a waiting writer
2052
2052
  }
2053
- do { // release spinlock
2053
+ do { // release spinlock
2054
2054
  v = mu_.load(std::memory_order_relaxed);
2055
2055
  } while (!mu_.compare_exchange_weak(
2056
- v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2057
- reinterpret_cast<intptr_t>(new_h),
2056
+ v,
2057
+ (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2058
+ reinterpret_cast<intptr_t>(new_h),
2058
2059
  std::memory_order_release, std::memory_order_relaxed));
2059
2060
  dowait = true;
2060
2061
  }
@@ -2074,9 +2075,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2074
2075
  waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2075
2076
  "detected illegal recursion into Mutex code");
2076
2077
  if ((v & kMuEvent) != 0) {
2077
- PostSynchEvent(this,
2078
- waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2079
- SYNCH_EV_READERLOCK_RETURNING);
2078
+ PostSynchEvent(this, waitp->how == kExclusive
2079
+ ? SYNCH_EV_LOCK_RETURNING
2080
+ : SYNCH_EV_READERLOCK_RETURNING);
2080
2081
  }
2081
2082
  }
2082
2083
 
@@ -2085,28 +2086,28 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
2085
2086
  // which holds the lock but is not runnable because its condition is false
2086
2087
  // or it is in the process of blocking on a condition variable; it must requeue
2087
2088
  // itself on the mutex/condvar to wait for its condition to become true.
2088
- ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2089
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
2089
2090
  SchedulingGuard::ScopedDisable disable_rescheduling;
2090
2091
  intptr_t v = mu_.load(std::memory_order_relaxed);
2091
2092
  this->AssertReaderHeld();
2092
2093
  CheckForMutexCorruption(v, "Unlock");
2093
2094
  if ((v & kMuEvent) != 0) {
2094
- PostSynchEvent(this,
2095
- (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2095
+ PostSynchEvent(
2096
+ this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
2096
2097
  }
2097
2098
  int c = 0;
2098
2099
  // the waiter under consideration to wake, or zero
2099
- PerThreadSynch *w = nullptr;
2100
+ PerThreadSynch* w = nullptr;
2100
2101
  // the predecessor to w or zero
2101
- PerThreadSynch *pw = nullptr;
2102
+ PerThreadSynch* pw = nullptr;
2102
2103
  // head of the list searched previously, or zero
2103
- PerThreadSynch *old_h = nullptr;
2104
+ PerThreadSynch* old_h = nullptr;
2104
2105
  // a condition that's known to be false.
2105
- const Condition *known_false = nullptr;
2106
- PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
2107
- intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2108
- // later writer could have acquired the lock
2109
- // (starvation avoidance)
2106
+ const Condition* known_false = nullptr;
2107
+ PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
2108
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2109
+ // later writer could have acquired the lock
2110
+ // (starvation avoidance)
2110
2111
  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2111
2112
  waitp->thread->suppress_fatal_errors,
2112
2113
  "detected illegal recursion into Mutex code");
@@ -2126,8 +2127,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2126
2127
  } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2127
2128
  // fast reader release (reader with no waiters)
2128
2129
  intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2129
- if (mu_.compare_exchange_strong(v, v - clear,
2130
- std::memory_order_release,
2130
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
2131
2131
  std::memory_order_relaxed)) {
2132
2132
  return;
2133
2133
  }
@@ -2135,16 +2135,16 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2135
2135
  mu_.compare_exchange_strong(v, v | kMuSpin,
2136
2136
  std::memory_order_acquire,
2137
2137
  std::memory_order_relaxed)) {
2138
- if ((v & kMuWait) == 0) { // no one to wake
2138
+ if ((v & kMuWait) == 0) { // no one to wake
2139
2139
  intptr_t nv;
2140
2140
  bool do_enqueue = true; // always Enqueue() the first time
2141
2141
  ABSL_RAW_CHECK(waitp != nullptr,
2142
2142
  "UnlockSlow is confused"); // about to sleep
2143
- do { // must loop to release spinlock as reader count may change
2143
+ do { // must loop to release spinlock as reader count may change
2144
2144
  v = mu_.load(std::memory_order_relaxed);
2145
2145
  // decrement reader count if there are readers
2146
- intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
2147
- PerThreadSynch *new_h = nullptr;
2146
+ intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
2147
+ PerThreadSynch* new_h = nullptr;
2148
2148
  if (do_enqueue) {
2149
2149
  // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2150
2150
  // we must not retry here. The initial attempt will always have
@@ -2168,21 +2168,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2168
2168
  }
2169
2169
  // release spinlock & our lock; retry if reader-count changed
2170
2170
  // (writer count cannot change since we hold lock)
2171
- } while (!mu_.compare_exchange_weak(v, nv,
2172
- std::memory_order_release,
2171
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
2173
2172
  std::memory_order_relaxed));
2174
2173
  break;
2175
2174
  }
2176
2175
 
2177
2176
  // There are waiters.
2178
2177
  // Set h to the head of the circular waiter list.
2179
- PerThreadSynch *h = GetPerThreadSynch(v);
2178
+ PerThreadSynch* h = GetPerThreadSynch(v);
2180
2179
  if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2181
2180
  // a reader but not the last
2182
- h->readers -= kMuOne; // release our lock
2183
- intptr_t nv = v; // normally just release spinlock
2181
+ h->readers -= kMuOne; // release our lock
2182
+ intptr_t nv = v; // normally just release spinlock
2184
2183
  if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2185
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2184
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2186
2185
  ABSL_RAW_CHECK(new_h != nullptr,
2187
2186
  "waiters disappeared during Enqueue()!");
2188
2187
  nv &= kMuLow;
@@ -2200,8 +2199,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2200
2199
 
2201
2200
  // The lock is becoming free, and there's a waiter
2202
2201
  if (old_h != nullptr &&
2203
- !old_h->may_skip) { // we used old_h as a terminator
2204
- old_h->may_skip = true; // allow old_h to skip once more
2202
+ !old_h->may_skip) { // we used old_h as a terminator
2203
+ old_h->may_skip = true; // allow old_h to skip once more
2205
2204
  ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2206
2205
  if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2207
2206
  old_h->skip = old_h->next; // old_h not head & can skip to successor
@@ -2210,7 +2209,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2210
2209
  if (h->next->waitp->how == kExclusive &&
2211
2210
  Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2212
2211
  // easy case: writer with no condition; no need to search
2213
- pw = h; // wake w, the successor of h (=pw)
2212
+ pw = h; // wake w, the successor of h (=pw)
2214
2213
  w = h->next;
2215
2214
  w->wake = true;
2216
2215
  // We are waking up a writer. This writer may be racing against
@@ -2233,13 +2232,13 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2233
2232
  // waiter has a condition or is a reader. We avoid searching over
2234
2233
  // waiters we've searched on previous iterations by starting at
2235
2234
  // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2236
- if (old_h == h) { // we've searched before, and nothing's new
2237
- // so there's no one to wake.
2238
- intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2235
+ if (old_h == h) { // we've searched before, and nothing's new
2236
+ // so there's no one to wake.
2237
+ intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
2239
2238
  h->readers = 0;
2240
- h->maybe_unlocking = false; // finished unlocking
2241
- if (waitp != nullptr) { // we must queue ourselves and sleep
2242
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2239
+ h->maybe_unlocking = false; // finished unlocking
2240
+ if (waitp != nullptr) { // we must queue ourselves and sleep
2241
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2243
2242
  nv &= kMuLow;
2244
2243
  if (new_h != nullptr) {
2245
2244
  nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2253,12 +2252,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2253
2252
  }
2254
2253
 
2255
2254
  // set up to walk the list
2256
- PerThreadSynch *w_walk; // current waiter during list walk
2257
- PerThreadSynch *pw_walk; // previous waiter during list walk
2255
+ PerThreadSynch* w_walk; // current waiter during list walk
2256
+ PerThreadSynch* pw_walk; // previous waiter during list walk
2258
2257
  if (old_h != nullptr) { // we've searched up to old_h before
2259
2258
  pw_walk = old_h;
2260
2259
  w_walk = old_h->next;
2261
- } else { // no prior search, start at beginning
2260
+ } else { // no prior search, start at beginning
2262
2261
  pw_walk =
2263
2262
  nullptr; // h->next's predecessor may change; don't record it
2264
2263
  w_walk = h->next;
@@ -2284,7 +2283,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2284
2283
  // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2285
2284
  // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2286
2285
 
2287
- old_h = h; // remember we searched to here
2286
+ old_h = h; // remember we searched to here
2288
2287
 
2289
2288
  // Walk the path upto and including h looking for waiters we can wake.
2290
2289
  while (pw_walk != h) {
@@ -2296,24 +2295,24 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2296
2295
  // is in fact true
2297
2296
  EvalConditionIgnored(this, w_walk->waitp->cond))) {
2298
2297
  if (w == nullptr) {
2299
- w_walk->wake = true; // can wake this waiter
2298
+ w_walk->wake = true; // can wake this waiter
2300
2299
  w = w_walk;
2301
2300
  pw = pw_walk;
2302
2301
  if (w_walk->waitp->how == kExclusive) {
2303
2302
  wr_wait = kMuWrWait;
2304
- break; // bail if waking this writer
2303
+ break; // bail if waking this writer
2305
2304
  }
2306
2305
  } else if (w_walk->waitp->how == kShared) { // wake if a reader
2307
2306
  w_walk->wake = true;
2308
- } else { // writer with true condition
2307
+ } else { // writer with true condition
2309
2308
  wr_wait = kMuWrWait;
2310
2309
  }
2311
- } else { // can't wake; condition false
2310
+ } else { // can't wake; condition false
2312
2311
  known_false = w_walk->waitp->cond; // remember last false condition
2313
2312
  }
2314
- if (w_walk->wake) { // we're waking reader w_walk
2315
- pw_walk = w_walk; // don't skip similar waiters
2316
- } else { // not waking; skip as much as possible
2313
+ if (w_walk->wake) { // we're waking reader w_walk
2314
+ pw_walk = w_walk; // don't skip similar waiters
2315
+ } else { // not waking; skip as much as possible
2317
2316
  pw_walk = Skip(w_walk);
2318
2317
  }
2319
2318
  // If pw_walk == h, then load of pw_walk->next can race with
@@ -2340,8 +2339,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
  h = DequeueAllWakeable(h, pw, &wake_list);

  intptr_t nv = (v & kMuEvent) | kMuDesig;
- // assume no waiters left,
- // set kMuDesig for INV1a
+ // assume no waiters left,
+ // set kMuDesig for INV1a

  if (waitp != nullptr) { // we must queue ourselves and sleep
  h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2354,7 +2353,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {

  if (h != nullptr) { // there are waiters left
  h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
+ h->maybe_unlocking = false; // finished unlocking
  nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
  }

@@ -2365,12 +2364,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
  }
  // aggressive here; no one can proceed till we do
  c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
- } // end of for(;;)-loop
+ } // end of for(;;)-loop

  if (wake_list != kPerThreadSynchNull) {
  int64_t total_wait_cycles = 0;
  int64_t max_wait_cycles = 0;
- int64_t now = base_internal::CycleClock::Now();
+ int64_t now = CycleClock::Now();
  do {
  // Profile lock contention events only if the waiter was trying to acquire
  // the lock, not waiting on a condition variable or Condition.
@@ -2382,7 +2381,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
  wake_list->waitp->contention_start_cycles = now;
  wake_list->waitp->should_submit_contention_data = true;
  }
- wake_list = Wakeup(wake_list); // wake waiters
+ wake_list = Wakeup(wake_list); // wake waiters
  } while (wake_list != kPerThreadSynchNull);
  if (total_wait_cycles > 0) {
  mutex_tracer("slow release", this, total_wait_cycles);
@@ -2410,7 +2409,7 @@ void Mutex::Trans(MuHow how) {
  // condition variable. If this mutex is free, we simply wake the thread.
  // It will later acquire the mutex with high probability. Otherwise, we
  // enqueue thread w on this mutex.
- void Mutex::Fer(PerThreadSynch *w) {
+ void Mutex::Fer(PerThreadSynch* w) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  int c = 0;
  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2435,9 +2434,9 @@ void Mutex::Fer(PerThreadSynch *w) {
  IncrementSynchSem(this, w);
  return;
  } else {
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
  // This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
  ABSL_RAW_CHECK(new_h != nullptr,
  "Enqueue failed"); // we must queue ourselves
  if (mu_.compare_exchange_strong(
@@ -2447,8 +2446,8 @@ void Mutex::Fer(PerThreadSynch *w) {
  }
  } else if ((v & kMuSpin) == 0 &&
  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
  ABSL_RAW_CHECK(new_h != nullptr,
  "Enqueue failed"); // we must queue ourselves
  do {
@@ -2467,19 +2466,18 @@ void Mutex::Fer(PerThreadSynch *w) {

  void Mutex::AssertHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
- SynchEvent *e = GetSynchEvent(this);
+ SynchEvent* e = GetSynchEvent(this);
  ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
- static_cast<const void *>(this),
- (e == nullptr ? "" : e->name));
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
  }
  }

  void Mutex::AssertReaderHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- ABSL_RAW_LOG(
- FATAL, "thread should hold at least a read lock on Mutex %p %s",
- static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ SynchEvent* e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL,
+ "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
  }
  }
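Note (editorial aside, not part of the upstream diff): the two hunks above only reformat AssertHeld() and AssertReaderHeld(); as the code shows, each fails with ABSL_RAW_LOG(FATAL) when the calling thread does not hold the lock in the required mode, so they are typically used to enforce a locking precondition in helper functions. A minimal sketch under that reading; mu, counter and IncrementLocked are invented names:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    int counter = 0;  // guarded by mu

    // Precondition: the caller holds mu exclusively.
    void IncrementLocked() {
      mu.AssertHeld();  // raises ABSL_RAW_LOG(FATAL) if mu is not write-held, per the code above
      ++counter;
    }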
 
@@ -2490,13 +2488,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events
  static const intptr_t kCvLow = 0x0003L; // low order bits of CV

  // Hack to make constant values available to gdb pretty printer
- enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+ enum {
+ kGdbCvSpin = kCvSpin,
+ kGdbCvEvent = kCvEvent,
+ kGdbCvLow = kCvLow,
+ };

  static_assert(PerThreadSynch::kAlignment > kCvLow,
  "PerThreadSynch::kAlignment must be greater than kCvLow");

- void CondVar::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+ void CondVar::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
  e->log = true;
  UnrefSynchEvent(e);
  }
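Note (editorial aside, not part of the upstream diff): EnableDebugLog() above attaches a named SynchEvent to the condition variable and sets its log flag, after which operations on that CondVar are traced under the given name. A one-line usage sketch; cv, "worker_cv" and EnableTracing are invented names:

    #include "absl/synchronization/mutex.h"

    absl::CondVar cv;

    void EnableTracing() {
      cv.EnableDebugLog("worker_cv");  // attaches a named SynchEvent with log = true
    }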
@@ -2507,25 +2509,23 @@ CondVar::~CondVar() {
  }
  }

-
  // Remove thread s from the list of waiters on this condition variable.
- void CondVar::Remove(PerThreadSynch *s) {
+ void CondVar::Remove(PerThreadSynch* s) {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  intptr_t v;
  int c = 0;
  for (v = cv_.load(std::memory_order_relaxed);;
  v = cv_.load(std::memory_order_relaxed)) {
  if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
  std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
  if (h != nullptr) {
- PerThreadSynch *w = h;
+ PerThreadSynch* w = h;
  while (w->next != s && w->next != h) { // search for thread
  w = w->next;
  }
- if (w->next == s) { // found thread; remove it
+ if (w->next == s) { // found thread; remove it
  w->next = s->next;
  if (h == s) {
  h = (w == s) ? nullptr : w;
@@ -2534,7 +2534,7 @@ void CondVar::Remove(PerThreadSynch *s) {
  s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  }
  }
- // release spinlock
+ // release spinlock
  cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
  std::memory_order_release);
  return;
@@ -2557,14 +2557,14 @@ void CondVar::Remove(PerThreadSynch *s) {
  // variable queue just before the mutex is to be unlocked, and (most
  // importantly) after any call to an external routine that might re-enter the
  // mutex code.
- static void CondVarEnqueue(SynchWaitParams *waitp) {
+ static void CondVarEnqueue(SynchWaitParams* waitp) {
  // This thread might be transferred to the Mutex queue by Fer() when
  // we are woken. To make sure that is what happens, Enqueue() doesn't
  // call CondVarEnqueue() again but instead uses its normal code. We
  // must do this before we queue ourselves so that cv_word will be null
  // when seen by the dequeuer, who may wish immediately to requeue
  // this thread on another queue.
- std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ std::atomic<intptr_t>* cv_word = waitp->cv_word;
  waitp->cv_word = nullptr;

  intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2577,8 +2577,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
  v = cv_word->load(std::memory_order_relaxed);
  }
  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
- waitp->thread->waitp = waitp; // prepare ourselves for waiting
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
  if (h == nullptr) { // add this thread to waiter list
  waitp->thread->next = waitp->thread;
  } else {
@@ -2591,8 +2591,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
  std::memory_order_release);
  }

- bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
- bool rc = false; // return value; true iff we timed-out
+ bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+ bool rc = false; // return value; true iff we timed-out

  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2659,27 +2659,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
  return rc;
  }

- bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+ bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+ return WaitCommon(mu, KernelTimeout(timeout));
  }

- bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+ bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
  return WaitCommon(mu, KernelTimeout(deadline));
  }

- void CondVar::Wait(Mutex *mu) {
- WaitCommon(mu, KernelTimeout::Never());
- }
+ void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }

  // Wake thread w
  // If it was a timed wait, w will be waiting on w->cv
  // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
  // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
- void CondVar::Wakeup(PerThreadSynch *w) {
+ void CondVar::Wakeup(PerThreadSynch* w) {
  if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
  // The waiting thread only needs to observe "w->state == kAvailable" to be
  // released, we must cache "cvmu" before clearing "next".
- Mutex *mu = w->waitp->cvmu;
+ Mutex* mu = w->waitp->cvmu;
  w->next = nullptr;
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  Mutex::IncrementSynchSem(mu, w);
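Note (editorial aside, not part of the upstream diff): the substantive change in this hunk is that CondVar::WaitWithTimeout() now builds a KernelTimeout directly from the absl::Duration instead of routing through WaitWithDeadline()/DeadlineFromTimeout(); the rest is formatting. The public contract (call with the Mutex held; returns true iff the wait timed out, per the WaitCommon comment above) appears unchanged. A minimal usage sketch under that assumption; mu, cv, ready and WaitBriefly are invented names:

    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    absl::Mutex mu;
    absl::CondVar cv;
    bool ready = false;  // guarded by mu

    bool WaitBriefly() {
      absl::MutexLock lock(&mu);
      while (!ready) {
        // WaitWithTimeout releases mu while blocked and returns true on timeout.
        if (cv.WaitWithTimeout(&mu, absl::Seconds(2))) {
          return ready;  // timed out: report the state observed while still holding mu
        }
      }
      return true;
    }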
@@ -2696,11 +2694,10 @@ void CondVar::Signal() {
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
  v = cv_.load(std::memory_order_relaxed)) {
  if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
  std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+ PerThreadSynch* w = nullptr;
  if (h != nullptr) { // remove first waiter
  w = h->next;
  if (w == h) {
@@ -2709,11 +2706,11 @@ void CondVar::Signal() {
  h->next = w->next;
  }
  }
- // release spinlock
+ // release spinlock
  cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
  std::memory_order_release);
  if (w != nullptr) {
- CondVar::Wakeup(w); // wake waiter, if there was one
+ CondVar::Wakeup(w); // wake waiter, if there was one
  cond_var_tracer("Signal wakeup", this);
  }
  if ((v & kCvEvent) != 0) {
@@ -2728,7 +2725,7 @@ void CondVar::Signal() {
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
  }

- void CondVar::SignalAll () {
+ void CondVar::SignalAll() {
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
  intptr_t v;
  int c = 0;
@@ -2742,11 +2739,11 @@ void CondVar::SignalAll () {
  if ((v & kCvSpin) == 0 &&
  cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
  std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
  if (h != nullptr) {
- PerThreadSynch *w;
- PerThreadSynch *n = h->next;
- do { // for every thread, wake it up
+ PerThreadSynch* w;
+ PerThreadSynch* n = h->next;
+ do { // for every thread, wake it up
  w = n;
  n = n->next;
  CondVar::Wakeup(w);
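Note (editorial aside, not part of the upstream diff): the Signal() and SignalAll() hunks above are formatting-only. As the code shows, Signal() dequeues and wakes at most the first waiter, while SignalAll() detaches the whole waiter list and wakes every thread on it. A hedged usage sketch of the usual pattern; mu, cv, pending and Publish are invented names:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    absl::CondVar cv;
    int pending = 0;  // guarded by mu

    void Publish(int n) {
      absl::MutexLock lock(&mu);
      pending += n;
      if (n == 1) {
        cv.Signal();     // wakes at most one waiter (the "remove first waiter" path above)
      } else {
        cv.SignalAll();  // wakes every waiter so each can re-check `pending`
      }
    }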
@@ -2774,42 +2771,41 @@ void ReleasableMutexLock::Release() {
  }

  #ifdef ABSL_HAVE_THREAD_SANITIZER
- extern "C" void __tsan_read1(void *addr);
+ extern "C" void __tsan_read1(void* addr);
  #else
  #define __tsan_read1(addr) // do nothing if TSan not enabled
  #endif

  // A function that just returns its argument, dereferenced
- static bool Dereference(void *arg) {
+ static bool Dereference(void* arg) {
  // ThreadSanitizer does not instrument this file for memory accesses.
  // This function dereferences a user variable that can participate
  // in a data race, so we need to manually tell TSan about this memory access.
  __tsan_read1(arg);
- return *(static_cast<bool *>(arg));
+ return *(static_cast<bool*>(arg));
  }

  ABSL_CONST_INIT const Condition Condition::kTrue;

- Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- arg_(arg) {
+ Condition::Condition(bool (*func)(void*), void* arg)
+ : eval_(&CallVoidPtrFunction), arg_(arg) {
  static_assert(sizeof(&func) <= sizeof(callback_),
  "An overlarge function pointer passed to Condition.");
  StoreCallback(func);
  }

- bool Condition::CallVoidPtrFunction(const Condition *c) {
- using FunctionPointer = bool (*)(void *);
+ bool Condition::CallVoidPtrFunction(const Condition* c) {
+ using FunctionPointer = bool (*)(void*);
  FunctionPointer function_pointer;
  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
  return (*function_pointer)(c->arg_);
  }

- Condition::Condition(const bool *cond)
+ Condition::Condition(const bool* cond)
  : eval_(CallVoidPtrFunction),
  // const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {
- using FunctionPointer = bool (*)(void *);
+ arg_(const_cast<bool*>(cond)) {
+ using FunctionPointer = bool (*)(void*);
  const FunctionPointer dereference = Dereference;
  StoreCallback(dereference);
  }
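Note (editorial aside, not part of the upstream diff): these hunks reformat the Condition constructors that back Mutex's predicate-based waiting; Condition(const bool*) stores the pointer and reads it through Dereference() with the manual TSan annotation shown above. A minimal sketch of how such a Condition is commonly used, assuming the public Mutex::Await() API; mu, done and WaitForDone are invented names:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    bool done = false;  // guarded by mu

    void WaitForDone() {
      absl::MutexLock lock(&mu);
      // Condition(const bool*) wraps &done; Await blocks until it reads true,
      // reacquiring mu before returning.
      mu.Await(absl::Condition(&done));
    }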
@@ -2819,7 +2815,7 @@ bool Condition::Eval() const {
  return (this->eval_ == nullptr) || (*this->eval_)(this);
  }

- bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+ bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
  // kTrue logic.
  if (a == nullptr || a->eval_ == nullptr) {
  return b == nullptr || b->eval_ == nullptr;