grpc 1.28.0 → 1.36.0

Sign up to get free protection for your applications and access to all features.

Potentially problematic release.


This version of grpc might be problematic. See the release details for more information.

Files changed (1466):
  1. checksums.yaml +4 -4
  2. data/Makefile +1692 -22343
  3. data/etc/roots.pem +257 -573
  4. data/include/grpc/compression.h +1 -1
  5. data/include/grpc/grpc.h +17 -9
  6. data/include/grpc/grpc_security.h +274 -180
  7. data/include/grpc/grpc_security_constants.h +4 -0
  8. data/include/grpc/impl/codegen/README.md +22 -0
  9. data/include/grpc/impl/codegen/atm_windows.h +4 -0
  10. data/include/grpc/impl/codegen/byte_buffer.h +1 -1
  11. data/include/grpc/impl/codegen/grpc_types.h +32 -30
  12. data/include/grpc/impl/codegen/log.h +0 -2
  13. data/include/grpc/impl/codegen/port_platform.h +34 -90
  14. data/include/grpc/impl/codegen/sync_windows.h +4 -0
  15. data/include/grpc/module.modulemap +24 -39
  16. data/include/grpc/slice_buffer.h +3 -3
  17. data/include/grpc/support/sync.h +3 -3
  18. data/include/grpc/support/time.h +7 -7
  19. data/src/core/ext/filters/client_channel/backend_metric.cc +16 -12
  20. data/src/core/ext/filters/client_channel/backup_poller.cc +3 -2
  21. data/src/core/ext/filters/client_channel/client_channel.cc +3750 -2341
  22. data/src/core/ext/filters/client_channel/client_channel.h +1 -7
  23. data/src/core/ext/filters/client_channel/client_channel_channelz.h +0 -3
  24. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +4 -3
  25. data/src/core/ext/filters/client_channel/config_selector.cc +58 -0
  26. data/src/core/ext/filters/client_channel/config_selector.h +125 -0
  27. data/src/core/ext/filters/client_channel/dynamic_filters.cc +186 -0
  28. data/src/core/ext/filters/client_channel/dynamic_filters.h +99 -0
  29. data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +24 -2
  30. data/src/core/ext/filters/client_channel/health/health_check_client.cc +25 -30
  31. data/src/core/ext/filters/client_channel/health/health_check_client.h +7 -7
  32. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +15 -16
  33. data/src/core/ext/filters/client_channel/http_proxy.cc +44 -34
  34. data/src/core/ext/filters/client_channel/lb_policy.cc +25 -20
  35. data/src/core/ext/filters/client_channel/lb_policy.h +50 -38
  36. data/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc +96 -0
  37. data/src/core/ext/filters/client_channel/lb_policy/address_filtering.h +101 -0
  38. data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +20 -11
  39. data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h +1 -1
  40. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +481 -510
  41. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc +76 -0
  42. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h +37 -0
  43. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +1 -2
  44. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +6 -41
  45. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +3 -1
  46. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +3 -2
  47. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +3 -3
  48. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -1
  49. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +24 -18
  50. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +922 -0
  51. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +11 -10
  52. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +18 -46
  53. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +744 -0
  54. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +520 -134
  55. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.h +53 -26
  56. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_args.h +29 -0
  57. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +810 -0
  58. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +722 -0
  59. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +1384 -0
  60. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +18 -8
  61. data/src/core/ext/filters/client_channel/local_subchannel_pool.h +2 -1
  62. data/src/core/ext/filters/client_channel/resolver.cc +6 -10
  63. data/src/core/ext/filters/client_channel/resolver.h +10 -20
  64. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +111 -110
  65. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +4 -34
  66. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc +22 -24
  67. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +13 -11
  68. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +79 -122
  69. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +642 -180
  70. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +10 -3
  71. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc +1 -1
  72. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +1 -1
  73. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +61 -61
  74. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +102 -108
  75. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +1 -5
  76. data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +362 -0
  77. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +38 -31
  78. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +625 -46
  79. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.h +28 -0
  80. data/src/core/ext/filters/client_channel/resolver_factory.h +8 -8
  81. data/src/core/ext/filters/client_channel/resolver_registry.cc +55 -52
  82. data/src/core/ext/filters/client_channel/resolver_registry.h +10 -10
  83. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +47 -93
  84. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +30 -26
  85. data/src/core/ext/filters/client_channel/retry_throttle.cc +5 -3
  86. data/src/core/ext/filters/client_channel/retry_throttle.h +4 -2
  87. data/src/core/ext/filters/client_channel/server_address.cc +129 -13
  88. data/src/core/ext/filters/client_channel/server_address.h +80 -32
  89. data/src/core/ext/filters/client_channel/service_config.cc +114 -149
  90. data/src/core/ext/filters/client_channel/service_config.h +33 -100
  91. data/src/core/ext/filters/client_channel/service_config_call_data.h +86 -0
  92. data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +142 -0
  93. data/src/core/ext/filters/client_channel/service_config_parser.cc +89 -0
  94. data/src/core/ext/filters/client_channel/service_config_parser.h +92 -0
  95. data/src/core/ext/filters/client_channel/subchannel.cc +156 -98
  96. data/src/core/ext/filters/client_channel/subchannel.h +65 -35
  97. data/src/core/ext/filters/client_channel/subchannel_interface.h +41 -5
  98. data/src/core/ext/filters/client_channel/subchannel_pool_interface.h +6 -2
  99. data/src/core/ext/filters/deadline/deadline_filter.cc +87 -79
  100. data/src/core/ext/filters/deadline/deadline_filter.h +7 -11
  101. data/src/core/ext/filters/http/client/http_client_filter.cc +29 -34
  102. data/src/core/ext/filters/http/client_authority_filter.cc +10 -10
  103. data/src/core/ext/filters/http/http_filters_plugin.cc +34 -15
  104. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +258 -221
  105. data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +399 -0
  106. data/src/core/ext/filters/http/message_compress/message_decompress_filter.h +31 -0
  107. data/src/core/ext/filters/http/server/http_server_filter.cc +3 -3
  108. data/src/core/ext/filters/max_age/max_age_filter.cc +38 -34
  109. data/src/core/ext/filters/message_size/message_size_filter.cc +64 -90
  110. data/src/core/ext/filters/message_size/message_size_filter.h +12 -5
  111. data/src/core/ext/filters/workarounds/workaround_utils.cc +1 -1
  112. data/src/core/ext/transport/chttp2/client/authority.cc +3 -3
  113. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +87 -31
  114. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +19 -2
  115. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +20 -8
  116. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +21 -10
  117. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +34 -47
  118. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +505 -344
  119. data/src/core/ext/transport/chttp2/server/chttp2_server.h +16 -2
  120. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +13 -3
  121. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +19 -18
  122. data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +65 -21
  123. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +7 -7
  124. data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +4 -6
  125. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +307 -343
  126. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +1 -0
  127. data/src/core/ext/transport/chttp2/transport/flow_control.cc +36 -33
  128. data/src/core/ext/transport/chttp2/transport/flow_control.h +27 -19
  129. data/src/core/ext/transport/chttp2/transport/frame_data.cc +9 -12
  130. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +6 -7
  131. data/src/core/ext/transport/chttp2/transport/frame_goaway.h +2 -3
  132. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +5 -6
  133. data/src/core/ext/transport/chttp2/transport/frame_ping.h +2 -3
  134. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +12 -13
  135. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +2 -3
  136. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +8 -9
  137. data/src/core/ext/transport/chttp2/transport/frame_settings.h +4 -4
  138. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +15 -18
  139. data/src/core/ext/transport/chttp2/transport/frame_window_update.h +2 -3
  140. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +29 -16
  141. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +2 -3
  142. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +37 -37
  143. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +2 -3
  144. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +13 -17
  145. data/src/core/ext/transport/chttp2/transport/hpack_table.h +2 -2
  146. data/src/core/ext/transport/chttp2/transport/http2_settings.h +4 -5
  147. data/src/core/ext/transport/chttp2/transport/huffsyms.h +2 -3
  148. data/src/core/ext/transport/chttp2/transport/internal.h +37 -23
  149. data/src/core/ext/transport/chttp2/transport/parsing.cc +52 -74
  150. data/src/core/ext/transport/chttp2/transport/stream_map.h +2 -3
  151. data/src/core/ext/transport/chttp2/transport/writing.cc +30 -28
  152. data/src/core/ext/transport/inproc/inproc_transport.cc +106 -33
  153. data/src/core/ext/upb-generated/envoy/annotations/deprecation.upb.h +1 -1
  154. data/src/core/ext/upb-generated/envoy/annotations/resource.upb.c +1 -1
  155. data/src/core/ext/upb-generated/envoy/annotations/resource.upb.h +10 -4
  156. data/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c +243 -0
  157. data/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.h +865 -0
  158. data/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c +74 -0
  159. data/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.h +253 -0
  160. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +453 -0
  161. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +1801 -0
  162. data/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c +35 -0
  163. data/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.h +77 -0
  164. data/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c +56 -0
  165. data/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.h +364 -0
  166. data/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c +124 -0
  167. data/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.h +428 -0
  168. data/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c +35 -0
  169. data/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.h +88 -0
  170. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +334 -0
  171. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +1066 -0
  172. data/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c +103 -0
  173. data/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.h +388 -0
  174. data/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c +34 -0
  175. data/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.h +78 -0
  176. data/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c +53 -0
  177. data/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.h +149 -0
  178. data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c +241 -0
  179. data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.h +839 -0
  180. data/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c +170 -0
  181. data/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.h +767 -0
  182. data/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c +36 -0
  183. data/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.h +88 -0
  184. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +176 -0
  185. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +730 -0
  186. data/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c +27 -0
  187. data/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.h +65 -0
  188. data/src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c +34 -0
  189. data/src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.h +95 -0
  190. data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +42 -0
  191. data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.h +126 -0
  192. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c +90 -0
  193. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.h +243 -0
  194. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c +91 -0
  195. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.h +305 -0
  196. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c +112 -0
  197. data/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.h +367 -0
  198. data/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c +33 -0
  199. data/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.h +73 -0
  200. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +130 -0
  201. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +557 -0
  202. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +159 -0
  203. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +623 -0
  204. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +40 -0
  205. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +107 -0
  206. data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +178 -0
  207. data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.h +662 -0
  208. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +65 -0
  209. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +237 -0
  210. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +926 -0
  211. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +3746 -0
  212. data/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c +60 -0
  213. data/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.h +159 -0
  214. data/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c +49 -0
  215. data/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.h +122 -0
  216. data/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c +29 -0
  217. data/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.h +67 -0
  218. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +361 -0
  219. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +1484 -0
  220. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c +19 -0
  221. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.h +35 -0
  222. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +113 -0
  223. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +458 -0
  224. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c +73 -0
  225. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.h +219 -0
  226. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c +146 -0
  227. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.h +621 -0
  228. data/src/core/ext/upb-generated/envoy/{api/v2/rds.upb.c → service/cluster/v3/cds.upb.c} +7 -9
  229. data/src/core/ext/upb-generated/envoy/service/cluster/v3/cds.upb.h +56 -0
  230. data/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c +25 -0
  231. data/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.h +56 -0
  232. data/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c +146 -0
  233. data/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.h +499 -0
  234. data/src/core/ext/upb-generated/envoy/service/endpoint/v3/eds.upb.c +27 -0
  235. data/src/core/ext/upb-generated/envoy/service/endpoint/v3/eds.upb.h +56 -0
  236. data/src/core/ext/upb-generated/envoy/service/listener/v3/lds.upb.c +27 -0
  237. data/src/core/ext/upb-generated/envoy/service/listener/v3/lds.upb.h +56 -0
  238. data/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c +54 -0
  239. data/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.h +151 -0
  240. data/src/core/ext/upb-generated/envoy/{api/v2/srds.upb.c → service/route/v3/rds.upb.c} +7 -7
  241. data/src/core/ext/upb-generated/envoy/service/route/v3/rds.upb.h +56 -0
  242. data/src/core/ext/upb-generated/envoy/{api/v2/cds.upb.c → service/route/v3/srds.upb.c} +7 -7
  243. data/src/core/ext/upb-generated/envoy/service/route/v3/srds.upb.h +56 -0
  244. data/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c +47 -0
  245. data/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.h +128 -0
  246. data/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c +35 -0
  247. data/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.h +84 -0
  248. data/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c +34 -0
  249. data/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.h +78 -0
  250. data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +64 -0
  251. data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.h +166 -0
  252. data/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c +53 -0
  253. data/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.h +146 -0
  254. data/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c +63 -0
  255. data/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.h +207 -0
  256. data/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c +88 -0
  257. data/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.h +301 -0
  258. data/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c +90 -0
  259. data/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.h +283 -0
  260. data/src/core/ext/upb-generated/envoy/type/{http.upb.c → v3/http.upb.c} +3 -2
  261. data/src/core/ext/upb-generated/envoy/type/{http.upb.h → v3/http.upb.h} +9 -9
  262. data/src/core/ext/upb-generated/envoy/type/v3/percent.upb.c +40 -0
  263. data/src/core/ext/upb-generated/envoy/type/v3/percent.upb.h +99 -0
  264. data/src/core/ext/upb-generated/envoy/type/v3/range.upb.c +51 -0
  265. data/src/core/ext/upb-generated/envoy/type/v3/range.upb.h +130 -0
  266. data/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c +30 -0
  267. data/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.h +68 -0
  268. data/src/core/ext/upb-generated/google/api/annotations.upb.h +1 -1
  269. data/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c +242 -0
  270. data/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.h +830 -0
  271. data/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c +251 -0
  272. data/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.h +871 -0
  273. data/src/core/ext/upb-generated/google/api/http.upb.c +3 -3
  274. data/src/core/ext/upb-generated/google/api/http.upb.h +52 -32
  275. data/src/core/ext/upb-generated/google/protobuf/any.upb.c +1 -1
  276. data/src/core/ext/upb-generated/google/protobuf/any.upb.h +12 -6
  277. data/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c +107 -106
  278. data/src/core/ext/upb-generated/google/protobuf/descriptor.upb.h +691 -496
  279. data/src/core/ext/upb-generated/google/protobuf/duration.upb.c +1 -1
  280. data/src/core/ext/upb-generated/google/protobuf/duration.upb.h +12 -6
  281. data/src/core/ext/upb-generated/google/protobuf/empty.upb.c +1 -1
  282. data/src/core/ext/upb-generated/google/protobuf/empty.upb.h +8 -2
  283. data/src/core/ext/upb-generated/google/protobuf/struct.upb.c +5 -5
  284. data/src/core/ext/upb-generated/google/protobuf/struct.upb.h +55 -57
  285. data/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c +1 -1
  286. data/src/core/ext/upb-generated/google/protobuf/timestamp.upb.h +12 -6
  287. data/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c +9 -9
  288. data/src/core/ext/upb-generated/google/protobuf/wrappers.upb.h +82 -28
  289. data/src/core/ext/upb-generated/google/rpc/status.upb.c +1 -1
  290. data/src/core/ext/upb-generated/google/rpc/status.upb.h +17 -10
  291. data/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c +5 -5
  292. data/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.h +40 -45
  293. data/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c +43 -43
  294. data/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.h +236 -184
  295. data/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c +5 -5
  296. data/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.h +29 -13
  297. data/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c +2 -2
  298. data/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.h +19 -7
  299. data/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c +17 -17
  300. data/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.h +122 -62
  301. data/src/core/ext/upb-generated/udpa/annotations/migrate.upb.c +3 -3
  302. data/src/core/ext/upb-generated/udpa/annotations/migrate.upb.h +30 -12
  303. data/src/core/ext/upb-generated/udpa/annotations/security.upb.c +31 -0
  304. data/src/core/ext/upb-generated/udpa/annotations/security.upb.h +64 -0
  305. data/src/core/ext/upb-generated/udpa/annotations/sensitive.upb.h +1 -1
  306. data/src/core/ext/upb-generated/udpa/annotations/status.upb.c +28 -0
  307. data/src/core/ext/upb-generated/udpa/annotations/status.upb.h +71 -0
  308. data/src/core/ext/upb-generated/udpa/annotations/versioning.upb.c +27 -0
  309. data/src/core/ext/upb-generated/udpa/annotations/versioning.upb.h +60 -0
  310. data/src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c +9 -9
  311. data/src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.h +48 -68
  312. data/src/core/ext/upb-generated/validate/validate.upb.c +71 -70
  313. data/src/core/ext/upb-generated/validate/validate.upb.h +732 -586
  314. data/src/core/ext/upb-generated/xds/core/v3/authority.upb.c +28 -0
  315. data/src/core/ext/upb-generated/xds/core/v3/authority.upb.h +60 -0
  316. data/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c +52 -0
  317. data/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.h +143 -0
  318. data/src/core/ext/upb-generated/xds/core/v3/context_params.upb.c +42 -0
  319. data/src/core/ext/upb-generated/xds/core/v3/context_params.upb.h +84 -0
  320. data/src/core/ext/upb-generated/xds/core/v3/resource.upb.c +36 -0
  321. data/src/core/ext/upb-generated/xds/core/v3/resource.upb.h +94 -0
  322. data/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c +54 -0
  323. data/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.h +166 -0
  324. data/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c +36 -0
  325. data/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.h +85 -0
  326. data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c +38 -0
  327. data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.h +30 -0
  328. data/src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c +41 -0
  329. data/src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.h +35 -0
  330. data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +251 -0
  331. data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.h +105 -0
  332. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c +100 -0
  333. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.h +45 -0
  334. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +543 -0
  335. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.h +145 -0
  336. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c +53 -0
  337. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.h +35 -0
  338. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c +136 -0
  339. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.h +35 -0
  340. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c +127 -0
  341. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.h +65 -0
  342. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c +56 -0
  343. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.h +35 -0
  344. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +272 -0
  345. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.h +135 -0
  346. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c +143 -0
  347. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.h +55 -0
  348. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c +56 -0
  349. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.h +35 -0
  350. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c +66 -0
  351. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.h +40 -0
  352. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c +263 -0
  353. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.h +100 -0
  354. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c +233 -0
  355. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.h +70 -0
  356. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c +56 -0
  357. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.h +35 -0
  358. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +228 -0
  359. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +80 -0
  360. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c +43 -0
  361. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.h +35 -0
  362. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c +59 -0
  363. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.h +35 -0
  364. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +68 -0
  365. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.h +35 -0
  366. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c +107 -0
  367. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.h +50 -0
  368. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c +113 -0
  369. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.h +50 -0
  370. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c +146 -0
  371. data/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.h +55 -0
  372. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c +50 -0
  373. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.h +35 -0
  374. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +195 -0
  375. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +55 -0
  376. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c +193 -0
  377. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.h +65 -0
  378. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +59 -0
  379. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.h +40 -0
  380. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +101 -0
  381. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +40 -0
  382. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +938 -0
  383. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +285 -0
  384. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c +71 -0
  385. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.h +45 -0
  386. data/src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c +61 -0
  387. data/src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.h +40 -0
  388. data/src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c +51 -0
  389. data/src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.h +35 -0
  390. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +504 -0
  391. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +115 -0
  392. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c +44 -0
  393. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.h +30 -0
  394. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +170 -0
  395. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.h +55 -0
  396. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c +97 -0
  397. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.h +45 -0
  398. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +246 -0
  399. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.h +60 -0
  400. data/src/core/ext/upbdefs-generated/envoy/service/cluster/v3/cds.upbdefs.c +72 -0
  401. data/src/core/ext/upbdefs-generated/envoy/service/cluster/v3/cds.upbdefs.h +35 -0
  402. data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c +60 -0
  403. data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.h +35 -0
  404. data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c +142 -0
  405. data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.h +65 -0
  406. data/src/core/ext/upbdefs-generated/envoy/service/endpoint/v3/eds.upbdefs.c +73 -0
  407. data/src/core/ext/upbdefs-generated/envoy/service/endpoint/v3/eds.upbdefs.h +35 -0
  408. data/src/core/ext/upbdefs-generated/envoy/service/listener/v3/lds.upbdefs.c +72 -0
  409. data/src/core/ext/upbdefs-generated/envoy/service/listener/v3/lds.upbdefs.h +35 -0
  410. data/src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c +80 -0
  411. data/src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.h +40 -0
  412. data/src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.c +80 -0
  413. data/src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.h +35 -0
  414. data/src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.c +74 -0
  415. data/src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.h +35 -0
  416. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c +64 -0
  417. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.h +40 -0
  418. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c +54 -0
  419. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.h +35 -0
  420. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c +53 -0
  421. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.h +35 -0
  422. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +73 -0
  423. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.h +45 -0
  424. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c +69 -0
  425. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.h +40 -0
  426. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c +81 -0
  427. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.h +45 -0
  428. data/src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c +92 -0
  429. data/src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.h +65 -0
  430. data/src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c +95 -0
  431. data/src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.h +55 -0
  432. data/src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c +34 -0
  433. data/src/core/ext/{upb-generated/gogoproto/gogo.upb.h → upbdefs-generated/envoy/type/v3/http.upbdefs.h} +10 -10
  434. data/src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c +59 -0
  435. data/src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.h +40 -0
  436. data/src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c +54 -0
  437. data/src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.h +45 -0
  438. data/src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c +47 -0
  439. data/src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.h +35 -0
  440. data/src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c +40 -0
  441. data/src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.h +30 -0
  442. data/src/core/ext/upbdefs-generated/google/api/http.upbdefs.c +61 -0
  443. data/src/core/ext/upbdefs-generated/google/api/http.upbdefs.h +45 -0
  444. data/src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c +39 -0
  445. data/src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.h +35 -0
  446. data/src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c +386 -0
  447. data/src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.h +165 -0
  448. data/src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c +40 -0
  449. data/src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.h +35 -0
  450. data/src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c +37 -0
  451. data/src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.h +35 -0
  452. data/src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c +65 -0
  453. data/src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.h +50 -0
  454. data/src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c +40 -0
  455. data/src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.h +35 -0
  456. data/src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c +66 -0
  457. data/src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.h +75 -0
  458. data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c +42 -0
  459. data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.h +35 -0
  460. data/src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c +70 -0
  461. data/src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.h +45 -0
  462. data/src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c +56 -0
  463. data/src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.h +35 -0
  464. data/src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c +33 -0
  465. data/src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.h +30 -0
  466. data/src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c +49 -0
  467. data/src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.h +35 -0
  468. data/src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c +43 -0
  469. data/src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.h +35 -0
  470. data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c +310 -0
  471. data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.h +145 -0
  472. data/src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c +42 -0
  473. data/src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.h +35 -0
  474. data/src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c +62 -0
  475. data/src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.h +40 -0
  476. data/src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c +45 -0
  477. data/src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.h +40 -0
  478. data/src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c +49 -0
  479. data/src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.h +35 -0
  480. data/src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c +67 -0
  481. data/src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.h +40 -0
  482. data/src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c +50 -0
  483. data/src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.h +35 -0
  484. data/src/core/ext/xds/certificate_provider_factory.h +61 -0
  485. data/src/core/ext/xds/certificate_provider_registry.cc +103 -0
  486. data/src/core/ext/xds/certificate_provider_registry.h +57 -0
  487. data/src/core/ext/xds/certificate_provider_store.cc +87 -0
  488. data/src/core/ext/xds/certificate_provider_store.h +112 -0
  489. data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +144 -0
  490. data/src/core/ext/xds/file_watcher_certificate_provider_factory.h +69 -0
  491. data/src/core/ext/xds/xds_api.cc +2479 -0
  492. data/src/core/ext/xds/xds_api.h +431 -0
  493. data/src/core/ext/xds/xds_bootstrap.cc +539 -0
  494. data/src/core/ext/xds/xds_bootstrap.h +116 -0
  495. data/src/core/ext/xds/xds_certificate_provider.cc +405 -0
  496. data/src/core/ext/xds/xds_certificate_provider.h +151 -0
  497. data/src/core/ext/{filters/client_channel/xds → xds}/xds_channel_args.h +9 -6
  498. data/src/core/ext/{filters/client_channel/xds → xds}/xds_client.cc +839 -774
  499. data/src/core/ext/xds/xds_client.h +339 -0
  500. data/src/core/ext/xds/xds_client_stats.cc +159 -0
  501. data/src/core/ext/{filters/client_channel/xds → xds}/xds_client_stats.h +78 -38
  502. data/src/core/ext/xds/xds_server_config_fetcher.cc +267 -0
  503. data/src/core/lib/channel/channel_args.cc +24 -22
  504. data/src/core/lib/channel/channel_args.h +3 -2
  505. data/src/core/lib/channel/channel_stack.h +20 -13
  506. data/src/core/lib/channel/channel_trace.cc +6 -8
  507. data/src/core/lib/channel/channel_trace.h +1 -1
  508. data/src/core/lib/channel/channelz.cc +46 -94
  509. data/src/core/lib/channel/channelz.h +17 -25
  510. data/src/core/lib/channel/channelz_registry.cc +20 -15
  511. data/src/core/lib/channel/channelz_registry.h +3 -1
  512. data/src/core/lib/channel/connected_channel.cc +7 -5
  513. data/src/core/lib/channel/context.h +1 -1
  514. data/src/core/lib/channel/handshaker.cc +15 -20
  515. data/src/core/lib/channel/handshaker.h +7 -5
  516. data/src/core/lib/channel/handshaker_registry.cc +5 -17
  517. data/src/core/lib/channel/status_util.cc +2 -3
  518. data/src/core/lib/compression/compression.cc +8 -4
  519. data/src/core/lib/compression/compression_args.cc +3 -2
  520. data/src/core/lib/compression/compression_internal.cc +10 -5
  521. data/src/core/lib/compression/compression_internal.h +2 -1
  522. data/src/core/lib/compression/message_compress.cc +5 -1
  523. data/src/core/lib/compression/stream_compression_identity.cc +1 -3
  524. data/src/core/lib/debug/stats.cc +21 -27
  525. data/src/core/lib/debug/stats.h +5 -3
  526. data/src/core/lib/debug/stats_data.cc +1 -0
  527. data/src/core/lib/debug/stats_data.h +13 -13
  528. data/src/core/lib/gpr/alloc.cc +3 -2
  529. data/src/core/lib/gpr/cpu_iphone.cc +10 -2
  530. data/src/core/lib/gpr/log.cc +59 -17
  531. data/src/core/lib/gpr/log_linux.cc +23 -9
  532. data/src/core/lib/gpr/log_posix.cc +19 -7
  533. data/src/core/lib/gpr/log_windows.cc +18 -4
  534. data/src/core/lib/gpr/murmur_hash.cc +1 -1
  535. data/src/core/lib/gpr/spinlock.h +12 -5
  536. data/src/core/lib/gpr/string.cc +33 -55
  537. data/src/core/lib/gpr/string.h +9 -24
  538. data/src/core/lib/gpr/sync.cc +4 -4
  539. data/src/core/lib/gpr/sync_abseil.cc +2 -0
  540. data/src/core/lib/gpr/sync_posix.cc +2 -8
  541. data/src/core/lib/gpr/time.cc +16 -12
  542. data/src/core/lib/gpr/time_posix.cc +1 -1
  543. data/src/core/lib/gpr/time_precise.cc +5 -2
  544. data/src/core/lib/gpr/time_precise.h +6 -2
  545. data/src/core/lib/gpr/tls.h +4 -0
  546. data/src/core/lib/gpr/tls_msvc.h +2 -0
  547. data/src/core/lib/gpr/tls_stdcpp.h +48 -0
  548. data/src/core/lib/gpr/useful.h +5 -4
  549. data/src/core/lib/gprpp/arena.h +3 -2
  550. data/src/core/lib/gprpp/atomic.h +6 -6
  551. data/src/core/lib/gprpp/dual_ref_counted.h +331 -0
  552. data/src/core/lib/gprpp/examine_stack.cc +43 -0
  553. data/src/core/lib/gprpp/examine_stack.h +46 -0
  554. data/src/core/lib/gprpp/fork.cc +3 -3
  555. data/src/core/lib/gprpp/global_config_env.cc +8 -6
  556. data/src/core/lib/gprpp/host_port.cc +29 -35
  557. data/src/core/lib/gprpp/host_port.h +14 -17
  558. data/src/core/lib/gprpp/manual_constructor.h +1 -1
  559. data/src/core/lib/gprpp/mpscq.cc +2 -2
  560. data/src/core/lib/gprpp/orphanable.h +4 -8
  561. data/src/core/lib/gprpp/ref_counted.h +91 -68
  562. data/src/core/lib/gprpp/ref_counted_ptr.h +171 -7
  563. data/src/core/lib/gprpp/stat.h +38 -0
  564. data/src/core/lib/gprpp/stat_posix.cc +49 -0
  565. data/src/core/lib/gprpp/stat_windows.cc +48 -0
  566. data/src/core/lib/gprpp/sync.h +129 -40
  567. data/src/core/lib/gprpp/thd.h +2 -2
  568. data/src/core/lib/gprpp/thd_posix.cc +42 -37
  569. data/src/core/lib/gprpp/thd_windows.cc +3 -1
  570. data/src/core/lib/gprpp/time_util.cc +77 -0
  571. data/src/core/lib/gprpp/time_util.h +42 -0
  572. data/src/core/lib/http/format_request.cc +46 -65
  573. data/src/core/lib/http/httpcli.cc +16 -14
  574. data/src/core/lib/http/httpcli.h +4 -6
  575. data/src/core/lib/http/httpcli_security_connector.cc +13 -13
  576. data/src/core/lib/http/parser.cc +47 -27
  577. data/src/core/lib/http/parser.h +2 -3
  578. data/src/core/lib/iomgr/buffer_list.h +22 -21
  579. data/src/core/lib/iomgr/call_combiner.cc +8 -5
  580. data/src/core/lib/iomgr/call_combiner.h +3 -2
  581. data/src/core/lib/iomgr/cfstream_handle.cc +4 -2
  582. data/src/core/lib/iomgr/closure.h +2 -3
  583. data/src/core/lib/iomgr/combiner.cc +2 -1
  584. data/src/core/lib/iomgr/dualstack_socket_posix.cc +47 -0
  585. data/src/core/lib/iomgr/endpoint.cc +5 -1
  586. data/src/core/lib/iomgr/endpoint.h +8 -4
  587. data/src/core/lib/iomgr/endpoint_cfstream.cc +38 -14
  588. data/src/core/lib/iomgr/endpoint_pair.h +2 -3
  589. data/src/core/lib/iomgr/endpoint_pair_posix.cc +10 -10
  590. data/src/core/lib/iomgr/error.cc +23 -21
  591. data/src/core/lib/iomgr/error.h +0 -1
  592. data/src/core/lib/iomgr/error_cfstream.cc +9 -8
  593. data/src/core/lib/iomgr/error_internal.h +1 -1
  594. data/src/core/lib/iomgr/ev_apple.cc +359 -0
  595. data/src/core/lib/iomgr/ev_apple.h +43 -0
  596. data/src/core/lib/iomgr/ev_epoll1_linux.cc +43 -40
  597. data/src/core/lib/iomgr/ev_epollex_linux.cc +46 -45
  598. data/src/core/lib/iomgr/ev_poll_posix.cc +18 -15
  599. data/src/core/lib/iomgr/ev_posix.cc +2 -3
  600. data/src/core/lib/iomgr/exec_ctx.cc +1 -1
  601. data/src/core/lib/iomgr/exec_ctx.h +26 -10
  602. data/src/core/lib/iomgr/executor.cc +2 -1
  603. data/src/core/lib/iomgr/executor.h +1 -1
  604. data/src/core/lib/iomgr/executor/mpmcqueue.h +5 -5
  605. data/src/core/lib/iomgr/executor/threadpool.h +4 -4
  606. data/src/core/lib/iomgr/iomgr.cc +1 -1
  607. data/src/core/lib/iomgr/iomgr_posix.cc +0 -1
  608. data/src/core/lib/iomgr/iomgr_posix_cfstream.cc +84 -21
  609. data/src/core/lib/iomgr/is_epollexclusive_available.cc +14 -0
  610. data/src/core/lib/iomgr/load_file.h +1 -1
  611. data/src/core/lib/iomgr/lockfree_event.cc +19 -14
  612. data/src/core/lib/iomgr/lockfree_event.h +2 -2
  613. data/src/core/lib/iomgr/parse_address.cc +322 -0
  614. data/src/core/lib/iomgr/parse_address.h +77 -0
  615. data/src/core/lib/iomgr/poller/eventmanager_libuv.cc +2 -1
  616. data/src/core/lib/iomgr/poller/eventmanager_libuv.h +1 -1
  617. data/src/core/lib/iomgr/pollset_set_custom.cc +11 -11
  618. data/src/core/lib/{gprpp/optional.h → iomgr/pollset_uv.h} +11 -12
  619. data/src/core/lib/iomgr/port.h +2 -21
  620. data/src/core/lib/iomgr/python_util.h +46 -0
  621. data/src/core/lib/iomgr/resolve_address.cc +4 -4
  622. data/src/core/lib/iomgr/resolve_address.h +4 -6
  623. data/src/core/lib/iomgr/resolve_address_custom.cc +42 -57
  624. data/src/core/lib/iomgr/resolve_address_custom.h +4 -2
  625. data/src/core/lib/iomgr/resolve_address_posix.cc +11 -16
  626. data/src/core/lib/iomgr/resolve_address_windows.cc +16 -25
  627. data/src/core/lib/iomgr/resource_quota.cc +38 -37
  628. data/src/core/lib/iomgr/sockaddr_utils.cc +41 -44
  629. data/src/core/lib/iomgr/sockaddr_utils.h +13 -17
  630. data/src/core/lib/iomgr/socket_factory_posix.cc +3 -2
  631. data/src/core/lib/iomgr/socket_factory_posix.h +2 -3
  632. data/src/core/lib/iomgr/socket_mutator.cc +3 -2
  633. data/src/core/lib/iomgr/socket_mutator.h +2 -3
  634. data/src/core/lib/iomgr/socket_utils_common_posix.cc +103 -81
  635. data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
  636. data/src/core/lib/iomgr/socket_windows.cc +4 -5
  637. data/src/core/lib/iomgr/tcp_client.cc +3 -3
  638. data/src/core/lib/iomgr/tcp_client_cfstream.cc +14 -18
  639. data/src/core/lib/iomgr/tcp_client_custom.cc +13 -15
  640. data/src/core/lib/iomgr/tcp_client_posix.cc +31 -37
  641. data/src/core/lib/iomgr/tcp_client_windows.cc +10 -11
  642. data/src/core/lib/iomgr/tcp_custom.cc +56 -36
  643. data/src/core/lib/iomgr/tcp_custom.h +1 -1
  644. data/src/core/lib/iomgr/tcp_posix.cc +47 -25
  645. data/src/core/lib/iomgr/tcp_server.cc +3 -4
  646. data/src/core/lib/iomgr/tcp_server.h +7 -5
  647. data/src/core/lib/iomgr/tcp_server_custom.cc +39 -45
  648. data/src/core/lib/iomgr/tcp_server_posix.cc +38 -44
  649. data/src/core/lib/iomgr/tcp_server_utils_posix.h +3 -4
  650. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +7 -8
  651. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +10 -18
  652. data/src/core/lib/iomgr/tcp_server_windows.cc +16 -16
  653. data/src/core/lib/iomgr/tcp_uv.cc +3 -2
  654. data/src/core/lib/iomgr/tcp_windows.cc +26 -10
  655. data/src/core/lib/iomgr/time_averaged_stats.h +2 -3
  656. data/src/core/lib/iomgr/timer_custom.cc +5 -5
  657. data/src/core/lib/iomgr/timer_generic.cc +18 -18
  658. data/src/core/lib/{gprpp/inlined_vector.h → iomgr/timer_generic.h} +19 -17
  659. data/src/core/lib/iomgr/timer_heap.h +2 -3
  660. data/src/core/lib/iomgr/timer_manager.cc +2 -2
  661. data/src/core/lib/iomgr/udp_server.cc +33 -38
  662. data/src/core/lib/iomgr/udp_server.h +6 -4
  663. data/src/core/lib/iomgr/unix_sockets_posix.cc +36 -30
  664. data/src/core/lib/iomgr/unix_sockets_posix.h +8 -1
  665. data/src/core/lib/iomgr/unix_sockets_posix_noop.cc +12 -2
  666. data/src/core/lib/iomgr/wakeup_fd_pipe.cc +2 -2
  667. data/src/core/lib/json/json.h +15 -4
  668. data/src/core/lib/json/json_reader.cc +33 -30
  669. data/src/core/lib/json/json_util.cc +58 -0
  670. data/src/core/lib/json/json_util.h +204 -0
  671. data/src/core/lib/json/json_writer.cc +15 -13
  672. data/src/core/lib/security/authorization/authorization_engine.cc +177 -0
  673. data/src/core/lib/security/authorization/authorization_engine.h +84 -0
  674. data/src/core/lib/security/authorization/evaluate_args.cc +148 -0
  675. data/src/core/lib/security/authorization/evaluate_args.h +59 -0
  676. data/src/core/lib/security/authorization/matchers.cc +339 -0
  677. data/src/core/lib/security/authorization/matchers.h +158 -0
  678. data/src/core/lib/security/authorization/mock_cel/activation.h +57 -0
  679. data/src/core/lib/security/authorization/mock_cel/cel_expr_builder_factory.h +44 -0
  680. data/src/core/lib/security/authorization/mock_cel/cel_expression.h +69 -0
  681. data/src/core/lib/security/authorization/mock_cel/cel_value.h +99 -0
  682. data/src/core/lib/security/authorization/mock_cel/evaluator_core.h +67 -0
  683. data/src/core/lib/security/authorization/mock_cel/flat_expr_builder.h +57 -0
  684. data/src/core/lib/security/context/security_context.cc +4 -3
  685. data/src/core/lib/security/context/security_context.h +3 -1
  686. data/src/core/lib/security/credentials/alts/alts_credentials.cc +2 -1
  687. data/src/core/lib/security/credentials/alts/alts_credentials.h +1 -1
  688. data/src/core/lib/security/credentials/alts/check_gcp_environment.cc +1 -1
  689. data/src/core/lib/security/credentials/composite/composite_credentials.cc +12 -0
  690. data/src/core/lib/security/credentials/composite/composite_credentials.h +6 -3
  691. data/src/core/lib/security/credentials/credentials.cc +7 -91
  692. data/src/core/lib/security/credentials/credentials.h +18 -66
  693. data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +413 -0
  694. data/src/core/lib/security/credentials/external/aws_external_account_credentials.h +80 -0
  695. data/src/core/lib/security/credentials/external/aws_request_signer.cc +213 -0
  696. data/src/core/lib/security/credentials/external/aws_request_signer.h +72 -0
  697. data/src/core/lib/security/credentials/external/external_account_credentials.cc +497 -0
  698. data/src/core/lib/security/credentials/external/external_account_credentials.h +120 -0
  699. data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +135 -0
  700. data/src/core/lib/security/credentials/external/file_external_account_credentials.h +48 -0
  701. data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +213 -0
  702. data/src/core/lib/security/credentials/external/url_external_account_credentials.h +58 -0
  703. data/src/core/lib/security/credentials/fake/fake_credentials.cc +3 -2
  704. data/src/core/lib/security/credentials/fake/fake_credentials.h +4 -0
  705. data/src/core/lib/security/credentials/google_default/credentials_generic.cc +8 -6
  706. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +90 -67
  707. data/src/core/lib/security/credentials/iam/iam_credentials.cc +8 -6
  708. data/src/core/lib/security/credentials/iam/iam_credentials.h +4 -0
  709. data/src/core/lib/security/credentials/insecure/insecure_credentials.cc +64 -0
  710. data/src/core/lib/security/credentials/jwt/json_token.cc +4 -4
  711. data/src/core/lib/security/credentials/jwt/json_token.h +2 -5
  712. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +7 -4
  713. data/src/core/lib/security/credentials/jwt/jwt_credentials.h +13 -0
  714. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +13 -19
  715. data/src/core/lib/security/credentials/jwt/jwt_verifier.h +2 -3
  716. data/src/core/lib/security/credentials/local/local_credentials.cc +2 -1
  717. data/src/core/lib/security/credentials/local/local_credentials.h +1 -1
  718. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +109 -97
  719. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +14 -7
  720. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +20 -7
  721. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
  722. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +27 -6
  723. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +12 -2
  724. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc +346 -0
  725. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h +213 -0
  726. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +399 -0
  727. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +138 -0
  728. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +78 -140
  729. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h +74 -167
  730. data/src/core/lib/security/credentials/tls/tls_credentials.cc +18 -13
  731. data/src/core/lib/security/credentials/tls/tls_credentials.h +3 -3
  732. data/src/core/lib/security/credentials/tls/tls_utils.cc +91 -0
  733. data/src/core/lib/security/credentials/tls/tls_utils.h +38 -0
  734. data/src/core/lib/security/credentials/xds/xds_credentials.cc +244 -0
  735. data/src/core/lib/security/credentials/xds/xds_credentials.h +69 -0
  736. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +22 -7
  737. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +27 -32
  738. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc +121 -0
  739. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.h +87 -0
  740. data/src/core/lib/security/security_connector/load_system_roots.h +4 -0
  741. data/src/core/lib/security/security_connector/load_system_roots_linux.cc +3 -2
  742. data/src/core/lib/security/security_connector/load_system_roots_linux.h +2 -0
  743. data/src/core/lib/security/security_connector/local/local_security_connector.cc +4 -4
  744. data/src/core/lib/security/security_connector/security_connector.cc +6 -3
  745. data/src/core/lib/security/security_connector/security_connector.h +6 -4
  746. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +42 -40
  747. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.h +8 -5
  748. data/src/core/lib/security/security_connector/ssl_utils.cc +94 -23
  749. data/src/core/lib/security/security_connector/ssl_utils.h +37 -31
  750. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +388 -284
  751. data/src/core/lib/security/security_connector/tls/tls_security_connector.h +108 -42
  752. data/src/core/lib/security/transport/auth_filters.h +0 -5
  753. data/src/core/lib/security/transport/client_auth_filter.cc +11 -11
  754. data/src/core/lib/security/transport/secure_endpoint.cc +9 -3
  755. data/src/core/lib/security/transport/security_handshaker.cc +4 -6
  756. data/src/core/lib/security/transport/server_auth_filter.cc +2 -1
  757. data/src/core/lib/security/util/json_util.cc +12 -13
  758. data/src/core/lib/security/util/json_util.h +1 -0
  759. data/src/core/lib/slice/slice.cc +45 -5
  760. data/src/core/lib/slice/slice_buffer.cc +2 -1
  761. data/src/core/lib/slice/slice_intern.cc +9 -11
  762. data/src/core/lib/slice/slice_internal.h +17 -2
  763. data/src/core/lib/slice/slice_utils.h +9 -0
  764. data/src/core/lib/surface/byte_buffer_reader.cc +2 -47
  765. data/src/core/lib/surface/call.cc +95 -88
  766. data/src/core/lib/surface/call.h +2 -1
  767. data/src/core/lib/surface/call_details.cc +8 -8
  768. data/src/core/lib/surface/call_log_batch.cc +50 -58
  769. data/src/core/lib/surface/channel.cc +86 -72
  770. data/src/core/lib/surface/channel.h +54 -7
  771. data/src/core/lib/surface/channel_init.cc +1 -1
  772. data/src/core/lib/surface/channel_ping.cc +2 -3
  773. data/src/core/lib/surface/completion_queue.cc +63 -62
  774. data/src/core/lib/surface/completion_queue.h +16 -16
  775. data/src/core/lib/surface/event_string.cc +18 -25
  776. data/src/core/lib/surface/event_string.h +3 -1
  777. data/src/core/lib/surface/init.cc +45 -29
  778. data/src/core/lib/surface/init_secure.cc +1 -4
  779. data/src/core/lib/surface/lame_client.cc +20 -46
  780. data/src/core/lib/surface/lame_client.h +4 -0
  781. data/src/core/lib/surface/server.cc +1311 -1309
  782. data/src/core/lib/surface/server.h +410 -45
  783. data/src/core/lib/surface/validate_metadata.h +3 -0
  784. data/src/core/lib/surface/version.cc +2 -2
  785. data/src/core/lib/transport/authority_override.cc +40 -0
  786. data/src/core/lib/transport/authority_override.h +37 -0
  787. data/src/core/lib/transport/bdp_estimator.cc +1 -1
  788. data/src/core/lib/transport/bdp_estimator.h +2 -1
  789. data/src/core/lib/transport/byte_stream.h +10 -5
  790. data/src/core/lib/transport/connectivity_state.cc +23 -17
  791. data/src/core/lib/transport/connectivity_state.h +31 -15
  792. data/src/core/lib/transport/error_utils.cc +13 -0
  793. data/src/core/lib/transport/error_utils.h +7 -1
  794. data/src/core/lib/transport/metadata.cc +19 -5
  795. data/src/core/lib/transport/metadata.h +2 -2
  796. data/src/core/lib/transport/metadata_batch.h +6 -7
  797. data/src/core/lib/transport/static_metadata.cc +296 -277
  798. data/src/core/lib/transport/static_metadata.h +81 -74
  799. data/src/core/lib/transport/status_conversion.cc +6 -14
  800. data/src/core/lib/transport/status_metadata.cc +4 -3
  801. data/src/core/lib/transport/timeout_encoding.cc +4 -4
  802. data/src/core/lib/transport/transport.cc +7 -6
  803. data/src/core/lib/transport/transport.h +24 -10
  804. data/src/core/lib/transport/transport_op_string.cc +61 -102
  805. data/src/core/lib/uri/uri_parser.cc +135 -258
  806. data/src/core/lib/uri/uri_parser.h +60 -23
  807. data/src/core/plugin_registry/grpc_plugin_registry.cc +59 -12
  808. data/src/core/tsi/alts/crypt/aes_gcm.cc +0 -2
  809. data/src/core/tsi/alts/crypt/gsec.cc +5 -4
  810. data/src/core/tsi/alts/frame_protector/frame_handler.cc +8 -6
  811. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +48 -34
  812. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +8 -4
  813. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +98 -48
  814. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +9 -1
  815. data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +2 -0
  816. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h +2 -3
  817. data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc +8 -6
  818. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +4 -4
  819. data/src/core/tsi/fake_transport_security.cc +17 -19
  820. data/src/core/tsi/local_transport_security.cc +5 -1
  821. data/src/core/tsi/local_transport_security.h +6 -7
  822. data/src/core/tsi/ssl/session_cache/ssl_session.h +0 -2
  823. data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +1 -1
  824. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +0 -2
  825. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +3 -4
  826. data/src/core/tsi/ssl_transport_security.cc +226 -105
  827. data/src/core/tsi/ssl_transport_security.h +28 -16
  828. data/src/core/tsi/ssl_types.h +0 -2
  829. data/src/core/tsi/transport_security.cc +10 -8
  830. data/src/core/tsi/transport_security.h +6 -9
  831. data/src/core/tsi/transport_security_grpc.h +2 -3
  832. data/src/core/tsi/transport_security_interface.h +9 -4
  833. data/src/ruby/bin/math_services_pb.rb +4 -4
  834. data/src/ruby/ext/grpc/extconf.rb +6 -3
  835. data/src/ruby/ext/grpc/rb_call.c +12 -3
  836. data/src/ruby/ext/grpc/rb_call.h +4 -0
  837. data/src/ruby/ext/grpc/rb_call_credentials.c +57 -12
  838. data/src/ruby/ext/grpc/rb_channel_credentials.c +9 -0
  839. data/src/ruby/ext/grpc/rb_event_thread.c +2 -0
  840. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +42 -18
  841. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +80 -44
  842. data/src/ruby/lib/grpc/errors.rb +103 -42
  843. data/src/ruby/lib/grpc/generic/active_call.rb +2 -3
  844. data/src/ruby/lib/grpc/generic/client_stub.rb +1 -1
  845. data/src/ruby/lib/grpc/generic/interceptors.rb +5 -5
  846. data/src/ruby/lib/grpc/generic/rpc_server.rb +9 -10
  847. data/src/ruby/lib/grpc/generic/service.rb +5 -4
  848. data/src/ruby/lib/grpc/structs.rb +1 -1
  849. data/src/ruby/lib/grpc/version.rb +1 -1
  850. data/src/ruby/pb/generate_proto_ruby.sh +5 -3
  851. data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +2 -2
  852. data/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb +51 -0
  853. data/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb +61 -11
  854. data/src/ruby/spec/channel_credentials_spec.rb +10 -0
  855. data/src/ruby/spec/debug_message_spec.rb +134 -0
  856. data/src/ruby/spec/generic/active_call_spec.rb +19 -8
  857. data/src/ruby/spec/generic/service_spec.rb +2 -0
  858. data/src/ruby/spec/pb/codegen/grpc/testing/package_options_import2.proto +23 -0
  859. data/src/ruby/spec/pb/codegen/grpc/testing/package_options_ruby_style.proto +7 -0
  860. data/src/ruby/spec/pb/codegen/grpc/testing/same_package_service_name.proto +27 -0
  861. data/src/ruby/spec/pb/codegen/grpc/testing/same_ruby_package_service_name.proto +29 -0
  862. data/src/ruby/spec/pb/codegen/package_option_spec.rb +29 -7
  863. data/src/ruby/spec/support/services.rb +10 -4
  864. data/src/ruby/spec/testdata/ca.pem +18 -13
  865. data/src/ruby/spec/testdata/client.key +26 -14
  866. data/src/ruby/spec/testdata/client.pem +18 -12
  867. data/src/ruby/spec/testdata/server1.key +26 -14
  868. data/src/ruby/spec/testdata/server1.pem +20 -14
  869. data/src/ruby/spec/user_agent_spec.rb +74 -0
  870. data/third_party/abseil-cpp/absl/algorithm/container.h +1764 -0
  871. data/third_party/abseil-cpp/absl/base/attributes.h +99 -38
  872. data/third_party/abseil-cpp/absl/base/call_once.h +1 -1
  873. data/third_party/abseil-cpp/absl/base/casts.h +9 -6
  874. data/third_party/abseil-cpp/absl/base/config.h +60 -17
  875. data/third_party/abseil-cpp/absl/base/dynamic_annotations.h +428 -335
  876. data/third_party/abseil-cpp/absl/base/internal/bits.h +17 -16
  877. data/third_party/abseil-cpp/absl/base/internal/direct_mmap.h +166 -0
  878. data/third_party/abseil-cpp/absl/base/internal/dynamic_annotations.h +398 -0
  879. data/third_party/abseil-cpp/absl/base/internal/exponential_biased.cc +93 -0
  880. data/third_party/abseil-cpp/absl/base/internal/exponential_biased.h +130 -0
  881. data/third_party/abseil-cpp/absl/base/internal/invoke.h +4 -4
  882. data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc +620 -0
  883. data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.h +126 -0
  884. data/third_party/abseil-cpp/absl/base/internal/low_level_scheduling.h +29 -1
  885. data/third_party/abseil-cpp/absl/base/internal/raw_logging.cc +2 -2
  886. data/third_party/abseil-cpp/absl/base/internal/raw_logging.h +7 -5
  887. data/third_party/abseil-cpp/absl/base/internal/spinlock.cc +25 -38
  888. data/third_party/abseil-cpp/absl/base/internal/spinlock.h +19 -25
  889. data/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc +8 -0
  890. data/third_party/abseil-cpp/absl/base/internal/sysinfo.cc +28 -5
  891. data/third_party/abseil-cpp/absl/base/internal/sysinfo.h +8 -0
  892. data/third_party/abseil-cpp/absl/base/internal/tsan_mutex_interface.h +3 -1
  893. data/third_party/abseil-cpp/absl/base/internal/unaligned_access.h +2 -2
  894. data/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h +3 -3
  895. data/third_party/abseil-cpp/absl/base/macros.h +36 -109
  896. data/third_party/abseil-cpp/absl/base/optimization.h +61 -1
  897. data/third_party/abseil-cpp/absl/base/options.h +31 -4
  898. data/third_party/abseil-cpp/absl/base/policy_checks.h +1 -1
  899. data/third_party/abseil-cpp/absl/base/thread_annotations.h +94 -39
  900. data/third_party/abseil-cpp/absl/container/fixed_array.h +532 -0
  901. data/third_party/abseil-cpp/absl/container/flat_hash_map.h +606 -0
  902. data/third_party/abseil-cpp/absl/container/flat_hash_set.h +504 -0
  903. data/third_party/abseil-cpp/absl/container/inlined_vector.h +33 -36
  904. data/third_party/abseil-cpp/absl/container/internal/common.h +206 -0
  905. data/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h +33 -8
  906. data/third_party/abseil-cpp/absl/container/internal/container_memory.h +460 -0
  907. data/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h +161 -0
  908. data/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h +208 -0
  909. data/third_party/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h +85 -0
  910. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +270 -0
  911. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h +321 -0
  912. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +30 -0
  913. data/third_party/abseil-cpp/absl/container/internal/have_sse.h +50 -0
  914. data/third_party/abseil-cpp/absl/container/internal/layout.h +743 -0
  915. data/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h +197 -0
  916. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +48 -0
  917. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +1903 -0
  918. data/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc +139 -0
  919. data/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.h +32 -0
  920. data/third_party/abseil-cpp/absl/debugging/internal/demangle.cc +1945 -0
  921. data/third_party/abseil-cpp/absl/debugging/internal/demangle.h +71 -0
  922. data/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc +382 -0
  923. data/third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.h +134 -0
  924. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +196 -0
  925. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc +134 -0
  926. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h +89 -0
  927. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc +108 -0
  928. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +248 -0
  929. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc +24 -0
  930. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc +93 -0
  931. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +346 -0
  932. data/third_party/abseil-cpp/absl/debugging/internal/symbolize.h +149 -0
  933. data/third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc +173 -0
  934. data/third_party/abseil-cpp/absl/debugging/internal/vdso_support.h +158 -0
  935. data/third_party/abseil-cpp/absl/debugging/stacktrace.cc +140 -0
  936. data/third_party/abseil-cpp/absl/debugging/stacktrace.h +231 -0
  937. data/third_party/abseil-cpp/absl/debugging/symbolize.cc +36 -0
  938. data/third_party/abseil-cpp/absl/debugging/symbolize.h +99 -0
  939. data/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc +101 -0
  940. data/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +1560 -0
  941. data/third_party/abseil-cpp/absl/debugging/symbolize_unimplemented.inc +40 -0
  942. data/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc +81 -0
  943. data/third_party/abseil-cpp/absl/functional/bind_front.h +184 -0
  944. data/third_party/abseil-cpp/absl/functional/function_ref.h +139 -0
  945. data/third_party/abseil-cpp/absl/functional/internal/front_binder.h +95 -0
  946. data/third_party/abseil-cpp/absl/functional/internal/function_ref.h +106 -0
  947. data/third_party/abseil-cpp/absl/hash/hash.h +325 -0
  948. data/third_party/abseil-cpp/absl/hash/internal/city.cc +346 -0
  949. data/third_party/abseil-cpp/absl/hash/internal/city.h +96 -0
  950. data/third_party/abseil-cpp/absl/hash/internal/hash.cc +55 -0
  951. data/third_party/abseil-cpp/absl/hash/internal/hash.h +996 -0
  952. data/third_party/abseil-cpp/absl/memory/memory.h +4 -0
  953. data/third_party/abseil-cpp/absl/meta/type_traits.h +2 -8
  954. data/third_party/abseil-cpp/absl/numeric/int128.cc +13 -27
  955. data/third_party/abseil-cpp/absl/numeric/int128.h +16 -15
  956. data/third_party/abseil-cpp/absl/status/internal/status_internal.h +51 -0
  957. data/third_party/abseil-cpp/absl/status/internal/statusor_internal.h +399 -0
  958. data/third_party/abseil-cpp/absl/status/status.cc +445 -0
  959. data/third_party/abseil-cpp/absl/status/status.h +817 -0
  960. data/third_party/abseil-cpp/absl/status/status_payload_printer.cc +38 -0
  961. data/third_party/abseil-cpp/absl/status/status_payload_printer.h +51 -0
  962. data/third_party/abseil-cpp/absl/status/statusor.cc +71 -0
  963. data/third_party/abseil-cpp/absl/status/statusor.h +760 -0
  964. data/third_party/abseil-cpp/absl/strings/charconv.cc +2 -2
  965. data/third_party/abseil-cpp/absl/strings/cord.cc +1998 -0
  966. data/third_party/abseil-cpp/absl/strings/cord.h +1276 -0
  967. data/third_party/abseil-cpp/absl/strings/escaping.cc +9 -9
  968. data/third_party/abseil-cpp/absl/strings/internal/char_map.h +1 -1
  969. data/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc +1 -1
  970. data/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.h +2 -2
  971. data/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc +2 -2
  972. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +173 -0
  973. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +222 -136
  974. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +136 -64
  975. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc +1 -1
  976. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h +14 -21
  977. data/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h +7 -14
  978. data/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc +31 -7
  979. data/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h +147 -135
  980. data/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc +999 -87
  981. data/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.h +3 -3
  982. data/third_party/abseil-cpp/absl/strings/internal/str_format/output.h +4 -12
  983. data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc +8 -6
  984. data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h +13 -11
  985. data/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h +2 -2
  986. data/third_party/abseil-cpp/absl/strings/str_cat.cc +4 -4
  987. data/third_party/abseil-cpp/absl/strings/str_cat.h +1 -1
  988. data/third_party/abseil-cpp/absl/strings/str_format.h +289 -13
  989. data/third_party/abseil-cpp/absl/strings/str_split.cc +2 -2
  990. data/third_party/abseil-cpp/absl/strings/str_split.h +1 -0
  991. data/third_party/abseil-cpp/absl/strings/string_view.h +26 -19
  992. data/third_party/abseil-cpp/absl/strings/substitute.cc +5 -5
  993. data/third_party/abseil-cpp/absl/strings/substitute.h +32 -29
  994. data/third_party/abseil-cpp/absl/synchronization/barrier.cc +52 -0
  995. data/third_party/abseil-cpp/absl/synchronization/barrier.h +79 -0
  996. data/third_party/abseil-cpp/absl/synchronization/blocking_counter.cc +57 -0
  997. data/third_party/abseil-cpp/absl/synchronization/blocking_counter.h +99 -0
  998. data/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc +140 -0
  999. data/third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.h +60 -0
  1000. data/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc +698 -0
  1001. data/third_party/abseil-cpp/absl/synchronization/internal/graphcycles.h +141 -0
  1002. data/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h +155 -0
  1003. data/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc +249 -0
  1004. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc +106 -0
  1005. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h +115 -0
  1006. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc +492 -0
  1007. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.h +159 -0
  1008. data/third_party/abseil-cpp/absl/synchronization/mutex.cc +2739 -0
  1009. data/third_party/abseil-cpp/absl/synchronization/mutex.h +1065 -0
  1010. data/third_party/abseil-cpp/absl/synchronization/notification.cc +78 -0
  1011. data/third_party/abseil-cpp/absl/synchronization/notification.h +123 -0
  1012. data/third_party/abseil-cpp/absl/time/civil_time.cc +175 -0
  1013. data/third_party/abseil-cpp/absl/time/civil_time.h +538 -0
  1014. data/third_party/abseil-cpp/absl/time/clock.cc +569 -0
  1015. data/third_party/abseil-cpp/absl/time/clock.h +74 -0
  1016. data/third_party/abseil-cpp/absl/time/duration.cc +953 -0
  1017. data/third_party/abseil-cpp/absl/time/format.cc +160 -0
  1018. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time.h +332 -0
  1019. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +632 -0
  1020. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h +386 -0
  1021. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/zone_info_source.h +102 -0
  1022. data/third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc +94 -0
  1023. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc +140 -0
  1024. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.h +52 -0
  1025. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc +1029 -0
  1026. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc +45 -0
  1027. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.h +76 -0
  1028. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc +113 -0
  1029. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.h +93 -0
  1030. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc +965 -0
  1031. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.h +137 -0
  1032. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc +309 -0
  1033. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.h +55 -0
  1034. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc +187 -0
  1035. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc +159 -0
  1036. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.h +132 -0
  1037. data/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +122 -0
  1038. data/third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc +116 -0
  1039. data/third_party/abseil-cpp/absl/time/internal/get_current_time_chrono.inc +31 -0
  1040. data/third_party/abseil-cpp/absl/time/internal/get_current_time_posix.inc +24 -0
  1041. data/third_party/abseil-cpp/absl/time/time.cc +499 -0
  1042. data/third_party/abseil-cpp/absl/time/time.h +1583 -0
  1043. data/third_party/abseil-cpp/absl/types/bad_variant_access.cc +64 -0
  1044. data/third_party/abseil-cpp/absl/types/bad_variant_access.h +82 -0
  1045. data/third_party/abseil-cpp/absl/types/internal/variant.h +1646 -0
  1046. data/third_party/abseil-cpp/absl/types/optional.h +9 -9
  1047. data/third_party/abseil-cpp/absl/types/span.h +49 -36
  1048. data/third_party/abseil-cpp/absl/types/variant.h +861 -0
  1049. data/third_party/abseil-cpp/absl/utility/utility.h +2 -2
  1050. data/third_party/address_sorting/include/address_sorting/address_sorting.h +2 -0
  1051. data/third_party/boringssl-with-bazel/err_data.c +759 -707
  1052. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c +6 -6
  1053. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c +5 -5
  1054. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c +6 -6
  1055. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c +6 -13
  1056. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c +3 -3
  1057. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c +2 -2
  1058. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c +5 -3
  1059. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c +2 -2
  1060. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c +15 -20
  1061. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn1_locl.h +30 -0
  1062. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c +1 -1
  1063. data/third_party/boringssl-with-bazel/src/crypto/asn1/f_enum.c +1 -1
  1064. data/third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c +1 -1
  1065. data/third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c +1 -1
  1066. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c +28 -79
  1067. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c +39 -85
  1068. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c +5 -16
  1069. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c +10 -61
  1070. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c +0 -2
  1071. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c +2 -2
  1072. data/third_party/boringssl-with-bazel/src/crypto/bio/bio.c +2 -0
  1073. data/third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c +4 -0
  1074. data/third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c +158 -0
  1075. data/third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c +3 -10
  1076. data/third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c +8 -9
  1077. data/third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c +60 -45
  1078. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c +1 -1
  1079. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c +6 -81
  1080. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/internal.h +87 -0
  1081. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c +1 -0
  1082. data/third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c +41 -0
  1083. data/third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c +7 -5
  1084. data/third_party/boringssl-with-bazel/src/crypto/cpu-intel.c +13 -4
  1085. data/third_party/boringssl-with-bazel/src/crypto/crypto.c +11 -0
  1086. data/third_party/boringssl-with-bazel/src/{third_party/fiat → crypto/curve25519}/curve25519.c +18 -26
  1087. data/third_party/boringssl-with-bazel/src/{third_party/fiat → crypto/curve25519}/curve25519_tables.h +13 -21
  1088. data/third_party/boringssl-with-bazel/src/{third_party/fiat → crypto/curve25519}/internal.h +14 -22
  1089. data/third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c +1 -1
  1090. data/third_party/boringssl-with-bazel/src/crypto/{dh → dh_extra}/dh_asn1.c +0 -0
  1091. data/third_party/boringssl-with-bazel/src/crypto/{dh → dh_extra}/params.c +179 -0
  1092. data/third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c +25 -0
  1093. data/third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c +19 -43
  1094. data/third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c +55 -4
  1095. data/third_party/boringssl-with-bazel/src/crypto/dsa/internal.h +34 -0
  1096. data/third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c +2 -17
  1097. data/third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c +385 -0
  1098. data/third_party/boringssl-with-bazel/src/crypto/ec_extra/internal.h +56 -0
  1099. data/third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c +2 -2
  1100. data/third_party/boringssl-with-bazel/src/crypto/err/err.c +33 -32
  1101. data/third_party/boringssl-with-bazel/src/crypto/evp/evp.c +4 -0
  1102. data/third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c +159 -0
  1103. data/third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c +6 -2
  1104. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/aes/aes_nohw.c +1 -1
  1105. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/aes/mode_wrappers.c +17 -1
  1106. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c +5 -2
  1107. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/bn.c +13 -20
  1108. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/div.c +2 -3
  1109. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/exponentiation.c +3 -3
  1110. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/internal.h +34 -13
  1111. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/montgomery.c +8 -8
  1112. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/mul.c +30 -154
  1113. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/e_aes.c +173 -35
  1114. data/third_party/boringssl-with-bazel/src/crypto/{dh → fipsmodule/dh}/check.c +0 -0
  1115. data/third_party/boringssl-with-bazel/src/crypto/{dh → fipsmodule/dh}/dh.c +149 -211
  1116. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/digest.c +2 -0
  1117. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/digests.c +16 -0
  1118. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec.c +301 -117
  1119. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec_key.c +22 -28
  1120. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec_montgomery.c +96 -55
  1121. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/felem.c +25 -7
  1122. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/internal.h +434 -161
  1123. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/oct.c +63 -71
  1124. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p224-64.c +18 -25
  1125. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p256-x86_64-table.h +9481 -9485
  1126. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p256-x86_64.c +104 -122
  1127. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p256.c +740 -0
  1128. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p256_table.h +297 -0
  1129. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/scalar.c +90 -11
  1130. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/simple.c +125 -148
  1131. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/simple_mul.c +189 -3
  1132. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/util.c +3 -3
  1133. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/wnaf.c +61 -18
  1134. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ecdh/ecdh.c +2 -2
  1135. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ecdsa/ecdsa.c +20 -5
  1136. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/fork_detect.c +137 -0
  1137. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/fork_detect.h +49 -0
  1138. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/getrandom_fillin.h +64 -0
  1139. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/internal.h +69 -5
  1140. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/rand.c +155 -50
  1141. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/urandom.c +24 -121
  1142. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/blinding.c +4 -0
  1143. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/internal.h +5 -0
  1144. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa.c +73 -40
  1145. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa_impl.c +122 -55
  1146. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/self_check/self_check.c +217 -2
  1147. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha512.c +44 -35
  1148. data/third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c +532 -0
  1149. data/third_party/boringssl-with-bazel/src/crypto/hpke/internal.h +246 -0
  1150. data/third_party/boringssl-with-bazel/src/crypto/mem.c +47 -16
  1151. data/third_party/boringssl-with-bazel/src/crypto/obj/obj_dat.h +15 -1
  1152. data/third_party/boringssl-with-bazel/src/crypto/pkcs8/internal.h +7 -0
  1153. data/third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c +36 -5
  1154. data/third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c +10 -7
  1155. data/third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c +13 -11
  1156. data/third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c +4 -0
  1157. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c +6 -10
  1158. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c +34 -0
  1159. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c +4 -0
  1160. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c +16 -0
  1161. data/third_party/boringssl-with-bazel/src/crypto/stack/stack.c +7 -13
  1162. data/third_party/boringssl-with-bazel/src/crypto/thread_pthread.c +4 -0
  1163. data/third_party/boringssl-with-bazel/src/crypto/thread_win.c +4 -0
  1164. data/third_party/boringssl-with-bazel/src/crypto/trust_token/internal.h +318 -0
  1165. data/third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c +1399 -0
  1166. data/third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c +858 -0
  1167. data/third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c +766 -0
  1168. data/third_party/boringssl-with-bazel/src/crypto/x509/a_strex.c +7 -7
  1169. data/third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c +10 -0
  1170. data/third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c +4 -4
  1171. data/third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c +5 -1
  1172. data/third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c +3 -3
  1173. data/third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c +1 -1
  1174. data/third_party/boringssl-with-bazel/src/crypto/x509/x509.c +0 -67
  1175. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c +21 -9
  1176. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c +27 -21
  1177. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c +1 -1
  1178. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_r2x.c +1 -1
  1179. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c +10 -0
  1180. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c +89 -11
  1181. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c +7 -4
  1182. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c +67 -67
  1183. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c +4 -4
  1184. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c +62 -44
  1185. data/third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c +67 -25
  1186. data/third_party/boringssl-with-bazel/src/crypto/x509/x509name.c +13 -11
  1187. data/third_party/boringssl-with-bazel/src/crypto/x509/x_all.c +10 -10
  1188. data/third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c +38 -17
  1189. data/third_party/boringssl-with-bazel/src/crypto/x509/x_name.c +28 -40
  1190. data/third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c +2 -2
  1191. data/third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c +0 -154
  1192. data/third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c +20 -0
  1193. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c +66 -9
  1194. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c +2 -2
  1195. data/third_party/boringssl-with-bazel/src/crypto/x509v3/ext_dat.h +1 -4
  1196. data/third_party/boringssl-with-bazel/src/crypto/x509v3/internal.h +5 -0
  1197. data/third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c +5 -3
  1198. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c +3 -3
  1199. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c +25 -24
  1200. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c +3 -3
  1201. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c +32 -28
  1202. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c +2 -2
  1203. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c +2 -1
  1204. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c +42 -22
  1205. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c +3 -4
  1206. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c +1 -1
  1207. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c +25 -36
  1208. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c +2 -2
  1209. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c +126 -40
  1210. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c +7 -7
  1211. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c +6 -6
  1212. data/third_party/boringssl-with-bazel/src/include/openssl/aead.h +24 -0
  1213. data/third_party/boringssl-with-bazel/src/include/openssl/aes.h +16 -4
  1214. data/third_party/boringssl-with-bazel/src/include/openssl/arm_arch.h +54 -0
  1215. data/third_party/boringssl-with-bazel/src/include/openssl/asn1.h +662 -556
  1216. data/third_party/boringssl-with-bazel/src/include/openssl/asn1t.h +0 -167
  1217. data/third_party/boringssl-with-bazel/src/include/openssl/base.h +19 -7
  1218. data/third_party/boringssl-with-bazel/src/include/openssl/bio.h +4 -0
  1219. data/third_party/boringssl-with-bazel/src/include/openssl/blake2.h +62 -0
  1220. data/third_party/boringssl-with-bazel/src/include/openssl/bytestring.h +22 -7
  1221. data/third_party/boringssl-with-bazel/src/include/openssl/cipher.h +21 -0
  1222. data/third_party/boringssl-with-bazel/src/include/openssl/crypto.h +10 -5
  1223. data/third_party/boringssl-with-bazel/src/include/openssl/des.h +6 -0
  1224. data/third_party/boringssl-with-bazel/src/include/openssl/dh.h +82 -20
  1225. data/third_party/boringssl-with-bazel/src/include/openssl/digest.h +11 -0
  1226. data/third_party/boringssl-with-bazel/src/include/openssl/dsa.h +16 -0
  1227. data/third_party/boringssl-with-bazel/src/include/openssl/ec.h +15 -0
  1228. data/third_party/boringssl-with-bazel/src/include/openssl/ecdsa.h +6 -0
  1229. data/third_party/boringssl-with-bazel/src/include/openssl/err.h +2 -0
  1230. data/third_party/boringssl-with-bazel/src/include/openssl/evp.h +85 -3
  1231. data/third_party/boringssl-with-bazel/src/include/openssl/nid.h +5 -0
  1232. data/third_party/boringssl-with-bazel/src/include/openssl/opensslconf.h +3 -0
  1233. data/third_party/boringssl-with-bazel/src/include/openssl/pem.h +202 -134
  1234. data/third_party/boringssl-with-bazel/src/include/openssl/rand.h +6 -17
  1235. data/third_party/boringssl-with-bazel/src/include/openssl/rsa.h +31 -0
  1236. data/third_party/boringssl-with-bazel/src/include/openssl/sha.h +26 -0
  1237. data/third_party/boringssl-with-bazel/src/include/openssl/span.h +2 -1
  1238. data/third_party/boringssl-with-bazel/src/include/openssl/ssl.h +335 -112
  1239. data/third_party/boringssl-with-bazel/src/include/openssl/tls1.h +33 -10
  1240. data/third_party/boringssl-with-bazel/src/include/openssl/trust_token.h +310 -0
  1241. data/third_party/boringssl-with-bazel/src/include/openssl/x509.h +1141 -755
  1242. data/third_party/boringssl-with-bazel/src/include/openssl/x509_vfy.h +1 -0
  1243. data/third_party/boringssl-with-bazel/src/include/openssl/x509v3.h +593 -440
  1244. data/third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc +18 -5
  1245. data/third_party/boringssl-with-bazel/src/ssl/d1_both.cc +0 -4
  1246. data/third_party/boringssl-with-bazel/src/ssl/d1_lib.cc +3 -3
  1247. data/third_party/boringssl-with-bazel/src/ssl/dtls_method.cc +13 -4
  1248. data/third_party/boringssl-with-bazel/src/ssl/handoff.cc +181 -57
  1249. data/third_party/boringssl-with-bazel/src/ssl/handshake.cc +45 -26
  1250. data/third_party/boringssl-with-bazel/src/ssl/handshake_client.cc +43 -45
  1251. data/third_party/boringssl-with-bazel/src/ssl/handshake_server.cc +32 -10
  1252. data/third_party/boringssl-with-bazel/src/ssl/internal.h +160 -80
  1253. data/third_party/boringssl-with-bazel/src/ssl/s3_both.cc +10 -10
  1254. data/third_party/boringssl-with-bazel/src/ssl/s3_lib.cc +2 -3
  1255. data/third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc +21 -21
  1256. data/third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc +77 -8
  1257. data/third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc +7 -6
  1258. data/third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc +8 -9
  1259. data/third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc +131 -15
  1260. data/third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc +13 -2
  1261. data/third_party/boringssl-with-bazel/src/ssl/ssl_session.cc +50 -15
  1262. data/third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc +6 -0
  1263. data/third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc +2 -2
  1264. data/third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc +5 -5
  1265. data/third_party/boringssl-with-bazel/src/ssl/t1_enc.cc +53 -30
  1266. data/third_party/boringssl-with-bazel/src/ssl/t1_lib.cc +611 -89
  1267. data/third_party/boringssl-with-bazel/src/ssl/tls13_both.cc +2 -3
  1268. data/third_party/boringssl-with-bazel/src/ssl/tls13_client.cc +187 -68
  1269. data/third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc +71 -90
  1270. data/third_party/boringssl-with-bazel/src/ssl/tls13_server.cc +247 -73
  1271. data/third_party/boringssl-with-bazel/src/ssl/tls_method.cc +63 -25
  1272. data/third_party/boringssl-with-bazel/src/ssl/tls_record.cc +5 -3
  1273. data/third_party/boringssl-with-bazel/src/third_party/fiat/curve25519_32.h +245 -175
  1274. data/third_party/boringssl-with-bazel/src/third_party/fiat/curve25519_64.h +135 -75
  1275. data/third_party/boringssl-with-bazel/src/third_party/fiat/p256_32.h +1593 -1672
  1276. data/third_party/boringssl-with-bazel/src/third_party/fiat/p256_64.h +512 -503
  1277. data/third_party/re2/re2/bitmap256.h +117 -0
  1278. data/third_party/re2/re2/bitstate.cc +385 -0
  1279. data/third_party/re2/re2/compile.cc +1279 -0
  1280. data/third_party/re2/re2/dfa.cc +2130 -0
  1281. data/third_party/re2/re2/filtered_re2.cc +121 -0
  1282. data/third_party/re2/re2/filtered_re2.h +109 -0
  1283. data/third_party/re2/re2/mimics_pcre.cc +197 -0
  1284. data/third_party/re2/re2/nfa.cc +713 -0
  1285. data/third_party/re2/re2/onepass.cc +623 -0
  1286. data/third_party/re2/re2/parse.cc +2464 -0
  1287. data/third_party/re2/re2/perl_groups.cc +119 -0
  1288. data/third_party/re2/re2/pod_array.h +55 -0
  1289. data/third_party/re2/re2/prefilter.cc +710 -0
  1290. data/third_party/re2/re2/prefilter.h +108 -0
  1291. data/third_party/re2/re2/prefilter_tree.cc +407 -0
  1292. data/third_party/re2/re2/prefilter_tree.h +139 -0
  1293. data/third_party/re2/re2/prog.cc +988 -0
  1294. data/third_party/re2/re2/prog.h +436 -0
  1295. data/third_party/re2/re2/re2.cc +1362 -0
  1296. data/third_party/re2/re2/re2.h +1002 -0
  1297. data/third_party/re2/re2/regexp.cc +980 -0
  1298. data/third_party/re2/re2/regexp.h +659 -0
  1299. data/third_party/re2/re2/set.cc +154 -0
  1300. data/third_party/re2/re2/set.h +80 -0
  1301. data/third_party/re2/re2/simplify.cc +657 -0
  1302. data/third_party/re2/re2/sparse_array.h +392 -0
  1303. data/third_party/re2/re2/sparse_set.h +264 -0
  1304. data/third_party/re2/re2/stringpiece.cc +65 -0
  1305. data/third_party/re2/re2/stringpiece.h +210 -0
  1306. data/third_party/re2/re2/tostring.cc +351 -0
  1307. data/third_party/re2/re2/unicode_casefold.cc +582 -0
  1308. data/third_party/re2/re2/unicode_casefold.h +78 -0
  1309. data/third_party/re2/re2/unicode_groups.cc +6269 -0
  1310. data/third_party/re2/re2/unicode_groups.h +67 -0
  1311. data/third_party/re2/re2/walker-inl.h +246 -0
  1312. data/third_party/re2/util/benchmark.h +156 -0
  1313. data/third_party/re2/util/flags.h +26 -0
  1314. data/third_party/re2/util/logging.h +109 -0
  1315. data/third_party/re2/util/malloc_counter.h +19 -0
  1316. data/third_party/re2/util/mix.h +41 -0
  1317. data/third_party/re2/util/mutex.h +148 -0
  1318. data/third_party/re2/util/pcre.cc +1025 -0
  1319. data/third_party/re2/util/pcre.h +681 -0
  1320. data/third_party/re2/util/rune.cc +260 -0
  1321. data/third_party/re2/util/strutil.cc +149 -0
  1322. data/third_party/re2/util/strutil.h +21 -0
  1323. data/third_party/re2/util/test.h +50 -0
  1324. data/third_party/re2/util/utf.h +44 -0
  1325. data/third_party/re2/util/util.h +42 -0
  1326. data/third_party/upb/third_party/wyhash/wyhash.h +145 -0
  1327. data/third_party/upb/upb/decode.c +604 -511
  1328. data/third_party/upb/upb/decode.h +20 -1
  1329. data/third_party/upb/upb/decode.int.h +163 -0
  1330. data/third_party/upb/upb/decode_fast.c +1040 -0
  1331. data/third_party/upb/upb/decode_fast.h +126 -0
  1332. data/third_party/upb/upb/def.c +2178 -0
  1333. data/third_party/upb/upb/def.h +315 -0
  1334. data/third_party/upb/upb/def.hpp +439 -0
  1335. data/third_party/upb/upb/encode.c +311 -211
  1336. data/third_party/upb/upb/encode.h +27 -2
  1337. data/third_party/upb/upb/json_decode.c +1443 -0
  1338. data/third_party/upb/upb/json_decode.h +23 -0
  1339. data/third_party/upb/upb/json_encode.c +713 -0
  1340. data/third_party/upb/upb/json_encode.h +36 -0
  1341. data/third_party/upb/upb/msg.c +215 -70
  1342. data/third_party/upb/upb/msg.h +558 -14
  1343. data/third_party/upb/upb/port_def.inc +105 -63
  1344. data/third_party/upb/upb/port_undef.inc +10 -7
  1345. data/third_party/upb/upb/reflection.c +408 -0
  1346. data/third_party/upb/upb/reflection.h +168 -0
  1347. data/third_party/upb/upb/table.c +73 -269
  1348. data/third_party/upb/upb/table.int.h +25 -57
  1349. data/third_party/upb/upb/text_encode.c +421 -0
  1350. data/third_party/upb/upb/text_encode.h +38 -0
  1351. data/third_party/upb/upb/upb.c +138 -135
  1352. data/third_party/upb/upb/upb.h +119 -146
  1353. data/third_party/upb/upb/upb.hpp +88 -0
  1354. data/third_party/upb/upb/upb.int.h +29 -0
  1355. metadata +646 -164
  1356. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +0 -1754
  1357. data/src/core/ext/filters/client_channel/parse_address.cc +0 -237
  1358. data/src/core/ext/filters/client_channel/parse_address.h +0 -53
  1359. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +0 -484
  1360. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +0 -65
  1361. data/src/core/ext/filters/client_channel/resolving_lb_policy.cc +0 -359
  1362. data/src/core/ext/filters/client_channel/resolving_lb_policy.h +0 -122
  1363. data/src/core/ext/filters/client_channel/xds/xds_api.cc +0 -1779
  1364. data/src/core/ext/filters/client_channel/xds/xds_api.h +0 -280
  1365. data/src/core/ext/filters/client_channel/xds/xds_bootstrap.cc +0 -347
  1366. data/src/core/ext/filters/client_channel/xds/xds_bootstrap.h +0 -87
  1367. data/src/core/ext/filters/client_channel/xds/xds_channel.h +0 -46
  1368. data/src/core/ext/filters/client_channel/xds/xds_channel_secure.cc +0 -104
  1369. data/src/core/ext/filters/client_channel/xds/xds_client.h +0 -274
  1370. data/src/core/ext/filters/client_channel/xds/xds_client_stats.cc +0 -116
  1371. data/src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c +0 -246
  1372. data/src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.h +0 -905
  1373. data/src/core/ext/upb-generated/envoy/api/v2/cds.upb.h +0 -53
  1374. data/src/core/ext/upb-generated/envoy/api/v2/cluster.upb.c +0 -390
  1375. data/src/core/ext/upb-generated/envoy/api/v2/cluster.upb.h +0 -1411
  1376. data/src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c +0 -73
  1377. data/src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.h +0 -218
  1378. data/src/core/ext/upb-generated/envoy/api/v2/cluster/filter.upb.c +0 -34
  1379. data/src/core/ext/upb-generated/envoy/api/v2/cluster/filter.upb.h +0 -69
  1380. data/src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.c +0 -54
  1381. data/src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.h +0 -305
  1382. data/src/core/ext/upb-generated/envoy/api/v2/core/address.upb.c +0 -111
  1383. data/src/core/ext/upb-generated/envoy/api/v2/core/address.upb.h +0 -328
  1384. data/src/core/ext/upb-generated/envoy/api/v2/core/base.upb.c +0 -292
  1385. data/src/core/ext/upb-generated/envoy/api/v2/core/base.upb.h +0 -847
  1386. data/src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.c +0 -95
  1387. data/src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.h +0 -322
  1388. data/src/core/ext/upb-generated/envoy/api/v2/core/grpc_service.upb.c +0 -196
  1389. data/src/core/ext/upb-generated/envoy/api/v2/core/grpc_service.upb.h +0 -642
  1390. data/src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.c +0 -168
  1391. data/src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.h +0 -658
  1392. data/src/core/ext/upb-generated/envoy/api/v2/core/http_uri.upb.c +0 -35
  1393. data/src/core/ext/upb-generated/envoy/api/v2/core/http_uri.upb.h +0 -80
  1394. data/src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.c +0 -132
  1395. data/src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.h +0 -436
  1396. data/src/core/ext/upb-generated/envoy/api/v2/discovery.upb.c +0 -128
  1397. data/src/core/ext/upb-generated/envoy/api/v2/discovery.upb.h +0 -392
  1398. data/src/core/ext/upb-generated/envoy/api/v2/eds.upb.c +0 -30
  1399. data/src/core/ext/upb-generated/envoy/api/v2/eds.upb.h +0 -53
  1400. data/src/core/ext/upb-generated/envoy/api/v2/endpoint.upb.c +0 -91
  1401. data/src/core/ext/upb-generated/envoy/api/v2/endpoint.upb.h +0 -240
  1402. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint.upb.c +0 -17
  1403. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint.upb.h +0 -33
  1404. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint_components.upb.c +0 -88
  1405. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint_components.upb.h +0 -258
  1406. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c +0 -111
  1407. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.h +0 -324
  1408. data/src/core/ext/upb-generated/envoy/api/v2/lds.upb.c +0 -30
  1409. data/src/core/ext/upb-generated/envoy/api/v2/lds.upb.h +0 -53
  1410. data/src/core/ext/upb-generated/envoy/api/v2/listener.upb.c +0 -104
  1411. data/src/core/ext/upb-generated/envoy/api/v2/listener.upb.h +0 -383
  1412. data/src/core/ext/upb-generated/envoy/api/v2/listener/listener.upb.c +0 -17
  1413. data/src/core/ext/upb-generated/envoy/api/v2/listener/listener.upb.h +0 -33
  1414. data/src/core/ext/upb-generated/envoy/api/v2/listener/listener_components.upb.c +0 -144
  1415. data/src/core/ext/upb-generated/envoy/api/v2/listener/listener_components.upb.h +0 -527
  1416. data/src/core/ext/upb-generated/envoy/api/v2/listener/udp_listener_config.upb.c +0 -42
  1417. data/src/core/ext/upb-generated/envoy/api/v2/listener/udp_listener_config.upb.h +0 -112
  1418. data/src/core/ext/upb-generated/envoy/api/v2/rds.upb.h +0 -53
  1419. data/src/core/ext/upb-generated/envoy/api/v2/route.upb.c +0 -62
  1420. data/src/core/ext/upb-generated/envoy/api/v2/route.upb.h +0 -199
  1421. data/src/core/ext/upb-generated/envoy/api/v2/route/route.upb.c +0 -17
  1422. data/src/core/ext/upb-generated/envoy/api/v2/route/route.upb.h +0 -33
  1423. data/src/core/ext/upb-generated/envoy/api/v2/route/route_components.upb.c +0 -793
  1424. data/src/core/ext/upb-generated/envoy/api/v2/route/route_components.upb.h +0 -2936
  1425. data/src/core/ext/upb-generated/envoy/api/v2/scoped_route.upb.c +0 -58
  1426. data/src/core/ext/upb-generated/envoy/api/v2/scoped_route.upb.h +0 -134
  1427. data/src/core/ext/upb-generated/envoy/api/v2/srds.upb.h +0 -53
  1428. data/src/core/ext/upb-generated/envoy/config/filter/accesslog/v2/accesslog.upb.c +0 -227
  1429. data/src/core/ext/upb-generated/envoy/config/filter/accesslog/v2/accesslog.upb.h +0 -725
  1430. data/src/core/ext/upb-generated/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.upb.c +0 -296
  1431. data/src/core/ext/upb-generated/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.upb.h +0 -1072
  1432. data/src/core/ext/upb-generated/envoy/config/listener/v2/api_listener.upb.c +0 -32
  1433. data/src/core/ext/upb-generated/envoy/config/listener/v2/api_listener.upb.h +0 -65
  1434. data/src/core/ext/upb-generated/envoy/service/discovery/v2/ads.upb.c +0 -23
  1435. data/src/core/ext/upb-generated/envoy/service/discovery/v2/ads.upb.h +0 -50
  1436. data/src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.c +0 -52
  1437. data/src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.h +0 -130
  1438. data/src/core/ext/upb-generated/envoy/type/matcher/regex.upb.c +0 -47
  1439. data/src/core/ext/upb-generated/envoy/type/matcher/regex.upb.h +0 -108
  1440. data/src/core/ext/upb-generated/envoy/type/matcher/string.upb.c +0 -52
  1441. data/src/core/ext/upb-generated/envoy/type/matcher/string.upb.h +0 -133
  1442. data/src/core/ext/upb-generated/envoy/type/metadata/v2/metadata.upb.c +0 -87
  1443. data/src/core/ext/upb-generated/envoy/type/metadata/v2/metadata.upb.h +0 -258
  1444. data/src/core/ext/upb-generated/envoy/type/percent.upb.c +0 -38
  1445. data/src/core/ext/upb-generated/envoy/type/percent.upb.h +0 -87
  1446. data/src/core/ext/upb-generated/envoy/type/range.upb.c +0 -49
  1447. data/src/core/ext/upb-generated/envoy/type/range.upb.h +0 -112
  1448. data/src/core/ext/upb-generated/envoy/type/semantic_version.upb.c +0 -28
  1449. data/src/core/ext/upb-generated/envoy/type/semantic_version.upb.h +0 -62
  1450. data/src/core/ext/upb-generated/envoy/type/tracing/v2/custom_tag.upb.c +0 -88
  1451. data/src/core/ext/upb-generated/envoy/type/tracing/v2/custom_tag.upb.h +0 -249
  1452. data/src/core/ext/upb-generated/gogoproto/gogo.upb.c +0 -17
  1453. data/src/core/lib/gprpp/map.h +0 -59
  1454. data/src/core/lib/gprpp/string_view.h +0 -60
  1455. data/src/core/lib/iomgr/iomgr_posix.h +0 -26
  1456. data/src/core/lib/security/transport/target_authority_table.cc +0 -75
  1457. data/src/core/lib/security/transport/target_authority_table.h +0 -40
  1458. data/src/core/lib/slice/slice_hash_table.h +0 -199
  1459. data/src/core/lib/slice/slice_weak_hash_table.h +0 -102
  1460. data/src/core/tsi/grpc_shadow_boringssl.h +0 -3311
  1461. data/third_party/abseil-cpp/absl/base/dynamic_annotations.cc +0 -129
  1462. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pku.c +0 -110
  1463. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_sxnet.c +0 -274
  1464. data/third_party/boringssl-with-bazel/src/third_party/fiat/p256.c +0 -1063
  1465. data/third_party/upb/upb/generated_util.h +0 -105
  1466. data/third_party/upb/upb/port.c +0 -26
@@ -0,0 +1,159 @@
1
+ // Copyright 2017 The Abseil Authors.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // https://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ //
15
+
16
+ #ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
17
+ #define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
18
+
19
+ #include "absl/base/config.h"
20
+
21
+ #ifdef _WIN32
22
+ #include <sdkddkver.h>
23
+ #else
24
+ #include <pthread.h>
25
+ #endif
26
+
27
+ #ifdef __linux__
28
+ #include <linux/futex.h>
29
+ #endif
30
+
31
+ #ifdef ABSL_HAVE_SEMAPHORE_H
32
+ #include <semaphore.h>
33
+ #endif
34
+
35
+ #include <atomic>
36
+ #include <cstdint>
37
+
38
+ #include "absl/base/internal/thread_identity.h"
39
+ #include "absl/synchronization/internal/kernel_timeout.h"
40
+
41
+ // May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
42
+ #define ABSL_WAITER_MODE_FUTEX 0
43
+ #define ABSL_WAITER_MODE_SEM 1
44
+ #define ABSL_WAITER_MODE_CONDVAR 2
45
+ #define ABSL_WAITER_MODE_WIN32 3
46
+
47
+ #if defined(ABSL_FORCE_WAITER_MODE)
48
+ #define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
49
+ #elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
50
+ #define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
51
+ #elif defined(__BIONIC__)
52
+ // Bionic supports all the futex operations we need even when some of the futex
53
+ // definitions are missing.
54
+ #define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
55
+ #elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
56
+ // FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
57
+ #define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
58
+ #elif defined(ABSL_HAVE_SEMAPHORE_H)
59
+ #define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
60
+ #else
61
+ #define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
62
+ #endif
63
+
64
+ namespace absl {
65
+ ABSL_NAMESPACE_BEGIN
66
+ namespace synchronization_internal {
67
+
68
// Waiter is an OS-specific semaphore.
// Exactly one of the private state layouts below is compiled in, chosen by
// ABSL_WAITER_MODE; the public interface is identical across all of them.
class Waiter {
 public:
  // Prepare any data to track waits.
  Waiter();

  // Not copyable or movable
  Waiter(const Waiter&) = delete;
  Waiter& operator=(const Waiter&) = delete;

  // Destroy any data to track waits.
  ~Waiter();

  // Blocks the calling thread until a matching call to `Post()` or
  // `t` has passed. Returns `true` if woken (`Post()` called),
  // `false` on timeout.
  bool Wait(KernelTimeout t);

  // Restart the caller of `Wait()` as with a normal semaphore.
  void Post();

  // If anyone is waiting, wake them up temporarily and cause them to
  // call `MaybeBecomeIdle()`. They will then return to waiting for a
  // `Post()` or timeout.
  void Poke();

  // Returns the Waiter associated with the identity.
  // The Waiter is stored inside the identity's opaque waiter_state buffer;
  // the static_assert guarantees the buffer is large enough.
  static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
    static_assert(
        sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
        "Insufficient space for Waiter");
    return reinterpret_cast<Waiter*>(identity->waiter_state.data);
  }

  // How many periods to remain idle before releasing resources
#ifndef ABSL_HAVE_THREAD_SANITIZER
  static constexpr int kIdlePeriods = 60;
#else
  // Memory consumption under ThreadSanitizer is a serious concern,
  // so we release resources sooner. The value of 1 leads to 1 to 2 second
  // delay before marking a thread as idle.
  static const int kIdlePeriods = 1;
#endif

 private:
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
  // Futexes are defined by specification to be 32-bits.
  // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
  std::atomic<int32_t> futex_;
  static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");

#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
  // REQUIRES: mu_ must be held.
  void InternalCondVarPoke();

  pthread_mutex_t mu_;
  pthread_cond_t cv_;
  int waiter_count_;
  int wakeup_count_;  // Unclaimed wakeups.

#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
  sem_t sem_;
  // This seems superfluous, but for Poke() we need to cause spurious
  // wakeups on the semaphore. Hence we can't actually use the
  // semaphore's count.
  std::atomic<int> wakeups_;

#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
  // WinHelper - Used to define utilities for accessing the lock and
  // condition variable storage once the types are complete.
  class WinHelper;

  // REQUIRES: WinHelper::GetLock(this) must be held.
  void InternalCondVarPoke();

  // We can't include Windows.h in our headers, so we use aligned character
  // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
  alignas(void*) unsigned char mu_storage_[sizeof(void*)];
  alignas(void*) unsigned char cv_storage_[sizeof(void*)];
  int waiter_count_;
  int wakeup_count_;

#else
  #error Unknown ABSL_WAITER_MODE
#endif
};
154
+
155
+ } // namespace synchronization_internal
156
+ ABSL_NAMESPACE_END
157
+ } // namespace absl
158
+
159
+ #endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
@@ -0,0 +1,2739 @@
1
+ // Copyright 2017 The Abseil Authors.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // https://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ #include "absl/synchronization/mutex.h"
16
+
17
+ #ifdef _WIN32
18
+ #include <windows.h>
19
+ #ifdef ERROR
20
+ #undef ERROR
21
+ #endif
22
+ #else
23
+ #include <fcntl.h>
24
+ #include <pthread.h>
25
+ #include <sched.h>
26
+ #include <sys/time.h>
27
+ #endif
28
+
29
+ #include <assert.h>
30
+ #include <errno.h>
31
+ #include <stdio.h>
32
+ #include <stdlib.h>
33
+ #include <string.h>
34
+ #include <time.h>
35
+
36
+ #include <algorithm>
37
+ #include <atomic>
38
+ #include <cinttypes>
39
+ #include <thread> // NOLINT(build/c++11)
40
+
41
+ #include "absl/base/attributes.h"
42
+ #include "absl/base/call_once.h"
43
+ #include "absl/base/config.h"
44
+ #include "absl/base/dynamic_annotations.h"
45
+ #include "absl/base/internal/atomic_hook.h"
46
+ #include "absl/base/internal/cycleclock.h"
47
+ #include "absl/base/internal/hide_ptr.h"
48
+ #include "absl/base/internal/low_level_alloc.h"
49
+ #include "absl/base/internal/raw_logging.h"
50
+ #include "absl/base/internal/spinlock.h"
51
+ #include "absl/base/internal/sysinfo.h"
52
+ #include "absl/base/internal/thread_identity.h"
53
+ #include "absl/base/internal/tsan_mutex_interface.h"
54
+ #include "absl/base/port.h"
55
+ #include "absl/debugging/stacktrace.h"
56
+ #include "absl/debugging/symbolize.h"
57
+ #include "absl/synchronization/internal/graphcycles.h"
58
+ #include "absl/synchronization/internal/per_thread_sem.h"
59
+ #include "absl/time/time.h"
60
+
61
+ using absl::base_internal::CurrentThreadIdentityIfPresent;
62
+ using absl::base_internal::PerThreadSynch;
63
+ using absl::base_internal::SchedulingGuard;
64
+ using absl::base_internal::ThreadIdentity;
65
+ using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
66
+ using absl::synchronization_internal::GraphCycles;
67
+ using absl::synchronization_internal::GraphId;
68
+ using absl::synchronization_internal::InvalidGraphId;
69
+ using absl::synchronization_internal::KernelTimeout;
70
+ using absl::synchronization_internal::PerThreadSem;
71
+
72
// Weak default implementation of the yield primitive used by MutexDelay();
// being ABSL_ATTRIBUTE_WEAK, an embedder can override it with a strong
// definition of the same C symbol.
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
}  // extern "C"
75
+
76
+ namespace absl {
77
+ ABSL_NAMESPACE_BEGIN
78
+
79
namespace {

// Under ThreadSanitizer, deadlock detection defaults to kIgnore;
// otherwise it defaults to kAbort.
#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
#endif

// Runtime-adjustable behavior flags.
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
    kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);

// Pluggable callbacks, installed via the Register* functions below.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
    submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
    const char *msg, const void *obj, int64_t wait_cycles)>
    mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
    cond_var_tracer;
// Used when formatting synchronization events; defaults to absl::Symbolize.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
    bool (*)(const void *pc, char *out, int out_size)>
    symbolizer(absl::Symbolize);

}  // namespace
105
+
106
+ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
107
+ bool locking, bool trylock,
108
+ bool read_lock);
109
+
110
// Installs the global mutex-contention profiling callback.
// NOTE(review): the parameter here is named wait_timestamp, but the hook it
// feeds (submit_profile_data) declares its argument as wait_cycles —
// presumably the value is a cycle count; confirm against callers.
void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
  submit_profile_data.Store(fn);
}
113
+
114
// Installs the global mutex-tracing callback, invoked with a message,
// the mutex address, and a wait-cycle count.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
                                    int64_t wait_cycles)) {
  mutex_tracer.Store(fn);
}
118
+
119
// Installs the global condition-variable tracing callback, invoked with a
// message and the CondVar address.
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
  cond_var_tracer.Store(fn);
}
122
+
123
// Replaces the symbolizer hook (default: absl::Symbolize). The callback
// writes a symbol name for `pc` into `out` (at most out_size bytes) and
// returns whether symbolization succeeded.
void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
  symbolizer.Store(fn);
}
126
+
127
// Process-wide mutex tunables, filled in lazily by GetMutexGlobals().
// Cacheline-aligned to keep these hot reads off lines shared with
// unrelated globals.
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
  absl::once_flag once;
  int num_cpus = 0;             // cached result of NumCPUs()
  int spinloop_iterations = 0;  // 0 when num_cpus <= 1 (spinning is futile)
};
132
+
133
// Returns the lazily-initialized process-wide MutexGlobals singleton.
// Initialization is done with LowLevelCallOnce so it is performed exactly
// once across threads.
static const MutexGlobals& GetMutexGlobals() {
  ABSL_CONST_INIT static MutexGlobals data;
  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
    data.num_cpus = absl::base_internal::NumCPUs();
    // Spinning only pays off when another core can release the lock
    // concurrently; on a uniprocessor, don't spin at all.
    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
  });
  return data;
}
141
+
142
// Spinlock delay on iteration c. Returns new c.
namespace {
enum DelayMode { AGGRESSIVE, GENTLE };
};

namespace synchronization_internal {
// Back-off helper for spin loops: callers pass their iteration counter `c`
// and the returned value becomes the counter for the next iteration.
int MutexDelay(int32_t c, int mode) {
  // If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
  // aggressive then spin many times before yielding. If the mode is
  // gentle then spin only a few times before yielding. Aggressive spinning is
  // used to ensure that an Unlock() call, which must get the spin lock for
  // any thread to make progress gets it without undue delay.
  const int32_t limit =
      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
  if (c < limit) {
    // Spin.
    c++;
  } else {
    SchedulingGuard::ScopedEnable enable_rescheduling;
    // Tell TSan we are deliberately blocking outside the mutex protocol.
    ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
    if (c == limit) {
      // Yield once.
      AbslInternalMutexYield();
      c++;
    } else {
      // Then wait.
      absl::SleepFor(absl::Microseconds(10));
      c = 0;  // reset so the caller spins again after the sleep
    }
    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
  }
  return c;
}
}  // namespace synchronization_internal
176
+
177
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
// This is used to set flags in mutex and condition variable words.
static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
                          intptr_t wait_until_clear) {
  for (;;) {
    intptr_t cur = pv->load(std::memory_order_relaxed);
    if ((cur & bits) == bits) {
      return;  // every requested bit is already set
    }
    if ((cur & wait_until_clear) != 0) {
      continue;  // spin until the blocking bits drop out of the word
    }
    if (pv->compare_exchange_weak(cur, cur | bits,
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
      return;
    }
    // CAS lost a race (or failed spuriously); reload and retry.
  }
}
193
+
194
// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
// This is used to unset flags in mutex and condition variable words.
static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
                            intptr_t wait_until_clear) {
  for (;;) {
    intptr_t cur = pv->load(std::memory_order_relaxed);
    if ((cur & bits) == 0) {
      return;  // nothing left to clear
    }
    if ((cur & wait_until_clear) != 0) {
      continue;  // spin until the blocking bits drop out of the word
    }
    if (pv->compare_exchange_weak(cur, cur & ~bits,
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
      return;
    }
    // CAS lost a race (or failed spuriously); reload and retry.
  }
}
209
+
210
+ //------------------------------------------------------------------
211
+
212
// Data for doing deadlock detection.
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

// Graph used to detect deadlocks.
// Both the pointer and the object it points to are annotated as protected
// by deadlock_graph_mu (GUARDED_BY / PT_GUARDED_BY).
ABSL_CONST_INIT static GraphCycles *deadlock_graph
    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
219
+
220
+ //------------------------------------------------------------------
221
+ // An event mechanism for debugging mutex use.
222
+ // It also allows mutexes to be given names for those who can't handle
223
+ // addresses, and instead like to give their data structures names like
224
+ // "Henry", "Fido", or "Rupert IV, King of Yondavia".
225
+
226
namespace {  // to prevent name pollution
enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
  // NOTE(review): the enumerator order appears to index the
  // event_properties[] table below — keep the two in sync.
  // Mutex events
  SYNCH_EV_TRYLOCK_SUCCESS,
  SYNCH_EV_TRYLOCK_FAILED,
  SYNCH_EV_READERTRYLOCK_SUCCESS,
  SYNCH_EV_READERTRYLOCK_FAILED,
  SYNCH_EV_LOCK,
  SYNCH_EV_LOCK_RETURNING,
  SYNCH_EV_READERLOCK,
  SYNCH_EV_READERLOCK_RETURNING,
  SYNCH_EV_UNLOCK,
  SYNCH_EV_READERUNLOCK,

  // CondVar events
  SYNCH_EV_WAIT,
  SYNCH_EV_WAIT_RETURNING,
  SYNCH_EV_SIGNAL,
  SYNCH_EV_SIGNALALL,
};

enum {                     // Event flags
  SYNCH_F_R = 0x01,        // reader event
  SYNCH_F_LCK = 0x02,      // PostSynchEvent called with mutex held
  SYNCH_F_TRY = 0x04,      // TryLock or ReaderTryLock
  SYNCH_F_UNLOCK = 0x08,   // Unlock or ReaderUnlock

  // Composite flags: event posted while holding the lock in write (W)
  // or read (R) mode.
  SYNCH_F_LCK_W = SYNCH_F_LCK,
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
};
}  // anonymous namespace
257
+
258
// Properties of the events.
// Indexed by the SYNCH_EV_* enumerators above; entries must stay in the
// same order.
static const struct {
  int flags;        // SYNCH_F_* bits describing the event
  const char *msg;  // human-readable prefix for the event, presumably
                    // used when logging — confirm against PostSynchEvent
} event_properties[] = {
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
    {0, "TryLock failed "},
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
    {0, "ReaderTryLock failed "},
    {0, "Lock blocking "},
    {SYNCH_F_LCK_W, "Lock returning "},
    {0, "ReaderLock blocking "},
    {SYNCH_F_LCK_R, "ReaderLock returning "},
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
    {0, "Wait on "},
    {0, "Wait unblocked "},
    {0, "Signal on "},
    {0, "SignalAll on "},
};
278
+
279
// Protects the synch_event hash table declared below.
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
285
+
286
static struct SynchEvent {  // this is a trivial hash table for the events
  // struct is freed when refcount reaches 0
  int refcount ABSL_GUARDED_BY(synch_event_mu);

  // buckets have linear, 0-terminated chains
  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);

  // Constant after initialization
  uintptr_t masked_addr;  // object at this address is called "name"

  // No explicit synchronization used. Instead we assume that the
  // client who enables/disables invariants/logging on a Mutex does so
  // while the Mutex is not being concurrently accessed by others.
  void (*invariant)(void *arg);  // called on each event
  void *arg;                     // first arg to (*invariant)()
  bool log;                      // logging turned on

  // Constant after initialization.
  // Old-style flexible-array idiom: EnsureSynchEvent allocates the struct
  // with strlen(name) extra bytes so the string fits.
  char name[1];  // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
306
+
307
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
// so), and return a refcounted reference that will remain valid until
// UnrefSynchEvent() is called.  If a new SynchEvent is allocated,
// the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent.
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
                                    const char *name, intptr_t bits,
                                    intptr_t lockbit) {
  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
  SynchEvent *e;
  // first look for existing SynchEvent struct..
  synch_event_mu.Lock();
  for (e = synch_event[h];
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       e = e->next) {
  }
  if (e == nullptr) {  // no SynchEvent struct found; make one.
    if (name == nullptr) {
      name = "";
    }
    size_t l = strlen(name);
    // sizeof(*e) already includes name[1], which covers the NUL terminator,
    // so adding strlen(name) is sufficient for the copy below.
    e = reinterpret_cast<SynchEvent *>(
        base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
    e->refcount = 2;  // one for return value, one for linked list
    e->masked_addr = base_internal::HidePtr(addr);
    e->invariant = nullptr;
    e->arg = nullptr;
    e->log = false;
    strcpy(e->name, name);  // NOLINT(runtime/printf)
    e->next = synch_event[h];
    // Set the event bits in the object's word while still holding
    // synch_event_mu, so the bit and the table entry appear together.
    AtomicSetBits(addr, bits, lockbit);
    synch_event[h] = e;
  } else {
    e->refcount++;  // for return value
  }
  synch_event_mu.Unlock();
  return e;
}
347
+
348
+ // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
349
+ static void DeleteSynchEvent(SynchEvent *e) {
350
+ base_internal::LowLevelAlloc::Free(e);
351
+ }
352
+
353
+ // Decrement the reference count of *e, or do nothing if e==null.
354
+ static void UnrefSynchEvent(SynchEvent *e) {
355
+ if (e != nullptr) {
356
+ synch_event_mu.Lock();
357
+ bool del = (--(e->refcount) == 0);
358
+ synch_event_mu.Unlock();
359
+ if (del) {
360
+ DeleteSynchEvent(e);
361
+ }
362
+ }
363
+ }
364
+
365
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
                             intptr_t lockbit) {
  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
  SynchEvent **pe;  // pointer to the link that points at e, so e can be unlinked
  SynchEvent *e;
  synch_event_mu.Lock();
  for (pe = &synch_event[h];
       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       pe = &e->next) {
  }
  bool del = false;
  if (e != nullptr) {
    *pe = e->next;  // unlink from the bucket chain
    del = (--(e->refcount) == 0);
  }
  AtomicClearBits(addr, bits, lockbit);
  synch_event_mu.Unlock();
  // Deallocation happens after releasing the spinlock; e is already
  // unreachable from the table at this point.
  if (del) {
    DeleteSynchEvent(e);
  }
}
389
+
390
+ // Return a refcounted reference to the SynchEvent of the object at address
391
+ // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
392
+ // called.
393
+ static SynchEvent *GetSynchEvent(const void *addr) {
394
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
395
+ SynchEvent *e;
396
+ synch_event_mu.Lock();
397
+ for (e = synch_event[h];
398
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
399
+ e = e->next) {
400
+ }
401
+ if (e != nullptr) {
402
+ e->refcount++;
403
+ }
404
+ synch_event_mu.Unlock();
405
+ return e;
406
+ }
407
+
408
// Called when an event "ev" occurs on a Mutex of CondVar "obj"
// if event recording is on
static void PostSynchEvent(void *obj, int ev) {
  SynchEvent *e = GetSynchEvent(obj);
  // logging is on if event recording is on and either there's no event struct,
  // or it explicitly says to log
  if (e == nullptr || e->log) {
    void *pcs[40];
    int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
    // A buffer with enough space for the ASCII for all the PCs, even on a
    // 64-bit machine.
    char buffer[ABSL_ARRAYSIZE(pcs) * 24];
    int pos = snprintf(buffer, sizeof (buffer), " @");
    for (int i = 0; i != n; i++) {
      // 24 bytes per frame leaves headroom over the worst-case "%p" width,
      // so pos stays within the buffer.
      pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
    }
    ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
                 (e == nullptr ? "" : e->name), buffer);
  }
  const int flags = event_properties[ev].flags;
  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
    // Calling the invariant as is causes problems under ThreadSanitizer.
    // We are currently inside of Mutex Lock/Unlock and are ignoring all
    // memory accesses and synchronization.  If the invariant transitively
    // synchronizes something else and we ignore the synchronization, we will
    // get false positive race reports later.
    // Reuse EvalConditionAnnotated to properly call into user code.
    struct local {
      static bool pred(SynchEvent *ev) {
        (*ev->invariant)(ev->arg);
        return false;
      }
    };
    Condition cond(&local::pred, e);
    Mutex *mu = static_cast<Mutex *>(obj);
    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
    const bool read_lock = (flags & SYNCH_F_R) != 0;
    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
  }
  UnrefSynchEvent(e);
}
450
+
451
+ //------------------------------------------------------------------
452
+
453
// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
// whether it has a timeout, the condition, exclusive/shared, and whether a
// condition variable wait has an associated Mutex (as opposed to another
// type of lock).  It also points to the PerThreadSynch struct of its thread.
// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
//
// This structure is held on the stack rather than directly in
// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
// while waiting on one Mutex, the implementation calls a client callback
// (such as a Condition function) that acquires another Mutex. We don't
// strictly need to allow this, but programmers become confused if we do not
// allow them to use functions such a LOG() within Condition functions.  The
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
                  PerThreadSynch *thread_arg,
                  std::atomic<intptr_t> *cv_word_arg)
      : how(how_arg),
        cond(cond_arg),
        timeout(timeout_arg),
        cvmu(cvmu_arg),
        thread(thread_arg),
        cv_word(cv_word_arg),
        contention_start_cycles(base_internal::CycleClock::Now()) {}

  const Mutex::MuHow how;  // How this thread needs to wait.
  const Condition *cond;  // The condition that this thread is waiting for.
                          // In Mutex, this field is set to zero if a timeout
                          // expires.
  KernelTimeout timeout;  // timeout expiry---absolute time
                          // In Mutex, this field is set to zero if a timeout
                          // expires.
  Mutex *const cvmu;      // used for transfer from cond var to mutex
  PerThreadSynch *const thread;  // thread that is waiting

  // If not null, thread should be enqueued on the CondVar whose state
  // word is cv_word instead of queueing normally on the Mutex.
  std::atomic<intptr_t> *cv_word;

  int64_t contention_start_cycles;  // Time (in cycles) when this thread started
                                    // to contend for the mutex.
};
497
+
498
// Per-thread record of the locks currently held, used by deadlock detection.
struct SynchLocksHeld {
  int n;          // number of valid entries in locks[]
  bool overflow;  // true iff we overflowed the array at some point
  struct {
    Mutex *mu;      // lock acquired
    int32_t count;  // times acquired
    GraphId id;     // deadlock_graph id of acquired lock
  } locks[40];
  // If a thread overfills the array during deadlock detection, we
  // continue, discarding information as needed.  If no overflow has
  // taken place, we can provide more error checking, such as
  // detecting when a thread releases a lock it does not hold.
};
511
+
512
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
// The address 1 is never a valid PerThreadSynch*, so it is safe as a marker.
static PerThreadSynch *const kPerThreadSynchNull =
    reinterpret_cast<PerThreadSynch *>(1);
516
+
517
+ static SynchLocksHeld *LocksHeldAlloc() {
518
+ SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
519
+ base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
520
+ ret->n = 0;
521
+ ret->overflow = false;
522
+ return ret;
523
+ }
524
+
525
+ // Return the PerThreadSynch-struct for this thread.
526
+ static PerThreadSynch *Synch_GetPerThread() {
527
+ ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
528
+ return &identity->per_thread_synch;
529
+ }
530
+
531
+ static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
532
+ if (mu) {
533
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
534
+ }
535
+ PerThreadSynch *w = Synch_GetPerThread();
536
+ if (mu) {
537
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
538
+ }
539
+ return w;
540
+ }
541
+
542
+ static SynchLocksHeld *Synch_GetAllLocks() {
543
+ PerThreadSynch *s = Synch_GetPerThread();
544
+ if (s->all_locks == nullptr) {
545
+ s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
546
+ }
547
+ return s->all_locks;
548
+ }
549
+
550
// Post on "w"'s associated PerThreadSem.
// The PRE/POST_DIVERT bracketing tells ThreadSanitizer to treat the semaphore
// operation as separate from mu's annotated lock operations — presumably to
// avoid false reports while inside Mutex internals (see ABSL_TSAN_MUTEX_*).
inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
  if (mu) {
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  }
  PerThreadSem::Post(w->thread_identity());
  if (mu) {
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
  }
}
560
+
561
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
// Bracketed with TSAN divert annotations as in IncrementSynchSem above.
bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
  if (mu) {
    ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
  }
  assert(w == Synch_GetPerThread());
  static_cast<void>(w);  // suppress unused-parameter warning when NDEBUG
  bool res = PerThreadSem::Wait(t);
  if (mu) {
    ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
  }
  return res;
}
574
+
575
// We're in a fatal signal handler that hopes to use Mutex and to get
// lucky by not deadlocking.  We try to improve its chances of success
// by effectively disabling some of the consistency checks.  This will
// prevent certain ABSL_RAW_CHECK() statements from being triggered when
// re-rentry is detected.  The ABSL_RAW_CHECK() statements are those in the
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
  // Fix the per-thread state only if it exists.
  // CurrentThreadIdentityIfPresent() does not allocate, which matters in a
  // signal handler; GetOrCreateCurrentThreadIdentity() might.
  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
  if (identity != nullptr) {
    identity->per_thread_synch.suppress_fatal_errors = true;
  }
  // Don't do deadlock detection when we are already failing.
  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
                                 std::memory_order_release);
}
591
+
592
+ // --------------------------time support
593
+
594
// Return the current time plus the timeout.  Use the same clock as
// PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
// such a choice when a deadline is given directly.
static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
#ifndef _WIN32
  // Match the gettimeofday()-based clock used by the POSIX semaphore wait.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return absl::TimeFromTimeval(tv) + timeout;
#else
  return absl::Now() + timeout;
#endif
}
606
+
607
+ // --------------------------Mutexes
608
+
609
// In the layout below, the msb of the bottom byte is currently unused.  Also,
// the following constraints were considered in choosing the layout:
//  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
//    0xcd) are illegal: reader and writer lock both held.
//  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
//    bit-twiddling trick in Mutex::Unlock().
//  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
//    to enable the bit-twiddling trick in CheckForMutexCorruption().
static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock
static const intptr_t kMuDesig = 0x0002L;   // there's a designated waker
static const intptr_t kMuWait = 0x0004L;    // threads are waiting
static const intptr_t kMuWriter = 0x0008L;  // a writer holds the lock
static const intptr_t kMuEvent = 0x0010L;   // record this mutex's events
// INVARIANT1:  there's a thread that was blocked on the mutex, is
// no longer, yet has not yet acquired the mutex.  If there's a
// designated waker, all threads can avoid taking the slow path in
// unlock because the designated waker will subsequently acquire
// the lock and wake someone.  To maintain INVARIANT1 the bit is
// set when a thread is unblocked(INV1a), and threads that were
// unblocked reset the bit when they either acquire or re-block
// (INV1b).
static const intptr_t kMuWrWait = 0x0020L;  // runnable writer is waiting
                                            // for a reader
static const intptr_t kMuSpin = 0x0040L;    // spinlock protects wait list
static const intptr_t kMuLow = 0x00ffL;     // mask all mutex bits
static const intptr_t kMuHigh = ~kMuLow;    // mask pointer/reader count
635
+
636
// Hack to make constant values available to gdb pretty printer.
// (Named enumerators survive into debug info where the static const
// intptr_t values above may not.)
enum {
  kGdbMuSpin = kMuSpin,
  kGdbMuEvent = kMuEvent,
  kGdbMuWait = kMuWait,
  kGdbMuWriter = kMuWriter,
  kGdbMuDesig = kMuDesig,
  kGdbMuWrWait = kMuWrWait,
  kGdbMuReader = kMuReader,
  kGdbMuLow = kMuLow,
};
647
+
648
// kMuWrWait implies kMuWait.
// kMuReader and kMuWriter are mutually exclusive.
// If kMuReader is zero, there are no readers.
// Otherwise, if kMuWait is zero, the high order bits contain a count of the
// number of readers.  Otherwise, the reader count is held in
// PerThreadSynch::readers of the most recently queued waiter, again in the
// bits above kMuLow.
static const intptr_t kMuOne = 0x0100;  // a count of one reader

// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)

// Waiter-queue pointers are stored in the bits above kMuLow, so the
// pointee's alignment must leave those low bits free.
static_assert(PerThreadSynch::kAlignment > kMuLow,
              "PerThreadSynch::kAlignment must be greater than kMuLow");
663
+
664
// This struct contains various bitmasks to be used in
// acquiring and releasing a mutex in a particular mode.
struct MuHowS {
  // if all the bits in fast_need_zero are zero, the lock can be acquired by
  // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff
  // this is the designated waker.
  intptr_t fast_need_zero;
  intptr_t fast_or;
  intptr_t fast_add;

  intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)

  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
                                // zero a reader can acquire a read share by
                                // setting the reader bit and incrementing
                                // the reader count (in last waiter since
                                // we're now slow-path).  kMuWrWait be may
                                // be ignored if we already waited once.
};
683
+
684
// Acquisition masks for the two lock modes; see MuHowS for field meanings.
static const MuHowS kSharedS = {
    // shared or read lock
    kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
    kMuReader,                        // fast_or
    kMuOne,                           // fast_add
    kMuWriter | kMuWait,              // slow_need_zero
    kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
};
static const MuHowS kExclusiveS = {
    // exclusive or write lock
    kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
    kMuWriter,                         // fast_or
    0,                                 // fast_add
    kMuWriter | kMuReader,             // slow_need_zero
    ~static_cast<intptr_t>(0),         // slow_inc_need_zero (never satisfied:
                                       // writers cannot share)
};
static const Mutex::MuHow kShared = &kSharedS;        // shared lock
static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
702
+
703
// Compile-time flag: extra consistency work is done in debug builds only.
#ifdef NDEBUG
static constexpr bool kDebugMode = false;
#else
static constexpr bool kDebugMode = true;
#endif

#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Map our lock mode onto ThreadSanitizer's mutex annotation flags.
static unsigned TsanFlags(Mutex::MuHow how) {
  return how == kShared ? __tsan_mutex_read_lock : 0;
}
#endif

// Always false in this build; queried by ~Mutex before touching the
// synch_event table.
static bool DebugOnlyIsExiting() {
  return false;
}
718
+
719
// Destructor: drop any event-recording state and (in debug builds) the
// deadlock-detection info, then notify TSAN of the destruction.
Mutex::~Mutex() {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
    ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
  }
  if (kDebugMode) {
    this->ForgetDeadlockInfo();
  }
  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
729
+
730
+ void Mutex::EnableDebugLog(const char *name) {
731
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
732
+ e->log = true;
733
+ UnrefSynchEvent(e);
734
+ }
735
+
736
// Globally enable/disable invariant checking; read (with acquire order) by
// Mutex::EnableInvariantDebugging() before it registers an invariant.
void EnableMutexInvariantDebugging(bool enabled) {
  synch_check_invariants.store(enabled, std::memory_order_release);
}
739
+
740
+ void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
741
+ void *arg) {
742
+ if (synch_check_invariants.load(std::memory_order_acquire) &&
743
+ invariant != nullptr) {
744
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
745
+ e->invariant = invariant;
746
+ e->arg = arg;
747
+ UnrefSynchEvent(e);
748
+ }
749
+ }
750
+
751
// Set the global deadlock-detection policy (see OnDeadlockCycle).
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
  synch_deadlock_detection.store(mode, std::memory_order_release);
}
754
+
755
+ // Return true iff threads x and y are waiting on the same condition for the
756
+ // same type of lock. Requires that x and y be waiting on the same Mutex
757
+ // queue.
758
+ static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
759
+ return x->waitp->how == y->waitp->how &&
760
+ Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
761
+ }
762
+
763
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer.  The pointer lives in the bits above kMuLow (valid
// because PerThreadSynch::kAlignment > kMuLow, see static_assert above).
static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
}
768
+
769
+ // The next several routines maintain the per-thread next and skip fields
770
+ // used in the Mutex waiter queue.
771
+ // The queue is a circular singly-linked list, of which the "head" is the
772
+ // last element, and head->next if the first element.
773
+ // The skip field has the invariant:
774
+ // For thread x, x->skip is one of:
775
+ // - invalid (iff x is not in a Mutex wait queue),
776
+ // - null, or
777
+ // - a pointer to a distinct thread waiting later in the same Mutex queue
778
+ // such that all threads in [x, x->skip] have the same condition and
779
+ // lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
780
+ // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
781
+ //
782
+ // By the spec of MuSameCondition(), it is not necessary when removing the
783
+ // first runnable thread y from the front a Mutex queue to adjust the skip
784
+ // field of another thread x because if x->skip==y, x->skip must (have) become
785
+ // invalid before y is removed. The function TryRemove can remove a specified
786
+ // thread from an arbitrary position in the queue whether runnable or not, so
787
+ // it fixes up skip fields that would otherwise be left dangling.
788
+ // The statement
789
+ // if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
790
+ // maintains the invariant provided x is not the last waiter in a Mutex queue
791
+ // The statement
792
+ // if (x->skip != null) { x->skip = x->skip->skip; }
793
+ // maintains the invariant.
794
+
795
// Returns the last thread y in a mutex waiter queue such that all threads in
// [x, y] inclusive share the same condition.  Sets skip fields of some threads
// in that range to optimize future evaluation of Skip() on x values in
// the range.  Requires thread x is in a mutex waiter queue.
// The locking is unusual.  Skip() is called under these conditions:
//   - spinlock is held in call from Enqueue(), with maybe_unlocking == false
//   - Mutex is held in call from UnlockSlow() by last unlocker, with
//     maybe_unlocking == true
//   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
//     UnlockSlow()) and TryRemove()
// These cases are mutually exclusive, so Skip() never runs concurrently
// with itself on the same Mutex.   The skip chain is used in these other places
// that cannot occur concurrently:
//   - FixSkip() (from TryRemove()) - spinlock and Mutex are held)
//   - Dequeue() (with spinlock and Mutex held)
//   - UnlockSlow() (with spinlock and Mutex held)
// A more complex case is Enqueue()
//   - Enqueue() (with spinlock held and maybe_unlocking == false)
//     This is the first case in which Skip is called, above.
//   - Enqueue() (without spinlock held; but queue is empty and being freshly
//     formed)
//   - Enqueue() (with spinlock held and maybe_unlocking == true)
// The first case has mutual exclusion, and the second isolation through
// working on an otherwise unreachable data structure.
// In the last case, Enqueue() is required to change no skip/next pointers
// except those in the added node and the former "head" node.  This implies
// that the new node is added after head, and so must be the new head or the
// new front of the queue.
static PerThreadSynch *Skip(PerThreadSynch *x) {
  PerThreadSynch *x0 = nullptr;
  PerThreadSynch *x1 = x;
  PerThreadSynch *x2 = x->skip;
  if (x2 != nullptr) {
    // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
    // such that   x1 == x0->skip && x2 == x1->skip
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;      // short-circuit skip from x0 to x2
    }
    x->skip = x1;         // short-circuit skip from x to result
  }
  return x1;
}
837
+
838
// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
  if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
    if (to_be_removed->skip != nullptr) {
      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
      ancestor->skip = ancestor->next;             // can skip one past ancestor
    } else {
      ancestor->skip = nullptr;  // can't skip at all
    }
  }
}
853
+
854
+ static void CondVarEnqueue(SynchWaitParams *waitp);
855
+
856
// Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr
// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
// idempotent; it alters no state associated with the existing (empty)
// queue.
//
// If waitp->cv_word == nullptr, queue the thread at either the front or
// the end (according to its priority) of the circular mutex waiter queue whose
// head is "head", and return the new head.  mu is the previous mutex state,
// which contains the reader count (perhaps adjusted for the operation in
// progress) if the list was empty and a read lock held, and the holder hint if
// the list was empty and a write lock held.  (flags & kMuIsCond) indicates
// whether this thread was transferred from a CondVar or is waiting for a
// non-trivial condition.  In this case, Enqueue() never returns nullptr
//
// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
// returned.  This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr).
static PerThreadSynch *Enqueue(PerThreadSynch *head,
                               SynchWaitParams *waitp, intptr_t mu, int flags) {
  // If we have been given a cv_word, call CondVarEnqueue() and return
  // the previous head of the Mutex waiter queue.
  if (waitp->cv_word != nullptr) {
    CondVarEnqueue(waitp);
    return head;
  }

  PerThreadSynch *s = waitp->thread;
  ABSL_RAW_CHECK(
      s->waitp == nullptr ||    // normal case
          s->waitp == waitp ||  // Fer()---transfer from condition variable
          s->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  s->waitp = waitp;
  s->skip = nullptr;             // maintain skip invariant (see above)
  s->may_skip = true;            // always true on entering queue
  s->wake = false;               // not being woken
  s->cond_waiter = ((flags & kMuIsCond) != 0);
  if (head == nullptr) {         // s is the only waiter
    s->next = s;                 // it's the only entry in the cycle
    s->readers = mu;             // reader count is from mu word
    s->maybe_unlocking = false;  // no one is searching an empty list
    head = s;                    // s is new head
  } else {
    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
    int64_t now_cycles = base_internal::CycleClock::Now();
    if (s->next_priority_read_cycles < now_cycles) {
      // Every so often, update our idea of the thread's priority.
      // pthread_getschedparam() is 5% of the block/wakeup time;
      // base_internal::CycleClock::Now() is 0.5%.
      int policy;
      struct sched_param param;
      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
      if (err != 0) {
        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
      } else {
        s->priority = param.sched_priority;
        // Cache the priority for roughly one second's worth of cycles.
        s->next_priority_read_cycles =
            now_cycles +
            static_cast<int64_t>(base_internal::CycleClock::Frequency());
      }
    }
    if (s->priority > head->priority) {  // s's priority is above head's
      // try to put s in priority-fifo order, or failing that at the front.
      if (!head->maybe_unlocking) {
        // No unlocker can be scanning the queue, so we can insert between
        // skip-chains, and within a skip-chain if it has the same condition as
        // s.  We insert in priority-fifo order, examining the end of every
        // skip-chain, plus every element with the same condition as s.
        PerThreadSynch *advance_to = head;    // next value of enqueue_after
        PerThreadSynch *cur;                  // successor of enqueue_after
        do {
          enqueue_after = advance_to;
          cur = enqueue_after->next;  // this advance ensures progress
          advance_to = Skip(cur);   // normally, advance to end of skip chain
                                    // (side-effect: optimizes skip chain)
          if (advance_to != cur && s->priority > advance_to->priority &&
              MuSameCondition(s, cur)) {
            // but this skip chain is not a singleton, s has higher priority
            // than its tail and has the same condition as the chain,
            // so we can insert within the skip-chain
            advance_to = cur;         // advance by just one
          }
        } while (s->priority <= advance_to->priority);
        // termination guaranteed because s->priority > head->priority
        // and head is the end of a skip chain
      } else if (waitp->how == kExclusive &&
                 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
        // An unlocker could be scanning the queue, but we know it will recheck
        // the queue front for writers that have no condition, which is what s
        // is, so an insert at front is safe.
        enqueue_after = head;       // add after head, at front
      }
    }
#endif
    if (enqueue_after != nullptr) {
      s->next = enqueue_after->next;
      enqueue_after->next = s;

      // enqueue_after can be: head, Skip(...), or cur.
      // The first two imply enqueue_after->skip == nullptr, and
      // the last is used only if MuSameCondition(s, cur).
      // We require this because clearing enqueue_after->skip
      // is impossible; enqueue_after's predecessors might also
      // incorrectly skip over s if we were to allow other
      // insertion points.
      ABSL_RAW_CHECK(
          enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
          "Mutex Enqueue failure");

      if (enqueue_after != head && enqueue_after->may_skip &&
          MuSameCondition(enqueue_after, enqueue_after->next)) {
        // enqueue_after can skip to its new successor, s
        enqueue_after->skip = enqueue_after->next;
      }
      if (MuSameCondition(s, s->next)) {  // s->may_skip is known to be true
        s->skip = s->next;                // s may skip to its successor
      }
    } else {   // enqueue not done any other way, so
               // we're inserting s at the back
      // s will become new head; copy data from head into it
      s->next = head->next;        // add s after head
      head->next = s;
      s->readers = head->readers;  // reader count is from previous head
      s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
      if (head->may_skip && MuSameCondition(head, s)) {
        // head now has successor; may skip
        head->skip = s;
      }
      head = s;  // s is new head
    }
  }
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
  return head;
}
993
+
994
// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
// whose last element is head.  The new head element is returned, or null
// if the list is made empty.
// Dequeue is called with both spinlock and Mutex held.
static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
  PerThreadSynch *w = pw->next;
  pw->next = w->next;         // snip w out of list
  if (head == w) {            // we removed the head
    head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
  } else if (pw != head && MuSameCondition(pw, pw->next)) {
    // pw can skip to its new successor
    if (pw->next->skip !=
        nullptr) {  // either skip to its successors skip target
      pw->skip = pw->next->skip;
    } else {                   // or to pw's successor
      pw->skip = pw->next;
    }
  }
  return head;
}
1014
+
1015
// Traverse the elements [ pw->next, h] of the circular list whose last element
// is head.
// Remove all elements with wake==true and place them in the
// singly-linked list wake_list in the order found.   Assumes that
// there is only one such element if the element has how == kExclusive.
// Return the new head.
static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
                                          PerThreadSynch *pw,
                                          PerThreadSynch **wake_tail) {
  PerThreadSynch *orig_h = head;
  PerThreadSynch *w = pw->next;
  bool skipped = false;
  do {
    if (w->wake) {  // remove this element
      ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
      // we're removing pw's successor so either pw->skip is zero or we should
      // already have removed pw since if pw->skip!=null, pw has the same
      // condition as w.
      head = Dequeue(head, pw);
      w->next = *wake_tail;               // keep list terminated
      *wake_tail = w;                     // add w to wake_list;
      wake_tail = &w->next;               // next addition to end
      if (w->waitp->how == kExclusive) {  // wake at most 1 writer
        break;
      }
    } else {         // not waking this one; skip
      pw = Skip(w);  // skip as much as possible
      skipped = true;
    }
    w = pw->next;
    // We want to stop processing after we've considered the original head,
    // orig_h.  We can't test for w==orig_h in the loop because w may skip over
    // it; we are guaranteed only that w's predecessor will not skip over
    // orig_h.  When we've considered orig_h, either we've processed it and
    // removed it (so orig_h != head), or we considered it and skipped it (so
    // skipped==true && pw == head because skipping from head always skips by
    // just one, leaving pw pointing at head).  So we want to
    // continue the loop with the negation of that expression.
  } while (orig_h == head && (pw != head || !skipped));
  return head;
}
1056
+
1057
+ // Try to remove thread s from the list of waiters on this mutex.
1058
+ // Does nothing if s is not on the waiter list.
1059
+ void Mutex::TryRemove(PerThreadSynch *s) {
1060
+ SchedulingGuard::ScopedDisable disable_rescheduling;
1061
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1062
+ // acquire spinlock & lock
1063
+ if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1064
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1065
+ std::memory_order_acquire,
1066
+ std::memory_order_relaxed)) {
1067
+ PerThreadSynch *h = GetPerThreadSynch(v);
1068
+ if (h != nullptr) {
1069
+ PerThreadSynch *pw = h; // pw is w's predecessor
1070
+ PerThreadSynch *w;
1071
+ if ((w = pw->next) != s) { // search for thread,
1072
+ do { // processing at least one element
1073
+ if (!MuSameCondition(s, w)) { // seeking different condition
1074
+ pw = Skip(w); // so skip all that won't match
1075
+ // we don't have to worry about dangling skip fields
1076
+ // in the threads we skipped; none can point to s
1077
+ // because their condition differs from s
1078
+ } else { // seeking same condition
1079
+ FixSkip(w, s); // fix up any skip pointer from w to s
1080
+ pw = w;
1081
+ }
1082
+ // don't search further if we found the thread, or we're about to
1083
+ // process the first thread again.
1084
+ } while ((w = pw->next) != s && pw != h);
1085
+ }
1086
+ if (w == s) { // found thread; remove it
1087
+ // pw->skip may be non-zero here; the loop above ensured that
1088
+ // no ancestor of s can skip to s, so removal is safe anyway.
1089
+ h = Dequeue(h, pw);
1090
+ s->next = nullptr;
1091
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1092
+ }
1093
+ }
1094
+ intptr_t nv;
1095
+ do { // release spinlock and lock
1096
+ v = mu_.load(std::memory_order_relaxed);
1097
+ nv = v & (kMuDesig | kMuEvent);
1098
+ if (h != nullptr) {
1099
+ nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1100
+ h->readers = 0; // we hold writer lock
1101
+ h->maybe_unlocking = false; // finished unlocking
1102
+ }
1103
+ } while (!mu_.compare_exchange_weak(v, nv,
1104
+ std::memory_order_release,
1105
+ std::memory_order_relaxed));
1106
+ }
1107
+ }
1108
+
1109
+ // Wait until thread "s", which must be the current thread, is removed from the
1110
+ // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1111
+ // if the wait extends past the absolute time specified, even if "s" is still
1112
+ // on the mutex queue. In this case, remove "s" from the queue and return
1113
+ // true, otherwise return false.
1114
+ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
1115
+ while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1116
+ if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1117
+ // After a timeout, we go into a spin loop until we remove ourselves
1118
+ // from the queue, or someone else removes us. We can't be sure to be
1119
+ // able to remove ourselves in a single lock acquisition because this
1120
+ // mutex may be held, and the holder has the right to read the centre
1121
+ // of the waiter queue without holding the spinlock.
1122
+ this->TryRemove(s);
1123
+ int c = 0;
1124
+ while (s->next != nullptr) {
1125
+ c = synchronization_internal::MutexDelay(c, GENTLE);
1126
+ this->TryRemove(s);
1127
+ }
1128
+ if (kDebugMode) {
1129
+ // This ensures that we test the case that TryRemove() is called when s
1130
+ // is not on the queue.
1131
+ this->TryRemove(s);
1132
+ }
1133
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1134
+ s->waitp->cond = nullptr; // condition no longer relevant for wakeups
1135
+ }
1136
+ }
1137
+ ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1138
+ "detected illegal recursion in Mutex code");
1139
+ s->waitp = nullptr;
1140
+ }
1141
+
1142
+ // Wake thread w, and return the next thread in the list.
1143
+ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1144
+ PerThreadSynch *next = w->next;
1145
+ w->next = nullptr;
1146
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1147
+ IncrementSynchSem(this, w);
1148
+
1149
+ return next;
1150
+ }
1151
+
1152
+ static GraphId GetGraphIdLocked(Mutex *mu)
1153
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1154
+ if (!deadlock_graph) { // (re)create the deadlock graph.
1155
+ deadlock_graph =
1156
+ new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1157
+ GraphCycles;
1158
+ }
1159
+ return deadlock_graph->GetId(mu);
1160
+ }
1161
+
1162
+ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1163
+ deadlock_graph_mu.Lock();
1164
+ GraphId id = GetGraphIdLocked(mu);
1165
+ deadlock_graph_mu.Unlock();
1166
+ return id;
1167
+ }
1168
+
1169
+ // Record a lock acquisition. This is used in debug mode for deadlock
1170
+ // detection. The held_locks pointer points to the relevant data
1171
+ // structure for each case.
1172
+ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1173
+ int n = held_locks->n;
1174
+ int i = 0;
1175
+ while (i != n && held_locks->locks[i].id != id) {
1176
+ i++;
1177
+ }
1178
+ if (i == n) {
1179
+ if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1180
+ held_locks->overflow = true; // lost some data
1181
+ } else { // we have room for lock
1182
+ held_locks->locks[i].mu = mu;
1183
+ held_locks->locks[i].count = 1;
1184
+ held_locks->locks[i].id = id;
1185
+ held_locks->n = n + 1;
1186
+ }
1187
+ } else {
1188
+ held_locks->locks[i].count++;
1189
+ }
1190
+ }
1191
+
1192
+ // Record a lock release. Each call to LockEnter(mu, id, x) should be
1193
+ // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1194
+ // It does not process the event if is not needed when deadlock detection is
1195
+ // disabled.
1196
+ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1197
+ int n = held_locks->n;
1198
+ int i = 0;
1199
+ while (i != n && held_locks->locks[i].id != id) {
1200
+ i++;
1201
+ }
1202
+ if (i == n) {
1203
+ if (!held_locks->overflow) {
1204
+ // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1205
+ // but in that case mu should still be present.
1206
+ i = 0;
1207
+ while (i != n && held_locks->locks[i].mu != mu) {
1208
+ i++;
1209
+ }
1210
+ if (i == n) { // mu missing means releasing unheld lock
1211
+ SynchEvent *mu_events = GetSynchEvent(mu);
1212
+ ABSL_RAW_LOG(FATAL,
1213
+ "thread releasing lock it does not hold: %p %s; "
1214
+ ,
1215
+ static_cast<void *>(mu),
1216
+ mu_events == nullptr ? "" : mu_events->name);
1217
+ }
1218
+ }
1219
+ } else if (held_locks->locks[i].count == 1) {
1220
+ held_locks->n = n - 1;
1221
+ held_locks->locks[i] = held_locks->locks[n - 1];
1222
+ held_locks->locks[n - 1].id = InvalidGraphId();
1223
+ held_locks->locks[n - 1].mu =
1224
+ nullptr; // clear mu to please the leak detector.
1225
+ } else {
1226
+ assert(held_locks->locks[i].count > 0);
1227
+ held_locks->locks[i].count--;
1228
+ }
1229
+ }
1230
+
1231
+ // Call LockEnter() if in debug mode and deadlock detection is enabled.
1232
+ static inline void DebugOnlyLockEnter(Mutex *mu) {
1233
+ if (kDebugMode) {
1234
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1235
+ OnDeadlockCycle::kIgnore) {
1236
+ LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1237
+ }
1238
+ }
1239
+ }
1240
+
1241
+ // Call LockEnter() if in debug mode and deadlock detection is enabled.
1242
+ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1243
+ if (kDebugMode) {
1244
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1245
+ OnDeadlockCycle::kIgnore) {
1246
+ LockEnter(mu, id, Synch_GetAllLocks());
1247
+ }
1248
+ }
1249
+ }
1250
+
1251
+ // Call LockLeave() if in debug mode and deadlock detection is enabled.
1252
+ static inline void DebugOnlyLockLeave(Mutex *mu) {
1253
+ if (kDebugMode) {
1254
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1255
+ OnDeadlockCycle::kIgnore) {
1256
+ LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1257
+ }
1258
+ }
1259
+ }
1260
+
1261
+ static char *StackString(void **pcs, int n, char *buf, int maxlen,
1262
+ bool symbolize) {
1263
+ static const int kSymLen = 200;
1264
+ char sym[kSymLen];
1265
+ int len = 0;
1266
+ for (int i = 0; i != n; i++) {
1267
+ if (symbolize) {
1268
+ if (!symbolizer(pcs[i], sym, kSymLen)) {
1269
+ sym[0] = '\0';
1270
+ }
1271
+ snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
1272
+ (i == 0 ? "\n" : ""),
1273
+ pcs[i], sym);
1274
+ } else {
1275
+ snprintf(buf + len, maxlen - len, " %p", pcs[i]);
1276
+ }
1277
+ len += strlen(&buf[len]);
1278
+ }
1279
+ return buf;
1280
+ }
1281
+
1282
+ static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1283
+ void *pcs[40];
1284
+ return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1285
+ maxlen, symbolize);
1286
+ }
1287
+
1288
+ namespace {
1289
+ enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
1290
+ // a path this long would be remarkable
1291
+ // Buffers required to report a deadlock.
1292
+ // We do not allocate them on stack to avoid large stack frame.
1293
+ struct DeadlockReportBuffers {
1294
+ char buf[6100];
1295
+ GraphId path[kMaxDeadlockPathLen];
1296
+ };
1297
+
1298
+ struct ScopedDeadlockReportBuffers {
1299
+ ScopedDeadlockReportBuffers() {
1300
+ b = reinterpret_cast<DeadlockReportBuffers *>(
1301
+ base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1302
+ }
1303
+ ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1304
+ DeadlockReportBuffers *b;
1305
+ };
1306
+
1307
+ // Helper to pass to GraphCycles::UpdateStackTrace.
1308
+ int GetStack(void** stack, int max_depth) {
1309
+ return absl::GetStackTrace(stack, max_depth, 3);
1310
+ }
1311
+ } // anonymous namespace
1312
+
1313
+ // Called in debug mode when a thread is about to acquire a lock in a way that
1314
+ // may block.
1315
+ static GraphId DeadlockCheck(Mutex *mu) {
1316
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1317
+ OnDeadlockCycle::kIgnore) {
1318
+ return InvalidGraphId();
1319
+ }
1320
+
1321
+ SynchLocksHeld *all_locks = Synch_GetAllLocks();
1322
+
1323
+ absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1324
+ const GraphId mu_id = GetGraphIdLocked(mu);
1325
+
1326
+ if (all_locks->n == 0) {
1327
+ // There are no other locks held. Return now so that we don't need to
1328
+ // call GetSynchEvent(). This way we do not record the stack trace
1329
+ // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1330
+ // it can't always be the first lock acquired by a thread.
1331
+ return mu_id;
1332
+ }
1333
+
1334
+ // We prefer to keep stack traces that show a thread holding and acquiring
1335
+ // as many locks as possible. This increases the chances that a given edge
1336
+ // in the acquires-before graph will be represented in the stack traces
1337
+ // recorded for the locks.
1338
+ deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1339
+
1340
+ // For each other mutex already held by this thread:
1341
+ for (int i = 0; i != all_locks->n; i++) {
1342
+ const GraphId other_node_id = all_locks->locks[i].id;
1343
+ const Mutex *other =
1344
+ static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1345
+ if (other == nullptr) {
1346
+ // Ignore stale lock
1347
+ continue;
1348
+ }
1349
+
1350
+ // Add the acquired-before edge to the graph.
1351
+ if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1352
+ ScopedDeadlockReportBuffers scoped_buffers;
1353
+ DeadlockReportBuffers *b = scoped_buffers.b;
1354
+ static int number_of_reported_deadlocks = 0;
1355
+ number_of_reported_deadlocks++;
1356
+ // Symbolize only 2 first deadlock report to avoid huge slowdowns.
1357
+ bool symbolize = number_of_reported_deadlocks <= 2;
1358
+ ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1359
+ CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1360
+ int len = 0;
1361
+ for (int j = 0; j != all_locks->n; j++) {
1362
+ void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1363
+ if (pr != nullptr) {
1364
+ snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
1365
+ len += static_cast<int>(strlen(&b->buf[len]));
1366
+ }
1367
+ }
1368
+ ABSL_RAW_LOG(ERROR, "Acquiring %p Mutexes held: %s",
1369
+ static_cast<void *>(mu), b->buf);
1370
+ ABSL_RAW_LOG(ERROR, "Cycle: ");
1371
+ int path_len = deadlock_graph->FindPath(
1372
+ mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
1373
+ for (int j = 0; j != path_len; j++) {
1374
+ GraphId id = b->path[j];
1375
+ Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
1376
+ if (path_mu == nullptr) continue;
1377
+ void** stack;
1378
+ int depth = deadlock_graph->GetStackTrace(id, &stack);
1379
+ snprintf(b->buf, sizeof(b->buf),
1380
+ "mutex@%p stack: ", static_cast<void *>(path_mu));
1381
+ StackString(stack, depth, b->buf + strlen(b->buf),
1382
+ static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1383
+ symbolize);
1384
+ ABSL_RAW_LOG(ERROR, "%s", b->buf);
1385
+ }
1386
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1387
+ OnDeadlockCycle::kAbort) {
1388
+ deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
1389
+ ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1390
+ return mu_id;
1391
+ }
1392
+ break; // report at most one potential deadlock per acquisition
1393
+ }
1394
+ }
1395
+
1396
+ return mu_id;
1397
+ }
1398
+
1399
+ // Invoke DeadlockCheck() iff we're in debug mode and
1400
+ // deadlock checking has been enabled.
1401
+ static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1402
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1403
+ OnDeadlockCycle::kIgnore) {
1404
+ return DeadlockCheck(mu);
1405
+ } else {
1406
+ return InvalidGraphId();
1407
+ }
1408
+ }
1409
+
1410
+ void Mutex::ForgetDeadlockInfo() {
1411
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1412
+ OnDeadlockCycle::kIgnore) {
1413
+ deadlock_graph_mu.Lock();
1414
+ if (deadlock_graph != nullptr) {
1415
+ deadlock_graph->RemoveNode(this);
1416
+ }
1417
+ deadlock_graph_mu.Unlock();
1418
+ }
1419
+ }
1420
+
1421
+ void Mutex::AssertNotHeld() const {
1422
+ // We have the data to allow this check only if in debug mode and deadlock
1423
+ // detection is enabled.
1424
+ if (kDebugMode &&
1425
+ (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1426
+ synch_deadlock_detection.load(std::memory_order_acquire) !=
1427
+ OnDeadlockCycle::kIgnore) {
1428
+ GraphId id = GetGraphId(const_cast<Mutex *>(this));
1429
+ SynchLocksHeld *locks = Synch_GetAllLocks();
1430
+ for (int i = 0; i != locks->n; i++) {
1431
+ if (locks->locks[i].id == id) {
1432
+ SynchEvent *mu_events = GetSynchEvent(this);
1433
+ ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1434
+ static_cast<const void *>(this),
1435
+ (mu_events == nullptr ? "" : mu_events->name));
1436
+ }
1437
+ }
1438
+ }
1439
+ }
1440
+
1441
+ // Attempt to acquire *mu, and return whether successful. The implementation
1442
+ // may spin for a short while if the lock cannot be acquired immediately.
1443
+ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1444
+ int c = GetMutexGlobals().spinloop_iterations;
1445
+ do { // do/while somewhat faster on AMD
1446
+ intptr_t v = mu->load(std::memory_order_relaxed);
1447
+ if ((v & (kMuReader|kMuEvent)) != 0) {
1448
+ return false; // a reader or tracing -> give up
1449
+ } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
1450
+ mu->compare_exchange_strong(v, kMuWriter | v,
1451
+ std::memory_order_acquire,
1452
+ std::memory_order_relaxed)) {
1453
+ return true;
1454
+ }
1455
+ } while (--c > 0);
1456
+ return false;
1457
+ }
1458
+
1459
+ ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
1460
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1461
+ GraphId id = DebugOnlyDeadlockCheck(this);
1462
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1463
+ // try fast acquire, then spin loop
1464
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1465
+ !mu_.compare_exchange_strong(v, kMuWriter | v,
1466
+ std::memory_order_acquire,
1467
+ std::memory_order_relaxed)) {
1468
+ // try spin acquire, then slow loop
1469
+ if (!TryAcquireWithSpinning(&this->mu_)) {
1470
+ this->LockSlow(kExclusive, nullptr, 0);
1471
+ }
1472
+ }
1473
+ DebugOnlyLockEnter(this, id);
1474
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1475
+ }
1476
+
1477
+ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
1478
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1479
+ GraphId id = DebugOnlyDeadlockCheck(this);
1480
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1481
+ // try fast acquire, then slow loop
1482
+ if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1483
+ !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1484
+ std::memory_order_acquire,
1485
+ std::memory_order_relaxed)) {
1486
+ this->LockSlow(kShared, nullptr, 0);
1487
+ }
1488
+ DebugOnlyLockEnter(this, id);
1489
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1490
+ }
1491
+
1492
+ void Mutex::LockWhen(const Condition &cond) {
1493
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1494
+ GraphId id = DebugOnlyDeadlockCheck(this);
1495
+ this->LockSlow(kExclusive, &cond, 0);
1496
+ DebugOnlyLockEnter(this, id);
1497
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1498
+ }
1499
+
1500
+ bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1501
+ return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1502
+ }
1503
+
1504
+ bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1505
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1506
+ GraphId id = DebugOnlyDeadlockCheck(this);
1507
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
1508
+ KernelTimeout(deadline), 0);
1509
+ DebugOnlyLockEnter(this, id);
1510
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1511
+ return res;
1512
+ }
1513
+
1514
+ void Mutex::ReaderLockWhen(const Condition &cond) {
1515
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1516
+ GraphId id = DebugOnlyDeadlockCheck(this);
1517
+ this->LockSlow(kShared, &cond, 0);
1518
+ DebugOnlyLockEnter(this, id);
1519
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1520
+ }
1521
+
1522
+ bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1523
+ absl::Duration timeout) {
1524
+ return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1525
+ }
1526
+
1527
+ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1528
+ absl::Time deadline) {
1529
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1530
+ GraphId id = DebugOnlyDeadlockCheck(this);
1531
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1532
+ DebugOnlyLockEnter(this, id);
1533
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1534
+ return res;
1535
+ }
1536
+
1537
+ void Mutex::Await(const Condition &cond) {
1538
+ if (cond.Eval()) { // condition already true; nothing to do
1539
+ if (kDebugMode) {
1540
+ this->AssertReaderHeld();
1541
+ }
1542
+ } else { // normal case
1543
+ ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1544
+ "condition untrue on return from Await");
1545
+ }
1546
+ }
1547
+
1548
+ bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1549
+ return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1550
+ }
1551
+
1552
+ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1553
+ if (cond.Eval()) { // condition already true; nothing to do
1554
+ if (kDebugMode) {
1555
+ this->AssertReaderHeld();
1556
+ }
1557
+ return true;
1558
+ }
1559
+
1560
+ KernelTimeout t{deadline};
1561
+ bool res = this->AwaitCommon(cond, t);
1562
+ ABSL_RAW_CHECK(res || t.has_timeout(),
1563
+ "condition untrue on return from Await");
1564
+ return res;
1565
+ }
1566
+
1567
+ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1568
+ this->AssertReaderHeld();
1569
+ MuHow how =
1570
+ (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1571
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1572
+ SynchWaitParams waitp(
1573
+ how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1574
+ nullptr /*no cv_word*/);
1575
+ int flags = kMuHasBlocked;
1576
+ if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1577
+ flags |= kMuIsCond;
1578
+ }
1579
+ this->UnlockSlow(&waitp);
1580
+ this->Block(waitp.thread);
1581
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1582
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1583
+ this->LockSlowLoop(&waitp, flags);
1584
+ bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
1585
+ EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1586
+ ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1587
+ return res;
1588
+ }
1589
+
1590
+ ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
1591
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1592
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1593
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
1594
+ mu_.compare_exchange_strong(v, kMuWriter | v,
1595
+ std::memory_order_acquire,
1596
+ std::memory_order_relaxed)) {
1597
+ DebugOnlyLockEnter(this);
1598
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1599
+ return true;
1600
+ }
1601
+ if ((v & kMuEvent) != 0) { // we're recording events
1602
+ if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
1603
+ mu_.compare_exchange_strong(
1604
+ v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1605
+ std::memory_order_acquire, std::memory_order_relaxed)) {
1606
+ DebugOnlyLockEnter(this);
1607
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1608
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1609
+ return true;
1610
+ } else {
1611
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1612
+ }
1613
+ }
1614
+ ABSL_TSAN_MUTEX_POST_LOCK(
1615
+ this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1616
+ return false;
1617
+ }
1618
+
1619
+ ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
1620
+ ABSL_TSAN_MUTEX_PRE_LOCK(this,
1621
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1622
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1623
+ // The while-loops (here and below) iterate only if the mutex word keeps
1624
+ // changing (typically because the reader count changes) under the CAS. We
1625
+ // limit the number of attempts to avoid having to think about livelock.
1626
+ int loop_limit = 5;
1627
+ while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1628
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1629
+ std::memory_order_acquire,
1630
+ std::memory_order_relaxed)) {
1631
+ DebugOnlyLockEnter(this);
1632
+ ABSL_TSAN_MUTEX_POST_LOCK(
1633
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1634
+ return true;
1635
+ }
1636
+ loop_limit--;
1637
+ v = mu_.load(std::memory_order_relaxed);
1638
+ }
1639
+ if ((v & kMuEvent) != 0) { // we're recording events
1640
+ loop_limit = 5;
1641
+ while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1642
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1643
+ std::memory_order_acquire,
1644
+ std::memory_order_relaxed)) {
1645
+ DebugOnlyLockEnter(this);
1646
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1647
+ ABSL_TSAN_MUTEX_POST_LOCK(
1648
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1649
+ return true;
1650
+ }
1651
+ loop_limit--;
1652
+ v = mu_.load(std::memory_order_relaxed);
1653
+ }
1654
+ if ((v & kMuEvent) != 0) {
1655
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1656
+ }
1657
+ }
1658
+ ABSL_TSAN_MUTEX_POST_LOCK(this,
1659
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1660
+ __tsan_mutex_try_lock_failed,
1661
+ 0);
1662
+ return false;
1663
+ }
1664
+
1665
+ ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
1666
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1667
+ DebugOnlyLockLeave(this);
1668
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1669
+
1670
+ if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1671
+ ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1672
+ static_cast<unsigned>(v));
1673
+ }
1674
+
1675
+ // should_try_cas is whether we'll try a compare-and-swap immediately.
1676
+ // NOTE: optimized out when kDebugMode is false.
1677
+ bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1678
+ (v & (kMuWait | kMuDesig)) != kMuWait);
1679
+ // But, we can use an alternate computation of it, that compilers
1680
+ // currently don't find on their own. When that changes, this function
1681
+ // can be simplified.
1682
+ intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1683
+ intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1684
+ // Claim: "x == 0 && y > 0" is equal to should_try_cas.
1685
+ // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1686
+ // all possible non-zero values for x exceed all possible values for y.
1687
+ // Therefore, (x == 0 && y > 0) == (x < y).
1688
+ if (kDebugMode && should_try_cas != (x < y)) {
1689
+ // We would usually use PRIdPTR here, but is not correctly implemented
1690
+ // within the android toolchain.
1691
+ ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1692
+ static_cast<long long>(v), static_cast<long long>(x),
1693
+ static_cast<long long>(y));
1694
+ }
1695
+ if (x < y &&
1696
+ mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1697
+ std::memory_order_release,
1698
+ std::memory_order_relaxed)) {
1699
+ // fast writer release (writer with no waiters or with designated waker)
1700
+ } else {
1701
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1702
+ }
1703
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1704
+ }
1705
+
1706
+ // Requires v to represent a reader-locked state.
1707
+ static bool ExactlyOneReader(intptr_t v) {
1708
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
1709
+ assert((v & kMuHigh) != 0);
1710
+ // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1711
+ // on some architectures the following generates slightly smaller code.
1712
+ // It may be faster too.
1713
+ constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1714
+ return (v & kMuMultipleWaitersMask) == 0;
1715
+ }
1716
+
1717
+ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
1718
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1719
+ DebugOnlyLockLeave(this);
1720
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1721
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
1722
+ if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1723
+ // fast reader release (reader with no waiters)
1724
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1725
+ if (mu_.compare_exchange_strong(v, v - clear,
1726
+ std::memory_order_release,
1727
+ std::memory_order_relaxed)) {
1728
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1729
+ return;
1730
+ }
1731
+ }
1732
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1733
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1734
+ }
1735
+
1736
+ // The zap_desig_waker bitmask is used to clear the designated waker flag in
1737
+ // the mutex if this thread has blocked, and therefore may be the designated
1738
+ // waker.
1739
+ static const intptr_t zap_desig_waker[] = {
1740
+ ~static_cast<intptr_t>(0), // not blocked
1741
+ ~static_cast<intptr_t>(
1742
+ kMuDesig) // blocked; turn off the designated waker bit
1743
+ };
1744
+
1745
+ // The ignore_waiting_writers bitmask is used to ignore the existence
1746
+ // of waiting writers if a reader that has already blocked once
1747
+ // wakes up.
1748
+ static const intptr_t ignore_waiting_writers[] = {
1749
+ ~static_cast<intptr_t>(0), // not blocked
1750
+ ~static_cast<intptr_t>(
1751
+ kMuWrWait) // blocked; pretend there are no waiting writers
1752
+ };
1753
+
1754
+ // Internal version of LockWhen(). See LockSlowWithDeadline()
1755
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1756
+ int flags) {
1757
+ ABSL_RAW_CHECK(
1758
+ this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1759
+ "condition untrue on return from LockSlow");
1760
+ }
1761
+
1762
+ // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1763
+ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1764
+ bool locking, bool trylock,
1765
+ bool read_lock) {
1766
+ // Delicate annotation dance.
1767
+ // We are currently inside of read/write lock/unlock operation.
1768
+ // All memory accesses are ignored inside of mutex operations + for unlock
1769
+ // operation tsan considers that we've already released the mutex.
1770
+ bool res = false;
1771
+ #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1772
+ const int flags = read_lock ? __tsan_mutex_read_lock : 0;
1773
+ const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1774
+ #endif
1775
+ if (locking) {
1776
+ // For lock we pretend that we have finished the operation,
1777
+ // evaluate the predicate, then unlock the mutex and start locking it again
1778
+ // to match the annotation at the end of outer lock operation.
1779
+ // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1780
+ // will think the lock acquisition is recursive which will trigger
1781
+ // deadlock detector.
1782
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1783
+ res = cond->Eval();
1784
+ // There is no "try" version of Unlock, so use flags instead of tryflags.
1785
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1786
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1787
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1788
+ } else {
1789
+ // Similarly, for unlock we pretend that we have unlocked the mutex,
1790
+ // lock the mutex, evaluate the predicate, and start unlocking it again
1791
+ // to match the annotation at the end of outer unlock operation.
1792
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1793
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1794
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1795
+ res = cond->Eval();
1796
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1797
+ }
1798
+ // Prevent unused param warnings in non-TSAN builds.
1799
+ static_cast<void>(mu);
1800
+ static_cast<void>(trylock);
1801
+ static_cast<void>(read_lock);
1802
+ return res;
1803
+ }
1804
+
1805
+ // Compute cond->Eval() hiding it from race detectors.
1806
+ // We are hiding it because inside of UnlockSlow we can evaluate a predicate
1807
+ // that was just added by a concurrent Lock operation; Lock adds the predicate
1808
+ // to the internal Mutex list without actually acquiring the Mutex
1809
+ // (it only acquires the internal spinlock, which is rightfully invisible for
1810
+ // tsan). As the result there is no tsan-visible synchronization between the
1811
+ // addition and this thread. So if we would enable race detection here,
1812
+ // it would race with the predicate initialization.
1813
+ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1814
+ // Memory accesses are already ignored inside of lock/unlock operations,
1815
+ // but synchronization operations are also ignored. When we evaluate the
1816
+ // predicate we must ignore only memory accesses but not synchronization,
1817
+ // because missed synchronization can lead to false reports later.
1818
+ // So we "divert" (which un-ignores both memory accesses and synchronization)
1819
+ // and then separately turn on ignores of memory accesses.
1820
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1821
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1822
+ bool res = cond->Eval();
1823
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1824
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1825
+ static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
1826
+ return res;
1827
+ }
1828
+
1829
+ // Internal equivalent of *LockWhenWithDeadline(), where
1830
+ // "t" represents the absolute timeout; !t.has_timeout() means "forever".
1831
+ // "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1832
+ // In flags, bits are ored together:
1833
+ // - kMuHasBlocked indicates that the client has already blocked on the call so
1834
+ // the designated waker bit must be cleared and waiting writers should not
1835
+ // obstruct this call
1836
+ // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1837
+ // Await, LockWhen) so contention profiling should be suppressed.
1838
+ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1839
+ KernelTimeout t, int flags) {
1840
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1841
+ bool unlock = false;
1842
+ if ((v & how->fast_need_zero) == 0 && // try fast acquire
1843
+ mu_.compare_exchange_strong(
1844
+ v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
1845
+ how->fast_add,
1846
+ std::memory_order_acquire, std::memory_order_relaxed)) {
1847
+ if (cond == nullptr ||
1848
+ EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1849
+ return true;
1850
+ }
1851
+ unlock = true;
1852
+ }
1853
+ SynchWaitParams waitp(
1854
+ how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1855
+ nullptr /*no cv_word*/);
1856
+ if (!Condition::GuaranteedEqual(cond, nullptr)) {
1857
+ flags |= kMuIsCond;
1858
+ }
1859
+ if (unlock) {
1860
+ this->UnlockSlow(&waitp);
1861
+ this->Block(waitp.thread);
1862
+ flags |= kMuHasBlocked;
1863
+ }
1864
+ this->LockSlowLoop(&waitp, flags);
1865
+ return waitp.cond != nullptr || // => cond known true from LockSlowLoop
1866
+ cond == nullptr ||
1867
+ EvalConditionAnnotated(cond, this, true, false, how == kShared);
1868
+ }
1869
+
1870
+ // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1871
+ // the printf-style argument list. The format string must be a literal.
1872
+ // Arguments after the first are not evaluated unless the condition is true.
1873
+ #define RAW_CHECK_FMT(cond, ...) \
1874
+ do { \
1875
+ if (ABSL_PREDICT_FALSE(!(cond))) { \
1876
+ ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1877
+ } \
1878
+ } while (0)
1879
+
1880
+ static void CheckForMutexCorruption(intptr_t v, const char* label) {
1881
+ // Test for either of two situations that should not occur in v:
1882
+ // kMuWriter and kMuReader
1883
+ // kMuWrWait and !kMuWait
1884
+ const uintptr_t w = v ^ kMuWait;
1885
+ // By flipping that bit, we can now test for:
1886
+ // kMuWriter and kMuReader in w
1887
+ // kMuWrWait and kMuWait in w
1888
+ // We've chosen these two pairs of values to be so that they will overlap,
1889
+ // respectively, when the word is left shifted by three. This allows us to
1890
+ // save a branch in the common (correct) case of them not being coincident.
1891
+ static_assert(kMuReader << 3 == kMuWriter, "must match");
1892
+ static_assert(kMuWait << 3 == kMuWrWait, "must match");
1893
+ if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1894
+ RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1895
+ "%s: Mutex corrupt: both reader and writer lock held: %p",
1896
+ label, reinterpret_cast<void *>(v));
1897
+ RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1898
+ "%s: Mutex corrupt: waiting writer with no waiters: %p",
1899
+ label, reinterpret_cast<void *>(v));
1900
+ assert(false);
1901
+ }
1902
+
1903
+ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1904
+ SchedulingGuard::ScopedDisable disable_rescheduling;
1905
+ int c = 0;
1906
+ intptr_t v = mu_.load(std::memory_order_relaxed);
1907
+ if ((v & kMuEvent) != 0) {
1908
+ PostSynchEvent(this,
1909
+ waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1910
+ }
1911
+ ABSL_RAW_CHECK(
1912
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1913
+ "detected illegal recursion into Mutex code");
1914
+ for (;;) {
1915
+ v = mu_.load(std::memory_order_relaxed);
1916
+ CheckForMutexCorruption(v, "Lock");
1917
+ if ((v & waitp->how->slow_need_zero) == 0) {
1918
+ if (mu_.compare_exchange_strong(
1919
+ v, (waitp->how->fast_or |
1920
+ (v & zap_desig_waker[flags & kMuHasBlocked])) +
1921
+ waitp->how->fast_add,
1922
+ std::memory_order_acquire, std::memory_order_relaxed)) {
1923
+ if (waitp->cond == nullptr ||
1924
+ EvalConditionAnnotated(waitp->cond, this, true, false,
1925
+ waitp->how == kShared)) {
1926
+ break; // we timed out, or condition true, so return
1927
+ }
1928
+ this->UnlockSlow(waitp); // got lock but condition false
1929
+ this->Block(waitp->thread);
1930
+ flags |= kMuHasBlocked;
1931
+ c = 0;
1932
+ }
1933
+ } else { // need to access waiter list
1934
+ bool dowait = false;
1935
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
1936
+ // This thread tries to become the one and only waiter.
1937
+ PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1938
+ intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
1939
+ kMuWait;
1940
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
1941
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
1942
+ nv |= kMuWrWait;
1943
+ }
1944
+ if (mu_.compare_exchange_strong(
1945
+ v, reinterpret_cast<intptr_t>(new_h) | nv,
1946
+ std::memory_order_release, std::memory_order_relaxed)) {
1947
+ dowait = true;
1948
+ } else { // attempted Enqueue() failed
1949
+ // zero out the waitp field set by Enqueue()
1950
+ waitp->thread->waitp = nullptr;
1951
+ }
1952
+ } else if ((v & waitp->how->slow_inc_need_zero &
1953
+ ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
1954
+ // This is a reader that needs to increment the reader count,
1955
+ // but the count is currently held in the last waiter.
1956
+ if (mu_.compare_exchange_strong(
1957
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
1958
+ kMuReader,
1959
+ std::memory_order_acquire, std::memory_order_relaxed)) {
1960
+ PerThreadSynch *h = GetPerThreadSynch(v);
1961
+ h->readers += kMuOne; // inc reader count in waiter
1962
+ do { // release spinlock
1963
+ v = mu_.load(std::memory_order_relaxed);
1964
+ } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
1965
+ std::memory_order_release,
1966
+ std::memory_order_relaxed));
1967
+ if (waitp->cond == nullptr ||
1968
+ EvalConditionAnnotated(waitp->cond, this, true, false,
1969
+ waitp->how == kShared)) {
1970
+ break; // we timed out, or condition true, so return
1971
+ }
1972
+ this->UnlockSlow(waitp); // got lock but condition false
1973
+ this->Block(waitp->thread);
1974
+ flags |= kMuHasBlocked;
1975
+ c = 0;
1976
+ }
1977
+ } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
1978
+ mu_.compare_exchange_strong(
1979
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
1980
+ kMuWait,
1981
+ std::memory_order_acquire, std::memory_order_relaxed)) {
1982
+ PerThreadSynch *h = GetPerThreadSynch(v);
1983
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
1984
+ intptr_t wr_wait = 0;
1985
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
1986
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
1987
+ wr_wait = kMuWrWait; // give priority to a waiting writer
1988
+ }
1989
+ do { // release spinlock
1990
+ v = mu_.load(std::memory_order_relaxed);
1991
+ } while (!mu_.compare_exchange_weak(
1992
+ v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
1993
+ reinterpret_cast<intptr_t>(new_h),
1994
+ std::memory_order_release, std::memory_order_relaxed));
1995
+ dowait = true;
1996
+ }
1997
+ if (dowait) {
1998
+ this->Block(waitp->thread); // wait until removed from list or timeout
1999
+ flags |= kMuHasBlocked;
2000
+ c = 0;
2001
+ }
2002
+ }
2003
+ ABSL_RAW_CHECK(
2004
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2005
+ "detected illegal recursion into Mutex code");
2006
+ // delay, then try again
2007
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2008
+ }
2009
+ ABSL_RAW_CHECK(
2010
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2011
+ "detected illegal recursion into Mutex code");
2012
+ if ((v & kMuEvent) != 0) {
2013
+ PostSynchEvent(this,
2014
+ waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2015
+ SYNCH_EV_READERLOCK_RETURNING);
2016
+ }
2017
+ }
2018
+
2019
+ // Unlock this mutex, which is held by the current thread.
2020
+ // If waitp is non-zero, it must be the wait parameters for the current thread
2021
+ // which holds the lock but is not runnable because its condition is false
2022
+ // or it is in the process of blocking on a condition variable; it must requeue
2023
+ // itself on the mutex/condvar to wait for its condition to become true.
2024
+ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2025
+ SchedulingGuard::ScopedDisable disable_rescheduling;
2026
+ intptr_t v = mu_.load(std::memory_order_relaxed);
2027
+ this->AssertReaderHeld();
2028
+ CheckForMutexCorruption(v, "Unlock");
2029
+ if ((v & kMuEvent) != 0) {
2030
+ PostSynchEvent(this,
2031
+ (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2032
+ }
2033
+ int c = 0;
2034
+ // the waiter under consideration to wake, or zero
2035
+ PerThreadSynch *w = nullptr;
2036
+ // the predecessor to w or zero
2037
+ PerThreadSynch *pw = nullptr;
2038
+ // head of the list searched previously, or zero
2039
+ PerThreadSynch *old_h = nullptr;
2040
+ // a condition that's known to be false.
2041
+ const Condition *known_false = nullptr;
2042
+ PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
2043
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2044
+ // later writer could have acquired the lock
2045
+ // (starvation avoidance)
2046
+ ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2047
+ waitp->thread->suppress_fatal_errors,
2048
+ "detected illegal recursion into Mutex code");
2049
+ // This loop finds threads wake_list to wakeup if any, and removes them from
2050
+ // the list of waiters. In addition, it places waitp.thread on the queue of
2051
+ // waiters if waitp is non-zero.
2052
+ for (;;) {
2053
+ v = mu_.load(std::memory_order_relaxed);
2054
+ if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2055
+ waitp == nullptr) {
2056
+ // fast writer release (writer with no waiters or with designated waker)
2057
+ if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2058
+ std::memory_order_release,
2059
+ std::memory_order_relaxed)) {
2060
+ return;
2061
+ }
2062
+ } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2063
+ // fast reader release (reader with no waiters)
2064
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2065
+ if (mu_.compare_exchange_strong(v, v - clear,
2066
+ std::memory_order_release,
2067
+ std::memory_order_relaxed)) {
2068
+ return;
2069
+ }
2070
+ } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
2071
+ mu_.compare_exchange_strong(v, v | kMuSpin,
2072
+ std::memory_order_acquire,
2073
+ std::memory_order_relaxed)) {
2074
+ if ((v & kMuWait) == 0) { // no one to wake
2075
+ intptr_t nv;
2076
+ bool do_enqueue = true; // always Enqueue() the first time
2077
+ ABSL_RAW_CHECK(waitp != nullptr,
2078
+ "UnlockSlow is confused"); // about to sleep
2079
+ do { // must loop to release spinlock as reader count may change
2080
+ v = mu_.load(std::memory_order_relaxed);
2081
+ // decrement reader count if there are readers
2082
+ intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
2083
+ PerThreadSynch *new_h = nullptr;
2084
+ if (do_enqueue) {
2085
+ // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2086
+ // we must not retry here. The initial attempt will always have
2087
+ // succeeded, further attempts would enqueue us against *this due to
2088
+ // Fer() handling.
2089
+ do_enqueue = (waitp->cv_word == nullptr);
2090
+ new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2091
+ }
2092
+ intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
2093
+ if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
2094
+ clear = kMuWrWait | kMuReader; // clear read bit
2095
+ }
2096
+ nv = (v & kMuLow & ~clear & ~kMuSpin);
2097
+ if (new_h != nullptr) {
2098
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2099
+ } else { // new_h could be nullptr if we queued ourselves on a
2100
+ // CondVar
2101
+ // In that case, we must place the reader count back in the mutex
2102
+ // word, as Enqueue() did not store it in the new waiter.
2103
+ nv |= new_readers & kMuHigh;
2104
+ }
2105
+ // release spinlock & our lock; retry if reader-count changed
2106
+ // (writer count cannot change since we hold lock)
2107
+ } while (!mu_.compare_exchange_weak(v, nv,
2108
+ std::memory_order_release,
2109
+ std::memory_order_relaxed));
2110
+ break;
2111
+ }
2112
+
2113
+ // There are waiters.
2114
+ // Set h to the head of the circular waiter list.
2115
+ PerThreadSynch *h = GetPerThreadSynch(v);
2116
+ if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2117
+ // a reader but not the last
2118
+ h->readers -= kMuOne; // release our lock
2119
+ intptr_t nv = v; // normally just release spinlock
2120
+ if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2121
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2122
+ ABSL_RAW_CHECK(new_h != nullptr,
2123
+ "waiters disappeared during Enqueue()!");
2124
+ nv &= kMuLow;
2125
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2126
+ }
2127
+ mu_.store(nv, std::memory_order_release); // release spinlock
2128
+ // can release with a store because there were waiters
2129
+ break;
2130
+ }
2131
+
2132
+ // Either we didn't search before, or we marked the queue
2133
+ // as "maybe_unlocking" and no one else should have changed it.
2134
+ ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2135
+ "Mutex queue changed beneath us");
2136
+
2137
+ // The lock is becoming free, and there's a waiter
2138
+ if (old_h != nullptr &&
2139
+ !old_h->may_skip) { // we used old_h as a terminator
2140
+ old_h->may_skip = true; // allow old_h to skip once more
2141
+ ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2142
+ if (h != old_h && MuSameCondition(old_h, old_h->next)) {
2143
+ old_h->skip = old_h->next; // old_h not head & can skip to successor
2144
+ }
2145
+ }
2146
+ if (h->next->waitp->how == kExclusive &&
2147
+ Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2148
+ // easy case: writer with no condition; no need to search
2149
+ pw = h; // wake w, the successor of h (=pw)
2150
+ w = h->next;
2151
+ w->wake = true;
2152
+ // We are waking up a writer. This writer may be racing against
2153
+ // an already awake reader for the lock. We want the
2154
+ // writer to usually win this race,
2155
+ // because if it doesn't, we can potentially keep taking a reader
2156
+ // perpetually and writers will starve. Worse than
2157
+ // that, this can also starve other readers if kMuWrWait gets set
2158
+ // later.
2159
+ wr_wait = kMuWrWait;
2160
+ } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2161
+ // we found a waiter w to wake on a previous iteration and either it's
2162
+ // a writer, or we've searched the entire list so we have all the
2163
+ // readers.
2164
+ if (pw == nullptr) { // if w's predecessor is unknown, it must be h
2165
+ pw = h;
2166
+ }
2167
+ } else {
2168
+ // At this point we don't know all the waiters to wake, and the first
2169
+ // waiter has a condition or is a reader. We avoid searching over
2170
+ // waiters we've searched on previous iterations by starting at
2171
+ // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2172
+ if (old_h == h) { // we've searched before, and nothing's new
2173
+ // so there's no one to wake.
2174
+ intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2175
+ h->readers = 0;
2176
+ h->maybe_unlocking = false; // finished unlocking
2177
+ if (waitp != nullptr) { // we must queue ourselves and sleep
2178
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2179
+ nv &= kMuLow;
2180
+ if (new_h != nullptr) {
2181
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2182
+ } // else new_h could be nullptr if we queued ourselves on a
2183
+ // CondVar
2184
+ }
2185
+ // release spinlock & lock
2186
+ // can release with a store because there were waiters
2187
+ mu_.store(nv, std::memory_order_release);
2188
+ break;
2189
+ }
2190
+
2191
+ // set up to walk the list
2192
+ PerThreadSynch *w_walk; // current waiter during list walk
2193
+ PerThreadSynch *pw_walk; // previous waiter during list walk
2194
+ if (old_h != nullptr) { // we've searched up to old_h before
2195
+ pw_walk = old_h;
2196
+ w_walk = old_h->next;
2197
+ } else { // no prior search, start at beginning
2198
+ pw_walk =
2199
+ nullptr; // h->next's predecessor may change; don't record it
2200
+ w_walk = h->next;
2201
+ }
2202
+
2203
+ h->may_skip = false; // ensure we never skip past h in future searches
2204
+ // even if other waiters are queued after it.
2205
+ ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2206
+
2207
+ h->maybe_unlocking = true; // we're about to scan the waiter list
2208
+ // without the spinlock held.
2209
+ // Enqueue must be conservative about
2210
+ // priority queuing.
2211
+
2212
+ // We must release the spinlock to evaluate the conditions.
2213
+ mu_.store(v, std::memory_order_release); // release just spinlock
2214
+ // can release with a store because there were waiters
2215
+
2216
+ // h is the last waiter queued, and w_walk the first unsearched waiter.
2217
+ // Without the spinlock, the locations mu_ and h->next may now change
2218
+ // underneath us, but since we hold the lock itself, the only legal
2219
+ // change is to add waiters between h and w_walk. Therefore, it's safe
2220
+ // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2221
+ // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2222
+
2223
+ old_h = h; // remember we searched to here
2224
+
2225
+ // Walk the path upto and including h looking for waiters we can wake.
2226
+ while (pw_walk != h) {
2227
+ w_walk->wake = false;
2228
+ if (w_walk->waitp->cond ==
2229
+ nullptr || // no condition => vacuously true OR
2230
+ (w_walk->waitp->cond != known_false &&
2231
+ // this thread's condition is not known false, AND
2232
+ // is in fact true
2233
+ EvalConditionIgnored(this, w_walk->waitp->cond))) {
2234
+ if (w == nullptr) {
2235
+ w_walk->wake = true; // can wake this waiter
2236
+ w = w_walk;
2237
+ pw = pw_walk;
2238
+ if (w_walk->waitp->how == kExclusive) {
2239
+ wr_wait = kMuWrWait;
2240
+ break; // bail if waking this writer
2241
+ }
2242
+ } else if (w_walk->waitp->how == kShared) { // wake if a reader
2243
+ w_walk->wake = true;
2244
+ } else { // writer with true condition
2245
+ wr_wait = kMuWrWait;
2246
+ }
2247
+ } else { // can't wake; condition false
2248
+ known_false = w_walk->waitp->cond; // remember last false condition
2249
+ }
2250
+ if (w_walk->wake) { // we're waking reader w_walk
2251
+ pw_walk = w_walk; // don't skip similar waiters
2252
+ } else { // not waking; skip as much as possible
2253
+ pw_walk = Skip(w_walk);
2254
+ }
2255
+ // If pw_walk == h, then load of pw_walk->next can race with
2256
+ // concurrent write in Enqueue(). However, at the same time
2257
+ // we do not need to do the load, because we will bail out
2258
+ // from the loop anyway.
2259
+ if (pw_walk != h) {
2260
+ w_walk = pw_walk->next;
2261
+ }
2262
+ }
2263
+
2264
+ continue; // restart for(;;)-loop to wakeup w or to find more waiters
2265
+ }
2266
+ ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2267
+ // The first (and perhaps only) waiter we've chosen to wake is w, whose
2268
+ // predecessor is pw. If w is a reader, we must wake all the other
2269
+ // waiters with wake==true as well. We may also need to queue
2270
+ // ourselves if waitp != null. The spinlock and the lock are still
2271
+ // held.
2272
+
2273
+ // This traverses the list in [ pw->next, h ], where h is the head,
2274
+ // removing all elements with wake==true and placing them in the
2275
+ // singly-linked list wake_list. Returns the new head.
2276
+ h = DequeueAllWakeable(h, pw, &wake_list);
2277
+
2278
+ intptr_t nv = (v & kMuEvent) | kMuDesig;
2279
+ // assume no waiters left,
2280
+ // set kMuDesig for INV1a
2281
+
2282
+ if (waitp != nullptr) { // we must queue ourselves and sleep
2283
+ h = Enqueue(h, waitp, v, kMuIsCond);
2284
+ // h is new last waiter; could be null if we queued ourselves on a
2285
+ // CondVar
2286
+ }
2287
+
2288
+ ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2289
+ "unexpected empty wake list");
2290
+
2291
+ if (h != nullptr) { // there are waiters left
2292
+ h->readers = 0;
2293
+ h->maybe_unlocking = false; // finished unlocking
2294
+ nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2295
+ }
2296
+
2297
+ // release both spinlock & lock
2298
+ // can release with a store because there were waiters
2299
+ mu_.store(nv, std::memory_order_release);
2300
+ break; // out of for(;;)-loop
2301
+ }
2302
+ // aggressive here; no one can proceed till we do
2303
+ c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2304
+ } // end of for(;;)-loop
2305
+
2306
+ if (wake_list != kPerThreadSynchNull) {
2307
+ int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
2308
+ bool cond_waiter = wake_list->cond_waiter;
2309
+ do {
2310
+ wake_list = Wakeup(wake_list); // wake waiters
2311
+ } while (wake_list != kPerThreadSynchNull);
2312
+ if (!cond_waiter) {
2313
+ // Sample lock contention events only if the (first) waiter was trying to
2314
+ // acquire the lock, not waiting on a condition variable or Condition.
2315
+ int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp;
2316
+ mutex_tracer("slow release", this, wait_cycles);
2317
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2318
+ submit_profile_data(enqueue_timestamp);
2319
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2320
+ }
2321
+ }
2322
+ }
2323
+
2324
+ // Used by CondVar implementation to reacquire mutex after waking from
2325
+ // condition variable. This routine is used instead of Lock() because the
2326
+ // waiting thread may have been moved from the condition variable queue to the
2327
+ // mutex queue without a wakeup, by Trans(). In that case, when the thread is
2328
+ // finally woken, the woken thread will believe it has been woken from the
2329
+ // condition variable (i.e. its PC will be in when in the CondVar code), when
2330
+ // in fact it has just been woken from the mutex. Thus, it must enter the slow
2331
+ // path of the mutex in the same state as if it had just woken from the mutex.
2332
+ // That is, it must ensure to clear kMuDesig (INV1b).
2333
+ void Mutex::Trans(MuHow how) {
2334
+ this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2335
+ }
2336
+
2337
+ // Used by CondVar implementation to effectively wake thread w from the
2338
+ // condition variable. If this mutex is free, we simply wake the thread.
2339
+ // It will later acquire the mutex with high probability. Otherwise, we
2340
+ // enqueue thread w on this mutex.
2341
+ void Mutex::Fer(PerThreadSynch *w) {
2342
+ SchedulingGuard::ScopedDisable disable_rescheduling;
2343
+ int c = 0;
2344
+ ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2345
+ "Mutex::Fer while waiting on Condition");
2346
+ ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2347
+ "Mutex::Fer while in timed wait");
2348
+ ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2349
+ "Mutex::Fer with pending CondVar queueing");
2350
+ for (;;) {
2351
+ intptr_t v = mu_.load(std::memory_order_relaxed);
2352
+ // Note: must not queue if the mutex is unlocked (nobody will wake it).
2353
+ // For example, we can have only kMuWait (conditional) or maybe
2354
+ // kMuWait|kMuWrWait.
2355
+ // conflicting != 0 implies that the waking thread cannot currently take
2356
+ // the mutex, which in turn implies that someone else has it and can wake
2357
+ // us if we queue.
2358
+ const intptr_t conflicting =
2359
+ kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2360
+ if ((v & conflicting) == 0) {
2361
+ w->next = nullptr;
2362
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2363
+ IncrementSynchSem(this, w);
2364
+ return;
2365
+ } else {
2366
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
2367
+ // This thread tries to become the one and only waiter.
2368
+ PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2369
+ ABSL_RAW_CHECK(new_h != nullptr,
2370
+ "Enqueue failed"); // we must queue ourselves
2371
+ if (mu_.compare_exchange_strong(
2372
+ v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2373
+ std::memory_order_release, std::memory_order_relaxed)) {
2374
+ return;
2375
+ }
2376
+ } else if ((v & kMuSpin) == 0 &&
2377
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2378
+ PerThreadSynch *h = GetPerThreadSynch(v);
2379
+ PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2380
+ ABSL_RAW_CHECK(new_h != nullptr,
2381
+ "Enqueue failed"); // we must queue ourselves
2382
+ do {
2383
+ v = mu_.load(std::memory_order_relaxed);
2384
+ } while (!mu_.compare_exchange_weak(
2385
+ v,
2386
+ (v & kMuLow & ~kMuSpin) | kMuWait |
2387
+ reinterpret_cast<intptr_t>(new_h),
2388
+ std::memory_order_release, std::memory_order_relaxed));
2389
+ return;
2390
+ }
2391
+ }
2392
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2393
+ }
2394
+ }
2395
+
2396
+ void Mutex::AssertHeld() const {
2397
+ if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2398
+ SynchEvent *e = GetSynchEvent(this);
2399
+ ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2400
+ static_cast<const void *>(this),
2401
+ (e == nullptr ? "" : e->name));
2402
+ }
2403
+ }
2404
+
2405
+ void Mutex::AssertReaderHeld() const {
2406
+ if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2407
+ SynchEvent *e = GetSynchEvent(this);
2408
+ ABSL_RAW_LOG(
2409
+ FATAL, "thread should hold at least a read lock on Mutex %p %s",
2410
+ static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2411
+ }
2412
+ }
2413
+
2414
+ // -------------------------------- condition variables
2415
+ static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
2416
+ static const intptr_t kCvEvent = 0x0002L; // record events
2417
+
2418
+ static const intptr_t kCvLow = 0x0003L; // low order bits of CV
2419
+
2420
+ // Hack to make constant values available to gdb pretty printer
2421
+ enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
2422
+
2423
+ static_assert(PerThreadSynch::kAlignment > kCvLow,
2424
+ "PerThreadSynch::kAlignment must be greater than kCvLow");
2425
+
2426
+ void CondVar::EnableDebugLog(const char *name) {
2427
+ SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2428
+ e->log = true;
2429
+ UnrefSynchEvent(e);
2430
+ }
2431
+
2432
+ CondVar::~CondVar() {
2433
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2434
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2435
+ }
2436
+ }
2437
+
2438
+
2439
+ // Remove thread s from the list of waiters on this condition variable.
2440
+ void CondVar::Remove(PerThreadSynch *s) {
2441
+ SchedulingGuard::ScopedDisable disable_rescheduling;
2442
+ intptr_t v;
2443
+ int c = 0;
2444
+ for (v = cv_.load(std::memory_order_relaxed);;
2445
+ v = cv_.load(std::memory_order_relaxed)) {
2446
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2447
+ cv_.compare_exchange_strong(v, v | kCvSpin,
2448
+ std::memory_order_acquire,
2449
+ std::memory_order_relaxed)) {
2450
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2451
+ if (h != nullptr) {
2452
+ PerThreadSynch *w = h;
2453
+ while (w->next != s && w->next != h) { // search for thread
2454
+ w = w->next;
2455
+ }
2456
+ if (w->next == s) { // found thread; remove it
2457
+ w->next = s->next;
2458
+ if (h == s) {
2459
+ h = (w == s) ? nullptr : w;
2460
+ }
2461
+ s->next = nullptr;
2462
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2463
+ }
2464
+ }
2465
+ // release spinlock
2466
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2467
+ std::memory_order_release);
2468
+ return;
2469
+ } else {
2470
+ // try again after a delay
2471
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2472
+ }
2473
+ }
2474
+ }
2475
+
2476
+ // Queue thread waitp->thread on condition variable word cv_word using
2477
+ // wait parameters waitp.
2478
+ // We split this into a separate routine, rather than simply doing it as part
2479
+ // of WaitCommon(). If we were to queue ourselves on the condition variable
2480
+ // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2481
+ // the logging code, or via a Condition function) and might potentially attempt
2482
+ // to block this thread. That would be a problem if the thread were already on
2483
+ // the condition variable waiter queue. Thus, we use the waitp->cv_word
2484
+ // to tell the unlock code to call CondVarEnqueue() to queue the thread on the
2485
+ // condition variable queue just before the mutex is to be unlocked, and (most
2486
+ // importantly) after any call to an external routine that might re-enter the
2487
+ // mutex code.
2488
+ static void CondVarEnqueue(SynchWaitParams *waitp) {
2489
+ // This thread might be transferred to the Mutex queue by Fer() when
2490
+ // we are woken. To make sure that is what happens, Enqueue() doesn't
2491
+ // call CondVarEnqueue() again but instead uses its normal code. We
2492
+ // must do this before we queue ourselves so that cv_word will be null
2493
+ // when seen by the dequeuer, who may wish immediately to requeue
2494
+ // this thread on another queue.
2495
+ std::atomic<intptr_t> *cv_word = waitp->cv_word;
2496
+ waitp->cv_word = nullptr;
2497
+
2498
+ intptr_t v = cv_word->load(std::memory_order_relaxed);
2499
+ int c = 0;
2500
+ // Spin (with backoff) until the kCvSpin bit is acquired via CAS; the
+ // spin bit serializes all modifications of the waiter list in cv_word.
+ while ((v & kCvSpin) != 0 || // acquire spinlock
2501
+ !cv_word->compare_exchange_weak(v, v | kCvSpin,
2502
+ std::memory_order_acquire,
2503
+ std::memory_order_relaxed)) {
2504
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2505
+ v = cv_word->load(std::memory_order_relaxed);
2506
+ }
2507
+ ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2508
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
2509
+ // The non-flag bits of cv_word hold the waiter list pointer (circular,
+ // singly linked via PerThreadSynch::next).
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2510
+ if (h == nullptr) { // add this thread to waiter list
2511
+ waitp->thread->next = waitp->thread;
2512
+ } else {
2513
+ waitp->thread->next = h->next;
2514
+ h->next = waitp->thread;
2515
+ }
2516
+ waitp->thread->state.store(PerThreadSynch::kQueued,
2517
+ std::memory_order_relaxed);
2518
+ // Publish the new list head and release the spinlock in one store,
+ // preserving the kCvEvent flag from the old value.
+ cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2519
+ std::memory_order_release);
2520
+ }
2521
+
2522
+ // Common implementation for Wait()/WaitWithTimeout()/WaitWithDeadline():
+ // releases *mutex, blocks until signalled or the kernel timeout t expires,
+ // then reacquires *mutex in the same mode it was held. Returns true iff
+ // the wait timed out.
+ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
2523
+ bool rc = false; // return value; true iff we timed-out
2524
+
2525
+ intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2526
+ Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2527
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2528
+
2529
+ // maybe trace this call
2530
+ intptr_t v = cv_.load(std::memory_order_relaxed);
2531
+ cond_var_tracer("Wait", this);
2532
+ if ((v & kCvEvent) != 0) {
2533
+ PostSynchEvent(this, SYNCH_EV_WAIT);
2534
+ }
2535
+
2536
+ // Release mu and wait on condition variable.
2537
+ SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2538
+ Synch_GetPerThreadAnnotated(mutex), &cv_);
2539
+ // UnlockSlow() will call CondVarEnqueue() just before releasing the
2540
+ // Mutex, thus queuing this thread on the condition variable. See
2541
+ // CondVarEnqueue() for the reasons.
2542
+ mutex->UnlockSlow(&waitp);
2543
+
2544
+ // wait for signal
2545
+ // Loop: keep blocking on the per-thread semaphore until a waker has moved
+ // us out of kQueued; a false DecrementSynchSem means the timeout fired, in
+ // which case we remove ourselves from the waiter list and report timeout.
+ while (waitp.thread->state.load(std::memory_order_acquire) ==
2546
+ PerThreadSynch::kQueued) {
2547
+ if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2548
+ this->Remove(waitp.thread);
2549
+ rc = true;
2550
+ }
2551
+ }
2552
+
2553
+ ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2554
+ waitp.thread->waitp = nullptr; // cleanup
2555
+
2556
+ // maybe trace this call
2557
+ cond_var_tracer("Unwait", this);
2558
+ if ((v & kCvEvent) != 0) {
2559
+ PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2560
+ }
2561
+
2562
+ // From synchronization point of view Wait is unlock of the mutex followed
2563
+ // by lock of the mutex. We've annotated start of unlock in the beginning
2564
+ // of the function. Now, finish unlock and annotate lock of the mutex.
2565
+ // (Trans is effectively lock).
2566
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2567
+ ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2568
+ mutex->Trans(mutex_how); // Reacquire mutex
2569
+ ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2570
+ return rc;
2571
+ }
2572
+
2573
+ // Relative-timeout wait: converts the duration to an absolute deadline.
+ // Returns true iff the wait timed out.
+ bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2574
+ return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2575
+ }
2576
+
2577
+ // Absolute-deadline wait; returns true iff the wait timed out.
+ bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2578
+ return WaitCommon(mu, KernelTimeout(deadline));
2579
+ }
2580
+
2581
+ // Untimed wait: blocks until signalled.
+ void CondVar::Wait(Mutex *mu) {
2582
+ WaitCommon(mu, KernelTimeout::Never());
2583
+ }
2584
+
2585
+ // Wake thread w
+ // If it was a timed wait, w will be waiting on w->cv
+ // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
+ // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
2586
2587
2588
2589
+ void CondVar::Wakeup(PerThreadSynch *w) {
2590
+ if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2591
+ // The waiting thread only needs to observe "w->state == kAvailable" to be
2592
+ // released, we must cache "cvmu" before clearing "next".
2593
+ Mutex *mu = w->waitp->cvmu;
2594
+ w->next = nullptr;
2595
+ // Release-store of kAvailable is what actually frees the waiter; the
+ // waiter's acquire-load in WaitCommon() pairs with it.
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2596
+ Mutex::IncrementSynchSem(mu, w);
2597
+ } else {
2598
+ // Transfer the waiter directly to the mutex's queue instead of waking it.
+ w->waitp->cvmu->Fer(w);
2599
+ }
2600
+ }
2601
+
2602
+ // Wake at most one waiter: acquire the cv_ spinlock, pop the first waiter
+ // from the circular list (if any), release the spinlock, then wake it.
+ // A zero cv_ word means no waiters and no event flag, so there is nothing
+ // to do and the loop exits immediately.
+ void CondVar::Signal() {
2603
+ SchedulingGuard::ScopedDisable disable_rescheduling;
2604
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2605
+ intptr_t v;
2606
+ int c = 0;
2607
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
2608
+ v = cv_.load(std::memory_order_relaxed)) {
2609
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2610
+ cv_.compare_exchange_strong(v, v | kCvSpin,
2611
+ std::memory_order_acquire,
2612
+ std::memory_order_relaxed)) {
2613
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2614
+ PerThreadSynch *w = nullptr;
2615
+ if (h != nullptr) { // remove first waiter
2616
+ w = h->next;
2617
+ if (w == h) {
2618
+ h = nullptr; // w was the only waiter; list is now empty
2619
+ } else {
2620
+ h->next = w->next;
2621
+ }
2622
+ }
2623
+ // release spinlock
2624
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2625
+ std::memory_order_release);
2626
+ // Wake outside the spinlock so the wakeup path cannot hold it up.
+ if (w != nullptr) {
2627
+ CondVar::Wakeup(w); // wake waiter, if there was one
2628
+ cond_var_tracer("Signal wakeup", this);
2629
+ }
2630
+ if ((v & kCvEvent) != 0) {
2631
+ PostSynchEvent(this, SYNCH_EV_SIGNAL);
2632
+ }
2633
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2634
+ return;
2635
+ } else {
2636
+ // spinlock held by someone else; back off and retry
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2637
+ }
2638
+ }
2639
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2640
+ }
2641
+
2642
+ // Wake every waiter: atomically detach the entire waiter list from cv_
+ // (keeping only the kCvEvent flag), then walk the detached circular list
+ // and wake each thread in turn.
+ void CondVar::SignalAll () {
2643
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2644
+ intptr_t v;
2645
+ int c = 0;
2646
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
2647
+ v = cv_.load(std::memory_order_relaxed)) {
2648
+ // empty the list if spinlock free
2649
+ // We do this by simply setting the list to empty using
2650
+ // compare and swap. We then have the entire list in our hands,
2651
+ // which cannot be changing since we grabbed it while no one
2652
+ // held the lock.
2653
+ if ((v & kCvSpin) == 0 &&
2654
+ cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2655
+ std::memory_order_relaxed)) {
2656
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2657
+ if (h != nullptr) {
2658
+ PerThreadSynch *w;
2659
+ PerThreadSynch *n = h->next;
2660
+ do { // for every thread, wake it up
2661
+ // Advance n before Wakeup(): Wakeup() may clear w->next or hand w
+ // to another queue, so w must not be touched afterwards.
+ w = n;
2662
+ n = n->next;
2663
+ CondVar::Wakeup(w);
2664
+ } while (w != h);
2665
+ cond_var_tracer("SignalAll wakeup", this);
2666
+ }
2667
+ if ((v & kCvEvent) != 0) {
2668
+ PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2669
+ }
2670
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2671
+ return;
2672
+ } else {
2673
+ // try again after a delay
2674
+ c = synchronization_internal::MutexDelay(c, GENTLE);
2675
+ }
2676
+ }
2677
+ ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2678
+ }
2679
+
2680
+ // Explicitly unlock the guarded mutex before the lock object is destroyed.
+ // mu_ is nulled so the destructor does not unlock again, and so a second
+ // Release() call trips the RAW_CHECK below.
+ void ReleasableMutexLock::Release() {
2681
+ ABSL_RAW_CHECK(this->mu_ != nullptr,
2682
+ "ReleasableMutexLock::Release may only be called once");
2683
+ this->mu_->Unlock();
2684
+ this->mu_ = nullptr;
2685
+ }
2686
+
2687
+ // Under TSan, declare its raw 1-byte read annotation; otherwise compile the
+ // annotation away to nothing.
+ #ifdef ABSL_HAVE_THREAD_SANITIZER
2688
+ extern "C" void __tsan_read1(void *addr);
2689
+ #else
2690
+ #define __tsan_read1(addr) // do nothing if TSan not enabled
2691
+ #endif
2692
+
2693
+ // A function that just returns its argument, dereferenced
2694
+ static bool Dereference(void *arg) {
2695
+ // ThreadSanitizer does not instrument this file for memory accesses.
2696
+ // This function dereferences a user variable that can participate
2697
+ // in a data race, so we need to manually tell TSan about this memory access.
2698
+ __tsan_read1(arg);
2699
+ return *(static_cast<bool *>(arg));
2700
+ }
2701
+
2702
+ Condition::Condition() {} // null constructor, used for kTrue only
2703
+ // The always-true condition: its eval_ stays null, which Eval() treats as true.
+ const Condition Condition::kTrue;
2704
+
2705
+ // Condition from a free function and opaque argument; evaluation goes
+ // through the CallVoidPtrFunction trampoline.
+ Condition::Condition(bool (*func)(void *), void *arg)
2706
+ : eval_(&CallVoidPtrFunction),
2707
+ function_(func),
2708
+ method_(nullptr),
2709
+ arg_(arg) {}
2710
+
2711
+ // Trampoline: invoke the stored function pointer on the stored argument.
+ bool Condition::CallVoidPtrFunction(const Condition *c) {
2712
+ return (*c->function_)(c->arg_);
2713
+ }
2714
+
2715
+ // Condition that is true whenever *cond is true; implemented by routing
+ // through Dereference() as the stored function.
+ Condition::Condition(const bool *cond)
2716
+ : eval_(CallVoidPtrFunction),
2717
+ function_(Dereference),
2718
+ method_(nullptr),
2719
+ // const_cast is safe since Dereference does not modify arg
2720
+ arg_(const_cast<bool *>(cond)) {}
2721
+
2722
+ // Evaluate the condition.
+ bool Condition::Eval() const {
2723
+ // eval_ == null for kTrue
2724
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
2725
+ }
2726
+
2727
+ // Conservative equality: true only when the two conditions are structurally
+ // identical (same eval_/function_/arg_/method_), or both are kTrue/null.
+ // A false result does not prove inequality — hence "Guaranteed".
+ bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2728
+ if (a == nullptr) {
2729
+ return b == nullptr || b->eval_ == nullptr;
2730
+ }
2731
+ if (b == nullptr || b->eval_ == nullptr) {
2732
+ return a->eval_ == nullptr;
2733
+ }
2734
+ return a->eval_ == b->eval_ && a->function_ == b->function_ &&
2735
+ a->arg_ == b->arg_ && a->method_ == b->method_;
2736
+ }
2737
+
2738
+ ABSL_NAMESPACE_END
2739
+ } // namespace absl