grpc 1.61.0.pre2 → 1.62.0.pre1

Files changed (724)
  1. checksums.yaml +4 -4
  2. data/Makefile +218 -196
  3. data/include/grpc/event_engine/event_engine.h +5 -43
  4. data/include/grpc/event_engine/extensible.h +68 -0
  5. data/include/grpc/impl/slice_type.h +1 -1
  6. data/include/grpc/support/port_platform.h +12 -20
  7. data/src/core/{ext/filters/client_channel → client_channel}/backend_metric.cc +1 -1
  8. data/src/core/{ext/filters/client_channel → client_channel}/backend_metric.h +4 -4
  9. data/src/core/{ext/filters/client_channel → client_channel}/backup_poller.cc +1 -1
  10. data/src/core/{ext/filters/client_channel → client_channel}/backup_poller.h +3 -3
  11. data/src/core/{ext/filters/client_channel → client_channel}/channel_connectivity.cc +11 -11
  12. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_channelz.cc +1 -1
  13. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_channelz.h +3 -3
  14. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_factory.cc +1 -1
  15. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_factory.h +4 -4
  16. data/src/core/{ext/filters/client_channel/client_channel.cc → client_channel/client_channel_filter.cc} +247 -231
  17. data/src/core/{ext/filters/client_channel/client_channel.h → client_channel/client_channel_filter.h} +42 -42
  18. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_internal.h +6 -6
  19. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_plugin.cc +5 -5
  20. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_service_config.cc +2 -2
  21. data/src/core/{ext/filters/client_channel → client_channel}/client_channel_service_config.h +5 -5
  22. data/src/core/{ext/filters/client_channel → client_channel}/config_selector.cc +1 -1
  23. data/src/core/{ext/filters/client_channel → client_channel}/config_selector.h +5 -5
  24. data/src/core/{ext/filters/client_channel → client_channel}/connector.h +3 -3
  25. data/src/core/{ext/filters/client_channel → client_channel}/dynamic_filters.cc +1 -1
  26. data/src/core/{ext/filters/client_channel → client_channel}/dynamic_filters.h +3 -3
  27. data/src/core/{ext/filters/client_channel → client_channel}/global_subchannel_pool.cc +2 -2
  28. data/src/core/{ext/filters/client_channel → client_channel}/global_subchannel_pool.h +4 -4
  29. data/src/core/{ext/filters/client_channel → client_channel}/http_proxy_mapper.cc +1 -1
  30. data/src/core/{ext/filters/client_channel → client_channel}/http_proxy_mapper.h +3 -3
  31. data/src/core/{ext/filters/client_channel → client_channel}/local_subchannel_pool.cc +2 -2
  32. data/src/core/{ext/filters/client_channel → client_channel}/local_subchannel_pool.h +4 -4
  33. data/src/core/{ext/filters/client_channel → client_channel}/retry_filter.cc +8 -8
  34. data/src/core/{ext/filters/client_channel → client_channel}/retry_filter.h +8 -8
  35. data/src/core/{ext/filters/client_channel → client_channel}/retry_filter_legacy_call_data.cc +12 -9
  36. data/src/core/{ext/filters/client_channel → client_channel}/retry_filter_legacy_call_data.h +11 -10
  37. data/src/core/{ext/filters/client_channel → client_channel}/retry_service_config.cc +1 -1
  38. data/src/core/{ext/filters/client_channel → client_channel}/retry_service_config.h +4 -4
  39. data/src/core/{ext/filters/client_channel → client_channel}/retry_throttle.cc +1 -1
  40. data/src/core/{ext/filters/client_channel → client_channel}/retry_throttle.h +3 -3
  41. data/src/core/{ext/filters/client_channel → client_channel}/service_config_channel_arg_filter.cc +4 -4
  42. data/src/core/{ext/filters/client_channel → client_channel}/subchannel.cc +2 -2
  43. data/src/core/{ext/filters/client_channel → client_channel}/subchannel.h +6 -6
  44. data/src/core/{ext/filters/client_channel → client_channel}/subchannel_interface_internal.h +5 -5
  45. data/src/core/{ext/filters/client_channel → client_channel}/subchannel_pool_interface.cc +1 -1
  46. data/src/core/{ext/filters/client_channel → client_channel}/subchannel_pool_interface.h +3 -3
  47. data/src/core/{ext/filters/client_channel → client_channel}/subchannel_stream_client.cc +1 -1
  48. data/src/core/{ext/filters/client_channel → client_channel}/subchannel_stream_client.h +4 -4
  49. data/src/core/ext/filters/backend_metrics/backend_metric_filter.cc +1 -1
  50. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +1 -1
  51. data/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.h +1 -1
  52. data/src/core/ext/filters/http/message_compress/legacy_compression_filter.cc +2 -2
  53. data/src/core/ext/filters/http/server/http_server_filter.cc +1 -1
  54. data/src/core/ext/filters/message_size/message_size_filter.cc +3 -3
  55. data/src/core/ext/filters/message_size/message_size_filter.h +1 -1
  56. data/src/core/ext/filters/rbac/rbac_filter.cc +1 -1
  57. data/src/core/ext/filters/rbac/rbac_service_config_parser.h +1 -1
  58. data/src/core/ext/filters/server_config_selector/server_config_selector.h +2 -2
  59. data/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc +2 -2
  60. data/src/core/ext/filters/stateful_session/stateful_session_filter.cc +2 -2
  61. data/src/core/ext/filters/stateful_session/stateful_session_filter.h +1 -1
  62. data/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.h +1 -1
  63. data/src/core/ext/transport/chttp2/alpn/alpn.cc +4 -1
  64. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +5 -5
  65. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +1 -1
  66. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +5 -0
  67. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +3 -1
  68. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +3 -1
  69. data/src/core/ext/transport/inproc/inproc_transport.cc +20 -13
  70. data/src/core/ext/transport/inproc/inproc_transport.h +8 -0
  71. data/src/core/ext/upb-gen/envoy/config/bootstrap/v3/bootstrap.upb.h +351 -164
  72. data/src/core/ext/upb-gen/envoy/config/bootstrap/v3/bootstrap.upb_minitable.c +89 -50
  73. data/src/core/ext/upb-gen/envoy/config/bootstrap/v3/bootstrap.upb_minitable.h +2 -0
  74. data/src/core/ext/upb-gen/envoy/config/cluster/v3/cluster.upb.h +47 -3
  75. data/src/core/ext/upb-gen/envoy/config/cluster/v3/cluster.upb_minitable.c +15 -7
  76. data/src/core/ext/upb-gen/envoy/config/cluster/v3/filter.upb.h +32 -3
  77. data/src/core/ext/upb-gen/envoy/config/cluster/v3/filter.upb_minitable.c +8 -5
  78. data/src/core/ext/upb-gen/envoy/config/cluster/v3/outlier_detection.upb.h +28 -0
  79. data/src/core/ext/upb-gen/envoy/config/cluster/v3/outlier_detection.upb_minitable.c +6 -4
  80. data/src/core/ext/upb-gen/envoy/config/common/matcher/v3/matcher.upb.h +0 -1
  81. data/src/core/ext/upb-gen/envoy/config/common/matcher/v3/matcher.upb_minitable.c +0 -1
  82. data/src/core/ext/upb-gen/envoy/config/core/v3/address.upb.h +29 -0
  83. data/src/core/ext/upb-gen/envoy/config/core/v3/address.upb_minitable.c +7 -4
  84. data/src/core/ext/upb-gen/envoy/config/core/v3/base.upb.h +17 -1
  85. data/src/core/ext/upb-gen/envoy/config/core/v3/base.upb_minitable.c +4 -3
  86. data/src/core/ext/upb-gen/envoy/config/core/v3/http_service.upb.h +166 -0
  87. data/src/core/ext/upb-gen/envoy/config/core/v3/http_service.upb_minitable.c +55 -0
  88. data/src/core/ext/upb-gen/envoy/config/core/v3/http_service.upb_minitable.h +30 -0
  89. data/src/core/ext/upb-gen/envoy/config/core/v3/protocol.upb.h +30 -0
  90. data/src/core/ext/upb-gen/envoy/config/core/v3/protocol.upb_minitable.c +7 -5
  91. data/src/core/ext/upb-gen/envoy/config/core/v3/substitution_format_string.upb.h +99 -19
  92. data/src/core/ext/upb-gen/envoy/config/core/v3/substitution_format_string.upb_minitable.c +29 -12
  93. data/src/core/ext/upb-gen/envoy/config/core/v3/substitution_format_string.upb_minitable.h +1 -0
  94. data/src/core/ext/upb-gen/envoy/config/endpoint/v3/endpoint.upb.h +15 -0
  95. data/src/core/ext/upb-gen/envoy/config/endpoint/v3/endpoint.upb_minitable.c +4 -3
  96. data/src/core/ext/upb-gen/envoy/config/route/v3/route.upb.h +31 -3
  97. data/src/core/ext/upb-gen/envoy/config/route/v3/route.upb_minitable.c +22 -4
  98. data/src/core/ext/upb-gen/envoy/config/route/v3/route_components.upb.h +91 -3
  99. data/src/core/ext/upb-gen/envoy/config/route/v3/route_components.upb_minitable.c +11 -8
  100. data/src/core/ext/upb-gen/envoy/config/tap/v3/common.upb.h +30 -0
  101. data/src/core/ext/upb-gen/envoy/config/tap/v3/common.upb_minitable.c +7 -4
  102. data/src/core/ext/upb-gen/envoy/config/trace/v3/dynamic_ot.upb.h +1 -0
  103. data/src/core/ext/upb-gen/envoy/config/trace/v3/dynamic_ot.upb_minitable.c +1 -0
  104. data/src/core/ext/upb-gen/envoy/config/trace/v3/opentelemetry.upb.h +125 -3
  105. data/src/core/ext/upb-gen/envoy/config/trace/v3/opentelemetry.upb_minitable.c +17 -4
  106. data/src/core/ext/upb-gen/envoy/data/accesslog/v3/accesslog.upb.h +19 -1
  107. data/src/core/ext/upb-gen/envoy/data/accesslog/v3/accesslog.upb_minitable.c +4 -3
  108. data/src/core/ext/upb-gen/envoy/extensions/filters/http/router/v3/router.upb.h +1 -0
  109. data/src/core/ext/upb-gen/envoy/extensions/filters/http/router/v3/router.upb_minitable.c +1 -0
  110. data/src/core/ext/upb-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb.h +15 -0
  111. data/src/core/ext/upb-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb_minitable.c +5 -2
  112. data/src/core/ext/upb-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +42 -0
  113. data/src/core/ext/upb-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb_minitable.c +11 -8
  114. data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/common.upb.h +23 -8
  115. data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/common.upb_minitable.c +9 -4
  116. data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/tls.upb.h +58 -16
  117. data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/tls.upb_minitable.c +14 -11
  118. data/src/core/ext/upb-gen/envoy/service/status/v3/csds.upb.h +15 -0
  119. data/src/core/ext/upb-gen/envoy/service/status/v3/csds.upb_minitable.c +7 -2
  120. data/src/core/ext/upb-gen/envoy/type/matcher/v3/value.upb.h +129 -0
  121. data/src/core/ext/upb-gen/envoy/type/matcher/v3/value.upb_minitable.c +27 -6
  122. data/src/core/ext/upb-gen/envoy/type/matcher/v3/value.upb_minitable.h +1 -0
  123. data/src/core/ext/upb-gen/xds/type/matcher/v3/cel.upb.h +15 -0
  124. data/src/core/ext/upb-gen/xds/type/matcher/v3/cel.upb_minitable.c +5 -2
  125. data/src/core/ext/upbdefs-gen/envoy/config/accesslog/v3/accesslog.upbdefs.c +60 -60
  126. data/src/core/ext/upbdefs-gen/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +278 -256
  127. data/src/core/ext/upbdefs-gen/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +10 -0
  128. data/src/core/ext/upbdefs-gen/envoy/config/cluster/v3/cluster.upbdefs.c +483 -475
  129. data/src/core/ext/upbdefs-gen/envoy/config/cluster/v3/filter.upbdefs.c +27 -20
  130. data/src/core/ext/upbdefs-gen/envoy/config/cluster/v3/outlier_detection.upbdefs.c +17 -12
  131. data/src/core/ext/upbdefs-gen/envoy/config/common/matcher/v3/matcher.upbdefs.c +157 -161
  132. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/address.upbdefs.c +105 -97
  133. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/base.upbdefs.c +106 -102
  134. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/http_service.upbdefs.c +52 -0
  135. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/http_service.upbdefs.h +35 -0
  136. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/http_uri.upbdefs.c +14 -13
  137. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/protocol.upbdefs.c +228 -224
  138. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/substitution_format_string.upbdefs.c +32 -26
  139. data/src/core/ext/upbdefs-gen/envoy/config/core/v3/substitution_format_string.upbdefs.h +5 -0
  140. data/src/core/ext/upbdefs-gen/envoy/config/endpoint/v3/endpoint.upbdefs.c +31 -28
  141. data/src/core/ext/upbdefs-gen/envoy/config/route/v3/route.upbdefs.c +22 -19
  142. data/src/core/ext/upbdefs-gen/envoy/config/route/v3/route_components.upbdefs.c +818 -813
  143. data/src/core/ext/upbdefs-gen/envoy/config/tap/v3/common.upbdefs.c +158 -151
  144. data/src/core/ext/upbdefs-gen/envoy/config/trace/v3/dynamic_ot.upbdefs.c +27 -23
  145. data/src/core/ext/upbdefs-gen/envoy/config/trace/v3/opencensus.upbdefs.c +59 -53
  146. data/src/core/ext/upbdefs-gen/envoy/config/trace/v3/opentelemetry.upbdefs.c +40 -18
  147. data/src/core/ext/upbdefs-gen/envoy/data/accesslog/v3/accesslog.upbdefs.c +106 -103
  148. data/src/core/ext/upbdefs-gen/envoy/extensions/filters/http/router/v3/router.upbdefs.c +16 -12
  149. data/src/core/ext/upbdefs-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upbdefs.c +22 -21
  150. data/src/core/ext/upbdefs-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +265 -261
  151. data/src/core/ext/upbdefs-gen/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +127 -125
  152. data/src/core/ext/upbdefs-gen/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +188 -182
  153. data/src/core/ext/upbdefs-gen/envoy/service/status/v3/csds.upbdefs.c +57 -56
  154. data/src/core/ext/upbdefs-gen/envoy/type/matcher/v3/value.upbdefs.c +27 -20
  155. data/src/core/ext/upbdefs-gen/envoy/type/matcher/v3/value.upbdefs.h +5 -0
  156. data/src/core/ext/upbdefs-gen/xds/type/matcher/v3/cel.upbdefs.c +10 -8
  157. data/src/core/ext/xds/xds_api.cc +63 -150
  158. data/src/core/ext/xds/xds_api.h +2 -7
  159. data/src/core/ext/xds/xds_bootstrap.h +3 -4
  160. data/src/core/ext/xds/xds_bootstrap_grpc.cc +4 -15
  161. data/src/core/ext/xds/xds_bootstrap_grpc.h +2 -1
  162. data/src/core/ext/xds/xds_client.cc +111 -59
  163. data/src/core/ext/xds/xds_client.h +20 -15
  164. data/src/core/ext/xds/xds_client_grpc.cc +53 -15
  165. data/src/core/ext/xds/xds_client_grpc.h +4 -1
  166. data/src/core/ext/xds/xds_client_stats.cc +11 -11
  167. data/src/core/ext/xds/xds_client_stats.h +8 -13
  168. data/src/core/ext/xds/xds_cluster.cc +1 -1
  169. data/src/core/ext/xds/xds_cluster.h +1 -1
  170. data/src/core/ext/xds/xds_endpoint.h +1 -1
  171. data/src/core/ext/xds/xds_health_status.h +1 -1
  172. data/src/core/ext/xds/xds_lb_policy_registry.cc +1 -1
  173. data/src/core/ext/xds/xds_route_config.cc +1 -1
  174. data/src/core/ext/xds/xds_server_config_fetcher.cc +2 -2
  175. data/src/core/ext/xds/xds_transport_grpc.cc +5 -5
  176. data/src/core/lib/channel/channel_args.h +15 -1
  177. data/src/core/lib/channel/connected_channel.cc +13 -12
  178. data/src/core/lib/channel/promise_based_filter.cc +4 -4
  179. data/src/core/lib/channel/promise_based_filter.h +1 -2
  180. data/src/core/lib/config/core_configuration.h +3 -3
  181. data/src/core/lib/event_engine/ares_resolver.cc +106 -59
  182. data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc +4 -0
  183. data/src/core/lib/event_engine/extensions/can_track_errors.h +40 -0
  184. data/src/core/lib/event_engine/extensions/supports_fd.h +160 -0
  185. data/src/core/lib/event_engine/forkable.cc +7 -5
  186. data/src/core/lib/event_engine/posix.h +11 -122
  187. data/src/core/lib/event_engine/posix_engine/native_posix_dns_resolver.h +1 -5
  188. data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +31 -7
  189. data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +1 -0
  190. data/src/core/lib/event_engine/posix_engine/posix_engine.cc +3 -4
  191. data/src/core/lib/event_engine/posix_engine/posix_engine.h +2 -3
  192. data/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc +14 -6
  193. data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc +10 -0
  194. data/src/core/lib/event_engine/query_extensions.h +85 -0
  195. data/src/core/lib/event_engine/shim.cc +3 -17
  196. data/src/core/lib/event_engine/shim.h +0 -2
  197. data/src/core/lib/event_engine/thread_pool/thread_count.cc +28 -7
  198. data/src/core/lib/event_engine/thread_pool/thread_count.h +6 -1
  199. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc +109 -5
  200. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h +9 -0
  201. data/src/core/lib/event_engine/utils.cc +2 -1
  202. data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc +1 -0
  203. data/src/core/lib/event_engine/windows/native_windows_dns_resolver.cc +1 -0
  204. data/src/core/lib/experiments/config.cc +10 -2
  205. data/src/core/lib/experiments/config.h +6 -0
  206. data/src/core/lib/experiments/experiments.cc +57 -18
  207. data/src/core/lib/experiments/experiments.h +16 -8
  208. data/src/core/lib/gpr/posix/sync.cc +2 -2
  209. data/src/core/lib/gpr/posix/time.cc +0 -5
  210. data/src/core/lib/gpr/windows/sync.cc +2 -2
  211. data/src/core/lib/gprpp/debug_location.h +2 -0
  212. data/src/core/lib/gprpp/down_cast.h +49 -0
  213. data/src/core/lib/gprpp/linux/env.cc +1 -19
  214. data/src/core/lib/gprpp/load_file.cc +2 -1
  215. data/src/core/lib/gprpp/load_file.h +2 -1
  216. data/src/core/lib/gprpp/posix/thd.cc +27 -2
  217. data/src/core/lib/gprpp/thd.h +8 -0
  218. data/src/core/lib/gprpp/time.h +4 -3
  219. data/src/core/lib/gprpp/windows/directory_reader.cc +1 -0
  220. data/src/core/lib/gprpp/windows/thd.cc +10 -1
  221. data/src/core/lib/iomgr/combiner.cc +1 -1
  222. data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +20 -14
  223. data/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc +2 -2
  224. data/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc +2 -2
  225. data/src/core/lib/iomgr/tcp_server_posix.cc +65 -50
  226. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +12 -0
  227. data/src/core/lib/json/json_writer.cc +1 -1
  228. data/src/core/lib/promise/activity.h +8 -2
  229. data/src/core/lib/promise/context.h +45 -7
  230. data/src/core/lib/promise/for_each.h +6 -9
  231. data/src/core/lib/promise/interceptor_list.h +13 -5
  232. data/src/core/lib/promise/latch.h +3 -3
  233. data/src/core/lib/promise/party.cc +12 -0
  234. data/src/core/lib/promise/party.h +37 -6
  235. data/src/core/lib/promise/pipe.h +2 -7
  236. data/src/core/lib/promise/sleep.cc +1 -1
  237. data/src/core/lib/promise/status_flag.h +32 -2
  238. data/src/core/lib/resource_quota/memory_quota.cc +4 -4
  239. data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +5 -11
  240. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +11 -10
  241. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +9 -7
  242. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +1 -1
  243. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +16 -24
  244. data/src/core/lib/security/credentials/xds/xds_credentials.cc +1 -1
  245. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +1 -1
  246. data/src/core/lib/security/security_connector/load_system_roots_supported.cc +3 -7
  247. data/src/core/lib/security/security_connector/local/local_security_connector.cc +1 -1
  248. data/src/core/lib/security/security_connector/ssl_utils.cc +26 -17
  249. data/src/core/lib/security/transport/legacy_server_auth_filter.cc +2 -2
  250. data/src/core/lib/security/transport/security_handshaker.cc +0 -8
  251. data/src/core/lib/security/transport/security_handshaker.h +0 -6
  252. data/src/core/lib/security/transport/server_auth_filter.cc +2 -2
  253. data/src/core/lib/slice/slice_buffer.h +3 -1
  254. data/src/core/lib/surface/call.cc +162 -76
  255. data/src/core/lib/surface/call_trace.cc +9 -9
  256. data/src/core/lib/surface/channel.cc +15 -24
  257. data/src/core/lib/surface/channel.h +4 -20
  258. data/src/core/lib/surface/channel_init.cc +81 -7
  259. data/src/core/lib/surface/channel_init.h +104 -6
  260. data/src/core/lib/surface/init.cc +1 -1
  261. data/src/core/lib/surface/server.cc +4 -7
  262. data/src/core/lib/surface/version.cc +2 -2
  263. data/src/core/lib/surface/wait_for_cq_end_op.cc +75 -0
  264. data/src/core/lib/surface/wait_for_cq_end_op.h +4 -26
  265. data/src/core/lib/transport/batch_builder.cc +2 -3
  266. data/src/core/lib/transport/batch_builder.h +1 -1
  267. data/src/core/lib/transport/call_factory.cc +41 -0
  268. data/src/core/lib/transport/call_factory.h +56 -0
  269. data/src/core/lib/transport/call_filters.cc +371 -0
  270. data/src/core/lib/transport/call_filters.h +1500 -0
  271. data/src/core/lib/transport/call_size_estimator.cc +41 -0
  272. data/src/core/lib/transport/call_size_estimator.h +52 -0
  273. data/src/core/lib/transport/call_spine.cc +107 -0
  274. data/src/core/lib/transport/call_spine.h +429 -0
  275. data/src/core/lib/transport/handshaker.cc +0 -8
  276. data/src/core/lib/transport/handshaker.h +0 -7
  277. data/src/core/lib/transport/message.cc +45 -0
  278. data/src/core/lib/transport/message.h +61 -0
  279. data/src/core/lib/transport/metadata.cc +37 -0
  280. data/src/core/lib/transport/metadata.h +78 -0
  281. data/src/core/lib/transport/metadata_batch.cc +4 -2
  282. data/src/core/lib/transport/metadata_batch.h +2 -2
  283. data/src/core/lib/transport/transport.cc +0 -105
  284. data/src/core/lib/transport/transport.h +3 -452
  285. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/address_filtering.cc +1 -1
  286. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/address_filtering.h +4 -4
  287. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/backend_metric_data.h +3 -3
  288. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/child_policy_handler.cc +4 -4
  289. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/child_policy_handler.h +4 -4
  290. data/src/core/{lib/load_balancing → load_balancing}/delegating_helper.h +5 -5
  291. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/endpoint_list.cc +6 -6
  292. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/endpoint_list.h +6 -6
  293. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/client_load_reporting_filter.cc +2 -2
  294. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/client_load_reporting_filter.h +3 -3
  295. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb.cc +19 -19
  296. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb.h +3 -3
  297. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb_balancer_addresses.cc +1 -1
  298. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb_balancer_addresses.h +4 -4
  299. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb_client_stats.cc +1 -1
  300. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/grpclb_client_stats.h +3 -3
  301. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/load_balancer_api.cc +1 -1
  302. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/grpclb/load_balancer_api.h +4 -4
  303. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/health_check_client.cc +6 -6
  304. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/health_check_client.h +4 -4
  305. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/health_check_client_internal.h +7 -7
  306. data/src/core/{lib/load_balancing → load_balancing}/lb_policy.cc +1 -1
  307. data/src/core/{lib/load_balancing → load_balancing}/lb_policy.h +6 -6
  308. data/src/core/{lib/load_balancing → load_balancing}/lb_policy_factory.h +4 -4
  309. data/src/core/{lib/load_balancing → load_balancing}/lb_policy_registry.cc +2 -2
  310. data/src/core/{lib/load_balancing → load_balancing}/lb_policy_registry.h +5 -5
  311. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/oob_backend_metric.cc +6 -6
  312. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/oob_backend_metric.h +5 -5
  313. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/oob_backend_metric_internal.h +8 -8
  314. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/outlier_detection/outlier_detection.cc +10 -10
  315. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/outlier_detection/outlier_detection.h +3 -3
  316. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/pick_first/pick_first.cc +6 -6
  317. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/pick_first/pick_first.h +4 -4
  318. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/priority/priority.cc +8 -8
  319. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/ring_hash/ring_hash.cc +8 -8
  320. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/ring_hash/ring_hash.h +4 -4
  321. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/rls/rls.cc +13 -13
  322. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/round_robin/round_robin.cc +7 -7
  323. data/src/core/{lib/load_balancing → load_balancing}/subchannel_interface.h +3 -3
  324. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/subchannel_list.h +8 -8
  325. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/weighted_round_robin/static_stride_scheduler.cc +1 -1
  326. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/weighted_round_robin/static_stride_scheduler.h +3 -3
  327. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/weighted_round_robin/weighted_round_robin.cc +10 -10
  328. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/weighted_target/weighted_target.cc +7 -7
  329. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/cds.cc +26 -23
  330. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_channel_args.h +4 -4
  331. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_cluster_impl.cc +11 -11
  332. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_cluster_manager.cc +8 -8
  333. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_override_host.cc +10 -10
  334. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_override_host.h +4 -4
  335. data/src/core/{ext/filters/client_channel/lb_policy → load_balancing}/xds/xds_wrr_locality.cc +6 -6
  336. data/src/core/{ext/filters/client_channel/resolver → resolver}/binder/binder_resolver.cc +3 -3
  337. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/dns_resolver_ares.cc +9 -9
  338. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/dns_resolver_ares.h +3 -3
  339. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_ev_driver.h +4 -4
  340. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_ev_driver_posix.cc +2 -2
  341. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_ev_driver_windows.cc +2 -2
  342. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_wrapper.cc +2 -2
  343. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_wrapper.h +4 -4
  344. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_wrapper_posix.cc +1 -1
  345. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/c_ares/grpc_ares_wrapper_windows.cc +2 -2
  346. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/dns_resolver_plugin.cc +7 -5
  347. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/dns_resolver_plugin.h +3 -3
  348. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/event_engine/event_engine_client_channel_resolver.cc +9 -9
  349. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/event_engine/event_engine_client_channel_resolver.h +5 -5
  350. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/event_engine/service_config_helper.cc +1 -1
  351. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/event_engine/service_config_helper.h +3 -3
  352. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/native/dns_resolver.cc +4 -4
  353. data/src/core/{ext/filters/client_channel/resolver → resolver}/dns/native/dns_resolver.h +3 -3
  354. data/src/core/{lib/resolver → resolver}/endpoint_addresses.cc +1 -1
  355. data/src/core/{lib/resolver → resolver}/endpoint_addresses.h +3 -3
  356. data/src/core/{ext/filters/client_channel/resolver → resolver}/fake/fake_resolver.cc +2 -2
  357. data/src/core/{ext/filters/client_channel/resolver → resolver}/fake/fake_resolver.h +4 -4
  358. data/src/core/{ext/filters/client_channel/resolver → resolver}/google_c2p/google_c2p_resolver.cc +3 -3
  359. data/src/core/{ext/filters/client_channel/resolver → resolver}/polling_resolver.cc +3 -3
  360. data/src/core/{ext/filters/client_channel/resolver → resolver}/polling_resolver.h +5 -5
  361. data/src/core/{lib/resolver → resolver}/resolver.cc +1 -1
  362. data/src/core/{lib/resolver → resolver}/resolver.h +6 -6
  363. data/src/core/{lib/resolver → resolver}/resolver_factory.h +4 -4
  364. data/src/core/{lib/resolver → resolver}/resolver_registry.cc +1 -1
  365. data/src/core/{lib/resolver → resolver}/resolver_registry.h +5 -5
  366. data/src/core/{lib/resolver → resolver}/server_address.h +4 -4
  367. data/src/core/{ext/filters/client_channel/resolver → resolver}/sockaddr/sockaddr_resolver.cc +3 -3
  368. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_dependency_manager.cc +4 -4
  369. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_dependency_manager.h +4 -4
  370. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_resolver.cc +11 -11
  371. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_resolver_attributes.h +4 -4
  372. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_resolver_trace.cc +1 -1
  373. data/src/core/{ext/filters/client_channel/resolver → resolver}/xds/xds_resolver_trace.h +3 -3
  374. data/src/core/{lib/service_config → service_config}/service_config.h +4 -4
  375. data/src/core/{lib/service_config → service_config}/service_config_call_data.h +5 -5
  376. data/src/core/{lib/service_config → service_config}/service_config_impl.cc +2 -2
  377. data/src/core/{lib/service_config → service_config}/service_config_impl.h +5 -5
  378. data/src/core/{lib/service_config → service_config}/service_config_parser.cc +1 -1
  379. data/src/core/{lib/service_config → service_config}/service_config_parser.h +3 -3
  380. data/src/core/tsi/fake_transport_security.cc +1 -1
  381. data/src/ruby/ext/grpc/extconf.rb +0 -1
  382. data/src/ruby/ext/grpc/rb_channel.c +11 -5
  383. data/src/ruby/ext/grpc/rb_event_thread.c +9 -3
  384. data/src/ruby/lib/grpc/version.rb +1 -1
  385. data/third_party/abseil-cpp/absl/algorithm/algorithm.h +8 -103
  386. data/third_party/abseil-cpp/absl/algorithm/container.h +57 -71
  387. data/third_party/abseil-cpp/absl/base/attributes.h +51 -12
  388. data/third_party/abseil-cpp/absl/base/call_once.h +15 -9
  389. data/third_party/abseil-cpp/absl/base/casts.h +1 -1
  390. data/third_party/abseil-cpp/absl/base/config.h +91 -24
  391. data/third_party/abseil-cpp/absl/base/internal/endian.h +13 -12
  392. data/third_party/abseil-cpp/absl/base/internal/identity.h +4 -2
  393. data/third_party/abseil-cpp/absl/base/internal/inline_variable.h +19 -18
  394. data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc +1 -1
  395. data/third_party/abseil-cpp/absl/base/internal/nullability_impl.h +106 -0
  396. data/third_party/abseil-cpp/absl/base/internal/raw_logging.cc +9 -11
  397. data/third_party/abseil-cpp/absl/base/internal/raw_logging.h +2 -0
  398. data/third_party/abseil-cpp/absl/base/internal/spinlock.h +17 -4
  399. data/third_party/abseil-cpp/absl/base/internal/sysinfo.cc +20 -0
  400. data/third_party/abseil-cpp/absl/base/internal/thread_identity.cc +10 -4
  401. data/third_party/abseil-cpp/absl/base/internal/unaligned_access.h +13 -6
  402. data/third_party/abseil-cpp/absl/base/log_severity.cc +1 -0
  403. data/third_party/abseil-cpp/absl/base/log_severity.h +23 -10
  404. data/third_party/abseil-cpp/absl/base/no_destructor.h +217 -0
  405. data/third_party/abseil-cpp/absl/base/nullability.h +224 -0
  406. data/third_party/abseil-cpp/absl/base/optimization.h +1 -0
  407. data/third_party/abseil-cpp/absl/base/options.h +27 -1
  408. data/third_party/abseil-cpp/absl/base/prefetch.h +25 -14
  409. data/third_party/abseil-cpp/absl/base/thread_annotations.h +0 -2
  410. data/third_party/abseil-cpp/absl/container/flat_hash_map.h +3 -3
  411. data/third_party/abseil-cpp/absl/container/flat_hash_set.h +1 -1
  412. data/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h +4 -2
  413. data/third_party/abseil-cpp/absl/container/internal/container_memory.h +13 -9
  414. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h +2 -12
  415. data/third_party/abseil-cpp/absl/container/internal/inlined_vector.h +12 -1
  416. data/third_party/abseil-cpp/absl/container/internal/layout.h +6 -21
  417. data/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h +11 -2
  418. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +148 -31
  419. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +717 -278
  420. data/third_party/abseil-cpp/absl/crc/internal/cpu_detect.cc +26 -2
  421. data/third_party/abseil-cpp/absl/crc/internal/cpu_detect.h +6 -0
  422. data/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h +34 -5
  423. data/third_party/abseil-cpp/absl/crc/internal/crc_memcpy.h +6 -3
  424. data/third_party/abseil-cpp/absl/crc/internal/crc_memcpy_fallback.cc +4 -2
  425. data/third_party/abseil-cpp/absl/crc/internal/{crc_memcpy_x86_64.cc → crc_memcpy_x86_arm_combined.cc} +65 -47
  426. data/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc +10 -2
  427. data/third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc +4 -2
  428. data/third_party/abseil-cpp/absl/debugging/internal/demangle.cc +24 -0
  429. data/third_party/abseil-cpp/absl/debugging/internal/demangle.h +35 -33
  430. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +41 -17
  431. data/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +108 -44
  432. data/third_party/abseil-cpp/absl/flags/declare.h +0 -5
  433. data/third_party/abseil-cpp/absl/flags/flag.h +1 -10
  434. data/third_party/abseil-cpp/absl/flags/internal/flag.h +0 -5
  435. data/third_party/abseil-cpp/absl/flags/marshalling.cc +10 -1
  436. data/third_party/abseil-cpp/absl/flags/reflection.cc +2 -1
  437. data/third_party/abseil-cpp/absl/functional/function_ref.h +8 -0
  438. data/third_party/abseil-cpp/absl/functional/internal/any_invocable.h +2 -2
  439. data/third_party/abseil-cpp/absl/hash/internal/hash.h +49 -2
  440. data/third_party/abseil-cpp/absl/numeric/bits.h +37 -18
  441. data/third_party/abseil-cpp/absl/random/distributions.h +1 -1
  442. data/third_party/abseil-cpp/absl/status/internal/status_internal.cc +248 -0
  443. data/third_party/abseil-cpp/absl/status/internal/status_internal.h +55 -14
  444. data/third_party/abseil-cpp/absl/status/internal/statusor_internal.h +53 -2
  445. data/third_party/abseil-cpp/absl/status/status.cc +36 -238
  446. data/third_party/abseil-cpp/absl/status/status.h +95 -53
  447. data/third_party/abseil-cpp/absl/status/status_payload_printer.cc +1 -3
  448. data/third_party/abseil-cpp/absl/status/status_payload_printer.h +3 -2
  449. data/third_party/abseil-cpp/absl/status/statusor.cc +5 -2
  450. data/third_party/abseil-cpp/absl/status/statusor.h +43 -3
  451. data/third_party/abseil-cpp/absl/strings/ascii.cc +84 -12
  452. data/third_party/abseil-cpp/absl/strings/ascii.h +8 -6
  453. data/third_party/abseil-cpp/absl/strings/charconv.cc +19 -12
  454. data/third_party/abseil-cpp/absl/strings/charconv.h +6 -3
  455. data/third_party/abseil-cpp/absl/strings/charset.h +164 -0
  456. data/third_party/abseil-cpp/absl/strings/cord.cc +266 -69
  457. data/third_party/abseil-cpp/absl/strings/cord.h +138 -92
  458. data/third_party/abseil-cpp/absl/strings/cord_analysis.cc +19 -33
  459. data/third_party/abseil-cpp/absl/strings/cord_analysis.h +4 -3
  460. data/third_party/abseil-cpp/absl/strings/escaping.cc +5 -4
  461. data/third_party/abseil-cpp/absl/strings/has_absl_stringify.h +63 -0
  462. data/third_party/abseil-cpp/absl/strings/has_ostream_operator.h +42 -0
  463. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc +0 -6
  464. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +19 -45
  465. data/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc +23 -28
  466. data/third_party/abseil-cpp/absl/strings/internal/has_absl_stringify.h +15 -26
  467. data/third_party/abseil-cpp/absl/strings/internal/memutil.cc +12 -4
  468. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +145 -8
  469. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +72 -24
  470. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc +17 -1
  471. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h +7 -4
  472. data/third_party/abseil-cpp/absl/strings/internal/str_format/constexpr_parser.h +8 -3
  473. data/third_party/abseil-cpp/absl/strings/internal/str_format/extension.h +10 -4
  474. data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h +5 -4
  475. data/third_party/abseil-cpp/absl/strings/match.cc +3 -0
  476. data/third_party/abseil-cpp/absl/strings/numbers.cc +396 -153
  477. data/third_party/abseil-cpp/absl/strings/numbers.h +193 -35
  478. data/third_party/abseil-cpp/absl/strings/str_cat.cc +151 -21
  479. data/third_party/abseil-cpp/absl/strings/str_cat.h +127 -25
  480. data/third_party/abseil-cpp/absl/strings/str_format.h +30 -20
  481. data/third_party/abseil-cpp/absl/strings/str_join.h +16 -16
  482. data/third_party/abseil-cpp/absl/strings/str_replace.cc +12 -3
  483. data/third_party/abseil-cpp/absl/strings/str_replace.h +8 -5
  484. data/third_party/abseil-cpp/absl/strings/str_split.cc +8 -6
  485. data/third_party/abseil-cpp/absl/strings/str_split.h +18 -0
  486. data/third_party/abseil-cpp/absl/strings/string_view.cc +26 -5
  487. data/third_party/abseil-cpp/absl/strings/string_view.h +91 -26
  488. data/third_party/abseil-cpp/absl/strings/strip.h +5 -2
  489. data/third_party/abseil-cpp/absl/strings/substitute.cc +12 -4
  490. data/third_party/abseil-cpp/absl/strings/substitute.h +103 -91
  491. data/third_party/abseil-cpp/absl/synchronization/internal/pthread_waiter.h +2 -2
  492. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.h +2 -0
  493. data/third_party/abseil-cpp/absl/synchronization/internal/win32_waiter.h +4 -2
  494. data/third_party/abseil-cpp/absl/synchronization/mutex.cc +296 -332
  495. data/third_party/abseil-cpp/absl/synchronization/mutex.h +89 -34
  496. data/third_party/abseil-cpp/absl/time/civil_time.h +26 -0
  497. data/third_party/abseil-cpp/absl/time/clock.h +5 -1
  498. data/third_party/abseil-cpp/absl/time/duration.cc +3 -3
  499. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +2 -2
  500. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc +1 -1
  501. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc +9 -14
  502. data/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +0 -8
  503. data/third_party/abseil-cpp/absl/types/bad_optional_access.cc +18 -0
  504. data/third_party/abseil-cpp/absl/types/bad_variant_access.cc +18 -0
  505. data/third_party/abseil-cpp/absl/types/internal/variant.h +3 -3
  506. data/third_party/abseil-cpp/absl/types/optional.h +3 -2
  507. data/third_party/abseil-cpp/absl/types/span.h +9 -4
  508. data/third_party/abseil-cpp/absl/utility/utility.h +11 -93
  509. data/third_party/boringssl-with-bazel/err_data.c +278 -276
  510. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c +1 -1
  511. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c +9 -9
  512. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c +8 -21
  513. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c +1 -1
  514. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c +19 -1
  515. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c +1 -1
  516. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c +11 -3
  517. data/third_party/boringssl-with-bazel/src/crypto/asn1/internal.h +4 -1
  518. data/third_party/boringssl-with-bazel/src/crypto/asn1/posix_time.c +1 -1
  519. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c +3 -3
  520. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c +1 -6
  521. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c +4 -13
  522. data/third_party/boringssl-with-bazel/src/crypto/base64/base64.c +1 -6
  523. data/third_party/boringssl-with-bazel/src/crypto/bio/bio.c +27 -4
  524. data/third_party/boringssl-with-bazel/src/crypto/bio/connect.c +1 -4
  525. data/third_party/boringssl-with-bazel/src/crypto/bio/pair.c +1 -4
  526. data/third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c +8 -0
  527. data/third_party/boringssl-with-bazel/src/crypto/buf/buf.c +1 -11
  528. data/third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c +7 -8
  529. data/third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c +42 -12
  530. data/third_party/boringssl-with-bazel/src/crypto/bytestring/internal.h +0 -22
  531. data/third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c +9 -9
  532. data/third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c +34 -1
  533. data/third_party/boringssl-with-bazel/src/crypto/chacha/internal.h +49 -3
  534. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c +30 -42
  535. data/third_party/boringssl-with-bazel/src/crypto/conf/conf.c +87 -96
  536. data/third_party/boringssl-with-bazel/src/crypto/conf/internal.h +5 -1
  537. data/third_party/boringssl-with-bazel/src/crypto/cpu_intel.c +4 -2
  538. data/third_party/boringssl-with-bazel/src/crypto/crypto.c +11 -0
  539. data/third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c +4 -0
  540. data/third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c +1 -2
  541. data/third_party/boringssl-with-bazel/src/crypto/des/des.c +105 -31
  542. data/third_party/boringssl-with-bazel/src/crypto/des/internal.h +10 -81
  543. data/third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c +2 -15
  544. data/third_party/boringssl-with-bazel/src/crypto/engine/engine.c +1 -9
  545. data/third_party/boringssl-with-bazel/src/crypto/evp/evp.c +1 -5
  546. data/third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c +2 -5
  547. data/third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c +1 -4
  548. data/third_party/boringssl-with-bazel/src/crypto/evp/p_hkdf.c +1 -2
  549. data/third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c +1 -3
  550. data/third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c +2 -2
  551. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/add.c +2 -8
  552. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/bn.c +1 -1
  553. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/bytes.c +26 -17
  554. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/ctx.c +1 -1
  555. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/exponentiation.c +4 -2
  556. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/gcd.c +26 -5
  557. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/generic.c +10 -41
  558. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/internal.h +49 -2
  559. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/montgomery.c +26 -0
  560. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/montgomery_inv.c +27 -26
  561. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/mul.c +2 -6
  562. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/prime.c +1 -8
  563. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/random.c +8 -2
  564. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/cipher.c +11 -2
  565. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/e_aes.c +11 -24
  566. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/e_aesccm.c +43 -50
  567. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/dh/dh.c +2 -6
  568. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/digest.c +4 -0
  569. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec.c +1 -2
  570. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec_key.c +16 -9
  571. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/p224-64.c +7 -6
  572. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/wnaf.c +2 -7
  573. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/fork_detect.c +51 -13
  574. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/fork_detect.h +17 -0
  575. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/rand.c +5 -2
  576. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/blinding.c +1 -2
  577. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa.c +1 -3
  578. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa_impl.c +6 -5
  579. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/self_check/fips.c +1 -2
  580. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/internal.h +153 -6
  581. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha1.c +87 -7
  582. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha256.c +39 -5
  583. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha512.c +32 -5
  584. data/third_party/boringssl-with-bazel/src/crypto/internal.h +254 -54
  585. data/third_party/boringssl-with-bazel/src/crypto/keccak/internal.h +70 -0
  586. data/third_party/boringssl-with-bazel/src/crypto/{kyber → keccak}/keccak.c +124 -49
  587. data/third_party/boringssl-with-bazel/src/crypto/kyber/internal.h +8 -39
  588. data/third_party/boringssl-with-bazel/src/crypto/kyber/kyber.c +39 -29
  589. data/third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c +3 -6
  590. data/third_party/boringssl-with-bazel/src/crypto/mem.c +17 -33
  591. data/third_party/boringssl-with-bazel/src/crypto/obj/obj.c +36 -16
  592. data/third_party/boringssl-with-bazel/src/crypto/obj/obj_dat.h +0 -3
  593. data/third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c +31 -0
  594. data/third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c +2 -4
  595. data/third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c +3 -3
  596. data/third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c +9 -13
  597. data/third_party/boringssl-with-bazel/src/crypto/pool/pool.c +3 -6
  598. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c +4 -0
  599. data/third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_crypt.c +3 -1
  600. data/third_party/boringssl-with-bazel/src/crypto/spx/address.c +101 -0
  601. data/third_party/boringssl-with-bazel/src/crypto/spx/address.h +50 -0
  602. data/third_party/boringssl-with-bazel/src/crypto/spx/fors.c +133 -0
  603. data/third_party/boringssl-with-bazel/src/crypto/spx/fors.h +54 -0
  604. data/third_party/boringssl-with-bazel/src/crypto/spx/internal.h +79 -0
  605. data/third_party/boringssl-with-bazel/src/crypto/spx/merkle.c +150 -0
  606. data/third_party/boringssl-with-bazel/src/crypto/spx/merkle.h +61 -0
  607. data/third_party/boringssl-with-bazel/src/crypto/spx/params.h +71 -0
  608. data/third_party/boringssl-with-bazel/src/crypto/spx/spx.c +139 -0
  609. data/third_party/boringssl-with-bazel/src/crypto/spx/spx_util.c +53 -0
  610. data/third_party/boringssl-with-bazel/src/crypto/spx/spx_util.h +44 -0
  611. data/third_party/boringssl-with-bazel/src/crypto/spx/thash.c +136 -0
  612. data/third_party/boringssl-with-bazel/src/crypto/spx/thash.h +70 -0
  613. data/third_party/boringssl-with-bazel/src/crypto/spx/wots.c +135 -0
  614. data/third_party/boringssl-with-bazel/src/crypto/spx/wots.h +45 -0
  615. data/third_party/boringssl-with-bazel/src/crypto/stack/stack.c +4 -9
  616. data/third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c +10 -22
  617. data/third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c +3 -6
  618. data/third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c +12 -36
  619. data/third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c +1 -2
  620. data/third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c +0 -2
  621. data/third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c +14 -9
  622. data/third_party/boringssl-with-bazel/src/crypto/x509/by_file.c +23 -33
  623. data/third_party/boringssl-with-bazel/src/crypto/x509/internal.h +225 -51
  624. data/third_party/boringssl-with-bazel/src/crypto/x509/policy.c +2 -6
  625. data/third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c +6 -2
  626. data/third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c +1 -1
  627. data/third_party/boringssl-with-bazel/src/crypto/x509/t_req.c +1 -4
  628. data/third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c +1 -3
  629. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_akey.c +1 -1
  630. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_akeya.c +3 -1
  631. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_alt.c +5 -6
  632. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_bcons.c +1 -1
  633. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_bitst.c +1 -1
  634. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_conf.c +0 -2
  635. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_cpols.c +1 -1
  636. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_crld.c +1 -2
  637. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_enum.c +1 -0
  638. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_extku.c +1 -1
  639. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_genn.c +12 -12
  640. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_ia5.c +1 -1
  641. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_info.c +4 -6
  642. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_int.c +1 -1
  643. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_lib.c +3 -2
  644. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_ncons.c +2 -2
  645. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_ocsp.c +1 -1
  646. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_pcons.c +1 -1
  647. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_pmaps.c +1 -1
  648. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_prn.c +3 -4
  649. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_purp.c +92 -335
  650. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_skey.c +1 -2
  651. data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/v3_utl.c +20 -18
  652. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c +35 -32
  653. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c +44 -59
  654. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c +0 -1
  655. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c +107 -255
  656. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c +32 -20
  657. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c +25 -152
  658. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c +0 -1
  659. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c +330 -944
  660. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c +93 -215
  661. data/third_party/boringssl-with-bazel/src/crypto/x509/x509name.c +28 -6
  662. data/third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c +1 -1
  663. data/third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c +35 -129
  664. data/third_party/boringssl-with-bazel/src/crypto/x509/x_name.c +7 -8
  665. data/third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c +46 -50
  666. data/third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c +2 -0
  667. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c +1 -4
  668. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c +6 -6
  669. data/third_party/boringssl-with-bazel/src/include/openssl/arm_arch.h +0 -21
  670. data/third_party/boringssl-with-bazel/src/include/openssl/asm_base.h +5 -6
  671. data/third_party/boringssl-with-bazel/src/include/openssl/base.h +3 -1
  672. data/third_party/boringssl-with-bazel/src/include/openssl/bio.h +24 -0
  673. data/third_party/boringssl-with-bazel/src/include/openssl/bn.h +14 -5
  674. data/third_party/boringssl-with-bazel/src/include/openssl/bytestring.h +22 -0
  675. data/third_party/boringssl-with-bazel/src/include/openssl/cipher.h +1 -0
  676. data/third_party/boringssl-with-bazel/src/include/openssl/conf.h +4 -1
  677. data/third_party/boringssl-with-bazel/src/include/openssl/curve25519.h +2 -2
  678. data/third_party/boringssl-with-bazel/src/include/openssl/des.h +0 -13
  679. data/third_party/boringssl-with-bazel/src/include/openssl/ec.h +33 -11
  680. data/third_party/boringssl-with-bazel/src/include/openssl/evp.h +1 -1
  681. data/third_party/boringssl-with-bazel/src/include/openssl/ex_data.h +5 -4
  682. data/third_party/boringssl-with-bazel/src/include/openssl/kyber.h +26 -18
  683. data/third_party/boringssl-with-bazel/src/include/openssl/mem.h +13 -6
  684. data/third_party/boringssl-with-bazel/src/include/openssl/obj.h +5 -1
  685. data/third_party/boringssl-with-bazel/src/include/openssl/opensslconf.h +1 -0
  686. data/third_party/boringssl-with-bazel/src/include/openssl/pem.h +19 -5
  687. data/third_party/boringssl-with-bazel/src/include/openssl/posix_time.h +45 -0
  688. data/third_party/boringssl-with-bazel/src/include/openssl/rand.h +5 -0
  689. data/third_party/boringssl-with-bazel/src/include/openssl/sha.h +20 -3
  690. data/third_party/boringssl-with-bazel/src/include/openssl/span.h +18 -20
  691. data/third_party/boringssl-with-bazel/src/include/openssl/ssl.h +76 -60
  692. data/third_party/boringssl-with-bazel/src/include/openssl/target.h +31 -6
  693. data/third_party/boringssl-with-bazel/src/include/openssl/time.h +3 -22
  694. data/third_party/boringssl-with-bazel/src/include/openssl/tls1.h +2 -1
  695. data/third_party/boringssl-with-bazel/src/include/openssl/x509.h +2806 -941
  696. data/third_party/boringssl-with-bazel/src/include/openssl/x509v3.h +38 -1025
  697. data/third_party/boringssl-with-bazel/src/include/openssl/x509v3_errors.h +124 -0
  698. data/third_party/boringssl-with-bazel/src/ssl/d1_both.cc +1 -2
  699. data/third_party/boringssl-with-bazel/src/ssl/extensions.cc +82 -9
  700. data/third_party/boringssl-with-bazel/src/ssl/handoff.cc +42 -4
  701. data/third_party/boringssl-with-bazel/src/ssl/internal.h +4 -0
  702. data/third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc +4 -5
  703. data/third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc +9 -1
  704. data/third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc +0 -1
  705. data/third_party/boringssl-with-bazel/src/ssl/tls13_client.cc +5 -1
  706. data/third_party/boringssl-with-bazel/src/ssl/tls13_server.cc +5 -1
  707. data/third_party/boringssl-with-bazel/src/third_party/fiat/curve25519_64_adx.h +4 -2
  708. data/third_party/boringssl-with-bazel/src/third_party/fiat/p256_64.h +21 -0
  709. data/third_party/cares/config_linux/ares_config.h +2 -38
  710. metadata +214 -179
  711. data/src/core/lib/iomgr/load_file.cc +0 -78
  712. data/src/core/lib/iomgr/load_file.h +0 -35
  713. data/third_party/abseil-cpp/absl/base/internal/prefetch.h +0 -137
  714. data/third_party/abseil-cpp/absl/base/internal/thread_annotations.h +0 -280
  715. data/third_party/abseil-cpp/absl/flags/flag.cc +0 -38
  716. data/third_party/abseil-cpp/absl/flags/internal/flag_msvc.inc +0 -116
  717. data/third_party/abseil-cpp/absl/strings/internal/char_map.h +0 -158
  718. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc +0 -773
  719. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h +0 -607
  720. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h +0 -118
  721. data/third_party/boringssl-with-bazel/src/crypto/x509/x_info.c +0 -100
  722. data/third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c +0 -111
  723. data/third_party/boringssl-with-bazel/src/crypto/x509v3/internal.h +0 -197
  724. /data/third_party/boringssl-with-bazel/src/crypto/{x509v3 → x509}/ext_dat.h +0 -0
@@ -62,6 +62,9 @@
62
62
  // pseudo-struct:
63
63
  //
64
64
  // struct BackingArray {
65
+ // // Sampling handler. This field isn't present when sampling is
66
+ // // disabled or this allocation hasn't been selected for sampling.
67
+ // HashtablezInfoHandle infoz_;
65
68
  // // The number of elements we can insert before growing the capacity.
66
69
  // size_t growth_left;
67
70
  // // Control bytes for the "real" slots.
@@ -175,25 +178,29 @@
175
178
  #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
176
179
 
177
180
  #include <algorithm>
181
+ #include <cassert>
178
182
  #include <cmath>
179
183
  #include <cstddef>
180
184
  #include <cstdint>
181
185
  #include <cstring>
186
+ #include <initializer_list>
182
187
  #include <iterator>
183
188
  #include <limits>
184
189
  #include <memory>
185
- #include <string>
186
190
  #include <tuple>
187
191
  #include <type_traits>
188
192
  #include <utility>
189
193
 
194
+ #include "absl/base/attributes.h"
190
195
  #include "absl/base/config.h"
191
196
  #include "absl/base/internal/endian.h"
192
197
  #include "absl/base/internal/raw_logging.h"
198
+ #include "absl/base/macros.h"
193
199
  #include "absl/base/optimization.h"
200
+ #include "absl/base/options.h"
194
201
  #include "absl/base/port.h"
195
202
  #include "absl/base/prefetch.h"
196
- #include "absl/container/internal/common.h"
203
+ #include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle
197
204
  #include "absl/container/internal/compressed_tuple.h"
198
205
  #include "absl/container/internal/container_memory.h"
199
206
  #include "absl/container/internal/hash_policy_traits.h"
@@ -227,6 +234,7 @@ namespace container_internal {
227
234
  #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
228
235
  #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
229
236
  #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
237
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
230
238
  defined(ABSL_HAVE_MEMORY_SANITIZER)
231
239
  // When compiled in sanitizer mode, we add generation integers to the backing
232
240
  // array and iterators. In the backing array, we store the generation between
@@ -262,8 +270,21 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs,
262
270
  swap(lhs, rhs);
263
271
  }
264
272
  template <typename AllocType>
265
- void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
266
- std::false_type /* propagate_on_container_swap */) {}
273
+ void SwapAlloc(AllocType& lhs, AllocType& rhs,
274
+ std::false_type /* propagate_on_container_swap */) {
275
+ (void)lhs;
276
+ (void)rhs;
277
+ assert(lhs == rhs &&
278
+ "It's UB to call swap with unequal non-propagating allocators.");
279
+ }
280
+
281
+ template <typename AllocType>
282
+ void CopyAlloc(AllocType& lhs, AllocType& rhs,
283
+ std::true_type /* propagate_alloc */) {
284
+ lhs = rhs;
285
+ }
286
+ template <typename AllocType>
287
+ void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
267
288
 
268
289
  // The state for a probe sequence.
269
290
  //
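A side note on this hunk: the new `CopyAlloc` helpers and the reworked `SwapAlloc` overloads are selected by tag dispatch on the allocator's propagation traits. Below is a minimal standalone sketch of the same pattern (not part of the diff; the names are ours):

    #include <memory>
    #include <type_traits>

    // Copy the allocator on container copy-assignment only when its
    // propagate_on_container_copy_assignment trait says so.
    template <typename AllocType>
    void CopyAllocDemo(AllocType& lhs, AllocType& rhs, std::true_type) {
      lhs = rhs;
    }
    template <typename AllocType>
    void CopyAllocDemo(AllocType&, AllocType&, std::false_type) {}

    template <typename Alloc>
    void copy_assign_allocs(Alloc& lhs, Alloc& rhs) {
      CopyAllocDemo(lhs, rhs,
                    typename std::allocator_traits<
                        Alloc>::propagate_on_container_copy_assignment());
    }

    int main() {
      std::allocator<int> a, b;
      copy_assign_allocs(a, b);  // no-op: std::allocator does not propagate
      return 0;
    }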
@@ -361,7 +382,7 @@ uint32_t TrailingZeros(T x) {
361
382
  // width of an abstract bit in the representation.
362
383
  // This mask provides operations for any number of real bits set in an abstract
363
384
  // bit. To add iteration on top of that, implementation must guarantee no more
364
- // than one real bit is set in an abstract bit.
385
+ // than the most significant real bit is set in a set abstract bit.
365
386
  template <class T, int SignificantBits, int Shift = 0>
366
387
  class NonIterableBitMask {
367
388
  public:
@@ -388,7 +409,9 @@ class NonIterableBitMask {
388
409
  uint32_t LeadingZeros() const {
389
410
  constexpr int total_significant_bits = SignificantBits << Shift;
390
411
  constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
391
- return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
412
+ return static_cast<uint32_t>(
413
+ countl_zero(static_cast<T>(mask_ << extra_bits))) >>
414
+ Shift;
392
415
  }
393
416
 
394
417
  T mask_;
@@ -418,6 +441,10 @@ class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
418
441
  using const_iterator = BitMask;
419
442
 
420
443
  BitMask& operator++() {
444
+ if (Shift == 3) {
445
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
446
+ this->mask_ &= msbs;
447
+ }
421
448
  this->mask_ &= (this->mask_ - 1);
422
449
  return *this;
423
450
  }
@@ -590,29 +617,39 @@ struct GroupSse2Impl {
590
617
  }
591
618
 
592
619
  // Returns a bitmask representing the positions of slots that match hash.
593
- BitMask<uint32_t, kWidth> Match(h2_t hash) const {
620
+ BitMask<uint16_t, kWidth> Match(h2_t hash) const {
594
621
  auto match = _mm_set1_epi8(static_cast<char>(hash));
595
- return BitMask<uint32_t, kWidth>(
596
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
622
+ BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
623
+ result = BitMask<uint16_t, kWidth>(
624
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
625
+ return result;
597
626
  }
598
627
 
599
628
  // Returns a bitmask representing the positions of empty slots.
600
- NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
629
+ NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
601
630
  #ifdef ABSL_INTERNAL_HAVE_SSSE3
602
631
  // This only works because ctrl_t::kEmpty is -128.
603
- return NonIterableBitMask<uint32_t, kWidth>(
604
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
632
+ return NonIterableBitMask<uint16_t, kWidth>(
633
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
605
634
  #else
606
635
  auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
607
- return NonIterableBitMask<uint32_t, kWidth>(
608
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
636
+ return NonIterableBitMask<uint16_t, kWidth>(
637
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
609
638
  #endif
610
639
  }
611
640
 
641
+ // Returns a bitmask representing the positions of full slots.
642
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
643
+ // original and mirrored.
644
+ BitMask<uint16_t, kWidth> MaskFull() const {
645
+ return BitMask<uint16_t, kWidth>(
646
+ static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
647
+ }
648
+
612
649
  // Returns a bitmask representing the positions of empty or deleted slots.
613
- NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
650
+ NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
614
651
  auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
615
- return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
652
+ return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
616
653
  _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
617
654
  }
618
655
 
@@ -651,9 +688,8 @@ struct GroupAArch64Impl {
651
688
  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
652
689
  uint8x8_t dup = vdup_n_u8(hash);
653
690
  auto mask = vceq_u8(ctrl, dup);
654
- constexpr uint64_t msbs = 0x8080808080808080ULL;
655
691
  return BitMask<uint64_t, kWidth, 3>(
656
- vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
692
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0));
657
693
  }
658
694
 
659
695
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
@@ -665,6 +701,17 @@ struct GroupAArch64Impl {
665
701
  return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
666
702
  }
667
703
 
704
+ // Returns a bitmask representing the positions of full slots.
705
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
706
+ // original and mirrored.
707
+ BitMask<uint64_t, kWidth, 3> MaskFull() const {
708
+ uint64_t mask = vget_lane_u64(
709
+ vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
710
+ vdup_n_s8(static_cast<int8_t>(0)))),
711
+ 0);
712
+ return BitMask<uint64_t, kWidth, 3>(mask);
713
+ }
714
+
668
715
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
669
716
  uint64_t mask =
670
717
  vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
@@ -729,13 +776,21 @@ struct GroupPortableImpl {
729
776
 
730
777
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
731
778
  constexpr uint64_t msbs = 0x8080808080808080ULL;
732
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
779
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
733
780
  msbs);
734
781
  }
735
782
 
783
+ // Returns a bitmask representing the positions of full slots.
784
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
785
+ // original and mirrored.
786
+ BitMask<uint64_t, kWidth, 3> MaskFull() const {
787
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
788
+ return BitMask<uint64_t, kWidth, 3>((ctrl ^ msbs) & msbs);
789
+ }
790
+
736
791
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
737
792
  constexpr uint64_t msbs = 0x8080808080808080ULL;
738
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
793
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
739
794
  msbs);
740
795
  }
741
796
 
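A note for this hunk: the portable `GroupPortableImpl` masks are byte-wise bit tricks over the eight control bytes. Below is a small self-contained sketch (not part of the diff); the sample control word and the expected masks are our own example, while the byte encodings 0x80 = kEmpty, 0xFE = kDeleted, 0xFF = kSentinel and 0x00..0x7F = full match the `ctrl_t` values defined elsewhere in this header.

    #include <cassert>
    #include <cstdint>

    int main() {
      // Eight control bytes packed little-endian, slot 0 in the lowest byte:
      // slot 0 full (H2 = 0x12), slot 1 empty, slot 2 deleted, slot 3 sentinel,
      // slots 4..7 empty.
      const uint64_t ctrl = 0x80808080FFFE8012ULL;
      const uint64_t msbs = 0x8080808080808080ULL;

      const uint64_t mask_empty            = (ctrl & ~(ctrl << 6)) & msbs;
      const uint64_t mask_full             = (ctrl ^ msbs) & msbs;
      const uint64_t mask_empty_or_deleted = (ctrl & ~(ctrl << 7)) & msbs;

      assert(mask_empty == 0x8080808000008000ULL);             // slots 1, 4..7
      assert(mask_full == 0x0000000000000080ULL);              // slot 0 only
      assert(mask_empty_or_deleted == 0x8080808000808000ULL);  // slots 1, 2, 4..7
      return 0;
    }

Each mask keeps only the most significant bit of every byte, which is what the `NonIterableBitMask<uint64_t, kWidth, 3>` and `BitMask<uint64_t, kWidth, 3>` wrappers above operate on.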
@@ -760,10 +815,21 @@ struct GroupPortableImpl {
760
815
 
761
816
  #ifdef ABSL_INTERNAL_HAVE_SSE2
762
817
  using Group = GroupSse2Impl;
818
+ using GroupEmptyOrDeleted = GroupSse2Impl;
763
819
  #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
764
820
  using Group = GroupAArch64Impl;
821
+ // For Aarch64, we use the portable implementation for counting and masking
822
+ // empty or deleted group elements. This is to avoid the latency of moving
823
+ // between data GPRs and Neon registers when it does not provide a benefit.
824
+ // Using Neon is profitable when we call Match(), but is not when we don't,
825
+ // which is the case when we do *EmptyOrDeleted operations. It is difficult to
826
+ // make a similar approach beneficial on other architectures such as x86 since
827
+ // they have much lower GPR <-> vector register transfer latency and 16-wide
828
+ // Groups.
829
+ using GroupEmptyOrDeleted = GroupPortableImpl;
765
830
  #else
766
831
  using Group = GroupPortableImpl;
832
+ using GroupEmptyOrDeleted = GroupPortableImpl;
767
833
  #endif
768
834
 
769
835
  // When there is an insertion with no reserved growth, we rehash with
@@ -802,15 +868,19 @@ class CommonFieldsGenerationInfoEnabled {
802
868
  // whenever reserved_growth_ is zero.
803
869
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
804
870
  size_t capacity) const;
871
+ // Similar to above, except that we don't depend on reserved_growth_.
872
+ bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
873
+ size_t capacity) const;
805
874
  void maybe_increment_generation_on_insert() {
806
875
  if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
807
876
 
808
877
  if (reserved_growth_ > 0) {
809
878
  if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
810
879
  } else {
811
- *generation_ = NextGeneration(*generation_);
880
+ increment_generation();
812
881
  }
813
882
  }
883
+ void increment_generation() { *generation_ = NextGeneration(*generation_); }
814
884
  void reset_reserved_growth(size_t reservation, size_t size) {
815
885
  reserved_growth_ = reservation - size;
816
886
  }
@@ -856,7 +926,11 @@ class CommonFieldsGenerationInfoDisabled {
856
926
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
857
927
  return false;
858
928
  }
929
+ bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
930
+ return false;
931
+ }
859
932
  void maybe_increment_generation_on_insert() {}
933
+ void increment_generation() {}
860
934
  void reset_reserved_growth(size_t, size_t) {}
861
935
  size_t reserved_growth() const { return 0; }
862
936
  void set_reserved_growth(size_t) {}
@@ -909,9 +983,11 @@ using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
909
983
  // A valid capacity is a non-zero integer `2^m - 1`.
910
984
  inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
911
985
 
912
- // Computes the offset from the start of the backing allocation of the control
913
- // bytes. growth_left is stored at the beginning of the backing array.
914
- inline size_t ControlOffset() { return sizeof(size_t); }
986
+ // Computes the offset from the start of the backing allocation of control.
987
+ // infoz and growth_left are stored at the beginning of the backing array.
988
+ inline size_t ControlOffset(bool has_infoz) {
989
+ return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
990
+ }
915
991
 
916
992
  // Returns the number of "cloned control bytes".
917
993
  //
@@ -922,24 +998,26 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
922
998
 
923
999
  // Given the capacity of a table, computes the offset (from the start of the
924
1000
  // backing allocation) of the generation counter (if it exists).
925
- inline size_t GenerationOffset(size_t capacity) {
1001
+ inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
926
1002
  assert(IsValidCapacity(capacity));
927
1003
  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
928
- return ControlOffset() + num_control_bytes;
1004
+ return ControlOffset(has_infoz) + num_control_bytes;
929
1005
  }
930
1006
 
931
1007
  // Given the capacity of a table, computes the offset (from the start of the
932
1008
  // backing allocation) at which the slots begin.
933
- inline size_t SlotOffset(size_t capacity, size_t slot_align) {
1009
+ inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
934
1010
  assert(IsValidCapacity(capacity));
935
- return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
1011
+ return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
1012
+ slot_align - 1) &
936
1013
  (~slot_align + 1);
937
1014
  }
938
1015
 
939
1016
  // Given the capacity of a table, computes the total size of the backing
940
1017
  // array.
941
- inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
942
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
1018
+ inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
1019
+ bool has_infoz) {
1020
+ return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
943
1021
  }
944
1022
 
945
1023
  // CommonFields hold the fields in raw_hash_set that do not depend
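To make the layout computed by `ControlOffset`, `GenerationOffset`, `SlotOffset` and `AllocSize` easier to follow, here is a standalone sketch of the backing-array layout. The concrete sizes below (an 8-byte infoz handle, a 4-byte generation counter, a 16-wide group) are placeholder assumptions for illustration, not values taken from the library.

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kInfozSize = 8;        // assumed sizeof(HashtablezInfoHandle)
    constexpr size_t kGenerationBytes = 4;  // assumed NumGenerationBytes()
    constexpr size_t kGroupWidth = 16;      // assumed Group::kWidth

    // [infoz?][growth_left][control bytes + sentinel + cloned bytes][generation][pad][slots]
    constexpr size_t ControlOffset(bool has_infoz) {
      return (has_infoz ? kInfozSize : 0) + sizeof(size_t);
    }
    constexpr size_t GenerationOffset(size_t capacity, bool has_infoz) {
      return ControlOffset(has_infoz) + capacity + 1 + (kGroupWidth - 1);
    }
    constexpr size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
      return (GenerationOffset(capacity, has_infoz) + kGenerationBytes +
              slot_align - 1) &
             (~slot_align + 1);
    }
    constexpr size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
                               bool has_infoz) {
      return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
    }

    int main() {
      // Capacity 15 (2^4 - 1), 8-byte slots aligned to 8, sampled table.
      std::printf("control at %zu, slots at %zu, total %zu bytes\n",
                  ControlOffset(true), SlotOffset(15, 8, true),
                  AllocSize(15, 8, 8, true));
    }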
@@ -954,28 +1032,15 @@ class CommonFields : public CommonFieldsGenerationInfo {
954
1032
  CommonFields& operator=(const CommonFields&) = delete;
955
1033
 
956
1034
  // Movable
957
- CommonFields(CommonFields&& that)
958
- : CommonFieldsGenerationInfo(
959
- std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
960
- // Explicitly copying fields into "this" and then resetting "that"
961
- // fields generates less code then calling absl::exchange per field.
962
- control_(that.control()),
963
- slots_(that.slot_array()),
964
- capacity_(that.capacity()),
965
- compressed_tuple_(that.size(), std::move(that.infoz())) {
966
- that.set_control(EmptyGroup());
967
- that.set_slots(nullptr);
968
- that.set_capacity(0);
969
- that.set_size(0);
970
- }
1035
+ CommonFields(CommonFields&& that) = default;
971
1036
  CommonFields& operator=(CommonFields&&) = default;
972
1037
 
973
1038
  ctrl_t* control() const { return control_; }
974
1039
  void set_control(ctrl_t* c) { control_ = c; }
975
1040
  void* backing_array_start() const {
976
- // growth_left is stored before control bytes.
1041
+ // growth_left (and maybe infoz) is stored before control bytes.
977
1042
  assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
978
- return control() - sizeof(size_t);
1043
+ return control() - ControlOffset(has_infoz());
979
1044
  }
980
1045
 
981
1046
  // Note: we can't use slots() because Qt defines "slots" as a macro.
@@ -983,8 +1048,18 @@ class CommonFields : public CommonFieldsGenerationInfo {
983
1048
  void set_slots(void* s) { slots_ = s; }
984
1049
 
985
1050
  // The number of filled slots.
986
- size_t size() const { return compressed_tuple_.template get<0>(); }
987
- void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
1051
+ size_t size() const { return size_ >> HasInfozShift(); }
1052
+ void set_size(size_t s) {
1053
+ size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
1054
+ }
1055
+ void increment_size() {
1056
+ assert(size() < capacity());
1057
+ size_ += size_t{1} << HasInfozShift();
1058
+ }
1059
+ void decrement_size() {
1060
+ assert(size() > 0);
1061
+ size_ -= size_t{1} << HasInfozShift();
1062
+ }
988
1063
 
989
1064
  // The total number of available slots.
990
1065
  size_t capacity() const { return capacity_; }
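For readers of this hunk: `size_` now packs the element count and the `has_infoz` flag into one word. A minimal standalone illustration of that packing (the `PackedSize` name is ours, not the library's):

    #include <cassert>
    #include <cstddef>

    // The low bit holds the "has infoz" flag, the remaining bits hold the count,
    // mirroring size()/set_size()/increment_size() above.
    struct PackedSize {
      size_t bits = 0;

      static constexpr size_t kShift = 1;                         // HasInfozShift()
      static constexpr size_t kMask = (size_t{1} << kShift) - 1;  // HasInfozMask()

      size_t size() const { return bits >> kShift; }
      bool has_infoz() const { return (bits & kMask) != 0; }
      void set_size(size_t s) { bits = (s << kShift) | (bits & kMask); }
      void set_has_infoz(bool h) {
        bits = (size() << kShift) | static_cast<size_t>(h);
      }
      void increment_size() { bits += size_t{1} << kShift; }  // flag bit untouched
    };

    int main() {
      PackedSize p;
      p.set_has_infoz(true);
      p.set_size(5);
      p.increment_size();
      assert(p.size() == 6 && p.has_infoz());
      return 0;
    }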
@@ -996,28 +1071,52 @@ class CommonFields : public CommonFieldsGenerationInfo {
996
1071
  // The number of slots we can still fill without needing to rehash.
997
1072
  // This is stored in the heap allocation before the control bytes.
998
1073
  size_t growth_left() const {
999
- return *reinterpret_cast<size_t*>(backing_array_start());
1074
+ const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1075
+ assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1076
+ return *gl_ptr;
1000
1077
  }
1001
1078
  void set_growth_left(size_t gl) {
1002
- *reinterpret_cast<size_t*>(backing_array_start()) = gl;
1079
+ size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1080
+ assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1081
+ *gl_ptr = gl;
1003
1082
  }
1004
1083
 
1005
- HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
1006
- const HashtablezInfoHandle& infoz() const {
1007
- return compressed_tuple_.template get<1>();
1084
+ bool has_infoz() const {
1085
+ return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
1086
+ }
1087
+ void set_has_infoz(bool has_infoz) {
1088
+ size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
1089
+ }
1090
+
1091
+ HashtablezInfoHandle infoz() {
1092
+ return has_infoz()
1093
+ ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
1094
+ : HashtablezInfoHandle();
1095
+ }
1096
+ void set_infoz(HashtablezInfoHandle infoz) {
1097
+ assert(has_infoz());
1098
+ *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
1008
1099
  }
1009
1100
 
1010
1101
  bool should_rehash_for_bug_detection_on_insert() const {
1011
1102
  return CommonFieldsGenerationInfo::
1012
1103
  should_rehash_for_bug_detection_on_insert(control(), capacity());
1013
1104
  }
1105
+ bool should_rehash_for_bug_detection_on_move() const {
1106
+ return CommonFieldsGenerationInfo::
1107
+ should_rehash_for_bug_detection_on_move(control(), capacity());
1108
+ }
1109
+ void maybe_increment_generation_on_move() {
1110
+ if (capacity() == 0) return;
1111
+ increment_generation();
1112
+ }
1014
1113
  void reset_reserved_growth(size_t reservation) {
1015
1114
  CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1016
1115
  }
1017
1116
 
1018
1117
  // The size of the backing array allocation.
1019
1118
  size_t alloc_size(size_t slot_size, size_t slot_align) const {
1020
- return AllocSize(capacity(), slot_size, slot_align);
1119
+ return AllocSize(capacity(), slot_size, slot_align, has_infoz());
1021
1120
  }
1022
1121
 
1023
1122
  // Returns the number of control bytes set to kDeleted. For testing only.
@@ -1027,9 +1126,14 @@ class CommonFields : public CommonFieldsGenerationInfo {
1027
1126
  }
1028
1127
 
1029
1128
  private:
1030
- // TODO(b/259599413): Investigate removing some of these fields:
1129
+ // We store the has_infoz bit in the lowest bit of size_.
1130
+ static constexpr size_t HasInfozShift() { return 1; }
1131
+ static constexpr size_t HasInfozMask() {
1132
+ return (size_t{1} << HasInfozShift()) - 1;
1133
+ }
1134
+
1135
+ // TODO(b/182800944): Investigate removing some of these fields:
1031
1136
  // - control/slots can be derived from each other
1032
- // - we can use 6 bits for capacity since it's always a power of two minus 1
1033
1137
 
1034
1138
  // The control bytes (and, also, a pointer near to the base of the backing
1035
1139
  // array).
@@ -1044,12 +1148,16 @@ class CommonFields : public CommonFieldsGenerationInfo {
1044
1148
  // `control`. May be null for empty tables.
1045
1149
  void* slots_ = nullptr;
1046
1150
 
1151
+ // The number of slots in the backing array. This is always 2^N-1 for an
1152
+ // integer N. NOTE: we tried experimenting with compressing the capacity and
1153
+ // storing it together with size_: (a) using 6 bits to store the corresponding
1154
+ // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
1155
+ // size_ and storing size in the low bits. Both of these experiments were
1156
+ // regressions, presumably because we need capacity to do find operations.
1047
1157
  size_t capacity_ = 0;
1048
1158
 
1049
- // Bundle together size and HashtablezInfoHandle to ensure EBO for
1050
- // HashtablezInfoHandle when sampling is turned off.
1051
- absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
1052
- compressed_tuple_{0u, HashtablezInfoHandle{}};
1159
+ // The size, packed together with one low bit that stores whether we have infoz.
1160
+ size_t size_ = 0;
1053
1161
  };
1054
1162
 
1055
1163
  template <class Policy, class Hash, class Eq, class Alloc>
@@ -1139,35 +1247,39 @@ inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1139
1247
  const GenerationType* generation_ptr,
1140
1248
  const char* operation) {
1141
1249
  if (!SwisstableDebugEnabled()) return;
1142
- if (ctrl == nullptr) {
1143
- ABSL_INTERNAL_LOG(FATAL,
1144
- std::string(operation) + " called on end() iterator.");
1145
- }
1146
- if (ctrl == EmptyGroup()) {
1147
- ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
1148
- " called on default-constructed iterator.");
1250
+ // `SwisstableDebugEnabled()` is also true for release builds with hardening
1251
+ // enabled. To minimize their impact in those builds:
1252
+ // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1253
+ // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1254
+ // the chances that the hot paths will be inlined.
1255
+ if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
1256
+ ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
1257
+ }
1258
+ if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
1259
+ ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
1260
+ operation);
1149
1261
  }
1150
1262
  if (SwisstableGenerationsEnabled()) {
1151
- if (generation != *generation_ptr) {
1152
- ABSL_INTERNAL_LOG(FATAL,
1153
- std::string(operation) +
1154
- " called on invalid iterator. The table could have "
1155
- "rehashed since this iterator was initialized.");
1263
+ if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1264
+ ABSL_RAW_LOG(FATAL,
1265
+ "%s called on invalid iterator. The table could have "
1266
+ "rehashed or moved since this iterator was initialized.",
1267
+ operation);
1156
1268
  }
1157
- if (!IsFull(*ctrl)) {
1158
- ABSL_INTERNAL_LOG(
1269
+ if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1270
+ ABSL_RAW_LOG(
1159
1271
  FATAL,
1160
- std::string(operation) +
1161
- " called on invalid iterator. The element was likely erased.");
1272
+ "%s called on invalid iterator. The element was likely erased.",
1273
+ operation);
1162
1274
  }
1163
1275
  } else {
1164
- if (!IsFull(*ctrl)) {
1165
- ABSL_INTERNAL_LOG(
1276
+ if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1277
+ ABSL_RAW_LOG(
1166
1278
  FATAL,
1167
- std::string(operation) +
1168
- " called on invalid iterator. The element might have been erased "
1169
- "or the table might have rehashed. Consider running with "
1170
- "--config=asan to diagnose rehashing issues.");
1279
+ "%s called on invalid iterator. The element might have been erased "
1280
+ "or the table might have rehashed. Consider running with "
1281
+ "--config=asan to diagnose rehashing issues.",
1282
+ operation);
1171
1283
  }
1172
1284
  }
1173
1285
  }
@@ -1180,13 +1292,13 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1180
1292
  const bool ctrl_is_valid_for_comparison =
1181
1293
  ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
1182
1294
  if (SwisstableGenerationsEnabled()) {
1183
- if (generation != *generation_ptr) {
1184
- ABSL_INTERNAL_LOG(FATAL,
1185
- "Invalid iterator comparison. The table could have "
1186
- "rehashed since this iterator was initialized.");
1295
+ if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1296
+ ABSL_RAW_LOG(FATAL,
1297
+ "Invalid iterator comparison. The table could have rehashed "
1298
+ "or moved since this iterator was initialized.");
1187
1299
  }
1188
- if (!ctrl_is_valid_for_comparison) {
1189
- ABSL_INTERNAL_LOG(
1300
+ if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
1301
+ ABSL_RAW_LOG(
1190
1302
  FATAL, "Invalid iterator comparison. The element was likely erased.");
1191
1303
  }
1192
1304
  } else {
@@ -1226,10 +1338,15 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1226
1338
  const GenerationType* generation_ptr_a,
1227
1339
  const GenerationType* generation_ptr_b) {
1228
1340
  if (!SwisstableDebugEnabled()) return;
1341
+ // `SwisstableDebugEnabled()` is also true for release builds with hardening
1342
+ // enabled. To minimize their impact in those builds:
1343
+ // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1344
+ // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1345
+ // the chances that the hot paths will be inlined.
1229
1346
  const bool a_is_default = ctrl_a == EmptyGroup();
1230
1347
  const bool b_is_default = ctrl_b == EmptyGroup();
1231
- if (a_is_default != b_is_default) {
1232
- ABSL_INTERNAL_LOG(
1348
+ if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
1349
+ ABSL_RAW_LOG(
1233
1350
  FATAL,
1234
1351
  "Invalid iterator comparison. Comparing default-constructed iterator "
1235
1352
  "with non-default-constructed iterator.");
@@ -1237,36 +1354,36 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1237
1354
  if (a_is_default && b_is_default) return;
1238
1355
 
1239
1356
  if (SwisstableGenerationsEnabled()) {
1240
- if (generation_ptr_a == generation_ptr_b) return;
1357
+ if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
1241
1358
  const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1242
1359
  const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1243
1360
  if (a_is_empty != b_is_empty) {
1244
- ABSL_INTERNAL_LOG(FATAL,
1245
- "Invalid iterator comparison. Comparing iterator from "
1246
- "a non-empty hashtable with an iterator from an empty "
1247
- "hashtable.");
1361
+ ABSL_RAW_LOG(FATAL,
1362
+ "Invalid iterator comparison. Comparing iterator from a "
1363
+ "non-empty hashtable with an iterator from an empty "
1364
+ "hashtable.");
1248
1365
  }
1249
1366
  if (a_is_empty && b_is_empty) {
1250
- ABSL_INTERNAL_LOG(FATAL,
1251
- "Invalid iterator comparison. Comparing iterators from "
1252
- "different empty hashtables.");
1367
+ ABSL_RAW_LOG(FATAL,
1368
+ "Invalid iterator comparison. Comparing iterators from "
1369
+ "different empty hashtables.");
1253
1370
  }
1254
1371
  const bool a_is_end = ctrl_a == nullptr;
1255
1372
  const bool b_is_end = ctrl_b == nullptr;
1256
1373
  if (a_is_end || b_is_end) {
1257
- ABSL_INTERNAL_LOG(FATAL,
1258
- "Invalid iterator comparison. Comparing iterator with "
1259
- "an end() iterator from a different hashtable.");
1374
+ ABSL_RAW_LOG(FATAL,
1375
+ "Invalid iterator comparison. Comparing iterator with an "
1376
+ "end() iterator from a different hashtable.");
1260
1377
  }
1261
- ABSL_INTERNAL_LOG(FATAL,
1262
- "Invalid iterator comparison. Comparing non-end() "
1263
- "iterators from different hashtables.");
1378
+ ABSL_RAW_LOG(FATAL,
1379
+ "Invalid iterator comparison. Comparing non-end() iterators "
1380
+ "from different hashtables.");
1264
1381
  } else {
1265
1382
  ABSL_HARDENING_ASSERT(
1266
1383
  AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1267
1384
  "Invalid iterator comparison. The iterators may be from different "
1268
- "containers or the container might have rehashed. Consider running "
1269
- "with --config=asan to diagnose rehashing issues.");
1385
+ "containers or the container might have rehashed or moved. Consider "
1386
+ "running with --config=asan to diagnose issues.");
1270
1387
  }
1271
1388
  }
1272
1389
 
@@ -1289,6 +1406,12 @@ struct FindInfo {
1289
1406
  // `ShouldInsertBackwards()` for small tables.
1290
1407
  inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
1291
1408
 
1409
+ // Whether a table fits entirely into a probing group.
1410
+ // Arbitrary order of elements in such tables is correct.
1411
+ inline bool is_single_group(size_t capacity) {
1412
+ return capacity <= Group::kWidth;
1413
+ }
1414
+
1292
1415
  // Begins a probing operation on `common.control`, using `hash`.
1293
1416
  inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
1294
1417
  size_t hash) {
@@ -1310,7 +1433,7 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1310
1433
  auto seq = probe(common, hash);
1311
1434
  const ctrl_t* ctrl = common.control();
1312
1435
  while (true) {
1313
- Group g{ctrl + seq.offset()};
1436
+ GroupEmptyOrDeleted g{ctrl + seq.offset()};
1314
1437
  auto mask = g.MaskEmptyOrDeleted();
1315
1438
  if (mask) {
1316
1439
  #if !defined(NDEBUG)
@@ -1351,7 +1474,6 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1351
1474
  capacity + 1 + NumClonedBytes());
1352
1475
  ctrl[capacity] = ctrl_t::kSentinel;
1353
1476
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
1354
- ResetGrowthLeft(common);
1355
1477
  }
1356
1478
 
1357
1479
  // Sets `ctrl[i]` to `h`.
@@ -1386,38 +1508,263 @@ constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1386
1508
  return (std::max)(align_of_slot, alignof(size_t));
1387
1509
  }
1388
1510
 
1389
- template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
1390
- ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
1391
- assert(c.capacity());
1392
- // Folks with custom allocators often make unwarranted assumptions about the
1393
- // behavior of their classes vis-a-vis trivial destructability and what
1394
- // calls they will or won't make. Avoid sampling for people with custom
1395
- // allocators to get us out of this mess. This is not a hard guarantee but
1396
- // a workaround while we plan the exact guarantee we want to provide.
1397
- const size_t sample_size =
1398
- (std::is_same<Alloc, std::allocator<char>>::value &&
1399
- c.slot_array() == nullptr)
1400
- ? SizeOfSlot
1401
- : 0;
1402
-
1403
- const size_t cap = c.capacity();
1404
- const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
1405
- // growth_left (which is a size_t) is stored with the backing array.
1406
- char* mem = static_cast<char*>(
1407
- Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
1408
- const GenerationType old_generation = c.generation();
1409
- c.set_generation_ptr(
1410
- reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
1411
- c.set_generation(NextGeneration(old_generation));
1412
- c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
1413
- c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
1414
- ResetCtrl(c, SizeOfSlot);
1415
- if (sample_size) {
1416
- c.infoz() = Sample(sample_size);
1417
- }
1418
- c.infoz().RecordStorageChanged(c.size(), cap);
1511
+ // Returns the address of the ith slot in slots where each slot occupies
1512
+ // slot_size.
1513
+ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
1514
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
1515
+ (slot * slot_size));
1419
1516
  }
1420
1517
 
1518
+ // Helper class to perform resize of the hash set.
1519
+ //
1520
+ // It contains special optimizations for small group resizes.
1521
+ // See GrowIntoSingleGroupShuffleControlBytes for details.
1522
+ class HashSetResizeHelper {
1523
+ public:
1524
+ explicit HashSetResizeHelper(CommonFields& c)
1525
+ : old_ctrl_(c.control()),
1526
+ old_capacity_(c.capacity()),
1527
+ had_infoz_(c.has_infoz()) {}
1528
+
1529
+ // An optimized-for-small-groups version of `find_first_non_full`, applicable
1530
+ // only right after calling `raw_hash_set::resize`.
1531
+ // It has an implicit assumption that `resize` will call
1532
+ // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
1533
+ // Falls back to `find_first_non_full` in case of big groups, so it is
1534
+ // safe to use after `rehash_and_grow_if_necessary`.
1535
+ static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
1536
+ size_t old_capacity,
1537
+ size_t hash) {
1538
+ if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
1539
+ return find_first_non_full(c, hash);
1540
+ }
1541
+ // Find a location for the new element non-deterministically.
1542
+ // Note that any position is correct.
1543
+ // It will be located at `half_old_capacity` or one of the other
1544
+ // empty slots with approximately 50% probability each.
1545
+ size_t offset = probe(c, hash).offset();
1546
+
1547
+ // Note that we intentionally use unsigned int underflow.
1548
+ if (offset - (old_capacity + 1) >= old_capacity) {
1549
+ // The offset falls on kSentinel or into the mostly occupied first half.
1550
+ offset = old_capacity / 2;
1551
+ }
1552
+ assert(IsEmpty(c.control()[offset]));
1553
+ return FindInfo{offset, 0};
1554
+ }
1555
+
1556
+ ctrl_t* old_ctrl() const { return old_ctrl_; }
1557
+ size_t old_capacity() const { return old_capacity_; }
1558
+
1559
+ // Allocates a backing array for the hashtable.
1560
+ // Reads `capacity` and updates all other fields based on the result of
1561
+ // the allocation.
1562
+ //
1563
+ // It may also do the following actions:
1564
+ // 1. initialize control bytes
1565
+ // 2. initialize slots
1566
+ // 3. deallocate old slots.
1567
+ //
1568
+ // We are bundling a lot of functionality
1569
+ // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
1570
+ // duplication in raw_hash_set<>::resize.
1571
+ //
1572
+ // `c.capacity()` must be nonzero.
1573
+ // POSTCONDITIONS:
1574
+ // 1. CommonFields is initialized.
1575
+ //
1576
+ // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
1577
+ // Both control bytes and slots are fully initialized.
1578
+ // old_slots are deallocated.
1579
+ // infoz.RecordRehash is called.
1580
+ //
1581
+ // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
1582
+ // Control bytes are fully initialized.
1583
+ // infoz.RecordRehash is called.
1584
+ // GrowSizeIntoSingleGroup must be called to finish slots initialization.
1585
+ //
1586
+ // if !IsGrowingIntoSingleGroupApplicable
1587
+ // Control bytes are initialized to empty table via ResetCtrl.
1588
+ // raw_hash_set<>::resize must insert elements regularly.
1589
+ // infoz.RecordRehash is called if old_capacity == 0.
1590
+ //
1591
+ // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
1592
+ template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
1593
+ size_t AlignOfSlot>
1594
+ ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
1595
+ Alloc alloc) {
1596
+ assert(c.capacity());
1597
+ // Folks with custom allocators often make unwarranted assumptions about the
1598
+ // behavior of their classes vis-a-vis trivial destructability and what
1599
+ // calls they will or won't make. Avoid sampling for people with custom
1600
+ // allocators to get us out of this mess. This is not a hard guarantee but
1601
+ // a workaround while we plan the exact guarantee we want to provide.
1602
+ const size_t sample_size =
1603
+ (std::is_same<Alloc, std::allocator<char>>::value &&
1604
+ c.slot_array() == nullptr)
1605
+ ? SizeOfSlot
1606
+ : 0;
1607
+ HashtablezInfoHandle infoz =
1608
+ sample_size > 0 ? Sample(sample_size) : c.infoz();
1609
+
1610
+ const bool has_infoz = infoz.IsSampled();
1611
+ const size_t cap = c.capacity();
1612
+ const size_t alloc_size =
1613
+ AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
1614
+ char* mem = static_cast<char*>(
1615
+ Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
1616
+ const GenerationType old_generation = c.generation();
1617
+ c.set_generation_ptr(reinterpret_cast<GenerationType*>(
1618
+ mem + GenerationOffset(cap, has_infoz)));
1619
+ c.set_generation(NextGeneration(old_generation));
1620
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
1621
+ c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
1622
+ ResetGrowthLeft(c);
1623
+
1624
+ const bool grow_single_group =
1625
+ IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity());
1626
+ if (old_capacity_ != 0 && grow_single_group) {
1627
+ if (TransferUsesMemcpy) {
1628
+ GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
1629
+ DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
1630
+ } else {
1631
+ GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
1632
+ }
1633
+ } else {
1634
+ ResetCtrl(c, SizeOfSlot);
1635
+ }
1636
+
1637
+ c.set_has_infoz(has_infoz);
1638
+ if (has_infoz) {
1639
+ infoz.RecordStorageChanged(c.size(), cap);
1640
+ if (grow_single_group || old_capacity_ == 0) {
1641
+ infoz.RecordRehash(0);
1642
+ }
1643
+ c.set_infoz(infoz);
1644
+ }
1645
+ return grow_single_group;
1646
+ }
1647
+
1648
+ // Relocates slots into new single group consistent with
1649
+ // GrowIntoSingleGroupShuffleControlBytes.
1650
+ //
1651
+ // PRECONDITIONS:
1652
+ // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
1653
+ template <class PolicyTraits, class Alloc>
1654
+ void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
1655
+ typename PolicyTraits::slot_type* old_slots) {
1656
+ assert(old_capacity_ < Group::kWidth / 2);
1657
+ assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
1658
+ using slot_type = typename PolicyTraits::slot_type;
1659
+ assert(is_single_group(c.capacity()));
1660
+
1661
+ auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
1662
+
1663
+ size_t shuffle_bit = old_capacity_ / 2 + 1;
1664
+ for (size_t i = 0; i < old_capacity_; ++i) {
1665
+ if (IsFull(old_ctrl_[i])) {
1666
+ size_t new_i = i ^ shuffle_bit;
1667
+ SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
1668
+ PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
1669
+ }
1670
+ }
1671
+ PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
1672
+ }
1673
+
1674
+ // Deallocates old backing array.
1675
+ template <size_t AlignOfSlot, class CharAlloc>
1676
+ void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
1677
+ SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
1678
+ Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1679
+ &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_),
1680
+ AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_));
1681
+ }
1682
+
1683
+ private:
1684
+ // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
1685
+ static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
1686
+ size_t new_capacity) {
1687
+ // NOTE that `old_capacity < new_capacity` in order to have
1688
+ // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes.
1689
+ return is_single_group(new_capacity) && old_capacity < new_capacity;
1690
+ }
1691
+
1692
+ // Relocates control bytes and slots into new single group for
1693
+ // transferable objects.
1694
+ // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
1695
+ void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
1696
+ size_t slot_size);
1697
+
1698
+ // Shuffle control bits deterministically to the next capacity.
1699
+ // Returns offset for newly added element with given hash.
1700
+ //
1701
+ // PRECONDITIONs:
1702
+ // 1. new_ctrl is allocated for new_capacity,
1703
+ // but not initialized.
1704
+ // 2. new_capacity is a single group.
1705
+ //
1706
+ // All elements are transferred into the first `old_capacity + 1` positions
1707
+ // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
1708
+ // in order to change the order and keep it non-deterministic.
1709
+ // Although the rotation itself is deterministic, the position of the newly added element
1710
+ // will be based on `H1` and is not deterministic.
1711
+ //
1712
+ // Examples:
1713
+ // S = kSentinel, E = kEmpty
1714
+ //
1715
+ // old_ctrl = SEEEEEEEE...
1716
+ // new_ctrl = ESEEEEEEE...
1717
+ //
1718
+ // old_ctrl = 0SEEEEEEE...
1719
+ // new_ctrl = E0ESE0EEE...
1720
+ //
1721
+ // old_ctrl = 012S012EEEEEEEEE...
1722
+ // new_ctrl = 2E01EEES2E01EEE...
1723
+ //
1724
+ // old_ctrl = 0123456S0123456EEEEEEEEEEE...
1725
+ // new_ctrl = 456E0123EEEEEES456E0123EEE...
1726
+ void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
1727
+ size_t new_capacity) const;
1728
+
1729
+ // Shuffle trivially transferable slots in the way consistent with
1730
+ // GrowIntoSingleGroupShuffleControlBytes.
1731
+ //
1732
+ // PRECONDITIONs:
1733
+ // 1. old_capacity must be non-zero.
1734
+ // 2. new_ctrl is fully initialized using
1735
+ // GrowIntoSingleGroupShuffleControlBytes.
1736
+ // 3. new_slots is allocated and *not* poisoned.
1737
+ //
1738
+ // POSTCONDITIONS:
1739
+ // 1. new_slots are transferred from old_slots_ consistent with
1740
+ // GrowIntoSingleGroupShuffleControlBytes.
1741
+ // 2. Empty new_slots are *not* poisoned.
1742
+ void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
1743
+ void* new_slots,
1744
+ size_t slot_size) const;
1745
+
1746
+ // Poison empty slots that were transferred using the deterministic algorithm
1747
+ // described above.
1748
+ // PRECONDITIONs:
1749
+ // 1. new_ctrl is fully initialized using
1750
+ // GrowIntoSingleGroupShuffleControlBytes.
1751
+ // 2. new_slots is fully initialized consistent with
1752
+ // GrowIntoSingleGroupShuffleControlBytes.
1753
+ void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
1754
+ // Poison the non-full slots.
1755
+ for (size_t i = 0; i < c.capacity(); ++i) {
1756
+ if (!IsFull(c.control()[i])) {
1757
+ SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
1758
+ slot_size);
1759
+ }
1760
+ }
1761
+ }
1762
+
1763
+ ctrl_t* old_ctrl_;
1764
+ size_t old_capacity_;
1765
+ bool had_infoz_;
1766
+ };
1767
+
1421
1768
  // PolicyFunctions bundles together some information for a particular
1422
1769
  // raw_hash_set<T, ...> instantiation. This information is passed to
1423
1770
  // type-erased functions that want to do small amounts of type-specific
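The single-group grow path documented above (GrowIntoSingleGroupShuffleControlBytes / GrowSizeIntoSingleGroup) moves old slot i to position i ^ (old_capacity / 2 + 1). A tiny standalone sketch of that index mapping; the concrete capacity is our own example:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      const size_t old_capacity = 3;                    // 2^2 - 1
      const size_t shuffle_bit = old_capacity / 2 + 1;  // == 2
      std::vector<size_t> new_pos(old_capacity);
      for (size_t i = 0; i < old_capacity; ++i) new_pos[i] = i ^ shuffle_bit;
      // Old slots 0, 1, 2 land at 2, 3, 0: every element stays within the first
      // old_capacity + 1 positions of the new, larger group, but the order is
      // shuffled as described in the control-byte examples above.
      assert(new_pos[0] == 2 && new_pos[1] == 3 && new_pos[2] == 0);
      return 0;
    }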
@@ -1442,7 +1789,7 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1442
1789
  bool reuse);
1443
1790
 
1444
1791
  // Type-erased version of raw_hash_set::erase_meta_only.
1445
- void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
1792
+ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
1446
1793
 
1447
1794
  // Function to place in PolicyFunctions::dealloc for raw_hash_sets
1448
1795
  // that are using std::allocator. This allows us to share the same
@@ -1456,6 +1803,7 @@ ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
1456
1803
  policy.slot_size * common.capacity());
1457
1804
 
1458
1805
  std::allocator<char> alloc;
1806
+ common.infoz().Unregister();
1459
1807
  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1460
1808
  &alloc, common.backing_array_start(),
1461
1809
  common.alloc_size(policy.slot_size, AlignOfSlot));
@@ -1534,6 +1882,11 @@ class raw_hash_set {
1534
1882
  using AllocTraits = absl::allocator_traits<allocator_type>;
1535
1883
  using SlotAlloc = typename absl::allocator_traits<
1536
1884
  allocator_type>::template rebind_alloc<slot_type>;
1885
+ // People are often sloppy with the exact type of their allocator (sometimes
1886
+ // it has an extra const or is missing the pair, but rebinds made it work
1887
+ // anyway).
1888
+ using CharAlloc =
1889
+ typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
1537
1890
  using SlotAllocTraits = typename absl::allocator_traits<
1538
1891
  allocator_type>::template rebind_traits<slot_type>;
1539
1892
 
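For context on the `CharAlloc` alias added above: `allocator_traits::rebind_alloc` turns whatever allocator the user supplied into an allocator of `char`, so the raw backing array can be allocated with it. A small illustration (the alias name is ours):

    #include <memory>
    #include <type_traits>
    #include <utility>

    template <class Alloc>
    using CharAllocFor =
        typename std::allocator_traits<Alloc>::template rebind_alloc<char>;

    // A typical map-style allocator rebinds to a plain std::allocator<char>.
    static_assert(
        std::is_same<CharAllocFor<std::allocator<std::pair<const int, int>>>,
                     std::allocator<char>>::value,
        "rebinding yields an allocator of char");

    int main() { return 0; }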
@@ -1590,7 +1943,7 @@ class raw_hash_set {
1590
1943
  // PRECONDITION: not an end() iterator.
1591
1944
  reference operator*() const {
1592
1945
  AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
1593
- return PolicyTraits::element(slot_);
1946
+ return unchecked_deref();
1594
1947
  }
1595
1948
 
1596
1949
  // PRECONDITION: not an end() iterator.
@@ -1645,13 +1998,17 @@ class raw_hash_set {
1645
1998
  // If a sentinel is reached, we null `ctrl_` out instead.
1646
1999
  void skip_empty_or_deleted() {
1647
2000
  while (IsEmptyOrDeleted(*ctrl_)) {
1648
- uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
2001
+ uint32_t shift =
2002
+ GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
1649
2003
  ctrl_ += shift;
1650
2004
  slot_ += shift;
1651
2005
  }
1652
2006
  if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
1653
2007
  }
1654
2008
 
2009
+ ctrl_t* control() const { return ctrl_; }
2010
+ slot_type* slot() const { return slot_; }
2011
+
1655
2012
  // We use EmptyGroup() for default-constructed iterators so that they can
1656
2013
  // be distinguished from end iterators, which have nullptr ctrl_.
1657
2014
  ctrl_t* ctrl_ = EmptyGroup();
@@ -1660,10 +2017,23 @@ class raw_hash_set {
1660
2017
  union {
1661
2018
  slot_type* slot_;
1662
2019
  };
2020
+
2021
+ // An equality check which skips ABSL Hardening iterator invalidation
2022
+ // checks.
2023
+ // Should be used when the lifetimes of the iterators are well-enough
2024
+ // understood to prove that they cannot be invalid.
2025
+ bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
2026
+
2027
+ // Dereferences the iterator without ABSL Hardening iterator invalidation
2028
+ // checks.
2029
+ reference unchecked_deref() const { return PolicyTraits::element(slot_); }
1663
2030
  };
1664
2031
 
1665
2032
  class const_iterator {
1666
2033
  friend class raw_hash_set;
2034
+ template <class Container, typename Enabler>
2035
+ friend struct absl::container_internal::hashtable_debug_internal::
2036
+ HashtableDebugAccess;
1667
2037
 
1668
2038
  public:
1669
2039
  using iterator_category = typename iterator::iterator_category;
@@ -1697,8 +2067,14 @@ class raw_hash_set {
1697
2067
  const GenerationType* gen)
1698
2068
  : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
1699
2069
  }
2070
+ ctrl_t* control() const { return inner_.control(); }
2071
+ slot_type* slot() const { return inner_.slot(); }
1700
2072
 
1701
2073
  iterator inner_;
2074
+
2075
+ bool unchecked_equals(const const_iterator& b) {
2076
+ return inner_.unchecked_equals(b.inner_);
2077
+ }
1702
2078
  };
1703
2079
 
1704
2080
  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
@@ -1717,8 +2093,7 @@ class raw_hash_set {
1717
2093
  const allocator_type& alloc = allocator_type())
1718
2094
  : settings_(CommonFields{}, hash, eq, alloc) {
1719
2095
  if (bucket_count) {
1720
- common().set_capacity(NormalizeCapacity(bucket_count));
1721
- initialize_slots();
2096
+ resize(NormalizeCapacity(bucket_count));
1722
2097
  }
1723
2098
  }
1724
2099
 
@@ -1843,28 +2218,35 @@ class raw_hash_set {
1843
2218
  : // Hash, equality and allocator are copied instead of moved because
1844
2219
  // `that` must be left valid. If Hash is std::function<Key>, moving it
1845
2220
  // would create a nullptr functor that cannot be called.
1846
- settings_(absl::exchange(that.common(), CommonFields{}),
1847
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
2221
+ // TODO(b/296061262): move instead of copying hash/eq/alloc.
2222
+ // Note: we avoid using exchange for better generated code.
2223
+ settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
2224
+ that.alloc_ref()) {
2225
+ that.common() = CommonFields{};
2226
+ maybe_increment_generation_or_rehash_on_move();
2227
+ }
1848
2228
 
1849
2229
  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
1850
2230
  : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
1851
2231
  if (a == that.alloc_ref()) {
1852
2232
  std::swap(common(), that.common());
2233
+ maybe_increment_generation_or_rehash_on_move();
1853
2234
  } else {
1854
- reserve(that.size());
1855
- // Note: this will copy elements of dense_set and unordered_set instead of
1856
- // moving them. This can be fixed if it ever becomes an issue.
1857
- for (auto& elem : that) insert(std::move(elem));
2235
+ move_elements_allocs_unequal(std::move(that));
1858
2236
  }
1859
2237
  }
1860
2238
 
1861
2239
  raw_hash_set& operator=(const raw_hash_set& that) {
1862
- raw_hash_set tmp(that,
1863
- AllocTraits::propagate_on_container_copy_assignment::value
1864
- ? that.alloc_ref()
1865
- : alloc_ref());
1866
- swap(tmp);
1867
- return *this;
2240
+ if (ABSL_PREDICT_FALSE(this == &that)) return *this;
2241
+ constexpr bool propagate_alloc =
2242
+ AllocTraits::propagate_on_container_copy_assignment::value;
2243
+ // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
2244
+ // is an exact match for that.size(). If this->capacity() is too big, then
2245
+ // it would make iteration very slow to reuse the allocation. Maybe we can
2246
+ // do the same heuristic as clear() and reuse if it's small enough.
2247
+ raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
2248
+ // NOLINTNEXTLINE: not returning *this for performance.
2249
+ return assign_impl<propagate_alloc>(std::move(tmp));
1868
2250
  }
1869
2251
 
1870
2252
  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
@@ -1879,19 +2261,7 @@ class raw_hash_set {
1879
2261
  typename AllocTraits::propagate_on_container_move_assignment());
1880
2262
  }
1881
2263
 
1882
- ~raw_hash_set() {
1883
- const size_t cap = capacity();
1884
- if (!cap) return;
1885
- destroy_slots();
1886
-
1887
- // Unpoison before returning the memory to the allocator.
1888
- SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
1889
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
1890
- &alloc_ref(), common().backing_array_start(),
1891
- AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
1892
-
1893
- infoz().Unregister();
1894
- }
2264
+ ~raw_hash_set() { destructor_impl(); }
1895
2265
 
1896
2266
  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
1897
2267
  auto it = iterator_at(0);
@@ -1937,17 +2307,6 @@ class raw_hash_set {
1937
2307
  common().set_reservation_size(0);
1938
2308
  }
1939
2309
 
1940
- inline void destroy_slots() {
1941
- const size_t cap = capacity();
1942
- const ctrl_t* ctrl = control();
1943
- slot_type* slot = slot_array();
1944
- for (size_t i = 0; i != cap; ++i) {
1945
- if (IsFull(ctrl[i])) {
1946
- PolicyTraits::destroy(&alloc_ref(), slot + i);
1947
- }
1948
- }
1949
- }
1950
-
1951
2310
  // This overload kicks in when the argument is an rvalue of insertable and
1952
2311
  // decomposable type other than init_type.
1953
2312
  //
@@ -2075,7 +2434,7 @@ class raw_hash_set {
2075
2434
  alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2076
2435
  slot_type* slot = reinterpret_cast<slot_type*>(&raw);
2077
2436
 
2078
- PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
2437
+ construct(slot, std::forward<Args>(args)...);
2079
2438
  const auto& elem = PolicyTraits::element(slot);
2080
2439
  return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
2081
2440
  }
@@ -2179,8 +2538,8 @@ class raw_hash_set {
2179
2538
  // This overload is necessary because otherwise erase<K>(const K&) would be
2180
2539
  // a better match if non-const iterator is passed as an argument.
2181
2540
  void erase(iterator it) {
2182
- AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
2183
- PolicyTraits::destroy(&alloc_ref(), it.slot_);
2541
+ AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
2542
+ destroy(it.slot());
2184
2543
  erase_meta_only(it);
2185
2544
  }
2186
2545
 
@@ -2211,8 +2570,8 @@ class raw_hash_set {
2211
2570
  assert(this != &src);
2212
2571
  for (auto it = src.begin(), e = src.end(); it != e;) {
2213
2572
  auto next = std::next(it);
2214
- if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
2215
- PolicyTraits::element(it.slot_))
2573
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
2574
+ PolicyTraits::element(it.slot()))
2216
2575
  .second) {
2217
2576
  src.erase_meta_only(it);
2218
2577
  }
@@ -2226,10 +2585,9 @@ class raw_hash_set {
2226
2585
  }
2227
2586
 
2228
2587
  node_type extract(const_iterator position) {
2229
- AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
2588
+ AssertIsFull(position.control(), position.inner_.generation(),
2230
2589
  position.inner_.generation_ptr(), "extract()");
2231
- auto node =
2232
- CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
2590
+ auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
2233
2591
  erase_meta_only(position);
2234
2592
  return node;
2235
2593
  }
@@ -2364,7 +2722,11 @@ class raw_hash_set {
2364
2722
 
2365
2723
  template <class K = key_type>
2366
2724
  bool contains(const key_arg<K>& key) const {
2367
- return find(key) != end();
2725
+ // Here neither the iterator returned by `find()` nor `end()` can be invalid
2726
+ // outside of potential thread-safety issues.
2727
+ // `find()`'s return value is constructed, used, and then destructed
2728
+ // all in this context.
2729
+ return !find(key).unchecked_equals(end());
2368
2730
  }
2369
2731
 
2370
2732
  template <class K = key_type>
@@ -2400,8 +2762,10 @@ class raw_hash_set {
2400
2762
  const raw_hash_set* outer = &a;
2401
2763
  const raw_hash_set* inner = &b;
2402
2764
  if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2403
- for (const value_type& elem : *outer)
2404
- if (!inner->has_element(elem)) return false;
2765
+ for (const value_type& elem : *outer) {
2766
+ auto it = PolicyTraits::apply(FindElement{*inner}, elem);
2767
+ if (it == inner->end() || !(*it == elem)) return false;
2768
+ }
2405
2769
  return true;
2406
2770
  }
2407
2771
 
@@ -2471,10 +2835,9 @@ class raw_hash_set {
2471
2835
  std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2472
2836
  auto res = s.find_or_prepare_insert(key);
2473
2837
  if (res.second) {
2474
- PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
2475
- &slot);
2838
+ s.transfer(s.slot_array() + res.first, &slot);
2476
2839
  } else if (do_destroy) {
2477
- PolicyTraits::destroy(&s.alloc_ref(), &slot);
2840
+ s.destroy(&slot);
2478
2841
  }
2479
2842
  return {s.iterator_at(res.first), res.second};
2480
2843
  }
@@ -2483,58 +2846,111 @@ class raw_hash_set {
     slot_type&& slot;
   };
 
+  // TODO(b/303305702): re-enable reentrant validation.
+  template <typename... Args>
+  inline void construct(slot_type* slot, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+  }
+  inline void destroy(slot_type* slot) {
+    PolicyTraits::destroy(&alloc_ref(), slot);
+  }
+  inline void transfer(slot_type* to, slot_type* from) {
+    PolicyTraits::transfer(&alloc_ref(), to, from);
+  }
+
+  inline void destroy_slots() {
+    const size_t cap = capacity();
+    const ctrl_t* ctrl = control();
+    slot_type* slot = slot_array();
+    for (size_t i = 0; i != cap; ++i) {
+      if (IsFull(ctrl[i])) {
+        destroy(slot + i);
+      }
+    }
+  }
+
+  inline void dealloc() {
+    assert(capacity() != 0);
+    // Unpoison before returning the memory to the allocator.
+    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
+    infoz().Unregister();
+    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+        &alloc_ref(), common().backing_array_start(),
+        common().alloc_size(sizeof(slot_type), alignof(slot_type)));
+  }
+
+  inline void destructor_impl() {
+    if (capacity() == 0) return;
+    destroy_slots();
+    dealloc();
+  }
+
   // Erases, but does not destroy, the value pointed to by `it`.
   //
   // This merely updates the pertinent control byte. This can be used in
   // conjunction with Policy::transfer to move the object to another place.
   void erase_meta_only(const_iterator it) {
-    EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
+    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
+                  sizeof(slot_type));
   }
 
-  // Allocates a backing array for `self` and initializes its control bytes.
-  // This reads `capacity` and updates all other fields based on the result of
-  // the allocation.
+  // Resizes table to the new capacity and move all elements to the new
+  // positions accordingly.
   //
-  // This does not free the currently held array; `capacity` must be nonzero.
-  inline void initialize_slots() {
-    // People are often sloppy with the exact type of their allocator (sometimes
-    // it has an extra const or is missing the pair, but rebinds made it work
-    // anyway).
-    using CharAlloc =
-        typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
-    InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
-        common(), CharAlloc(alloc_ref()));
-  }
-
+  // Note that for better performance instead of
+  // find_first_non_full(common(), hash),
+  // HashSetResizeHelper::FindFirstNonFullAfterResize(
+  //    common(), old_capacity, hash)
+  // can be called right after `resize`.
   ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
     assert(IsValidCapacity(new_capacity));
-    auto* old_ctrl = control();
+    HashSetResizeHelper resize_helper(common());
     auto* old_slots = slot_array();
-    const size_t old_capacity = common().capacity();
     common().set_capacity(new_capacity);
-    initialize_slots();
-
-    auto* new_slots = slot_array();
-    size_t total_probe_length = 0;
-    for (size_t i = 0; i != old_capacity; ++i) {
-      if (IsFull(old_ctrl[i])) {
-        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
-                                          PolicyTraits::element(old_slots + i));
-        auto target = find_first_non_full(common(), hash);
-        size_t new_i = target.offset;
-        total_probe_length += target.probe_length;
-        SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
-        PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
-      }
+    // Note that `InitializeSlots` does different number initialization steps
+    // depending on the values of `transfer_uses_memcpy` and capacities.
+    // Refer to the comment in `InitializeSlots` for more details.
+    const bool grow_single_group =
+        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
+                                      PolicyTraits::transfer_uses_memcpy(),
+                                      alignof(slot_type)>(
+            common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
+            CharAlloc(alloc_ref()));
+
+    if (resize_helper.old_capacity() == 0) {
+      // InitializeSlots did all the work including infoz().RecordRehash().
+      return;
     }
-    if (old_capacity) {
-      SanitizerUnpoisonMemoryRegion(old_slots,
-                                    sizeof(slot_type) * old_capacity);
-      Deallocate<BackingArrayAlignment(alignof(slot_type))>(
-          &alloc_ref(), old_ctrl - ControlOffset(),
-          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
+
+    if (grow_single_group) {
+      if (PolicyTraits::transfer_uses_memcpy()) {
+        // InitializeSlots did all the work.
+        return;
+      }
+      // We want GrowSizeIntoSingleGroup to be called here in order to make
+      // InitializeSlots not depend on PolicyTraits.
+      resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
+                                                          old_slots);
+    } else {
+      // InitializeSlots prepares control bytes to correspond to empty table.
+      auto* new_slots = slot_array();
+      size_t total_probe_length = 0;
+      for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+        if (IsFull(resize_helper.old_ctrl()[i])) {
+          size_t hash = PolicyTraits::apply(
+              HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
+          auto target = find_first_non_full(common(), hash);
+          size_t new_i = target.offset;
+          total_probe_length += target.probe_length;
+          SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+          transfer(new_slots + new_i, old_slots + i);
+        }
+      }
+      infoz().RecordRehash(total_probe_length);
     }
-    infoz().RecordRehash(total_probe_length);
+    resize_helper.DeallocateOld<alignof(slot_type)>(
+        CharAlloc(alloc_ref()), sizeof(slot_type),
+        const_cast<std::remove_const_t<slot_type>*>(old_slots));
   }
 
   // Prunes control bytes to remove as many tombstones as possible.
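The slow path of the rewritten resize() above still follows the classic open-addressing scheme: walk the old slots, re-hash each full one, probe for a free position in the new array, and transfer the element there. The sketch below is a deliberately simplified, self-contained illustration of that shape only; it is a toy linear-probing table with hypothetical names, not the swisstable control-byte/group/memcpy machinery shown in the diff.

    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <utility>
    #include <vector>

    // Toy open-addressed table (no duplicate handling, int keys only).
    struct ToyTable {
      std::vector<std::optional<int>> slots = std::vector<std::optional<int>>(8);
      std::size_t size = 0;

      static std::size_t probe(std::size_t hash,
                               const std::vector<std::optional<int>>& v) {
        std::size_t i = hash & (v.size() - 1);           // capacity is a power of two
        while (v[i].has_value()) i = (i + 1) & (v.size() - 1);
        return i;                                        // first non-full slot
      }

      void resize(std::size_t new_capacity) {
        std::vector<std::optional<int>> fresh(new_capacity);
        for (auto& slot : slots) {
          if (!slot) continue;                           // skip empty slots
          std::size_t h = std::hash<int>{}(*slot);       // re-hash the element
          fresh[probe(h, fresh)] = std::move(*slot);     // "transfer" it to the new array
        }
        slots = std::move(fresh);
      }

      void insert(int v) {
        if (size + 1 > slots.size() / 2) resize(slots.size() * 2);  // keep load factor low
        slots[probe(std::hash<int>{}(v), slots)] = v;
        ++size;
      }
    };

    int main() {
      ToyTable t;
      for (int i = 0; i < 100; ++i) t.insert(i);
      return t.size == 100 ? 0 : 1;
    }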
@@ -2604,36 +3020,64 @@ class raw_hash_set {
     }
   }
 
-  bool has_element(const value_type& elem) const {
-    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
-    auto seq = probe(common(), hash);
-    const ctrl_t* ctrl = control();
-    while (true) {
-      Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(H2(hash))) {
-        if (ABSL_PREDICT_TRUE(
-                PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
-          return true;
-      }
-      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
-      seq.next();
-      assert(seq.index() <= capacity() && "full table!");
+  void maybe_increment_generation_or_rehash_on_move() {
+    common().maybe_increment_generation_on_move();
+    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
+      resize(capacity());
     }
-    return false;
   }
 
-  // TODO(alkis): Optimize this assuming *this and that don't overlap.
-  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
-    raw_hash_set tmp(std::move(that));
-    swap(tmp);
+  template<bool propagate_alloc>
+  raw_hash_set& assign_impl(raw_hash_set&& that) {
+    // We don't bother checking for this/that aliasing. We just need to avoid
+    // breaking the invariants in that case.
+    destructor_impl();
+    common() = std::move(that.common());
+    // TODO(b/296061262): move instead of copying hash/eq/alloc.
+    hash_ref() = that.hash_ref();
+    eq_ref() = that.eq_ref();
+    CopyAlloc(alloc_ref(), that.alloc_ref(),
+              std::integral_constant<bool, propagate_alloc>());
+    that.common() = CommonFields{};
+    maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
-  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
-    raw_hash_set tmp(std::move(that), alloc_ref());
-    swap(tmp);
+
+  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
+    const size_t size = that.size();
+    if (size == 0) return *this;
+    reserve(size);
+    for (iterator it = that.begin(); it != that.end(); ++it) {
+      insert(std::move(PolicyTraits::element(it.slot())));
+      that.destroy(it.slot());
+    }
+    that.dealloc();
+    that.common() = CommonFields{};
+    maybe_increment_generation_or_rehash_on_move();
     return *this;
   }
 
+  raw_hash_set& move_assign(raw_hash_set&& that,
+                            std::true_type /*propagate_alloc*/) {
+    return assign_impl<true>(std::move(that));
+  }
+  raw_hash_set& move_assign(raw_hash_set&& that,
+                            std::false_type /*propagate_alloc*/) {
+    if (alloc_ref() == that.alloc_ref()) {
+      return assign_impl<false>(std::move(that));
+    }
+    // Aliasing can't happen here because allocs would compare equal above.
+    assert(this != &that);
+    destructor_impl();
+    // We can't take over that's memory so we need to move each element.
+    // While moving elements, this should have that's hash/eq so copy hash/eq
+    // before moving elements.
+    // TODO(b/296061262): move instead of copying hash/eq.
+    hash_ref() = that.hash_ref();
+    eq_ref() = that.eq_ref();
+    return move_elements_allocs_unequal(std::move(that));
+  }
+
  protected:
   // Attempts to find `key` in the table; if it isn't found, returns a slot that
   // the value can be inserted into, with the control byte already set to
@@ -2675,10 +3119,19 @@ class raw_hash_set {
     if (!rehash_for_bug_detection &&
         ABSL_PREDICT_FALSE(growth_left() == 0 &&
                            !IsDeleted(control()[target.offset]))) {
+      size_t old_capacity = capacity();
       rehash_and_grow_if_necessary();
-      target = find_first_non_full(common(), hash);
+      // NOTE: It is safe to use `FindFirstNonFullAfterResize`.
+      // `FindFirstNonFullAfterResize` must be called right after resize.
+      // `rehash_and_grow_if_necessary` may *not* call `resize`
+      // and perform `drop_deletes_without_resize` instead. But this
+      // could happen only on big tables.
+      // For big tables `FindFirstNonFullAfterResize` will always
+      // fallback to normal `find_first_non_full`, so it is safe to use it.
+      target = HashSetResizeHelper::FindFirstNonFullAfterResize(
+          common(), old_capacity, hash);
     }
-    common().set_size(common().size() + 1);
+    common().increment_size();
     set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
     SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
     common().maybe_increment_generation_on_insert();
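This is the growth branch of prepare_insert; it is only taken when growth_left() reaches zero. From the caller's side, reserving the final size up front keeps a bulk-insert loop off that branch entirely. A minimal sketch, assuming the Abseil headers are available:

    #include <vector>
    #include "absl/container/flat_hash_set.h"

    // Reserving up front keeps every insert on the "no growth needed" path:
    // growth_left() should not reach zero inside the loop, so no rehash happens.
    absl::flat_hash_set<int> BuildSet(const std::vector<int>& values) {
      absl::flat_hash_set<int> s;
      s.reserve(values.size());
      for (int v : values) s.insert(v);
      return s;
    }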
@@ -2696,8 +3149,7 @@ class raw_hash_set {
   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
   template <class... Args>
   void emplace_at(size_t i, Args&&... args) {
-    PolicyTraits::construct(&alloc_ref(), slot_array() + i,
-                            std::forward<Args>(args)...);
+    construct(slot_array() + i, std::forward<Args>(args)...);
 
     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
                iterator_at(i) &&
@@ -2711,6 +3163,8 @@ class raw_hash_set {
     return {control() + i, slot_array() + i, common().generation_ptr()};
   }
 
+  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
+
  private:
   friend struct RawHashSetTestOnlyAccess;
 
@@ -2743,7 +3197,7 @@ class raw_hash_set {
   slot_type* slot_array() const {
     return static_cast<slot_type*>(common().slot_array());
   }
-  HashtablezInfoHandle& infoz() { return common().infoz(); }
+  HashtablezInfoHandle infoz() { return common().infoz(); }
 
   hasher& hash_ref() { return settings_.template get<1>(); }
   const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -2763,8 +3217,7 @@ class raw_hash_set {
   }
   static void transfer_slot_fn(void* set, void* dst, void* src) {
     auto* h = static_cast<raw_hash_set*>(set);
-    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
-                           static_cast<slot_type*>(src));
+    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
   }
   // Note: dealloc_fn will only be used if we have a non-standard allocator.
   static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
@@ -2774,6 +3227,7 @@ class raw_hash_set {
     SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                   sizeof(slot_type) * common.capacity());
 
+    common.infoz().Unregister();
     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
         &set->alloc_ref(), common.backing_array_start(),
         common.alloc_size(sizeof(slot_type), alignof(slot_type)));
@@ -2847,33 +3301,18 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
   static size_t AllocatedByteSize(const Set& c) {
     size_t capacity = c.capacity();
     if (capacity == 0) return 0;
-    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
+    size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
 
     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
     if (per_slot != ~size_t{}) {
       m += per_slot * c.size();
     } else {
-      const ctrl_t* ctrl = c.control();
-      for (size_t i = 0; i != capacity; ++i) {
-        if (container_internal::IsFull(ctrl[i])) {
-          m += Traits::space_used(c.slot_array() + i);
-        }
+      for (auto it = c.begin(); it != c.end(); ++it) {
+        m += Traits::space_used(it.slot());
       }
     }
     return m;
   }
-
-  static size_t LowerBoundAllocatedByteSize(size_t size) {
-    size_t capacity = GrowthToLowerboundCapacity(size);
-    if (capacity == 0) return 0;
-    size_t m =
-        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
-    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
-    if (per_slot != ~size_t{}) {
-      m += per_slot * size;
-    }
-    return m;
-  }
 };
 
 }  // namespace hashtable_debug_internal