grpc 1.73.0 → 1.74.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Makefile +38 -17
- data/include/grpc/create_channel_from_endpoint.h +54 -0
- data/include/grpc/credentials.h +11 -5
- data/include/grpc/event_engine/event_engine.h +74 -17
- data/include/grpc/grpc_posix.h +20 -1
- data/include/grpc/impl/channel_arg_names.h +2 -4
- data/include/grpc/module.modulemap +1 -0
- data/include/grpc/support/json.h +24 -0
- data/src/core/call/interception_chain.h +7 -11
- data/src/core/channelz/channel_trace.cc +213 -115
- data/src/core/channelz/channel_trace.h +380 -86
- data/src/core/channelz/channelz.cc +270 -181
- data/src/core/channelz/channelz.h +168 -55
- data/src/core/channelz/channelz_registry.cc +2 -1
- data/src/core/channelz/channelz_registry.h +24 -0
- data/src/core/channelz/property_list.cc +357 -0
- data/src/core/channelz/property_list.h +202 -0
- data/src/core/channelz/ztrace_collector.h +3 -2
- data/src/core/client_channel/backup_poller.cc +17 -2
- data/src/core/client_channel/client_channel.cc +17 -28
- data/src/core/client_channel/client_channel_filter.cc +19 -29
- data/src/core/client_channel/config_selector.h +8 -2
- data/src/core/client_channel/dynamic_filters.cc +5 -6
- data/src/core/client_channel/dynamic_filters.h +1 -1
- data/src/core/client_channel/global_subchannel_pool.cc +4 -1
- data/src/core/client_channel/retry_filter.cc +21 -27
- data/src/core/client_channel/retry_filter.h +10 -7
- data/src/core/client_channel/retry_filter_legacy_call_data.cc +5 -5
- data/src/core/client_channel/retry_filter_legacy_call_data.h +1 -1
- data/src/core/client_channel/retry_interceptor.cc +30 -44
- data/src/core/client_channel/retry_interceptor.h +18 -17
- data/src/core/client_channel/retry_throttle.cc +46 -61
- data/src/core/client_channel/retry_throttle.h +17 -39
- data/src/core/client_channel/subchannel.cc +43 -19
- data/src/core/client_channel/subchannel.h +8 -0
- data/src/core/config/config_vars.cc +2 -0
- data/src/core/config/core_configuration.cc +1 -0
- data/src/core/config/core_configuration.h +11 -0
- data/src/core/credentials/call/call_creds_registry.h +125 -0
- data/src/core/credentials/call/call_creds_registry_init.cc +91 -0
- data/src/core/credentials/call/gcp_service_account_identity/gcp_service_account_identity_credentials.cc +6 -48
- data/src/core/credentials/call/jwt_token_file/jwt_token_file_call_credentials.cc +86 -0
- data/src/core/credentials/call/jwt_token_file/jwt_token_file_call_credentials.h +74 -0
- data/src/core/credentials/call/jwt_util.cc +70 -0
- data/src/core/credentials/call/jwt_util.h +32 -0
- data/src/core/credentials/transport/channel_creds_registry_init.cc +1 -1
- data/src/core/credentials/transport/google_default/google_default_credentials.cc +72 -4
- data/src/core/credentials/transport/ssl/ssl_credentials.cc +0 -1
- data/src/core/credentials/transport/tls/load_system_roots_supported.cc +1 -0
- data/src/core/credentials/transport/xds/xds_credentials.cc +0 -3
- data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.cc +8 -8
- data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.h +16 -16
- data/src/core/ext/filters/http/client_authority_filter.cc +2 -4
- data/src/core/ext/filters/http/message_compress/compression_filter.h +25 -22
- data/src/core/ext/filters/http/server/http_server_filter.h +12 -11
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +120 -35
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +6 -5
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +162 -115
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +0 -3
- data/src/core/ext/transport/chttp2/transport/decode_huff.cc +1239 -3514
- data/src/core/ext/transport/chttp2/transport/decode_huff.h +1008 -1486
- data/src/core/ext/transport/chttp2/transport/flow_control.h +22 -17
- data/src/core/ext/transport/chttp2/transport/frame.cc +10 -0
- data/src/core/ext/transport/chttp2/transport/frame.h +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +7 -8
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +4 -5
- data/src/core/ext/transport/chttp2/transport/header_assembler.h +299 -0
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +11 -5
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +12 -1
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.cc +1017 -0
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.h +593 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +19 -22
- data/{third_party/abseil-cpp/absl/strings/cord_buffer.cc → src/core/ext/transport/chttp2/transport/http2_stats_collector.cc} +14 -14
- data/src/core/ext/transport/chttp2/transport/http2_stats_collector.h +33 -0
- data/src/core/ext/transport/chttp2/transport/http2_status.h +6 -1
- data/src/core/ext/transport/chttp2/transport/http2_transport.cc +43 -0
- data/src/core/ext/transport/chttp2/transport/http2_transport.h +65 -0
- data/src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h +0 -29
- data/src/core/ext/transport/chttp2/transport/internal.h +18 -8
- data/src/core/ext/transport/chttp2/transport/keepalive.cc +105 -0
- data/src/core/ext/transport/chttp2/transport/keepalive.h +138 -0
- data/src/core/ext/transport/chttp2/transport/message_assembler.h +185 -0
- data/src/core/ext/transport/chttp2/transport/parsing.cc +2 -4
- data/src/core/ext/transport/chttp2/transport/ping_callbacks.h +19 -0
- data/src/core/ext/transport/chttp2/transport/ping_promise.cc +151 -0
- data/src/core/ext/transport/chttp2/transport/ping_promise.h +180 -0
- data/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc +5 -9
- data/src/core/ext/transport/chttp2/transport/ping_rate_policy.h +11 -0
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +39 -1
- data/src/core/ext/transport/chttp2/transport/transport_common.cc +19 -0
- data/src/core/ext/transport/chttp2/transport/transport_common.h +27 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +37 -11
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/channelz.upb.h +571 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/channelz.upb_minitable.c +120 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/channelz.upb_minitable.h +36 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb.h +1272 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb_minitable.c +312 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb_minitable.h +50 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb.h +984 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb_minitable.c +226 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb_minitable.h +44 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/promise.upbdefs.c +175 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/promise.upbdefs.h +82 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/property_list.upbdefs.c +135 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/property_list.upbdefs.h +67 -0
- data/src/core/filter/auth/auth_filters.h +0 -25
- data/src/core/filter/auth/client_auth_filter.cc +0 -118
- data/src/core/filter/filter_args.h +9 -23
- data/src/core/handshaker/handshaker.cc +23 -14
- data/src/core/handshaker/handshaker.h +3 -0
- data/src/core/handshaker/http_connect/http_connect_handshaker.cc +3 -1
- data/src/core/handshaker/security/legacy_secure_endpoint.cc +6 -5
- data/src/core/handshaker/security/secure_endpoint.cc +70 -25
- data/src/core/handshaker/security/security_handshaker.cc +4 -1
- data/src/core/handshaker/tcp_connect/tcp_connect_handshaker.cc +7 -1
- data/src/core/lib/channel/channel_args.cc +15 -0
- data/src/core/lib/channel/channel_args.h +3 -0
- data/src/core/lib/channel/channel_stack.cc +22 -23
- data/src/core/lib/channel/channel_stack.h +9 -7
- data/src/core/lib/channel/channel_stack_builder_impl.cc +1 -1
- data/src/core/lib/channel/channel_stack_builder_impl.h +2 -7
- data/src/core/lib/channel/promise_based_filter.h +5 -5
- data/src/core/lib/debug/trace_impl.h +0 -1
- data/src/core/lib/event_engine/ares_resolver.cc +165 -46
- data/src/core/lib/event_engine/ares_resolver.h +48 -2
- data/src/core/lib/event_engine/cf_engine/cf_engine.cc +3 -1
- data/src/core/lib/event_engine/cf_engine/cf_engine.h +1 -4
- data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.h +2 -6
- data/src/core/lib/event_engine/endpoint_channel_arg_wrapper.cc +40 -0
- data/src/core/lib/event_engine/endpoint_channel_arg_wrapper.h +60 -0
- data/src/core/lib/event_engine/event_engine.cc +7 -0
- data/src/core/lib/event_engine/extensions/channelz.h +10 -6
- data/src/core/lib/event_engine/grpc_polled_fd.h +5 -0
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc +130 -162
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.h +11 -15
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +75 -117
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +7 -9
- data/src/core/lib/event_engine/posix_engine/event_poller.h +18 -15
- data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc +0 -18
- data/src/core/lib/event_engine/posix_engine/file_descriptor_collection.cc +124 -0
- data/src/core/lib/event_engine/posix_engine/file_descriptor_collection.h +243 -0
- data/src/core/lib/event_engine/posix_engine/grpc_polled_fd_posix.h +29 -19
- data/src/core/lib/event_engine/posix_engine/internal_errqueue.cc +6 -2
- data/src/core/lib/event_engine/posix_engine/internal_errqueue.h +6 -1
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +145 -92
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +9 -19
- data/src/core/lib/event_engine/posix_engine/posix_engine.cc +333 -116
- data/src/core/lib/event_engine/posix_engine/posix_engine.h +61 -18
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc +45 -37
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener.h +6 -4
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc +32 -142
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.h +6 -5
- data/src/core/lib/event_engine/posix_engine/posix_interface.h +211 -0
- data/src/core/lib/event_engine/posix_engine/posix_interface_posix.cc +1083 -0
- data/src/core/lib/event_engine/posix_engine/posix_interface_windows.cc +281 -0
- data/src/core/lib/event_engine/posix_engine/posix_write_event_sink.cc +154 -0
- data/src/core/lib/event_engine/posix_engine/posix_write_event_sink.h +174 -0
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc +3 -719
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +10 -170
- data/src/core/lib/event_engine/posix_engine/timer_manager.cc +33 -22
- data/src/core/lib/event_engine/posix_engine/timer_manager.h +13 -11
- data/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc +117 -151
- data/src/core/lib/event_engine/posix_engine/traced_buffer_list.h +26 -94
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc +26 -25
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.h +6 -2
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc +36 -62
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.h +6 -2
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix.h +7 -6
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc +12 -6
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.h +3 -1
- data/src/core/lib/event_engine/shim.cc +9 -0
- data/src/core/lib/event_engine/shim.h +3 -0
- data/src/core/lib/event_engine/thread_pool/thread_pool.h +7 -3
- data/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc +0 -17
- data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc +4 -2
- data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h +3 -2
- data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc +4 -0
- data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.h +4 -0
- data/src/core/lib/event_engine/windows/windows_endpoint.h +2 -6
- data/src/core/lib/event_engine/windows/windows_engine.cc +0 -1
- data/src/core/lib/event_engine/windows/windows_engine.h +1 -3
- data/src/core/lib/event_engine/windows/windows_listener.cc +14 -2
- data/src/core/lib/experiments/experiments.cc +45 -93
- data/src/core/lib/experiments/experiments.h +21 -51
- data/src/core/lib/iomgr/endpoint.cc +4 -3
- data/src/core/lib/iomgr/endpoint.h +7 -4
- data/src/core/lib/iomgr/endpoint_cfstream.cc +3 -2
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +7 -2
- data/src/core/lib/iomgr/ev_poll_posix.cc +7 -2
- data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +4 -6
- data/src/core/lib/iomgr/tcp_posix.cc +12 -6
- data/src/core/lib/iomgr/tcp_windows.cc +3 -2
- data/src/core/lib/promise/activity.h +1 -0
- data/src/core/lib/promise/arena_promise.h +23 -7
- data/src/core/lib/promise/detail/promise_factory.h +10 -0
- data/src/core/lib/promise/detail/promise_like.h +118 -11
- data/src/core/lib/promise/detail/promise_variant.h +50 -0
- data/src/core/lib/promise/detail/seq_state.h +687 -548
- data/src/core/lib/promise/if.h +20 -0
- data/src/core/lib/promise/inter_activity_latch.h +147 -0
- data/src/core/lib/promise/inter_activity_mutex.h +547 -0
- data/src/core/lib/promise/loop.h +65 -3
- data/src/core/lib/promise/map.h +24 -0
- data/src/core/lib/promise/match_promise.h +103 -0
- data/src/core/lib/promise/mpsc.cc +425 -0
- data/src/core/lib/promise/mpsc.h +490 -0
- data/src/core/lib/promise/party.cc +50 -1
- data/src/core/lib/promise/party.h +66 -1
- data/src/core/lib/promise/race.h +31 -0
- data/src/core/lib/promise/seq.h +4 -1
- data/src/core/lib/promise/status_flag.h +7 -0
- data/src/core/lib/promise/try_seq.h +4 -1
- data/src/core/lib/promise/wait_set.cc +28 -0
- data/src/core/lib/promise/wait_set.h +86 -0
- data/src/core/lib/resource_quota/arena.h +19 -0
- data/src/core/lib/slice/slice.h +5 -0
- data/src/core/lib/surface/channel_create.cc +88 -13
- data/src/core/lib/surface/channel_create.h +4 -0
- data/src/core/lib/surface/channel_init.cc +164 -47
- data/src/core/lib/surface/channel_init.h +64 -1
- data/src/core/lib/surface/filter_stack_call.cc +18 -9
- data/src/core/lib/surface/init.cc +6 -15
- data/src/core/lib/surface/legacy_channel.cc +3 -5
- data/src/core/lib/surface/legacy_channel.h +3 -1
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/promise_endpoint.cc +110 -0
- data/src/core/lib/transport/promise_endpoint.h +307 -0
- data/src/core/load_balancing/child_policy_handler.cc +2 -4
- data/src/core/load_balancing/delegating_helper.h +2 -3
- data/src/core/load_balancing/health_check_client.cc +1 -5
- data/src/core/load_balancing/lb_policy.h +1 -3
- data/src/core/load_balancing/oob_backend_metric.cc +1 -5
- data/src/core/load_balancing/pick_first/pick_first.cc +3 -0
- data/src/core/load_balancing/xds/cds.cc +10 -1
- data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +2 -0
- data/src/core/resolver/xds/xds_config.cc +6 -3
- data/src/core/resolver/xds/xds_config.h +9 -4
- data/src/core/resolver/xds/xds_dependency_manager.cc +21 -6
- data/src/core/resolver/xds/xds_dependency_manager.h +2 -1
- data/src/core/resolver/xds/xds_resolver.cc +31 -11
- data/src/core/server/server.cc +83 -12
- data/src/core/server/server.h +21 -2
- data/src/core/server/xds_server_config_fetcher.cc +63 -25
- data/src/core/service_config/service_config.h +1 -1
- data/src/core/service_config/service_config_impl.h +1 -1
- data/src/core/telemetry/context_list_entry.cc +38 -0
- data/src/core/telemetry/context_list_entry.h +42 -12
- data/src/core/telemetry/stats_data.cc +233 -207
- data/src/core/telemetry/stats_data.h +250 -153
- data/src/core/telemetry/tcp_tracer.h +1 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +11 -3
- data/src/core/tsi/fake_transport_security.cc +17 -0
- data/src/core/tsi/ssl_transport_security.cc +2 -0
- data/src/core/tsi/transport_security_grpc.cc +8 -0
- data/src/core/tsi/transport_security_grpc.h +15 -0
- data/src/core/util/backoff.cc +1 -5
- data/src/core/util/backoff.h +1 -0
- data/src/core/util/down_cast.h +1 -1
- data/src/core/util/function_signature.h +15 -1
- data/src/core/util/http_client/httpcli.cc +12 -5
- data/src/core/util/http_client/httpcli.h +4 -1
- data/src/core/util/latent_see.h +8 -5
- data/src/core/util/log.cc +4 -0
- data/src/core/util/memory_usage.h +268 -0
- data/src/core/util/per_cpu.cc +2 -0
- data/src/core/util/per_cpu.h +7 -0
- data/src/core/util/shared_bit_gen.h +20 -0
- data/src/core/util/single_set_ptr.h +2 -2
- data/src/core/util/upb_utils.h +42 -0
- data/src/core/util/uri.cc +3 -2
- data/src/core/util/useful.h +53 -2
- data/src/core/util/wait_for_single_owner.cc +31 -0
- data/src/core/util/wait_for_single_owner.h +24 -0
- data/src/core/xds/grpc/xds_bootstrap_grpc.cc +2 -0
- data/src/core/xds/grpc/xds_bootstrap_grpc.h +5 -0
- data/src/core/xds/grpc/xds_client_grpc.cc +6 -2
- data/src/core/xds/grpc/xds_common_types_parser.cc +138 -50
- data/src/core/xds/grpc/xds_common_types_parser.h +12 -0
- data/src/core/xds/grpc/xds_http_filter.h +7 -0
- data/src/core/xds/grpc/xds_http_gcp_authn_filter.cc +22 -0
- data/src/core/xds/grpc/xds_http_gcp_authn_filter.h +3 -0
- data/src/core/xds/grpc/xds_route_config_parser.cc +15 -38
- data/src/core/xds/grpc/xds_server_grpc.cc +63 -13
- data/src/core/xds/grpc/xds_server_grpc.h +10 -2
- data/src/core/xds/grpc/xds_server_grpc_interface.h +4 -0
- data/src/core/xds/grpc/xds_transport_grpc.cc +18 -0
- data/src/core/xds/xds_client/xds_bootstrap.h +2 -0
- data/src/core/xds/xds_client/xds_client.cc +26 -5
- data/src/ruby/ext/grpc/extconf.rb +2 -0
- data/src/ruby/ext/grpc/rb_call.c +1 -8
- data/src/ruby/ext/grpc/rb_channel.c +72 -568
- data/src/ruby/ext/grpc/rb_channel.h +0 -3
- data/src/ruby/ext/grpc/rb_completion_queue.c +26 -14
- data/src/ruby/ext/grpc/rb_completion_queue.h +1 -7
- data/src/ruby/ext/grpc/rb_grpc.c +9 -5
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +1 -1
- data/src/ruby/ext/grpc/rb_loader.c +0 -4
- data/src/ruby/ext/grpc/rb_server.c +31 -50
- data/src/ruby/lib/grpc/generic/client_stub.rb +4 -4
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/core_spec.rb +22 -0
- data/src/ruby/spec/generic/active_call_spec.rb +1 -1
- data/third_party/abseil-cpp/absl/algorithm/container.h +2 -19
- data/third_party/abseil-cpp/absl/base/attributes.h +76 -7
- data/third_party/abseil-cpp/absl/base/call_once.h +11 -12
- data/third_party/abseil-cpp/absl/base/config.h +20 -129
- data/third_party/abseil-cpp/absl/base/{internal/fast_type_id.h → fast_type_id.h} +11 -16
- data/third_party/abseil-cpp/absl/base/internal/cycleclock.cc +0 -5
- data/third_party/abseil-cpp/absl/base/internal/cycleclock_config.h +7 -7
- data/third_party/abseil-cpp/absl/base/internal/endian.h +34 -38
- data/third_party/abseil-cpp/absl/base/internal/iterator_traits.h +71 -0
- data/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc +6 -5
- data/third_party/abseil-cpp/absl/base/internal/{nullability_impl.h → nullability_deprecated.h} +45 -8
- data/third_party/abseil-cpp/absl/base/internal/spinlock.cc +0 -9
- data/third_party/abseil-cpp/absl/base/internal/spinlock.h +3 -13
- data/third_party/abseil-cpp/absl/base/internal/unaligned_access.h +6 -6
- data/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.h +8 -3
- data/third_party/abseil-cpp/absl/base/no_destructor.h +11 -32
- data/third_party/abseil-cpp/absl/base/nullability.h +84 -72
- data/third_party/abseil-cpp/absl/base/options.h +3 -80
- data/third_party/abseil-cpp/absl/base/policy_checks.h +7 -7
- data/third_party/abseil-cpp/absl/cleanup/cleanup.h +1 -3
- data/third_party/abseil-cpp/absl/cleanup/internal/cleanup.h +3 -4
- data/third_party/abseil-cpp/absl/container/btree_map.h +4 -2
- data/third_party/abseil-cpp/absl/container/btree_set.h +4 -2
- data/third_party/abseil-cpp/absl/container/fixed_array.h +7 -14
- data/third_party/abseil-cpp/absl/container/flat_hash_map.h +5 -0
- data/third_party/abseil-cpp/absl/container/flat_hash_set.h +6 -1
- data/third_party/abseil-cpp/absl/container/inlined_vector.h +8 -5
- data/third_party/abseil-cpp/absl/container/internal/btree.h +132 -29
- data/third_party/abseil-cpp/absl/container/internal/btree_container.h +175 -71
- data/third_party/abseil-cpp/absl/container/internal/common.h +43 -0
- data/third_party/abseil-cpp/absl/container/internal/common_policy_traits.h +1 -2
- data/third_party/abseil-cpp/absl/container/internal/container_memory.h +9 -10
- data/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h +1 -8
- data/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h +0 -4
- data/third_party/abseil-cpp/absl/container/internal/hashtable_control_bytes.h +527 -0
- data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +20 -4
- data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h +31 -12
- data/third_party/abseil-cpp/absl/container/internal/inlined_vector.h +2 -7
- data/third_party/abseil-cpp/absl/container/internal/layout.h +26 -42
- data/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h +199 -68
- data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +1354 -183
- data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +881 -1424
- data/third_party/abseil-cpp/absl/container/internal/raw_hash_set_resize_impl.h +80 -0
- data/third_party/abseil-cpp/absl/crc/crc32c.cc +0 -4
- data/third_party/abseil-cpp/absl/crc/crc32c.h +7 -5
- data/third_party/abseil-cpp/absl/crc/internal/crc32_x86_arm_combined_simd.h +0 -22
- data/third_party/abseil-cpp/absl/crc/internal/crc_x86_arm_combined.cc +45 -74
- data/third_party/abseil-cpp/absl/debugging/internal/addresses.h +57 -0
- data/third_party/abseil-cpp/absl/debugging/internal/decode_rust_punycode.cc +1 -1
- data/third_party/abseil-cpp/absl/debugging/internal/decode_rust_punycode.h +5 -5
- data/third_party/abseil-cpp/absl/debugging/internal/demangle.cc +8 -35
- data/third_party/abseil-cpp/absl/debugging/internal/demangle_rust.cc +16 -16
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +40 -37
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_arm-inl.inc +16 -7
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_emscripten-inl.inc +14 -5
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_generic-inl.inc +10 -4
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +27 -16
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_riscv-inl.inc +13 -4
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc +4 -3
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_win32-inl.inc +15 -28
- data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_x86-inl.inc +19 -9
- data/third_party/abseil-cpp/absl/debugging/stacktrace.cc +144 -27
- data/third_party/abseil-cpp/absl/debugging/stacktrace.h +73 -5
- data/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +19 -9
- data/third_party/abseil-cpp/absl/debugging/symbolize_emscripten.inc +3 -2
- data/third_party/abseil-cpp/absl/debugging/symbolize_win32.inc +25 -6
- data/third_party/abseil-cpp/absl/flags/commandlineflag.h +2 -2
- data/third_party/abseil-cpp/absl/flags/flag.h +4 -3
- data/third_party/abseil-cpp/absl/flags/internal/commandlineflag.h +2 -2
- data/third_party/abseil-cpp/absl/flags/internal/flag.cc +2 -1
- data/third_party/abseil-cpp/absl/flags/internal/flag.h +7 -6
- data/third_party/abseil-cpp/absl/flags/internal/registry.h +4 -3
- data/third_party/abseil-cpp/absl/flags/reflection.cc +2 -3
- data/third_party/abseil-cpp/absl/functional/any_invocable.h +8 -10
- data/third_party/abseil-cpp/absl/functional/function_ref.h +2 -9
- data/third_party/abseil-cpp/absl/functional/internal/any_invocable.h +110 -226
- data/third_party/abseil-cpp/absl/functional/internal/front_binder.h +10 -12
- data/third_party/abseil-cpp/absl/functional/internal/function_ref.h +2 -5
- data/third_party/abseil-cpp/absl/hash/hash.h +18 -0
- data/third_party/abseil-cpp/absl/hash/internal/hash.cc +1 -5
- data/third_party/abseil-cpp/absl/hash/internal/hash.h +86 -61
- data/third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc +25 -68
- data/third_party/abseil-cpp/absl/hash/internal/low_level_hash.h +2 -6
- data/third_party/abseil-cpp/absl/hash/internal/weakly_mixed_integer.h +38 -0
- data/third_party/abseil-cpp/absl/log/check.h +2 -1
- data/third_party/abseil-cpp/absl/log/globals.h +4 -5
- data/third_party/abseil-cpp/absl/log/internal/append_truncated.h +28 -0
- data/third_party/abseil-cpp/absl/log/internal/check_op.cc +22 -22
- data/third_party/abseil-cpp/absl/log/internal/check_op.h +65 -62
- data/third_party/abseil-cpp/absl/log/internal/conditions.cc +5 -3
- data/third_party/abseil-cpp/absl/log/internal/conditions.h +7 -2
- data/third_party/abseil-cpp/absl/log/internal/log_message.cc +85 -43
- data/third_party/abseil-cpp/absl/log/internal/log_message.h +84 -59
- data/third_party/abseil-cpp/absl/log/internal/nullstream.h +1 -0
- data/third_party/abseil-cpp/absl/log/internal/proto.cc +3 -2
- data/third_party/abseil-cpp/absl/log/internal/proto.h +3 -3
- data/third_party/abseil-cpp/absl/log/internal/strip.h +4 -12
- data/third_party/abseil-cpp/absl/log/internal/vlog_config.h +8 -6
- data/third_party/abseil-cpp/absl/log/internal/voidify.h +10 -4
- data/third_party/abseil-cpp/absl/log/log.h +48 -35
- data/third_party/abseil-cpp/absl/log/log_sink_registry.h +2 -2
- data/third_party/abseil-cpp/absl/meta/type_traits.h +46 -175
- data/third_party/abseil-cpp/absl/numeric/bits.h +68 -2
- data/third_party/abseil-cpp/absl/numeric/int128.cc +0 -52
- data/third_party/abseil-cpp/absl/numeric/internal/bits.h +7 -3
- data/third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc +1 -1
- data/third_party/abseil-cpp/absl/random/bit_gen_ref.h +10 -11
- data/third_party/abseil-cpp/absl/random/distributions.h +6 -8
- data/third_party/abseil-cpp/absl/random/gaussian_distribution.h +1 -1
- data/third_party/abseil-cpp/absl/random/internal/distribution_caller.h +5 -6
- data/third_party/abseil-cpp/absl/random/internal/{pool_urbg.cc → entropy_pool.cc} +22 -90
- data/third_party/abseil-cpp/absl/random/internal/entropy_pool.h +35 -0
- data/third_party/abseil-cpp/absl/random/internal/nonsecure_base.h +5 -6
- data/third_party/abseil-cpp/absl/random/internal/randen_detect.cc +1 -1
- data/third_party/abseil-cpp/absl/random/internal/seed_material.cc +20 -12
- data/third_party/abseil-cpp/absl/random/internal/seed_material.h +5 -5
- data/third_party/abseil-cpp/absl/random/random.h +88 -53
- data/third_party/abseil-cpp/absl/random/seed_sequences.cc +6 -2
- data/third_party/abseil-cpp/absl/status/internal/status_internal.cc +3 -4
- data/third_party/abseil-cpp/absl/status/internal/status_internal.h +3 -4
- data/third_party/abseil-cpp/absl/status/internal/statusor_internal.h +4 -3
- data/third_party/abseil-cpp/absl/status/status.cc +4 -8
- data/third_party/abseil-cpp/absl/status/status.h +8 -8
- data/third_party/abseil-cpp/absl/status/status_payload_printer.h +2 -2
- data/third_party/abseil-cpp/absl/status/statusor.cc +2 -2
- data/third_party/abseil-cpp/absl/status/statusor.h +6 -6
- data/third_party/abseil-cpp/absl/strings/ascii.cc +9 -9
- data/third_party/abseil-cpp/absl/strings/ascii.h +18 -18
- data/third_party/abseil-cpp/absl/strings/charconv.cc +21 -22
- data/third_party/abseil-cpp/absl/strings/charconv.h +5 -5
- data/third_party/abseil-cpp/absl/strings/cord.cc +54 -58
- data/third_party/abseil-cpp/absl/strings/cord.h +94 -83
- data/third_party/abseil-cpp/absl/strings/cord_analysis.cc +11 -11
- data/third_party/abseil-cpp/absl/strings/cord_analysis.h +3 -3
- data/third_party/abseil-cpp/absl/strings/escaping.cc +130 -149
- data/third_party/abseil-cpp/absl/strings/escaping.h +9 -10
- data/third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc +1 -1
- data/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +6 -8
- data/third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc +0 -4
- data/third_party/abseil-cpp/absl/strings/internal/cordz_info.cc +0 -4
- data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +7 -63
- data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +1 -11
- data/third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc +0 -22
- data/third_party/abseil-cpp/absl/strings/internal/str_format/output.cc +5 -3
- data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h +4 -2
- data/third_party/abseil-cpp/absl/strings/internal/str_join_internal.h +3 -3
- data/third_party/abseil-cpp/absl/strings/internal/string_constant.h +0 -5
- data/third_party/abseil-cpp/absl/strings/internal/utf8.cc +96 -1
- data/third_party/abseil-cpp/absl/strings/internal/utf8.h +15 -1
- data/third_party/abseil-cpp/absl/strings/numbers.cc +53 -32
- data/third_party/abseil-cpp/absl/strings/numbers.h +87 -58
- data/third_party/abseil-cpp/absl/strings/str_cat.cc +6 -7
- data/third_party/abseil-cpp/absl/strings/str_cat.h +32 -32
- data/third_party/abseil-cpp/absl/strings/str_format.h +18 -18
- data/third_party/abseil-cpp/absl/strings/str_replace.cc +3 -3
- data/third_party/abseil-cpp/absl/strings/str_replace.h +6 -6
- data/third_party/abseil-cpp/absl/strings/string_view.cc +4 -9
- data/third_party/abseil-cpp/absl/strings/string_view.h +27 -32
- data/third_party/abseil-cpp/absl/strings/strip.h +4 -4
- data/third_party/abseil-cpp/absl/strings/substitute.cc +5 -4
- data/third_party/abseil-cpp/absl/strings/substitute.h +66 -64
- data/third_party/abseil-cpp/absl/synchronization/internal/futex_waiter.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.cc +0 -5
- data/third_party/abseil-cpp/absl/synchronization/internal/pthread_waiter.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/internal/sem_waiter.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/internal/stdcpp_waiter.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/internal/waiter_base.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/internal/win32_waiter.cc +0 -4
- data/third_party/abseil-cpp/absl/synchronization/mutex.cc +1 -1
- data/third_party/abseil-cpp/absl/synchronization/mutex.h +97 -69
- data/third_party/abseil-cpp/absl/synchronization/notification.h +1 -1
- data/third_party/abseil-cpp/absl/time/civil_time.cc +1 -0
- data/third_party/abseil-cpp/absl/time/duration.cc +12 -7
- data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/time_zone.h +1 -1
- data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc +90 -111
- data/third_party/abseil-cpp/absl/time/time.h +20 -15
- data/third_party/abseil-cpp/absl/types/optional.h +7 -747
- data/third_party/abseil-cpp/absl/types/span.h +13 -11
- data/third_party/abseil-cpp/absl/types/variant.h +5 -784
- data/third_party/abseil-cpp/absl/utility/utility.h +10 -185
- metadata +72 -20
- data/src/core/lib/event_engine/forkable.cc +0 -105
- data/src/core/lib/event_engine/forkable.h +0 -67
- data/src/core/lib/iomgr/python_util.h +0 -46
- data/third_party/abseil-cpp/absl/base/internal/inline_variable.h +0 -108
- data/third_party/abseil-cpp/absl/base/internal/invoke.h +0 -241
- data/third_party/abseil-cpp/absl/log/log_entry.cc +0 -41
- data/third_party/abseil-cpp/absl/random/internal/pool_urbg.h +0 -131
- data/third_party/abseil-cpp/absl/types/bad_optional_access.cc +0 -66
- data/third_party/abseil-cpp/absl/types/bad_optional_access.h +0 -78
- data/third_party/abseil-cpp/absl/types/bad_variant_access.cc +0 -82
- data/third_party/abseil-cpp/absl/types/bad_variant_access.h +0 -82
- data/third_party/abseil-cpp/absl/types/internal/optional.h +0 -352
- data/third_party/abseil-cpp/absl/types/internal/variant.h +0 -1622
data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h

```diff
@@ -196,6 +196,7 @@
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
+#include "absl/base/internal/iterator_traits.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/base/optimization.h"
@@ -208,30 +209,17 @@
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"
 #include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_control_bytes.h"
 #include "absl/container/internal/hashtable_debug_hooks.h"
 #include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/functional/function_ref.h"
 #include "absl/hash/hash.h"
+#include "absl/hash/internal/weakly_mixed_integer.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
 #include "absl/numeric/bits.h"
 #include "absl/utility/utility.h"
 
-#ifdef ABSL_INTERNAL_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
-#include <arm_neon.h>
-#endif
-
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace container_internal {
@@ -278,6 +266,15 @@ constexpr bool SwisstableGenerationsEnabled() { return false; }
 constexpr size_t NumGenerationBytes() { return 0; }
 #endif
 
+// Returns true if we should assert that the table is not accessed after it has
+// been destroyed or during the destruction of the table.
+constexpr bool SwisstableAssertAccessToDestroyedTable() {
+#ifndef NDEBUG
+  return true;
+#endif
+  return SwisstableGenerationsEnabled();
+}
+
 template <typename AllocType>
 void SwapAlloc(AllocType& lhs, AllocType& rhs,
                std::true_type /* propagate_on_container_swap */) {
```
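The predicate added in the hunk above folds to a compile-time constant: with `NDEBUG` undefined the `#ifndef` branch returns `true` unconditionally, and in release builds it falls through to the generations check. A minimal standalone sketch of the same pattern (illustration only, not the Abseil code; `GenerationsEnabled` is a hypothetical stand-in):

```cpp
#include <cstdio>

// Hypothetical stand-in for SwisstableGenerationsEnabled().
constexpr bool GenerationsEnabled() { return false; }

// Same shape as the diff's SwisstableAssertAccessToDestroyedTable(): the
// preprocessor picks which `return` is compiled, so the result is a
// compile-time constant either way.
constexpr bool AssertAccessToDestroyedTable() {
#ifndef NDEBUG
  return true;  // debug build: always check
#endif
  return GenerationsEnabled();  // release build: only with generations on
}

int main() {
  // Prints 1 in a debug build, 0 when compiled with -DNDEBUG.
  std::printf("%d\n", AssertAccessToDestroyedTable());
}
```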
```diff
@@ -383,163 +380,6 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
   return false;
 }
 
-template <typename T>
-uint32_t TrailingZeros(T x) {
-  ABSL_ASSUME(x != 0);
-  return static_cast<uint32_t>(countr_zero(x));
-}
-
-// 8 bytes bitmask with most significant bit set for every byte.
-constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
-
-// An abstract bitmask, such as that emitted by a SIMD instruction.
-//
-// Specifically, this type implements a simple bitset whose representation is
-// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
-// of abstract bits in the bitset, while `Shift` is the log-base-two of the
-// width of an abstract bit in the representation.
-// This mask provides operations for any number of real bits set in an abstract
-// bit. To add iteration on top of that, implementation must guarantee no more
-// than the most significant real bit is set in a set abstract bit.
-template <class T, int SignificantBits, int Shift = 0>
-class NonIterableBitMask {
- public:
-  explicit NonIterableBitMask(T mask) : mask_(mask) {}
-
-  explicit operator bool() const { return this->mask_ != 0; }
-
-  // Returns the index of the lowest *abstract* bit set in `self`.
-  uint32_t LowestBitSet() const {
-    return container_internal::TrailingZeros(mask_) >> Shift;
-  }
-
-  // Returns the index of the highest *abstract* bit set in `self`.
-  uint32_t HighestBitSet() const {
-    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
-  }
-
-  // Returns the number of trailing zero *abstract* bits.
-  uint32_t TrailingZeros() const {
-    return container_internal::TrailingZeros(mask_) >> Shift;
-  }
-
-  // Returns the number of leading zero *abstract* bits.
-  uint32_t LeadingZeros() const {
-    constexpr int total_significant_bits = SignificantBits << Shift;
-    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
-    return static_cast<uint32_t>(
-               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
-           Shift;
-  }
-
-  T mask_;
-};
-
-// Mask that can be iterable
-//
-// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
-// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
-// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
-// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
-// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
-// non zero abstract bit is allowed to have additional bits
-// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
-//
-// For example:
-//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
-//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
-template <class T, int SignificantBits, int Shift = 0,
-          bool NullifyBitsOnIteration = false>
-class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
-  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
-  static_assert(std::is_unsigned<T>::value, "");
-  static_assert(Shift == 0 || Shift == 3, "");
-  static_assert(!NullifyBitsOnIteration || Shift == 3, "");
-
- public:
-  explicit BitMask(T mask) : Base(mask) {
-    if (Shift == 3 && !NullifyBitsOnIteration) {
-      ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
-    }
-  }
-  // BitMask is an iterator over the indices of its abstract bits.
-  using value_type = int;
-  using iterator = BitMask;
-  using const_iterator = BitMask;
-
-  BitMask& operator++() {
-    if (Shift == 3 && NullifyBitsOnIteration) {
-      this->mask_ &= kMsbs8Bytes;
-    }
-    this->mask_ &= (this->mask_ - 1);
-    return *this;
-  }
-
-  uint32_t operator*() const { return Base::LowestBitSet(); }
-
-  BitMask begin() const { return *this; }
-  BitMask end() const { return BitMask(0); }
-
- private:
-  friend bool operator==(const BitMask& a, const BitMask& b) {
-    return a.mask_ == b.mask_;
-  }
-  friend bool operator!=(const BitMask& a, const BitMask& b) {
-    return a.mask_ != b.mask_;
-  }
-};
-
-using h2_t = uint8_t;
-
-// The values here are selected for maximum performance. See the static asserts
-// below for details.
-
-// A `ctrl_t` is a single control byte, which can have one of four
-// states: empty, deleted, full (which has an associated seven-bit h2_t value)
-// and the sentinel. They have the following bit patterns:
-//
-//      empty: 1 0 0 0 0 0 0 0
-//    deleted: 1 1 1 1 1 1 1 0
-//       full: 0 h h h h h h h  // h represents the hash bits.
-//   sentinel: 1 1 1 1 1 1 1 1
-//
-// These values are specifically tuned for SSE-flavored SIMD.
-// The static_asserts below detail the source of these choices.
-//
-// We use an enum class so that when strict aliasing is enabled, the compiler
-// knows ctrl_t doesn't alias other types.
-enum class ctrl_t : int8_t {
-  kEmpty = -128,   // 0b10000000
-  kDeleted = -2,   // 0b11111110
-  kSentinel = -1,  // 0b11111111
-};
-static_assert(
-    (static_cast<int8_t>(ctrl_t::kEmpty) &
-     static_cast<int8_t>(ctrl_t::kDeleted) &
-     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
-    "Special markers need to have the MSB to make checking for them efficient");
-static_assert(
-    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
-    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
-    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(
-    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
-    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
-    "registers (pcmpeqd xmm, xmm)");
-static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
-              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
-              "existence efficient (psignb xmm, xmm)");
-static_assert(
-    (~static_cast<int8_t>(ctrl_t::kEmpty) &
-     ~static_cast<int8_t>(ctrl_t::kDeleted) &
-     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
-    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
-    "shared by ctrl_t::kSentinel to make the scalar test for "
-    "MaskEmptyOrDeleted() efficient");
-static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
-              "ctrl_t::kDeleted must be -2 to make the implementation of "
-              "ConvertSpecialToEmptyAndFullToDeleted efficient");
-
 // See definition comment for why this is size 32.
 ABSL_DLL extern const ctrl_t kEmptyGroup[32];
 
```
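The block removed above (the `BitMask` family, `ctrl_t`, and its static_asserts) is now provided by the new `hashtable_control_bytes.h` header added in the include hunk (+527 lines in the file list). The removed comments document the key trick: a `BitMask` is its own iterator over the indices of its set abstract bits, so `BitMask<uint32_t, 16>(0b101)` yields 0 and 2. A self-contained sketch of that iteration idea (plain `uint32_t`, ignoring the `Shift`/NEON cases; not Abseil's class):

```cpp
#include <bit>      // std::countr_zero (C++20)
#include <cstdint>
#include <cstdio>

int main() {
  // Iterate the indices of set bits the way the removed BitMask does:
  // take the lowest set bit's index, then clear it with mask &= mask - 1.
  uint32_t mask = 0b101;  // bits 0 and 2 set, as in the removed example
  while (mask != 0) {
    int index = std::countr_zero(mask);  // index of the lowest set bit
    std::printf("%d\n", index);          // prints 0, then 2
    mask &= mask - 1;                    // clear the lowest set bit
  }
}
```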
@@ -585,360 +425,117 @@ inline bool IsEmptyGeneration(const GenerationType* generation) {
|
|
585
425
|
return *generation == SentinelEmptyGeneration();
|
586
426
|
}
|
587
427
|
|
588
|
-
//
|
589
|
-
//
|
590
|
-
|
591
|
-
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
}
|
428
|
+
// We only allow a maximum of 1 SOO element, which makes the implementation
|
429
|
+
// much simpler. Complications with multiple SOO elements include:
|
430
|
+
// - Satisfying the guarantee that erasing one element doesn't invalidate
|
431
|
+
// iterators to other elements means we would probably need actual SOO
|
432
|
+
// control bytes.
|
433
|
+
// - In order to prevent user code from depending on iteration order for small
|
434
|
+
// tables, we would need to randomize the iteration order somehow.
|
435
|
+
constexpr size_t SooCapacity() { return 1; }
|
436
|
+
// Sentinel type to indicate SOO CommonFields construction.
|
437
|
+
struct soo_tag_t {};
|
438
|
+
// Sentinel type to indicate SOO CommonFields construction with full size.
|
439
|
+
struct full_soo_tag_t {};
|
440
|
+
// Sentinel type to indicate non-SOO CommonFields construction.
|
441
|
+
struct non_soo_tag_t {};
|
442
|
+
// Sentinel value to indicate an uninitialized value explicitly.
|
443
|
+
struct uninitialized_tag_t {};
|
444
|
+
// Sentinel value to indicate creation of an empty table without a seed.
|
445
|
+
struct no_seed_empty_tag_t {};
|
602
446
|
|
603
|
-
//
|
604
|
-
//
|
605
|
-
//
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
|
614
|
-
return mask.LowestBitSet();
|
615
|
-
#else
|
616
|
-
return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
|
617
|
-
? mask.HighestBitSet()
|
618
|
-
: mask.LowestBitSet();
|
619
|
-
#endif
|
620
|
-
}
|
447
|
+
// Per table hash salt. This gets mixed into H1 to randomize iteration order
|
448
|
+
// per-table.
|
449
|
+
// The seed is needed to ensure non-determinism of iteration order.
|
450
|
+
class PerTableSeed {
|
451
|
+
public:
|
452
|
+
// The number of bits in the seed.
|
453
|
+
// It is big enough to ensure non-determinism of iteration order.
|
454
|
+
// We store the seed inside a uint64_t together with size and other metadata.
|
455
|
+
// Using 16 bits allows us to save one `and` instruction in H1 (we use movzwl
|
456
|
+
// instead of movq+and).
|
457
|
+
static constexpr size_t kBitCount = 16;
|
621
458
|
|
622
|
-
// Returns
|
623
|
-
|
624
|
-
//
|
625
|
-
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
|
626
|
-
// non-determinism of iteration order in most cases.
|
627
|
-
inline size_t PerTableSalt(const ctrl_t* ctrl) {
|
628
|
-
// The low bits of the pointer have little or no entropy because of
|
629
|
-
// alignment. We shift the pointer to try to use higher entropy bits. A
|
630
|
-
// good number seems to be 12 bits, because that aligns with page size.
|
631
|
-
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
|
632
|
-
}
|
633
|
-
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
|
634
|
-
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
|
635
|
-
return (hash >> 7) ^ PerTableSalt(ctrl);
|
636
|
-
}
|
459
|
+
// Returns the seed for the table. Only the lowest kBitCount are non zero.
|
460
|
+
size_t seed() const { return seed_; }
|
637
461
|
|
638
|
-
|
639
|
-
|
640
|
-
|
641
|
-
inline h2_t H2(size_t hash) { return hash & 0x7F; }
|
462
|
+
private:
|
463
|
+
friend class HashtableSize;
|
464
|
+
explicit PerTableSeed(size_t seed) : seed_(seed) {}
|
642
465
|
|
643
|
-
|
644
|
-
|
645
|
-
inline bool IsFull(ctrl_t c) {
|
646
|
-
// Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
|
647
|
-
// is not a value in the enum. Both ways are equivalent, but this way makes
|
648
|
-
// linters happier.
|
649
|
-
return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
|
650
|
-
}
|
651
|
-
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
|
652
|
-
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
|
466
|
+
const size_t seed_;
|
467
|
+
};
|
653
468
|
|
654
|
-
|
655
|
-
|
656
|
-
|
657
|
-
|
658
|
-
|
659
|
-
|
660
|
-
|
661
|
-
//
|
662
|
-
// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
|
663
|
-
// * _mm_and_si128: Ands two i128s together.
|
664
|
-
// * _mm_or_si128: Ors two i128s together.
|
665
|
-
// * _mm_andnot_si128: And-nots two i128s together.
|
666
|
-
//
|
667
|
-
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
|
668
|
-
// filling each lane with 0x00 or 0xff.
|
669
|
-
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
|
670
|
-
//
|
671
|
-
// * _mm_loadu_si128: Performs an unaligned load of an i128.
|
672
|
-
// * _mm_storeu_si128: Performs an unaligned store of an i128.
|
673
|
-
//
|
674
|
-
// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
|
675
|
-
// argument if the corresponding lane of the second
|
676
|
-
// argument is positive, negative, or zero, respectively.
|
677
|
-
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
|
678
|
-
// bitmask consisting of those bits.
|
679
|
-
// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
|
680
|
-
// four bits of each i8 lane in the second argument as
|
681
|
-
// indices.
|
682
|
-
|
683
|
-
// https://github.com/abseil/abseil-cpp/issues/209
|
684
|
-
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
|
685
|
-
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
|
686
|
-
// Work around this by using the portable implementation of Group
|
687
|
-
// when using -funsigned-char under GCC.
|
688
|
-
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
|
689
|
-
#if defined(__GNUC__) && !defined(__clang__)
|
690
|
-
if (std::is_unsigned<char>::value) {
|
691
|
-
const __m128i mask = _mm_set1_epi8(0x80);
|
692
|
-
const __m128i diff = _mm_subs_epi8(b, a);
|
693
|
-
return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
|
694
|
-
}
|
695
|
-
#endif
|
696
|
-
return _mm_cmpgt_epi8(a, b);
|
469
|
+
// Returns next per-table seed.
|
470
|
+
inline uint16_t NextSeed() {
|
471
|
+
static_assert(PerTableSeed::kBitCount == 16);
|
472
|
+
thread_local uint16_t seed =
|
473
|
+
static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
|
474
|
+
seed += uint16_t{0xad53};
|
475
|
+
return seed;
|
697
476
|
}
|
698
477
|
|
699
|
-
|
700
|
-
|
478
|
+
// The size and also has additionally
|
479
|
+
// 1) one bit that stores whether we have infoz.
|
480
|
+
// 2) PerTableSeed::kBitCount bits for the seed.
|
481
|
+
class HashtableSize {
|
482
|
+
public:
|
483
|
+
static constexpr size_t kSizeBitCount = 64 - PerTableSeed::kBitCount - 1;
|
701
484
|
|
702
|
-
explicit
|
703
|
-
|
704
|
-
}
|
485
|
+
explicit HashtableSize(uninitialized_tag_t) {}
|
486
|
+
explicit HashtableSize(no_seed_empty_tag_t) : data_(0) {}
|
487
|
+
explicit HashtableSize(full_soo_tag_t) : data_(kSizeOneNoMetadata) {}
|
705
488
|
|
706
|
-
// Returns
|
707
|
-
|
708
|
-
|
709
|
-
|
710
|
-
|
489
|
+
// Returns actual size of the table.
|
490
|
+
size_t size() const { return static_cast<size_t>(data_ >> kSizeShift); }
|
491
|
+
void increment_size() { data_ += kSizeOneNoMetadata; }
|
492
|
+
void increment_size(size_t size) {
|
493
|
+
data_ += static_cast<uint64_t>(size) * kSizeOneNoMetadata;
|
711
494
|
}
|
495
|
+
void decrement_size() { data_ -= kSizeOneNoMetadata; }
|
496
|
+
// Returns true if the table is empty.
|
497
|
+
bool empty() const { return data_ < kSizeOneNoMetadata; }
|
498
|
+
// Sets the size to zero, but keeps all the metadata bits.
|
499
|
+
void set_size_to_zero_keep_metadata() { data_ = data_ & kMetadataMask; }
|
712
500
|
|
713
|
-
|
714
|
-
|
715
|
-
#ifdef ABSL_INTERNAL_HAVE_SSSE3
|
716
|
-
// This only works because ctrl_t::kEmpty is -128.
|
717
|
-
return NonIterableBitMask<uint16_t, kWidth>(
|
718
|
-
static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
|
719
|
-
#else
|
720
|
-
auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
|
721
|
-
return NonIterableBitMask<uint16_t, kWidth>(
|
722
|
-
static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
|
723
|
-
#endif
|
501
|
+
PerTableSeed seed() const {
|
502
|
+
return PerTableSeed(static_cast<size_t>(data_) & kSeedMask);
|
724
503
|
}
|
725
504
|
|
726
|
-
|
727
|
-
|
728
|
-
// original and mirrored.
|
729
|
-
BitMask<uint16_t, kWidth> MaskFull() const {
|
730
|
-
return BitMask<uint16_t, kWidth>(
|
731
|
-
static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
|
505
|
+
void generate_new_seed() {
|
506
|
+
data_ = (data_ & ~kSeedMask) ^ uint64_t{NextSeed()};
|
732
507
|
}
|
733
508
|
|
734
|
-
// Returns
|
735
|
-
|
736
|
-
|
737
|
-
auto MaskNonFull() const {
|
738
|
-
return BitMask<uint16_t, kWidth>(
|
739
|
-
static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
|
740
|
-
}
|
741
|
-
|
742
|
-
// Returns a bitmask representing the positions of empty or deleted slots.
|
743
|
-
NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
|
744
|
-
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
|
745
|
-
return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
|
746
|
-
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
|
509
|
+
// Returns true if the table has infoz.
|
510
|
+
bool has_infoz() const {
|
511
|
+
return ABSL_PREDICT_FALSE((data_ & kHasInfozMask) != 0);
|
747
512
|
}
|
748
513
|
|
749
|
-
//
|
750
|
-
|
751
|
-
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
|
752
|
-
return TrailingZeros(static_cast<uint32_t>(
|
753
|
-
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
|
754
|
-
}
|
514
|
+
// Sets the has_infoz bit.
|
515
|
+
void set_has_infoz() { data_ |= kHasInfozMask; }
|
755
516
|
|
756
|
-
void
|
757
|
-
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
|
758
|
-
auto x126 = _mm_set1_epi8(126);
|
759
|
-
#ifdef ABSL_INTERNAL_HAVE_SSSE3
|
760
|
-
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
|
761
|
-
#else
|
762
|
-
auto zero = _mm_setzero_si128();
|
763
|
-
auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
|
764
|
-
auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
|
765
|
-
#endif
|
766
|
-
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
|
767
|
-
}
|
517
|
+
void set_no_seed_for_testing() { data_ &= ~kSeedMask; }
|
768
518
|
|
769
|
-
|
770
|
-
|
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
|
775
|
-
|
776
|
-
|
777
|
-
|
778
|
-
ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
|
779
|
-
}
|
780
|
-
|
781
|
-
auto Match(h2_t hash) const {
|
782
|
-
uint8x8_t dup = vdup_n_u8(hash);
|
783
|
-
auto mask = vceq_u8(ctrl, dup);
|
784
|
-
return BitMask<uint64_t, kWidth, /*Shift=*/3,
|
785
|
-
/*NullifyBitsOnIteration=*/true>(
|
786
|
-
vget_lane_u64(vreinterpret_u64_u8(mask), 0));
|
787
|
-
}
|
788
|
-
|
789
|
-
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
|
790
|
-
uint64_t mask =
|
791
|
-
vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
|
792
|
-
vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
|
793
|
-
vreinterpret_s8_u8(ctrl))),
|
794
|
-
0);
|
795
|
-
return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
|
796
|
-
}
|
797
|
-
|
798
|
-
// Returns a bitmask representing the positions of full slots.
|
799
|
-
// Note: for `is_small()` tables group may contain the "same" slot twice:
|
800
|
-
// original and mirrored.
|
801
|
-
auto MaskFull() const {
|
802
|
-
uint64_t mask = vget_lane_u64(
|
803
|
-
vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
|
804
|
-
vdup_n_s8(static_cast<int8_t>(0)))),
|
805
|
-
0);
|
806
|
-
return BitMask<uint64_t, kWidth, /*Shift=*/3,
|
807
|
-
/*NullifyBitsOnIteration=*/true>(mask);
|
808
|
-
}
|
809
|
-
|
810
|
-
// Returns a bitmask representing the positions of non full slots.
|
811
|
-
// Note: this includes: kEmpty, kDeleted, kSentinel.
|
812
|
-
// It is useful in contexts when kSentinel is not present.
|
813
|
-
auto MaskNonFull() const {
|
814
|
-
uint64_t mask = vget_lane_u64(
|
815
|
-
vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
|
816
|
-
vdup_n_s8(static_cast<int8_t>(0)))),
|
817
|
-
0);
|
818
|
-
return BitMask<uint64_t, kWidth, /*Shift=*/3,
|
819
|
-
/*NullifyBitsOnIteration=*/true>(mask);
|
820
|
-
}
|
821
|
-
|
822
|
-
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
|
823
|
-
uint64_t mask =
|
824
|
-
vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
|
825
|
-
vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
|
826
|
-
vreinterpret_s8_u8(ctrl))),
|
827
|
-
0);
|
828
|
-
return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
|
829
|
-
}
|
830
|
-
|
831
|
-
uint32_t CountLeadingEmptyOrDeleted() const {
|
832
|
-
uint64_t mask =
|
833
|
-
vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
|
834
|
-
vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
|
835
|
-
vreinterpret_s8_u8(ctrl))),
|
836
|
-
0);
|
837
|
-
// Similar to MaskEmptyorDeleted() but we invert the logic to invert the
|
838
|
-
// produced bitfield. We then count number of trailing zeros.
|
839
|
-
// Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
|
840
|
-
// so we should be fine.
|
841
|
-
return static_cast<uint32_t>(countr_zero(mask)) >> 3;
|
842
|
-
}
|
843
|
-
|
844
|
-
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
|
845
|
-
uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
|
846
|
-
constexpr uint64_t slsbs = 0x0202020202020202ULL;
|
847
|
-
constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
|
848
|
-
auto x = slsbs & (mask >> 6);
|
849
|
-
auto res = (x + midbs) | kMsbs8Bytes;
|
850
|
-
little_endian::Store64(dst, res);
|
851
|
-
}
|
852
|
-
|
853
|
-
uint8x8_t ctrl;
|
519
|
+
private:
|
520
|
+
static constexpr size_t kSizeShift = 64 - kSizeBitCount;
|
521
|
+
static constexpr uint64_t kSizeOneNoMetadata = uint64_t{1} << kSizeShift;
|
522
|
+
static constexpr uint64_t kMetadataMask = kSizeOneNoMetadata - 1;
|
523
|
+
static constexpr uint64_t kSeedMask =
|
524
|
+
(uint64_t{1} << PerTableSeed::kBitCount) - 1;
|
525
|
+
// The next bit after the seed.
|
526
|
+
static constexpr uint64_t kHasInfozMask = kSeedMask + 1;
|
527
|
+
uint64_t data_;
|
854
528
|
};
-#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
-
-struct GroupPortableImpl {
-  static constexpr size_t kWidth = 8;
-
-  explicit GroupPortableImpl(const ctrl_t* pos)
-      : ctrl(little_endian::Load64(pos)) {}
-
-  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
-    // For the technique, see:
-    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
-    // (Determine if a word has a byte equal to n).
-    //
-    // Caveat: there are false positives but:
-    // - they only occur if there is a real match
-    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
-    // - they will be handled gracefully by subsequent checks in code
-    //
-    // Example:
-    //   v = 0x1716151413121110
-    //   hash = 0x12
-    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
-    constexpr uint64_t lsbs = 0x0101010101010101ULL;
-    auto x = ctrl ^ (lsbs * hash);
-    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
-  }
-
-  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
-    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
-                                                   kMsbs8Bytes);
-  }
-
-  // Returns a bitmask representing the positions of full slots.
-  // Note: for `is_small()` tables group may contain the "same" slot twice:
-  // original and mirrored.
-  BitMask<uint64_t, kWidth, 3> MaskFull() const {
-    return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
-  }
-
-  // Returns a bitmask representing the positions of non full slots.
-  // Note: this includes: kEmpty, kDeleted, kSentinel.
-  // It is useful in contexts when kSentinel is not present.
-  auto MaskNonFull() const {
-    return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
-  }
-
-  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
-    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
-                                                   kMsbs8Bytes);
-  }
-
-  uint32_t CountLeadingEmptyOrDeleted() const {
-    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
-    // kDeleted. We lower all other bits and count number of trailing zeros.
-    constexpr uint64_t bits = 0x0101010101010101ULL;
-    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
-                                 3);
-  }
-
-  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
-    constexpr uint64_t lsbs = 0x0101010101010101ULL;
-    auto x = ctrl & kMsbs8Bytes;
-    auto res = (~x + (x >> 7)) & ~lsbs;
-    little_endian::Store64(dst, res);
-  }
 
-  uint64_t ctrl;
-};
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table seed.
+inline size_t H1(size_t hash, PerTableSeed seed) {
+  return (hash >> 7) ^ seed.seed();
+}
 
-#ifdef ABSL_INTERNAL_HAVE_SSE2
-using Group = GroupSse2Impl;
-using GroupFullEmptyOrDeleted = GroupSse2Impl;
-#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
-using Group = GroupAArch64Impl;
-// For Aarch64, we use the portable implementation for counting and masking
-// full, empty or deleted group elements. This is to avoid the latency of moving
-// between data GPRs and Neon registers when it does not provide a benefit.
-// Using Neon is profitable when we call Match(), but is not when we don't,
-// which is the case when we do *EmptyOrDeleted and MaskFull operations.
-// It is difficult to make a similar approach beneficial on other architectures
-// such as x86 since they have much lower GPR <-> vector register transfer
-// latency and 16-wide Groups.
-using GroupFullEmptyOrDeleted = GroupPortableImpl;
-#else
-using Group = GroupPortableImpl;
-using GroupFullEmptyOrDeleted = GroupPortableImpl;
-#endif
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
 
 // When there is an insertion with no reserved growth, we rehash with
 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
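Note: the new H1/H2 helpers split one hash value into a probe position (seed-mixed high bits) and a 7-bit control byte. A minimal worked example, assuming a 64-bit size_t and an arbitrary stand-in value for PerTableSeed::seed():

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t hash = 0x123456789abcdef0;
      uint64_t seed = 0x55;              // stand-in for the per-table seed
      uint64_t h1 = (hash >> 7) ^ seed;  // picks the starting probe group
      unsigned h2 = hash & 0x7F;         // stored in the control byte
      std::printf("h1=%llx h2=%x\n", (unsigned long long)h1, h2);
    }

Because H1 incorporates the seed, regenerating the seed moves every element's probe sequence; that is why the comments added below on generate_new_seed() warn that a non-empty multi-group table must be rehashed afterwards.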
@@ -974,10 +571,10 @@ class CommonFieldsGenerationInfoEnabled {
   // references. We rehash on the first insertion after reserved_growth_ reaches
   // 0 after a call to reserve. We also do a rehash with low probability
   // whenever reserved_growth_ is zero.
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+  bool should_rehash_for_bug_detection_on_insert(PerTableSeed seed,
                                                  size_t capacity) const;
   // Similar to above, except that we don't depend on reserved_growth_.
-  bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
+  bool should_rehash_for_bug_detection_on_move(PerTableSeed seed,
                                                size_t capacity) const;
   void maybe_increment_generation_on_insert() {
     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
@@ -1031,10 +628,10 @@ class CommonFieldsGenerationInfoDisabled {
   CommonFieldsGenerationInfoDisabled& operator=(
       CommonFieldsGenerationInfoDisabled&&) = default;
 
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+  bool should_rehash_for_bug_detection_on_insert(PerTableSeed, size_t) const {
     return false;
   }
-  bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
+  bool should_rehash_for_bug_detection_on_move(PerTableSeed, size_t) const {
     return false;
   }
   void maybe_increment_generation_on_insert() {}
@@ -1127,9 +724,9 @@ class GrowthInfo {
   }
 
   // Overwrites several empty slots with full slots.
-  void OverwriteManyEmptyAsFull(size_t cnt) {
-    ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= cnt);
-    growth_left_info_ -= cnt;
+  void OverwriteManyEmptyAsFull(size_t count) {
+    ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= count);
+    growth_left_info_ -= count;
   }
 
   // Overwrites specified control element with full slot.
@@ -1154,7 +751,14 @@ class GrowthInfo {
   // 2. There is no growth left.
   bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
 
-  // Returns true if table guaranteed to have no kDeleted slots.
+  // Returns true if GetGrowthLeft() == 0, but must be called only if
+  // HasNoDeleted() is false. It is slightly more efficient.
+  bool HasNoGrowthLeftAssumingMayHaveDeleted() const {
+    ABSL_SWISSTABLE_ASSERT(!HasNoDeleted());
+    return growth_left_info_ == kDeletedBit;
+  }
+
+  // Returns true if table guaranteed to have no kDeleted slots.
   bool HasNoDeleted() const {
     return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
   }
@@ -1175,7 +779,7 @@ static_assert(alignof(GrowthInfo) == alignof(size_t), "");
 // Returns whether `n` is a valid capacity (i.e., number of slots).
 //
 // A valid capacity is a non-zero integer `2^m - 1`.
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
 
 // Returns the number of "cloned control bytes".
 //
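Note: restricting capacities to 2^m - 1 is what lets the probe loop use `index & capacity` as a cheap modulo, and it is preserved by NextCapacity's `n * 2 + 1` step (shown further below). A quick standalone check of the predicate, with both definitions re-stated from the diff:

    #include <cassert>
    #include <cstddef>

    constexpr bool IsValidCapacity(size_t n) {
      return ((n + 1) & n) == 0 && n > 0;
    }
    constexpr size_t NextCapacity(size_t n) { return n * 2 + 1; }

    int main() {
      assert(IsValidCapacity(1) && IsValidCapacity(7) && IsValidCapacity(15));
      assert(!IsValidCapacity(0) && !IsValidCapacity(8));
      // Doubling walks 1, 3, 7, 15, ... and never leaves the valid set.
      for (size_t c = 1; c < 100000; c = NextCapacity(c)) {
        assert(IsValidCapacity(c));
      }
    }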
@@ -1191,26 +795,32 @@ constexpr size_t NumControlBytes(size_t capacity) {
 
 // Computes the offset from the start of the backing allocation of control.
 // infoz and growth_info are stored at the beginning of the backing array.
-inline size_t ControlOffset(bool has_infoz) {
+constexpr size_t ControlOffset(bool has_infoz) {
   return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
 }
 
+// Returns the offset of the next item after `offset` that is aligned to `align`
+// bytes. `align` must be a power of two.
+constexpr size_t AlignUpTo(size_t offset, size_t align) {
+  return (offset + align - 1) & (~align + 1);
+}
+
 // Helper class for computing offsets and allocation size of hash set fields.
 class RawHashSetLayout {
  public:
-  explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
-      : capacity_(capacity),
-        control_offset_(ControlOffset(has_infoz)),
+  explicit RawHashSetLayout(size_t capacity, size_t slot_size,
+                            size_t slot_align, bool has_infoz)
+      : control_offset_(ControlOffset(has_infoz)),
         generation_offset_(control_offset_ + NumControlBytes(capacity)),
         slot_offset_(
-            (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
-            (~slot_align + 1)) {
+            AlignUpTo(generation_offset_ + NumGenerationBytes(), slot_align)),
+        alloc_size_(slot_offset_ + capacity * slot_size) {
     ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
+    ABSL_SWISSTABLE_ASSERT(
+        slot_size <=
+        ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity);
   }
 
-  // Returns the capacity of a table.
-  size_t capacity() const { return capacity_; }
-
   // Returns precomputed offset from the start of the backing allocation of
   // control.
   size_t control_offset() const { return control_offset_; }
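Note: AlignUpTo relies on `~align + 1` being the two's-complement negation of a power-of-two `align`, so the AND clears the bits below the alignment after rounding up. For example, offset 37 with align 8 becomes (37 + 7) & ~7 = 40. A standalone sanity check of the formula added above:

    #include <cstddef>

    constexpr size_t AlignUpTo(size_t offset, size_t align) {
      return (offset + align - 1) & (~align + 1);
    }

    static_assert(AlignUpTo(37, 8) == 40);
    static_assert(AlignUpTo(40, 8) == 40);  // already aligned: unchanged
    static_assert(AlignUpTo(1, 16) == 16);
    static_assert(AlignUpTo(0, 8) == 0);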
@@ -1225,39 +835,17 @@ class RawHashSetLayout {
 
   // Given the capacity of a table, computes the total size of the backing
   // array.
-  size_t alloc_size(size_t slot_size) const {
-    ABSL_SWISSTABLE_ASSERT(
-        slot_size <=
-        ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity_);
-    return slot_offset_ + capacity_ * slot_size;
-  }
+  size_t alloc_size() const { return alloc_size_; }
 
  private:
-  size_t capacity_;
   size_t control_offset_;
   size_t generation_offset_;
   size_t slot_offset_;
+  size_t alloc_size_;
 };
 
 struct HashtableFreeFunctionsAccess;
 
-// We only allow a maximum of 1 SOO element, which makes the implementation
-// much simpler. Complications with multiple SOO elements include:
-// - Satisfying the guarantee that erasing one element doesn't invalidate
-//   iterators to other elements means we would probably need actual SOO
-//   control bytes.
-// - In order to prevent user code from depending on iteration order for small
-//   tables, we would need to randomize the iteration order somehow.
-constexpr size_t SooCapacity() { return 1; }
-// Sentinel type to indicate SOO CommonFields construction.
-struct soo_tag_t {};
-// Sentinel type to indicate SOO CommonFields construction with full size.
-struct full_soo_tag_t {};
-// Sentinel type to indicate non-SOO CommonFields construction.
-struct non_soo_tag_t {};
-// Sentinel value to indicate an uninitialized CommonFields for use in swapping.
-struct uninitialized_tag_t {};
-
 // Suppress erroneous uninitialized memory errors on GCC. For example, GCC
 // thinks that the call to slot_array() in find_or_prepare_insert() is reading
 // uninitialized memory, but slot_array is only called there when the table is
@@ -1285,7 +873,7 @@ union MaybeInitializedPtr {
 };
 
 struct HeapPtrs {
-  HeapPtrs() = default;
+  explicit HeapPtrs(uninitialized_tag_t) {}
   explicit HeapPtrs(ctrl_t* c) : control(c) {}
 
   // The control bytes (and, also, a pointer near to the base of the backing
@@ -1304,10 +892,13 @@ struct HeapPtrs {
   MaybeInitializedPtr slot_array;
 };
 
+// Returns the maximum size of the SOO slot.
+constexpr size_t MaxSooSlotSize() { return sizeof(HeapPtrs); }
+
 // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
 // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
 union HeapOrSoo {
-  HeapOrSoo() = default;
+  explicit HeapOrSoo(uninitialized_tag_t) : heap(uninitialized_tag_t{}) {}
   explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
 
   ctrl_t*& control() {
@@ -1330,26 +921,50 @@ union HeapOrSoo {
   }
 
   HeapPtrs heap;
-  unsigned char soo_data[sizeof(HeapPtrs)];
+  unsigned char soo_data[MaxSooSlotSize()];
 };
 
+// Returns a reference to the GrowthInfo object stored immediately before
+// `control`.
+inline GrowthInfo& GetGrowthInfoFromControl(ctrl_t* control) {
+  auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control) - 1;
+  ABSL_SWISSTABLE_ASSERT(
+      reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
+  return *gl_ptr;
+}
+
 // CommonFields hold the fields in raw_hash_set that do not depend
 // on template parameters. This allows us to conveniently pass all
 // of this state to helper functions as a single argument.
 class CommonFields : public CommonFieldsGenerationInfo {
  public:
-  explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
+  explicit CommonFields(soo_tag_t)
+      : capacity_(SooCapacity()),
+        size_(no_seed_empty_tag_t{}),
+        heap_or_soo_(uninitialized_tag_t{}) {}
   explicit CommonFields(full_soo_tag_t)
-      : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
+      : capacity_(SooCapacity()),
+        size_(full_soo_tag_t{}),
+        heap_or_soo_(uninitialized_tag_t{}) {}
   explicit CommonFields(non_soo_tag_t)
-      : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
+      : capacity_(0),
+        size_(no_seed_empty_tag_t{}),
+        heap_or_soo_(EmptyGroup()) {}
   // For use in swapping.
-  explicit CommonFields(uninitialized_tag_t) {}
+  explicit CommonFields(uninitialized_tag_t)
+      : size_(uninitialized_tag_t{}), heap_or_soo_(uninitialized_tag_t{}) {}
 
   // Not copyable
   CommonFields(const CommonFields&) = delete;
   CommonFields& operator=(const CommonFields&) = delete;
 
+  // Copy with guarantee that it is not SOO.
+  CommonFields(non_soo_tag_t, const CommonFields& that)
+      : capacity_(that.capacity_),
+        size_(that.size_),
+        heap_or_soo_(that.heap_or_soo_) {
+  }
+
   // Movable
   CommonFields(CommonFields&& that) = default;
   CommonFields& operator=(CommonFields&&) = default;
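Note: GetGrowthInfoFromControl works because the backing allocation places GrowthInfo (and, when the table is sampled, the infoz handle) immediately before the control bytes, exactly as ControlOffset computes. A rough sketch of the same header-before-array trick, using a hypothetical Header type rather than the real GrowthInfo:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    struct Header { uint64_t growth_left; };  // stand-in for GrowthInfo

    int main() {
      // Layout of the allocation: [Header][control bytes ...]
      void* mem = std::malloc(sizeof(Header) + 16);
      auto* ctrl = static_cast<unsigned char*>(mem) + sizeof(Header);
      // Recover the header from the ctrl pointer alone, as growth_info() does.
      Header* h = reinterpret_cast<Header*>(ctrl) - 1;
      h->growth_left = 7;
      assert(static_cast<Header*>(mem)->growth_left == 7);
      std::free(mem);
    }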
@@ -1364,11 +979,21 @@ class CommonFields : public CommonFieldsGenerationInfo {
   const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
   void* soo_data() { return heap_or_soo_.get_soo_data(); }
 
-  HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
-  const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
-
   ctrl_t* control() const { return heap_or_soo_.control(); }
-  void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
+
+  // When we set the control bytes, we also often want to generate a new seed.
+  // So we bundle these two operations together to make sure we don't forget to
+  // generate a new seed.
+  // The table will be invalidated if
+  // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+  // being changed. In such cases, we will need to rehash the table.
+  template <bool kGenerateSeed>
+  void set_control(ctrl_t* c) {
+    heap_or_soo_.control() = c;
+    if constexpr (kGenerateSeed) {
+      generate_new_seed();
+    }
+  }
   void* backing_array_start() const {
     // growth_info (and maybe infoz) is stored before control bytes.
     ABSL_SWISSTABLE_ASSERT(
@@ -1382,26 +1007,39 @@ class CommonFields : public CommonFieldsGenerationInfo {
   void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
 
   // The number of filled slots.
-  size_t size() const { return size_ >> HasInfozShift(); }
-  void set_size(size_t s) {
-    size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
-  }
+  size_t size() const { return size_.size(); }
+  // Sets the size to zero, but keeps hashinfoz bit and seed.
+  void set_size_to_zero() { size_.set_size_to_zero_keep_metadata(); }
   void set_empty_soo() {
     AssertInSooMode();
-    size_ = 0;
+    size_ = HashtableSize(no_seed_empty_tag_t{});
   }
   void set_full_soo() {
     AssertInSooMode();
-    size_ = size_t{1} << HasInfozShift();
+    size_ = HashtableSize(full_soo_tag_t{});
   }
   void increment_size() {
     ABSL_SWISSTABLE_ASSERT(size() < capacity());
-    size_ += size_t{1} << HasInfozShift();
+    size_.increment_size();
+  }
+  void increment_size(size_t n) {
+    ABSL_SWISSTABLE_ASSERT(size() + n <= capacity());
+    size_.increment_size(n);
   }
   void decrement_size() {
-    ABSL_SWISSTABLE_ASSERT(size() > 0);
-    size_ -= size_t{1} << HasInfozShift();
+    ABSL_SWISSTABLE_ASSERT(!empty());
+    size_.decrement_size();
   }
+  bool empty() const { return size_.empty(); }
+
+  // The seed used for the H1 part of the hash function.
+  PerTableSeed seed() const { return size_.seed(); }
+  // Generates a new seed for the H1 part of the hash function.
+  // The table will be invalidated if
+  // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+  // being changed. In such cases, we will need to rehash the table.
+  void generate_new_seed() { size_.generate_new_seed(); }
+  void set_no_seed_for_testing() { size_.set_no_seed_for_testing(); }
 
   // The total number of available slots.
   size_t capacity() const { return capacity_; }
@@ -1419,21 +1057,14 @@ class CommonFields : public CommonFieldsGenerationInfo {
   size_t growth_left() const { return growth_info().GetGrowthLeft(); }
 
   GrowthInfo& growth_info() {
-    auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
-    ABSL_SWISSTABLE_ASSERT(
-        reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
-    return *gl_ptr;
+    return GetGrowthInfoFromControl(control());
   }
   GrowthInfo growth_info() const {
     return const_cast<CommonFields*>(this)->growth_info();
   }
 
-  bool has_infoz() const {
-    return ABSL_PREDICT_FALSE(size_ & HasInfozMask());
-  }
-  void set_has_infoz(bool has_infoz) {
-    size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
-  }
+  bool has_infoz() const { return size_.has_infoz(); }
+  void set_has_infoz() { size_.set_has_infoz(); }
 
   HashtablezInfoHandle infoz() {
     return has_infoz()
@@ -1446,12 +1077,18 @@ class CommonFields : public CommonFieldsGenerationInfo {
   }
 
   bool should_rehash_for_bug_detection_on_insert() const {
+    if constexpr (!SwisstableGenerationsEnabled()) {
+      return false;
+    }
+    // As an optimization, we avoid calling ShouldRehashForBugDetection if we
+    // will end up rehashing anyways.
+    if (growth_left() == 0) return false;
     return CommonFieldsGenerationInfo::
-        should_rehash_for_bug_detection_on_insert(control(), capacity());
+        should_rehash_for_bug_detection_on_insert(seed(), capacity());
   }
   bool should_rehash_for_bug_detection_on_move() const {
     return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
-        control(), capacity());
+        seed(), capacity());
   }
   void reset_reserved_growth(size_t reservation) {
     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1459,8 +1096,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
 
   // The size of the backing array allocation.
   size_t alloc_size(size_t slot_size, size_t slot_align) const {
-    return RawHashSetLayout(capacity(), slot_align, has_infoz())
-        .alloc_size(slot_size);
+    return RawHashSetLayout(capacity(), slot_size, slot_align, has_infoz())
+        .alloc_size();
   }
 
   // Move fields other than heap_or_soo_.
@@ -1513,11 +1150,10 @@ class CommonFields : public CommonFieldsGenerationInfo {
   // regressions, presumably because we need capacity to do find operations.
   size_t capacity_;
 
-  // The size and also has one bit that stores whether we have infoz.
   // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
   // encode the size in SOO case. We would be making size()/capacity() more
   // expensive in order to have more SOO space.
-  size_t size_;
+  HashtableSize size_;
 
   // Either the control/slots pointers or the SOO slot.
   HeapOrSoo heap_or_soo_;
@@ -1527,11 +1163,17 @@ template <class Policy, class Hash, class Eq, class Alloc>
 class raw_hash_set;
 
 // Returns the next valid capacity after `n`.
-inline size_t NextCapacity(size_t n) {
+constexpr size_t NextCapacity(size_t n) {
   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
   return n * 2 + 1;
 }
 
+// Returns the previous valid capacity before `n`.
+constexpr size_t PreviousCapacity(size_t n) {
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
+  return n / 2;
+}
+
 // Applies the following mapping to every byte in the control array:
 //   * kDeleted -> kEmpty
 //   * kEmpty -> kEmpty
@@ -1543,19 +1185,10 @@ inline size_t NextCapacity(size_t n) {
 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
 
 // Converts `n` into the next valid capacity, per `IsValidCapacity`.
-inline size_t NormalizeCapacity(size_t n) {
+constexpr size_t NormalizeCapacity(size_t n) {
   return n ? ~size_t{} >> countl_zero(n) : 1;
 }
 
-template <size_t kSlotSize>
-size_t MaxValidCapacity() {
-  return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
-                           kSlotSize);
-}
-
-// Use a non-inlined function to avoid code bloat.
-[[noreturn]] void HashTableSizeOverflow();
-
 // General notes on capacity/growth methods below:
 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
 //   average of two empty slots per group.
@@ -1566,7 +1199,7 @@ size_t MaxValidCapacity() {
 
 // Given `capacity`, applies the load factor; i.e., it returns the maximum
 // number of values we should put into the table before a resizing rehash.
-inline size_t CapacityToGrowth(size_t capacity) {
+constexpr size_t CapacityToGrowth(size_t capacity) {
   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
   // `capacity*7/8`
   if (Group::kWidth == 8 && capacity == 7) {
@@ -1576,18 +1209,28 @@ inline size_t CapacityToGrowth(size_t capacity) {
   return capacity - capacity / 8;
 }
 
-// Given `growth`, "unapplies" the load factor to find how large the capacity
+// Given `size`, "unapplies" the load factor to find how large the capacity
 // should be to stay within the load factor.
 //
-// This might not be a valid capacity and `NormalizeCapacity()` should be
-// called on this.
-inline size_t GrowthToLowerboundCapacity(size_t growth) {
-  // `growth*8/7`
-  if (Group::kWidth == 8 && growth == 7) {
-    // x+(x-1)/7 does not work when x==7.
-    return 8;
-  }
-  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
-}
-
+// For size == 0, returns 0.
+// For other values, returns the same as `NormalizeCapacity(size*8/7)`.
+constexpr size_t SizeToCapacity(size_t size) {
+  if (size == 0) {
+    return 0;
+  }
+  // The minimum possible capacity is NormalizeCapacity(size).
+  // Shifting right `~size_t{}` by `leading_zeros` yields
+  // NormalizeCapacity(size).
+  int leading_zeros = absl::countl_zero(size);
+  constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3);
+  size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
+  // Decrease shift if size is too big for the minimum capacity.
+  leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
+  if constexpr (Group::kWidth == 8) {
+    // Formula doesn't work when size==7 for 8-wide groups.
+    leading_zeros -= (size == 7);
+  }
+  return (~size_t{}) >> leading_zeros;
 }
 
 template <class InputIter>
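Note: CapacityToGrowth and the new SizeToCapacity are designed as inverses around the 7/8 maximum load factor: capacity 15 admits 13 elements, and requesting room for 13 elements should select capacity 15 again, while 14 elements force the next capacity, 31. A small self-contained check, with CapacityToGrowth re-stated and the 8-wide-group special case folded in:

    #include <cassert>
    #include <cstddef>

    constexpr size_t CapacityToGrowth(size_t capacity) {
      return capacity == 7 ? 6 : capacity - capacity / 8;  // capacity * 7/8
    }

    int main() {
      assert(CapacityToGrowth(15) == 13);
      // SizeToCapacity(n) is expected to pick the smallest capacity whose
      // growth limit reaches n: 13 still fits in 15, 14 needs 31.
      assert(CapacityToGrowth(15) >= 13 && CapacityToGrowth(7) < 13);
      assert(CapacityToGrowth(31) >= 14 && CapacityToGrowth(15) < 14);
    }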
@@ -1596,12 +1239,9 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
   if (bucket_count != 0) {
     return bucket_count;
   }
-  using InputIterCategory =
-      typename std::iterator_traits<InputIter>::iterator_category;
-  if (std::is_base_of<std::random_access_iterator_tag,
-                      InputIterCategory>::value) {
-    return GrowthToLowerboundCapacity(
-        static_cast<size_t>(std::distance(first, last)));
+  if (base_internal::IsAtLeastIterator<std::random_access_iterator_tag,
+                                       InputIter>()) {
+    return SizeToCapacity(static_cast<size_t>(std::distance(first, last)));
   }
   return 0;
 }
@@ -1674,7 +1314,7 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl,
           FATAL, "Invalid iterator comparison. The element was likely erased.");
     }
   } else {
-    ABSL_HARDENING_ASSERT(
+    ABSL_HARDENING_ASSERT_SLOW(
         ctrl_is_valid_for_comparison &&
         "Invalid iterator comparison. The element might have been erased or "
         "the table might have rehashed. Consider running with --config=asan to "
@@ -1772,33 +1412,22 @@ struct FindInfo {
   size_t probe_length;
 };
 
-// Whether a table is "small". A small table fits entirely into a probing
-// group, i.e., has a capacity < `Group::kWidth`.
-//
-// In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
-//
-// In small mode only the first `capacity` control bytes after the sentinel
-// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// `find_first_non_full()`, where we never try
-// `ShouldInsertBackwards()` for small tables.
-inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-
 // Whether a table fits entirely into a probing group.
 // Arbitrary order of elements in such tables is correct.
-inline bool is_single_group(size_t capacity) {
+constexpr bool is_single_group(size_t capacity) {
   return capacity <= Group::kWidth;
 }
 
 // Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+inline probe_seq<Group::kWidth> probe(size_t h1, size_t capacity) {
+  return probe_seq<Group::kWidth>(h1, capacity);
+}
+inline probe_seq<Group::kWidth> probe(PerTableSeed seed, size_t capacity,
                                       size_t hash) {
-  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+  return probe(H1(hash, seed), capacity);
 }
 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
-  return probe(common.control(), common.capacity(), hash);
+  return probe(common.seed(), common.capacity(), hash);
 }
 
 // Probes an array of control bits using a probe sequence derived from `hash`,
@@ -1808,51 +1437,70 @@ inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
 //
 // NOTE: this function must work with tables having both empty and deleted
 // slots in the same group. Such tables appear during `erase()`.
+FindInfo find_first_non_full(const CommonFields& common, size_t hash);
+
+constexpr size_t kProbedElementIndexSentinel = ~size_t{};
+
+// Implementation detail of transfer_unprobed_elements_to_next_capacity_fn.
+// Tries to find the new index for an element whose hash corresponds to
+// `h1` for growth to the next capacity.
+// Returns kProbedElementIndexSentinel if full probing is required.
+//
+// If element is located in the first probing group in the table before growth,
+// returns one of two positions: `old_index` or `old_index + old_capacity + 1`.
+//
+// Otherwise, we will try to insert it into the first probe group of the new
+// table. We only attempt to do so if the first probe group is already
+// initialized.
 template <typename = void>
-inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
-  auto seq = probe(common, hash);
-  const ctrl_t* ctrl = common.control();
-  if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
-      !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
-    return {seq.offset(), /*probe_length=*/0};
-  }
-  while (true) {
-    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
-    auto mask = g.MaskEmptyOrDeleted();
-    if (mask) {
-      return {seq.offset(GetInsertionOffset(mask, common.capacity(), hash,
-                                            ctrl)),
-              seq.index()};
-    }
-    seq.next();
-    ABSL_SWISSTABLE_ASSERT(seq.index() <= common.capacity() && "full table!");
-  }
+inline size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
+                                            size_t old_capacity,
+                                            ctrl_t* new_ctrl,
+                                            size_t new_capacity) {
+  size_t index_diff = old_index - h1;
+  // The first probe group starts with h1 & capacity.
+  // All following groups start at (h1 + Group::kWidth * K) & capacity.
+  // We can find an index within the floating group as index_diff modulo
+  // Group::kWidth.
+  // Both old and new capacity are larger than Group::kWidth so we can avoid
+  // computing `& capacity`.
+  size_t in_floating_group_index = index_diff & (Group::kWidth - 1);
+  // By subtracting we will get the difference between the first probe group
+  // and the probe group corresponding to old_index.
+  index_diff -= in_floating_group_index;
+  if (ABSL_PREDICT_TRUE((index_diff & old_capacity) == 0)) {
+    size_t new_index = (h1 + in_floating_group_index) & new_capacity;
+    ABSL_ASSUME(new_index != kProbedElementIndexSentinel);
+    return new_index;
+  }
+  ABSL_SWISSTABLE_ASSERT(((old_index - h1) & old_capacity) >= Group::kWidth);
+  // Try to insert element into the first probe group.
+  // new_ctrl is not yet fully initialized so we can't use regular search via
+  // find_first_non_full.
+
+  // We can search in the first probe group only if it is located in already
+  // initialized part of the table.
+  if (ABSL_PREDICT_FALSE((h1 & old_capacity) >= old_index)) {
+    return kProbedElementIndexSentinel;
+  }
+  size_t offset = h1 & new_capacity;
+  Group new_g(new_ctrl + offset);
+  if (auto mask = new_g.MaskNonFull(); ABSL_PREDICT_TRUE(mask)) {
+    size_t result = offset + mask.LowestBitSet();
+    ABSL_ASSUME(result != kProbedElementIndexSentinel);
+    return result;
+  }
+  return kProbedElementIndexSentinel;
 }
 
-// Extern template for inline function keep possibility of inlining.
+// Extern template for inline function keeps possibility of inlining.
 // When compiler decided to not inline, no symbols will be added to the
 // corresponding translation unit.
-extern template FindInfo find_first_non_full(const CommonFields&, size_t);
-
-// Non-inlined version of find_first_non_full for use in less
-// performance critical routines.
-FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
-
-inline void ResetGrowthLeft(CommonFields& common) {
-  common.growth_info().InitGrowthLeftNoDeleted(
-      CapacityToGrowth(common.capacity()) - common.size());
-}
-
-// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
-// array as marked as empty.
-inline void ResetCtrl(CommonFields& common, size_t slot_size) {
-  const size_t capacity = common.capacity();
-  ctrl_t* ctrl = common.control();
-  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
-              capacity + 1 + NumClonedBytes());
-  ctrl[capacity] = ctrl_t::kSentinel;
-  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
-}
+extern template size_t TryFindNewIndexWithoutProbing(size_t h1,
+                                                     size_t old_index,
+                                                     size_t old_capacity,
+                                                     ctrl_t* new_ctrl,
+                                                     size_t new_capacity);
 
 // Sets sanitizer poisoning for slot corresponding to control byte being set.
 inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
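Note: the fast path of TryFindNewIndexWithoutProbing rests on a simple invariant of growing to `new_capacity = old_capacity * 2 + 1`: for an element sitting in its first probe group, the new position equals the old one except possibly for the new high bit, i.e. it is either `old_index` or `old_index + old_capacity + 1`. A toy check of that invariant with the masks spelled out:

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kWidth = 16;                  // group width, as on x86
      size_t old_capacity = 63, new_capacity = 127;  // both 2^m - 1
      for (size_t h1 = 0; h1 < 4096; ++h1) {
        for (size_t j = 0; j < kWidth; ++j) {  // offsets inside first group
          size_t old_index = (h1 + j) & old_capacity;
          size_t new_index = (h1 + j) & new_capacity;
          assert(new_index == old_index ||
                 new_index == old_index + old_capacity + 1);
        }
      }
    }

Elements found outside their first probe group cannot be relocated this cheaply; the function then either searches the already-initialized first group of the new table or returns kProbedElementIndexSentinel so the caller falls back to full probing.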
@@ -1899,6 +1547,22 @@ inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
   SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
 }
 
+// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
+// we can save some operations when setting the cloned control byte.
+inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
+                                size_t slot_size) {
+  ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
+  DoSanitizeOnSetCtrl(c, i, h, slot_size);
+  ctrl_t* ctrl = c.control();
+  ctrl[i] = h;
+  ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
+}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
+                                size_t slot_size) {
+  SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
+}
+
 // growth_info (which is a size_t) is stored with the backing array.
 constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
   return (std::max)(align_of_slot, alignof(GrowthInfo));
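Note: SetCtrlInLargeTable maintains the cloned control bytes. Writes to the first NumClonedBytes() slots must also appear after the sentinel so that unaligned group loads near the end of the array observe them; for every other index the formula maps i to itself, so the unconditional second store just rewrites the same byte. A toy demonstration of the index mapping, assuming NumClonedBytes() == kWidth - 1 == 15 for 16-wide groups:

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kCloned = 15;  // NumClonedBytes() for 16-wide groups
      const size_t capacity = 63;
      for (size_t i = 0; i <= capacity; ++i) {
        size_t mirror = ((i - kCloned) & capacity) + kCloned;
        if (i < kCloned) {
          assert(mirror == i + capacity + 1);  // clone lives past the sentinel
        } else {
          assert(mirror == i);  // maps to itself: second store is redundant
        }
      }
    }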
@@ -1911,423 +1575,283 @@ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
                             (slot * slot_size));
 }
 
-// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
-// No insertion to the table allowed during Callback call.
+// Iterates over all full slots and calls `cb(const ctrl_t*, void*)`.
+// No insertion to the table is allowed during `cb` call.
 // Erasure is allowed only for the element passed to the callback.
-template <class SlotType>
-void IterateOverFullSlots(const CommonFields& c, SlotType* slot,
-                          absl::FunctionRef<void(const ctrl_t*, SlotType*)> cb) {
-  const size_t cap = c.capacity();
-  const ctrl_t* ctrl = c.control();
-  if (is_small(cap)) {
-    // Mirrored/cloned control bytes in small table are also located in the
-    // first group (starting from position 0). We are taking group from position
-    // `capacity` in order to avoid duplicates.
-
-    // Small tables capacity fits into portable group, where
-    // GroupPortableImpl::MaskFull is more efficient for the
-    // capacity <= GroupPortableImpl::kWidth.
-    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
-                           "unexpectedly large small capacity");
-    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
-                  "unexpected group width");
-    // Group starts from kSentinel slot, so indices in the mask will
-    // be increased by 1.
-    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
-    --ctrl;
-    --slot;
-    for (uint32_t i : mask) {
-      cb(ctrl + i, slot + i);
-    }
-    return;
-  }
-  size_t remaining = c.size();
-  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
-  while (remaining != 0) {
-    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
-      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
-                             "hash table was modified unexpectedly");
-      cb(ctrl + i, slot + i);
-      --remaining;
-    }
-    ctrl += Group::kWidth;
-    slot += Group::kWidth;
-    ABSL_SWISSTABLE_ASSERT(
-        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
-        "hash table was modified unexpectedly");
-  }
-  // NOTE: erasure of the current element is allowed in callback for
-  // absl::erase_if specialization. So we use `>=`.
-  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
-                         "hash table was modified unexpectedly");
-}
+// The table must not be in SOO mode.
+void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
+                          absl::FunctionRef<void(const ctrl_t*, void*)> cb);
 
 template <typename CharAlloc>
-constexpr bool ShouldSampleHashtablezInfo() {
+constexpr bool ShouldSampleHashtablezInfoForAlloc() {
   // Folks with custom allocators often make unwarranted assumptions about the
   // behavior of their classes vis-a-vis trivial destructability and what
   // calls they will or won't make. Avoid sampling for people with custom
   // allocators to get us out of this mess. This is not a hard guarantee but
   // a workaround while we plan the exact guarantee we want to provide.
-  return std::is_same<CharAlloc, std::allocator<char>>::value;
+  return std::is_same_v<CharAlloc, std::allocator<char>>;
 }
 
 template <bool kSooEnabled>
-HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
-                                          size_t sizeof_value,
-                                          size_t old_capacity, bool was_soo,
-                                          HashtablezInfoHandle forced_infoz,
-                                          CommonFields& c) {
-  if (forced_infoz.IsSampled()) return forced_infoz;
+bool ShouldSampleHashtablezInfoOnResize(bool force_sampling,
+                                        bool is_hashtablez_eligible,
+                                        size_t old_capacity, CommonFields& c) {
+  if (!is_hashtablez_eligible) return false;
+  // Force sampling is only allowed for SOO tables.
+  ABSL_SWISSTABLE_ASSERT(kSooEnabled || !force_sampling);
+  if (kSooEnabled && force_sampling) {
+    return true;
+  }
   // In SOO, we sample on the first insertion so if this is an empty SOO case
   // (e.g. when reserve is called), then we still need to sample.
-  if (kSooEnabled && was_soo && c.size() == 0) {
-    return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
+  if (kSooEnabled && old_capacity == SooCapacity() && c.empty()) {
+    return ShouldSampleNextTable();
   }
-  // For non-SOO cases, we sample whenever the capacity is increasing from zero
-  // to non-zero.
   if (!kSooEnabled && old_capacity == 0) {
-    return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
+    return ShouldSampleNextTable();
   }
-  return c.infoz();
+  return false;
 }
 
-// Helper class to perform resize of the hash set.
-//
-// It contains special optimizations for small group resizes.
-// See GrowIntoSingleGroupShuffleControlBytes for details.
-class HashSetResizeHelper {
- public:
-  explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
-                               HashtablezInfoHandle forced_infoz)
-      : old_capacity_(c.capacity()),
-        had_infoz_(c.has_infoz()),
-        was_soo_(was_soo),
-        had_soo_slot_(had_soo_slot),
-        forced_infoz_(forced_infoz) {}
-
-  // Optimized for small groups version of `find_first_non_full`.
-  // Beneficial only right after calling `raw_hash_set::resize`.
-  // It is safe to call in case capacity is big or was not changed, but there
-  // will be no performance benefit.
-  // It has implicit assumption that `resize` will call
-  // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
-  // Falls back to `find_first_non_full` in case of big groups.
-  static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
-                                              size_t old_capacity, size_t hash);
-
-  HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
-  void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
-  ctrl_t* old_ctrl() const {
-    ABSL_SWISSTABLE_ASSERT(!was_soo_);
-    return old_heap_or_soo_.control();
-  }
-  void* old_slots() const {
-    ABSL_SWISSTABLE_ASSERT(!was_soo_);
-    return old_heap_or_soo_.slot_array().get();
-  }
-  size_t old_capacity() const { return old_capacity_; }
-
-  // Returns the index of the SOO slot when growing from SOO to non-SOO in a
-  // single group. See also InitControlBytesAfterSoo(). It's important to use
-  // index 1 so that when resizing from capacity 1 to 3, we can still have
-  // random iteration order between the first two inserted elements.
-  // I.e. it allows inserting the second element at either index 0 or 2.
-  static size_t SooSlotIndex() { return 1; }
-
-  // Allocates a backing array for the hashtable.
-  // Reads `capacity` and updates all other fields based on the result of
-  // the allocation.
-  //
-  // It also may do the following actions:
-  // 1. initialize control bytes
-  // 2. initialize slots
-  // 3. deallocate old slots.
-  //
-  // We are bundling a lot of functionality
-  // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
-  // duplication in raw_hash_set<>::resize.
-  //
-  // `c.capacity()` must be nonzero.
-  // POSTCONDITIONS:
-  //  1. CommonFields is initialized.
-  //
-  //  if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
-  //    Both control bytes and slots are fully initialized.
-  //    old_slots are deallocated.
-  //    infoz.RecordRehash is called.
-  //
-  //  if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
-  //    Control bytes are fully initialized.
-  //    infoz.RecordRehash is called.
-  //    GrowSizeIntoSingleGroup must be called to finish slots initialization.
-  //
-  //  if !IsGrowingIntoSingleGroupApplicable
-  //    Control bytes are initialized to empty table via ResetCtrl.
-  //    raw_hash_set<>::resize must insert elements regularly.
-  //    infoz.RecordRehash is called if old_capacity == 0.
-  //
-  // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
-  template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
-            bool SooEnabled, size_t AlignOfSlot>
-  ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
-                                               ctrl_t soo_slot_h2,
-                                               size_t key_size,
-                                               size_t value_size) {
-    ABSL_SWISSTABLE_ASSERT(c.capacity());
-    HashtablezInfoHandle infoz =
-        ShouldSampleHashtablezInfo<Alloc>()
-            ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
-                                               old_capacity_, was_soo_,
-                                               forced_infoz_, c)
-            : HashtablezInfoHandle{};
-
-    const bool has_infoz = infoz.IsSampled();
-    RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
-    char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
-        &alloc, layout.alloc_size(SizeOfSlot)));
-    const GenerationType old_generation = c.generation();
-    c.set_generation_ptr(
-        reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
-    c.set_generation(NextGeneration(old_generation));
-    c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
-    c.set_slots(mem + layout.slot_offset());
-    ResetGrowthLeft(c);
-
-    const bool grow_single_group =
-        IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
-    if (SooEnabled && was_soo_ && grow_single_group) {
-      InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
-      if (TransferUsesMemcpy && had_soo_slot_) {
-        TransferSlotAfterSoo(c, SizeOfSlot);
-      }
-      // SooEnabled implies that old_capacity_ != 0.
-    } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
-      if (TransferUsesMemcpy) {
-        GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
-        DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
-      } else {
-        GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
-      }
-    } else {
-      ResetCtrl(c, SizeOfSlot);
-    }
-
-    c.set_has_infoz(has_infoz);
-    if (has_infoz) {
-      infoz.RecordStorageChanged(c.size(), layout.capacity());
-      if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
-        infoz.RecordRehash(0);
-      }
-      c.set_infoz(infoz);
-    }
-    return grow_single_group;
-  }
-
-  // Relocates slots into new single group consistent with
-  // GrowIntoSingleGroupShuffleControlBytes.
-  //
-  // PRECONDITIONS:
-  // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
-  template <class PolicyTraits, class Alloc>
-  void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
-    ABSL_SWISSTABLE_ASSERT(old_capacity_ < Group::kWidth / 2);
-    ABSL_SWISSTABLE_ASSERT(
-        IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
-    using slot_type = typename PolicyTraits::slot_type;
-    ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
-
-    auto* new_slots = static_cast<slot_type*>(c.slot_array()) + 1;
-    auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
-    auto* old_ctrl_ptr = old_ctrl();
-
-    for (size_t i = 0; i < old_capacity_; ++i, ++new_slots) {
-      if (IsFull(old_ctrl_ptr[i])) {
-        SanitizerUnpoisonMemoryRegion(new_slots, sizeof(slot_type));
-        PolicyTraits::transfer(&alloc_ref, new_slots, old_slots_ptr + i);
-      }
-    }
-    PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
-  }
-
-  // Deallocates old backing array.
-  template <size_t AlignOfSlot, class CharAlloc>
-  void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
-    SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
-    auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
-    Deallocate<BackingArrayAlignment(AlignOfSlot)>(
-        &alloc_ref, old_ctrl() - layout.control_offset(),
-        layout.alloc_size(slot_size));
-  }
-
- private:
-  // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
-  static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
-                                                 size_t new_capacity) {
-    // NOTE that `old_capacity < new_capacity` in order to have
-    // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes.
-    return is_single_group(new_capacity) && old_capacity < new_capacity;
-  }
-
-  // Relocates control bytes and slots into new single group for
-  // transferable objects.
-  // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
-  void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
-
-  // If there was an SOO slot and slots are transferable, transfers the SOO slot
-  // into the new heap allocation. Must be called only if
-  // IsGrowingIntoSingleGroupApplicable returned true.
-  void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
-
-  // Shuffle control bits deterministically to the next capacity.
-  // Returns offset for newly added element with given hash.
-  //
-  // PRECONDITIONs:
-  // 1. new_ctrl is allocated for new_capacity,
-  //    but not initialized.
-  // 2. new_capacity is a single group.
-  // 3. old_capacity > 0.
-  //
-  // All elements are transferred into the first `old_capacity + 1` positions
-  // of the new_ctrl. Elements are shifted by 1 in order to keep a space at the
-  // beginning for the new element.
-  // Position of the new added element will be based on `H1` and is not
-  // deterministic.
-  //
-  // Examples:
-  // S = kSentinel, E = kEmpty
-  //
-  // old_ctrl = 0SEEEEEEE...
-  // new_ctrl = E0ESE0EEE...
-  //
-  // old_ctrl = 012S012EEEEEEEEE...
-  // new_ctrl = E012EEESE012EEE...
-  //
-  // old_ctrl = 0123456S0123456EEEEEEEEEEE...
-  // new_ctrl = E0123456EEEEEESE0123456EEE...
-  void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
-                                              size_t new_capacity) const;
-
-  // If the table was SOO, initializes new control bytes. `h2` is the control
-  // byte corresponding to the full slot. Must be called only if
-  // IsGrowingIntoSingleGroupApplicable returned true.
-  // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
-  void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
-                                size_t new_capacity);
-
-  // Shuffle trivially transferable slots in the way consistent with
-  // GrowIntoSingleGroupShuffleControlBytes.
-  //
-  // PRECONDITIONs:
-  // 1. old_capacity must be non-zero.
-  // 2. new_ctrl is fully initialized using
-  //    GrowIntoSingleGroupShuffleControlBytes.
-  // 3. new_slots is allocated and *not* poisoned.
-  //
-  // POSTCONDITIONS:
-  // 1. new_slots are transferred from old_slots_ consistent with
-  //    GrowIntoSingleGroupShuffleControlBytes.
-  // 2. Empty new_slots are *not* poisoned.
-  void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
-                                                   size_t slot_size) const;
-
-  // Poison empty slots that were transferred using the deterministic algorithm
-  // described above.
-  // PRECONDITIONs:
-  // 1. new_ctrl is fully initialized using
-  //    GrowIntoSingleGroupShuffleControlBytes.
-  // 2. new_slots is fully initialized consistent with
-  //    GrowIntoSingleGroupShuffleControlBytes.
-  void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
-    // poison non full items
-    for (size_t i = 0; i < c.capacity(); ++i) {
-      if (!IsFull(c.control()[i])) {
-        SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
-                                    slot_size);
-      }
-    }
-  }
-
-  HeapOrSoo old_heap_or_soo_;
-  size_t old_capacity_;
-  bool had_infoz_;
-  bool was_soo_;
-  bool had_soo_slot_;
-  // Either null infoz or a pre-sampled forced infoz for SOO tables.
-  HashtablezInfoHandle forced_infoz_;
-};
-
-inline void PrepareInsertCommon(CommonFields& common) {
-  common.increment_size();
-  common.maybe_increment_generation_on_insert();
+// Allocates `n` bytes for a backing array.
+template <size_t AlignOfBackingArray, typename Alloc>
+ABSL_ATTRIBUTE_NOINLINE void* AllocateBackingArray(void* alloc, size_t n) {
+  return Allocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), n);
 }
 
-// Like prepare_insert, but for the case of inserting into a full SOO table.
-size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
-                             CommonFields& common);
+template <size_t AlignOfBackingArray, typename Alloc>
+ABSL_ATTRIBUTE_NOINLINE void DeallocateBackingArray(
+    void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
+    size_t slot_align, bool had_infoz) {
+  RawHashSetLayout layout(capacity, slot_size, slot_align, had_infoz);
+  void* backing_array = ctrl - layout.control_offset();
+  // Unpoison before returning the memory to the allocator.
+  SanitizerUnpoisonMemoryRegion(backing_array, layout.alloc_size());
+  Deallocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), backing_array,
+                                  layout.alloc_size());
+}
 
 // PolicyFunctions bundles together some information for a particular
 // raw_hash_set<T, ...> instantiation. This information is passed to
 // type-erased functions that want to do small amounts of type-specific
 // work.
 struct PolicyFunctions {
-  size_t slot_size;
+  uint32_t key_size;
+  uint32_t value_size;
+  uint32_t slot_size;
+  uint16_t slot_align;
+  bool soo_enabled;
+  bool is_hashtablez_eligible;
 
   // Returns the pointer to the hash function stored in the set.
-  const void* (*hash_fn)(const CommonFields& common);
+  void* (*hash_fn)(CommonFields& common);
 
   // Returns the hash of the pointed-to slot.
   size_t (*hash_slot)(const void* hash_fn, void* slot);
 
-  // Transfers the contents of src_slot to dst_slot.
-  void (*transfer)(void* set, void* dst_slot, void* src_slot);
+  // Transfers the contents of `count` slots from src_slot to dst_slot.
+  // We use ability to transfer several slots in single group table growth.
+  void (*transfer_n)(void* set, void* dst_slot, void* src_slot, size_t count);
 
-  // Deallocates the backing store from common.
-  void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+  // Returns the pointer to the CharAlloc stored in the set.
+  void* (*get_char_alloc)(CommonFields& common);
+
+  // Allocates n bytes for the backing store for common.
+  void* (*alloc)(void* alloc, size_t n);
 
-  // Resizes set to the new capacity.
-  // Arguments are used as in raw_hash_set::resize_impl.
-  void (*resize)(CommonFields& common, size_t new_capacity,
-                 HashtablezInfoHandle forced_infoz);
+  // Deallocates the backing store from common.
+  void (*dealloc)(void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
+                  size_t slot_align, bool had_infoz);
+
+  // Implementation detail of GrowToNextCapacity.
+  // Iterates over all full slots and transfers unprobed elements.
+  // Initializes the new control bytes except mirrored bytes and kSentinel.
+  // Caller must finish the initialization.
+  // All slots corresponding to the full control bytes are transferred.
+  // Probed elements are reported by `encode_probed_element` callback.
+  // encode_probed_element may overwrite old_ctrl buffer till source_offset.
+  // Different encoding is used depending on the capacity of the table.
+  // See ProbedItem*Bytes classes for details.
+  void (*transfer_unprobed_elements_to_next_capacity)(
+      CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
+      // TODO(b/382423690): Try to use absl::FunctionRef here.
+      void* probed_storage,
+      void (*encode_probed_element)(void* probed_storage, h2_t h2,
+                                    size_t source_offset, size_t h1));
+
+  uint8_t soo_capacity() const {
+    return static_cast<uint8_t>(soo_enabled ? SooCapacity() : 0);
+  }
 };
 
|
2293
1686
|
|
+// Returns the maximum valid size for a table with 1-byte slots.
+// This function is an utility shared by MaxValidSize and IsAboveValidSize.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSizeFor1ByteSlot() {
+  if constexpr (kSizeOfSizeT == 8) {
+    return CapacityToGrowth(
+        static_cast<size_t>(uint64_t{1} << HashtableSize::kSizeBitCount) - 1);
+  } else {
+    static_assert(kSizeOfSizeT == 4);
+    return CapacityToGrowth((size_t{1} << (kSizeOfSizeT * 8 - 2)) - 1);
+  }
+}
+
+// Returns the maximum valid size for a table with provided slot size.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSize(size_t slot_size) {
+  if constexpr (kSizeOfSizeT == 8) {
+    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+    if (slot_size < size_t{1} << (64 - HashtableSize::kSizeBitCount)) {
+      return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+    }
+    return (size_t{1} << (kSizeOfSizeT * 8 - 2)) / slot_size;
+  } else {
+    return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>() / slot_size;
+  }
+}
+
+// Returns true if size is larger than the maximum valid size.
+// It is an optimization to avoid the division operation in the common case.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr bool IsAboveValidSize(size_t size, size_t slot_size) {
+  if constexpr (kSizeOfSizeT == 8) {
+    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+    if (ABSL_PREDICT_TRUE(slot_size <
+                          (size_t{1} << (64 - HashtableSize::kSizeBitCount)))) {
+      return size > MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+    }
+    return size > MaxValidSize<kSizeOfSizeT>(slot_size);
+  } else {
+    return uint64_t{size} * slot_size >
+           MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+  }
+}
+
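These size-cap helpers trade a division for a comparison on the hot path. A self-contained restatement of the arithmetic, assuming a 64-bit size_t, a 7/8 growth factor, and a 44-bit size field purely for illustration (the real constants live in HashtableSize and CapacityToGrowth):

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-ins; not the library's actual constants.
    constexpr std::size_t kSizeBitCount = 44;
    constexpr std::size_t CapacityToGrowthSketch(std::size_t capacity) {
      return capacity - capacity / 8;  // tables grow at ~7/8 occupancy
    }
    constexpr std::size_t MaxSizeFor1ByteSlot64() {
      return CapacityToGrowthSketch(
          static_cast<std::size_t>(std::uint64_t{1} << kSizeBitCount) - 1);
    }
    // The hot path avoids a division: for small slots one compare suffices.
    constexpr bool IsAboveSketch(std::size_t size, std::size_t slot_size) {
      if (slot_size < (std::size_t{1} << (64 - kSizeBitCount))) {
        return size > MaxSizeFor1ByteSlot64();
      }
      return size > (std::size_t{1} << 62) / slot_size;
    }
    static_assert(!IsAboveSketch(1'000'000, 8),
                  "typical tables are well within range");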
+// Returns the index of the SOO slot when growing from SOO to non-SOO in a
+// single group. See also InitializeSmallControlBytesAfterSoo(). It's important
+// to use index 1 so that when resizing from capacity 1 to 3, we can still have
+// random iteration order between the first two inserted elements.
+// I.e. it allows inserting the second element at either index 0 or 2.
+constexpr size_t SooSlotIndex() { return 1; }
+
+// Maximum capacity for the algorithm for small table after SOO.
+// Note that typical size after SOO is 3, but we allow up to 7.
+// Allowing till 16 would require additional store that can be avoided.
+constexpr size_t MaxSmallAfterSooCapacity() { return 7; }
+
+// Type erased version of raw_hash_set::reserve.
+// Requires: `new_size > policy.soo_capacity`.
+void ReserveTableToFitNewSize(CommonFields& common,
+                              const PolicyFunctions& policy, size_t new_size);
+
+// Resizes empty non-allocated table to the next valid capacity after
+// `bucket_count`. Requires:
+// 1. `c.capacity() == policy.soo_capacity`.
+// 2. `c.empty()`.
+// 3. `new_size > policy.soo_capacity`.
+// The table will be attempted to be sampled.
+void ReserveEmptyNonAllocatedTableToFitBucketCount(
+    CommonFields& common, const PolicyFunctions& policy, size_t bucket_count);
+
+// Type erased version of raw_hash_set::rehash.
+void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n);
+
+// Type erased version of copy constructor.
+void Copy(CommonFields& common, const PolicyFunctions& policy,
+          const CommonFields& other,
+          absl::FunctionRef<void(void*, const void*)> copy_fn);
+
+// Returns the optimal size for memcpy when transferring SOO slot.
+// Otherwise, returns the optimal size for memcpy SOO slot transfer
+// to SooSlotIndex().
+// At the destination we are allowed to copy upto twice more bytes,
+// because there is at least one more slot after SooSlotIndex().
+// The result must not exceed MaxSooSlotSize().
+// Some of the cases are merged to minimize the number of function
+// instantiations.
+constexpr size_t OptimalMemcpySizeForSooSlotTransfer(
+    size_t slot_size, size_t max_soo_slot_size = MaxSooSlotSize()) {
+  static_assert(MaxSooSlotSize() >= 8, "unexpectedly small SOO slot size");
+  if (slot_size == 1) {
+    return 1;
+  }
+  if (slot_size <= 3) {
+    return 4;
+  }
+  // We are merging 4 and 8 into one case because we expect them to be the
+  // hottest cases. Copying 8 bytes is as fast on common architectures.
+  if (slot_size <= 8) {
+    return 8;
+  }
+  if (max_soo_slot_size <= 16) {
+    return max_soo_slot_size;
+  }
+  if (slot_size <= 16) {
+    return 16;
+  }
+  if (max_soo_slot_size <= 24) {
+    return max_soo_slot_size;
+  }
+  static_assert(MaxSooSlotSize() <= 24, "unexpectedly large SOO slot size");
+  return 24;
+}
+
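The effect of the merging is that only a handful of memcpy widths ever get instantiated, regardless of the slot type. A self-contained restatement of the bucketing, assuming MaxSooSlotSize() == 24 for illustration:

    #include <cstddef>

    // Illustrative restatement; assumes a 24-byte maximum SOO slot.
    constexpr std::size_t MemcpyBucket(std::size_t slot_size) {
      if (slot_size == 1) return 1;
      if (slot_size <= 3) return 4;
      if (slot_size <= 8) return 8;    // 4 and 8 merged: the hottest cases
      if (slot_size <= 16) return 16;
      return 24;
    }
    static_assert(MemcpyBucket(12) == 16 && MemcpyBucket(20) == 24,
                  "five instantiations cover every SOO slot size");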
+// Resizes SOO table to the NextCapacity(SooCapacity()) and prepares insert for
+// the given new_hash. Returns the offset of the new element.
+// `soo_slot_ctrl` is the control byte of the SOO slot.
+// If soo_slot_ctrl is kEmpty
+// 1. The table must be empty.
+// 2. Table will be forced to be sampled.
+// All possible template combinations are defined in cc file to improve
+// compilation time.
+template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
+size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common,
+                                                  const PolicyFunctions& policy,
+                                                  size_t new_hash,
+                                                  ctrl_t soo_slot_ctrl);
+
+// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
+// table to be sampled. SOO tables need to switch from SOO to heap in order to
+// store the infoz. No-op if sampling is disabled or not possible.
+void GrowFullSooTableToNextCapacityForceSampling(CommonFields& common,
+                                                 const PolicyFunctions& policy);
+
+// Resizes table with allocated slots and change the table seed.
+// Tables with SOO enabled must have capacity > policy.soo_capacity.
+// No sampling will be performed since table is already allocated.
+void ResizeAllocatedTableWithSeedChange(CommonFields& common,
+                                        const PolicyFunctions& policy,
+                                        size_t new_capacity);
+
 // ClearBackingArray clears the backing array, either modifying it in place,
 // or creating a new one based on the value of "reuse".
 // REQUIRES: c.capacity > 0
 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
-                       bool reuse, bool soo_enabled);
+                       void* alloc, bool reuse, bool soo_enabled);
 
 // Type-erased version of raw_hash_set::erase_meta_only.
 void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
 
-// Function to place in PolicyFunctions::dealloc for raw_hash_sets
-// that are using std::allocator. This allows us to share the same
-// function body for raw_hash_set instantiations that have the
-// same slot alignment.
-template <size_t AlignOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
-                                                const PolicyFunctions& policy) {
-  // Unpoison before returning the memory to the allocator.
-  SanitizerUnpoisonMemoryRegion(common.slot_array(),
-                                policy.slot_size * common.capacity());
-
-  std::allocator<char> alloc;
-  common.infoz().Unregister();
-  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
-      &alloc, common.backing_array_start(),
-      common.alloc_size(policy.slot_size, AlignOfSlot));
-}
-
 // For trivially relocatable types we use memcpy directly. This allows us to
 // share the same function body for raw_hash_set instantiations that have the
 // same slot size as long as they are relocatable.
+// Separate function for relocating single slot cause significant binary bloat.
 template <size_t SizeOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void
-
+ABSL_ATTRIBUTE_NOINLINE void TransferNRelocatable(void*, void* dst, void* src,
+                                                  size_t count) {
+  // TODO(b/382423690): Experiment with making specialization for power of 2 and
+  // non power of 2. This would require passing the size of the slot.
+  memcpy(dst, src, SizeOfSlot * count);
 }
 
-//
-
+// Returns a pointer to `common`. This is used to implement type erased
+// raw_hash_set::get_hash_ref_fn and raw_hash_set::get_alloc_ref_fn for the
+// empty class cases.
+void* GetRefForEmptyClass(CommonFields& common);
 
 // Given the hash of a value not currently in the table and the first empty
 // slot in the probe sequence, finds a viable slot index to insert it at.
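TransferNRelocatable keys its instantiation on slot size rather than slot type, so unrelated tables with same-sized trivially relocatable slots share one function body. A hedged sketch of that sharing trick (`TransferBytes` is illustrative, not the library symbol):

    #include <cstring>

    // Instantiation is keyed on slot *size*, not slot *type*: a table of
    // int64_t keys and a table of double keys can share one 8-byte relocator.
    template <std::size_t N>
    void TransferBytes(void* dst, void* src, std::size_t count) {
      std::memcpy(dst, src, N * count);
    }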
@@ -2344,8 +1868,8 @@ const void* GetHashRefForEmptyHasher(const CommonFields& common);
 // REQUIRES: Table is not SOO.
 // REQUIRES: At least one non-full slot available.
 // REQUIRES: `target` is a valid empty position to insert.
-size_t PrepareInsertNonSoo(CommonFields& common,
-
+size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy,
+                           size_t hash, FindInfo target);
 
 // A SwissTable.
 //
@@ -2376,9 +1900,6 @@ class raw_hash_set {
  public:
   using init_type = typename PolicyTraits::init_type;
   using key_type = typename PolicyTraits::key_type;
-  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
-  // code fixes!
-  using slot_type = typename PolicyTraits::slot_type;
   using allocator_type = Alloc;
   using size_type = size_t;
   using difference_type = ptrdiff_t;
@@ -2393,6 +1914,7 @@ class raw_hash_set {
   using const_pointer = typename absl::allocator_traits<
       allocator_type>::template rebind_traits<value_type>::const_pointer;
 
+ private:
   // Alias used for heterogeneous lookup functions.
   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
   // `key_type` otherwise. It permits template argument deduction on `K` for the
@@ -2400,7 +1922,8 @@ class raw_hash_set {
   template <class K>
   using key_arg = typename KeyArgImpl::template type<K, key_type>;
 
-
+  using slot_type = typename PolicyTraits::slot_type;
+
   // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
   // after CommonFields to allow inlining larger slot_types (e.g. std::string),
   // but it's a bit complicated if we want to support incomplete mapped_type in
@@ -2650,18 +2173,15 @@ class raw_hash_set {
           std::is_nothrow_default_constructible<key_equal>::value &&
           std::is_nothrow_default_constructible<allocator_type>::value) {}
 
-
+  explicit raw_hash_set(
       size_t bucket_count, const hasher& hash = hasher(),
       const key_equal& eq = key_equal(),
       const allocator_type& alloc = allocator_type())
       : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
                   alloc) {
     if (bucket_count > DefaultCapacity()) {
-
-
-      HashTableSizeOverflow();
-    }
-    resize(NormalizeCapacity(bucket_count));
+      ReserveEmptyNonAllocatedTableToFitBucketCount(
+          common(), GetPolicyFunctions(), bucket_count);
     }
   }
 
@@ -2762,74 +2282,20 @@ class raw_hash_set {
 
   raw_hash_set(const raw_hash_set& that)
       : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
-                               that.
+                               allocator_type(that.char_alloc_ref()))) {}
 
   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
-      : raw_hash_set(
-                     that.eq_ref(), a) {
+      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
     that.AssertNotDebugCapacity();
-
-
-
-
-
-
-
-
-
-      emplace_at(soo_iterator(), *that.begin());
-      const HashtablezInfoHandle infoz = try_sample_soo();
-      if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
-      return;
-    }
-    ABSL_SWISSTABLE_ASSERT(!that.is_soo());
-    const size_t cap = capacity();
-    // Note about single group tables:
-    // 1. It is correct to have any order of elements.
-    // 2. Order has to be non deterministic.
-    // 3. We are assigning elements with arbitrary `shift` starting from
-    //    `capacity + shift` position.
-    // 4. `shift` must be coprime with `capacity + 1` in order to be able to use
-    //    modular arithmetic to traverse all positions, instead if cycling
-    //    through a subset of positions. Odd numbers are coprime with any
-    //    `capacity + 1` (2^N).
-    size_t offset = cap;
-    const size_t shift =
-        is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
-    IterateOverFullSlots(
-        that.common(), that.slot_array(),
-        [&](const ctrl_t* that_ctrl,
-            slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
-          if (shift == 0) {
-            // Big tables case. Position must be searched via probing.
-            // The table is guaranteed to be empty, so we can do faster than
-            // a full `insert`.
-            const size_t hash = PolicyTraits::apply(
-                HashElement{hash_ref()}, PolicyTraits::element(that_slot));
-            FindInfo target = find_first_non_full_outofline(common(), hash);
-            infoz().RecordInsert(hash, target.probe_length);
-            offset = target.offset;
-          } else {
-            // Small tables case. Next position is computed via shift.
-            offset = (offset + shift) & cap;
-          }
-          const h2_t h2 = static_cast<h2_t>(*that_ctrl);
-          ABSL_SWISSTABLE_ASSERT(  // We rely that hash is not changed for small
-                                   // tables.
-              H2(PolicyTraits::apply(HashElement{hash_ref()},
-                                     PolicyTraits::element(that_slot))) == h2 &&
-              "hash function value changed unexpectedly during the copy");
-          SetCtrl(common(), offset, h2, sizeof(slot_type));
-          emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
-          common().maybe_increment_generation_on_insert();
-        });
-    if (shift != 0) {
-      // On small table copy we do not record individual inserts.
-      // RecordInsert requires hash, but it is unknown for small tables.
-      infoz().RecordStorageChanged(size, cap);
-    }
-    common().set_size(size);
-    growth_info().OverwriteManyEmptyAsFull(size);
+    if (that.empty()) return;
+    Copy(common(), GetPolicyFunctions(), that.common(),
+         [this](void* dst, const void* src) {
+           // TODO(b/413598253): type erase for trivially copyable types via
+           // PolicyTraits.
+           construct(to_slot(dst),
+                     PolicyTraits::element(
+                         static_cast<slot_type*>(const_cast<void*>(src))));
+         });
   }
 
   ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
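With this hunk the copy constructor funnels through the shared, type-erased Copy routine; only the per-slot callback knows the concrete slot type. A sketch of the callback contract, with `Slot` and `CopyOneSlot` as hypothetical stand-ins:

    #include <new>

    // The shared routine walks the source table and hands each (dst, src)
    // pair to a functor that restores the concrete type.
    struct Slot { int value; };

    void CopyOneSlot(void* dst, const void* src) {
      // Placement copy-construct into the destination slot's raw storage.
      ::new (dst) Slot(*static_cast<const Slot*>(src));
    }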
@@ -2843,7 +2309,7 @@ class raw_hash_set {
         settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
                       ? std::move(that.common())
                       : CommonFields{full_soo_tag_t{}},
-                  that.hash_ref(), that.eq_ref(), that.
+                  that.hash_ref(), that.eq_ref(), that.char_alloc_ref()) {
     if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
       transfer(soo_slot(), that.soo_slot());
     }
@@ -2854,7 +2320,7 @@ class raw_hash_set {
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
                   that.eq_ref(), a) {
-    if (a == that.
+    if (CharAlloc(a) == that.char_alloc_ref()) {
       swap_common(that);
       annotate_for_bug_detection_on_move(that);
     } else {
@@ -2871,7 +2337,9 @@ class raw_hash_set {
     // is an exact match for that.size(). If this->capacity() is too big, then
     // it would make iteration very slow to reuse the allocation. Maybe we can
     // do the same heuristic as clear() and reuse if it's small enough.
-
+    allocator_type alloc(propagate_alloc ? that.char_alloc_ref()
+                                         : char_alloc_ref());
+    raw_hash_set tmp(that, alloc);
     // NOLINTNEXTLINE: not returning *this for performance.
     return assign_impl<propagate_alloc>(std::move(tmp));
   }
@@ -2890,14 +2358,14 @@ class raw_hash_set {
 
   ~raw_hash_set() {
     destructor_impl();
-
-
-
+    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+      common().set_capacity(InvalidCapacity::kDestroyed);
+    }
   }
 
   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
     if (ABSL_PREDICT_FALSE(empty())) return end();
-    if (
+    if (capacity() == 1) return single_iterator();
     iterator it = {control(), common().slots_union(),
                    common().generation_ptr()};
     it.skip_empty_or_deleted();
@@ -2933,9 +2401,7 @@ class raw_hash_set {
     ABSL_ASSUME(cap >= kDefaultCapacity);
     return cap;
   }
-  size_t max_size() const {
-    return CapacityToGrowth(MaxValidCapacity<sizeof(slot_type)>());
-  }
+  size_t max_size() const { return MaxValidSize(sizeof(slot_type)); }
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     if (SwisstableGenerationsEnabled() &&
@@ -2958,8 +2424,7 @@ class raw_hash_set {
       common().set_empty_soo();
     } else {
       destroy_slots();
-
-                        SooEnabled());
+      clear_backing_array(/*reuse=*/cap < 128);
     }
     common().set_reserved_growth(0);
     common().set_reservation_size(0);
@@ -2971,15 +2436,15 @@ class raw_hash_set {
   //   flat_hash_map<std::string, int> m;
   //   m.insert(std::make_pair("abc", 42));
   template <class T,
-            std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
-
-
-
+            int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
+                                       IsNotBitField<T>::value &&
+                                       !IsLifetimeBoundAssignmentFrom<T>::value,
+                                   int>()>
   std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(std::forward<T>(value));
   }
 
-  template <class T,
+  template <class T, int&...,
             std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
                                  IsNotBitField<T>::value &&
                                  IsLifetimeBoundAssignmentFrom<T>::value,
@@ -2987,7 +2452,7 @@ class raw_hash_set {
   std::pair<iterator, bool> insert(
       T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    return
+    return this->template insert<T, 0>(std::forward<T>(value));
   }
 
   // This overload kicks in when the argument is a bitfield or an lvalue of
@@ -3001,22 +2466,22 @@ class raw_hash_set {
   //   const char* p = "hello";
   //   s.insert(p);
   //
-  template <class T, std::enable_if_t<
+  template <class T, int = std::enable_if_t<
                          IsDecomposableAndInsertable<const T&>::value &&
                              !IsLifetimeBoundAssignmentFrom<const T&>::value,
-                         int>
+                         int>()>
   std::pair<iterator, bool> insert(const T& value)
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return emplace(value);
   }
-  template <class T,
+  template <class T, int&...,
            std::enable_if_t<IsDecomposableAndInsertable<const T&>::value &&
                                 IsLifetimeBoundAssignmentFrom<const T&>::value,
                             int> = 0>
   std::pair<iterator, bool> insert(
       const T& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
       ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    return
+    return this->template insert<T, 0>(value);
  }
 
   // This overload kicks in when the argument is an rvalue of init_type. Its
@@ -3043,21 +2508,22 @@ class raw_hash_set {
 #endif
 
   template <class T,
-            std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
-
-
-
+            int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
+                                       IsNotBitField<T>::value &&
+                                       !IsLifetimeBoundAssignmentFrom<T>::value,
+                                   int>()>
   iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return insert(std::forward<T>(value)).first;
   }
-  template <class T,
+  template <class T, int&...,
             std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
                                  IsNotBitField<T>::value &&
                                  IsLifetimeBoundAssignmentFrom<T>::value,
                             int> = 0>
-  iterator insert(const_iterator,
-
-
+  iterator insert(const_iterator hint,
+                  T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return this->template insert<T, 0>(hint, std::forward<T>(value));
  }
 
   template <class T, std::enable_if_t<
@@ -3198,7 +2664,8 @@ class raw_hash_set {
     auto res = find_or_prepare_insert(key);
     if (res.second) {
       slot_type* slot = res.first.slot();
-
+      allocator_type alloc(char_alloc_ref());
+      std::forward<F>(f)(constructor(&alloc, &slot));
       ABSL_SWISSTABLE_ASSERT(!slot);
     }
     return res.first;
@@ -3243,7 +2710,7 @@ class raw_hash_set {
   iterator erase(const_iterator first,
                  const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     AssertNotDebugCapacity();
-    // We check for empty first because
+    // We check for empty first because clear_backing_array requires that
     // capacity() > 0 as a precondition.
     if (empty()) return end();
     if (first == last) return last.inner_;
@@ -3254,11 +2721,10 @@ class raw_hash_set {
     }
     if (first == begin() && last == end()) {
       // TODO(ezb): we access control bytes in destroy_slots so it could make
-      // sense to combine destroy_slots and
+      // sense to combine destroy_slots and clear_backing_array to avoid cache
      // misses when the table is large. Note that we also do this in clear().
       destroy_slots();
-
-                        SooEnabled());
+      clear_backing_array(/*reuse=*/true);
       common().set_reserved_growth(common().reservation_size());
       return end();
     }
@@ -3303,7 +2769,8 @@ class raw_hash_set {
     AssertNotDebugCapacity();
     AssertIsFull(position.control(), position.inner_.generation(),
                  position.inner_.generation_ptr(), "extract()");
-
+    allocator_type alloc(char_alloc_ref());
+    auto node = CommonAccess::Transfer<node_type>(alloc, position.slot());
     if (is_soo()) {
       common().set_empty_soo();
     } else {
@@ -3329,73 +2796,16 @@ class raw_hash_set {
     swap_common(that);
     swap(hash_ref(), that.hash_ref());
     swap(eq_ref(), that.eq_ref());
-    SwapAlloc(
+    SwapAlloc(char_alloc_ref(), that.char_alloc_ref(),
               typename AllocTraits::propagate_on_container_swap{});
   }
 
-  void rehash(size_t n) {
-    const size_t cap = capacity();
-    if (n == 0) {
-      if (cap == 0 || is_soo()) return;
-      if (empty()) {
-        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
-                          SooEnabled());
-        return;
-      }
-      if (fits_in_soo(size())) {
-        // When the table is already sampled, we keep it sampled.
-        if (infoz().IsSampled()) {
-          const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
-          if (capacity() > kInitialSampledCapacity) {
-            resize(kInitialSampledCapacity);
-          }
-          // This asserts that we didn't lose sampling coverage in `resize`.
-          ABSL_SWISSTABLE_ASSERT(infoz().IsSampled());
-          return;
-        }
-        alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
-        slot_type* tmp_slot = to_slot(slot_space);
-        transfer(tmp_slot, begin().slot());
-        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
-                          SooEnabled());
-        transfer(soo_slot(), tmp_slot);
-        common().set_full_soo();
-        return;
-      }
-    }
-
-    // bitor is a faster way of doing `max` here. We will round up to the next
-    // power-of-2-minus-1, so bitor is good enough.
-    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
-    // n == 0 unconditionally rehashes as per the standard.
-    if (n == 0 || m > cap) {
-      if (ABSL_PREDICT_FALSE(m > MaxValidCapacity<sizeof(slot_type)>())) {
-        HashTableSizeOverflow();
-      }
-      resize(m);
-
-      // This is after resize, to ensure that we have completed the allocation
-      // and have potentially sampled the hashtable.
-      infoz().RecordReservation(n);
-    }
-  }
+  void rehash(size_t n) { Rehash(common(), GetPolicyFunctions(), n); }
 
   void reserve(size_t n) {
-
-
-    if (n > max_size_before_growth) {
-      if (ABSL_PREDICT_FALSE(n > max_size())) {
-        HashTableSizeOverflow();
-      }
-      size_t m = GrowthToLowerboundCapacity(n);
-      resize(NormalizeCapacity(m));
-
-      // This is after resize, to ensure that we have completed the allocation
-      // and have potentially sampled the hashtable.
-      infoz().RecordReservation(n);
+    if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
+      ReserveTableToFitNewSize(common(), GetPolicyFunctions(), n);
     }
-    common().reset_reserved_growth(n);
-    common().set_reservation_size(n);
   }
 
   // Extension API: support for heterogeneous keys.
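The rewritten rehash and reserve bodies collapse to calls into the shared out-of-line routines; the observable contract is unchanged, as a plain usage example against the public API shows:

    #include "absl/container/flat_hash_set.h"

    // reserve(n) still guarantees the next n insertions trigger no rehash,
    // whatever the internal resize path now looks like.
    void ReserveExample() {
      absl::flat_hash_set<int> s;
      s.reserve(1000);                             // one allocation up front
      for (int i = 0; i < 1000; ++i) s.insert(i);  // no rehash inside the loop
    }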
@@ -3424,7 +2834,7 @@ class raw_hash_set {
     // Avoid probing if we won't be able to prefetch the addresses received.
 #ifdef ABSL_HAVE_PREFETCH
     prefetch_heap_block();
-    auto seq = probe(common(),
+    auto seq = probe(common(), hash_of(key));
     PrefetchToLocalCache(control() + seq.offset());
     PrefetchToLocalCache(slot_array() + seq.offset());
 #endif  // ABSL_HAVE_PREFETCH
@@ -3441,9 +2851,9 @@ class raw_hash_set {
   template <class K = key_type>
   iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
     AssertOnFind(key);
-    if (
+    if (capacity() <= 1) return find_small(key);
     prefetch_heap_block();
-    return
+    return find_large(key, hash_of(key));
   }
 
   template <class K = key_type>
@@ -3493,7 +2903,9 @@ class raw_hash_set {
 
   hasher hash_function() const { return hash_ref(); }
   key_equal key_eq() const { return eq_ref(); }
-  allocator_type get_allocator() const {
+  allocator_type get_allocator() const {
+    return allocator_type(char_alloc_ref());
+  }
 
   friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
     if (a.size() != b.size()) return false;
@@ -3525,7 +2937,7 @@ class raw_hash_set {
                          H>::type
   AbslHashValue(H h, const raw_hash_set& s) {
     return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
-                      s.size());
+                      hash_internal::WeaklyMixedInteger{s.size()});
   }
 
   friend void swap(raw_hash_set& a,
@@ -3560,7 +2972,7 @@ class raw_hash_set {
   struct EqualElement {
     template <class K2, class... Args>
     bool operator()(const K2& lhs, Args&&...) const {
-
+      ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs));
     }
     const K1& rhs;
     const key_equal& eq;
@@ -3598,37 +3010,48 @@ class raw_hash_set {
   template <typename... Args>
   inline void construct(slot_type* slot, Args&&... args) {
     common().RunWithReentrancyGuard([&] {
-
+      allocator_type alloc(char_alloc_ref());
+      PolicyTraits::construct(&alloc, slot, std::forward<Args>(args)...);
     });
   }
   inline void destroy(slot_type* slot) {
-    common().RunWithReentrancyGuard(
-
+    common().RunWithReentrancyGuard([&] {
+      allocator_type alloc(char_alloc_ref());
+      PolicyTraits::destroy(&alloc, slot);
+    });
   }
   inline void transfer(slot_type* to, slot_type* from) {
-    common().RunWithReentrancyGuard(
-
+    common().RunWithReentrancyGuard([&] {
+      allocator_type alloc(char_alloc_ref());
+      PolicyTraits::transfer(&alloc, to, from);
+    });
   }
 
   // TODO(b/289225379): consider having a helper class that has the impls for
   // SOO functionality.
   template <class K = key_type>
-  iterator
-    ABSL_SWISSTABLE_ASSERT(
-    return empty() || !PolicyTraits::apply(
-
+  iterator find_small(const key_arg<K>& key) {
+    ABSL_SWISSTABLE_ASSERT(capacity() <= 1);
+    return empty() || !PolicyTraits::apply(
+                          EqualElement<K>{key, eq_ref()},
+                          PolicyTraits::element(single_slot()))
               ? end()
-              :
+              : single_iterator();
   }
 
   template <class K = key_type>
-  iterator
+  iterator find_large(const key_arg<K>& key, size_t hash) {
+    ABSL_SWISSTABLE_ASSERT(capacity() > 1);
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     auto seq = probe(common(), hash);
+    const h2_t h2 = H2(hash);
     const ctrl_t* ctrl = control();
     while (true) {
+#ifndef ABSL_HAVE_MEMORY_SANITIZER
+      absl::PrefetchToLocalCache(slot_array() + seq.offset());
+#endif
       Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(
+      for (uint32_t i : g.Match(h2)) {
         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                 EqualElement<K>{key, eq_ref()},
                 PolicyTraits::element(slot_array() + seq.offset(i)))))
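find_large now hoists the 7-bit h2 tag out of the probe loop, computing it once per lookup instead of once per probed group. A sketch of the h1/h2 split, with an illustrative bit layout rather than the library's exact one:

    #include <cstddef>
    #include <cstdint>

    // Illustrative split only; the real layout depends on the per-table seed.
    inline std::uint8_t H2Sketch(std::size_t hash) {
      return static_cast<std::uint8_t>(hash & 0x7F);  // low 7 bits tag the slot
    }
    inline std::size_t H1Sketch(std::size_t hash) {
      return hash >> 7;  // remaining bits choose the probe sequence
    }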
@@ -3640,36 +3063,49 @@ class raw_hash_set {
     }
   }
 
-  //
-  // insertion into an empty SOO table and in copy
-  // can fit in SOO capacity.
-
+  // Returns true if the table needs to be sampled.
+  // This should be called on insertion into an empty SOO table and in copy
+  // construction when the size can fit in SOO capacity.
+  bool should_sample_soo() const {
     ABSL_SWISSTABLE_ASSERT(is_soo());
-    if (!
-    return
-
+    if (!ShouldSampleHashtablezInfoForAlloc<CharAlloc>()) return false;
+    return ABSL_PREDICT_FALSE(ShouldSampleNextTable());
+  }
+
+  void clear_backing_array(bool reuse) {
+    ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
+    ClearBackingArray(common(), GetPolicyFunctions(), &char_alloc_ref(), reuse,
+                      SooEnabled());
   }
 
-
+  void destroy_slots() {
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
-
-
-
-
+    auto destroy_slot = [&](const ctrl_t*, void* slot) {
+      this->destroy(static_cast<slot_type*>(slot));
+    };
+    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+      CommonFields common_copy(non_soo_tag_t{}, this->common());
+      common().set_capacity(InvalidCapacity::kDestroyed);
+      IterateOverFullSlots(common_copy, sizeof(slot_type), destroy_slot);
+      common().set_capacity(common_copy.capacity());
+    } else {
+      IterateOverFullSlots(common(), sizeof(slot_type), destroy_slot);
+    }
   }
 
-
-    ABSL_SWISSTABLE_ASSERT(capacity()
+  void dealloc() {
+    ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
     // Unpoison before returning the memory to the allocator.
     SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
     infoz().Unregister();
-
-
-
+    DeallocateBackingArray<BackingArrayAlignment(alignof(slot_type)),
+                           CharAlloc>(&char_alloc_ref(), capacity(), control(),
+                                      sizeof(slot_type), alignof(slot_type),
+                                      common().has_infoz());
   }
 
-
+  void destructor_impl() {
     if (SwisstableGenerationsEnabled() &&
         capacity() >= InvalidCapacity::kMovedFrom) {
       return;
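dealloc() unpoisons the slot array before handing memory back because the allocator is entitled to touch bytes the table had poisoned. A hedged sketch of that discipline using the public ASan macro (`ReleaseSlots` is hypothetical, and the memory is assumed to come from ::operator new):

    #include <cstddef>
    #ifdef __SANITIZE_ADDRESS__
    #include <sanitizer/asan_interface.h>
    #else
    #define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
    #endif

    // Unused slot bytes stay poisoned while the table owns them; they must be
    // unpoisoned before the bytes are returned to the allocator.
    void ReleaseSlots(void* slots, std::size_t bytes) {
      ASAN_UNPOISON_MEMORY_REGION(slots, bytes);
      ::operator delete(slots);  // `slots` previously obtained via ::operator new
    }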
@@ -3695,128 +3131,21 @@ class raw_hash_set {
                   sizeof(slot_type));
   }
 
+  template <class K>
+  size_t hash_of(const K& key) const {
+    return hash_ref()(key);
+  }
   size_t hash_of(slot_type* slot) const {
     return PolicyTraits::apply(HashElement{hash_ref()},
                                PolicyTraits::element(slot));
   }
 
-  // Resizes table to the new capacity and move all elements to the new
-  // positions accordingly.
-  //
-  // Note that for better performance instead of
-  // find_first_non_full(common(), hash),
-  // HashSetResizeHelper::FindFirstNonFullAfterResize(
-  //   common(), old_capacity, hash)
-  // can be called right after `resize`.
-  void resize(size_t new_capacity) {
-    raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
-  }
-
-  // As above, except that we also accept a pre-sampled, forced infoz for
-  // SOO tables, since they need to switch from SOO to heap in order to
-  // store the infoz.
-  void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
-    ABSL_SWISSTABLE_ASSERT(forced_infoz.IsSampled());
-    raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
-                              forced_infoz);
-  }
-
-  // Resizes set to the new capacity.
-  // It is a static function in order to use its pointer in GetPolicyFunctions.
-  ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
-      CommonFields& common, size_t new_capacity,
-      HashtablezInfoHandle forced_infoz) {
-    raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
-    ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
-    ABSL_SWISSTABLE_ASSERT(!set->fits_in_soo(new_capacity));
-    const bool was_soo = set->is_soo();
-    const bool had_soo_slot = was_soo && !set->empty();
-    const ctrl_t soo_slot_h2 =
-        had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
-                     : ctrl_t::kEmpty;
-    HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
-                                      forced_infoz);
-    // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
-    // HashSetResizeHelper constructor because it can't transfer slots when
-    // transfer_uses_memcpy is false.
-    // TODO(b/289225379): try to handle more of the SOO cases inside
-    // InitializeSlots. See comment on cl/555990034 snapshot #63.
-    if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
-      resize_helper.old_heap_or_soo() = common.heap_or_soo();
-    } else {
-      set->transfer(set->to_slot(resize_helper.old_soo_data()),
-                    set->soo_slot());
-    }
-    common.set_capacity(new_capacity);
-    // Note that `InitializeSlots` does different number initialization steps
-    // depending on the values of `transfer_uses_memcpy` and capacities.
-    // Refer to the comment in `InitializeSlots` for more details.
-    const bool grow_single_group =
-        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
-                                      PolicyTraits::transfer_uses_memcpy(),
-                                      SooEnabled(), alignof(slot_type)>(
-            common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
-            sizeof(value_type));
-
-    // In the SooEnabled() case, capacity is never 0 so we don't check.
-    if (!SooEnabled() && resize_helper.old_capacity() == 0) {
-      // InitializeSlots did all the work including infoz().RecordRehash().
-      return;
-    }
-    ABSL_SWISSTABLE_ASSERT(resize_helper.old_capacity() > 0);
-    // Nothing more to do in this case.
-    if (was_soo && !had_soo_slot) return;
-
-    slot_type* new_slots = set->slot_array();
-    if (grow_single_group) {
-      if (PolicyTraits::transfer_uses_memcpy()) {
-        // InitializeSlots did all the work.
-        return;
-      }
-      if (was_soo) {
-        set->transfer(new_slots + resize_helper.SooSlotIndex(),
-                      to_slot(resize_helper.old_soo_data()));
-        return;
-      } else {
-        // We want GrowSizeIntoSingleGroup to be called here in order to make
-        // InitializeSlots not depend on PolicyTraits.
-        resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
-                                                            set->alloc_ref());
-      }
-    } else {
-      // InitializeSlots prepares control bytes to correspond to empty table.
-      const auto insert_slot = [&](slot_type* slot) {
-        size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
-                                          PolicyTraits::element(slot));
-        auto target = find_first_non_full(common, hash);
-        SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
-        set->transfer(new_slots + target.offset, slot);
-        return target.probe_length;
-      };
-      if (was_soo) {
-        insert_slot(to_slot(resize_helper.old_soo_data()));
-        return;
-      } else {
-        auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
-        size_t total_probe_length = 0;
-        for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
-          if (IsFull(resize_helper.old_ctrl()[i])) {
-            total_probe_length += insert_slot(old_slots + i);
-          }
-        }
-        common.infoz().RecordRehash(total_probe_length);
-      }
-    }
-    resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
-                                                    sizeof(slot_type));
-  }
-
   // Casting directly from e.g. char* to slot_type* can cause compilation errors
   // on objective-C. This function converts to void* first, avoiding the issue.
   static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
 
   // Requires that lhs does not have a full SOO slot.
-  static void move_common(bool rhs_is_full_soo,
+  static void move_common(bool rhs_is_full_soo, CharAlloc& rhs_alloc,
                           CommonFields& lhs, CommonFields&& rhs) {
     if (PolicyTraits::transfer_uses_memcpy() || !rhs_is_full_soo) {
       lhs = std::move(rhs);
@@ -3841,10 +3170,12 @@ class raw_hash_set {
     }
     CommonFields tmp = CommonFields(uninitialized_tag_t{});
     const bool that_is_full_soo = that.is_full_soo();
-    move_common(that_is_full_soo, that.
+    move_common(that_is_full_soo, that.char_alloc_ref(), tmp,
                 std::move(that.common()));
-    move_common(is_full_soo(),
-
+    move_common(is_full_soo(), char_alloc_ref(), that.common(),
+                std::move(common()));
+    move_common(that_is_full_soo, that.char_alloc_ref(), common(),
+                std::move(tmp));
   }
 
   void annotate_for_bug_detection_on_move(
@@ -3862,7 +3193,8 @@ class raw_hash_set {
     }
     common().increment_generation();
     if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
-
+      ResizeAllocatedTableWithSeedChange(common(), GetPolicyFunctions(),
+                                         capacity());
     }
   }
 
@@ -3871,11 +3203,11 @@ class raw_hash_set {
     // We don't bother checking for this/that aliasing. We just need to avoid
     // breaking the invariants in that case.
     destructor_impl();
-    move_common(that.is_full_soo(), that.
+    move_common(that.is_full_soo(), that.char_alloc_ref(), common(),
                 std::move(that.common()));
     hash_ref() = that.hash_ref();
     eq_ref() = that.eq_ref();
-    CopyAlloc(
+    CopyAlloc(char_alloc_ref(), that.char_alloc_ref(),
               std::integral_constant<bool, propagate_alloc>());
     that.common() = CommonFields::CreateDefault<SooEnabled()>();
     annotate_for_bug_detection_on_move(that);
@@ -3902,7 +3234,7 @@ class raw_hash_set {
   }
   raw_hash_set& move_assign(raw_hash_set&& that,
                             std::false_type /*propagate_alloc*/) {
-    if (
+    if (char_alloc_ref() == that.char_alloc_ref()) {
       return assign_impl<false>(std::move(that));
     }
     // Aliasing can't happen here because allocs would compare equal above.
@@ -3918,22 +3250,25 @@ class raw_hash_set {
 
   template <class K>
   std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+    ctrl_t soo_slot_ctrl;
     if (empty()) {
-
-      if (infoz.IsSampled()) {
-        resize_with_soo_infoz(infoz);
-      } else {
+      if (!should_sample_soo()) {
         common().set_full_soo();
         return {soo_iterator(), true};
       }
+      soo_slot_ctrl = ctrl_t::kEmpty;
     } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
                                    PolicyTraits::element(soo_slot()))) {
       return {soo_iterator(), false};
     } else {
-
-    }
-
-
+      soo_slot_ctrl = static_cast<ctrl_t>(H2(hash_of(soo_slot())));
+    }
+    constexpr bool kUseMemcpy =
+        PolicyTraits::transfer_uses_memcpy() && SooEnabled();
+    size_t index = GrowSooTableToNextCapacityAndPrepareInsert<
+        kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0,
+        kUseMemcpy>(common(), GetPolicyFunctions(), hash_of(key),
+                    soo_slot_ctrl);
     return {iterator_at(index), true};
   }
 
@@ -3941,12 +3276,16 @@ class raw_hash_set {
   std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     prefetch_heap_block();
-
+    const size_t hash = hash_of(key);
     auto seq = probe(common(), hash);
+    const h2_t h2 = H2(hash);
     const ctrl_t* ctrl = control();
     while (true) {
+#ifndef ABSL_HAVE_MEMORY_SANITIZER
+      absl::PrefetchToLocalCache(slot_array() + seq.offset());
+#endif
       Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(
+      for (uint32_t i : g.Match(h2)) {
         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                 EqualElement<K>{key, eq_ref()},
                 PolicyTraits::element(slot_array() + seq.offset(i)))))
@@ -3954,11 +3293,10 @@ class raw_hash_set {
       }
       auto mask_empty = g.MaskEmpty();
       if (ABSL_PREDICT_TRUE(mask_empty)) {
-        size_t target = seq.offset(
-
-
-                                   FindInfo{target, seq.index()},
-                                   GetPolicyFunctions())),
+        size_t target = seq.offset(mask_empty.LowestBitSet());
+        return {iterator_at(PrepareInsertNonSoo(common(), GetPolicyFunctions(),
+                                                hash,
+                                                FindInfo{target, seq.index()})),
                 true};
       }
       seq.next();
@@ -3976,6 +3314,11 @@ class raw_hash_set {
 
   // Asserts that the capacity is not a sentinel invalid value.
   void AssertNotDebugCapacity() const {
+#ifdef NDEBUG
+    if (!SwisstableGenerationsEnabled()) {
+      return;
+    }
+#endif
     if (ABSL_PREDICT_TRUE(capacity() <
                           InvalidCapacity::kAboveMaxValidCapacity)) {
       return;
@@ -3983,8 +3326,11 @@ class raw_hash_set {
     assert(capacity() != InvalidCapacity::kReentrance &&
            "Reentrant container access during element construction/destruction "
           "is not allowed.");
-
-
+    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
+      if (capacity() == InvalidCapacity::kDestroyed) {
+        ABSL_RAW_LOG(FATAL, "Use of destroyed hash table.");
+      }
+    }
     if (SwisstableGenerationsEnabled() &&
         ABSL_PREDICT_FALSE(capacity() >= InvalidCapacity::kMovedFrom)) {
       if (capacity() == InvalidCapacity::kSelfMovedFrom) {
@@ -4015,9 +3361,10 @@ class raw_hash_set {
     }
     if (empty()) return;
 
-    const size_t hash_of_arg =
-    const auto assert_consistent = [&](const ctrl_t*,
-      const value_type& element =
+    const size_t hash_of_arg = hash_of(key);
+    const auto assert_consistent = [&](const ctrl_t*, void* slot) {
+      const value_type& element =
+          PolicyTraits::element(static_cast<slot_type*>(slot));
       const bool is_key_equal =
           PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
       if (!is_key_equal) return;
@@ -4037,7 +3384,7 @@ class raw_hash_set {
     }
     // We only do validation for small tables so that it's constant time.
     if (capacity() > 16) return;
-    IterateOverFullSlots(common(),
+    IterateOverFullSlots(common(), sizeof(slot_type), assert_consistent);
   }
 
   // Attempts to find `key` in the table; if it isn't found, returns an iterator
@@ -4062,7 +3409,10 @@ class raw_hash_set {
   void emplace_at(iterator iter, Args&&... args) {
     construct(iter.slot(), std::forward<Args>(args)...);
 
-
+    // When capacity is 1, find calls find_small and if size is 0, then it will
+    // return an end iterator. This can happen in the raw_hash_set copy ctor.
+    assert((capacity() == 1 ||
+            PolicyTraits::apply(FindElement{*this}, *iter) == iter) &&
           "constructed value does not match the lookup key");
   }
 
@@ -4125,10 +3475,12 @@ class raw_hash_set {
   }
   slot_type* soo_slot() {
     ABSL_SWISSTABLE_ASSERT(is_soo());
-
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
+        static_cast<slot_type*>(common().soo_data()));
   }
   const slot_type* soo_slot() const {
-
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
+        const_cast<raw_hash_set*>(this)->soo_slot());
   }
   iterator soo_iterator() {
     return {SooControl(), soo_slot(), common().generation_ptr()};
@@ -4136,6 +3488,20 @@ class raw_hash_set {
   const_iterator soo_iterator() const {
     return const_cast<raw_hash_set*>(this)->soo_iterator();
   }
+  slot_type* single_slot() {
+    ABSL_SWISSTABLE_ASSERT(capacity() <= 1);
+    return SooEnabled() ? soo_slot() : slot_array();
+  }
+  const slot_type* single_slot() const {
+    return const_cast<raw_hash_set*>(this)->single_slot();
+  }
+  iterator single_iterator() {
+    return {SooEnabled() ? SooControl() : control(), single_slot(),
+            common().generation_ptr()};
+  }
+  const_iterator single_iterator() const {
+    return const_cast<raw_hash_set*>(this)->single_iterator();
+  }
   HashtablezInfoHandle infoz() {
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     return common().infoz();
@@ -4145,49 +3511,118 @@ class raw_hash_set {
|
|
4145
3511
|
const hasher& hash_ref() const { return settings_.template get<1>(); }
|
4146
3512
|
key_equal& eq_ref() { return settings_.template get<2>(); }
|
4147
3513
|
const key_equal& eq_ref() const { return settings_.template get<2>(); }
|
4148
|
-
|
4149
|
-
const
|
3514
|
+
CharAlloc& char_alloc_ref() { return settings_.template get<3>(); }
|
3515
|
+
const CharAlloc& char_alloc_ref() const {
|
4150
3516
|
return settings_.template get<3>();
|
4151
3517
|
}
|
4152
3518
|
|
4153
|
-
static
|
4154
|
-
auto* h = reinterpret_cast<
|
4155
|
-
return &h->
|
3519
|
+
static void* get_char_alloc_ref_fn(CommonFields& common) {
|
3520
|
+
auto* h = reinterpret_cast<raw_hash_set*>(&common);
|
3521
|
+
return &h->char_alloc_ref();
|
4156
3522
|
}
|
4157
|
-
static void
|
3523
|
+
static void* get_hash_ref_fn(CommonFields& common) {
|
3524
|
+
auto* h = reinterpret_cast<raw_hash_set*>(&common);
|
3525
|
+
// TODO(b/397453582): Remove support for const hasher.
|
3526
|
+
return const_cast<std::remove_const_t<hasher>*>(&h->hash_ref());
|
3527
|
+
}
|
3528
|
+
static void transfer_n_slots_fn(void* set, void* dst, void* src,
|
3529
|
+
size_t count) {
|
3530
|
+
auto* src_slot = to_slot(src);
|
3531
|
+
auto* dst_slot = to_slot(dst);
|
3532
|
+
|
4158
3533
|
auto* h = static_cast<raw_hash_set*>(set);
|
4159
|
-
|
3534
|
+
for (; count > 0; --count, ++src_slot, ++dst_slot) {
|
3535
|
+
h->transfer(dst_slot, src_slot);
|
3536
|
+
}
|
4160
3537
|
}
|
4161
|
-
// Note: dealloc_fn will only be used if we have a non-standard allocator.
|
4162
|
-
static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
|
4163
|
-
auto* set = reinterpret_cast<raw_hash_set*>(&common);
|
4164
3538
|
|
4165
|
-
|
4166
|
-
|
4167
|
-
|
3539
|
+
// TODO(b/382423690): Try to type erase entire function or at least type erase
|
3540
|
+
// by GetKey + Hash for memcpyable types.
|
3541
|
+
// TODO(b/382423690): Try to type erase for big slots: sizeof(slot_type) > 16.
|
3542
|
+
static void transfer_unprobed_elements_to_next_capacity_fn(
|
3543
|
+
CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
|
3544
|
+
void* probed_storage,
|
3545
|
+
void (*encode_probed_element)(void* probed_storage, h2_t h2,
|
3546
|
+
size_t source_offset, size_t h1)) {
|
3547
|
+
const size_t new_capacity = common.capacity();
|
3548
|
+
const size_t old_capacity = PreviousCapacity(new_capacity);
|
3549
|
+
ABSL_ASSUME(old_capacity + 1 >= Group::kWidth);
|
3550
|
+
ABSL_ASSUME((old_capacity + 1) % Group::kWidth == 0);
|
3551
|
+
|
3552
|
+
auto* set = reinterpret_cast<raw_hash_set*>(&common);
|
3553
|
+
slot_type* old_slots_ptr = to_slot(old_slots);
|
3554
|
+
ctrl_t* new_ctrl = common.control();
|
3555
|
+
slot_type* new_slots = set->slot_array();
|
4168
3556
|
|
4169
|
-
common.
|
4170
|
-
|
4171
|
-
|
4172
|
-
|
3557
|
+
const PerTableSeed seed = common.seed();
|
3558
|
+
|
3559
|
+
for (size_t group_index = 0; group_index < old_capacity;
|
3560
|
+
group_index += Group::kWidth) {
|
3561
|
+
GroupFullEmptyOrDeleted old_g(old_ctrl + group_index);
|
3562
|
+
std::memset(new_ctrl + group_index, static_cast<int8_t>(ctrl_t::kEmpty),
|
3563
|
+
Group::kWidth);
|
3564
|
+
std::memset(new_ctrl + group_index + old_capacity + 1,
|
3565
|
+
static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
|
3566
|
+
// TODO(b/382423690): try to type erase everything outside of the loop.
|
3567
|
+
// We will share a lot of code in expense of one function call per group.
|
3568
|
+
for (auto in_fixed_group_index : old_g.MaskFull()) {
|
3569
|
+
size_t old_index = group_index + in_fixed_group_index;
|
3570
|
+
slot_type* old_slot = old_slots_ptr + old_index;
|
3571
|
+
// TODO(b/382423690): try to avoid entire hash calculation since we need
|
3572
|
+
// only one new bit of h1.
|
3573
|
+
size_t hash = set->hash_of(old_slot);
|
3574
|
+
size_t h1 = H1(hash, seed);
|
3575
|
+
h2_t h2 = H2(hash);
|
3576
|
+
size_t new_index = TryFindNewIndexWithoutProbing(
|
3577
|
+
h1, old_index, old_capacity, new_ctrl, new_capacity);
|
3578
|
+
// Note that encode_probed_element is allowed to use old_ctrl buffer
|
3579
|
+
// till and included the old_index.
|
3580
|
+
if (ABSL_PREDICT_FALSE(new_index == kProbedElementIndexSentinel)) {
|
3581
|
+
encode_probed_element(probed_storage, h2, old_index, h1);
|
3582
|
+
continue;
|
3583
|
+
}
|
3584
|
+
ABSL_SWISSTABLE_ASSERT((new_index & old_capacity) <= old_index);
|
3585
|
+
ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_index]));
|
3586
|
+
new_ctrl[new_index] = static_cast<ctrl_t>(h2);
|
3587
|
+
auto* new_slot = new_slots + new_index;
|
3588
|
+
SanitizerUnpoisonMemoryRegion(new_slot, sizeof(slot_type));
|
3589
|
+
set->transfer(new_slot, old_slot);
|
3590
|
+
SanitizerPoisonMemoryRegion(old_slot, sizeof(slot_type));
|
3591
|
+
}
|
3592
|
+
}
|
4173
3593
|
}
|
4174
3594
|
|
   static const PolicyFunctions& GetPolicyFunctions() {
+    static_assert(sizeof(slot_type) <= (std::numeric_limits<uint32_t>::max)(),
+                  "Slot size is too large. Use std::unique_ptr for value type "
+                  "or use absl::node_hash_{map,set}.");
+    static_assert(alignof(slot_type) <=
+                  size_t{(std::numeric_limits<uint16_t>::max)()});
+    static_assert(sizeof(key_type) <=
+                  size_t{(std::numeric_limits<uint32_t>::max)()});
+    static_assert(sizeof(value_type) <=
+                  size_t{(std::numeric_limits<uint32_t>::max)()});
+    static constexpr size_t kBackingArrayAlignment =
+        BackingArrayAlignment(alignof(slot_type));
     static constexpr PolicyFunctions value = {
-        sizeof(
+        static_cast<uint32_t>(sizeof(key_type)),
+        static_cast<uint32_t>(sizeof(value_type)),
+        static_cast<uint32_t>(sizeof(slot_type)),
+        static_cast<uint16_t>(alignof(slot_type)), SooEnabled(),
+        ShouldSampleHashtablezInfoForAlloc<CharAlloc>(),
         // TODO(b/328722020): try to type erase
         // for standard layout and alignof(Hash) <= alignof(CommonFields).
-        std::
-
+        std::is_empty_v<hasher> ? &GetRefForEmptyClass
+                                : &raw_hash_set::get_hash_ref_fn,
         PolicyTraits::template get_hash_slot_fn<hasher>(),
         PolicyTraits::transfer_uses_memcpy()
-            ?
-            : &raw_hash_set::
-
-
-
-            &
-
+            ? TransferNRelocatable<sizeof(slot_type)>
+            : &raw_hash_set::transfer_n_slots_fn,
+        std::is_empty_v<Alloc> ? &GetRefForEmptyClass
+                               : &raw_hash_set::get_char_alloc_ref_fn,
+        &AllocateBackingArray<kBackingArrayAlignment, CharAlloc>,
+        &DeallocateBackingArray<kBackingArrayAlignment, CharAlloc>,
+        &raw_hash_set::transfer_unprobed_elements_to_next_capacity_fn};
     return value;
   }
 
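The rewritten GetPolicyFunctions() packs the per-instantiation facts (sizes, alignment, SOO flag) into narrow integers and function pointers; the static_asserts cap key/value/slot sizes so they fit the uint32_t fields. That table is what lets the heavy table machinery live type-erased in raw_hash_set.cc and be shared across instantiations. A minimal sketch of the manual-vtable idea, using hypothetical names (ToyPolicy, GetToyPolicy) rather than the real PolicyFunctions layout:

```cpp
// Sketch, not the real PolicyFunctions: one static constexpr table per slot
// type gives shared, non-template code everything it needs to move slots.
#include <cstdint>
#include <new>
#include <utility>

struct ToyPolicy {
  uint32_t slot_size;
  // Type-erased transfer: relocate one slot from src to dst.
  void (*transfer)(void* dst, void* src);
};

template <class T>
const ToyPolicy& GetToyPolicy() {
  static constexpr ToyPolicy kPolicy = {
      static_cast<uint32_t>(sizeof(T)),
      [](void* dst, void* src) {
        T* s = static_cast<T*>(src);
        ::new (dst) T(std::move(*s));  // move-construct into the new slot
        s->~T();                       // destroy the old slot
      }};
  return kPolicy;
}
```

A shared routine in a single .cc file can then take `const ToyPolicy&` and resize any table without being templated on the element type.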
@@ -4195,9 +3630,9 @@ class raw_hash_set {
   // CompressedTuple will ensure that sizeof is not affected by any of the empty
   // fields that occur after CommonFields.
   absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
-
+                                            CharAlloc>
       settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
-                key_equal{},
+                key_equal{}, CharAlloc{}};
 };
 
 // Friend access for free functions in raw_hash_set.h.
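settings_ now stores CharAlloc (the allocator rebound to char) instead of the user's allocator type, and CompressedTuple guarantees the empty hasher/key_equal/allocator members cost no bytes. That guarantee rests on the empty-base optimization; a self-contained illustration (CompressedPair here is a stand-in, not Abseil's CompressedTuple):

```cpp
// EBO sketch: an empty class used as a private base adds no size, so storing
// std::hash / std::allocator alongside real data is free.
#include <cstddef>
#include <functional>
#include <memory>
#include <type_traits>

template <class Empty, class T>
struct CompressedPair : private Empty {  // empty base contributes 0 bytes
  T value;
};

static_assert(std::is_empty_v<std::hash<int>>);
static_assert(std::is_empty_v<std::allocator<char>>);
static_assert(sizeof(CompressedPair<std::hash<int>, std::size_t>) ==
              sizeof(std::size_t));
```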
@@ -4220,8 +3655,11 @@ struct HashtableFreeFunctionsAccess {
     }
     ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
     size_t num_deleted = 0;
+    using SlotType = typename Set::slot_type;
     IterateOverFullSlots(
-        c->common(),
+        c->common(), sizeof(SlotType),
+        [&](const ctrl_t* ctrl, void* slot_void) {
+          auto* slot = static_cast<SlotType*>(slot_void);
           if (pred(Set::PolicyTraits::element(slot))) {
             c->destroy(slot);
             EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
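This hunk and the next one change IterateOverFullSlots from a templated call to a type-erased one: the caller now passes sizeof(SlotType) plus a callback taking void*, so one out-of-line loop can serve every instantiation. A hedged sketch of that shape — ForEachFullSlot is a hypothetical stand-in, and the scalar scan replaces the real group-wise control-byte walk:

```cpp
// Sketch of type-erased slot iteration (not Abseil's IterateOverFullSlots):
// the slot size travels as a runtime value, the callback gets raw pointers.
#include <cstddef>
#include <cstdint>

enum class ctrl_t : int8_t { kEmpty = -128, kDeleted = -2 };
inline bool IsFull(ctrl_t c) { return static_cast<int8_t>(c) >= 0; }

template <class Fn>
void ForEachFullSlot(const ctrl_t* ctrl, std::size_t capacity, void* slots,
                     std::size_t slot_size, Fn fn) {
  auto* bytes = static_cast<char*>(slots);
  for (std::size_t i = 0; i < capacity; ++i) {
    // Full slots have a non-negative control byte (the 7-bit h2 fingerprint).
    if (IsFull(ctrl[i])) fn(ctrl + i, bytes + i * slot_size);
  }
}
```

The callbacks in the diff then cast the void* back to SlotType*, exactly as a caller of the sketch above would.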
@@ -4246,10 +3684,12 @@ struct HashtableFreeFunctionsAccess {
       cb(*c->soo_iterator());
       return;
     }
+    using SlotType = typename Set::slot_type;
     using ElementTypeWithConstness = decltype(*c->begin());
     IterateOverFullSlots(
-        c->common(),
-        ElementTypeWithConstness& element =
+        c->common(), sizeof(SlotType), [&cb](const ctrl_t*, void* slot) {
+          ElementTypeWithConstness& element =
+              Set::PolicyTraits::element(static_cast<SlotType*>(slot));
           cb(element);
         });
   }
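The ElementTypeWithConstness alias kept here matters because set-like tables iterate as const while map-like ones allow mutating the mapped value; decltype(*c->begin()) picks the right reference type in both cases. A short illustration with standard containers:

```cpp
// Illustration with std containers: dereferencing begin() yields a const
// reference for sets and a mutable pair reference for maps.
#include <map>
#include <set>
#include <type_traits>
#include <utility>

std::set<int> s;
std::map<int, int> m;
static_assert(std::is_same_v<decltype(*s.begin()), const int&>);
static_assert(
    std::is_same_v<decltype(*m.begin()), std::pair<const int, int>&>);
```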
@@ -4282,12 +3722,13 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
                              const typename Set::key_type& key) {
     if (set.is_soo()) return 0;
     size_t num_probes = 0;
-    size_t hash = set.
+    const size_t hash = set.hash_of(key);
     auto seq = probe(set.common(), hash);
+    const h2_t h2 = H2(hash);
     const ctrl_t* ctrl = set.control();
     while (true) {
       container_internal::Group g{ctrl + seq.offset()};
-      for (uint32_t i : g.Match(
+      for (uint32_t i : g.Match(h2)) {
         if (Traits::apply(
                 typename Set::template EqualElement<typename Set::key_type>{
                     key, set.eq_ref()},
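Hoisting `const h2_t h2 = H2(hash)` out of the probe loop is sound because h2 is a fixed 7-bit fingerprint of the element: it is what each control byte stores, while h1 only steers the probe sequence. A simplified model of the split (the real H1 also mixes a per-table seed, and the group match is SIMD, not this scalar loop):

```cpp
// Simplified H1/H2 split, not Abseil's exact functions.
#include <cstddef>
#include <cstdint>

using h2_t = uint8_t;

inline std::size_t H1(std::size_t hash) { return hash >> 7; }  // probe steering
inline h2_t H2(std::size_t hash) { return hash & 0x7f; }  // ctrl fingerprint

// Scalar stand-in for Group::Match: a bitmask of positions whose control
// byte equals h2, letting the caller skip slots without comparing keys.
inline uint16_t MatchGroup(const int8_t* ctrl, h2_t h2) {
  uint16_t mask = 0;
  for (int i = 0; i < 16; ++i) {
    if (ctrl[i] == static_cast<int8_t>(h2)) mask |= uint16_t{1} << i;
  }
  return mask;
}
```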
@@ -4320,6 +3761,22 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
 };
 
 }  // namespace hashtable_debug_internal
+
+// Extern template instantiations reduce binary size and linker input size.
+// Function definition is in raw_hash_set.cc.
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<1, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<4, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<8, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+#if UINTPTR_MAX == UINT64_MAX
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<16, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+#endif
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
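The extern template declarations added at the end pair with explicit instantiations in raw_hash_set.cc (per the comment in the diff), so the growth routine is compiled once per slot size instead of once per translation unit. A generic illustration of the technique, with a hypothetical GrowBuffer rather than the real symbols:

```cpp
// Generic extern-template sketch (hypothetical names, not grpc/Abseil code).

// widget.h -- every includer sees the definition but must not instantiate:
template <int N>
int GrowBuffer(int current) { return current + N; }
extern template int GrowBuffer<8>(int);  // suppresses implicit instantiation

// widget.cc -- exactly one TU provides the instantiation the linker will use:
// template int GrowBuffer<8>(int);
```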