grpc 1.49.1-x86_64-linux → 1.50.0.pre1-x86_64-linux
Potentially problematic release.
- checksums.yaml +4 -4
- data/Makefile +54 -153
- data/include/grpc/event_engine/endpoint_config.h +11 -5
- data/include/grpc/event_engine/event_engine.h +1 -1
- data/include/grpc/impl/codegen/atm_gcc_atomic.h +19 -28
- data/include/grpc/impl/codegen/atm_gcc_sync.h +0 -2
- data/include/grpc/impl/codegen/atm_windows.h +0 -2
- data/include/grpc/impl/codegen/grpc_types.h +6 -0
- data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +3 -3
- data/src/core/ext/filters/client_channel/backup_poller.cc +4 -6
- data/src/core/ext/filters/client_channel/client_channel.cc +33 -22
- data/src/core/ext/filters/client_channel/client_channel.h +1 -1
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +0 -16
- data/src/core/ext/filters/client_channel/http_proxy.cc +12 -19
- data/src/core/ext/filters/client_channel/http_proxy.h +3 -2
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +6 -4
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h +5 -4
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +0 -2
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +112 -96
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +20 -11
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +106 -108
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.h +16 -0
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +20 -13
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +165 -257
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +218 -231
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.h +10 -6
- data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +389 -444
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +16 -16
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +8 -13
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +84 -96
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +38 -37
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +106 -186
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +106 -93
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +170 -218
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +1 -1
- data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +13 -15
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +84 -37
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.h +11 -0
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +1 -0
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +5 -3
- data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +5 -4
- data/src/core/ext/filters/client_channel/retry_filter.cc +25 -29
- data/src/core/ext/filters/client_channel/subchannel.cc +38 -33
- data/src/core/ext/filters/client_channel/subchannel.h +12 -3
- data/src/core/ext/filters/client_channel/subchannel_stream_client.cc +1 -2
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +23 -16
- data/src/core/ext/filters/fault_injection/fault_injection_filter.h +8 -0
- data/src/core/ext/filters/http/client/http_client_filter.cc +1 -2
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -4
- data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +0 -2
- data/src/core/ext/filters/http/server/http_server_filter.cc +1 -2
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +12 -8
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +32 -26
- data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +25 -130
- data/src/core/ext/transport/chttp2/transport/decode_huff.cc +287 -0
- data/src/core/ext/transport/chttp2/transport/decode_huff.h +1018 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +83 -51
- data/src/core/ext/transport/chttp2/transport/flow_control.h +11 -6
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +1 -2
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -20
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +28 -28
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +1 -10
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +11 -6
- data/src/core/ext/transport/chttp2/transport/internal.h +2 -0
- data/src/core/ext/transport/chttp2/transport/parsing.cc +44 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +3 -14
- data/src/core/ext/transport/inproc/inproc_transport.cc +1 -3
- data/src/core/ext/xds/certificate_provider_store.cc +63 -3
- data/src/core/ext/xds/certificate_provider_store.h +9 -1
- data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +5 -5
- data/src/core/ext/xds/file_watcher_certificate_provider_factory.h +1 -1
- data/src/core/ext/xds/xds_api.cc +21 -17
- data/src/core/ext/xds/xds_api.h +7 -0
- data/src/core/ext/xds/xds_bootstrap.cc +5 -537
- data/src/core/ext/xds/xds_bootstrap.h +39 -111
- data/src/core/ext/xds/xds_bootstrap_grpc.cc +370 -0
- data/src/core/ext/xds/xds_bootstrap_grpc.h +169 -0
- data/src/core/ext/xds/xds_client.cc +219 -145
- data/src/core/ext/xds/xds_client.h +19 -17
- data/src/core/ext/xds/xds_client_grpc.cc +18 -80
- data/src/core/ext/xds/xds_client_grpc.h +2 -25
- data/src/core/ext/xds/xds_client_stats.cc +4 -4
- data/src/core/ext/xds/xds_cluster.cc +87 -79
- data/src/core/ext/xds/xds_cluster.h +5 -5
- data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +3 -1
- data/src/core/ext/xds/xds_common_types.cc +13 -5
- data/src/core/ext/xds/xds_endpoint.cc +8 -6
- data/src/core/ext/xds/xds_endpoint.h +3 -4
- data/src/core/ext/xds/xds_lb_policy_registry.cc +4 -2
- data/src/core/ext/xds/xds_listener.cc +25 -20
- data/src/core/ext/xds/xds_listener.h +3 -4
- data/src/core/ext/xds/xds_resource_type.h +11 -8
- data/src/core/ext/xds/xds_route_config.cc +15 -16
- data/src/core/ext/xds/xds_route_config.h +3 -3
- data/src/core/ext/xds/xds_server_config_fetcher.cc +7 -5
- data/src/core/ext/xds/xds_transport_grpc.cc +15 -7
- data/src/core/lib/backoff/backoff.cc +2 -4
- data/src/core/lib/channel/call_finalization.h +1 -3
- data/src/core/lib/channel/channel_args.h +114 -14
- data/src/core/lib/channel/channel_trace.cc +3 -4
- data/src/core/lib/channel/promise_based_filter.cc +18 -19
- data/src/core/lib/channel/status_util.cc +27 -0
- data/src/core/lib/channel/status_util.h +10 -0
- data/src/core/lib/config/core_configuration.cc +5 -1
- data/src/core/lib/config/core_configuration.h +33 -0
- data/src/core/lib/debug/stats.cc +26 -30
- data/src/core/lib/debug/stats.h +2 -12
- data/src/core/lib/debug/stats_data.cc +118 -614
- data/src/core/lib/debug/stats_data.h +67 -465
- data/src/core/lib/debug/trace.cc +0 -2
- data/src/core/lib/event_engine/channel_args_endpoint_config.cc +12 -20
- data/src/core/lib/event_engine/channel_args_endpoint_config.h +13 -7
- data/src/core/lib/event_engine/forkable.cc +1 -1
- data/src/core/lib/event_engine/poller.h +14 -12
- data/src/core/lib/event_engine/posix_engine/timer_manager.cc +53 -32
- data/src/core/lib/event_engine/posix_engine/timer_manager.h +23 -1
- data/src/core/lib/event_engine/thread_pool.cc +131 -94
- data/src/core/lib/event_engine/thread_pool.h +56 -23
- data/src/core/lib/event_engine/time_util.cc +30 -0
- data/src/core/lib/event_engine/time_util.h +32 -0
- data/src/core/lib/event_engine/utils.cc +0 -5
- data/src/core/lib/event_engine/utils.h +0 -4
- data/src/core/lib/event_engine/windows/iocp.cc +13 -7
- data/src/core/lib/event_engine/windows/iocp.h +2 -1
- data/src/core/lib/event_engine/windows/win_socket.cc +1 -1
- data/src/core/lib/experiments/config.cc +146 -0
- data/src/core/lib/experiments/config.h +43 -0
- data/src/core/lib/experiments/experiments.cc +75 -0
- data/src/core/lib/experiments/experiments.h +56 -0
- data/src/core/lib/gpr/alloc.cc +1 -9
- data/src/core/lib/gpr/log_windows.cc +0 -1
- data/src/core/lib/gpr/string_util_windows.cc +3 -30
- data/src/core/lib/gpr/sync_abseil.cc +0 -14
- data/src/core/lib/gpr/sync_posix.cc +0 -14
- data/src/core/lib/gpr/time_posix.cc +0 -6
- data/src/core/lib/gpr/time_precise.h +1 -1
- data/src/core/lib/gpr/tmpfile_windows.cc +5 -7
- data/src/core/lib/gpr/useful.h +11 -0
- data/src/core/lib/{gpr → gprpp}/env.h +25 -12
- data/src/core/lib/{gpr → gprpp}/env_linux.cc +20 -15
- data/src/core/lib/{gpr → gprpp}/env_posix.cc +11 -10
- data/src/core/lib/gprpp/env_windows.cc +56 -0
- data/src/core/lib/gprpp/fork.cc +14 -22
- data/src/core/lib/gprpp/fork.h +0 -8
- data/src/core/lib/gprpp/global_config_env.cc +7 -6
- data/src/core/lib/gprpp/notification.h +67 -0
- data/src/core/lib/gprpp/packed_table.h +40 -0
- data/src/core/lib/gprpp/ref_counted_ptr.h +20 -33
- data/src/core/lib/gprpp/sorted_pack.h +98 -0
- data/src/core/lib/gprpp/status_helper.h +6 -0
- data/src/core/lib/gprpp/table.h +9 -1
- data/src/core/lib/gprpp/tchar.cc +49 -0
- data/src/core/lib/gprpp/tchar.h +33 -0
- data/src/core/lib/gprpp/time.cc +21 -0
- data/src/core/lib/gprpp/time.h +55 -0
- data/src/core/lib/gprpp/validation_errors.cc +61 -0
- data/src/core/lib/gprpp/validation_errors.h +110 -0
- data/src/core/{ext/filters/client_channel → lib/handshaker}/proxy_mapper.h +3 -3
- data/src/core/{ext/filters/client_channel → lib/handshaker}/proxy_mapper_registry.cc +14 -36
- data/src/core/lib/handshaker/proxy_mapper_registry.h +75 -0
- data/src/core/lib/iomgr/call_combiner.cc +0 -8
- data/src/core/lib/iomgr/closure.h +0 -1
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +14 -10
- data/src/core/lib/iomgr/endpoint_pair_windows.cc +2 -2
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +1 -38
- data/src/core/lib/iomgr/ev_poll_posix.cc +2 -17
- data/src/core/lib/iomgr/exec_ctx.cc +0 -10
- data/src/core/lib/iomgr/exec_ctx.h +7 -31
- data/src/core/lib/iomgr/iocp_windows.cc +1 -2
- data/src/core/lib/iomgr/iomgr.cc +6 -8
- data/src/core/lib/iomgr/iomgr_fwd.h +1 -0
- data/src/core/lib/iomgr/pollset.h +1 -1
- data/src/core/lib/iomgr/pollset_set.h +0 -1
- data/src/core/lib/iomgr/resolve_address.h +1 -0
- data/src/core/lib/iomgr/resolve_address_impl.h +1 -0
- data/src/core/lib/iomgr/resolve_address_posix.cc +1 -0
- data/src/core/lib/iomgr/resolve_address_windows.cc +1 -0
- data/src/core/lib/iomgr/sockaddr_utils_posix.cc +2 -1
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +12 -34
- data/src/core/lib/iomgr/socket_utils_posix.cc +83 -1
- data/src/core/lib/iomgr/socket_utils_posix.h +98 -6
- data/src/core/lib/iomgr/tcp_client.cc +6 -7
- data/src/core/lib/iomgr/tcp_client.h +11 -11
- data/src/core/lib/iomgr/tcp_client_cfstream.cc +6 -6
- data/src/core/lib/iomgr/tcp_client_posix.cc +33 -29
- data/src/core/lib/iomgr/tcp_client_posix.h +12 -9
- data/src/core/lib/iomgr/tcp_client_windows.cc +6 -6
- data/src/core/lib/iomgr/tcp_posix.cc +131 -114
- data/src/core/lib/iomgr/tcp_posix.h +3 -1
- data/src/core/lib/iomgr/tcp_server.cc +5 -4
- data/src/core/lib/iomgr/tcp_server.h +9 -6
- data/src/core/lib/iomgr/tcp_server_posix.cc +17 -28
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +2 -2
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +3 -3
- data/src/core/lib/iomgr/tcp_server_windows.cc +6 -7
- data/src/core/lib/iomgr/tcp_windows.cc +0 -1
- data/src/core/lib/iomgr/tcp_windows.h +0 -1
- data/src/core/lib/iomgr/timer_generic.cc +4 -4
- data/src/core/lib/iomgr/timer_manager.cc +1 -2
- data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +0 -2
- data/src/core/lib/json/json_object_loader.cc +21 -52
- data/src/core/lib/json/json_object_loader.h +56 -76
- data/src/core/lib/json/json_util.cc +2 -1
- data/src/core/lib/load_balancing/lb_policy.h +5 -5
- data/src/core/lib/load_balancing/lb_policy_registry.cc +29 -55
- data/src/core/lib/load_balancing/lb_policy_registry.h +23 -11
- data/src/core/lib/promise/activity.h +2 -3
- data/src/core/lib/promise/context.h +1 -1
- data/src/core/lib/promise/sleep.cc +16 -4
- data/src/core/lib/promise/sleep.h +8 -2
- data/src/core/lib/resolver/resolver.h +13 -3
- data/src/core/lib/resource_quota/api.cc +9 -0
- data/src/core/lib/resource_quota/api.h +6 -0
- data/src/core/lib/resource_quota/arena.cc +1 -3
- data/src/core/lib/resource_quota/memory_quota.cc +8 -24
- data/src/core/lib/resource_quota/memory_quota.h +6 -19
- data/src/core/lib/resource_quota/periodic_update.cc +2 -3
- data/src/core/{ext/xds → lib/security/certificate_provider}/certificate_provider_factory.h +3 -3
- data/src/core/lib/security/certificate_provider/certificate_provider_registry.cc +60 -0
- data/src/core/lib/security/certificate_provider/certificate_provider_registry.h +70 -0
- data/src/core/lib/security/credentials/channel_creds_registry_init.cc +1 -0
- data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +15 -16
- data/src/core/lib/security/credentials/external/external_account_credentials.cc +2 -1
- data/src/core/lib/security/credentials/google_default/credentials_generic.cc +5 -8
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +6 -6
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +3 -2
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +1 -1
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +1 -2
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +4 -3
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +4 -2
- data/src/core/lib/security/credentials/tls/tls_utils.cc +3 -1
- data/src/core/lib/security/transport/client_auth_filter.cc +12 -1
- data/src/core/lib/security/transport/secure_endpoint.cc +0 -4
- data/src/core/lib/surface/call.cc +1 -11
- data/src/core/lib/surface/channel.cc +3 -2
- data/src/core/lib/surface/completion_queue.cc +16 -28
- data/src/core/lib/surface/completion_queue.h +1 -1
- data/src/core/lib/surface/completion_queue_factory.cc +5 -0
- data/src/core/lib/surface/init.cc +16 -11
- data/src/core/lib/surface/init_internally.cc +24 -0
- data/src/core/lib/surface/init_internally.h +28 -0
- data/src/core/lib/surface/server.cc +1 -7
- data/src/core/lib/surface/server.h +4 -6
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/bdp_estimator.cc +1 -3
- data/src/core/lib/transport/metadata_batch.cc +2 -3
- data/src/core/lib/transport/metadata_batch.h +9 -7
- data/src/core/lib/transport/parsed_metadata.h +4 -2
- data/src/core/lib/transport/status_conversion.cc +1 -3
- data/src/core/lib/transport/tcp_connect_handshaker.cc +9 -5
- data/src/core/lib/transport/transport.h +0 -1
- data/src/core/lib/transport/transport_impl.h +0 -1
- data/src/core/plugin_registry/grpc_plugin_registry.cc +23 -46
- data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +13 -25
- data/src/ruby/lib/grpc/2.6/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/2.7/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/3.0/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/3.1/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/channel_spec.rb +5 -0
- data/src/ruby/spec/generic/server_interceptors_spec.rb +1 -1
- data/src/ruby/spec/user_agent_spec.rb +1 -1
- metadata +33 -19
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +0 -56
- data/src/core/ext/xds/certificate_provider_registry.cc +0 -103
- data/src/core/ext/xds/certificate_provider_registry.h +0 -59
- data/src/core/lib/event_engine/promise.h +0 -78
- data/src/core/lib/gpr/env_windows.cc +0 -74
- data/src/core/lib/gpr/string_windows.h +0 -32
- data/src/core/lib/profiling/basic_timers.cc +0 -295
- data/src/core/lib/profiling/stap_timers.cc +0 -50
- data/src/core/lib/profiling/timers.h +0 -94
--- a/data/src/core/lib/event_engine/channel_args_endpoint_config.h
+++ b/data/src/core/lib/event_engine/channel_args_endpoint_config.h
@@ -17,24 +17,30 @@
 #include <grpc/support/port_platform.h>
 
 #include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
 
 #include <grpc/event_engine/endpoint_config.h>
-
+
+#include "src/core/lib/channel/channel_args.h"
 
 namespace grpc_event_engine {
 namespace experimental {
 
-/// A readonly \a EndpointConfig based on grpc_channel_args. This class does not
-/// take ownership of the grpc_endpoint_args*, and instances of this class
-/// should not be used after the underlying args are destroyed.
 class ChannelArgsEndpointConfig : public EndpointConfig {
  public:
-
+  ChannelArgsEndpointConfig() = default;
+  explicit ChannelArgsEndpointConfig(const grpc_core::ChannelArgs& args)
       : args_(args) {}
-
+  ChannelArgsEndpointConfig(const ChannelArgsEndpointConfig& config) = default;
+  ChannelArgsEndpointConfig& operator=(const ChannelArgsEndpointConfig& other) =
+      default;
+  absl::optional<int> GetInt(absl::string_view key) const override;
+  absl::optional<absl::string_view> GetString(
+      absl::string_view key) const override;
+  void* GetVoidPointer(absl::string_view key) const override;
 
  private:
-
+  grpc_core::ChannelArgs args_;
 };
 
 }  // namespace experimental
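
The rewritten header stores a grpc_core::ChannelArgs by value instead of borrowing a grpc_channel_args*, and splits lookup into typed getters that return absl::optional. A minimal consumer-side sketch of the new getter shape; the key strings and both Configure* helpers are illustrative assumptions, not part of this release:

    #include <grpc/event_engine/endpoint_config.h>

    #include "absl/strings/string_view.h"
    #include "absl/types/optional.h"

    using grpc_event_engine::experimental::EndpointConfig;

    void ConfigureKeepalive(int ms);                       // hypothetical helper
    void ConfigureAuthority(absl::string_view authority);  // hypothetical helper

    void ApplySettings(const EndpointConfig& config) {
      // GetInt returns absl::nullopt when the key is absent, so "unset" is
      // distinguishable from "set to zero".
      absl::optional<int> keepalive_ms = config.GetInt("grpc.keepalive_time_ms");
      if (keepalive_ms.has_value()) ConfigureKeepalive(*keepalive_ms);

      absl::optional<absl::string_view> authority =
          config.GetString("grpc.default_authority");
      if (authority.has_value()) ConfigureAuthority(*authority);
    }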
--- a/data/src/core/lib/event_engine/forkable.cc
+++ b/data/src/core/lib/event_engine/forkable.cc
@@ -41,7 +41,7 @@ Forkable::~Forkable() { StopManagingForkable(this); }
 
 void RegisterForkHandlers() {
   grpc_core::MutexLock lock(g_mu.get());
-  if (!
+  if (!std::exchange(g_registered, true)) {
     pthread_atfork(PrepareFork, PostforkParent, PostforkChild);
   }
 };
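
The one-line fix above swaps the registration guard to std::exchange, which writes true and returns the previous value, so pthread_atfork is installed at most once. The idiom in isolation, assuming nothing beyond the standard library:

    #include <utility>  // std::exchange

    static bool g_registered = false;

    void RegisterOnce() {
      // std::exchange sets the flag to true and yields its old value, so
      // only the first caller (under the caller's lock) does the setup.
      if (!std::exchange(g_registered, true)) {
        // ... one-time setup, e.g. pthread_atfork(...) ...
      }
    }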
--- a/data/src/core/lib/event_engine/poller.h
+++ b/data/src/core/lib/event_engine/poller.h
@@ -16,8 +16,7 @@
 
 #include <grpc/support/port_platform.h>
 
-#include "absl/
-#include "absl/types/variant.h"
+#include "absl/functional/function_ref.h"
 
 #include <grpc/event_engine/event_engine.h>
 
@@ -30,20 +29,23 @@ namespace experimental {
 // Work(...).
 class Poller {
  public:
-
-  using Events = absl::InlinedVector<EventEngine::Closure*, 5>;
-  struct DeadlineExceeded {};
-  struct Kicked {};
-  using WorkResult = absl::variant<Events, DeadlineExceeded, Kicked>;
+  enum class WorkResult { kOk, kDeadlineExceeded, kKicked };
 
   virtual ~Poller() = default;
-  // Poll once for events
+  // Poll once for events and process received events. The callback function
+  // "schedule_poll_again" is expected to be run synchronously prior to
+  // processing received events. The callback's responsibility primarily is to
+  // schedule Poller::Work asynchronously again. This would ensure that the next
+  // polling cycle would run as quickly as possible to ensure continuous
+  // polling.
   //
   // Returns:
-  // *
-  // *
-  // *
-
+  // * Poller::WorkResult::kKicked if it was Kicked.
+  // * Poller::WorkResult::kDeadlineExceeded if timeout occurred
+  // * Poller::WorkResult::kOk, otherwise indicating that the callback function
+  //   was run synchonously before some events were processed.
+  virtual WorkResult Work(EventEngine::Duration timeout,
+                          absl::FunctionRef<void()> schedule_poll_again) = 0;
   // Trigger the threads executing Work(..) to break out as soon as possible.
   virtual void Kick() = 0;
 };
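
Work() now returns a plain enum and takes a schedule_poll_again callback that the poller runs synchronously before processing received events, so the next cycle is queued before closures execute. A hedged sketch of a driver loop against this contract; RunAsync is an assumed executor hook, and the re-arm-on-timeout policy is an inference from the comments, not code in this diff:

    #include <chrono>
    #include <functional>

    #include "src/core/lib/event_engine/poller.h"

    using grpc_event_engine::experimental::Poller;

    void RunAsync(std::function<void()> fn);  // hypothetical executor hook

    void PollLoop(Poller* poller) {
      RunAsync([poller]() {
        Poller::WorkResult result = poller->Work(
            std::chrono::seconds(1),
            // Runs synchronously before events are processed, so the next
            // cycle is already queued while closures execute.
            [poller]() { PollLoop(poller); });
        if (result == Poller::WorkResult::kDeadlineExceeded) {
          PollLoop(poller);  // idle timeout: nothing processed, re-arm manually
        }
        // kOk: schedule_poll_again already re-armed the loop.
        // kKicked: Kick() was called; stop polling.
      });
    }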
--- a/data/src/core/lib/event_engine/posix_engine/timer_manager.cc
+++ b/data/src/core/lib/event_engine/posix_engine/timer_manager.cc
@@ -32,11 +32,17 @@
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
 
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/thd.h"
 
+static GPR_THREAD_LOCAL(bool) g_timer_thread;
+
 namespace grpc_event_engine {
 namespace posix_engine {
 
+grpc_core::DebugOnlyTraceFlag grpc_event_engine_timer_trace(false, "timer");
+
 namespace {
 class ThreadCollector {
  public:
@@ -88,7 +94,7 @@ void TimerManager::RunSomeTimers(
     // if there's no thread waiting with a timeout, kick an existing untimed
     // waiter so that the next deadline is not missed
     if (!has_timed_waiter_) {
-
+      cv_wait_.Signal();
     }
   }
 }
@@ -151,8 +157,8 @@ bool TimerManager::WaitUntil(grpc_core::Timestamp next) {
       }
     }
 
-
-
+    cv_wait_.WaitWithTimeout(&mu_,
+                             absl::Milliseconds((next - host_.Now()).millis()));
 
     // if this was the timed waiter, then we need to check timers, and flag
    // that there's now no timed waiter... we'll look for a replacement if
@@ -196,16 +202,29 @@ void TimerManager::MainLoop() {
 }
 
 void TimerManager::RunThread(void* arg) {
+  g_timer_thread = true;
   std::unique_ptr<RunThreadArgs> thread(static_cast<RunThreadArgs*>(arg));
-
-
-
-  thread->self->thread_count_--;
-  thread->self->completed_threads_.push_back(std::move(thread->thread));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p starting thread::%p", thread->self,
+            &thread->thread);
   }
-  thread->self->
+  thread->self->Run(std::move(thread->thread));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p thread::%p finished", thread->self,
+            &thread->thread);
+  }
+}
+
+void TimerManager::Run(grpc_core::Thread thread) {
+  MainLoop();
+  grpc_core::MutexLock lock(&mu_);
+  completed_threads_.push_back(std::move(thread));
+  thread_count_--;
+  if (thread_count_ == 0) cv_threadcount_.Signal();
 }
 
+bool TimerManager::IsTimerManagerThread() { return g_timer_thread; }
+
 TimerManager::TimerManager() : host_(this) {
   timer_list_ = absl::make_unique<TimerList>(&host_);
   grpc_core::MutexLock lock(&mu_);
@@ -227,17 +246,23 @@ bool TimerManager::TimerCancel(Timer* timer) {
 }
 
 TimerManager::~TimerManager() {
-  {
-
-    shutdown_ = true;
-    cv_.SignalAll();
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p shutting down", this);
   }
-
-
-
-
-
-
+  ThreadCollector collector;
+  grpc_core::MutexLock lock(&mu_);
+  shutdown_ = true;
+  cv_wait_.SignalAll();
+  while (thread_count_ > 0) {
+    cv_threadcount_.Wait(&mu_);
+    if (grpc_event_engine_timer_trace.enabled()) {
+      gpr_log(GPR_DEBUG, "TimerManager::%p waiting for %zu threads to finish",
+              this, thread_count_);
+    }
+  }
+  collector.Collect(std::move(completed_threads_));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p shutdown complete", this);
   }
 }
 
@@ -249,23 +274,19 @@ void TimerManager::Kick() {
   timed_waiter_deadline_ = grpc_core::Timestamp::InfFuture();
   ++timed_waiter_generation_;
   kicked_ = true;
-
+  cv_wait_.Signal();
 }
 
 void TimerManager::PrepareFork() {
-
-
-
-
-
-
-
-  grpc_core::MutexLock lock(&mu_);
-  ThreadCollector collector;
-  collector.Collect(std::move(completed_threads_));
-  if (thread_count_ == 0) break;
-  cv_.Wait(&mu_);
+  ThreadCollector collector;
+  grpc_core::MutexLock lock(&mu_);
+  forking_ = true;
+  prefork_thread_count_ = thread_count_;
+  cv_wait_.SignalAll();
+  while (thread_count_ > 0) {
+    cv_threadcount_.Wait(&mu_);
   }
+  collector.Collect(std::move(completed_threads_));
 }
 
 void TimerManager::PostforkParent() {
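
RunThread now tags each timer thread through a file-static thread-local, which is all IsTimerManagerThread() consults. The same pattern in standard C++, sketched with plain thread_local rather than the GPR_THREAD_LOCAL macro the diff uses:

    #include <thread>

    thread_local bool g_timer_thread = false;

    void TimerThreadMain() {
      g_timer_thread = true;  // marked once, on the thread itself
      // ... the timer MainLoop() equivalent would run here ...
    }

    bool IsTimerManagerThread() { return g_timer_thread; }

    int main() {
      std::thread t(TimerThreadMain);
      bool on_timer = IsTimerManagerThread();  // false on the main thread
      t.join();
      return on_timer ? 1 : 0;
    }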
--- a/data/src/core/lib/event_engine/posix_engine/timer_manager.h
+++ b/data/src/core/lib/event_engine/posix_engine/timer_manager.h
@@ -60,6 +60,8 @@ class TimerManager final : public grpc_event_engine::experimental::Forkable {
   void PostforkParent() override;
   void PostforkChild() override;
 
+  static bool IsTimerManagerThread();
+
  private:
   struct RunThreadArgs {
     TimerManager* self;
@@ -80,13 +82,33 @@ class TimerManager final : public grpc_event_engine::experimental::Forkable {
 
   void StartThread() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
   static void RunThread(void* arg);
+  void Run(grpc_core::Thread thread);
   void MainLoop();
   void RunSomeTimers(std::vector<experimental::EventEngine::Closure*> timers);
   bool WaitUntil(grpc_core::Timestamp next);
   void Kick();
 
   grpc_core::Mutex mu_;
-
+  // Condvar associated with decrementing the thread count.
+  // Threads will signal this when thread count reaches zero, and the forking
+  // code *or* the destructor will wait upon it.
+  grpc_core::CondVar cv_threadcount_;
+  // Condvar associated with threads waiting to wakeup and work.
+  // Threads wait on this until either a timeout is reached or another thread is
+  // needed to wait for a timeout.
+  // On shutdown we SignalAll against this to wake up all threads and have them
+  // finish.
+  // On kick we Signal against this to wake up at least one thread (but not
+  // all)! Similarly when we note that no thread is watching timers.
+  //
+  // This is a different condvar than cv_threadcount_!
+  // If this were the same:
+  // - thread exits would require a SignalAll to ensure that the specific thread
+  //   we want to wake is woken up.
+  // - kicks would need to signal all threads to avoid having the kick absorbed
+  //   by a shutdown thread and cause a deadlock, leading to thundering herd
+  //   problems in the common case.
+  grpc_core::CondVar cv_wait_;
   Host host_;
   // number of threads in the system
   size_t thread_count_ ABSL_GUARDED_BY(mu_) = 0;
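
The long comment added above is the core of the refactor: one mutex with two condition variables, each signaled for exactly one reason, so a kick wakes exactly one worker and a thread exit wakes only the joiner. A minimal standard-library illustration of the split; all names here are mine, not grpc's:

    #include <condition_variable>
    #include <mutex>

    std::mutex mu;
    std::condition_variable cv_wait;         // workers: "work arrived or shutdown"
    std::condition_variable cv_threadcount;  // joiner: "thread count hit zero"
    int thread_count = 0;
    bool shutdown_requested = false;

    void Kick() {
      std::lock_guard<std::mutex> lock(mu);
      cv_wait.notify_one();  // wake exactly one worker; no thundering herd
    }

    void WorkerLoop() {
      std::unique_lock<std::mutex> lock(mu);
      ++thread_count;
      while (!shutdown_requested) cv_wait.wait(lock);
      if (--thread_count == 0) cv_threadcount.notify_one();  // targeted wakeup
    }

    void JoinAll() {
      std::unique_lock<std::mutex> lock(mu);
      shutdown_requested = true;
      cv_wait.notify_all();  // every worker must observe shutdown
      cv_threadcount.wait(lock, [] { return thread_count == 0; });
    }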
--- a/data/src/core/lib/event_engine/thread_pool.cc
+++ b/data/src/core/lib/event_engine/thread_pool.cc
@@ -20,138 +20,175 @@
 
 #include "src/core/lib/event_engine/thread_pool.h"
 
+#include <memory>
 #include <utility>
 
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/thd.h"
 
 namespace grpc_event_engine {
 namespace experimental {
 
-
-
-
-
-
-
-
+namespace {
+// TODO(drfloob): Remove this, and replace it with the WorkQueue* for the
+// current thread (with nullptr indicating not a threadpool thread).
+GPR_THREAD_LOCAL(bool) g_threadpool_thread;
+}  // namespace
+
+void ThreadPool::StartThread(StatePtr state, bool throttled) {
+  state->thread_count.Add();
+  struct ThreadArg {
+    StatePtr state;
+    bool throttled;
+  };
+  grpc_core::Thread(
+      "event_engine",
+      [](void* arg) {
+        std::unique_ptr<ThreadArg> a(static_cast<ThreadArg*>(arg));
+        g_threadpool_thread = true;
+        if (a->throttled) {
+          GPR_ASSERT(a->state->currently_starting_one_thread.exchange(
+              false, std::memory_order_relaxed));
+        }
+        ThreadFunc(a->state);
+      },
+      new ThreadArg{state, throttled}, nullptr,
+      grpc_core::Thread::Options().set_tracked(false).set_joinable(false))
+      .Start();
 }
-
-
-
-  pool_->ThreadFunc();
-  // Now that we have killed ourselves, we should reduce the thread count
-  grpc_core::MutexLock lock(&pool_->mu_);
-  pool_->nthreads_--;
-  // Move ourselves to dead list
-  pool_->dead_threads_.push_back(this);
-
-  if (pool_->nthreads_ == 0) {
-    if (pool_->forking_) pool_->fork_cv_.Signal();
-    if (pool_->shutdown_) pool_->shutdown_cv_.Signal();
+
+void ThreadPool::ThreadFunc(StatePtr state) {
+  while (state->queue.Step()) {
   }
+  state->thread_count.Remove();
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-  // a fork could be initiated while the thread was waiting
-  if (forking_) return;
-  // Drain callbacks before considering shutdown to ensure all work
-  // gets completed.
-  if (!callbacks_.empty()) {
-    auto cb = std::move(callbacks_.front());
-    callbacks_.pop();
-    lock.Release();
-    cb();
-  } else if (shutdown_) {
+bool ThreadPool::Queue::Step() {
+  grpc_core::ReleasableMutexLock lock(&mu_);
+  // Wait until work is available or we are shutting down.
+  while (state_ == State::kRunning && callbacks_.empty()) {
+    // If there are too many threads waiting, then quit this thread.
+    // TODO(ctiller): wait some time in this case to be sure.
+    if (threads_waiting_ >= reserve_threads_) return false;
+    threads_waiting_++;
+    cv_.Wait(&mu_);
+    threads_waiting_--;
+  }
+  switch (state_) {
+    case State::kRunning:
       break;
-
+    case State::kShutdown:
+    case State::kForking:
+      if (!callbacks_.empty()) break;
+      return false;
   }
+  GPR_ASSERT(!callbacks_.empty());
+  auto callback = std::move(callbacks_.front());
+  callbacks_.pop();
+  lock.Release();
+  callback();
+  return true;
 }
 
 ThreadPool::ThreadPool(int reserve_threads)
-    :
-
-
-      threads_waiting_(0),
-      forking_(false) {
-  grpc_core::MutexLock lock(&mu_);
-  StartNThreadsLocked(reserve_threads_);
-}
-
-void ThreadPool::StartNThreadsLocked(int n) {
-  for (int i = 0; i < n; i++) {
-    nthreads_++;
-    new Thread(this);
+    : reserve_threads_(reserve_threads) {
+  for (int i = 0; i < reserve_threads; i++) {
+    StartThread(state_, /*throttled=*/false);
   }
 }
 
-
-
-
+ThreadPool::~ThreadPool() {
+  state_->queue.SetShutdown();
+  // Wait until all threads are exited.
+  // Note that if this is a threadpool thread then we won't exit this thread
+  // until the callstack unwinds a little, so we need to wait for just one
+  // thread running instead of zero.
+  state_->thread_count.BlockUntilThreadCount(g_threadpool_thread ? 1 : 0,
+                                             "shutting down");
 }
 
-ThreadPool
-
-
-
-
-
+void ThreadPool::Add(absl::AnyInvocable<void()> callback) {
+  if (state_->queue.Add(std::move(callback))) {
+    if (!state_->currently_starting_one_thread.exchange(
+            true, std::memory_order_relaxed)) {
+      StartThread(state_, /*throttled=*/true);
+    }
   }
-  ReapThreads(&dead_threads_);
 }
 
-
+bool ThreadPool::Queue::Add(absl::AnyInvocable<void()> callback) {
   grpc_core::MutexLock lock(&mu_);
   // Add works to the callbacks list
   callbacks_.push(std::move(callback));
-
-
-
-
-
-
-
-    new Thread(this);
-  } else {
-    cv_.Signal();
-  }
-  // Also use this chance to harvest dead threads
-  if (!dead_threads_.empty()) {
-    ReapThreads(&dead_threads_);
+  cv_.Signal();
+  switch (state_) {
+    case State::kRunning:
+    case State::kShutdown:
+      return threads_waiting_ == 0;
+    case State::kForking:
+      return false;
   }
+  GPR_UNREACHABLE_CODE(return false);
 }
 
-void ThreadPool::
+void ThreadPool::Queue::SetState(State state) {
   grpc_core::MutexLock lock(&mu_);
-
-
-
-
+  if (state == State::kRunning) {
+    GPR_ASSERT(state_ != State::kRunning);
+  } else {
+    GPR_ASSERT(state_ == State::kRunning);
   }
-
+  state_ = state;
+  cv_.SignalAll();
 }
 
-void ThreadPool::
+void ThreadPool::ThreadCount::Add() {
   grpc_core::MutexLock lock(&mu_);
-
-  StartNThreadsLocked(reserve_threads_);
+  ++threads_;
 }
 
-void ThreadPool::
+void ThreadPool::ThreadCount::Remove() {
   grpc_core::MutexLock lock(&mu_);
-
-
+  --threads_;
+  cv_.Signal();
+}
+
+void ThreadPool::ThreadCount::BlockUntilThreadCount(int threads,
+                                                    const char* why) {
+  grpc_core::MutexLock lock(&mu_);
+  auto last_log = absl::Now();
+  while (threads_ > threads) {
+    // Wait for all threads to exit.
+    // At least once every three seconds (but no faster than once per second in
+    // the event of spurious wakeups) log a message indicating we're waiting to
+    // fork.
+    cv_.WaitWithTimeout(&mu_, absl::Seconds(3));
+    if (threads_ > threads && absl::Now() - last_log > absl::Seconds(1)) {
+      gpr_log(GPR_ERROR, "Waiting for thread pool to idle before %s", why);
+      last_log = absl::Now();
+    }
+  }
+}
+
+void ThreadPool::PrepareFork() {
+  state_->queue.SetForking();
+  state_->thread_count.BlockUntilThreadCount(0, "forking");
+}
+
+void ThreadPool::PostforkParent() { Postfork(); }
+
+void ThreadPool::PostforkChild() { Postfork(); }
+
+void ThreadPool::Postfork() {
+  state_->queue.Reset();
+  for (int i = 0; i < reserve_threads_; i++) {
+    StartThread(state_, /*throttled=*/false);
+  }
 }
 
 }  // namespace experimental
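
Two idioms carry this rewrite: Queue::Add reports whether no reserved thread is idle, and currently_starting_one_thread rate-limits spawning to one thread in flight via an atomic exchange. The exchange idiom in isolation; SpawnThread is a hypothetical stand-in for the real StartThread(StatePtr, throttled):

    #include <atomic>

    void SpawnThread();  // hypothetical; the real code passes StatePtr + throttled

    std::atomic<bool> currently_starting_one_thread{false};

    void MaybeStartThread() {
      // exchange() stores true and returns the prior value, so exactly one
      // racing caller wins the right to spawn; the spawned thread resets the
      // flag once it is running (the GPR_ASSERT in StartThread checks this).
      if (!currently_starting_one_thread.exchange(true,
                                                  std::memory_order_relaxed)) {
        SpawnThread();
      }
    }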
--- a/data/src/core/lib/event_engine/thread_pool.h
+++ b/data/src/core/lib/event_engine/thread_pool.h
@@ -21,15 +21,15 @@
 
 #include <grpc/support/port_platform.h>
 
+#include <atomic>
+#include <memory>
 #include <queue>
-#include <vector>
 
 #include "absl/base/thread_annotations.h"
 #include "absl/functional/any_invocable.h"
 
 #include "src/core/lib/event_engine/forkable.h"
 #include "src/core/lib/gprpp/sync.h"
-#include "src/core/lib/gprpp/thd.h"
 
 namespace grpc_event_engine {
 namespace experimental {
@@ -37,42 +37,75 @@ namespace experimental {
 class ThreadPool final : public grpc_event_engine::experimental::Forkable {
  public:
   explicit ThreadPool(int reserve_threads);
+  // Ensures the thread pool is empty before destroying it.
   ~ThreadPool() override;
 
   void Add(absl::AnyInvocable<void()> callback);
 
   // Forkable
+  // Ensures that the thread pool is empty before forking.
   void PrepareFork() override;
   void PostforkParent() override;
   void PostforkChild() override;
 
  private:
-  class
+  class Queue {
    public:
-    explicit
-
+    explicit Queue(int reserve_threads) : reserve_threads_(reserve_threads) {}
+    bool Step();
+    void SetShutdown() { SetState(State::kShutdown); }
+    void SetForking() { SetState(State::kForking); }
+    // Add a callback to the queue.
+    // Return true if we should also spin up a new thread.
+    bool Add(absl::AnyInvocable<void()> callback);
+    void Reset() { SetState(State::kRunning); }
 
    private:
-
-
-    void
+    enum class State { kRunning, kShutdown, kForking };
+
+    void SetState(State state);
+
+    grpc_core::Mutex mu_;
+    grpc_core::CondVar cv_;
+    std::queue<absl::AnyInvocable<void()>> callbacks_ ABSL_GUARDED_BY(mu_);
+    int threads_waiting_ ABSL_GUARDED_BY(mu_) = 0;
+    const int reserve_threads_;
+    State state_ ABSL_GUARDED_BY(mu_) = State::kRunning;
   };
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  class ThreadCount {
+   public:
+    void Add();
+    void Remove();
+    void BlockUntilThreadCount(int threads, const char* why);
+
+   private:
+    grpc_core::Mutex mu_;
+    grpc_core::CondVar cv_;
+    int threads_ ABSL_GUARDED_BY(mu_) = 0;
+  };
+
+  struct State {
+    explicit State(int reserve_threads) : queue(reserve_threads) {}
+    Queue queue;
+    ThreadCount thread_count;
+    // After pool creation we use this to rate limit creation of threads to one
+    // at a time.
+    std::atomic<bool> currently_starting_one_thread{false};
+  };
+
+  using StatePtr = std::shared_ptr<State>;
+
+  static void ThreadFunc(StatePtr state);
+  // Start a new thread; throttled indicates whether the State::starting_thread
+  // variable is being used to throttle this threads creation against others or
+  // not: at thread pool startup we start several threads concurrently, but
+  // after that we only start one at a time.
+  static void StartThread(StatePtr state, bool throttled);
+  void Postfork();
+
+  const int reserve_threads_;
+  const StatePtr state_ = std::make_shared<State>(reserve_threads_);
 };
 
 }  // namespace experimental
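
Taken together, the new header describes a small fork-safe pool whose shared State outlives the pool object until the last detached thread exits. A hedged usage sketch against the declared public surface; the thread count and the logged message are arbitrary:

    #include "src/core/lib/event_engine/thread_pool.h"

    #include <grpc/support/log.h>

    int main() {
      grpc_event_engine::experimental::ThreadPool pool(/*reserve_threads=*/4);
      pool.Add([]() { gpr_log(GPR_INFO, "ran on a pool thread"); });
      // ~ThreadPool() blocks here until queued work drains: it waits for a
      // thread count of 0 (or 1 when destruction runs on a pool thread).
    }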