grpc 1.60.2 → 1.61.0.pre2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Makefile +208 -165
- data/include/grpc/event_engine/event_engine.h +59 -12
- data/include/grpc/event_engine/internal/memory_allocator_impl.h +6 -0
- data/include/grpc/event_engine/internal/slice_cast.h +12 -0
- data/include/grpc/event_engine/memory_allocator.h +3 -1
- data/include/grpc/event_engine/slice.h +5 -0
- data/include/grpc/grpc_security.h +22 -1
- data/include/grpc/impl/call.h +29 -0
- data/include/grpc/impl/channel_arg_names.h +12 -1
- data/include/grpc/impl/slice_type.h +1 -1
- data/include/grpc/module.modulemap +1 -0
- data/src/core/ext/filters/backend_metrics/backend_metric_filter.cc +54 -7
- data/src/core/ext/filters/backend_metrics/backend_metric_filter.h +20 -6
- data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +10 -13
- data/src/core/ext/filters/channel_idle/channel_idle_filter.h +18 -10
- data/src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc +326 -0
- data/src/core/ext/filters/channel_idle/legacy_channel_idle_filter.h +143 -0
- data/src/core/ext/filters/client_channel/backend_metric.cc +2 -2
- data/src/core/ext/filters/client_channel/client_channel.cc +32 -6
- data/src/core/ext/filters/client_channel/client_channel_internal.h +2 -0
- data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc +54 -21
- data/src/core/ext/filters/client_channel/lb_policy/address_filtering.h +3 -2
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +2 -1
- data/src/core/ext/filters/client_channel/lb_policy/endpoint_list.cc +12 -15
- data/src/core/ext/filters/client_channel/lb_policy/endpoint_list.h +8 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +139 -92
- data/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc +9 -4
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +9 -4
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +10 -11
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +94 -93
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +5 -3
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +12 -15
- data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +38 -16
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +25 -28
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +10 -10
- data/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc +37 -35
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +11 -9
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +504 -461
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +232 -122
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +8 -6
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_override_host.cc +642 -251
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_override_host.h +2 -6
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_wrr_locality.cc +7 -8
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +2 -1
- data/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc +3 -1
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +6 -8
- data/src/core/ext/filters/client_channel/resolver/xds/xds_dependency_manager.cc +1031 -0
- data/src/core/ext/filters/client_channel/resolver/xds/xds_dependency_manager.h +277 -0
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +128 -270
- data/src/core/ext/filters/client_channel/resolver/xds/{xds_resolver.h → xds_resolver_attributes.h} +5 -4
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver_trace.cc +25 -0
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver_trace.h +30 -0
- data/src/core/ext/filters/client_channel/retry_filter.cc +1 -0
- data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +35 -17
- data/src/core/ext/filters/deadline/deadline_filter.cc +12 -0
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +17 -13
- data/src/core/ext/filters/fault_injection/fault_injection_filter.h +13 -4
- data/src/core/ext/filters/http/client/http_client_filter.cc +23 -32
- data/src/core/ext/filters/http/client/http_client_filter.h +10 -5
- data/src/core/ext/filters/http/client_authority_filter.cc +14 -14
- data/src/core/ext/filters/http/client_authority_filter.h +12 -4
- data/src/core/ext/filters/http/http_filters_plugin.cc +42 -20
- data/src/core/ext/filters/http/message_compress/compression_filter.cc +55 -80
- data/src/core/ext/filters/http/message_compress/compression_filter.h +54 -12
- data/src/core/ext/filters/http/message_compress/legacy_compression_filter.cc +325 -0
- data/src/core/ext/filters/http/message_compress/legacy_compression_filter.h +139 -0
- data/src/core/ext/filters/http/server/http_server_filter.cc +41 -41
- data/src/core/ext/filters/http/server/http_server_filter.h +11 -4
- data/src/core/ext/filters/message_size/message_size_filter.cc +56 -76
- data/src/core/ext/filters/message_size/message_size_filter.h +35 -23
- data/src/core/ext/filters/rbac/rbac_filter.cc +15 -11
- data/src/core/ext/filters/rbac/rbac_filter.h +11 -4
- data/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc +25 -13
- data/src/core/ext/filters/stateful_session/stateful_session_filter.cc +47 -50
- data/src/core/ext/filters/stateful_session/stateful_session_filter.h +21 -4
- data/src/core/ext/transport/chttp2/alpn/alpn.cc +1 -1
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +2 -2
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +11 -2
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +68 -145
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -3
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +21 -82
- data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -8
- data/src/core/ext/transport/chttp2/transport/frame.cc +506 -0
- data/src/core/ext/transport/chttp2/transport/frame.h +214 -0
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +33 -79
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +4 -7
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +27 -36
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +0 -2
- data/src/core/ext/transport/chttp2/transport/http2_settings.cc +122 -32
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +142 -37
- data/src/core/ext/transport/chttp2/transport/internal.h +1 -22
- data/src/core/ext/transport/chttp2/transport/parsing.cc +23 -37
- data/src/core/ext/transport/chttp2/transport/writing.cc +26 -58
- data/src/core/ext/transport/inproc/inproc_transport.cc +172 -13
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb.h +712 -0
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb_minitable.c +151 -0
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb_minitable.h +33 -0
- data/src/core/ext/upbdefs-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upbdefs.c +133 -0
- data/src/core/ext/upbdefs-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upbdefs.h +50 -0
- data/src/core/ext/xds/certificate_provider_store.cc +2 -1
- data/src/core/ext/xds/certificate_provider_store.h +0 -5
- data/src/core/ext/xds/xds_api.cc +31 -18
- data/src/core/ext/xds/xds_api.h +2 -2
- data/src/core/ext/xds/xds_bootstrap.h +3 -0
- data/src/core/ext/xds/xds_certificate_provider.cc +88 -287
- data/src/core/ext/xds/xds_certificate_provider.h +44 -111
- data/src/core/ext/xds/xds_client.cc +420 -414
- data/src/core/ext/xds/xds_client.h +31 -22
- data/src/core/ext/xds/xds_client_grpc.cc +3 -1
- data/src/core/ext/xds/xds_cluster.cc +104 -11
- data/src/core/ext/xds/xds_cluster.h +9 -1
- data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +9 -5
- data/src/core/ext/xds/xds_common_types.cc +14 -10
- data/src/core/ext/xds/xds_endpoint.cc +9 -4
- data/src/core/ext/xds/xds_endpoint.h +5 -1
- data/src/core/ext/xds/xds_health_status.cc +12 -2
- data/src/core/ext/xds/xds_health_status.h +4 -2
- data/src/core/ext/xds/xds_http_rbac_filter.cc +5 -3
- data/src/core/ext/xds/xds_listener.cc +14 -8
- data/src/core/ext/xds/xds_resource_type_impl.h +6 -4
- data/src/core/ext/xds/xds_route_config.cc +34 -22
- data/src/core/ext/xds/xds_route_config.h +1 -0
- data/src/core/ext/xds/xds_server_config_fetcher.cc +61 -57
- data/src/core/ext/xds/xds_transport.h +3 -0
- data/src/core/ext/xds/xds_transport_grpc.cc +47 -50
- data/src/core/ext/xds/xds_transport_grpc.h +4 -0
- data/src/core/lib/channel/call_tracer.cc +12 -0
- data/src/core/lib/channel/call_tracer.h +17 -3
- data/src/core/lib/channel/channel_args.cc +24 -14
- data/src/core/lib/channel/channel_args.h +74 -13
- data/src/core/lib/channel/channel_stack.cc +27 -0
- data/src/core/lib/channel/channel_stack.h +10 -10
- data/src/core/lib/channel/connected_channel.cc +64 -18
- data/src/core/lib/channel/promise_based_filter.h +1041 -1
- data/src/core/lib/channel/server_call_tracer_filter.cc +43 -35
- data/src/core/lib/compression/compression_internal.cc +0 -3
- data/src/core/lib/event_engine/ares_resolver.cc +35 -14
- data/src/core/lib/event_engine/ares_resolver.h +9 -10
- data/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc +8 -1
- data/src/core/lib/event_engine/posix_engine/native_posix_dns_resolver.cc +132 -0
- data/src/core/lib/event_engine/posix_engine/native_posix_dns_resolver.h +61 -0
- data/src/core/lib/event_engine/posix_engine/posix_engine.cc +52 -36
- data/src/core/lib/event_engine/posix_engine/posix_engine.h +4 -9
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc +11 -3
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc +9 -2
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +7 -0
- data/src/core/lib/event_engine/posix_engine/timer_manager.cc +17 -27
- data/src/core/lib/event_engine/posix_engine/timer_manager.h +0 -3
- data/src/core/lib/event_engine/ref_counted_dns_resolver_interface.h +55 -0
- data/src/core/lib/event_engine/windows/native_windows_dns_resolver.cc +114 -0
- data/src/core/lib/event_engine/windows/native_windows_dns_resolver.h +51 -0
- data/src/core/lib/event_engine/windows/windows_engine.cc +7 -7
- data/src/core/lib/experiments/config.cc +13 -0
- data/src/core/lib/experiments/config.h +3 -0
- data/src/core/lib/experiments/experiments.cc +245 -366
- data/src/core/lib/experiments/experiments.h +50 -156
- data/src/core/lib/gprpp/debug_location.h +13 -0
- data/src/core/lib/gprpp/dual_ref_counted.h +36 -7
- data/src/core/lib/gprpp/orphanable.h +27 -0
- data/src/core/lib/gprpp/ref_counted.h +63 -22
- data/src/core/lib/gprpp/ref_counted_ptr.h +70 -27
- data/src/core/lib/gprpp/ref_counted_string.h +13 -0
- data/src/core/lib/gprpp/status_helper.cc +1 -2
- data/src/core/lib/iomgr/combiner.cc +15 -51
- data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +31 -0
- data/src/core/lib/iomgr/event_engine_shims/endpoint.h +16 -0
- data/src/core/lib/iomgr/tcp_client_posix.cc +4 -3
- data/src/core/lib/load_balancing/lb_policy.h +1 -1
- data/src/core/lib/promise/activity.cc +17 -2
- data/src/core/lib/promise/activity.h +5 -4
- data/src/core/lib/promise/all_ok.h +80 -0
- data/src/core/lib/promise/detail/join_state.h +2077 -0
- data/src/core/lib/promise/detail/promise_factory.h +1 -0
- data/src/core/lib/promise/detail/promise_like.h +8 -1
- data/src/core/lib/promise/detail/seq_state.h +3458 -150
- data/src/core/lib/promise/detail/status.h +42 -5
- data/src/core/lib/promise/for_each.h +13 -1
- data/src/core/lib/promise/if.h +4 -0
- data/src/core/lib/promise/latch.h +6 -3
- data/src/core/lib/promise/party.cc +33 -31
- data/src/core/lib/promise/party.h +142 -6
- data/src/core/lib/promise/poll.h +39 -13
- data/src/core/lib/promise/promise.h +4 -0
- data/src/core/lib/promise/seq.h +107 -7
- data/src/core/lib/promise/status_flag.h +196 -0
- data/src/core/lib/promise/try_join.h +132 -0
- data/src/core/lib/promise/try_seq.h +132 -10
- data/src/core/lib/resolver/endpoint_addresses.cc +0 -1
- data/src/core/lib/resolver/endpoint_addresses.h +48 -0
- data/src/core/lib/resource_quota/arena.h +2 -2
- data/src/core/lib/resource_quota/memory_quota.cc +57 -8
- data/src/core/lib/resource_quota/memory_quota.h +6 -0
- data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +14 -11
- data/src/core/lib/security/authorization/grpc_server_authz_filter.h +14 -5
- data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +4 -0
- data/src/core/lib/security/credentials/external/aws_external_account_credentials.h +4 -0
- data/src/core/lib/security/credentials/external/external_account_credentials.cc +28 -20
- data/src/core/lib/security/credentials/external/external_account_credentials.h +4 -0
- data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +4 -0
- data/src/core/lib/security/credentials/external/file_external_account_credentials.h +4 -0
- data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +4 -0
- data/src/core/lib/security/credentials/external/url_external_account_credentials.h +4 -0
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +2 -1
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +0 -3
- data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +12 -0
- data/src/core/lib/security/credentials/tls/grpc_tls_crl_provider.cc +22 -5
- data/src/core/lib/security/credentials/tls/grpc_tls_crl_provider.h +1 -5
- data/src/core/lib/security/credentials/tls/tls_credentials.cc +16 -0
- data/src/core/lib/security/credentials/xds/xds_credentials.cc +21 -28
- data/src/core/lib/security/credentials/xds/xds_credentials.h +2 -4
- data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +4 -3
- data/src/core/lib/security/transport/auth_filters.h +71 -4
- data/src/core/lib/security/transport/client_auth_filter.cc +2 -4
- data/src/core/lib/security/transport/legacy_server_auth_filter.cc +244 -0
- data/src/core/lib/security/transport/server_auth_filter.cc +70 -90
- data/src/core/lib/slice/slice_buffer.h +3 -0
- data/src/core/lib/surface/builtins.cc +1 -1
- data/src/core/lib/surface/call.cc +683 -196
- data/src/core/lib/surface/call.h +26 -13
- data/src/core/lib/surface/call_trace.cc +42 -1
- data/src/core/lib/surface/channel.cc +0 -1
- data/src/core/lib/surface/channel.h +0 -6
- data/src/core/lib/surface/channel_init.h +26 -0
- data/src/core/lib/surface/init.cc +14 -8
- data/src/core/lib/surface/server.cc +256 -237
- data/src/core/lib/surface/server.h +26 -54
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/surface/wait_for_cq_end_op.h +94 -0
- data/src/core/lib/transport/call_final_info.cc +38 -0
- data/src/core/lib/transport/call_final_info.h +54 -0
- data/src/core/lib/transport/connectivity_state.cc +3 -2
- data/src/core/lib/transport/connectivity_state.h +4 -0
- data/src/core/lib/transport/metadata_batch.h +4 -4
- data/src/core/lib/transport/transport.cc +70 -19
- data/src/core/lib/transport/transport.h +395 -25
- data/src/core/plugin_registry/grpc_plugin_registry.cc +3 -0
- data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +0 -3
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +1 -1
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +1 -1
- data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +1 -1
- data/src/core/tsi/ssl_transport_security.cc +65 -43
- data/src/ruby/ext/grpc/rb_channel_args.c +3 -1
- data/src/ruby/ext/grpc/rb_grpc.c +0 -1
- data/src/ruby/ext/grpc/rb_grpc.h +0 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +6 -0
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/upb/upb/reflection/def_pool.h +2 -2
- data/third_party/zlib/adler32.c +5 -27
- data/third_party/zlib/compress.c +5 -16
- data/third_party/zlib/crc32.c +86 -162
- data/third_party/zlib/deflate.c +233 -336
- data/third_party/zlib/deflate.h +8 -8
- data/third_party/zlib/gzguts.h +11 -12
- data/third_party/zlib/infback.c +7 -23
- data/third_party/zlib/inffast.c +1 -4
- data/third_party/zlib/inffast.h +1 -1
- data/third_party/zlib/inflate.c +30 -99
- data/third_party/zlib/inftrees.c +6 -11
- data/third_party/zlib/inftrees.h +3 -3
- data/third_party/zlib/trees.c +224 -302
- data/third_party/zlib/uncompr.c +4 -12
- data/third_party/zlib/zconf.h +6 -2
- data/third_party/zlib/zlib.h +191 -188
- data/third_party/zlib/zutil.c +16 -44
- data/third_party/zlib/zutil.h +10 -10
- metadata +35 -13
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +0 -1173
- data/src/core/lib/event_engine/memory_allocator.cc +0 -74
- data/src/core/lib/transport/pid_controller.cc +0 -51
- data/src/core/lib/transport/pid_controller.h +0 -116
- data/third_party/upb/upb/collections/array.h +0 -17
- data/third_party/upb/upb/collections/map.h +0 -17
- data/third_party/upb/upb/upb.hpp +0 -18
@@ -23,8 +23,10 @@
|
|
23
23
|
|
24
24
|
#include <algorithm>
|
25
25
|
#include <functional>
|
26
|
+
#include <memory>
|
26
27
|
#include <type_traits>
|
27
28
|
|
29
|
+
#include "absl/cleanup/cleanup.h"
|
28
30
|
#include "absl/strings/match.h"
|
29
31
|
#include "absl/strings/str_cat.h"
|
30
32
|
#include "absl/strings/str_join.h"
|
@@ -68,10 +70,10 @@ TraceFlag grpc_xds_client_refcount_trace(false, "xds_client_refcount");
|
|
68
70
|
// An xds call wrapper that can restart a call upon failure. Holds a ref to
|
69
71
|
// the xds channel. The template parameter is the kind of wrapped xds call.
|
70
72
|
template <typename T>
|
71
|
-
class XdsClient::
|
73
|
+
class XdsClient::XdsChannel::RetryableCall
|
72
74
|
: public InternallyRefCounted<RetryableCall<T>> {
|
73
75
|
public:
|
74
|
-
explicit RetryableCall(WeakRefCountedPtr<
|
76
|
+
explicit RetryableCall(WeakRefCountedPtr<XdsChannel> xds_channel);
|
75
77
|
|
76
78
|
// Disable thread-safety analysis because this method is called via
|
77
79
|
// OrphanablePtr<>, but there's no way to pass the lock annotation
|
@@ -80,8 +82,8 @@ class XdsClient::ChannelState::RetryableCall
|
|
80
82
|
|
81
83
|
void OnCallFinishedLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
82
84
|
|
83
|
-
T*
|
84
|
-
|
85
|
+
T* call() const { return call_.get(); }
|
86
|
+
XdsChannel* xds_channel() const { return xds_channel_.get(); }
|
85
87
|
|
86
88
|
bool IsCurrentCallOnChannel() const;
|
87
89
|
|
@@ -93,9 +95,9 @@ class XdsClient::ChannelState::RetryableCall
|
|
93
95
|
|
94
96
|
// The wrapped xds call that talks to the xds server. It's instantiated
|
95
97
|
// every time we start a new call. It's null during call retry backoff.
|
96
|
-
OrphanablePtr<T>
|
98
|
+
OrphanablePtr<T> call_;
|
97
99
|
// The owning xds channel.
|
98
|
-
WeakRefCountedPtr<
|
100
|
+
WeakRefCountedPtr<XdsChannel> xds_channel_;
|
99
101
|
|
100
102
|
// Retry state.
|
101
103
|
BackOff backoff_;
|
@@ -106,17 +108,18 @@ class XdsClient::ChannelState::RetryableCall
|
|
106
108
|
};
|
107
109
|
|
108
110
|
// Contains an ADS call to the xds server.
|
109
|
-
class XdsClient::
|
110
|
-
: public InternallyRefCounted<AdsCallState> {
|
111
|
+
class XdsClient::XdsChannel::AdsCall : public InternallyRefCounted<AdsCall> {
|
111
112
|
public:
|
112
113
|
// The ctor and dtor should not be used directly.
|
113
|
-
explicit
|
114
|
+
explicit AdsCall(RefCountedPtr<RetryableCall<AdsCall>> retryable_call);
|
114
115
|
|
115
116
|
void Orphan() override;
|
116
117
|
|
117
|
-
RetryableCall<
|
118
|
-
|
119
|
-
|
118
|
+
RetryableCall<AdsCall>* retryable_call() const {
|
119
|
+
return retryable_call_.get();
|
120
|
+
}
|
121
|
+
XdsChannel* xds_channel() const { return retryable_call_->xds_channel(); }
|
122
|
+
XdsClient* xds_client() const { return xds_channel()->xds_client(); }
|
120
123
|
bool seen_response() const { return seen_response_; }
|
121
124
|
|
122
125
|
void SubscribeLocked(const XdsResourceType* type, const XdsResourceName& name,
|
@@ -129,6 +132,8 @@ class XdsClient::ChannelState::AdsCallState
|
|
129
132
|
bool HasSubscribedResources() const;
|
130
133
|
|
131
134
|
private:
|
135
|
+
class AdsReadDelayHandle;
|
136
|
+
|
132
137
|
class AdsResponseParser : public XdsApi::AdsResponseParserInterface {
|
133
138
|
public:
|
134
139
|
struct Result {
|
@@ -140,10 +145,10 @@ class XdsClient::ChannelState::AdsCallState
|
|
140
145
|
std::map<std::string /*authority*/, std::set<XdsResourceKey>>
|
141
146
|
resources_seen;
|
142
147
|
bool have_valid_resources = false;
|
148
|
+
RefCountedPtr<ReadDelayHandle> read_delay_handle;
|
143
149
|
};
|
144
150
|
|
145
|
-
explicit AdsResponseParser(
|
146
|
-
: ads_call_state_(ads_call_state) {}
|
151
|
+
explicit AdsResponseParser(AdsCall* ads_call) : ads_call_(ads_call) {}
|
147
152
|
|
148
153
|
absl::Status ProcessAdsResponseFields(AdsResponseFields fields) override
|
149
154
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
@@ -159,9 +164,9 @@ class XdsClient::ChannelState::AdsCallState
|
|
159
164
|
Result TakeResult() { return std::move(result_); }
|
160
165
|
|
161
166
|
private:
|
162
|
-
XdsClient* xds_client() const { return
|
167
|
+
XdsClient* xds_client() const { return ads_call_->xds_client(); }
|
163
168
|
|
164
|
-
|
169
|
+
AdsCall* ads_call_;
|
165
170
|
const Timestamp update_time_ = Timestamp::Now();
|
166
171
|
Result result_;
|
167
172
|
};
|
@@ -184,10 +189,9 @@ class XdsClient::ChannelState::AdsCallState
|
|
184
189
|
subscription_sent_ = true;
|
185
190
|
}
|
186
191
|
|
187
|
-
void MaybeMarkSubscriptionSendComplete(
|
188
|
-
RefCountedPtr<AdsCallState> ads_calld)
|
192
|
+
void MaybeMarkSubscriptionSendComplete(RefCountedPtr<AdsCall> ads_call)
|
189
193
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
190
|
-
if (subscription_sent_) MaybeStartTimer(std::move(
|
194
|
+
if (subscription_sent_) MaybeStartTimer(std::move(ads_call));
|
191
195
|
}
|
192
196
|
|
193
197
|
void MarkSeen() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
@@ -197,13 +201,13 @@ class XdsClient::ChannelState::AdsCallState
|
|
197
201
|
|
198
202
|
void MaybeCancelTimer() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
199
203
|
if (timer_handle_.has_value() &&
|
200
|
-
|
204
|
+
ads_call_->xds_client()->engine()->Cancel(*timer_handle_)) {
|
201
205
|
timer_handle_.reset();
|
202
206
|
}
|
203
207
|
}
|
204
208
|
|
205
209
|
private:
|
206
|
-
void MaybeStartTimer(RefCountedPtr<
|
210
|
+
void MaybeStartTimer(RefCountedPtr<AdsCall> ads_call)
|
207
211
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
208
212
|
// Don't start timer if we've already either seen the resource or
|
209
213
|
// marked it as non-existing.
|
@@ -224,13 +228,13 @@ class XdsClient::ChannelState::AdsCallState
|
|
224
228
|
// (a) we already have the resource and (b) the server may
|
225
229
|
// optimize by not resending the resource that we already have.
|
226
230
|
auto& authority_state =
|
227
|
-
|
231
|
+
ads_call->xds_client()->authority_state_map_[name_.authority];
|
228
232
|
ResourceState& state = authority_state.resource_map[type_][name_.key];
|
229
233
|
if (state.resource != nullptr) return;
|
230
234
|
// Start timer.
|
231
|
-
|
232
|
-
timer_handle_ =
|
233
|
-
|
235
|
+
ads_call_ = std::move(ads_call);
|
236
|
+
timer_handle_ = ads_call_->xds_client()->engine()->RunAfter(
|
237
|
+
ads_call_->xds_client()->request_timeout_,
|
234
238
|
[self = Ref(DEBUG_LOCATION, "timer")]() {
|
235
239
|
ApplicationCallbackExecCtx callback_exec_ctx;
|
236
240
|
ExecCtx exec_ctx;
|
@@ -243,32 +247,32 @@ class XdsClient::ChannelState::AdsCallState
|
|
243
247
|
gpr_log(GPR_INFO,
|
244
248
|
"[xds_client %p] xds server %s: timeout obtaining resource "
|
245
249
|
"{type=%s name=%s} from xds server",
|
246
|
-
|
247
|
-
|
250
|
+
ads_call_->xds_client(),
|
251
|
+
ads_call_->xds_channel()->server_.server_uri().c_str(),
|
248
252
|
std::string(type_->type_url()).c_str(),
|
249
253
|
XdsClient::ConstructFullXdsResourceName(
|
250
254
|
name_.authority, type_->type_url(), name_.key)
|
251
255
|
.c_str());
|
252
256
|
}
|
253
257
|
{
|
254
|
-
MutexLock lock(&
|
258
|
+
MutexLock lock(&ads_call_->xds_client()->mu_);
|
255
259
|
timer_handle_.reset();
|
256
260
|
resource_seen_ = true;
|
257
261
|
auto& authority_state =
|
258
|
-
|
262
|
+
ads_call_->xds_client()->authority_state_map_[name_.authority];
|
259
263
|
ResourceState& state = authority_state.resource_map[type_][name_.key];
|
260
264
|
state.meta.client_status = XdsApi::ResourceMetadata::DOES_NOT_EXIST;
|
261
|
-
|
262
|
-
state.watchers);
|
265
|
+
ads_call_->xds_client()->NotifyWatchersOnResourceDoesNotExist(
|
266
|
+
state.watchers, ReadDelayHandle::NoWait());
|
263
267
|
}
|
264
|
-
|
265
|
-
|
268
|
+
ads_call_->xds_client()->work_serializer_.DrainQueue();
|
269
|
+
ads_call_.reset();
|
266
270
|
}
|
267
271
|
|
268
272
|
const XdsResourceType* type_;
|
269
273
|
const XdsResourceName name_;
|
270
274
|
|
271
|
-
RefCountedPtr<
|
275
|
+
RefCountedPtr<AdsCall> ads_call_;
|
272
276
|
// True if we have sent the initial subscription request for this
|
273
277
|
// resource on this ADS stream.
|
274
278
|
bool subscription_sent_ ABSL_GUARDED_BY(&XdsClient::mu_) = false;
|
@@ -283,19 +287,19 @@ class XdsClient::ChannelState::AdsCallState
|
|
283
287
|
class StreamEventHandler
|
284
288
|
: public XdsTransportFactory::XdsTransport::StreamingCall::EventHandler {
|
285
289
|
public:
|
286
|
-
explicit StreamEventHandler(RefCountedPtr<
|
287
|
-
:
|
290
|
+
explicit StreamEventHandler(RefCountedPtr<AdsCall> ads_call)
|
291
|
+
: ads_call_(std::move(ads_call)) {}
|
288
292
|
|
289
|
-
void OnRequestSent(bool ok) override {
|
293
|
+
void OnRequestSent(bool ok) override { ads_call_->OnRequestSent(ok); }
|
290
294
|
void OnRecvMessage(absl::string_view payload) override {
|
291
|
-
|
295
|
+
ads_call_->OnRecvMessage(payload);
|
292
296
|
}
|
293
297
|
void OnStatusReceived(absl::Status status) override {
|
294
|
-
|
298
|
+
ads_call_->OnStatusReceived(std::move(status));
|
295
299
|
}
|
296
300
|
|
297
301
|
private:
|
298
|
-
RefCountedPtr<
|
302
|
+
RefCountedPtr<AdsCall> ads_call_;
|
299
303
|
};
|
300
304
|
|
301
305
|
struct ResourceTypeState {
|
@@ -324,9 +328,10 @@ class XdsClient::ChannelState::AdsCallState
|
|
324
328
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
325
329
|
|
326
330
|
// The owning RetryableCall<>.
|
327
|
-
RefCountedPtr<RetryableCall<
|
331
|
+
RefCountedPtr<RetryableCall<AdsCall>> retryable_call_;
|
328
332
|
|
329
|
-
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall>
|
333
|
+
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall>
|
334
|
+
streaming_call_;
|
330
335
|
|
331
336
|
bool sent_initial_message_ = false;
|
332
337
|
bool seen_response_ = false;
|
@@ -342,87 +347,86 @@ class XdsClient::ChannelState::AdsCallState
|
|
342
347
|
};
|
343
348
|
|
344
349
|
// Contains an LRS call to the xds server.
|
345
|
-
class XdsClient::
|
346
|
-
: public InternallyRefCounted<LrsCallState> {
|
350
|
+
class XdsClient::XdsChannel::LrsCall : public InternallyRefCounted<LrsCall> {
|
347
351
|
public:
|
348
352
|
// The ctor and dtor should not be used directly.
|
349
|
-
explicit
|
353
|
+
explicit LrsCall(RefCountedPtr<RetryableCall<LrsCall>> retryable_call);
|
350
354
|
|
351
355
|
void Orphan() override;
|
352
356
|
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
RetryableCall<LrsCallState>* parent() { return parent_.get(); }
|
357
|
-
ChannelState* chand() const { return parent_->chand(); }
|
358
|
-
XdsClient* xds_client() const { return chand()->xds_client(); }
|
357
|
+
RetryableCall<LrsCall>* retryable_call() { return retryable_call_.get(); }
|
358
|
+
XdsChannel* xds_channel() const { return retryable_call_->xds_channel(); }
|
359
|
+
XdsClient* xds_client() const { return xds_channel()->xds_client(); }
|
359
360
|
bool seen_response() const { return seen_response_; }
|
360
361
|
|
361
362
|
private:
|
362
363
|
class StreamEventHandler
|
363
364
|
: public XdsTransportFactory::XdsTransport::StreamingCall::EventHandler {
|
364
365
|
public:
|
365
|
-
explicit StreamEventHandler(RefCountedPtr<
|
366
|
-
:
|
366
|
+
explicit StreamEventHandler(RefCountedPtr<LrsCall> lrs_call)
|
367
|
+
: lrs_call_(std::move(lrs_call)) {}
|
367
368
|
|
368
|
-
void OnRequestSent(bool ok) override {
|
369
|
+
void OnRequestSent(bool /*ok*/) override { lrs_call_->OnRequestSent(); }
|
369
370
|
void OnRecvMessage(absl::string_view payload) override {
|
370
|
-
|
371
|
+
lrs_call_->OnRecvMessage(payload);
|
371
372
|
}
|
372
373
|
void OnStatusReceived(absl::Status status) override {
|
373
|
-
|
374
|
+
lrs_call_->OnStatusReceived(std::move(status));
|
374
375
|
}
|
375
376
|
|
376
377
|
private:
|
377
|
-
RefCountedPtr<
|
378
|
+
RefCountedPtr<LrsCall> lrs_call_;
|
378
379
|
};
|
379
380
|
|
380
|
-
//
|
381
|
-
class
|
381
|
+
// A repeating timer for a particular duration.
|
382
|
+
class Timer : public InternallyRefCounted<Timer> {
|
382
383
|
public:
|
383
|
-
|
384
|
-
:
|
385
|
-
|
386
|
-
}
|
384
|
+
explicit Timer(RefCountedPtr<LrsCall> lrs_call)
|
385
|
+
: lrs_call_(std::move(lrs_call)) {}
|
386
|
+
~Timer() override { lrs_call_.reset(DEBUG_LOCATION, "LRS timer"); }
|
387
387
|
|
388
388
|
// Disable thread-safety analysis because this method is called via
|
389
389
|
// OrphanablePtr<>, but there's no way to pass the lock annotation
|
390
390
|
// through there.
|
391
391
|
void Orphan() override ABSL_NO_THREAD_SAFETY_ANALYSIS;
|
392
392
|
|
393
|
-
void OnReportDoneLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
394
|
-
|
395
|
-
private:
|
396
393
|
void ScheduleNextReportLocked()
|
397
394
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
398
|
-
bool OnNextReportTimer();
|
399
|
-
bool SendReportLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
400
395
|
|
401
|
-
|
402
|
-
|
396
|
+
private:
|
397
|
+
bool IsCurrentTimerOnCall() const {
|
398
|
+
return this == lrs_call_->timer_.get();
|
403
399
|
}
|
404
|
-
XdsClient* xds_client() const { return
|
400
|
+
XdsClient* xds_client() const { return lrs_call_->xds_client(); }
|
401
|
+
|
402
|
+
void OnNextReportTimer();
|
405
403
|
|
406
404
|
// The owning LRS call.
|
407
|
-
RefCountedPtr<
|
405
|
+
RefCountedPtr<LrsCall> lrs_call_;
|
408
406
|
|
409
|
-
// The load reporting state.
|
410
|
-
const Duration report_interval_;
|
411
|
-
bool last_report_counters_were_zero_ = false;
|
412
407
|
absl::optional<EventEngine::TaskHandle> timer_handle_
|
413
408
|
ABSL_GUARDED_BY(&XdsClient::mu_);
|
414
409
|
};
|
415
410
|
|
416
|
-
void
|
411
|
+
void MaybeScheduleNextReportLocked()
|
412
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
413
|
+
|
414
|
+
void SendReportLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
415
|
+
|
416
|
+
void SendMessageLocked(std::string payload)
|
417
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
418
|
+
|
419
|
+
void OnRequestSent();
|
417
420
|
void OnRecvMessage(absl::string_view payload);
|
418
421
|
void OnStatusReceived(absl::Status status);
|
419
422
|
|
420
423
|
bool IsCurrentCallOnChannel() const;
|
421
424
|
|
422
425
|
// The owning RetryableCall<>.
|
423
|
-
RefCountedPtr<RetryableCall<
|
426
|
+
RefCountedPtr<RetryableCall<LrsCall>> retryable_call_;
|
424
427
|
|
425
|
-
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall>
|
428
|
+
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall>
|
429
|
+
streaming_call_;
|
426
430
|
|
427
431
|
bool seen_response_ = false;
|
428
432
|
bool send_message_pending_ ABSL_GUARDED_BY(&XdsClient::mu_) = false;
|
@@ -431,19 +435,19 @@ class XdsClient::ChannelState::LrsCallState
|
|
431
435
|
bool send_all_clusters_ = false;
|
432
436
|
std::set<std::string> cluster_names_; // Asked for by the LRS server.
|
433
437
|
Duration load_reporting_interval_;
|
434
|
-
|
438
|
+
bool last_report_counters_were_zero_ = false;
|
439
|
+
OrphanablePtr<Timer> timer_;
|
435
440
|
};
|
436
441
|
|
437
442
|
//
|
438
|
-
// XdsClient::
|
443
|
+
// XdsClient::XdsChannel
|
439
444
|
//
|
440
445
|
|
441
|
-
XdsClient::
|
442
|
-
|
443
|
-
: DualRefCounted<
|
444
|
-
GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace)
|
445
|
-
|
446
|
-
: nullptr),
|
446
|
+
XdsClient::XdsChannel::XdsChannel(WeakRefCountedPtr<XdsClient> xds_client,
|
447
|
+
const XdsBootstrap::XdsServer& server)
|
448
|
+
: DualRefCounted<XdsChannel>(
|
449
|
+
GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace) ? "XdsChannel"
|
450
|
+
: nullptr),
|
447
451
|
xds_client_(std::move(xds_client)),
|
448
452
|
server_(server) {
|
449
453
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
@@ -462,19 +466,19 @@ XdsClient::ChannelState::ChannelState(WeakRefCountedPtr<XdsClient> xds_client,
|
|
462
466
|
if (!status.ok()) SetChannelStatusLocked(std::move(status));
|
463
467
|
}
|
464
468
|
|
465
|
-
XdsClient::
|
469
|
+
XdsClient::XdsChannel::~XdsChannel() {
|
466
470
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
467
471
|
gpr_log(GPR_INFO, "[xds_client %p] destroying xds channel %p for server %s",
|
468
472
|
xds_client(), this, server_.server_uri().c_str());
|
469
473
|
}
|
470
|
-
xds_client_.reset(DEBUG_LOCATION, "
|
474
|
+
xds_client_.reset(DEBUG_LOCATION, "XdsChannel");
|
471
475
|
}
|
472
476
|
|
473
477
|
// This method should only ever be called when holding the lock, but we can't
|
474
478
|
// use a ABSL_EXCLUSIVE_LOCKS_REQUIRED annotation, because Orphan() will be
|
475
479
|
// called from DualRefCounted::Unref, which cannot have a lock annotation for
|
476
480
|
// a lock in this subclass.
|
477
|
-
void XdsClient::
|
481
|
+
void XdsClient::XdsChannel::Orphan() ABSL_NO_THREAD_SAFETY_ANALYSIS {
|
478
482
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
479
483
|
gpr_log(GPR_INFO, "[xds_client %p] orphaning xds channel %p for server %s",
|
480
484
|
xds_client(), this, server_.server_uri().c_str());
|
@@ -482,69 +486,67 @@ void XdsClient::ChannelState::Orphan() ABSL_NO_THREAD_SAFETY_ANALYSIS {
|
|
482
486
|
shutting_down_ = true;
|
483
487
|
transport_.reset();
|
484
488
|
// At this time, all strong refs are removed, remove from channel map to
|
485
|
-
// prevent subsequent subscription from trying to use this
|
489
|
+
// prevent subsequent subscription from trying to use this XdsChannel as
|
486
490
|
// it is shutting down.
|
487
491
|
xds_client_->xds_server_channel_map_.erase(&server_);
|
488
|
-
|
489
|
-
|
492
|
+
ads_call_.reset();
|
493
|
+
lrs_call_.reset();
|
490
494
|
}
|
491
495
|
|
492
|
-
void XdsClient::
|
496
|
+
void XdsClient::XdsChannel::ResetBackoff() { transport_->ResetBackoff(); }
|
493
497
|
|
494
|
-
XdsClient::
|
495
|
-
|
496
|
-
return ads_calld_->calld();
|
498
|
+
XdsClient::XdsChannel::AdsCall* XdsClient::XdsChannel::ads_call() const {
|
499
|
+
return ads_call_->call();
|
497
500
|
}
|
498
501
|
|
499
|
-
XdsClient::
|
500
|
-
|
501
|
-
return lrs_calld_->calld();
|
502
|
+
XdsClient::XdsChannel::LrsCall* XdsClient::XdsChannel::lrs_call() const {
|
503
|
+
return lrs_call_->call();
|
502
504
|
}
|
503
505
|
|
504
|
-
void XdsClient::
|
505
|
-
if (
|
506
|
-
|
507
|
-
WeakRef(DEBUG_LOCATION, "
|
506
|
+
void XdsClient::XdsChannel::MaybeStartLrsCall() {
|
507
|
+
if (lrs_call_ != nullptr) return;
|
508
|
+
lrs_call_.reset(
|
509
|
+
new RetryableCall<LrsCall>(WeakRef(DEBUG_LOCATION, "XdsChannel+lrs")));
|
508
510
|
}
|
509
511
|
|
510
|
-
void XdsClient::
|
512
|
+
void XdsClient::XdsChannel::StopLrsCallLocked() {
|
511
513
|
xds_client_->xds_load_report_server_map_.erase(&server_);
|
512
|
-
|
514
|
+
lrs_call_.reset();
|
513
515
|
}
|
514
516
|
|
515
|
-
void XdsClient::
|
516
|
-
|
517
|
-
if (
|
517
|
+
void XdsClient::XdsChannel::SubscribeLocked(const XdsResourceType* type,
|
518
|
+
const XdsResourceName& name) {
|
519
|
+
if (ads_call_ == nullptr) {
|
518
520
|
// Start the ADS call if this is the first request.
|
519
|
-
|
520
|
-
WeakRef(DEBUG_LOCATION, "
|
521
|
-
// Note:
|
521
|
+
ads_call_.reset(
|
522
|
+
new RetryableCall<AdsCall>(WeakRef(DEBUG_LOCATION, "XdsChannel+ads")));
|
523
|
+
// Note: AdsCall's ctor will automatically subscribe to all
|
522
524
|
// resources that the XdsClient already has watchers for, so we can
|
523
525
|
// return here.
|
524
526
|
return;
|
525
527
|
}
|
526
528
|
// If the ADS call is in backoff state, we don't need to do anything now
|
527
529
|
// because when the call is restarted it will resend all necessary requests.
|
528
|
-
if (
|
530
|
+
if (ads_call() == nullptr) return;
|
529
531
|
// Subscribe to this resource if the ADS call is active.
|
530
|
-
|
531
|
-
}
|
532
|
-
|
533
|
-
void XdsClient::
|
534
|
-
|
535
|
-
|
536
|
-
if (
|
537
|
-
auto*
|
538
|
-
if (
|
539
|
-
|
540
|
-
if (!
|
541
|
-
|
532
|
+
ads_call()->SubscribeLocked(type, name, /*delay_send=*/false);
|
533
|
+
}
|
534
|
+
|
535
|
+
void XdsClient::XdsChannel::UnsubscribeLocked(const XdsResourceType* type,
|
536
|
+
const XdsResourceName& name,
|
537
|
+
bool delay_unsubscription) {
|
538
|
+
if (ads_call_ != nullptr) {
|
539
|
+
auto* call = ads_call_->call();
|
540
|
+
if (call != nullptr) {
|
541
|
+
call->UnsubscribeLocked(type, name, delay_unsubscription);
|
542
|
+
if (!call->HasSubscribedResources()) {
|
543
|
+
ads_call_.reset();
|
542
544
|
}
|
543
545
|
}
|
544
546
|
}
|
545
547
|
}
|
546
548
|
|
547
|
-
void XdsClient::
|
549
|
+
void XdsClient::XdsChannel::OnConnectivityFailure(absl::Status status) {
|
548
550
|
{
|
549
551
|
MutexLock lock(&xds_client_->mu_);
|
550
552
|
SetChannelStatusLocked(std::move(status));
|
@@ -552,7 +554,7 @@ void XdsClient::ChannelState::OnConnectivityFailure(absl::Status status) {
|
|
552
554
|
xds_client_->work_serializer_.DrainQueue();
|
553
555
|
}
|
554
556
|
|
555
|
-
void XdsClient::
|
557
|
+
void XdsClient::XdsChannel::SetChannelStatusLocked(absl::Status status) {
|
556
558
|
if (shutting_down_) return;
|
557
559
|
status = absl::Status(status.code(), absl::StrCat("xDS channel for server ",
|
558
560
|
server_.server_uri(), ": ",
|
@@ -574,7 +576,7 @@ void XdsClient::ChannelState::SetChannelStatusLocked(absl::Status status) {
|
|
574
576
|
// Find all watchers for this channel.
|
575
577
|
std::set<RefCountedPtr<ResourceWatcherInterface>> watchers;
|
576
578
|
for (const auto& a : xds_client_->authority_state_map_) { // authority
|
577
|
-
if (a.second.
|
579
|
+
if (a.second.xds_channel != this) continue;
|
578
580
|
for (const auto& t : a.second.resource_map) { // type
|
579
581
|
for (const auto& r : t.second) { // resource id
|
580
582
|
for (const auto& w : r.second.watchers) { // watchers
|
@@ -588,20 +590,20 @@ void XdsClient::ChannelState::SetChannelStatusLocked(absl::Status status) {
|
|
588
590
|
[watchers = std::move(watchers), status = std::move(status)]()
|
589
591
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(xds_client_->work_serializer_) {
|
590
592
|
for (const auto& watcher : watchers) {
|
591
|
-
watcher->OnError(status);
|
593
|
+
watcher->OnError(status, ReadDelayHandle::NoWait());
|
592
594
|
}
|
593
595
|
},
|
594
596
|
DEBUG_LOCATION);
|
595
597
|
}
|
596
598
|
|
597
599
|
//
|
598
|
-
// XdsClient::
|
600
|
+
// XdsClient::XdsChannel::RetryableCall<>
|
599
601
|
//
|
600
602
|
|
601
603
|
template <typename T>
|
602
|
-
XdsClient::
|
603
|
-
WeakRefCountedPtr<
|
604
|
-
:
|
604
|
+
XdsClient::XdsChannel::RetryableCall<T>::RetryableCall(
|
605
|
+
WeakRefCountedPtr<XdsChannel> xds_channel)
|
606
|
+
: xds_channel_(std::move(xds_channel)),
|
605
607
|
backoff_(BackOff::Options()
|
606
608
|
.set_initial_backoff(Duration::Seconds(
|
607
609
|
GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS))
|
@@ -613,42 +615,43 @@ XdsClient::ChannelState::RetryableCall<T>::RetryableCall(
|
|
613
615
|
}
|
614
616
|
|
615
617
|
template <typename T>
|
616
|
-
void XdsClient::
|
618
|
+
void XdsClient::XdsChannel::RetryableCall<T>::Orphan() {
|
617
619
|
shutting_down_ = true;
|
618
|
-
|
620
|
+
call_.reset();
|
619
621
|
if (timer_handle_.has_value()) {
|
620
|
-
|
622
|
+
xds_channel()->xds_client()->engine()->Cancel(*timer_handle_);
|
621
623
|
timer_handle_.reset();
|
622
624
|
}
|
623
625
|
this->Unref(DEBUG_LOCATION, "RetryableCall+orphaned");
|
624
626
|
}
|
625
627
|
|
626
628
|
template <typename T>
|
627
|
-
void XdsClient::
|
629
|
+
void XdsClient::XdsChannel::RetryableCall<T>::OnCallFinishedLocked() {
|
628
630
|
// If we saw a response on the current stream, reset backoff.
|
629
|
-
if (
|
630
|
-
|
631
|
+
if (call_->seen_response()) backoff_.Reset();
|
632
|
+
call_.reset();
|
631
633
|
// Start retry timer.
|
632
634
|
StartRetryTimerLocked();
|
633
635
|
}
|
634
636
|
|
635
637
|
template <typename T>
|
636
|
-
void XdsClient::
|
638
|
+
void XdsClient::XdsChannel::RetryableCall<T>::StartNewCallLocked() {
|
637
639
|
if (shutting_down_) return;
|
638
|
-
GPR_ASSERT(
|
639
|
-
GPR_ASSERT(
|
640
|
+
GPR_ASSERT(xds_channel_->transport_ != nullptr);
|
641
|
+
GPR_ASSERT(call_ == nullptr);
|
640
642
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
641
643
|
gpr_log(GPR_INFO,
|
642
644
|
"[xds_client %p] xds server %s: start new call from retryable "
|
643
645
|
"call %p",
|
644
|
-
|
646
|
+
xds_channel()->xds_client(),
|
647
|
+
xds_channel()->server_.server_uri().c_str(), this);
|
645
648
|
}
|
646
|
-
|
649
|
+
call_ = MakeOrphanable<T>(
|
647
650
|
this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
|
648
651
|
}
|
649
652
|
|
650
653
|
template <typename T>
|
651
|
-
void XdsClient::
|
654
|
+
void XdsClient::XdsChannel::RetryableCall<T>::StartRetryTimerLocked() {
|
652
655
|
if (shutting_down_) return;
|
653
656
|
const Timestamp next_attempt_time = backoff_.NextAttemptTime();
|
654
657
|
const Duration timeout =
|
@@ -657,10 +660,10 @@ void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
|
|
657
660
|
gpr_log(GPR_INFO,
|
658
661
|
"[xds_client %p] xds server %s: call attempt failed; "
|
659
662
|
"retry timer will fire in %" PRId64 "ms.",
|
660
|
-
|
661
|
-
timeout.millis());
|
663
|
+
xds_channel()->xds_client(),
|
664
|
+
xds_channel()->server_.server_uri().c_str(), timeout.millis());
|
662
665
|
}
|
663
|
-
timer_handle_ =
|
666
|
+
timer_handle_ = xds_channel()->xds_client()->engine()->RunAfter(
|
664
667
|
timeout,
|
665
668
|
[self = this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start")]() {
|
666
669
|
ApplicationCallbackExecCtx callback_exec_ctx;
|
@@ -670,8 +673,8 @@ void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
|
|
670
673
|
}
|
671
674
|
|
672
675
|
template <typename T>
|
673
|
-
void XdsClient::
|
674
|
-
MutexLock lock(&
|
676
|
+
void XdsClient::XdsChannel::RetryableCall<T>::OnRetryTimer() {
|
677
|
+
MutexLock lock(&xds_channel_->xds_client()->mu_);
|
675
678
|
if (timer_handle_.has_value()) {
|
676
679
|
timer_handle_.reset();
|
677
680
|
if (shutting_down_) return;
|
@@ -679,31 +682,52 @@ void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimer() {
|
|
679
682
|
gpr_log(GPR_INFO,
|
680
683
|
"[xds_client %p] xds server %s: retry timer fired (retryable "
|
681
684
|
"call: %p)",
|
682
|
-
|
683
|
-
this);
|
685
|
+
xds_channel()->xds_client(),
|
686
|
+
xds_channel()->server_.server_uri().c_str(), this);
|
684
687
|
}
|
685
688
|
StartNewCallLocked();
|
686
689
|
}
|
687
690
|
}
|
688
691
|
|
689
692
|
//
|
690
|
-
// XdsClient::
|
693
|
+
// XdsClient::XdsChannel::AdsCall::AdsReadDelayHandle
|
691
694
|
//
|
692
695
|
|
693
|
-
|
694
|
-
|
696
|
+
class XdsClient::XdsChannel::AdsCall::AdsReadDelayHandle
|
697
|
+
: public XdsClient::ReadDelayHandle {
|
698
|
+
public:
|
699
|
+
explicit AdsReadDelayHandle(RefCountedPtr<AdsCall> ads_call)
|
700
|
+
: ads_call_(std::move(ads_call)) {}
|
701
|
+
|
702
|
+
~AdsReadDelayHandle() override {
|
703
|
+
MutexLock lock(&ads_call_->xds_client()->mu_);
|
704
|
+
auto call = ads_call_->streaming_call_.get();
|
705
|
+
if (call != nullptr) call->StartRecvMessage();
|
706
|
+
}
|
707
|
+
|
708
|
+
private:
|
709
|
+
RefCountedPtr<AdsCall> ads_call_;
|
710
|
+
};
|
711
|
+
|
712
|
+
//
|
713
|
+
// XdsClient::XdsChannel::AdsCall::AdsResponseParser
|
714
|
+
//
|
715
|
+
|
716
|
+
absl::Status
|
717
|
+
XdsClient::XdsChannel::AdsCall::AdsResponseParser::ProcessAdsResponseFields(
|
718
|
+
AdsResponseFields fields) {
|
695
719
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
696
720
|
gpr_log(
|
697
721
|
GPR_INFO,
|
698
722
|
"[xds_client %p] xds server %s: received ADS response: type_url=%s, "
|
699
723
|
"version=%s, nonce=%s, num_resources=%" PRIuPTR,
|
700
|
-
|
701
|
-
|
724
|
+
ads_call_->xds_client(),
|
725
|
+
ads_call_->xds_channel()->server_.server_uri().c_str(),
|
702
726
|
fields.type_url.c_str(), fields.version.c_str(), fields.nonce.c_str(),
|
703
727
|
fields.num_resources);
|
704
728
|
}
|
705
729
|
result_.type =
|
706
|
-
|
730
|
+
ads_call_->xds_client()->GetResourceTypeLocked(fields.type_url);
|
707
731
|
if (result_.type == nullptr) {
|
708
732
|
return absl::InvalidArgumentError(
|
709
733
|
absl::StrCat("unknown resource type ", fields.type_url));
|
@@ -711,6 +735,8 @@ absl::Status XdsClient::ChannelState::AdsCallState::AdsResponseParser::
|
|
711
735
|
result_.type_url = std::move(fields.type_url);
|
712
736
|
result_.version = std::move(fields.version);
|
713
737
|
result_.nonce = std::move(fields.nonce);
|
738
|
+
result_.read_delay_handle =
|
739
|
+
MakeRefCounted<AdsReadDelayHandle>(ads_call_->Ref());
|
714
740
|
return absl::OkStatus();
|
715
741
|
}
|
716
742
|
|
@@ -740,7 +766,7 @@ void UpdateResourceMetadataNacked(const std::string& version,
|
|
740
766
|
|
741
767
|
} // namespace
|
742
768
|
|
743
|
-
void XdsClient::
|
769
|
+
void XdsClient::XdsChannel::AdsCall::AdsResponseParser::ParseResource(
|
744
770
|
upb_Arena* arena, size_t idx, absl::string_view type_url,
|
745
771
|
absl::string_view resource_name, absl::string_view serialized_resource) {
|
746
772
|
std::string error_prefix = absl::StrCat(
|
@@ -755,8 +781,8 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
755
781
|
}
|
756
782
|
// Parse the resource.
|
757
783
|
XdsResourceType::DecodeContext context = {
|
758
|
-
xds_client(),
|
759
|
-
xds_client()->
|
784
|
+
xds_client(), ads_call_->xds_channel()->server_, &grpc_xds_client_trace,
|
785
|
+
xds_client()->def_pool_.ptr(), arena};
|
760
786
|
XdsResourceType::DecodeResult decode_result =
|
761
787
|
result_.type->Decode(context, serialized_resource);
|
762
788
|
// If we didn't already have the resource name from the Resource
|
@@ -789,8 +815,8 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
789
815
|
return;
|
790
816
|
}
|
791
817
|
// Cancel resource-does-not-exist timer, if needed.
|
792
|
-
auto timer_it =
|
793
|
-
if (timer_it !=
|
818
|
+
auto timer_it = ads_call_->state_map_.find(result_.type);
|
819
|
+
if (timer_it != ads_call_->state_map_.end()) {
|
794
820
|
auto it = timer_it->second.subscribed_resources.find(
|
795
821
|
parsed_resource_name->authority);
|
796
822
|
if (it != timer_it->second.subscribed_resources.end()) {
|
@@ -832,7 +858,7 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
832
858
|
"resource for which we previously ignored a deletion: type %s "
|
833
859
|
"name %s",
|
834
860
|
xds_client(),
|
835
|
-
|
861
|
+
ads_call_->xds_channel()->server_.server_uri().c_str(),
|
836
862
|
std::string(type_url).c_str(), std::string(resource_name).c_str());
|
837
863
|
resource_state.ignored_deletion = false;
|
838
864
|
}
|
@@ -841,7 +867,8 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
841
867
|
xds_client()->NotifyWatchersOnErrorLocked(
|
842
868
|
resource_state.watchers,
|
843
869
|
absl::UnavailableError(
|
844
|
-
absl::StrCat("invalid resource: ", decode_status.ToString()))
|
870
|
+
absl::StrCat("invalid resource: ", decode_status.ToString())),
|
871
|
+
result_.read_delay_handle);
|
845
872
|
UpdateResourceMetadataNacked(result_.version, decode_status.ToString(),
|
846
873
|
update_time_, &resource_state.meta);
|
847
874
|
return;
|
@@ -867,57 +894,57 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
867
894
|
// Notify watchers.
|
868
895
|
auto& watchers_list = resource_state.watchers;
|
869
896
|
xds_client()->work_serializer_.Schedule(
|
870
|
-
[watchers_list, value = resource_state.resource
|
897
|
+
[watchers_list, value = resource_state.resource,
|
898
|
+
read_delay_handle = result_.read_delay_handle]()
|
871
899
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&xds_client()->work_serializer_) {
|
872
900
|
for (const auto& p : watchers_list) {
|
873
|
-
p.first->OnGenericResourceChanged(value);
|
901
|
+
p.first->OnGenericResourceChanged(value, read_delay_handle);
|
874
902
|
}
|
875
903
|
},
|
876
904
|
DEBUG_LOCATION);
|
877
905
|
}
|
878
906
|
|
879
|
-
void XdsClient::
|
907
|
+
void XdsClient::XdsChannel::AdsCall::AdsResponseParser::
|
880
908
|
ResourceWrapperParsingFailed(size_t idx, absl::string_view message) {
|
881
909
|
result_.errors.emplace_back(
|
882
910
|
absl::StrCat("resource index ", idx, ": ", message));
|
883
911
|
}
|
884
912
|
|
885
913
|
//
|
886
|
-
// XdsClient::
|
914
|
+
// XdsClient::XdsChannel::AdsCall
|
887
915
|
//
|
888
916
|
|
889
|
-
XdsClient::
|
890
|
-
RefCountedPtr<RetryableCall<
|
891
|
-
: InternallyRefCounted<
|
892
|
-
GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace)
|
893
|
-
|
894
|
-
|
895
|
-
parent_(std::move(parent)) {
|
917
|
+
XdsClient::XdsChannel::AdsCall::AdsCall(
|
918
|
+
RefCountedPtr<RetryableCall<AdsCall>> retryable_call)
|
919
|
+
: InternallyRefCounted<AdsCall>(
|
920
|
+
GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace) ? "AdsCall"
|
921
|
+
: nullptr),
|
922
|
+
retryable_call_(std::move(retryable_call)) {
|
896
923
|
GPR_ASSERT(xds_client() != nullptr);
|
897
924
|
// Init the ADS call.
|
898
925
|
const char* method =
|
899
926
|
"/envoy.service.discovery.v3.AggregatedDiscoveryService/"
|
900
927
|
"StreamAggregatedResources";
|
901
|
-
|
928
|
+
streaming_call_ = xds_channel()->transport_->CreateStreamingCall(
|
902
929
|
method, std::make_unique<StreamEventHandler>(
|
903
930
|
// Passing the initial ref here. This ref will go away when
|
904
931
|
// the StreamEventHandler is destroyed.
|
905
|
-
RefCountedPtr<
|
906
|
-
GPR_ASSERT(
|
932
|
+
RefCountedPtr<AdsCall>(this)));
|
933
|
+
GPR_ASSERT(streaming_call_ != nullptr);
|
907
934
|
// Start the call.
|
908
935
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
909
936
|
gpr_log(GPR_INFO,
|
910
937
|
"[xds_client %p] xds server %s: starting ADS call "
|
911
|
-
"(
|
912
|
-
xds_client(),
|
913
|
-
|
938
|
+
"(ads_call: %p, streaming_call: %p)",
|
939
|
+
xds_client(), xds_channel()->server_.server_uri().c_str(), this,
|
940
|
+
streaming_call_.get());
|
914
941
|
}
|
915
942
|
// If this is a reconnect, add any necessary subscriptions from what's
|
916
943
|
// already in the cache.
|
917
944
|
for (const auto& a : xds_client()->authority_state_map_) {
|
918
945
|
const std::string& authority = a.first;
|
919
946
|
// Skip authorities that are not using this xDS channel.
|
920
|
-
if (a.second.
|
947
|
+
if (a.second.xds_channel != xds_channel()) continue;
|
921
948
|
for (const auto& t : a.second.resource_map) {
|
922
949
|
const XdsResourceType* type = t.first;
|
923
950
|
for (const auto& r : t.second) {
|
@@ -930,17 +957,19 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
|
|
930
957
|
for (const auto& p : state_map_) {
|
931
958
|
SendMessageLocked(p.first);
|
932
959
|
}
|
960
|
+
streaming_call_->StartRecvMessage();
|
933
961
|
}
|
934
962
|
|
935
|
-
void XdsClient::
|
963
|
+
void XdsClient::XdsChannel::AdsCall::Orphan() {
|
936
964
|
state_map_.clear();
|
937
965
|
// Note that the initial ref is held by the StreamEventHandler, which
|
938
|
-
// will be destroyed when
|
939
|
-
// here, since there may be other refs held to
|
940
|
-
|
966
|
+
// will be destroyed when streaming_call_ is destroyed, which may not happen
|
967
|
+
// here, since there may be other refs held to streaming_call_ by internal
|
968
|
+
// callbacks.
|
969
|
+
streaming_call_.reset();
|
941
970
|
}
|
942
971
|
|
943
|
-
void XdsClient::
|
972
|
+
void XdsClient::XdsChannel::AdsCall::SendMessageLocked(
|
944
973
|
const XdsResourceType* type)
|
945
974
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
946
975
|
// Buffer message sending if an existing message is in flight.
|
@@ -950,24 +979,25 @@ void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
|
|
950
979
|
}
|
951
980
|
auto& state = state_map_[type];
|
952
981
|
std::string serialized_message = xds_client()->api_.CreateAdsRequest(
|
953
|
-
type->type_url(),
|
954
|
-
ResourceNamesForRequest(type), state.status,
|
982
|
+
type->type_url(), xds_channel()->resource_type_version_map_[type],
|
983
|
+
state.nonce, ResourceNamesForRequest(type), state.status,
|
984
|
+
!sent_initial_message_);
|
955
985
|
sent_initial_message_ = true;
|
956
986
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
957
987
|
gpr_log(GPR_INFO,
|
958
988
|
"[xds_client %p] xds server %s: sending ADS request: type=%s "
|
959
989
|
"version=%s nonce=%s error=%s",
|
960
|
-
xds_client(),
|
990
|
+
xds_client(), xds_channel()->server_.server_uri().c_str(),
|
961
991
|
std::string(type->type_url()).c_str(),
|
962
|
-
|
992
|
+
xds_channel()->resource_type_version_map_[type].c_str(),
|
963
993
|
state.nonce.c_str(), state.status.ToString().c_str());
|
964
994
|
}
|
965
995
|
state.status = absl::OkStatus();
|
966
|
-
|
996
|
+
streaming_call_->SendMessage(std::move(serialized_message));
|
967
997
|
send_message_pending_ = type;
|
968
998
|
}
|
969
999
|
|
970
|
-
void XdsClient::
|
1000
|
+
void XdsClient::XdsChannel::AdsCall::SubscribeLocked(
|
971
1001
|
const XdsResourceType* type, const XdsResourceName& name, bool delay_send) {
|
972
1002
|
auto& state = state_map_[type].subscribed_resources[name.authority][name.key];
|
973
1003
|
if (state == nullptr) {
|
@@ -976,7 +1006,7 @@ void XdsClient::ChannelState::AdsCallState::SubscribeLocked(
   }
 }
 
-void XdsClient::
+void XdsClient::XdsChannel::AdsCall::UnsubscribeLocked(
     const XdsResourceType* type, const XdsResourceName& name,
     bool delay_unsubscription) {
   auto& type_state_map = state_map_[type];
@@ -993,14 +1023,14 @@ void XdsClient::ChannelState::AdsCallState::UnsubscribeLocked(
   }
 }
 
-bool XdsClient::
+bool XdsClient::XdsChannel::AdsCall::HasSubscribedResources() const {
   for (const auto& p : state_map_) {
     if (!p.second.subscribed_resources.empty()) return true;
   }
   return false;
 }
 
-void XdsClient::
+void XdsClient::XdsChannel::AdsCall::OnRequestSent(bool ok) {
   MutexLock lock(&xds_client()->mu_);
   // For each resource that was in the message we just sent, start the
   // resource timer if needed.
@@ -1032,25 +1062,28 @@ void XdsClient::ChannelState::AdsCallState::OnRequestSent(bool ok) {
   }
 }
 
-void XdsClient::
-
+void XdsClient::XdsChannel::AdsCall::OnRecvMessage(absl::string_view payload) {
+  // Needs to be destroyed after the mutex is released.
+  RefCountedPtr<ReadDelayHandle> read_delay_handle;
   {
     MutexLock lock(&xds_client()->mu_);
     if (!IsCurrentCallOnChannel()) return;
     // Parse and validate the response.
     AdsResponseParser parser(this);
     absl::Status status = xds_client()->api_.ParseAdsResponse(payload, &parser);
+    // This includes a handle that will trigger an ADS read.
+    AdsResponseParser::Result result = parser.TakeResult();
+    read_delay_handle = std::move(result.read_delay_handle);
    if (!status.ok()) {
       // Ignore unparsable response.
       gpr_log(GPR_ERROR,
               "[xds_client %p] xds server %s: error parsing ADS response (%s) "
               "-- ignoring",
-              xds_client(),
+              xds_client(), xds_channel()->server_.server_uri().c_str(),
               status.ToString().c_str());
     } else {
       seen_response_ = true;
-
-      AdsResponseParser::Result result = parser.TakeResult();
+      xds_channel()->status_ = absl::OkStatus();
       // Update nonce.
       auto& state = state_map_[result.type];
       state.nonce = result.nonce;
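The ReadDelayHandle introduced here is a read-side flow-control token: it is pulled out of the parsed result, handed to watchers along with the update, and the next read on the ADS stream only starts once every holder has released it. A minimal sketch of that idea, using a hypothetical handle type and a stand-in "start next read" callback rather than gRPC's internal classes:

#include <functional>
#include <memory>

// When every watcher that was handed a shared_ptr to this handle drops it,
// the destructor runs and the next read is started.
class ReadDelayHandleSketch {
 public:
  explicit ReadDelayHandleSketch(std::function<void()> start_next_read)
      : start_next_read_(std::move(start_next_read)) {}
  ~ReadDelayHandleSketch() { start_next_read_(); }

 private:
  std::function<void()> start_next_read_;
};

// Usage sketch: the call creates the handle when a message arrives and passes
// it along with the update; it never calls the read method directly.
//   auto handle = std::make_shared<ReadDelayHandleSketch>(
//       [stream] { stream->StartRead(); });
//   NotifyWatchers(update, handle);  // watchers keep `handle` until done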
@@ -1063,7 +1096,7 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
                 "[xds_client %p] xds server %s: ADS response invalid for "
                 "resource "
                 "type %s version %s, will NACK: nonce=%s status=%s",
-                xds_client(),
+                xds_client(), xds_channel()->server_.server_uri().c_str(),
                 result.type_url.c_str(), result.version.c_str(),
                 state.nonce.c_str(), state.status.ToString().c_str());
       }
@@ -1073,7 +1106,7 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
         const std::string& authority = a.first;
         AuthorityState& authority_state = a.second;
         // Skip authorities that are not using this xDS channel.
-        if (authority_state.
+        if (authority_state.xds_channel != xds_channel()) continue;
         auto seen_authority_it = result.resources_seen.find(authority);
         // Find this resource type.
         auto type_it = authority_state.resource_map.find(result.type);
@@ -1093,12 +1126,13 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
           // that the resource does not exist. For that case, we rely on
           // the request timeout instead.
           if (resource_state.resource == nullptr) continue;
-          if (
+          if (xds_channel()->server_.IgnoreResourceDeletion()) {
             if (!resource_state.ignored_deletion) {
               gpr_log(GPR_ERROR,
                       "[xds_client %p] xds server %s: ignoring deletion "
                       "for resource type %s name %s",
-                      xds_client(),
+                      xds_client(),
+                      xds_channel()->server_.server_uri().c_str(),
                       result.type_url.c_str(),
                       XdsClient::ConstructFullXdsResourceName(
                           authority, result.type_url.c_str(), resource_key)
@@ -1110,7 +1144,7 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
             resource_state.meta.client_status =
                 XdsApi::ResourceMetadata::DOES_NOT_EXIST;
             xds_client()->NotifyWatchersOnResourceDoesNotExist(
-                resource_state.watchers);
+                resource_state.watchers, read_delay_handle);
           }
         }
       }
@@ -1118,14 +1152,8 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
     }
     // If we had valid resources or the update was empty, update the version.
     if (result.have_valid_resources || result.errors.empty()) {
-
+      xds_channel()->resource_type_version_map_[result.type] =
           std::move(result.version);
-      // Start load reporting if needed.
-      auto& lrs_call = chand()->lrs_calld_;
-      if (lrs_call != nullptr) {
-        LrsCallState* lrs_calld = lrs_call->calld();
-        if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
-      }
     }
     // Send ACK or NACK.
     SendMessageLocked(result.type);
@@ -1134,16 +1162,16 @@ void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
   xds_client()->work_serializer_.DrainQueue();
 }
 
-void XdsClient::
-    absl::Status status) {
+void XdsClient::XdsChannel::AdsCall::OnStatusReceived(absl::Status status) {
   {
     MutexLock lock(&xds_client()->mu_);
     if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
       gpr_log(GPR_INFO,
               "[xds_client %p] xds server %s: ADS call status received "
-              "(
-              xds_client(),
-              this,
+              "(xds_channel=%p, ads_call=%p, streaming_call=%p): %s",
+              xds_client(), xds_channel()->server_.server_uri().c_str(),
+              xds_channel(), this, streaming_call_.get(),
+              status.ToString().c_str());
     }
     // Cancel any does-not-exist timers that may be pending.
     for (const auto& p : state_map_) {
@@ -1156,12 +1184,12 @@ void XdsClient::ChannelState::AdsCallState::OnStatusReceived(
     // Ignore status from a stale call.
     if (IsCurrentCallOnChannel()) {
       // Try to restart the call.
-
+      retryable_call_->OnCallFinishedLocked();
       // If we didn't receive a response on the stream, report the
       // stream failure as a connectivity failure, which will report the
       // error to all watchers of resources on this channel.
       if (!seen_response_) {
-
+        xds_channel()->SetChannelStatusLocked(absl::UnavailableError(
             absl::StrCat("xDS call failed with no responses received; status: ",
                          status.ToString())));
       }
@@ -1170,15 +1198,15 @@ void XdsClient::ChannelState::AdsCallState::OnStatusReceived(
   xds_client()->work_serializer_.DrainQueue();
 }
 
-bool XdsClient::
+bool XdsClient::XdsChannel::AdsCall::IsCurrentCallOnChannel() const {
   // If the retryable ADS call is null (which only happens when the xds
   // channel is shutting down), all the ADS calls are stale.
-  if (
-  return this ==
+  if (xds_channel()->ads_call_ == nullptr) return false;
+  return this == xds_channel()->ads_call_->call();
 }
 
 std::vector<std::string>
-XdsClient::
+XdsClient::XdsChannel::AdsCall::ResourceNamesForRequest(
     const XdsResourceType* type) {
   std::vector<std::string> resource_names;
   auto it = state_map_.find(type);
@@ -1198,39 +1226,107 @@ XdsClient::ChannelState::AdsCallState::ResourceNamesForRequest(
 }
 
 //
-// XdsClient::
+// XdsClient::XdsChannel::LrsCall::Timer
 //
 
-void XdsClient::
-  if (timer_handle_.has_value()
-
+void XdsClient::XdsChannel::LrsCall::Timer::Orphan() {
+  if (timer_handle_.has_value()) {
+    xds_client()->engine()->Cancel(*timer_handle_);
     timer_handle_.reset();
-    Unref(DEBUG_LOCATION, "Orphan");
   }
+  Unref(DEBUG_LOCATION, "Orphan");
 }
 
-void XdsClient::
-    ScheduleNextReportLocked() {
+void XdsClient::XdsChannel::LrsCall::Timer::ScheduleNextReportLocked() {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
     gpr_log(GPR_INFO,
-            "[xds_client %p] xds server %s: scheduling load report
-            xds_client(),
-
-
-
-
-
-
-
-
+            "[xds_client %p] xds server %s: scheduling next load report in %s",
+            xds_client(),
+            lrs_call_->xds_channel()->server_.server_uri().c_str(),
+            lrs_call_->load_reporting_interval_.ToString().c_str());
+  }
+  timer_handle_ = xds_client()->engine()->RunAfter(
+      lrs_call_->load_reporting_interval_,
+      [self = Ref(DEBUG_LOCATION, "timer")]() {
+        ApplicationCallbackExecCtx callback_exec_ctx;
+        ExecCtx exec_ctx;
+        self->OnNextReportTimer();
+      });
 }
 
-
+void XdsClient::XdsChannel::LrsCall::Timer::OnNextReportTimer() {
   MutexLock lock(&xds_client()->mu_);
   timer_handle_.reset();
-  if (
-
-
+  if (IsCurrentTimerOnCall()) lrs_call_->SendReportLocked();
+}
+
+//
+// XdsClient::XdsChannel::LrsCall
+//
+
+XdsClient::XdsChannel::LrsCall::LrsCall(
+    RefCountedPtr<RetryableCall<LrsCall>> retryable_call)
+    : InternallyRefCounted<LrsCall>(
+          GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace) ? "LrsCall"
+                                                                  : nullptr),
+      retryable_call_(std::move(retryable_call)) {
+  // Init the LRS call. Note that the call will progress every time there's
+  // activity in xds_client()->interested_parties_, which is comprised of
+  // the polling entities from client_channel.
+  GPR_ASSERT(xds_client() != nullptr);
+  const char* method =
+      "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats";
+  streaming_call_ = xds_channel()->transport_->CreateStreamingCall(
+      method, std::make_unique<StreamEventHandler>(
+                  // Passing the initial ref here. This ref will go away when
+                  // the StreamEventHandler is destroyed.
+                  RefCountedPtr<LrsCall>(this)));
+  GPR_ASSERT(streaming_call_ != nullptr);
+  // Start the call.
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+    gpr_log(GPR_INFO,
+            "[xds_client %p] xds server %s: starting LRS call (lrs_call=%p, "
+            "streaming_call=%p)",
+            xds_client(), xds_channel()->server_.server_uri().c_str(), this,
+            streaming_call_.get());
+  }
+  // Send the initial request.
+  std::string serialized_payload = xds_client()->api_.CreateLrsInitialRequest();
+  SendMessageLocked(std::move(serialized_payload));
+  // Read initial response.
+  streaming_call_->StartRecvMessage();
+}
+
+void XdsClient::XdsChannel::LrsCall::Orphan() {
+  timer_.reset();
+  // Note that the initial ref is held by the StreamEventHandler, which
+  // will be destroyed when streaming_call_ is destroyed, which may not happen
+  // here, since there may be other refs held to streaming_call_ by internal
+  // callbacks.
+  streaming_call_.reset();
+}
+
+void XdsClient::XdsChannel::LrsCall::MaybeScheduleNextReportLocked() {
+  // If there are no more registered stats to report, cancel the call.
+  auto it =
+      xds_client()->xds_load_report_server_map_.find(&xds_channel()->server_);
+  if (it == xds_client()->xds_load_report_server_map_.end() ||
+      it->second.load_report_map.empty()) {
+    it->second.xds_channel->StopLrsCallLocked();
+    return;
+  }
+  // Don't start if the previous send_message op hasn't completed yet.
+  // If this happens, we'll be called again from OnRequestSent().
+  if (send_message_pending_) return;
+  // Don't start if no LRS response has arrived.
+  if (!seen_response()) return;
+  // If there is no timer, create one.
+  // This happens on the initial response and whenever the interval changes.
+  if (timer_ == nullptr) {
+    timer_ = MakeOrphanable<Timer>(Ref(DEBUG_LOCATION, "LRS timer"));
+  }
+  // Schedule the next load report.
+  timer_->ScheduleNextReportLocked();
 }
 
 namespace {
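The old Reporter helper is replaced by a Timer owned directly by LrsCall: MaybeScheduleNextReportLocked() decides whether reporting should continue, and the timer arms itself through the event engine's RunAfter(). A simplified sketch of that self-rescheduling pattern, using a hypothetical Scheduler interface in place of gRPC's EventEngine:

#include <chrono>
#include <functional>
#include <optional>

struct TaskHandle { int id; };

// Hypothetical stand-in for the event engine: run a closure after a delay,
// return a handle that can be used to cancel it.
class Scheduler {
 public:
  virtual ~Scheduler() = default;
  virtual TaskHandle RunAfter(std::chrono::milliseconds delay,
                              std::function<void()> fn) = 0;
  virtual bool Cancel(TaskHandle handle) = 0;
};

class ReportTimer {
 public:
  ReportTimer(Scheduler* scheduler, std::chrono::milliseconds interval,
              std::function<void()> send_report)
      : scheduler_(scheduler),
        interval_(interval),
        send_report_(std::move(send_report)) {}

  void Schedule() {
    // Arm the timer; when it fires, send the report. The owner re-arms it
    // (by calling Schedule() again) once the report's send completes.
    handle_ = scheduler_->RunAfter(interval_, [this] {
      handle_.reset();
      send_report_();
    });
  }

  void Cancel() {
    if (handle_.has_value()) {
      scheduler_->Cancel(*handle_);
      handle_.reset();
    }
  }

 private:
  Scheduler* scheduler_;
  std::chrono::milliseconds interval_;
  std::function<void()> send_report_;
  std::optional<TaskHandle> handle_;
};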
@@ -1249,142 +1345,43 @@ bool LoadReportCountersAreZero(const XdsApi::ClusterLoadReportMap& snapshot) {
 
 }  // namespace
 
-
+void XdsClient::XdsChannel::LrsCall::SendReportLocked() {
   // Construct snapshot from all reported stats.
   XdsApi::ClusterLoadReportMap snapshot =
-      xds_client()->BuildLoadReportSnapshotLocked(
-
-          parent_->cluster_names_);
+      xds_client()->BuildLoadReportSnapshotLocked(
+          xds_channel()->server_, send_all_clusters_, cluster_names_);
   // Skip client load report if the counters were all zero in the last
   // report and they are still zero in this one.
   const bool old_val = last_report_counters_were_zero_;
   last_report_counters_were_zero_ = LoadReportCountersAreZero(snapshot);
   if (old_val && last_report_counters_were_zero_) {
-
-
-    if (it == xds_client()->xds_load_report_server_map_.end() ||
-        it->second.load_report_map.empty()) {
-      it->second.channel_state->StopLrsCallLocked();
-      return true;
-    }
-    ScheduleNextReportLocked();
-    return false;
+    MaybeScheduleNextReportLocked();
+    return;
   }
   // Send a request that contains the snapshot.
   std::string serialized_payload =
       xds_client()->api_.CreateLrsRequest(std::move(snapshot));
-
-  parent_->send_message_pending_ = true;
-  return false;
-}
-
-void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked() {
-  // If a reporter starts a send_message op, then the reporting interval
-  // changes and we destroy that reporter and create a new one, and then
-  // the send_message op started by the old reporter finishes, this
-  // method will be called even though it was for a completion started
-  // by the old reporter. In that case, the timer will be pending, so
-  // we just ignore the completion and wait for the timer to fire.
-  if (timer_handle_.has_value()) return;
-  // If there are no more registered stats to report, cancel the call.
-  auto it = xds_client()->xds_load_report_server_map_.find(
-      &parent_->chand()->server_);
-  if (it == xds_client()->xds_load_report_server_map_.end()) return;
-  if (it->second.load_report_map.empty()) {
-    if (it->second.channel_state != nullptr) {
-      it->second.channel_state->StopLrsCallLocked();
-    }
-    return;
-  }
-  // Otherwise, schedule the next load report.
-  ScheduleNextReportLocked();
+  SendMessageLocked(std::move(serialized_payload));
 }
 
-
-// XdsClient::ChannelState::LrsCallState
-//
-
-XdsClient::ChannelState::LrsCallState::LrsCallState(
-    RefCountedPtr<RetryableCall<LrsCallState>> parent)
-    : InternallyRefCounted<LrsCallState>(
-          GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace)
-              ? "LrsCallState"
-              : nullptr),
-      parent_(std::move(parent)) {
-  // Init the LRS call. Note that the call will progress every time there's
-  // activity in xds_client()->interested_parties_, which is comprised of
-  // the polling entities from client_channel.
-  GPR_ASSERT(xds_client() != nullptr);
-  const char* method =
-      "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats";
-  call_ = chand()->transport_->CreateStreamingCall(
-      method, std::make_unique<StreamEventHandler>(
-                  // Passing the initial ref here. This ref will go away when
-                  // the StreamEventHandler is destroyed.
-                  RefCountedPtr<LrsCallState>(this)));
-  GPR_ASSERT(call_ != nullptr);
-  // Start the call.
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
-    gpr_log(GPR_INFO,
-            "[xds_client %p] xds server %s: starting LRS call (calld=%p, "
-            "call=%p)",
-            xds_client(), chand()->server_.server_uri().c_str(), this,
-            call_.get());
-  }
-  // Send the initial request.
-  std::string serialized_payload = xds_client()->api_.CreateLrsInitialRequest();
-  call_->SendMessage(std::move(serialized_payload));
+void XdsClient::XdsChannel::LrsCall::SendMessageLocked(std::string payload) {
   send_message_pending_ = true;
+  streaming_call_->SendMessage(std::move(payload));
 }
 
-void XdsClient::
-  reporter_.reset();
-  // Note that the initial ref is held by the StreamEventHandler, which
-  // will be destroyed when call_ is destroyed, which may not happen
-  // here, since there may be other refs held to call_ by internal callbacks.
-  call_.reset();
-}
-
-void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
-  // Don't start again if already started.
-  if (reporter_ != nullptr) return;
-  // Don't start if the previous send_message op (of the initial request or
-  // the last report of the previous reporter) hasn't completed.
-  if (call_ != nullptr && send_message_pending_) return;
-  // Don't start if no LRS response has arrived.
-  if (!seen_response()) return;
-  // Don't start if the ADS call hasn't received any valid response. Note that
-  // this must be the first channel because it is the current channel but its
-  // ADS call hasn't seen any response.
-  if (chand()->ads_calld_ == nullptr ||
-      chand()->ads_calld_->calld() == nullptr ||
-      !chand()->ads_calld_->calld()->seen_response()) {
-    return;
-  }
-  // Start reporting.
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
-    gpr_log(GPR_INFO, "[xds_client %p] xds server %s: creating load reporter",
-            xds_client(), chand()->server_.server_uri().c_str());
-  }
-  reporter_ = MakeOrphanable<Reporter>(
-      Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
-}
-
-void XdsClient::ChannelState::LrsCallState::OnRequestSent(bool /*ok*/) {
+void XdsClient::XdsChannel::LrsCall::OnRequestSent() {
   MutexLock lock(&xds_client()->mu_);
   send_message_pending_ = false;
-  if (
-    reporter_->OnReportDoneLocked();
-  } else {
-    MaybeStartReportingLocked();
-  }
+  if (IsCurrentCallOnChannel()) MaybeScheduleNextReportLocked();
 }
 
-void XdsClient::
-    absl::string_view payload) {
+void XdsClient::XdsChannel::LrsCall::OnRecvMessage(absl::string_view payload) {
   MutexLock lock(&xds_client()->mu_);
   // If we're no longer the current call, ignore the result.
   if (!IsCurrentCallOnChannel()) return;
+  // Start recv after any code branch
+  auto cleanup = absl::MakeCleanup(
+      [call = streaming_call_.get()]() { call->StartRecvMessage(); });
   // Parse the response.
   bool send_all_clusters = false;
   std::set<std::string> new_cluster_names;
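The new OnRecvMessage() uses absl::MakeCleanup so that StartRecvMessage() is requested on every return path, including the early-out error branches below. A standalone illustration of that Abseil cleanup pattern (requires absl/cleanup/cleanup.h; the function and messages here are made up for the example):

#include <cstdio>

#include "absl/cleanup/cleanup.h"

bool ProcessMessage(bool parse_ok) {
  // The cleanup runs when it goes out of scope, no matter which return is hit,
  // like the StartRecvMessage() call in LrsCall::OnRecvMessage().
  auto start_next_read = absl::MakeCleanup([] {
    std::puts("starting next read");
  });
  if (!parse_ok) return false;  // cleanup still runs here
  std::puts("message handled");
  return true;                  // ...and here
}

int main() {
  ProcessMessage(false);
  ProcessMessage(true);
}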
@@ -1395,7 +1392,7 @@ void XdsClient::ChannelState::LrsCallState::OnRecvMessage(
   if (!status.ok()) {
     gpr_log(GPR_ERROR,
             "[xds_client %p] xds server %s: LRS response parsing failed: %s",
-            xds_client(),
+            xds_client(), xds_channel()->server_.server_uri().c_str(),
             status.ToString().c_str());
     return;
   }
@@ -1406,7 +1403,7 @@ void XdsClient::ChannelState::LrsCallState::OnRecvMessage(
             "[xds_client %p] xds server %s: LRS response received, %" PRIuPTR
             " cluster names, send_all_clusters=%d, load_report_interval=%" PRId64
             "ms",
-            xds_client(),
+            xds_client(), xds_channel()->server_.server_uri().c_str(),
             new_cluster_names.size(), send_all_clusters,
             new_load_reporting_interval.millis());
   size_t i = 0;
@@ -1423,7 +1420,7 @@ void XdsClient::ChannelState::LrsCallState::OnRecvMessage(
       gpr_log(GPR_INFO,
               "[xds_client %p] xds server %s: increased load_report_interval "
               "to minimum value %dms",
-              xds_client(),
+              xds_client(), xds_channel()->server_.server_uri().c_str(),
               GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
     }
   }
@@ -1435,42 +1432,46 @@ void XdsClient::ChannelState::LrsCallState::OnRecvMessage(
       gpr_log(GPR_INFO,
               "[xds_client %p] xds server %s: incoming LRS response identical "
               "to current, ignoring.",
-              xds_client(),
+              xds_client(), xds_channel()->server_.server_uri().c_str());
     }
     return;
   }
-  //
-
+  // If the interval has changed, we'll need to restart the timer below.
+  const bool restart_timer =
+      load_reporting_interval_ != new_load_reporting_interval;
   // Record the new config.
   send_all_clusters_ = send_all_clusters;
   cluster_names_ = std::move(new_cluster_names);
   load_reporting_interval_ = new_load_reporting_interval;
-  //
-
+  // Restart timer if needed.
+  if (restart_timer) {
+    timer_.reset();
+    MaybeScheduleNextReportLocked();
+  }
 }
 
-void XdsClient::
-    absl::Status status) {
+void XdsClient::XdsChannel::LrsCall::OnStatusReceived(absl::Status status) {
   MutexLock lock(&xds_client()->mu_);
   if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
     gpr_log(GPR_INFO,
            "[xds_client %p] xds server %s: LRS call status received "
-            "(
-            xds_client(),
-
+            "(xds_channel=%p, lrs_call=%p, streaming_call=%p): %s",
+            xds_client(), xds_channel()->server_.server_uri().c_str(),
+            xds_channel(), this, streaming_call_.get(),
+            status.ToString().c_str());
   }
   // Ignore status from a stale call.
   if (IsCurrentCallOnChannel()) {
     // Try to restart the call.
-
+    retryable_call_->OnCallFinishedLocked();
   }
 }
 
-bool XdsClient::
+bool XdsClient::XdsChannel::LrsCall::IsCurrentCallOnChannel() const {
   // If the retryable LRS call is null (which only happens when the xds
   // channel is shutting down), all the LRS calls are stale.
-  if (
-  return this ==
+  if (xds_channel()->lrs_call_ == nullptr) return false;
+  return this == xds_channel()->lrs_call_->call();
 }
 
 //
@@ -1490,7 +1491,7 @@ XdsClient::XdsClient(
       transport_factory_(std::move(transport_factory)),
       request_timeout_(resource_request_timeout),
       xds_federation_enabled_(XdsFederationEnabled()),
-      api_(this, &grpc_xds_client_trace, bootstrap_->node(), &
+      api_(this, &grpc_xds_client_trace, bootstrap_->node(), &def_pool_,
           std::move(user_agent_name), std::move(user_agent_version)),
       work_serializer_(engine),
       engine_(std::move(engine)) {
@@ -1521,24 +1522,24 @@ void XdsClient::Orphan() {
   invalid_watchers_.clear();
   // We may still be sending lingering queued load report data, so don't
   // just clear the load reporting map, but we do want to clear the refs
-  // we're holding to the
+  // we're holding to the XdsChannel objects, to make sure that
   // everything shuts down properly.
   for (auto& p : xds_load_report_server_map_) {
-    p.second.
+    p.second.xds_channel.reset(DEBUG_LOCATION, "XdsClient::Orphan()");
   }
 }
 
-RefCountedPtr<XdsClient::
+RefCountedPtr<XdsClient::XdsChannel> XdsClient::GetOrCreateXdsChannelLocked(
     const XdsBootstrap::XdsServer& server, const char* reason) {
   auto it = xds_server_channel_map_.find(&server);
   if (it != xds_server_channel_map_.end()) {
     return it->second->Ref(DEBUG_LOCATION, reason);
   }
   // Channel not found, so create a new one.
-  auto
-      WeakRef(DEBUG_LOCATION, "
-  xds_server_channel_map_[&server] =
-  return
+  auto xds_channel =
+      MakeRefCounted<XdsChannel>(WeakRef(DEBUG_LOCATION, "XdsChannel"), server);
+  xds_server_channel_map_[&server] = xds_channel.get();
+  return xds_channel;
 }
 
 void XdsClient::WatchResource(const XdsResourceType* type,
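GetOrCreateXdsChannelLocked() keeps a single XdsChannel per xDS server: the map holds a non-owning entry while callers hold the owning references, so a channel can go away once its last user releases it. A loose sketch of that get-or-create ownership pattern, using std::shared_ptr/std::weak_ptr in place of gRPC's ref-counting and hypothetical names:

#include <map>
#include <memory>
#include <string>

class Channel {
 public:
  explicit Channel(std::string target) : target_(std::move(target)) {}
  const std::string& target() const { return target_; }

 private:
  std::string target_;
};

class ChannelCache {
 public:
  std::shared_ptr<Channel> GetOrCreate(const std::string& target) {
    auto it = channels_.find(target);
    if (it != channels_.end()) {
      if (auto existing = it->second.lock()) return existing;  // still alive
    }
    auto channel = std::make_shared<Channel>(target);
    channels_[target] = channel;  // non-owning entry
    return channel;
  }

 private:
  std::map<std::string, std::weak_ptr<Channel>> channels_;
};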
@@ -1555,7 +1556,7 @@ void XdsClient::WatchResource(const XdsResourceType* type,
     work_serializer_.Run(
         [watcher = std::move(watcher), status = std::move(status)]()
             ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
-              watcher->OnError(status);
+              watcher->OnError(status, ReadDelayHandle::NoWait());
             },
         DEBUG_LOCATION);
   };
@@ -1606,7 +1607,8 @@ void XdsClient::WatchResource(const XdsResourceType* type,
       work_serializer_.Schedule(
           [watcher, value = resource_state.resource]()
               ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
-                watcher->OnGenericResourceChanged(value
+                watcher->OnGenericResourceChanged(value,
+                                                  ReadDelayHandle::NoWait());
               },
           DEBUG_LOCATION);
     } else if (resource_state.meta.client_status ==
@@ -1618,7 +1620,7 @@ void XdsClient::WatchResource(const XdsResourceType* type,
       }
       work_serializer_.Schedule(
           [watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
-            watcher->OnResourceDoesNotExist();
+            watcher->OnResourceDoesNotExist(ReadDelayHandle::NoWait());
           },
           DEBUG_LOCATION);
     } else if (resource_state.meta.client_status ==
@@ -1638,18 +1640,19 @@ void XdsClient::WatchResource(const XdsResourceType* type,
       work_serializer_.Schedule(
           [watcher, details = std::move(details)]()
               ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
-                watcher->OnError(absl::UnavailableError(
-
+                watcher->OnError(absl::UnavailableError(absl::StrCat(
+                                     "invalid resource: ", details)),
+                                 ReadDelayHandle::NoWait());
               },
           DEBUG_LOCATION);
     }
     // If the authority doesn't yet have a channel, set it, creating it if
     // needed.
-    if (authority_state.
-    authority_state.
-
+    if (authority_state.xds_channel == nullptr) {
+      authority_state.xds_channel =
+          GetOrCreateXdsChannelLocked(*xds_server, "start watch");
     }
-    absl::Status channel_status = authority_state.
+    absl::Status channel_status = authority_state.xds_channel->status();
     if (!channel_status.ok()) {
       if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
         gpr_log(GPR_INFO,
@@ -1660,11 +1663,11 @@ void XdsClient::WatchResource(const XdsResourceType* type,
       work_serializer_.Schedule(
           [watcher = std::move(watcher), status = std::move(channel_status)]()
               ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) mutable {
-                watcher->OnError(std::move(status));
+                watcher->OnError(std::move(status), ReadDelayHandle::NoWait());
              },
          DEBUG_LOCATION);
     }
-    authority_state.
+    authority_state.xds_channel->SubscribeLocked(type, *resource_name);
   }
   work_serializer_.DrainQueue();
 }
@@ -1702,13 +1705,13 @@ void XdsClient::CancelResourceWatch(const XdsResourceType* type,
             this, std::string(type->type_url()).c_str(),
             std::string(name).c_str());
   }
-  authority_state.
-
+  authority_state.xds_channel->UnsubscribeLocked(type, *resource_name,
+                                                 delay_unsubscription);
   type_map.erase(resource_it);
   if (type_map.empty()) {
     authority_state.resource_map.erase(type_it);
     if (authority_state.resource_map.empty()) {
-      authority_state.
+      authority_state.xds_channel.reset();
     }
   }
 }
@@ -1722,7 +1725,7 @@ void XdsClient::MaybeRegisterResourceTypeLocked(
     return;
   }
   resource_types_.emplace(resource_type->type_url(), resource_type);
-  resource_type->InitUpbSymtab(this,
+  resource_type->InitUpbSymtab(this, def_pool_.ptr());
 }
 
 const XdsResourceType* XdsClient::GetResourceTypeLocked(
@@ -1792,9 +1795,9 @@ RefCountedPtr<XdsClusterDropStats> XdsClient::AddClusterDropStats(
     // in the load_report_map_ key, so that they have the same lifetime.
     auto server_it =
         xds_load_report_server_map_.emplace(server, LoadReportServer()).first;
-    if (server_it->second.
-      server_it->second.
-          *server, "load report map (drop stats)");
+    if (server_it->second.xds_channel == nullptr) {
+      server_it->second.xds_channel =
+          GetOrCreateXdsChannelLocked(*server, "load report map (drop stats)");
     }
     auto load_report_it = server_it->second.load_report_map
                               .emplace(std::move(key), LoadReportState())
@@ -1814,7 +1817,7 @@ RefCountedPtr<XdsClusterDropStats> XdsClient::AddClusterDropStats(
           load_report_it->first.second /*eds_service_name*/);
       load_report_state.drop_stats = cluster_drop_stats.get();
     }
-    server_it->second.
+    server_it->second.xds_channel->MaybeStartLrsCall();
   }
   work_serializer_.DrainQueue();
   return cluster_drop_stats;
@@ -1860,8 +1863,8 @@ RefCountedPtr<XdsClusterLocalityStats> XdsClient::AddClusterLocalityStats(
     // in the load_report_map_ key, so that they have the same lifetime.
     auto server_it =
         xds_load_report_server_map_.emplace(server, LoadReportServer()).first;
-    if (server_it->second.
-      server_it->second.
+    if (server_it->second.xds_channel == nullptr) {
+      server_it->second.xds_channel = GetOrCreateXdsChannelLocked(
           *server, "load report map (locality stats)");
     }
     auto load_report_it = server_it->second.load_report_map
@@ -1885,7 +1888,7 @@ RefCountedPtr<XdsClusterLocalityStats> XdsClient::AddClusterLocalityStats(
           std::move(locality));
       locality_state.locality_stats = cluster_locality_stats.get();
     }
-    server_it->second.
+    server_it->second.xds_channel->MaybeStartLrsCall();
   }
   work_serializer_.DrainQueue();
   return cluster_locality_stats;
@@ -1927,7 +1930,7 @@ void XdsClient::ResetBackoff() {
 void XdsClient::NotifyWatchersOnErrorLocked(
     const std::map<ResourceWatcherInterface*,
                    RefCountedPtr<ResourceWatcherInterface>>& watchers,
-    absl::Status status) {
+    absl::Status status, RefCountedPtr<ReadDelayHandle> read_delay_handle) {
   const auto* node = bootstrap_->node();
   if (node != nullptr) {
     status = absl::Status(
@@ -1935,10 +1938,11 @@ void XdsClient::NotifyWatchersOnErrorLocked(
         absl::StrCat(status.message(), " (node ID:", node->id(), ")"));
   }
   work_serializer_.Schedule(
-      [watchers, status = std::move(status)
+      [watchers, status = std::move(status),
+       read_delay_handle = std::move(read_delay_handle)]()
          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
            for (const auto& p : watchers) {
-              p.first->OnError(status);
+              p.first->OnError(status, read_delay_handle);
            }
          },
      DEBUG_LOCATION);
@@ -1946,13 +1950,15 @@ void XdsClient::NotifyWatchersOnErrorLocked(
 
 void XdsClient::NotifyWatchersOnResourceDoesNotExist(
     const std::map<ResourceWatcherInterface*,
-                   RefCountedPtr<ResourceWatcherInterface>>& watchers
+                   RefCountedPtr<ResourceWatcherInterface>>& watchers,
+    RefCountedPtr<ReadDelayHandle> read_delay_handle) {
   work_serializer_.Schedule(
-      [watchers
-
-
-
-
+      [watchers, read_delay_handle = std::move(read_delay_handle)]()
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
+            for (const auto& p : watchers) {
+              p.first->OnResourceDoesNotExist(read_delay_handle);
+            }
+          },
       DEBUG_LOCATION);
 }
 