grpc 1.48.0 → 1.50.0
Sign up to get free protection for your applications and access to all of the features.
Potentially problematic release: this version of grpc might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Makefile +114 -150
- data/include/grpc/event_engine/endpoint_config.h +11 -5
- data/include/grpc/event_engine/event_engine.h +20 -17
- data/include/grpc/impl/codegen/atm_gcc_atomic.h +19 -28
- data/include/grpc/impl/codegen/atm_gcc_sync.h +0 -2
- data/include/grpc/impl/codegen/atm_windows.h +0 -2
- data/include/grpc/impl/codegen/grpc_types.h +9 -8
- data/include/grpc/impl/codegen/port_platform.h +0 -8
- data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +6 -6
- data/src/core/ext/filters/channel_idle/channel_idle_filter.h +2 -2
- data/src/core/ext/filters/client_channel/backup_poller.cc +4 -6
- data/src/core/ext/filters/client_channel/client_channel.cc +154 -218
- data/src/core/ext/filters/client_channel/client_channel.h +16 -9
- data/src/core/ext/filters/client_channel/client_channel_factory.cc +0 -29
- data/src/core/ext/filters/client_channel/client_channel_factory.h +2 -10
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +0 -16
- data/src/core/ext/filters/client_channel/config_selector.h +12 -4
- data/src/core/ext/filters/client_channel/connector.h +4 -5
- data/src/core/ext/filters/client_channel/http_proxy.cc +55 -74
- data/src/core/ext/filters/client_channel/http_proxy.h +15 -11
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +20 -16
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h +11 -10
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +0 -2
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +181 -194
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc +12 -3
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h +5 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +1 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -2
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +20 -11
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +250 -146
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.h +41 -1
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +35 -32
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +195 -299
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +237 -250
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.h +12 -7
- data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +431 -498
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +31 -30
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +27 -27
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +108 -124
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +68 -76
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +131 -227
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +126 -121
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +325 -304
- data/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc +4 -8
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +431 -145
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +172 -101
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +20 -7
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +18 -16
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +7 -17
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +11 -0
- data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +51 -32
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +87 -41
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.h +16 -6
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +5 -13
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +167 -168
- data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +40 -32
- data/src/core/ext/filters/client_channel/resolver_result_parsing.h +7 -10
- data/src/core/ext/filters/client_channel/retry_filter.cc +25 -36
- data/src/core/ext/filters/client_channel/retry_service_config.cc +30 -19
- data/src/core/ext/filters/client_channel/retry_service_config.h +6 -9
- data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +6 -7
- data/src/core/ext/filters/client_channel/subchannel.cc +86 -121
- data/src/core/ext/filters/client_channel/subchannel.h +20 -11
- data/src/core/ext/filters/client_channel/subchannel_interface_internal.h +1 -1
- data/src/core/ext/filters/client_channel/subchannel_pool_interface.cc +6 -76
- data/src/core/ext/filters/client_channel/subchannel_pool_interface.h +16 -25
- data/src/core/ext/filters/client_channel/subchannel_stream_client.cc +4 -6
- data/src/core/ext/filters/deadline/deadline_filter.cc +6 -6
- data/src/core/ext/filters/deadline/deadline_filter.h +2 -3
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +24 -19
- data/src/core/ext/filters/fault_injection/fault_injection_filter.h +9 -1
- data/src/core/ext/filters/fault_injection/service_config_parser.cc +17 -12
- data/src/core/ext/filters/fault_injection/service_config_parser.h +4 -5
- data/src/core/ext/filters/http/client/http_client_filter.cc +3 -5
- data/src/core/ext/filters/http/client/http_client_filter.h +1 -1
- data/src/core/ext/filters/http/client_authority_filter.cc +1 -2
- data/src/core/ext/filters/http/client_authority_filter.h +1 -1
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -4
- data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +0 -2
- data/src/core/ext/filters/http/server/http_server_filter.cc +3 -5
- data/src/core/ext/filters/http/server/http_server_filter.h +1 -1
- data/src/core/ext/filters/message_size/message_size_filter.cc +13 -7
- data/src/core/ext/filters/message_size/message_size_filter.h +3 -6
- data/src/core/ext/filters/rbac/rbac_service_config_parser.cc +16 -12
- data/src/core/ext/filters/rbac/rbac_service_config_parser.h +4 -6
- data/src/core/ext/filters/server_config_selector/server_config_selector.h +5 -0
- data/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc +3 -5
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +40 -63
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +86 -107
- data/src/core/ext/transport/chttp2/server/chttp2_server.h +4 -6
- data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +155 -295
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
- data/src/core/ext/transport/chttp2/transport/decode_huff.cc +287 -0
- data/src/core/ext/transport/chttp2/transport/decode_huff.h +1018 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +139 -42
- data/src/core/ext/transport/chttp2/transport/flow_control.h +12 -6
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +1 -2
- data/src/core/ext/transport/chttp2/transport/hpack_constants.h +7 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +27 -28
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +4 -0
- data/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc +0 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder_table.h +8 -2
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +30 -38
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +1 -10
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +11 -6
- data/src/core/ext/transport/chttp2/transport/internal.h +4 -1
- data/src/core/ext/transport/chttp2/transport/parsing.cc +44 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +3 -14
- data/src/core/ext/transport/inproc/inproc_transport.cc +41 -77
- data/src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/admin/v3/certs.upb.h +30 -10
- data/src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c +4 -4
- data/src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.h +24 -8
- data/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c +8 -327
- data/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.h +83 -1723
- data/src/core/ext/upb-generated/envoy/admin/v3/config_dump_shared.upb.c +352 -0
- data/src/core/ext/upb-generated/envoy/admin/v3/config_dump_shared.upb.h +1768 -0
- data/src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c +6 -4
- data/src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.h +33 -4
- data/src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/admin/v3/memory.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/admin/v3/tap.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/annotations/resource.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/annotations/resource.upb.h +7 -3
- data/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c +14 -14
- data/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.h +90 -30
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +21 -19
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +124 -34
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +23 -22
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +153 -48
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c +11 -11
- data/src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.h +84 -28
- data/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c +7 -7
- data/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.h +42 -14
- data/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +25 -23
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +156 -48
- data/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c +7 -7
- data/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.h +42 -14
- data/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c +12 -12
- data/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.h +78 -26
- data/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c +9 -9
- data/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.h +54 -18
- data/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +21 -20
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +117 -44
- data/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.h +36 -12
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.h +30 -10
- data/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +37 -13
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +170 -15
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +42 -14
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +7 -4
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +36 -4
- data/src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c +6 -6
- data/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.h +48 -16
- data/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c +8 -8
- data/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.h +54 -18
- data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +21 -9
- data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.h +100 -14
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +7 -22
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +36 -77
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +121 -76
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +647 -163
- data/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c +34 -15
- data/src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.h +161 -22
- data/src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/opentelemetry.upb.c +46 -0
- data/src/core/ext/upb-generated/envoy/config/trace/v3/opentelemetry.upb.h +98 -0
- data/src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c +1 -0
- data/src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.h +1 -0
- data/src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.h +30 -10
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c +11 -4
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.h +58 -4
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +35 -27
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +177 -52
- data/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +10 -8
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +66 -14
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.h +42 -14
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c +131 -16
- data/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.h +632 -12
- data/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +30 -10
- data/src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c +4 -4
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.h +24 -8
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c +4 -4
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.h +24 -8
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c +7 -7
- data/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.h +42 -14
- data/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c +5 -5
- data/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.h +30 -10
- data/src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c +3 -3
- data/src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/v3/http_status.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/v3/percent.upb.h +12 -4
- data/src/core/ext/upb-generated/envoy/type/v3/range.upb.c +2 -2
- data/src/core/ext/upb-generated/envoy/type/v3/range.upb.h +18 -6
- data/src/core/ext/upb-generated/envoy/type/v3/ratelimit_strategy.upb.c +63 -0
- data/src/core/ext/upb-generated/envoy/type/v3/ratelimit_strategy.upb.h +202 -0
- data/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c +1 -1
- data/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.h +6 -2
- data/src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.h +6 -2
- data/src/core/ext/upb-generated/google/api/annotations.upb.h +1 -1
- data/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c +11 -11
- data/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.h +66 -22
- data/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c +9 -9
- data/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.h +72 -24
- data/src/core/ext/upb-generated/google/api/http.upb.c +3 -3
- data/src/core/ext/upb-generated/google/api/http.upb.h +18 -6
- data/src/core/ext/upb-generated/google/api/httpbody.upb.c +1 -1
- data/src/core/ext/upb-generated/google/api/httpbody.upb.h +6 -2
- data/src/core/ext/upb-generated/google/protobuf/any.upb.c +1 -1
- data/src/core/ext/upb-generated/google/protobuf/any.upb.h +6 -2
- data/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c +44 -43
- data/src/core/ext/upb-generated/google/protobuf/descriptor.upb.h +177 -55
- data/src/core/ext/upb-generated/google/protobuf/duration.upb.c +1 -1
- data/src/core/ext/upb-generated/google/protobuf/duration.upb.h +6 -2
- data/src/core/ext/upb-generated/google/protobuf/empty.upb.c +1 -1
- data/src/core/ext/upb-generated/google/protobuf/empty.upb.h +6 -2
- data/src/core/ext/upb-generated/google/protobuf/struct.upb.c +2 -2
- data/src/core/ext/upb-generated/google/protobuf/struct.upb.h +18 -6
- data/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c +1 -1
- data/src/core/ext/upb-generated/google/protobuf/timestamp.upb.h +6 -2
- data/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c +6 -6
- data/src/core/ext/upb-generated/google/protobuf/wrappers.upb.h +54 -18
- data/src/core/ext/upb-generated/google/rpc/status.upb.c +1 -1
- data/src/core/ext/upb-generated/google/rpc/status.upb.h +6 -2
- data/src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c +2 -2
- data/src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.h +24 -8
- data/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.h +6 -2
- data/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c +7 -7
- data/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.h +60 -20
- data/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c +1 -1
- data/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.h +12 -4
- data/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c +2 -2
- data/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.h +12 -4
- data/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c +6 -6
- data/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.h +54 -18
- data/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c +2 -2
- data/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.h +12 -4
- data/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c +7 -7
- data/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.h +42 -14
- data/src/core/ext/upb-generated/udpa/annotations/migrate.upb.c +3 -3
- data/src/core/ext/upb-generated/udpa/annotations/migrate.upb.h +23 -11
- data/src/core/ext/upb-generated/udpa/annotations/security.upb.c +1 -1
- data/src/core/ext/upb-generated/udpa/annotations/security.upb.h +7 -3
- data/src/core/ext/upb-generated/udpa/annotations/status.upb.h +7 -3
- data/src/core/ext/upb-generated/udpa/annotations/versioning.upb.c +1 -1
- data/src/core/ext/upb-generated/udpa/annotations/versioning.upb.h +7 -3
- data/src/core/ext/upb-generated/validate/validate.upb.c +22 -22
- data/src/core/ext/upb-generated/validate/validate.upb.h +139 -47
- data/src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c +3 -3
- data/src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.h +23 -11
- data/src/core/ext/upb-generated/xds/annotations/v3/security.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/annotations/v3/security.upb.h +7 -3
- data/src/core/ext/upb-generated/xds/annotations/v3/status.upb.c +3 -3
- data/src/core/ext/upb-generated/xds/annotations/v3/status.upb.h +27 -11
- data/src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.h +7 -3
- data/src/core/ext/upb-generated/xds/core/v3/authority.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/authority.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c +2 -2
- data/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.h +12 -4
- data/src/core/ext/upb-generated/xds/core/v3/context_params.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/context_params.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/core/v3/extension.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/extension.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/core/v3/resource.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/resource.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.h +12 -4
- data/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.h +6 -2
- data/src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c +6 -6
- data/src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.h +54 -18
- data/src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c +2 -2
- data/src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.h +12 -4
- data/src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.h +12 -4
- data/src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c +1 -1
- data/src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.h +6 -2
- data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c +67 -274
- data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.h +0 -85
- data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump_shared.upbdefs.c +256 -0
- data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump_shared.upbdefs.h +115 -0
- data/src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c +14 -11
- data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +107 -107
- data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +191 -187
- data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +156 -154
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +186 -183
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +136 -134
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +16 -16
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +162 -139
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +39 -31
- data/src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c +12 -8
- data/src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +72 -75
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +0 -5
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +713 -670
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c +150 -139
- data/src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opentelemetry.upbdefs.c +47 -0
- data/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opentelemetry.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c +16 -12
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c +46 -25
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +477 -466
- data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +21 -18
- data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c +153 -84
- data/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +113 -113
- data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +29 -28
- data/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_strategy.upbdefs.c +69 -0
- data/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_strategy.upbdefs.h +40 -0
- data/src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c +92 -90
- data/src/core/ext/xds/certificate_provider_store.cc +63 -3
- data/src/core/ext/xds/certificate_provider_store.h +9 -1
- data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +5 -5
- data/src/core/ext/xds/file_watcher_certificate_provider_factory.h +1 -1
- data/src/core/ext/xds/upb_utils.h +0 -21
- data/src/core/ext/xds/xds_api.cc +73 -102
- data/src/core/ext/xds/xds_api.h +26 -28
- data/src/core/ext/xds/xds_bootstrap.cc +5 -550
- data/src/core/ext/xds/xds_bootstrap.h +39 -91
- data/src/core/ext/xds/xds_bootstrap_grpc.cc +370 -0
- data/src/core/ext/xds/xds_bootstrap_grpc.h +169 -0
- data/src/core/ext/xds/xds_certificate_provider.h +9 -0
- data/src/core/ext/xds/xds_channel_stack_modifier.cc +6 -4
- data/src/core/ext/xds/xds_channel_stack_modifier.h +8 -0
- data/src/core/ext/xds/xds_client.cc +732 -1317
- data/src/core/ext/xds/xds_client.h +33 -59
- data/src/core/ext/xds/xds_client_grpc.cc +229 -0
- data/src/core/ext/xds/xds_client_grpc.h +79 -0
- data/src/core/ext/xds/xds_client_stats.cc +4 -4
- data/src/core/ext/xds/xds_cluster.cc +162 -165
- data/src/core/ext/xds/xds_cluster.h +8 -7
- data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +12 -14
- data/src/core/ext/xds/xds_cluster_specifier_plugin.h +1 -0
- data/src/core/ext/xds/xds_common_types.cc +140 -108
- data/src/core/ext/xds/xds_common_types.h +6 -7
- data/src/core/ext/xds/xds_endpoint.cc +87 -85
- data/src/core/ext/xds/xds_endpoint.h +4 -5
- data/src/core/ext/xds/xds_http_fault_filter.cc +4 -11
- data/src/core/ext/xds/xds_http_fault_filter.h +3 -3
- data/src/core/ext/xds/xds_http_filters.h +3 -3
- data/src/core/ext/xds/xds_http_rbac_filter.cc +39 -58
- data/src/core/ext/xds/xds_http_rbac_filter.h +3 -3
- data/src/core/ext/xds/xds_lb_policy_registry.cc +21 -22
- data/src/core/ext/xds/xds_lb_policy_registry.h +4 -4
- data/src/core/ext/xds/xds_listener.cc +348 -313
- data/src/core/ext/xds/xds_listener.h +4 -5
- data/src/core/ext/xds/xds_resource_type.h +23 -9
- data/src/core/ext/xds/xds_route_config.cc +193 -191
- data/src/core/ext/xds/xds_route_config.h +31 -17
- data/src/core/ext/xds/xds_routing.cc +3 -6
- data/src/core/ext/xds/xds_routing.h +7 -9
- data/src/core/ext/xds/xds_server_config_fetcher.cc +81 -84
- data/src/core/ext/xds/xds_transport.h +86 -0
- data/src/core/ext/xds/xds_transport_grpc.cc +357 -0
- data/src/core/ext/xds/xds_transport_grpc.h +135 -0
- data/src/core/lib/address_utils/parse_address.cc +19 -17
- data/src/core/lib/address_utils/parse_address.h +8 -5
- data/src/core/lib/avl/avl.h +47 -25
- data/src/core/lib/backoff/backoff.cc +2 -4
- data/src/core/lib/channel/call_finalization.h +1 -3
- data/src/core/lib/channel/call_tracer.h +1 -1
- data/src/core/lib/channel/channel_args.cc +88 -19
- data/src/core/lib/channel/channel_args.h +218 -67
- data/src/core/lib/channel/channel_stack.cc +0 -1
- data/src/core/lib/channel/channel_stack_builder.cc +3 -3
- data/src/core/lib/channel/channel_stack_builder.h +2 -2
- data/src/core/lib/channel/channel_stack_builder_impl.cc +2 -4
- data/src/core/lib/channel/channel_trace.cc +3 -4
- data/src/core/lib/channel/channelz.cc +27 -37
- data/src/core/lib/channel/channelz.h +9 -0
- data/src/core/lib/channel/promise_based_filter.cc +18 -19
- data/src/core/lib/channel/promise_based_filter.h +0 -1
- data/src/core/lib/channel/status_util.cc +27 -0
- data/src/core/lib/channel/status_util.h +10 -0
- data/src/core/lib/config/core_configuration.cc +5 -1
- data/src/core/lib/config/core_configuration.h +81 -35
- data/src/core/lib/debug/stats.cc +39 -46
- data/src/core/lib/debug/stats.h +11 -13
- data/src/core/lib/debug/stats_data.cc +118 -614
- data/src/core/lib/debug/stats_data.h +67 -465
- data/src/core/lib/debug/trace.cc +0 -2
- data/src/core/lib/event_engine/channel_args_endpoint_config.cc +12 -20
- data/src/core/lib/event_engine/channel_args_endpoint_config.h +13 -7
- data/src/core/lib/event_engine/{event_engine.cc → default_event_engine.cc} +9 -5
- data/src/core/lib/event_engine/{event_engine_factory.h → default_event_engine.h} +4 -8
- data/src/core/lib/event_engine/default_event_engine_factory.cc +20 -3
- data/src/core/lib/event_engine/default_event_engine_factory.h +33 -0
- data/src/core/lib/event_engine/executor/executor.h +38 -0
- data/src/core/lib/event_engine/executor/threaded_executor.cc +36 -0
- data/src/core/lib/event_engine/executor/threaded_executor.h +44 -0
- data/src/core/lib/event_engine/forkable.cc +101 -0
- data/src/core/lib/event_engine/forkable.h +61 -0
- data/src/core/lib/event_engine/poller.h +56 -0
- data/src/core/lib/event_engine/{iomgr_engine/iomgr_engine.cc → posix_engine/posix_engine.cc} +30 -47
- data/src/core/lib/event_engine/{iomgr_engine/iomgr_engine.h → posix_engine/posix_engine.h} +27 -28
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer.cc +4 -5
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer.h +8 -8
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer_heap.cc +4 -4
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer_heap.h +5 -5
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer_manager.cc +87 -30
- data/src/core/lib/event_engine/{iomgr_engine → posix_engine}/timer_manager.h +40 -9
- data/src/core/lib/event_engine/socket_notifier.h +55 -0
- data/src/core/lib/event_engine/thread_pool.cc +195 -0
- data/src/core/lib/event_engine/thread_pool.h +114 -0
- data/src/core/lib/event_engine/time_util.cc +30 -0
- data/src/core/lib/event_engine/time_util.h +32 -0
- data/src/core/lib/event_engine/utils.cc +44 -0
- data/src/core/lib/event_engine/utils.h +36 -0
- data/src/core/lib/event_engine/windows/iocp.cc +155 -0
- data/src/core/lib/event_engine/windows/iocp.h +69 -0
- data/src/core/lib/event_engine/windows/win_socket.cc +196 -0
- data/src/core/lib/event_engine/windows/win_socket.h +120 -0
- data/src/core/lib/event_engine/windows/windows_engine.cc +159 -0
- data/src/core/lib/event_engine/windows/windows_engine.h +120 -0
- data/src/core/lib/experiments/config.cc +146 -0
- data/src/core/lib/experiments/config.h +43 -0
- data/src/core/lib/experiments/experiments.cc +75 -0
- data/src/core/lib/experiments/experiments.h +56 -0
- data/src/core/lib/gpr/alloc.cc +1 -9
- data/src/core/lib/gpr/log_windows.cc +0 -1
- data/src/core/lib/gpr/string_util_windows.cc +3 -30
- data/src/core/lib/gpr/sync_abseil.cc +0 -14
- data/src/core/lib/gpr/sync_posix.cc +0 -14
- data/src/core/lib/gpr/time.cc +11 -9
- data/src/core/lib/gpr/time_posix.cc +0 -6
- data/src/core/lib/gpr/time_precise.h +1 -1
- data/src/core/lib/gpr/tmpfile_windows.cc +5 -7
- data/src/core/lib/gpr/useful.h +40 -0
- data/src/core/lib/gprpp/bitset.h +3 -13
- data/src/core/lib/gprpp/debug_location.h +39 -7
- data/src/core/lib/{gpr → gprpp}/env.h +25 -12
- data/src/core/lib/{gpr → gprpp}/env_linux.cc +20 -15
- data/src/core/lib/{gpr → gprpp}/env_posix.cc +11 -10
- data/src/core/lib/gprpp/env_windows.cc +56 -0
- data/src/core/lib/gprpp/fork.cc +14 -22
- data/src/core/lib/gprpp/fork.h +0 -8
- data/src/core/lib/gprpp/global_config_env.cc +7 -6
- data/src/core/lib/gprpp/manual_constructor.h +0 -1
- data/src/core/lib/gprpp/no_destruct.h +94 -0
- data/src/core/lib/gprpp/notification.h +67 -0
- data/src/core/lib/gprpp/packed_table.h +40 -0
- data/src/core/lib/gprpp/ref_counted_ptr.h +20 -34
- data/src/core/lib/gprpp/sorted_pack.h +98 -0
- data/src/core/lib/gprpp/status_helper.cc +1 -0
- data/src/core/lib/gprpp/status_helper.h +6 -0
- data/src/core/lib/gprpp/table.h +9 -2
- data/src/core/lib/gprpp/tchar.cc +49 -0
- data/src/core/lib/gprpp/tchar.h +33 -0
- data/src/core/lib/gprpp/time.cc +21 -0
- data/src/core/lib/gprpp/time.h +55 -0
- data/src/core/lib/{event_engine/iomgr_engine → gprpp}/time_averaged_stats.cc +3 -5
- data/src/core/lib/{event_engine/iomgr_engine → gprpp}/time_averaged_stats.h +5 -7
- data/src/core/lib/gprpp/validation_errors.cc +61 -0
- data/src/core/lib/gprpp/validation_errors.h +110 -0
- data/src/core/lib/{iomgr → gprpp}/work_serializer.cc +34 -18
- data/src/core/lib/{iomgr → gprpp}/work_serializer.h +21 -27
- data/src/core/{ext/filters/client_channel → lib/handshaker}/proxy_mapper.h +16 -17
- data/src/core/lib/handshaker/proxy_mapper_registry.cc +71 -0
- data/src/core/lib/handshaker/proxy_mapper_registry.h +75 -0
- data/src/core/lib/http/httpcli.cc +12 -24
- data/src/core/lib/http/httpcli_security_connector.cc +11 -11
- data/src/core/lib/iomgr/call_combiner.cc +0 -34
- data/src/core/lib/iomgr/closure.h +0 -10
- data/src/core/lib/iomgr/combiner.cc +0 -20
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +14 -9
- data/src/core/lib/iomgr/endpoint_pair_windows.cc +2 -2
- data/src/core/lib/iomgr/error.cc +0 -773
- data/src/core/lib/iomgr/error.h +0 -145
- data/src/core/lib/iomgr/error_cfstream.cc +0 -5
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +1 -38
- data/src/core/lib/iomgr/ev_poll_posix.cc +19 -26
- data/src/core/lib/iomgr/exec_ctx.cc +0 -22
- data/src/core/lib/iomgr/exec_ctx.h +7 -31
- data/src/core/lib/iomgr/executor.cc +0 -10
- data/src/core/lib/iomgr/executor.h +0 -3
- data/src/core/lib/iomgr/iocp_windows.cc +1 -2
- data/src/core/lib/iomgr/iomgr.cc +6 -8
- data/src/core/lib/iomgr/iomgr_fwd.h +1 -0
- data/src/core/lib/iomgr/lockfree_event.cc +0 -17
- data/src/core/lib/iomgr/pollset.h +1 -1
- data/src/core/lib/iomgr/pollset_set.h +0 -1
- data/src/core/lib/iomgr/port.h +3 -0
- data/src/core/lib/iomgr/resolve_address.h +30 -6
- data/src/core/lib/iomgr/resolve_address_impl.h +1 -0
- data/src/core/lib/iomgr/resolve_address_posix.cc +43 -8
- data/src/core/lib/iomgr/resolve_address_posix.h +19 -5
- data/src/core/lib/iomgr/resolve_address_windows.cc +45 -10
- data/src/core/lib/iomgr/resolve_address_windows.h +19 -5
- data/src/core/lib/iomgr/sockaddr_utils_posix.cc +2 -1
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +12 -34
- data/src/core/lib/iomgr/socket_utils_posix.cc +83 -1
- data/src/core/lib/iomgr/socket_utils_posix.h +98 -6
- data/src/core/lib/iomgr/socket_windows.h +0 -2
- data/src/core/lib/iomgr/tcp_client.cc +6 -7
- data/src/core/lib/iomgr/tcp_client.h +11 -11
- data/src/core/lib/iomgr/tcp_client_cfstream.cc +6 -6
- data/src/core/lib/iomgr/tcp_client_posix.cc +33 -29
- data/src/core/lib/iomgr/tcp_client_posix.h +12 -9
- data/src/core/lib/iomgr/tcp_client_windows.cc +6 -6
- data/src/core/lib/iomgr/tcp_posix.cc +249 -120
- data/src/core/lib/iomgr/tcp_posix.h +3 -1
- data/src/core/lib/iomgr/tcp_server.cc +5 -4
- data/src/core/lib/iomgr/tcp_server.h +9 -6
- data/src/core/lib/iomgr/tcp_server_posix.cc +17 -28
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +2 -2
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +3 -3
- data/src/core/lib/iomgr/tcp_server_windows.cc +6 -7
- data/src/core/lib/iomgr/tcp_windows.cc +0 -1
- data/src/core/lib/iomgr/tcp_windows.h +0 -1
- data/src/core/lib/iomgr/timer_generic.cc +10 -12
- data/src/core/lib/iomgr/timer_manager.cc +1 -2
- data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +0 -2
- data/src/core/lib/json/json.h +19 -22
- data/src/core/lib/json/json_args.h +34 -0
- data/src/core/lib/json/json_object_loader.cc +202 -0
- data/src/core/lib/json/json_object_loader.h +598 -0
- data/src/core/lib/json/json_reader.cc +86 -62
- data/src/core/lib/json/json_util.cc +9 -36
- data/src/core/{ext/filters/client_channel → lib/load_balancing}/lb_policy.cc +3 -44
- data/src/core/{ext/filters/client_channel → lib/load_balancing}/lb_policy.h +20 -29
- data/src/core/lib/load_balancing/lb_policy_factory.h +49 -0
- data/src/core/lib/load_balancing/lb_policy_registry.cc +141 -0
- data/src/core/lib/load_balancing/lb_policy_registry.h +82 -0
- data/src/core/{ext/filters/client_channel → lib/load_balancing}/subchannel_interface.h +6 -6
- data/src/core/lib/promise/activity.h +57 -10
- data/src/core/lib/promise/arena_promise.h +84 -81
- data/src/core/lib/promise/context.h +1 -2
- data/src/core/lib/promise/detail/basic_seq.h +43 -23
- data/src/core/lib/promise/detail/promise_factory.h +0 -1
- data/src/core/lib/promise/map.h +0 -1
- data/src/core/lib/promise/seq.h +25 -4
- data/src/core/lib/promise/sleep.cc +50 -42
- data/src/core/lib/promise/sleep.h +32 -23
- data/src/core/lib/promise/try_seq.h +26 -6
- data/src/core/lib/resolver/resolver.cc +0 -47
- data/src/core/lib/resolver/resolver.h +15 -15
- data/src/core/lib/resolver/resolver_factory.h +2 -3
- data/src/core/lib/resolver/resolver_registry.cc +1 -1
- data/src/core/lib/resolver/resolver_registry.h +2 -3
- data/src/core/lib/resolver/server_address.cc +11 -15
- data/src/core/lib/resolver/server_address.h +4 -8
- data/src/core/lib/resource_quota/api.cc +10 -1
- data/src/core/lib/resource_quota/api.h +6 -0
- data/src/core/lib/resource_quota/arena.cc +19 -1
- data/src/core/lib/resource_quota/arena.h +24 -2
- data/src/core/lib/resource_quota/memory_quota.cc +143 -19
- data/src/core/lib/resource_quota/memory_quota.h +85 -17
- data/src/core/lib/resource_quota/periodic_update.cc +78 -0
- data/src/core/lib/resource_quota/periodic_update.h +71 -0
- data/src/core/lib/security/authorization/evaluate_args.cc +10 -7
- data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +1 -2
- data/src/core/lib/security/authorization/grpc_server_authz_filter.h +1 -1
- data/src/core/lib/security/authorization/matchers.cc +13 -10
- data/src/core/lib/security/authorization/rbac_policy.cc +0 -1
- data/src/core/{ext/xds → lib/security/certificate_provider}/certificate_provider_factory.h +3 -3
- data/src/core/lib/security/certificate_provider/certificate_provider_registry.cc +60 -0
- data/src/core/lib/security/certificate_provider/certificate_provider_registry.h +70 -0
- data/src/core/lib/security/credentials/alts/alts_credentials.cc +2 -3
- data/src/core/lib/security/credentials/alts/alts_credentials.h +3 -4
- data/src/core/lib/security/credentials/channel_creds_registry_init.cc +1 -0
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +3 -7
- data/src/core/lib/security/credentials/composite/composite_credentials.h +2 -4
- data/src/core/lib/security/credentials/credentials.h +16 -12
- data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +33 -27
- data/src/core/lib/security/credentials/external/external_account_credentials.cc +34 -24
- data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +5 -6
- data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +4 -6
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +3 -12
- data/src/core/lib/security/credentials/fake/fake_credentials.h +0 -4
- data/src/core/lib/security/credentials/google_default/credentials_generic.cc +5 -8
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +35 -32
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +1 -3
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +0 -1
- data/src/core/lib/security/credentials/insecure/insecure_credentials.cc +3 -3
- data/src/core/lib/security/credentials/insecure/insecure_credentials.h +3 -4
- data/src/core/lib/security/credentials/jwt/json_token.cc +12 -3
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +4 -8
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +14 -15
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +1 -1
- data/src/core/lib/security/credentials/local/local_credentials.cc +3 -4
- data/src/core/lib/security/credentials/local/local_credentials.h +3 -4
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +18 -20
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +0 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +13 -21
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +3 -4
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +4 -3
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +10 -2
- data/src/core/lib/security/credentials/tls/tls_credentials.cc +13 -25
- data/src/core/lib/security/credentials/tls/tls_credentials.h +3 -4
- data/src/core/lib/security/credentials/tls/tls_utils.cc +3 -1
- data/src/core/lib/security/credentials/xds/xds_credentials.cc +13 -30
- data/src/core/lib/security/credentials/xds/xds_credentials.h +3 -3
- data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +10 -18
- data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +26 -38
- data/src/core/lib/security/security_connector/fake/fake_security_connector.h +2 -2
- data/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc +5 -5
- data/src/core/lib/security/security_connector/insecure/insecure_security_connector.h +6 -5
- data/src/core/lib/security/security_connector/local/local_security_connector.cc +13 -11
- data/src/core/lib/security/security_connector/local/local_security_connector.h +2 -2
- data/src/core/lib/security/security_connector/security_connector.h +5 -3
- data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +5 -4
- data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +5 -5
- data/src/core/lib/security/security_connector/tls/tls_security_connector.h +5 -5
- data/src/core/lib/security/transport/auth_filters.h +1 -1
- data/src/core/lib/security/transport/client_auth_filter.cc +15 -5
- data/src/core/lib/security/transport/secure_endpoint.cc +0 -4
- data/src/core/lib/security/transport/security_handshaker.cc +32 -44
- data/src/core/lib/security/transport/security_handshaker.h +2 -1
- data/src/core/lib/service_config/service_config.h +11 -0
- data/src/core/lib/service_config/service_config_impl.cc +98 -97
- data/src/core/lib/service_config/service_config_impl.h +11 -13
- data/src/core/lib/service_config/service_config_parser.cc +26 -27
- data/src/core/lib/service_config/service_config_parser.h +10 -22
- data/src/core/lib/slice/percent_encoding.cc +4 -13
- data/src/core/lib/slice/slice.cc +10 -4
- data/src/core/lib/surface/call.cc +9 -13
- data/src/core/lib/surface/channel.cc +9 -8
- data/src/core/lib/surface/channel.h +1 -1
- data/src/core/lib/surface/completion_queue.cc +16 -30
- data/src/core/lib/surface/completion_queue.h +1 -4
- data/src/core/lib/surface/completion_queue_factory.cc +5 -0
- data/src/core/lib/surface/init.cc +17 -16
- data/src/core/lib/surface/init_internally.cc +24 -0
- data/src/core/lib/surface/init_internally.h +28 -0
- data/src/core/lib/surface/lame_client.cc +2 -3
- data/src/core/lib/surface/lame_client.h +1 -1
- data/src/core/lib/surface/server.cc +8 -19
- data/src/core/lib/surface/server.h +11 -13
- data/src/core/lib/surface/validate_metadata.cc +4 -14
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/bdp_estimator.cc +1 -3
- data/src/core/lib/transport/connectivity_state.cc +0 -1
- data/src/core/lib/transport/connectivity_state.h +1 -1
- data/src/core/lib/transport/error_utils.cc +0 -36
- data/src/core/lib/transport/handshaker.cc +7 -9
- data/src/core/lib/transport/handshaker.h +4 -5
- data/src/core/lib/transport/handshaker_factory.h +2 -3
- data/src/core/lib/transport/handshaker_registry.cc +2 -1
- data/src/core/lib/transport/handshaker_registry.h +2 -4
- data/src/core/lib/transport/http_connect_handshaker.cc +16 -16
- data/src/core/lib/transport/metadata_batch.cc +7 -3
- data/src/core/lib/transport/metadata_batch.h +61 -14
- data/src/core/lib/transport/parsed_metadata.h +4 -3
- data/src/core/lib/transport/status_conversion.cc +1 -3
- data/src/core/lib/transport/tcp_connect_handshaker.cc +20 -22
- data/src/core/lib/transport/transport.h +0 -8
- data/src/core/lib/transport/transport_impl.h +0 -1
- data/src/core/plugin_registry/grpc_plugin_registry.cc +23 -46
- data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +13 -25
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +40 -21
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +1 -1
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +14 -7
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +1 -1
- data/src/core/tsi/fake_transport_security.cc +53 -30
- data/src/core/tsi/local_transport_security.cc +9 -5
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +10 -1
- data/src/core/tsi/ssl_transport_security.cc +47 -23
- data/src/core/tsi/transport_security.cc +18 -6
- data/src/core/tsi/transport_security.h +2 -1
- data/src/core/tsi/transport_security_interface.h +17 -5
- data/src/ruby/ext/grpc/extconf.rb +2 -0
- data/src/ruby/ext/grpc/rb_loader.c +6 -2
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb +4 -0
- data/src/ruby/spec/channel_spec.rb +5 -0
- data/src/ruby/spec/generic/server_interceptors_spec.rb +1 -1
- data/src/ruby/spec/user_agent_spec.rb +1 -1
- data/third_party/abseil-cpp/absl/functional/any_invocable.h +313 -0
- data/third_party/abseil-cpp/absl/functional/internal/any_invocable.h +857 -0
- data/third_party/upb/third_party/utf8_range/utf8_range.h +1 -1
- data/third_party/upb/upb/arena.c +277 -0
- data/third_party/upb/upb/arena.h +225 -0
- data/third_party/upb/upb/array.c +114 -0
- data/third_party/upb/upb/array.h +83 -0
- data/third_party/upb/upb/collections.h +36 -0
- data/third_party/upb/upb/decode.c +161 -65
- data/third_party/upb/upb/decode.h +1 -0
- data/third_party/upb/upb/decode_fast.c +1 -1
- data/third_party/upb/upb/def.c +10 -2
- data/third_party/upb/upb/def.h +8 -1
- data/third_party/upb/upb/def.hpp +7 -4
- data/third_party/upb/upb/encode.c +29 -20
- data/third_party/upb/upb/encode.h +16 -6
- data/third_party/upb/upb/extension_registry.c +93 -0
- data/third_party/upb/upb/extension_registry.h +84 -0
- data/third_party/upb/upb/{decode_internal.h → internal/decode.h} +5 -5
- data/third_party/upb/upb/internal/table.h +385 -0
- data/third_party/upb/upb/{upb_internal.h → internal/upb.h} +3 -3
- data/third_party/upb/upb/internal/vsnprintf_compat.h +52 -0
- data/third_party/upb/upb/json_decode.c +1512 -0
- data/third_party/upb/upb/json_decode.h +47 -0
- data/third_party/upb/upb/json_encode.c +7 -3
- data/third_party/upb/upb/json_encode.h +6 -3
- data/third_party/upb/upb/map.c +108 -0
- data/third_party/upb/upb/map.h +117 -0
- data/third_party/upb/upb/message_value.h +66 -0
- data/third_party/upb/upb/mini_table.c +1147 -0
- data/third_party/upb/upb/mini_table.h +189 -0
- data/third_party/upb/upb/mini_table.hpp +112 -0
- data/third_party/upb/upb/msg.c +2 -62
- data/third_party/upb/upb/msg.h +2 -45
- data/third_party/upb/upb/msg_internal.h +28 -22
- data/third_party/upb/upb/port_def.inc +2 -1
- data/third_party/upb/upb/port_undef.inc +1 -0
- data/third_party/upb/upb/reflection.c +2 -159
- data/third_party/upb/upb/reflection.h +2 -112
- data/third_party/upb/upb/status.c +86 -0
- data/third_party/upb/upb/status.h +66 -0
- data/third_party/upb/upb/table.c +2 -2
- data/third_party/upb/upb/table_internal.h +3 -352
- data/third_party/upb/upb/text_encode.c +3 -2
- data/third_party/upb/upb/upb.c +4 -290
- data/third_party/upb/upb/upb.h +7 -196
- metadata +117 -51
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +0 -50
- data/src/core/ext/filters/client_channel/lb_policy_registry.cc +0 -190
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +0 -70
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +0 -90
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +0 -55
- data/src/core/ext/xds/certificate_provider_registry.cc +0 -103
- data/src/core/ext/xds/certificate_provider_registry.h +0 -59
- data/src/core/lib/event_engine/iomgr_engine/thread_pool.cc +0 -123
- data/src/core/lib/event_engine/iomgr_engine/thread_pool.h +0 -70
- data/src/core/lib/event_engine/promise.h +0 -69
- data/src/core/lib/gpr/env_windows.cc +0 -74
- data/src/core/lib/gpr/string_windows.h +0 -32
- data/src/core/lib/iomgr/error_internal.h +0 -66
- data/src/core/lib/iomgr/executor/mpmcqueue.cc +0 -182
- data/src/core/lib/iomgr/executor/mpmcqueue.h +0 -171
- data/src/core/lib/iomgr/executor/threadpool.cc +0 -136
- data/src/core/lib/iomgr/executor/threadpool.h +0 -150
- data/src/core/lib/iomgr/time_averaged_stats.cc +0 -64
- data/src/core/lib/iomgr/time_averaged_stats.h +0 -72
- data/src/core/lib/profiling/basic_timers.cc +0 -295
- data/src/core/lib/profiling/stap_timers.cc +0 -50
- data/src/core/lib/profiling/timers.h +0 -94
- data/src/core/lib/promise/detail/switch.h +0 -1455
@@ -19,12 +19,11 @@
|
|
19
19
|
#include "src/core/ext/xds/xds_client.h"
|
20
20
|
|
21
21
|
#include <inttypes.h>
|
22
|
-
#include <limits.h>
|
23
22
|
#include <string.h>
|
24
23
|
|
25
24
|
#include <algorithm>
|
26
25
|
|
27
|
-
#include "absl/
|
26
|
+
#include "absl/memory/memory.h"
|
28
27
|
#include "absl/strings/match.h"
|
29
28
|
#include "absl/strings/str_cat.h"
|
30
29
|
#include "absl/strings/str_format.h"
|
@@ -32,52 +31,22 @@
|
|
32
31
|
#include "absl/strings/str_split.h"
|
33
32
|
#include "absl/strings/string_view.h"
|
34
33
|
#include "absl/strings/strip.h"
|
34
|
+
#include "absl/types/optional.h"
|
35
|
+
#include "upb/arena.h"
|
35
36
|
|
36
|
-
#include <grpc/
|
37
|
-
#include <grpc/byte_buffer_reader.h>
|
38
|
-
#include <grpc/grpc.h>
|
39
|
-
#include <grpc/impl/codegen/connectivity_state.h>
|
40
|
-
#include <grpc/impl/codegen/propagation_bits.h>
|
41
|
-
#include <grpc/slice.h>
|
42
|
-
#include <grpc/status.h>
|
43
|
-
#include <grpc/support/alloc.h>
|
37
|
+
#include <grpc/event_engine/event_engine.h>
|
44
38
|
#include <grpc/support/log.h>
|
45
|
-
#include <grpc/support/string_util.h>
|
46
|
-
#include <grpc/support/time.h>
|
47
39
|
|
48
|
-
#include "src/core/ext/filters/client_channel/client_channel.h"
|
49
|
-
#include "src/core/ext/xds/upb_utils.h"
|
50
40
|
#include "src/core/ext/xds/xds_api.h"
|
51
41
|
#include "src/core/ext/xds/xds_bootstrap.h"
|
52
|
-
#include "src/core/ext/xds/xds_channel_args.h"
|
53
42
|
#include "src/core/ext/xds/xds_client_stats.h"
|
54
|
-
#include "src/core/ext/xds/xds_cluster_specifier_plugin.h"
|
55
|
-
#include "src/core/ext/xds/xds_http_filters.h"
|
56
43
|
#include "src/core/lib/backoff/backoff.h"
|
57
|
-
#include "src/core/lib/
|
58
|
-
#include "src/core/lib/channel/channel_fwd.h"
|
59
|
-
#include "src/core/lib/channel/channel_stack.h"
|
60
|
-
#include "src/core/lib/config/core_configuration.h"
|
61
|
-
#include "src/core/lib/gpr/env.h"
|
62
|
-
#include "src/core/lib/gpr/useful.h"
|
44
|
+
#include "src/core/lib/event_engine/default_event_engine.h"
|
63
45
|
#include "src/core/lib/gprpp/debug_location.h"
|
64
|
-
#include "src/core/lib/gprpp/memory.h"
|
65
46
|
#include "src/core/lib/gprpp/orphanable.h"
|
66
47
|
#include "src/core/lib/gprpp/ref_counted_ptr.h"
|
67
48
|
#include "src/core/lib/gprpp/sync.h"
|
68
|
-
#include "src/core/lib/iomgr/
|
69
|
-
#include "src/core/lib/iomgr/load_file.h"
|
70
|
-
#include "src/core/lib/iomgr/pollset_set.h"
|
71
|
-
#include "src/core/lib/iomgr/timer.h"
|
72
|
-
#include "src/core/lib/security/credentials/channel_creds_registry.h"
|
73
|
-
#include "src/core/lib/security/credentials/credentials.h"
|
74
|
-
#include "src/core/lib/slice/slice.h"
|
75
|
-
#include "src/core/lib/slice/slice_internal.h"
|
76
|
-
#include "src/core/lib/slice/slice_refcount.h"
|
77
|
-
#include "src/core/lib/surface/call.h"
|
78
|
-
#include "src/core/lib/surface/channel.h"
|
79
|
-
#include "src/core/lib/surface/lame_client.h"
|
80
|
-
#include "src/core/lib/transport/connectivity_state.h"
|
49
|
+
#include "src/core/lib/iomgr/exec_ctx.h"
|
81
50
|
#include "src/core/lib/uri/uri_parser.h"
|
82
51
|
|
83
52
|
#define GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS 1
|
@@ -88,19 +57,12 @@
|
|
88
57
|
|
89
58
|
namespace grpc_core {
|
90
59
|
|
60
|
+
using ::grpc_event_engine::experimental::EventEngine;
|
61
|
+
using ::grpc_event_engine::experimental::GetDefaultEventEngine;
|
62
|
+
|
91
63
|
TraceFlag grpc_xds_client_trace(false, "xds_client");
|
92
64
|
TraceFlag grpc_xds_client_refcount_trace(false, "xds_client_refcount");
|
93
65
|
|
94
|
-
namespace {
|
95
|
-
|
96
|
-
Mutex* g_mu = nullptr;
|
97
|
-
|
98
|
-
const grpc_channel_args* g_channel_args ABSL_GUARDED_BY(*g_mu) = nullptr;
|
99
|
-
XdsClient* g_xds_client ABSL_GUARDED_BY(*g_mu) = nullptr;
|
100
|
-
char* g_fallback_bootstrap_config ABSL_GUARDED_BY(*g_mu) = nullptr;
|
101
|
-
|
102
|
-
} // namespace
|
103
|
-
|
104
66
|
//
|
105
67
|
// Internal class declarations
|
106
68
|
//
|
@@ -113,9 +75,12 @@ class XdsClient::ChannelState::RetryableCall
|
|
113
75
|
public:
|
114
76
|
explicit RetryableCall(WeakRefCountedPtr<ChannelState> chand);
|
115
77
|
|
116
|
-
|
78
|
+
// Disable thread-safety analysis because this method is called via
|
79
|
+
// OrphanablePtr<>, but there's no way to pass the lock annotation
|
80
|
+
// through there.
|
81
|
+
void Orphan() override ABSL_NO_THREAD_SAFETY_ANALYSIS;
|
117
82
|
|
118
|
-
void OnCallFinishedLocked();
|
83
|
+
void OnCallFinishedLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
119
84
|
|
120
85
|
T* calld() const { return calld_.get(); }
|
121
86
|
ChannelState* chand() const { return chand_.get(); }
|
@@ -124,9 +89,9 @@ class XdsClient::ChannelState::RetryableCall
|
|
124
89
|
|
125
90
|
private:
|
126
91
|
void StartNewCallLocked();
|
127
|
-
void StartRetryTimerLocked();
|
128
|
-
|
129
|
-
void
|
92
|
+
void StartRetryTimerLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
93
|
+
|
94
|
+
void OnRetryTimer();
|
130
95
|
|
131
96
|
// The wrapped xds call that talks to the xds server. It's instantiated
|
132
97
|
// every time we start a new call. It's null during call retry backoff.
|
@@ -136,9 +101,8 @@ class XdsClient::ChannelState::RetryableCall
|
|
136
101
|
|
137
102
|
// Retry state.
|
138
103
|
BackOff backoff_;
|
139
|
-
|
140
|
-
|
141
|
-
bool retry_timer_callback_pending_ = false;
|
104
|
+
absl::optional<EventEngine::TaskHandle> timer_handle_
|
105
|
+
ABSL_GUARDED_BY(&XdsClient::mu_);
|
142
106
|
|
143
107
|
bool shutting_down_ = false;
|
144
108
|
};
|
@@ -149,7 +113,6 @@ class XdsClient::ChannelState::AdsCallState
|
|
149
113
|
public:
|
150
114
|
// The ctor and dtor should not be used directly.
|
151
115
|
explicit AdsCallState(RefCountedPtr<RetryableCall<AdsCallState>> parent);
|
152
|
-
~AdsCallState() override;
|
153
116
|
|
154
117
|
void Orphan() override;
|
155
118
|
|
@@ -187,30 +150,32 @@ class XdsClient::ChannelState::AdsCallState
|
|
187
150
|
absl::Status ProcessAdsResponseFields(AdsResponseFields fields) override
|
188
151
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
189
152
|
|
190
|
-
void ParseResource(
|
191
|
-
absl::string_view
|
153
|
+
void ParseResource(upb_Arena* arena, size_t idx, absl::string_view type_url,
|
154
|
+
absl::string_view resource_name,
|
192
155
|
absl::string_view serialized_resource) override
|
193
156
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
194
157
|
|
158
|
+
void ResourceWrapperParsingFailed(size_t idx) override;
|
159
|
+
|
195
160
|
Result TakeResult() { return std::move(result_); }
|
196
161
|
|
197
162
|
private:
|
198
163
|
XdsClient* xds_client() const { return ads_call_state_->xds_client(); }
|
199
164
|
|
200
165
|
AdsCallState* ads_call_state_;
|
201
|
-
const Timestamp update_time_ =
|
166
|
+
const Timestamp update_time_ = Timestamp::Now();
|
202
167
|
Result result_;
|
203
168
|
};
|
204
169
|
|
205
170
|
class ResourceTimer : public InternallyRefCounted<ResourceTimer> {
|
206
171
|
public:
|
207
172
|
ResourceTimer(const XdsResourceType* type, const XdsResourceName& name)
|
208
|
-
: type_(type), name_(name) {
|
209
|
-
GRPC_CLOSURE_INIT(&timer_callback_, OnTimer, this,
|
210
|
-
grpc_schedule_on_exec_ctx);
|
211
|
-
}
|
173
|
+
: type_(type), name_(name) {}
|
212
174
|
|
213
|
-
|
175
|
+
// Disable thread-safety analysis because this method is called via
|
176
|
+
// OrphanablePtr<>, but there's no way to pass the lock annotation
|
177
|
+
// through there.
|
178
|
+
void Orphan() override ABSL_NO_THREAD_SAFETY_ANALYSIS {
|
214
179
|
MaybeCancelTimer();
|
215
180
|
Unref(DEBUG_LOCATION, "Orphan");
|
216
181
|
}
|
@@ -230,15 +195,16 @@ class XdsClient::ChannelState::AdsCallState
|
|
230
195
|
if (state.resource != nullptr) return;
|
231
196
|
// Start timer.
|
232
197
|
ads_calld_ = std::move(ads_calld);
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
198
|
+
timer_handle_ = GetDefaultEventEngine()->RunAfter(
|
199
|
+
ads_calld_->xds_client()->request_timeout_,
|
200
|
+
[self = Ref(DEBUG_LOCATION, "timer")]() {
|
201
|
+
ApplicationCallbackExecCtx callback_exec_ctx;
|
202
|
+
ExecCtx exec_ctx;
|
203
|
+
self->OnTimer();
|
204
|
+
});
|
239
205
|
}
|
240
206
|
|
241
|
-
void MaybeCancelTimer() {
|
207
|
+
void MaybeCancelTimer() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
242
208
|
// If the timer hasn't been started yet, make sure we don't start
|
243
209
|
// it later. This can happen if the last watch for an LDS or CDS
|
244
210
|
// resource is cancelled and then restarted, both while an ADS
|
@@ -251,65 +217,72 @@ class XdsClient::ChannelState::AdsCallState
|
|
251
217
|
// For details, see https://github.com/grpc/grpc/issues/29583.
|
252
218
|
// TODO(roth): Find a way to write a test for this case.
|
253
219
|
timer_start_needed_ = false;
|
254
|
-
if (
|
255
|
-
|
256
|
-
|
220
|
+
if (timer_handle_.has_value()) {
|
221
|
+
GetDefaultEventEngine()->Cancel(*timer_handle_);
|
222
|
+
timer_handle_.reset();
|
257
223
|
}
|
258
224
|
}
|
259
225
|
|
260
226
|
private:
|
261
|
-
|
262
|
-
ResourceTimer* self = static_cast<ResourceTimer*>(arg);
|
227
|
+
void OnTimer() {
|
263
228
|
{
|
264
|
-
MutexLock lock(&
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
name_.authority, type_->type_url(), name_.key)
|
285
|
-
.c_str());
|
229
|
+
MutexLock lock(&ads_calld_->xds_client()->mu_);
|
230
|
+
if (timer_handle_.has_value()) {
|
231
|
+
timer_handle_.reset();
|
232
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
233
|
+
gpr_log(GPR_INFO,
|
234
|
+
"[xds_client %p] xds server %s: timeout obtaining resource "
|
235
|
+
"{type=%s name=%s} from xds server",
|
236
|
+
ads_calld_->xds_client(),
|
237
|
+
ads_calld_->chand()->server_.server_uri().c_str(),
|
238
|
+
std::string(type_->type_url()).c_str(),
|
239
|
+
XdsClient::ConstructFullXdsResourceName(
|
240
|
+
name_.authority, type_->type_url(), name_.key)
|
241
|
+
.c_str());
|
242
|
+
}
|
243
|
+
auto& authority_state =
|
244
|
+
ads_calld_->xds_client()->authority_state_map_[name_.authority];
|
245
|
+
ResourceState& state = authority_state.resource_map[type_][name_.key];
|
246
|
+
state.meta.client_status = XdsApi::ResourceMetadata::DOES_NOT_EXIST;
|
247
|
+
ads_calld_->xds_client()->NotifyWatchersOnResourceDoesNotExist(
|
248
|
+
state.watchers);
|
286
249
|
}
|
287
|
-
auto& authority_state =
|
288
|
-
ads_calld_->xds_client()->authority_state_map_[name_.authority];
|
289
|
-
ResourceState& state = authority_state.resource_map[type_][name_.key];
|
290
|
-
state.meta.client_status = XdsApi::ResourceMetadata::DOES_NOT_EXIST;
|
291
|
-
ads_calld_->xds_client()->NotifyWatchersOnResourceDoesNotExist(
|
292
|
-
state.watchers);
|
293
250
|
}
|
294
|
-
|
251
|
+
ads_calld_->xds_client()->work_serializer_.DrainQueue();
|
252
|
+
ads_calld_.reset();
|
295
253
|
}
|
296
254
|
|
297
255
|
const XdsResourceType* type_;
|
298
256
|
const XdsResourceName name_;
|
299
257
|
|
300
258
|
RefCountedPtr<AdsCallState> ads_calld_;
|
301
|
-
bool timer_start_needed_ = true;
|
302
|
-
|
303
|
-
|
304
|
-
grpc_closure timer_callback_;
|
259
|
+
bool timer_start_needed_ ABSL_GUARDED_BY(&XdsClient::mu_) = true;
|
260
|
+
absl::optional<EventEngine::TaskHandle> timer_handle_
|
261
|
+
ABSL_GUARDED_BY(&XdsClient::mu_);
|
305
262
|
};
|
306
263
|
|
307
|
-
|
308
|
-
|
264
|
+
class StreamEventHandler
|
265
|
+
: public XdsTransportFactory::XdsTransport::StreamingCall::EventHandler {
|
266
|
+
public:
|
267
|
+
explicit StreamEventHandler(RefCountedPtr<AdsCallState> ads_calld)
|
268
|
+
: ads_calld_(std::move(ads_calld)) {}
|
269
|
+
|
270
|
+
void OnRequestSent(bool ok) override { ads_calld_->OnRequestSent(ok); }
|
271
|
+
void OnRecvMessage(absl::string_view payload) override {
|
272
|
+
ads_calld_->OnRecvMessage(payload);
|
273
|
+
}
|
274
|
+
void OnStatusReceived(absl::Status status) override {
|
275
|
+
ads_calld_->OnStatusReceived(std::move(status));
|
276
|
+
}
|
309
277
|
|
310
|
-
|
278
|
+
private:
|
279
|
+
RefCountedPtr<AdsCallState> ads_calld_;
|
280
|
+
};
|
281
|
+
|
282
|
+
struct ResourceTypeState {
|
283
|
+
// Nonce and status for this resource type.
|
311
284
|
std::string nonce;
|
312
|
-
|
285
|
+
absl::Status status;
|
313
286
|
|
314
287
|
// Subscribed resources of this type.
|
315
288
|
std::map<std::string /*authority*/,
|
@@ -320,15 +293,9 @@ class XdsClient::ChannelState::AdsCallState
|
|
320
293
|
void SendMessageLocked(const XdsResourceType* type)
|
321
294
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
322
295
|
|
323
|
-
|
324
|
-
void
|
325
|
-
|
326
|
-
static void OnResponseReceived(void* arg, grpc_error_handle error);
|
327
|
-
bool OnResponseReceivedLocked()
|
328
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
329
|
-
static void OnStatusReceived(void* arg, grpc_error_handle error);
|
330
|
-
void OnStatusReceivedLocked(grpc_error_handle error)
|
331
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
296
|
+
void OnRequestSent(bool ok);
|
297
|
+
void OnRecvMessage(absl::string_view payload);
|
298
|
+
void OnStatusReceived(absl::Status status);
|
332
299
|
|
333
300
|
bool IsCurrentCallOnChannel() const;
|
334
301
|
|
@@ -340,28 +307,11 @@ class XdsClient::ChannelState::AdsCallState
|
|
340
307
|
// The owning RetryableCall<>.
|
341
308
|
RefCountedPtr<RetryableCall<AdsCallState>> parent_;
|
342
309
|
|
310
|
+
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall> call_;
|
311
|
+
|
343
312
|
bool sent_initial_message_ = false;
|
344
313
|
bool seen_response_ = false;
|
345
|
-
|
346
|
-
// Always non-NULL.
|
347
|
-
grpc_call* call_;
|
348
|
-
|
349
|
-
// recv_initial_metadata
|
350
|
-
grpc_metadata_array initial_metadata_recv_;
|
351
|
-
|
352
|
-
// send_message
|
353
|
-
grpc_byte_buffer* send_message_payload_ = nullptr;
|
354
|
-
grpc_closure on_request_sent_;
|
355
|
-
|
356
|
-
// recv_message
|
357
|
-
grpc_byte_buffer* recv_message_payload_ = nullptr;
|
358
|
-
grpc_closure on_response_received_;
|
359
|
-
|
360
|
-
// recv_trailing_metadata
|
361
|
-
grpc_metadata_array trailing_metadata_recv_;
|
362
|
-
grpc_status_code status_code_;
|
363
|
-
grpc_slice status_details_;
|
364
|
-
grpc_closure on_status_received_;
|
314
|
+
bool send_message_pending_ ABSL_GUARDED_BY(&XdsClient::mu_) = false;
|
365
315
|
|
366
316
|
// Resource types for which requests need to be sent.
|
367
317
|
std::set<const XdsResourceType*> buffered_requests_;
|
@@ -376,11 +326,11 @@ class XdsClient::ChannelState::LrsCallState
|
|
376
326
|
public:
|
377
327
|
// The ctor and dtor should not be used directly.
|
378
328
|
explicit LrsCallState(RefCountedPtr<RetryableCall<LrsCallState>> parent);
|
379
|
-
~LrsCallState() override;
|
380
329
|
|
381
330
|
void Orphan() override;
|
382
331
|
|
383
|
-
void MaybeStartReportingLocked()
|
332
|
+
void MaybeStartReportingLocked()
|
333
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
384
334
|
|
385
335
|
RetryableCall<LrsCallState>* parent() { return parent_.get(); }
|
386
336
|
ChannelState* chand() const { return parent_->chand(); }
|
@@ -388,30 +338,44 @@ class XdsClient::ChannelState::LrsCallState
|
|
388
338
|
bool seen_response() const { return seen_response_; }
|
389
339
|
|
390
340
|
private:
|
341
|
+
class StreamEventHandler
|
342
|
+
: public XdsTransportFactory::XdsTransport::StreamingCall::EventHandler {
|
343
|
+
public:
|
344
|
+
explicit StreamEventHandler(RefCountedPtr<LrsCallState> lrs_calld)
|
345
|
+
: lrs_calld_(std::move(lrs_calld)) {}
|
346
|
+
|
347
|
+
void OnRequestSent(bool ok) override { lrs_calld_->OnRequestSent(ok); }
|
348
|
+
void OnRecvMessage(absl::string_view payload) override {
|
349
|
+
lrs_calld_->OnRecvMessage(payload);
|
350
|
+
}
|
351
|
+
void OnStatusReceived(absl::Status status) override {
|
352
|
+
lrs_calld_->OnStatusReceived(std::move(status));
|
353
|
+
}
|
354
|
+
|
355
|
+
private:
|
356
|
+
RefCountedPtr<LrsCallState> lrs_calld_;
|
357
|
+
};
|
358
|
+
|
391
359
|
// Reports client-side load stats according to a fixed interval.
|
392
360
|
class Reporter : public InternallyRefCounted<Reporter> {
|
393
361
|
public:
|
394
362
|
Reporter(RefCountedPtr<LrsCallState> parent, Duration report_interval)
|
395
363
|
: parent_(std::move(parent)), report_interval_(report_interval) {
|
396
|
-
GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimer, this,
|
397
|
-
grpc_schedule_on_exec_ctx);
|
398
|
-
GRPC_CLOSURE_INIT(&on_report_done_, OnReportDone, this,
|
399
|
-
grpc_schedule_on_exec_ctx);
|
400
364
|
ScheduleNextReportLocked();
|
401
365
|
}
|
402
366
|
|
403
|
-
|
367
|
+
// Disable thread-safety analysis because this method is called via
|
368
|
+
// OrphanablePtr<>, but there's no way to pass the lock annotation
|
369
|
+
// through there.
|
370
|
+
void Orphan() override ABSL_NO_THREAD_SAFETY_ANALYSIS;
|
371
|
+
|
372
|
+
void OnReportDoneLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
404
373
|
|
405
374
|
private:
|
406
375
|
void ScheduleNextReportLocked()
|
407
376
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
408
|
-
|
409
|
-
bool OnNextReportTimerLocked(grpc_error_handle error)
|
410
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
377
|
+
bool OnNextReportTimer();
|
411
378
|
bool SendReportLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
412
|
-
static void OnReportDone(void* arg, grpc_error_handle error);
|
413
|
-
bool OnReportDoneLocked(grpc_error_handle error)
|
414
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
415
379
|
|
416
380
|
bool IsCurrentReporterOnCall() const {
|
417
381
|
return this == parent_->reporter_.get();
|
@@ -424,47 +388,23 @@ class XdsClient::ChannelState::LrsCallState
|
|
424
388
|
// The load reporting state.
|
425
389
|
const Duration report_interval_;
|
426
390
|
bool last_report_counters_were_zero_ = false;
|
427
|
-
|
428
|
-
|
429
|
-
grpc_closure on_next_report_timer_;
|
430
|
-
grpc_closure on_report_done_;
|
391
|
+
absl::optional<EventEngine::TaskHandle> timer_handle_
|
392
|
+
ABSL_GUARDED_BY(&XdsClient::mu_);
|
431
393
|
};
|
432
394
|
|
433
|
-
|
434
|
-
void
|
435
|
-
|
436
|
-
static void OnResponseReceived(void* arg, grpc_error_handle error);
|
437
|
-
bool OnResponseReceivedLocked()
|
438
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
439
|
-
static void OnStatusReceived(void* arg, grpc_error_handle error);
|
440
|
-
void OnStatusReceivedLocked(grpc_error_handle error)
|
441
|
-
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
|
395
|
+
void OnRequestSent(bool ok);
|
396
|
+
void OnRecvMessage(absl::string_view payload);
|
397
|
+
void OnStatusReceived(absl::Status status);
|
442
398
|
|
443
399
|
bool IsCurrentCallOnChannel() const;
|
444
400
|
|
445
401
|
// The owning RetryableCall<>.
|
446
402
|
RefCountedPtr<RetryableCall<LrsCallState>> parent_;
|
447
|
-
bool seen_response_ = false;
|
448
|
-
|
449
|
-
// Always non-NULL.
|
450
|
-
grpc_call* call_;
|
451
|
-
|
452
|
-
// recv_initial_metadata
|
453
|
-
grpc_metadata_array initial_metadata_recv_;
|
454
403
|
|
455
|
-
|
456
|
-
grpc_byte_buffer* send_message_payload_ = nullptr;
|
457
|
-
grpc_closure on_initial_request_sent_;
|
404
|
+
OrphanablePtr<XdsTransportFactory::XdsTransport::StreamingCall> call_;
|
458
405
|
|
459
|
-
|
460
|
-
|
461
|
-
grpc_closure on_response_received_;
|
462
|
-
|
463
|
-
// recv_trailing_metadata
|
464
|
-
grpc_metadata_array trailing_metadata_recv_;
|
465
|
-
grpc_status_code status_code_;
|
466
|
-
grpc_slice status_details_;
|
467
|
-
grpc_closure on_status_received_;
|
406
|
+
bool seen_response_ = false;
|
407
|
+
bool send_message_pending_ ABSL_GUARDED_BY(&XdsClient::mu_) = false;
|
468
408
|
|
469
409
|
// Load reporting state.
|
470
410
|
bool send_all_clusters_ = false;
|
@@ -473,58 +413,10 @@ class XdsClient::ChannelState::LrsCallState
|
|
473
413
|
OrphanablePtr<Reporter> reporter_;
|
474
414
|
};
|
475
415
|
|
476
|
-
//
|
477
|
-
// XdsClient::ChannelState::StateWatcher
|
478
|
-
//
|
479
|
-
|
480
|
-
class XdsClient::ChannelState::StateWatcher
|
481
|
-
: public AsyncConnectivityStateWatcherInterface {
|
482
|
-
public:
|
483
|
-
explicit StateWatcher(WeakRefCountedPtr<ChannelState> parent)
|
484
|
-
: parent_(std::move(parent)) {}
|
485
|
-
|
486
|
-
private:
|
487
|
-
void OnConnectivityStateChange(grpc_connectivity_state new_state,
|
488
|
-
const absl::Status& status) override {
|
489
|
-
{
|
490
|
-
MutexLock lock(&parent_->xds_client_->mu_);
|
491
|
-
if (!parent_->shutting_down_ &&
|
492
|
-
new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
|
493
|
-
// In TRANSIENT_FAILURE. Notify all watchers of error.
|
494
|
-
gpr_log(GPR_INFO,
|
495
|
-
"[xds_client %p] xds channel for server %s in "
|
496
|
-
"state TRANSIENT_FAILURE: %s",
|
497
|
-
parent_->xds_client(), parent_->server_.server_uri.c_str(),
|
498
|
-
status.ToString().c_str());
|
499
|
-
parent_->xds_client_->NotifyOnErrorLocked(
|
500
|
-
absl::UnavailableError(absl::StrCat(
|
501
|
-
"xds channel in TRANSIENT_FAILURE, connectivity error: ",
|
502
|
-
status.ToString())));
|
503
|
-
}
|
504
|
-
}
|
505
|
-
parent_->xds_client()->work_serializer_.DrainQueue();
|
506
|
-
}
|
507
|
-
|
508
|
-
WeakRefCountedPtr<ChannelState> parent_;
|
509
|
-
};
|
510
|
-
|
511
416
|
//
|
512
417
|
// XdsClient::ChannelState
|
513
418
|
//
|
514
419
|
|
515
|
-
namespace {
|
516
|
-
|
517
|
-
grpc_channel* CreateXdsChannel(grpc_channel_args* args,
|
518
|
-
const XdsBootstrap::XdsServer& server) {
|
519
|
-
RefCountedPtr<grpc_channel_credentials> channel_creds =
|
520
|
-
CoreConfiguration::Get().channel_creds_registry().CreateChannelCreds(
|
521
|
-
server.channel_creds_type, server.channel_creds_config);
|
522
|
-
return grpc_channel_create(server.server_uri.c_str(), channel_creds.get(),
|
523
|
-
args);
|
524
|
-
}
|
525
|
-
|
526
|
-
} // namespace
|
527
|
-
|
528
420
|
XdsClient::ChannelState::ChannelState(WeakRefCountedPtr<XdsClient> xds_client,
|
529
421
|
const XdsBootstrap::XdsServer& server)
|
530
422
|
: DualRefCounted<ChannelState>(
|
@@ -535,37 +427,45 @@ XdsClient::ChannelState::ChannelState(WeakRefCountedPtr<XdsClient> xds_client,
|
|
535
427
|
server_(server) {
|
536
428
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
537
429
|
gpr_log(GPR_INFO, "[xds_client %p] creating channel to %s",
|
538
|
-
xds_client_.get(), server.server_uri.c_str());
|
539
|
-
}
|
540
|
-
|
541
|
-
|
542
|
-
|
430
|
+
xds_client_.get(), server.server_uri().c_str());
|
431
|
+
}
|
432
|
+
absl::Status status;
|
433
|
+
transport_ = xds_client_->transport_factory_->Create(
|
434
|
+
server,
|
435
|
+
[self = WeakRef(DEBUG_LOCATION, "OnConnectivityFailure")](
|
436
|
+
absl::Status status) {
|
437
|
+
self->OnConnectivityFailure(std::move(status));
|
438
|
+
},
|
439
|
+
&status);
|
440
|
+
GPR_ASSERT(transport_ != nullptr);
|
441
|
+
if (!status.ok()) SetChannelStatusLocked(std::move(status));
|
543
442
|
}
|
544
443
|
|
545
444
|
XdsClient::ChannelState::~ChannelState() {
|
546
445
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
547
446
|
gpr_log(GPR_INFO, "[xds_client %p] destroying xds channel %p for server %s",
|
548
|
-
xds_client(), this, server_.server_uri.c_str());
|
447
|
+
xds_client(), this, server_.server_uri().c_str());
|
549
448
|
}
|
550
|
-
grpc_channel_destroy(channel_);
|
551
449
|
xds_client_.reset(DEBUG_LOCATION, "ChannelState");
|
552
450
|
}
|
553
451
|
|
554
452
|
// This method should only ever be called when holding the lock, but we can't
|
555
453
|
// use a ABSL_EXCLUSIVE_LOCKS_REQUIRED annotation, because Orphan() will be
|
556
|
-
// called from DualRefCounted::Unref, which cannot have a lock annotation for
|
557
|
-
// lock in this subclass.
|
454
|
+
// called from DualRefCounted::Unref, which cannot have a lock annotation for
|
455
|
+
// a lock in this subclass.
|
558
456
|
void XdsClient::ChannelState::Orphan() ABSL_NO_THREAD_SAFETY_ANALYSIS {
|
559
457
|
shutting_down_ = true;
|
560
|
-
|
458
|
+
transport_.reset();
|
561
459
|
// At this time, all strong refs are removed, remove from channel map to
|
562
|
-
// prevent subsequent subscription from trying to use this ChannelState as
|
563
|
-
// is shutting down.
|
564
|
-
xds_client_->xds_server_channel_map_.erase(server_);
|
460
|
+
// prevent subsequent subscription from trying to use this ChannelState as
|
461
|
+
// it is shutting down.
|
462
|
+
xds_client_->xds_server_channel_map_.erase(&server_);
|
565
463
|
ads_calld_.reset();
|
566
464
|
lrs_calld_.reset();
|
567
465
|
}
|
568
466
|
|
467
|
+
void XdsClient::ChannelState::ResetBackoff() { transport_->ResetBackoff(); }
|
468
|
+
|
569
469
|
XdsClient::ChannelState::AdsCallState* XdsClient::ChannelState::ads_calld()
|
570
470
|
const {
|
571
471
|
return ads_calld_->calld();
|
@@ -576,10 +476,6 @@ XdsClient::ChannelState::LrsCallState* XdsClient::ChannelState::lrs_calld()
|
|
576
476
|
return lrs_calld_->calld();
|
577
477
|
}
|
578
478
|
|
579
|
-
bool XdsClient::ChannelState::HasActiveAdsCall() const {
|
580
|
-
return ads_calld_ != nullptr && ads_calld_->calld() != nullptr;
|
581
|
-
}
|
582
|
-
|
583
479
|
void XdsClient::ChannelState::MaybeStartLrsCall() {
|
584
480
|
if (lrs_calld_ != nullptr) return;
|
585
481
|
lrs_calld_.reset(new RetryableCall<LrsCallState>(
|
@@ -587,45 +483,10 @@ void XdsClient::ChannelState::MaybeStartLrsCall() {
|
|
587
483
|
}
|
588
484
|
|
589
485
|
void XdsClient::ChannelState::StopLrsCallLocked() {
|
590
|
-
xds_client_->xds_load_report_server_map_.erase(server_);
|
486
|
+
xds_client_->xds_load_report_server_map_.erase(&server_);
|
591
487
|
lrs_calld_.reset();
|
592
488
|
}
|
593
489
|
|
594
|
-
namespace {
|
595
|
-
|
596
|
-
bool IsLameChannel(grpc_channel* channel) {
|
597
|
-
grpc_channel_element* elem =
|
598
|
-
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
|
599
|
-
return elem->filter == &LameClientFilter::kFilter;
|
600
|
-
}
|
601
|
-
|
602
|
-
} // namespace
|
603
|
-
|
604
|
-
void XdsClient::ChannelState::StartConnectivityWatchLocked() {
|
605
|
-
if (IsLameChannel(channel_)) {
|
606
|
-
xds_client()->NotifyOnErrorLocked(
|
607
|
-
absl::UnavailableError("xds client has a lame channel"));
|
608
|
-
return;
|
609
|
-
}
|
610
|
-
ClientChannel* client_channel =
|
611
|
-
ClientChannel::GetFromChannel(Channel::FromC(channel_));
|
612
|
-
GPR_ASSERT(client_channel != nullptr);
|
613
|
-
watcher_ = new StateWatcher(WeakRef(DEBUG_LOCATION, "ChannelState+watch"));
|
614
|
-
client_channel->AddConnectivityWatcher(
|
615
|
-
GRPC_CHANNEL_IDLE,
|
616
|
-
OrphanablePtr<AsyncConnectivityStateWatcherInterface>(watcher_));
|
617
|
-
}
|
618
|
-
|
619
|
-
void XdsClient::ChannelState::CancelConnectivityWatchLocked() {
|
620
|
-
if (IsLameChannel(channel_)) {
|
621
|
-
return;
|
622
|
-
}
|
623
|
-
ClientChannel* client_channel =
|
624
|
-
ClientChannel::GetFromChannel(Channel::FromC(channel_));
|
625
|
-
GPR_ASSERT(client_channel != nullptr);
|
626
|
-
client_channel->RemoveConnectivityWatcher(watcher_);
|
627
|
-
}
|
628
|
-
|
629
490
|
void XdsClient::ChannelState::SubscribeLocked(const XdsResourceType* type,
|
630
491
|
const XdsResourceName& name) {
|
631
492
|
if (ads_calld_ == nullptr) {
|
@@ -658,6 +519,56 @@ void XdsClient::ChannelState::UnsubscribeLocked(const XdsResourceType* type,
|
|
658
519
|
}
|
659
520
|
}
|
660
521
|
|
522
|
+
void XdsClient::ChannelState::OnConnectivityFailure(absl::Status status) {
|
523
|
+
{
|
524
|
+
MutexLock lock(&xds_client_->mu_);
|
525
|
+
SetChannelStatusLocked(std::move(status));
|
526
|
+
}
|
527
|
+
xds_client_->work_serializer_.DrainQueue();
|
528
|
+
}
|
529
|
+
|
530
|
+
void XdsClient::ChannelState::SetChannelStatusLocked(absl::Status status) {
|
531
|
+
if (shutting_down_) return;
|
532
|
+
status = absl::Status(status.code(), absl::StrCat("xDS channel for server ",
|
533
|
+
server_.server_uri(), ": ",
|
534
|
+
status.message()));
|
535
|
+
gpr_log(GPR_INFO, "[xds_client %p] %s", xds_client(),
|
536
|
+
status.ToString().c_str());
|
537
|
+
// If the node ID is set, append that to the status message that we send to
|
538
|
+
// the watchers, so that it will appear in log messages visible to users.
|
539
|
+
const auto* node = xds_client_->bootstrap_->node();
|
540
|
+
if (node != nullptr) {
|
541
|
+
status = absl::Status(
|
542
|
+
status.code(),
|
543
|
+
absl::StrCat(status.message(),
|
544
|
+
" (node ID:", xds_client_->bootstrap_->node()->id(), ")"));
|
545
|
+
}
|
546
|
+
// Save status in channel, so that we can immediately generate an
|
547
|
+
// error for any new watchers that may be started.
|
548
|
+
status_ = status;
|
549
|
+
// Find all watchers for this channel.
|
550
|
+
std::set<RefCountedPtr<ResourceWatcherInterface>> watchers;
|
551
|
+
for (const auto& a : xds_client_->authority_state_map_) { // authority
|
552
|
+
if (a.second.channel_state != this) continue;
|
553
|
+
for (const auto& t : a.second.resource_map) { // type
|
554
|
+
for (const auto& r : t.second) { // resource id
|
555
|
+
for (const auto& w : r.second.watchers) { // watchers
|
556
|
+
watchers.insert(w.second);
|
557
|
+
}
|
558
|
+
}
|
559
|
+
}
|
560
|
+
}
|
561
|
+
// Enqueue notification for the watchers.
|
562
|
+
xds_client_->work_serializer_.Schedule(
|
563
|
+
[watchers = std::move(watchers), status = std::move(status)]()
|
564
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(xds_client_->work_serializer_) {
|
565
|
+
for (const auto& watcher : watchers) {
|
566
|
+
watcher->OnError(status);
|
567
|
+
}
|
568
|
+
},
|
569
|
+
DEBUG_LOCATION);
|
570
|
+
}
|
571
|
+
|
661
572
|
//
|
662
573
|
// XdsClient::ChannelState::RetryableCall<>
|
663
574
|
//
|
@@ -673,9 +584,6 @@ XdsClient::ChannelState::RetryableCall<T>::RetryableCall(
|
|
673
584
|
.set_jitter(GRPC_XDS_RECONNECT_JITTER)
|
674
585
|
.set_max_backoff(Duration::Seconds(
|
675
586
|
GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS))) {
|
676
|
-
// Closure Initialization
|
677
|
-
GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimer, this,
|
678
|
-
grpc_schedule_on_exec_ctx);
|
679
587
|
StartNewCallLocked();
|
680
588
|
}
|
681
589
|
|
@@ -683,7 +591,10 @@ template <typename T>
|
|
683
591
|
void XdsClient::ChannelState::RetryableCall<T>::Orphan() {
|
684
592
|
shutting_down_ = true;
|
685
593
|
calld_.reset();
|
686
|
-
if (
|
594
|
+
if (timer_handle_.has_value()) {
|
595
|
+
GetDefaultEventEngine()->Cancel(*timer_handle_);
|
596
|
+
timer_handle_.reset();
|
597
|
+
}
|
687
598
|
this->Unref(DEBUG_LOCATION, "RetryableCall+orphaned");
|
688
599
|
}
|
689
600
|
|
@@ -699,13 +610,13 @@ void XdsClient::ChannelState::RetryableCall<T>::OnCallFinishedLocked() {
|
|
699
610
|
template <typename T>
|
700
611
|
void XdsClient::ChannelState::RetryableCall<T>::StartNewCallLocked() {
|
701
612
|
if (shutting_down_) return;
|
702
|
-
GPR_ASSERT(chand_->
|
613
|
+
GPR_ASSERT(chand_->transport_ != nullptr);
|
703
614
|
GPR_ASSERT(calld_ == nullptr);
|
704
615
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
705
|
-
gpr_log(
|
706
|
-
|
707
|
-
|
708
|
-
|
616
|
+
gpr_log(GPR_INFO,
|
617
|
+
"[xds_client %p] xds server %s: start new call from retryable "
|
618
|
+
"call %p",
|
619
|
+
chand()->xds_client(), chand()->server_.server_uri().c_str(), this);
|
709
620
|
}
|
710
621
|
calld_ = MakeOrphanable<T>(
|
711
622
|
this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
|
@@ -715,45 +626,39 @@ template <typename T>
|
|
715
626
|
void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
|
716
627
|
if (shutting_down_) return;
|
717
628
|
const Timestamp next_attempt_time = backoff_.NextAttemptTime();
|
629
|
+
const Duration timeout =
|
630
|
+
std::max(next_attempt_time - Timestamp::Now(), Duration::Zero());
|
718
631
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
719
|
-
Duration timeout =
|
720
|
-
std::max(next_attempt_time - ExecCtx::Get()->Now(), Duration::Zero());
|
721
632
|
gpr_log(GPR_INFO,
|
722
633
|
"[xds_client %p] xds server %s: call attempt failed; "
|
723
634
|
"retry timer will fire in %" PRId64 "ms.",
|
724
|
-
chand()->xds_client(), chand()->server_.server_uri.c_str(),
|
635
|
+
chand()->xds_client(), chand()->server_.server_uri().c_str(),
|
725
636
|
timeout.millis());
|
726
637
|
}
|
727
|
-
|
728
|
-
|
729
|
-
|
638
|
+
timer_handle_ = GetDefaultEventEngine()->RunAfter(
|
639
|
+
timeout,
|
640
|
+
[self = this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start")]() {
|
641
|
+
ApplicationCallbackExecCtx callback_exec_ctx;
|
642
|
+
ExecCtx exec_ctx;
|
643
|
+
self->OnRetryTimer();
|
644
|
+
});
|
730
645
|
}
|
731
646
|
|
732
647
|
template <typename T>
|
733
|
-
void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimer(
|
734
|
-
|
735
|
-
|
736
|
-
|
737
|
-
|
738
|
-
calld->OnRetryTimerLocked(GRPC_ERROR_REF(error));
|
739
|
-
}
|
740
|
-
calld->Unref(DEBUG_LOCATION, "RetryableCall+retry_timer_done");
|
741
|
-
}
|
742
|
-
|
743
|
-
template <typename T>
|
744
|
-
void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimerLocked(
|
745
|
-
grpc_error_handle error) {
|
746
|
-
retry_timer_callback_pending_ = false;
|
747
|
-
if (!shutting_down_ && GRPC_ERROR_IS_NONE(error)) {
|
648
|
+
void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimer() {
|
649
|
+
MutexLock lock(&chand_->xds_client()->mu_);
|
650
|
+
if (timer_handle_.has_value()) {
|
651
|
+
timer_handle_.reset();
|
652
|
+
if (shutting_down_) return;
|
748
653
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
749
654
|
gpr_log(GPR_INFO,
|
750
655
|
"[xds_client %p] xds server %s: retry timer fired (retryable "
|
751
656
|
"call: %p)",
|
752
|
-
chand()->xds_client(), chand()->server_.server_uri.c_str(),
|
657
|
+
chand()->xds_client(), chand()->server_.server_uri().c_str(),
|
658
|
+
this);
|
753
659
|
}
|
754
660
|
StartNewCallLocked();
|
755
661
|
}
|
756
|
-
GRPC_ERROR_UNREF(error);
|
757
662
|
}
|
758
663
|
|
759
664
|
//
|
@@ -768,7 +673,7 @@ absl::Status XdsClient::ChannelState::AdsCallState::AdsResponseParser::
|
|
768
673
|
"[xds_client %p] xds server %s: received ADS response: type_url=%s, "
|
769
674
|
"version=%s, nonce=%s, num_resources=%" PRIuPTR,
|
770
675
|
ads_call_state_->xds_client(),
|
771
|
-
ads_call_state_->chand()->server_.server_uri.c_str(),
|
676
|
+
ads_call_state_->chand()->server_.server_uri().c_str(),
|
772
677
|
fields.type_url.c_str(), fields.version.c_str(), fields.nonce.c_str(),
|
773
678
|
fields.num_resources);
|
774
679
|
}
|
@@ -811,40 +716,61 @@ void UpdateResourceMetadataNacked(const std::string& version,
|
|
811
716
|
} // namespace
|
812
717
|
|
813
718
|
void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
814
|
-
|
815
|
-
absl::string_view serialized_resource) {
|
719
|
+
upb_Arena* arena, size_t idx, absl::string_view type_url,
|
720
|
+
absl::string_view resource_name, absl::string_view serialized_resource) {
|
721
|
+
std::string error_prefix = absl::StrCat(
|
722
|
+
"resource index ", idx, ": ",
|
723
|
+
resource_name.empty() ? "" : absl::StrCat(resource_name, ": "));
|
816
724
|
// Check the type_url of the resource.
|
817
725
|
bool is_v2 = false;
|
818
726
|
if (!result_.type->IsType(type_url, &is_v2)) {
|
819
727
|
result_.errors.emplace_back(
|
820
|
-
absl::StrCat(
|
821
|
-
|
728
|
+
absl::StrCat(error_prefix, "incorrect resource type ", type_url,
|
729
|
+
" (should be ", result_.type_url, ")"));
|
822
730
|
return;
|
823
731
|
}
|
824
732
|
// Parse the resource.
|
825
|
-
|
733
|
+
XdsResourceType::DecodeContext context = {
|
734
|
+
xds_client(), ads_call_state_->chand()->server_, &grpc_xds_client_trace,
|
735
|
+
xds_client()->symtab_.ptr(), arena};
|
736
|
+
XdsResourceType::DecodeResult decode_result =
|
826
737
|
result_.type->Decode(context, serialized_resource, is_v2);
|
827
|
-
|
738
|
+
// If we didn't already have the resource name from the Resource
|
739
|
+
// wrapper, try to get it from the decoding result.
|
740
|
+
if (resource_name.empty()) {
|
741
|
+
if (decode_result.name.has_value()) {
|
742
|
+
resource_name = *decode_result.name;
|
743
|
+
error_prefix =
|
744
|
+
absl::StrCat("resource index ", idx, ": ", resource_name, ": ");
|
745
|
+
} else {
|
746
|
+
// We don't have any way of determining the resource name, so
|
747
|
+
// there's nothing more we can do here.
|
748
|
+
result_.errors.emplace_back(absl::StrCat(
|
749
|
+
error_prefix, decode_result.resource.status().ToString()));
|
750
|
+
return;
|
751
|
+
}
|
752
|
+
}
|
753
|
+
// If decoding failed, make sure we include the error in the NACK.
|
754
|
+
const absl::Status& decode_status = decode_result.resource.status();
|
755
|
+
if (!decode_status.ok()) {
|
828
756
|
result_.errors.emplace_back(
|
829
|
-
absl::StrCat(
|
830
|
-
return;
|
757
|
+
absl::StrCat(error_prefix, decode_status.ToString()));
|
831
758
|
}
|
832
759
|
// Check the resource name.
|
833
|
-
auto
|
834
|
-
xds_client()->ParseXdsResourceName(
|
835
|
-
if (!
|
836
|
-
result_.errors.emplace_back(
|
837
|
-
|
838
|
-
result->name, "\""));
|
760
|
+
auto parsed_resource_name =
|
761
|
+
xds_client()->ParseXdsResourceName(resource_name, result_.type);
|
762
|
+
if (!parsed_resource_name.ok()) {
|
763
|
+
result_.errors.emplace_back(
|
764
|
+
absl::StrCat(error_prefix, "Cannot parse xDS resource name"));
|
839
765
|
return;
|
840
766
|
}
|
841
767
|
// Cancel resource-does-not-exist timer, if needed.
|
842
768
|
auto timer_it = ads_call_state_->state_map_.find(result_.type);
|
843
769
|
if (timer_it != ads_call_state_->state_map_.end()) {
|
844
|
-
auto it =
|
845
|
-
|
770
|
+
auto it = timer_it->second.subscribed_resources.find(
|
771
|
+
parsed_resource_name->authority);
|
846
772
|
if (it != timer_it->second.subscribed_resources.end()) {
|
847
|
-
auto res_it = it->second.find(
|
773
|
+
auto res_it = it->second.find(parsed_resource_name->key);
|
848
774
|
if (res_it != it->second.end()) {
|
849
775
|
res_it->second->MaybeCancelTimer();
|
850
776
|
}
|
@@ -852,7 +778,7 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
852
778
|
}
|
853
779
|
// Lookup the authority in the cache.
|
854
780
|
auto authority_it =
|
855
|
-
xds_client()->authority_state_map_.find(
|
781
|
+
xds_client()->authority_state_map_.find(parsed_resource_name->authority);
|
856
782
|
if (authority_it == xds_client()->authority_state_map_.end()) {
|
857
783
|
return; // Skip resource -- we don't have a subscription for it.
|
858
784
|
}
|
@@ -864,14 +790,15 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
864
790
|
}
|
865
791
|
auto& type_map = type_it->second;
|
866
792
|
// Found type, so look up resource key.
|
867
|
-
auto it = type_map.find(
|
793
|
+
auto it = type_map.find(parsed_resource_name->key);
|
868
794
|
if (it == type_map.end()) {
|
869
795
|
return; // Skip resource -- we don't have a subscription for it.
|
870
796
|
}
|
871
797
|
ResourceState& resource_state = it->second;
|
872
798
|
// If needed, record that we've seen this resource.
|
873
799
|
if (result_.type->AllResourcesRequiredInSotW()) {
|
874
|
-
result_.resources_seen[
|
800
|
+
result_.resources_seen[parsed_resource_name->authority].insert(
|
801
|
+
parsed_resource_name->key);
|
875
802
|
}
|
876
803
|
// If we previously ignored the resource's deletion, log that we're
|
877
804
|
// now re-adding it.
|
@@ -880,21 +807,18 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
880
807
|
"[xds_client %p] xds server %s: server returned new version of "
|
881
808
|
"resource for which we previously ignored a deletion: type %s "
|
882
809
|
"name %s",
|
883
|
-
xds_client(),
|
884
|
-
|
810
|
+
xds_client(),
|
811
|
+
ads_call_state_->chand()->server_.server_uri().c_str(),
|
812
|
+
std::string(type_url).c_str(), std::string(resource_name).c_str());
|
885
813
|
resource_state.ignored_deletion = false;
|
886
814
|
}
|
887
815
|
// Update resource state based on whether the resource is valid.
|
888
|
-
if (!
|
889
|
-
result_.errors.emplace_back(absl::StrCat(
|
890
|
-
"resource index ", idx, ": ", result->name,
|
891
|
-
": validation error: ", result->resource.status().ToString()));
|
816
|
+
if (!decode_status.ok()) {
|
892
817
|
xds_client()->NotifyWatchersOnErrorLocked(
|
893
818
|
resource_state.watchers,
|
894
|
-
absl::UnavailableError(
|
895
|
-
"invalid resource: ",
|
896
|
-
UpdateResourceMetadataNacked(result_.version,
|
897
|
-
result->resource.status().ToString(),
|
819
|
+
absl::UnavailableError(
|
820
|
+
absl::StrCat("invalid resource: ", decode_status.ToString())));
|
821
|
+
UpdateResourceMetadataNacked(result_.version, decode_status.ToString(),
|
898
822
|
update_time_, &resource_state.meta);
|
899
823
|
return;
|
900
824
|
}
|
@@ -903,16 +827,17 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
903
827
|
// If it didn't change, ignore it.
|
904
828
|
if (resource_state.resource != nullptr &&
|
905
829
|
result_.type->ResourcesEqual(resource_state.resource.get(),
|
906
|
-
|
830
|
+
decode_result.resource->get())) {
|
907
831
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
908
832
|
gpr_log(GPR_INFO,
|
909
833
|
"[xds_client %p] %s resource %s identical to current, ignoring.",
|
910
|
-
xds_client(), result_.type_url.c_str(),
|
834
|
+
xds_client(), result_.type_url.c_str(),
|
835
|
+
std::string(resource_name).c_str());
|
911
836
|
}
|
912
837
|
return;
|
913
838
|
}
|
914
839
|
// Update the resource state.
|
915
|
-
resource_state.resource = std::move(*
|
840
|
+
resource_state.resource = std::move(*decode_result.resource);
|
916
841
|
resource_state.meta = CreateResourceMetadataAcked(
|
917
842
|
std::string(serialized_resource), result_.version, update_time_);
|
918
843
|
// Notify watchers.
|
@@ -930,6 +855,12 @@ void XdsClient::ChannelState::AdsCallState::AdsResponseParser::ParseResource(
|
|
930
855
|
DEBUG_LOCATION);
|
931
856
|
}
|
932
857
|
|
858
|
+
void XdsClient::ChannelState::AdsCallState::AdsResponseParser::
|
859
|
+
ResourceWrapperParsingFailed(size_t idx) {
|
860
|
+
result_.errors.emplace_back(absl::StrCat(
|
861
|
+
"resource index ", idx, ": Can't decode Resource proto wrapper"));
|
862
|
+
}
|
863
|
+
|
933
864
|
//
|
934
865
|
// XdsClient::ChannelState::AdsCallState
|
935
866
|
//
|
@@ -941,51 +872,30 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
|
|
941
872
|
? "AdsCallState"
|
942
873
|
: nullptr),
|
943
874
|
parent_(std::move(parent)) {
|
944
|
-
// Init the ADS call. Note that the call will progress every time there's
|
945
|
-
// activity in xds_client()->interested_parties_, which is comprised of
|
946
|
-
// the polling entities from client_channel.
|
947
875
|
GPR_ASSERT(xds_client() != nullptr);
|
948
|
-
//
|
876
|
+
// Init the ADS call.
|
949
877
|
const char* method =
|
950
878
|
chand()->server_.ShouldUseV3()
|
951
879
|
? "/envoy.service.discovery.v3.AggregatedDiscoveryService/"
|
952
880
|
"StreamAggregatedResources"
|
953
881
|
: "/envoy.service.discovery.v2.AggregatedDiscoveryService/"
|
954
882
|
"StreamAggregatedResources";
|
955
|
-
call_ =
|
956
|
-
|
957
|
-
|
958
|
-
|
959
|
-
|
883
|
+
call_ = chand()->transport_->CreateStreamingCall(
|
884
|
+
method, absl::make_unique<StreamEventHandler>(
|
885
|
+
// Passing the initial ref here. This ref will go away when
|
886
|
+
// the StreamEventHandler is destroyed.
|
887
|
+
RefCountedPtr<AdsCallState>(this)));
|
960
888
|
GPR_ASSERT(call_ != nullptr);
|
961
|
-
// Init data associated with the call.
|
962
|
-
grpc_metadata_array_init(&initial_metadata_recv_);
|
963
|
-
grpc_metadata_array_init(&trailing_metadata_recv_);
|
964
889
|
// Start the call.
|
965
890
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
966
891
|
gpr_log(GPR_INFO,
|
967
892
|
"[xds_client %p] xds server %s: starting ADS call "
|
968
893
|
"(calld: %p, call: %p)",
|
969
|
-
xds_client(), chand()->server_.server_uri.c_str(), this,
|
970
|
-
|
971
|
-
|
972
|
-
|
973
|
-
|
974
|
-
memset(ops, 0, sizeof(ops));
|
975
|
-
// Op: send initial metadata.
|
976
|
-
grpc_op* op = ops;
|
977
|
-
op->op = GRPC_OP_SEND_INITIAL_METADATA;
|
978
|
-
op->data.send_initial_metadata.count = 0;
|
979
|
-
op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY |
|
980
|
-
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
|
981
|
-
op->reserved = nullptr;
|
982
|
-
op++;
|
983
|
-
call_error = grpc_call_start_batch_and_execute(
|
984
|
-
call_, ops, static_cast<size_t>(op - ops), nullptr);
|
985
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
986
|
-
// Op: send request message.
|
987
|
-
GRPC_CLOSURE_INIT(&on_request_sent_, OnRequestSent, this,
|
988
|
-
grpc_schedule_on_exec_ctx);
|
894
|
+
xds_client(), chand()->server_.server_uri().c_str(), this,
|
895
|
+
call_.get());
|
896
|
+
}
|
897
|
+
// If this is a reconnect, add any necessary subscriptions from what's
|
898
|
+
// already in the cache.
|
989
899
|
for (const auto& a : xds_client()->authority_state_map_) {
|
990
900
|
const std::string& authority = a.first;
|
991
901
|
// Skip authorities that are not using this xDS channel.
|
@@ -998,120 +908,47 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
|
|
998
908
|
}
|
999
909
|
}
|
1000
910
|
}
|
911
|
+
// Send initial message if we added any subscriptions above.
|
1001
912
|
for (const auto& p : state_map_) {
|
1002
913
|
SendMessageLocked(p.first);
|
1003
914
|
}
|
1004
|
-
// Op: recv initial metadata.
|
1005
|
-
op = ops;
|
1006
|
-
op->op = GRPC_OP_RECV_INITIAL_METADATA;
|
1007
|
-
op->data.recv_initial_metadata.recv_initial_metadata =
|
1008
|
-
&initial_metadata_recv_;
|
1009
|
-
op->flags = 0;
|
1010
|
-
op->reserved = nullptr;
|
1011
|
-
op++;
|
1012
|
-
// Op: recv response.
|
1013
|
-
op->op = GRPC_OP_RECV_MESSAGE;
|
1014
|
-
op->data.recv_message.recv_message = &recv_message_payload_;
|
1015
|
-
op->flags = 0;
|
1016
|
-
op->reserved = nullptr;
|
1017
|
-
op++;
|
1018
|
-
Ref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked").release();
|
1019
|
-
GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
|
1020
|
-
grpc_schedule_on_exec_ctx);
|
1021
|
-
call_error = grpc_call_start_batch_and_execute(
|
1022
|
-
call_, ops, static_cast<size_t>(op - ops), &on_response_received_);
|
1023
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1024
|
-
// Op: recv server status.
|
1025
|
-
op = ops;
|
1026
|
-
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
|
1027
|
-
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
|
1028
|
-
op->data.recv_status_on_client.status = &status_code_;
|
1029
|
-
op->data.recv_status_on_client.status_details = &status_details_;
|
1030
|
-
op->flags = 0;
|
1031
|
-
op->reserved = nullptr;
|
1032
|
-
op++;
|
1033
|
-
// This callback signals the end of the call, so it relies on the initial
|
1034
|
-
// ref instead of a new ref. When it's invoked, it's the initial ref that is
|
1035
|
-
// unreffed.
|
1036
|
-
GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
|
1037
|
-
grpc_schedule_on_exec_ctx);
|
1038
|
-
call_error = grpc_call_start_batch_and_execute(
|
1039
|
-
call_, ops, static_cast<size_t>(op - ops), &on_status_received_);
|
1040
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1041
|
-
}
|
1042
|
-
|
1043
|
-
XdsClient::ChannelState::AdsCallState::~AdsCallState() {
|
1044
|
-
grpc_metadata_array_destroy(&initial_metadata_recv_);
|
1045
|
-
grpc_metadata_array_destroy(&trailing_metadata_recv_);
|
1046
|
-
grpc_byte_buffer_destroy(send_message_payload_);
|
1047
|
-
grpc_byte_buffer_destroy(recv_message_payload_);
|
1048
|
-
grpc_slice_unref_internal(status_details_);
|
1049
|
-
GPR_ASSERT(call_ != nullptr);
|
1050
|
-
grpc_call_unref(call_);
|
1051
915
|
}
|
1052
916
|
|
1053
917
|
void XdsClient::ChannelState::AdsCallState::Orphan() {
|
1054
|
-
GPR_ASSERT(call_ != nullptr);
|
1055
|
-
// If we are here because xds_client wants to cancel the call,
|
1056
|
-
// on_status_received_ will complete the cancellation and clean up. Otherwise,
|
1057
|
-
// we are here because xds_client has to orphan a failed call, then the
|
1058
|
-
// following cancellation will be a no-op.
|
1059
|
-
grpc_call_cancel_internal(call_);
|
1060
918
|
state_map_.clear();
|
1061
|
-
// Note that the initial ref is
|
1062
|
-
//
|
919
|
+
// Note that the initial ref is held by the StreamEventHandler, which
|
920
|
+
// will be destroyed when call_ is destroyed, which may not happen
|
921
|
+
// here, since there may be other refs held to call_ by internal callbacks.
|
922
|
+
call_.reset();
|
1063
923
|
}
|
1064
924
|
|
1065
925
|
void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
|
1066
926
|
const XdsResourceType* type)
|
1067
927
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_) {
|
1068
928
|
// Buffer message sending if an existing message is in flight.
|
1069
|
-
if (
|
929
|
+
if (send_message_pending_) {
|
1070
930
|
buffered_requests_.insert(type);
|
1071
931
|
return;
|
1072
932
|
}
|
1073
933
|
auto& state = state_map_[type];
|
1074
|
-
|
1075
|
-
request_payload_slice = xds_client()->api_.CreateAdsRequest(
|
934
|
+
std::string serialized_message = xds_client()->api_.CreateAdsRequest(
|
1076
935
|
chand()->server_,
|
1077
936
|
chand()->server_.ShouldUseV3() ? type->type_url() : type->v2_type_url(),
|
1078
937
|
chand()->resource_type_version_map_[type], state.nonce,
|
1079
|
-
ResourceNamesForRequest(type),
|
1080
|
-
!sent_initial_message_);
|
938
|
+
ResourceNamesForRequest(type), state.status, !sent_initial_message_);
|
1081
939
|
sent_initial_message_ = true;
|
1082
940
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1083
941
|
gpr_log(GPR_INFO,
|
1084
942
|
"[xds_client %p] xds server %s: sending ADS request: type=%s "
|
1085
943
|
"version=%s nonce=%s error=%s",
|
1086
|
-
xds_client(), chand()->server_.server_uri.c_str(),
|
944
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1087
945
|
std::string(type->type_url()).c_str(),
|
1088
946
|
chand()->resource_type_version_map_[type].c_str(),
|
1089
|
-
state.nonce.c_str(),
|
1090
|
-
}
|
1091
|
-
GRPC_ERROR_UNREF(state.error);
|
1092
|
-
state.error = GRPC_ERROR_NONE;
|
1093
|
-
// Create message payload.
|
1094
|
-
send_message_payload_ =
|
1095
|
-
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
|
1096
|
-
grpc_slice_unref_internal(request_payload_slice);
|
1097
|
-
// Send the message.
|
1098
|
-
grpc_op op;
|
1099
|
-
memset(&op, 0, sizeof(op));
|
1100
|
-
op.op = GRPC_OP_SEND_MESSAGE;
|
1101
|
-
op.data.send_message.send_message = send_message_payload_;
|
1102
|
-
Ref(DEBUG_LOCATION, "ADS+OnRequestSentLocked").release();
|
1103
|
-
GRPC_CLOSURE_INIT(&on_request_sent_, OnRequestSent, this,
|
1104
|
-
grpc_schedule_on_exec_ctx);
|
1105
|
-
grpc_call_error call_error =
|
1106
|
-
grpc_call_start_batch_and_execute(call_, &op, 1, &on_request_sent_);
|
1107
|
-
if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
|
1108
|
-
gpr_log(GPR_ERROR,
|
1109
|
-
"[xds_client %p] xds server %s: error starting ADS send_message "
|
1110
|
-
"batch on calld=%p: call_error=%d",
|
1111
|
-
xds_client(), chand()->server_.server_uri.c_str(), this,
|
1112
|
-
call_error);
|
1113
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
947
|
+
state.nonce.c_str(), state.status.ToString().c_str());
|
1114
948
|
}
|
949
|
+
state.status = absl::OkStatus();
|
950
|
+
call_->SendMessage(std::move(serialized_message));
|
951
|
+
send_message_pending_ = true;
|
1115
952
|
}
|
1116
953
|
|
1117
954
|
void XdsClient::ChannelState::AdsCallState::SubscribeLocked(
|
@@ -1142,22 +979,10 @@ bool XdsClient::ChannelState::AdsCallState::HasSubscribedResources() const {
|
|
1142
979
|
return false;
|
1143
980
|
}
|
1144
981
|
|
1145
|
-
void XdsClient::ChannelState::AdsCallState::OnRequestSent(
|
1146
|
-
|
1147
|
-
|
1148
|
-
{
|
1149
|
-
MutexLock lock(&ads_calld->xds_client()->mu_);
|
1150
|
-
ads_calld->OnRequestSentLocked(GRPC_ERROR_REF(error));
|
1151
|
-
}
|
1152
|
-
ads_calld->Unref(DEBUG_LOCATION, "ADS+OnRequestSentLocked");
|
1153
|
-
}
|
1154
|
-
|
1155
|
-
void XdsClient::ChannelState::AdsCallState::OnRequestSentLocked(
|
1156
|
-
grpc_error_handle error) {
|
1157
|
-
if (IsCurrentCallOnChannel() && GRPC_ERROR_IS_NONE(error)) {
|
1158
|
-
// Clean up the sent message.
|
1159
|
-
grpc_byte_buffer_destroy(send_message_payload_);
|
1160
|
-
send_message_payload_ = nullptr;
|
982
|
+
void XdsClient::ChannelState::AdsCallState::OnRequestSent(bool ok) {
|
983
|
+
MutexLock lock(&xds_client()->mu_);
|
984
|
+
send_message_pending_ = false;
|
985
|
+
if (ok && IsCurrentCallOnChannel()) {
|
1161
986
|
// Continue to send another pending message if any.
|
1162
987
|
// TODO(roth): The current code to handle buffered messages has the
|
1163
988
|
// advantage of sending only the most recent list of resource names for
|
@@ -1173,184 +998,135 @@ void XdsClient::ChannelState::AdsCallState::OnRequestSentLocked(
|
|
1173
998
|
buffered_requests_.erase(it);
|
1174
999
|
}
|
1175
1000
|
}
|
1176
|
-
GRPC_ERROR_UNREF(error);
|
1177
1001
|
}
|
1178
1002
|
|
1179
|
-
void XdsClient::ChannelState::AdsCallState::
|
1180
|
-
|
1181
|
-
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
|
1182
|
-
bool done;
|
1003
|
+
void XdsClient::ChannelState::AdsCallState::OnRecvMessage(
|
1004
|
+
absl::string_view payload) {
|
1183
1005
|
{
|
1184
|
-
MutexLock lock(&
|
1185
|
-
|
1186
|
-
|
1187
|
-
|
1188
|
-
|
1189
|
-
|
1190
|
-
|
1191
|
-
|
1192
|
-
|
1193
|
-
|
1194
|
-
|
1195
|
-
|
1196
|
-
|
1197
|
-
|
1198
|
-
|
1199
|
-
|
1200
|
-
|
1201
|
-
|
1202
|
-
|
1203
|
-
|
1204
|
-
|
1205
|
-
|
1206
|
-
|
1207
|
-
|
1208
|
-
|
1209
|
-
|
1210
|
-
|
1211
|
-
|
1212
|
-
|
1213
|
-
|
1214
|
-
|
1215
|
-
|
1216
|
-
|
1217
|
-
|
1218
|
-
|
1219
|
-
|
1220
|
-
|
1221
|
-
|
1222
|
-
|
1223
|
-
|
1224
|
-
|
1225
|
-
|
1226
|
-
|
1227
|
-
|
1228
|
-
|
1229
|
-
|
1230
|
-
|
1231
|
-
|
1232
|
-
|
1233
|
-
|
1234
|
-
|
1235
|
-
|
1236
|
-
|
1237
|
-
|
1238
|
-
|
1239
|
-
|
1240
|
-
|
1241
|
-
|
1242
|
-
|
1243
|
-
|
1244
|
-
|
1245
|
-
|
1246
|
-
|
1247
|
-
|
1248
|
-
|
1249
|
-
|
1250
|
-
|
1251
|
-
|
1252
|
-
|
1253
|
-
|
1254
|
-
|
1255
|
-
|
1256
|
-
|
1257
|
-
|
1258
|
-
|
1259
|
-
// does not exist. For that case, we rely on the request timeout
|
1260
|
-
// instead.
|
1261
|
-
if (resource_state.resource == nullptr) continue;
|
1262
|
-
if (chand()->server_.IgnoreResourceDeletion()) {
|
1263
|
-
if (!resource_state.ignored_deletion) {
|
1264
|
-
gpr_log(GPR_ERROR,
|
1265
|
-
"[xds_client %p] xds server %s: ignoring deletion "
|
1266
|
-
"for resource type %s name %s",
|
1267
|
-
xds_client(), chand()->server_.server_uri.c_str(),
|
1268
|
-
result.type_url.c_str(),
|
1269
|
-
XdsClient::ConstructFullXdsResourceName(
|
1270
|
-
authority, result.type_url.c_str(), resource_key)
|
1271
|
-
.c_str());
|
1272
|
-
resource_state.ignored_deletion = true;
|
1006
|
+
MutexLock lock(&xds_client()->mu_);
|
1007
|
+
if (!IsCurrentCallOnChannel()) return;
|
1008
|
+
// Parse and validate the response.
|
1009
|
+
AdsResponseParser parser(this);
|
1010
|
+
absl::Status status =
|
1011
|
+
xds_client()->api_.ParseAdsResponse(chand()->server_, payload, &parser);
|
1012
|
+
if (!status.ok()) {
|
1013
|
+
// Ignore unparsable response.
|
1014
|
+
gpr_log(GPR_ERROR,
|
1015
|
+
"[xds_client %p] xds server %s: error parsing ADS response (%s) "
|
1016
|
+
"-- ignoring",
|
1017
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1018
|
+
status.ToString().c_str());
|
1019
|
+
} else {
|
1020
|
+
seen_response_ = true;
|
1021
|
+
chand()->status_ = absl::OkStatus();
|
1022
|
+
AdsResponseParser::Result result = parser.TakeResult();
|
1023
|
+
// Update nonce.
|
1024
|
+
auto& state = state_map_[result.type];
|
1025
|
+
state.nonce = result.nonce;
|
1026
|
+
// If we got an error, set state.status so that we'll NACK the update.
|
1027
|
+
if (!result.errors.empty()) {
|
1028
|
+
state.status = absl::UnavailableError(
|
1029
|
+
absl::StrCat("xDS response validation errors: [",
|
1030
|
+
absl::StrJoin(result.errors, "; "), "]"));
|
1031
|
+
gpr_log(GPR_ERROR,
|
1032
|
+
"[xds_client %p] xds server %s: ADS response invalid for "
|
1033
|
+
"resource "
|
1034
|
+
"type %s version %s, will NACK: nonce=%s status=%s",
|
1035
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1036
|
+
result.type_url.c_str(), result.version.c_str(),
|
1037
|
+
state.nonce.c_str(), state.status.ToString().c_str());
|
1038
|
+
}
|
1039
|
+
// Delete resources not seen in update if needed.
|
1040
|
+
if (result.type->AllResourcesRequiredInSotW()) {
|
1041
|
+
for (auto& a : xds_client()->authority_state_map_) {
|
1042
|
+
const std::string& authority = a.first;
|
1043
|
+
AuthorityState& authority_state = a.second;
|
1044
|
+
// Skip authorities that are not using this xDS channel.
|
1045
|
+
if (authority_state.channel_state != chand()) continue;
|
1046
|
+
auto seen_authority_it = result.resources_seen.find(authority);
|
1047
|
+
// Find this resource type.
|
1048
|
+
auto type_it = authority_state.resource_map.find(result.type);
|
1049
|
+
if (type_it == authority_state.resource_map.end()) continue;
|
1050
|
+
// Iterate over resource ids.
|
1051
|
+
for (auto& r : type_it->second) {
|
1052
|
+
const XdsResourceKey& resource_key = r.first;
|
1053
|
+
ResourceState& resource_state = r.second;
|
1054
|
+
if (seen_authority_it == result.resources_seen.end() ||
|
1055
|
+
seen_authority_it->second.find(resource_key) ==
|
1056
|
+
seen_authority_it->second.end()) {
|
1057
|
+
// If the resource was newly requested but has not yet been
|
1058
|
+
// received, we don't want to generate an error for the
|
1059
|
+
// watchers, because this ADS response may be in reaction to an
|
1060
|
+
// earlier request that did not yet request the new resource, so
|
1061
|
+
// its absence from the response does not necessarily indicate
|
1062
|
+
// that the resource does not exist. For that case, we rely on
|
1063
|
+
// the request timeout instead.
|
1064
|
+
if (resource_state.resource == nullptr) continue;
|
1065
|
+
if (chand()->server_.IgnoreResourceDeletion()) {
|
1066
|
+
if (!resource_state.ignored_deletion) {
|
1067
|
+
gpr_log(GPR_ERROR,
|
1068
|
+
"[xds_client %p] xds server %s: ignoring deletion "
|
1069
|
+
"for resource type %s name %s",
|
1070
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1071
|
+
result.type_url.c_str(),
|
1072
|
+
XdsClient::ConstructFullXdsResourceName(
|
1073
|
+
authority, result.type_url.c_str(), resource_key)
|
1074
|
+
.c_str());
|
1075
|
+
resource_state.ignored_deletion = true;
|
1076
|
+
}
|
1077
|
+
} else {
|
1078
|
+
resource_state.resource.reset();
|
1079
|
+
xds_client()->NotifyWatchersOnResourceDoesNotExist(
|
1080
|
+
resource_state.watchers);
|
1273
1081
|
}
|
1274
|
-
} else {
|
1275
|
-
resource_state.resource.reset();
|
1276
|
-
xds_client()->NotifyWatchersOnResourceDoesNotExist(
|
1277
|
-
resource_state.watchers);
|
1278
1082
|
}
|
1279
1083
|
}
|
1280
1084
|
}
|
1281
1085
|
}
|
1282
|
-
|
1283
|
-
|
1284
|
-
|
1285
|
-
|
1286
|
-
|
1287
|
-
|
1288
|
-
|
1289
|
-
|
1290
|
-
|
1291
|
-
|
1086
|
+
// If we had valid resources, update the version.
|
1087
|
+
if (result.have_valid_resources) {
|
1088
|
+
chand()->resource_type_version_map_[result.type] =
|
1089
|
+
std::move(result.version);
|
1090
|
+
// Start load reporting if needed.
|
1091
|
+
auto& lrs_call = chand()->lrs_calld_;
|
1092
|
+
if (lrs_call != nullptr) {
|
1093
|
+
LrsCallState* lrs_calld = lrs_call->calld();
|
1094
|
+
if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
|
1095
|
+
}
|
1292
1096
|
}
|
1097
|
+
// Send ACK or NACK.
|
1098
|
+
SendMessageLocked(result.type);
|
1293
1099
|
}
|
1294
|
-
|
1295
|
-
|
1296
|
-
}
|
1297
|
-
if (xds_client()->shutting_down_) return true;
|
1298
|
-
// Keep listening for updates.
|
1299
|
-
grpc_op op;
|
1300
|
-
memset(&op, 0, sizeof(op));
|
1301
|
-
op.op = GRPC_OP_RECV_MESSAGE;
|
1302
|
-
op.data.recv_message.recv_message = &recv_message_payload_;
|
1303
|
-
op.flags = 0;
|
1304
|
-
op.reserved = nullptr;
|
1305
|
-
GPR_ASSERT(call_ != nullptr);
|
1306
|
-
// Reuse the "ADS+OnResponseReceivedLocked" ref taken in ctor.
|
1307
|
-
const grpc_call_error call_error =
|
1308
|
-
grpc_call_start_batch_and_execute(call_, &op, 1, &on_response_received_);
|
1309
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1310
|
-
return false;
|
1100
|
+
}
|
1101
|
+
xds_client()->work_serializer_.DrainQueue();
|
1311
1102
|
}
|
1312
1103
|
|
1313
1104
|
void XdsClient::ChannelState::AdsCallState::OnStatusReceived(
|
1314
|
-
|
1315
|
-
AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
|
1105
|
+
absl::Status status) {
|
1316
1106
|
{
|
1317
|
-
MutexLock lock(&
|
1318
|
-
|
1319
|
-
|
1320
|
-
|
1321
|
-
|
1322
|
-
|
1323
|
-
|
1324
|
-
|
1325
|
-
|
1326
|
-
|
1327
|
-
|
1328
|
-
|
1329
|
-
|
1330
|
-
|
1331
|
-
|
1332
|
-
|
1333
|
-
call_, status_code_, status_details,
|
1334
|
-
grpc_error_std_string(error).c_str());
|
1335
|
-
gpr_free(status_details);
|
1336
|
-
}
|
1337
|
-
// Ignore status from a stale call.
|
1338
|
-
if (IsCurrentCallOnChannel()) {
|
1339
|
-
// Try to restart the call.
|
1340
|
-
parent_->OnCallFinishedLocked();
|
1341
|
-
// Send error to all watchers.
|
1342
|
-
xds_client()->NotifyOnErrorLocked(absl::UnavailableError(absl::StrFormat(
|
1343
|
-
"xDS call failed: xDS server: %s, ADS call status code=%d, "
|
1344
|
-
"details='%s', error='%s'",
|
1345
|
-
chand()->server_.server_uri, status_code_,
|
1346
|
-
StringViewFromSlice(status_details_), grpc_error_std_string(error))));
|
1107
|
+
MutexLock lock(&xds_client()->mu_);
|
1108
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1109
|
+
gpr_log(GPR_INFO,
|
1110
|
+
"[xds_client %p] xds server %s: ADS call status received "
|
1111
|
+
"(chand=%p, ads_calld=%p, call=%p): %s",
|
1112
|
+
xds_client(), chand()->server_.server_uri().c_str(), chand(),
|
1113
|
+
this, call_.get(), status.ToString().c_str());
|
1114
|
+
}
|
1115
|
+
// Ignore status from a stale call.
|
1116
|
+
if (IsCurrentCallOnChannel()) {
|
1117
|
+
// Try to restart the call.
|
1118
|
+
parent_->OnCallFinishedLocked();
|
1119
|
+
// Send error to all watchers for the channel.
|
1120
|
+
chand()->SetChannelStatusLocked(absl::UnavailableError(absl::StrFormat(
|
1121
|
+
"xDS call failed; status: %s", status.ToString().c_str())));
|
1122
|
+
}
|
1347
1123
|
}
|
1348
|
-
|
1124
|
+
xds_client()->work_serializer_.DrainQueue();
|
1349
1125
|
}
|
1350
1126
|
|
1351
1127
|
bool XdsClient::ChannelState::AdsCallState::IsCurrentCallOnChannel() const {
|
1352
|
-
// If the retryable ADS call is null (which only happens when the xds
|
1353
|
-
// is shutting down), all the ADS calls are stale.
|
1128
|
+
// If the retryable ADS call is null (which only happens when the xds
|
1129
|
+
// channel is shutting down), all the ADS calls are stale.
|
1354
1130
|
if (chand()->ads_calld_ == nullptr) return false;
|
1355
1131
|
return this == chand()->ads_calld_->calld();
|
1356
1132
|
}
|
@@ -1380,38 +1156,30 @@ XdsClient::ChannelState::AdsCallState::ResourceNamesForRequest(
|
|
1380
1156
|
//
|
1381
1157
|
|
1382
1158
|
void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() {
|
1383
|
-
if (
|
1384
|
-
|
1159
|
+
if (timer_handle_.has_value() &&
|
1160
|
+
GetDefaultEventEngine()->Cancel(*timer_handle_)) {
|
1161
|
+
timer_handle_.reset();
|
1162
|
+
Unref(DEBUG_LOCATION, "Orphan");
|
1385
1163
|
}
|
1386
1164
|
}
|
1387
1165
|
|
1388
1166
|
void XdsClient::ChannelState::LrsCallState::Reporter::
|
1389
1167
|
ScheduleNextReportLocked() {
|
1390
|
-
|
1391
|
-
|
1392
|
-
|
1393
|
-
|
1394
|
-
|
1395
|
-
|
1396
|
-
|
1397
|
-
void* arg, grpc_error_handle error) {
|
1398
|
-
Reporter* self = static_cast<Reporter*>(arg);
|
1399
|
-
bool done;
|
1400
|
-
{
|
1401
|
-
MutexLock lock(&self->xds_client()->mu_);
|
1402
|
-
done = self->OnNextReportTimerLocked(GRPC_ERROR_REF(error));
|
1403
|
-
}
|
1404
|
-
if (done) self->Unref(DEBUG_LOCATION, "Reporter+timer");
|
1168
|
+
timer_handle_ = GetDefaultEventEngine()->RunAfter(report_interval_, [this]() {
|
1169
|
+
ApplicationCallbackExecCtx callback_exec_ctx;
|
1170
|
+
ExecCtx exec_ctx;
|
1171
|
+
if (OnNextReportTimer()) {
|
1172
|
+
Unref(DEBUG_LOCATION, "OnNextReportTimer()");
|
1173
|
+
}
|
1174
|
+
});
|
1405
1175
|
}
|
1406
1176
|
|
1407
|
-
bool XdsClient::ChannelState::LrsCallState::Reporter::
|
1408
|
-
|
1409
|
-
|
1410
|
-
if (!
|
1411
|
-
|
1412
|
-
|
1413
|
-
}
|
1414
|
-
return SendReportLocked();
|
1177
|
+
bool XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimer() {
|
1178
|
+
MutexLock lock(&xds_client()->mu_);
|
1179
|
+
timer_handle_.reset();
|
1180
|
+
if (!IsCurrentReporterOnCall()) return true;
|
1181
|
+
SendReportLocked();
|
1182
|
+
return false;
|
1415
1183
|
}
|
1416
1184
|
|
1417
1185
|
namespace {
|
@@ -1442,7 +1210,7 @@ bool XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
|
|
1442
1210
|
last_report_counters_were_zero_ = LoadReportCountersAreZero(snapshot);
|
1443
1211
|
if (old_val && last_report_counters_were_zero_) {
|
1444
1212
|
auto it = xds_client()->xds_load_report_server_map_.find(
|
1445
|
-
parent_->chand()->server_);
|
1213
|
+
&parent_->chand()->server_);
|
1446
1214
|
if (it == xds_client()->xds_load_report_server_map_.end() ||
|
1447
1215
|
it->second.load_report_map.empty()) {
|
1448
1216
|
it->second.channel_state->StopLrsCallLocked();
|
@@ -1451,65 +1219,34 @@ bool XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
|
|
1451
1219
|
ScheduleNextReportLocked();
|
1452
1220
|
return false;
|
1453
1221
|
}
|
1454
|
-
//
|
1455
|
-
|
1222
|
+
// Send a request that contains the snapshot.
|
1223
|
+
std::string serialized_payload =
|
1456
1224
|
xds_client()->api_.CreateLrsRequest(std::move(snapshot));
|
1457
|
-
parent_->
|
1458
|
-
|
1459
|
-
grpc_slice_unref_internal(request_payload_slice);
|
1460
|
-
// Send the report.
|
1461
|
-
grpc_op op;
|
1462
|
-
memset(&op, 0, sizeof(op));
|
1463
|
-
op.op = GRPC_OP_SEND_MESSAGE;
|
1464
|
-
op.data.send_message.send_message = parent_->send_message_payload_;
|
1465
|
-
grpc_call_error call_error = grpc_call_start_batch_and_execute(
|
1466
|
-
parent_->call_, &op, 1, &on_report_done_);
|
1467
|
-
if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
|
1468
|
-
gpr_log(GPR_ERROR,
|
1469
|
-
"[xds_client %p] xds server %s: error starting LRS send_message "
|
1470
|
-
"batch on calld=%p: call_error=%d",
|
1471
|
-
xds_client(), parent_->chand()->server_.server_uri.c_str(), this,
|
1472
|
-
call_error);
|
1473
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1474
|
-
}
|
1225
|
+
parent_->call_->SendMessage(std::move(serialized_payload));
|
1226
|
+
parent_->send_message_pending_ = true;
|
1475
1227
|
return false;
|
1476
1228
|
}
|
1477
1229
|
|
1478
|
-
void XdsClient::ChannelState::LrsCallState::Reporter::
|
1479
|
-
|
1480
|
-
|
1481
|
-
|
1482
|
-
|
1483
|
-
|
1484
|
-
|
1485
|
-
|
1486
|
-
if (done) self->Unref(DEBUG_LOCATION, "Reporter+report_done");
|
1487
|
-
}
|
1488
|
-
|
1489
|
-
bool XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked(
|
1490
|
-
grpc_error_handle error) {
|
1491
|
-
grpc_byte_buffer_destroy(parent_->send_message_payload_);
|
1492
|
-
parent_->send_message_payload_ = nullptr;
|
1230
|
+
void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked() {
|
1231
|
+
// If a reporter starts a send_message op, then the reporting interval
|
1232
|
+
// changes and we destroy that reporter and create a new one, and then
|
1233
|
+
// the send_message op started by the old reporter finishes, this
|
1234
|
+
// method will be called even though it was for a completion started
|
1235
|
+
// by the old reporter. In that case, the timer will be pending, so
|
1236
|
+
// we just ignore the completion and wait for the timer to fire.
|
1237
|
+
if (timer_handle_.has_value()) return;
|
1493
1238
|
// If there are no more registered stats to report, cancel the call.
|
1494
|
-
auto it =
|
1495
|
-
|
1496
|
-
if (it == xds_client()->xds_load_report_server_map_.end()
|
1497
|
-
|
1498
|
-
it->second.channel_state
|
1499
|
-
|
1500
|
-
return true;
|
1501
|
-
}
|
1502
|
-
if (!GRPC_ERROR_IS_NONE(error) || !IsCurrentReporterOnCall()) {
|
1503
|
-
GRPC_ERROR_UNREF(error);
|
1504
|
-
// If this reporter is no longer the current one on the call, the reason
|
1505
|
-
// might be that it was orphaned for a new one due to config update.
|
1506
|
-
if (!IsCurrentReporterOnCall()) {
|
1507
|
-
parent_->MaybeStartReportingLocked();
|
1239
|
+
auto it = xds_client()->xds_load_report_server_map_.find(
|
1240
|
+
&parent_->chand()->server_);
|
1241
|
+
if (it == xds_client()->xds_load_report_server_map_.end()) return;
|
1242
|
+
if (it->second.load_report_map.empty()) {
|
1243
|
+
if (it->second.channel_state != nullptr) {
|
1244
|
+
it->second.channel_state->StopLrsCallLocked();
|
1508
1245
|
}
|
1509
|
-
return
|
1246
|
+
return;
|
1510
1247
|
}
|
1248
|
+
// Otherwise, schedule the next load report.
|
1511
1249
|
ScheduleNextReportLocked();
|
1512
|
-
return false;
|
1513
1250
|
}
|
1514
1251
|
|
1515
1252
|
//
|
@@ -1527,124 +1264,46 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
|
|
1527
1264
|
// activity in xds_client()->interested_parties_, which is comprised of
|
1528
1265
|
// the polling entities from client_channel.
|
1529
1266
|
GPR_ASSERT(xds_client() != nullptr);
|
1530
|
-
const char* method =
|
1531
|
-
|
1532
|
-
|
1533
|
-
|
1534
|
-
|
1535
|
-
|
1536
|
-
|
1537
|
-
|
1538
|
-
|
1267
|
+
const char* method = chand()->server_.ShouldUseV3()
|
1268
|
+
? "/envoy.service.load_stats.v3."
|
1269
|
+
"LoadReportingService/StreamLoadStats"
|
1270
|
+
: "/envoy.service.load_stats.v2."
|
1271
|
+
"LoadReportingService/StreamLoadStats";
|
1272
|
+
call_ = chand()->transport_->CreateStreamingCall(
|
1273
|
+
method, absl::make_unique<StreamEventHandler>(
|
1274
|
+
// Passing the initial ref here. This ref will go away when
|
1275
|
+
// the StreamEventHandler is destroyed.
|
1276
|
+
RefCountedPtr<LrsCallState>(this)));
|
1539
1277
|
GPR_ASSERT(call_ != nullptr);
|
1540
|
-
// Init the request payload.
|
1541
|
-
grpc_slice request_payload_slice =
|
1542
|
-
xds_client()->api_.CreateLrsInitialRequest(chand()->server_);
|
1543
|
-
send_message_payload_ =
|
1544
|
-
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
|
1545
|
-
grpc_slice_unref_internal(request_payload_slice);
|
1546
|
-
// Init other data associated with the LRS call.
|
1547
|
-
grpc_metadata_array_init(&initial_metadata_recv_);
|
1548
|
-
grpc_metadata_array_init(&trailing_metadata_recv_);
|
1549
1278
|
// Start the call.
|
1550
1279
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1551
|
-
gpr_log(
|
1552
|
-
|
1553
|
-
|
1554
|
-
|
1555
|
-
|
1556
|
-
|
1557
|
-
|
1558
|
-
|
1559
|
-
|
1560
|
-
|
1561
|
-
|
1562
|
-
op->op = GRPC_OP_SEND_INITIAL_METADATA;
|
1563
|
-
op->data.send_initial_metadata.count = 0;
|
1564
|
-
op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY |
|
1565
|
-
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
|
1566
|
-
op->reserved = nullptr;
|
1567
|
-
op++;
|
1568
|
-
// Op: send request message.
|
1569
|
-
GPR_ASSERT(send_message_payload_ != nullptr);
|
1570
|
-
op->op = GRPC_OP_SEND_MESSAGE;
|
1571
|
-
op->data.send_message.send_message = send_message_payload_;
|
1572
|
-
op->flags = 0;
|
1573
|
-
op->reserved = nullptr;
|
1574
|
-
op++;
|
1575
|
-
Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
|
1576
|
-
GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSent, this,
|
1577
|
-
grpc_schedule_on_exec_ctx);
|
1578
|
-
call_error = grpc_call_start_batch_and_execute(
|
1579
|
-
call_, ops, static_cast<size_t>(op - ops), &on_initial_request_sent_);
|
1580
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1581
|
-
// Op: recv initial metadata.
|
1582
|
-
op = ops;
|
1583
|
-
op->op = GRPC_OP_RECV_INITIAL_METADATA;
|
1584
|
-
op->data.recv_initial_metadata.recv_initial_metadata =
|
1585
|
-
&initial_metadata_recv_;
|
1586
|
-
op->flags = 0;
|
1587
|
-
op->reserved = nullptr;
|
1588
|
-
op++;
|
1589
|
-
// Op: recv response.
|
1590
|
-
op->op = GRPC_OP_RECV_MESSAGE;
|
1591
|
-
op->data.recv_message.recv_message = &recv_message_payload_;
|
1592
|
-
op->flags = 0;
|
1593
|
-
op->reserved = nullptr;
|
1594
|
-
op++;
|
1595
|
-
Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release();
|
1596
|
-
GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
|
1597
|
-
grpc_schedule_on_exec_ctx);
|
1598
|
-
call_error = grpc_call_start_batch_and_execute(
|
1599
|
-
call_, ops, static_cast<size_t>(op - ops), &on_response_received_);
|
1600
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1601
|
-
// Op: recv server status.
|
1602
|
-
op = ops;
|
1603
|
-
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
|
1604
|
-
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
|
1605
|
-
op->data.recv_status_on_client.status = &status_code_;
|
1606
|
-
op->data.recv_status_on_client.status_details = &status_details_;
|
1607
|
-
op->flags = 0;
|
1608
|
-
op->reserved = nullptr;
|
1609
|
-
op++;
|
1610
|
-
// This callback signals the end of the call, so it relies on the initial
|
1611
|
-
// ref instead of a new ref. When it's invoked, it's the initial ref that is
|
1612
|
-
// unreffed.
|
1613
|
-
GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
|
1614
|
-
grpc_schedule_on_exec_ctx);
|
1615
|
-
call_error = grpc_call_start_batch_and_execute(
|
1616
|
-
call_, ops, static_cast<size_t>(op - ops), &on_status_received_);
|
1617
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1618
|
-
}
|
1619
|
-
|
1620
|
-
XdsClient::ChannelState::LrsCallState::~LrsCallState() {
|
1621
|
-
grpc_metadata_array_destroy(&initial_metadata_recv_);
|
1622
|
-
grpc_metadata_array_destroy(&trailing_metadata_recv_);
|
1623
|
-
grpc_byte_buffer_destroy(send_message_payload_);
|
1624
|
-
grpc_byte_buffer_destroy(recv_message_payload_);
|
1625
|
-
grpc_slice_unref_internal(status_details_);
|
1626
|
-
GPR_ASSERT(call_ != nullptr);
|
1627
|
-
grpc_call_unref(call_);
|
1280
|
+
gpr_log(GPR_INFO,
|
1281
|
+
"[xds_client %p] xds server %s: starting LRS call (calld=%p, "
|
1282
|
+
"call=%p)",
|
1283
|
+
xds_client(), chand()->server_.server_uri().c_str(), this,
|
1284
|
+
call_.get());
|
1285
|
+
}
|
1286
|
+
// Send the initial request.
|
1287
|
+
std::string serialized_payload =
|
1288
|
+
xds_client()->api_.CreateLrsInitialRequest(chand()->server_);
|
1289
|
+
call_->SendMessage(std::move(serialized_payload));
|
1290
|
+
send_message_pending_ = true;
|
1628
1291
|
}
|
1629
1292
|
|
1630
1293
|
void XdsClient::ChannelState::LrsCallState::Orphan() {
|
1631
1294
|
reporter_.reset();
|
1632
|
-
|
1633
|
-
//
|
1634
|
-
//
|
1635
|
-
|
1636
|
-
// following cancellation will be a no-op.
|
1637
|
-
grpc_call_cancel_internal(call_);
|
1638
|
-
// Note that the initial ref is hold by on_status_received_. So the
|
1639
|
-
// corresponding unref happens in on_status_received_ instead of here.
|
1295
|
+
// Note that the initial ref is held by the StreamEventHandler, which
|
1296
|
+
// will be destroyed when call_ is destroyed, which may not happen
|
1297
|
+
// here, since there may be other refs held to call_ by internal callbacks.
|
1298
|
+
call_.reset();
|
1640
1299
|
}
|
1641
1300
|
|
1642
1301
|
void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
|
1643
1302
|
// Don't start again if already started.
|
1644
1303
|
if (reporter_ != nullptr) return;
|
1645
|
-
// Don't start if the previous send_message op (of the initial request or
|
1646
|
-
// last report of the previous reporter) hasn't completed.
|
1647
|
-
if (
|
1304
|
+
// Don't start if the previous send_message op (of the initial request or
|
1305
|
+
// the last report of the previous reporter) hasn't completed.
|
1306
|
+
if (call_ != nullptr && send_message_pending_) return;
|
1648
1307
|
// Don't start if no LRS response has arrived.
|
1649
1308
|
if (!seen_response()) return;
|
1650
1309
|
// Don't start if the ADS call hasn't received any valid response. Note that
|
@@ -1660,166 +1319,105 @@ void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
|
|
1660
1319
|
Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
|
1661
1320
|
}
|
1662
1321
|
|
1663
|
-
void XdsClient::ChannelState::LrsCallState::
|
1664
|
-
|
1665
|
-
|
1666
|
-
{
|
1667
|
-
|
1668
|
-
|
1322
|
+
void XdsClient::ChannelState::LrsCallState::OnRequestSent(bool /*ok*/) {
|
1323
|
+
MutexLock lock(&xds_client()->mu_);
|
1324
|
+
send_message_pending_ = false;
|
1325
|
+
if (reporter_ != nullptr) {
|
1326
|
+
reporter_->OnReportDoneLocked();
|
1327
|
+
} else {
|
1328
|
+
MaybeStartReportingLocked();
|
1669
1329
|
}
|
1670
|
-
lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
|
1671
|
-
}
|
1672
|
-
|
1673
|
-
void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked() {
|
1674
|
-
// Clear the send_message_payload_.
|
1675
|
-
grpc_byte_buffer_destroy(send_message_payload_);
|
1676
|
-
send_message_payload_ = nullptr;
|
1677
|
-
MaybeStartReportingLocked();
|
1678
1330
|
}
|
1679
1331
|
|
1680
|
-
void XdsClient::ChannelState::LrsCallState::
|
1681
|
-
|
1682
|
-
|
1683
|
-
|
1684
|
-
|
1685
|
-
|
1686
|
-
|
1687
|
-
|
1688
|
-
|
1689
|
-
|
1690
|
-
|
1691
|
-
|
1692
|
-
|
1693
|
-
|
1694
|
-
|
1695
|
-
|
1696
|
-
|
1697
|
-
|
1698
|
-
|
1699
|
-
|
1700
|
-
|
1701
|
-
|
1702
|
-
|
1703
|
-
|
1704
|
-
|
1705
|
-
|
1706
|
-
|
1707
|
-
|
1708
|
-
|
1709
|
-
|
1710
|
-
|
1711
|
-
|
1712
|
-
|
1713
|
-
gpr_log(GPR_ERROR,
|
1714
|
-
"[xds_client %p] xds server %s: LRS response parsing failed: %s",
|
1715
|
-
xds_client(), chand()->server_.server_uri.c_str(),
|
1716
|
-
grpc_error_std_string(parse_error).c_str());
|
1717
|
-
GRPC_ERROR_UNREF(parse_error);
|
1718
|
-
return;
|
1332
|
+
void XdsClient::ChannelState::LrsCallState::OnRecvMessage(
|
1333
|
+
absl::string_view payload) {
|
1334
|
+
MutexLock lock(&xds_client()->mu_);
|
1335
|
+
// If we're no longer the current call, ignore the result.
|
1336
|
+
if (!IsCurrentCallOnChannel()) return;
|
1337
|
+
// Parse the response.
|
1338
|
+
bool send_all_clusters = false;
|
1339
|
+
std::set<std::string> new_cluster_names;
|
1340
|
+
Duration new_load_reporting_interval;
|
1341
|
+
absl::Status status = xds_client()->api_.ParseLrsResponse(
|
1342
|
+
payload, &send_all_clusters, &new_cluster_names,
|
1343
|
+
&new_load_reporting_interval);
|
1344
|
+
if (!status.ok()) {
|
1345
|
+
gpr_log(GPR_ERROR,
|
1346
|
+
"[xds_client %p] xds server %s: LRS response parsing failed: %s",
|
1347
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1348
|
+
status.ToString().c_str());
|
1349
|
+
return;
|
1350
|
+
}
|
1351
|
+
seen_response_ = true;
|
1352
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1353
|
+
gpr_log(
|
1354
|
+
GPR_INFO,
|
1355
|
+
"[xds_client %p] xds server %s: LRS response received, %" PRIuPTR
|
1356
|
+
" cluster names, send_all_clusters=%d, load_report_interval=%" PRId64
|
1357
|
+
"ms",
|
1358
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1359
|
+
new_cluster_names.size(), send_all_clusters,
|
1360
|
+
new_load_reporting_interval.millis());
|
1361
|
+
size_t i = 0;
|
1362
|
+
for (const auto& name : new_cluster_names) {
|
1363
|
+
gpr_log(GPR_INFO, "[xds_client %p] cluster_name %" PRIuPTR ": %s",
|
1364
|
+
xds_client(), i++, name.c_str());
|
1719
1365
|
}
|
1720
|
-
|
1366
|
+
}
|
1367
|
+
if (new_load_reporting_interval <
|
1368
|
+
Duration::Milliseconds(GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS)) {
|
1369
|
+
new_load_reporting_interval =
|
1370
|
+
Duration::Milliseconds(GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
|
1721
1371
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1722
|
-
gpr_log(
|
1723
|
-
|
1724
|
-
|
1725
|
-
|
1726
|
-
|
1727
|
-
xds_client(), chand()->server_.server_uri.c_str(),
|
1728
|
-
new_cluster_names.size(), send_all_clusters,
|
1729
|
-
new_load_reporting_interval.millis());
|
1730
|
-
size_t i = 0;
|
1731
|
-
for (const auto& name : new_cluster_names) {
|
1732
|
-
gpr_log(GPR_INFO, "[xds_client %p] cluster_name %" PRIuPTR ": %s",
|
1733
|
-
xds_client(), i++, name.c_str());
|
1734
|
-
}
|
1735
|
-
}
|
1736
|
-
if (new_load_reporting_interval <
|
1737
|
-
Duration::Milliseconds(
|
1738
|
-
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS)) {
|
1739
|
-
new_load_reporting_interval = Duration::Milliseconds(
|
1740
|
-
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
|
1741
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1742
|
-
gpr_log(GPR_INFO,
|
1743
|
-
"[xds_client %p] xds server %s: increased load_report_interval "
|
1744
|
-
"to minimum value %dms",
|
1745
|
-
xds_client(), chand()->server_.server_uri.c_str(),
|
1746
|
-
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
|
1747
|
-
}
|
1372
|
+
gpr_log(GPR_INFO,
|
1373
|
+
"[xds_client %p] xds server %s: increased load_report_interval "
|
1374
|
+
"to minimum value %dms",
|
1375
|
+
xds_client(), chand()->server_.server_uri().c_str(),
|
1376
|
+
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
|
1748
1377
|
}
|
1749
|
-
|
1750
|
-
|
1751
|
-
|
1752
|
-
|
1753
|
-
|
1754
|
-
|
1755
|
-
|
1756
|
-
|
1757
|
-
|
1758
|
-
|
1759
|
-
}
|
1760
|
-
return;
|
1378
|
+
}
|
1379
|
+
// Ignore identical update.
|
1380
|
+
if (send_all_clusters == send_all_clusters_ &&
|
1381
|
+
cluster_names_ == new_cluster_names &&
|
1382
|
+
load_reporting_interval_ == new_load_reporting_interval) {
|
1383
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1384
|
+
gpr_log(GPR_INFO,
|
1385
|
+
"[xds_client %p] xds server %s: incoming LRS response identical "
|
1386
|
+
"to current, ignoring.",
|
1387
|
+
xds_client(), chand()->server_.server_uri().c_str());
|
1761
1388
|
}
|
1762
|
-
|
1763
|
-
reporter_.reset();
|
1764
|
-
// Record the new config.
|
1765
|
-
send_all_clusters_ = send_all_clusters;
|
1766
|
-
cluster_names_ = std::move(new_cluster_names);
|
1767
|
-
load_reporting_interval_ = new_load_reporting_interval;
|
1768
|
-
// Try starting sending load report.
|
1769
|
-
MaybeStartReportingLocked();
|
1770
|
-
}();
|
1771
|
-
grpc_slice_unref_internal(response_slice);
|
1772
|
-
if (xds_client()->shutting_down_) return true;
|
1773
|
-
// Keep listening for LRS config updates.
|
1774
|
-
grpc_op op;
|
1775
|
-
memset(&op, 0, sizeof(op));
|
1776
|
-
op.op = GRPC_OP_RECV_MESSAGE;
|
1777
|
-
op.data.recv_message.recv_message = &recv_message_payload_;
|
1778
|
-
op.flags = 0;
|
1779
|
-
op.reserved = nullptr;
|
1780
|
-
GPR_ASSERT(call_ != nullptr);
|
1781
|
-
// Reuse the "OnResponseReceivedLocked" ref taken in ctor.
|
1782
|
-
const grpc_call_error call_error =
|
1783
|
-
grpc_call_start_batch_and_execute(call_, &op, 1, &on_response_received_);
|
1784
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1785
|
-
return false;
|
1786
|
-
}
|
1787
|
-
|
1788
|
-
void XdsClient::ChannelState::LrsCallState::OnStatusReceived(
|
1789
|
-
void* arg, grpc_error_handle error) {
|
1790
|
-
LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
|
1791
|
-
{
|
1792
|
-
MutexLock lock(&lrs_calld->xds_client()->mu_);
|
1793
|
-
lrs_calld->OnStatusReceivedLocked(GRPC_ERROR_REF(error));
|
1389
|
+
return;
|
1794
1390
|
}
|
1795
|
-
|
1391
|
+
// Stop current load reporting (if any) to adopt the new config.
|
1392
|
+
reporter_.reset();
|
1393
|
+
// Record the new config.
|
1394
|
+
send_all_clusters_ = send_all_clusters;
|
1395
|
+
cluster_names_ = std::move(new_cluster_names);
|
1396
|
+
load_reporting_interval_ = new_load_reporting_interval;
|
1397
|
+
// Try starting sending load report.
|
1398
|
+
MaybeStartReportingLocked();
|
1796
1399
|
}
|
1797
1400
|
|
1798
|
-
void XdsClient::ChannelState::LrsCallState::
|
1799
|
-
|
1800
|
-
|
1401
|
+
void XdsClient::ChannelState::LrsCallState::OnStatusReceived(
|
1402
|
+
absl::Status status) {
|
1403
|
+
MutexLock lock(&xds_client()->mu_);
|
1801
1404
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1802
|
-
char* status_details = grpc_slice_to_c_string(status_details_);
|
1803
1405
|
gpr_log(GPR_INFO,
|
1804
1406
|
"[xds_client %p] xds server %s: LRS call status received "
|
1805
|
-
"(chand=%p, calld=%p, call=%p): "
|
1806
|
-
|
1807
|
-
|
1808
|
-
call_, status_code_, status_details,
|
1809
|
-
grpc_error_std_string(error).c_str());
|
1810
|
-
gpr_free(status_details);
|
1407
|
+
"(chand=%p, calld=%p, call=%p): %s",
|
1408
|
+
xds_client(), chand()->server_.server_uri().c_str(), chand(), this,
|
1409
|
+
call_.get(), status.ToString().c_str());
|
1811
1410
|
}
|
1812
1411
|
// Ignore status from a stale call.
|
1813
1412
|
if (IsCurrentCallOnChannel()) {
|
1814
1413
|
// Try to restart the call.
|
1815
1414
|
parent_->OnCallFinishedLocked();
|
1816
1415
|
}
|
1817
|
-
GRPC_ERROR_UNREF(error);
|
1818
1416
|
}
|
1819
1417
|
|
1820
1418
|
bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const {
|
1821
|
-
// If the retryable LRS call is null (which only happens when the xds
|
1822
|
-
// is shutting down), all the LRS calls are stale.
|
1419
|
+
// If the retryable LRS call is null (which only happens when the xds
|
1420
|
+
// channel is shutting down), all the LRS calls are stale.
|
1823
1421
|
if (chand()->lrs_calld_ == nullptr) return false;
|
1824
1422
|
return this == chand()->lrs_calld_->calld();
|
1825
1423
|
}
|
@@ -1828,86 +1426,57 @@ bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const {
|
|
1828
1426
|
// XdsClient
|
1829
1427
|
//
|
1830
1428
|
|
1831
|
-
namespace {
|
1832
|
-
|
1833
|
-
Duration GetRequestTimeout(const grpc_channel_args* args) {
|
1834
|
-
return Duration::Milliseconds(grpc_channel_args_find_integer(
|
1835
|
-
args, GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS,
|
1836
|
-
{15000, 0, INT_MAX}));
|
1837
|
-
}
|
1838
|
-
|
1839
|
-
grpc_channel_args* ModifyChannelArgs(const grpc_channel_args* args) {
|
1840
|
-
absl::InlinedVector<grpc_arg, 1> args_to_add = {
|
1841
|
-
grpc_channel_arg_integer_create(
|
1842
|
-
const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS),
|
1843
|
-
5 * 60 * GPR_MS_PER_SEC),
|
1844
|
-
};
|
1845
|
-
return grpc_channel_args_copy_and_add(args, args_to_add.data(),
|
1846
|
-
args_to_add.size());
|
1847
|
-
}
|
1848
|
-
|
1849
|
-
} // namespace
|
1850
|
-
|
1851
1429
|
XdsClient::XdsClient(std::unique_ptr<XdsBootstrap> bootstrap,
|
1852
|
-
|
1430
|
+
OrphanablePtr<XdsTransportFactory> transport_factory,
|
1431
|
+
Duration resource_request_timeout)
|
1853
1432
|
: DualRefCounted<XdsClient>(
|
1854
1433
|
GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_refcount_trace) ? "XdsClient"
|
1855
1434
|
: nullptr),
|
1856
1435
|
bootstrap_(std::move(bootstrap)),
|
1857
|
-
|
1858
|
-
request_timeout_(
|
1436
|
+
transport_factory_(std::move(transport_factory)),
|
1437
|
+
request_timeout_(resource_request_timeout),
|
1859
1438
|
xds_federation_enabled_(XdsFederationEnabled()),
|
1860
|
-
|
1861
|
-
certificate_provider_store_(MakeOrphanable<CertificateProviderStore>(
|
1862
|
-
bootstrap_->certificate_providers())),
|
1863
|
-
api_(this, &grpc_xds_client_trace, bootstrap_->node(),
|
1864
|
-
&bootstrap_->certificate_providers(), &symtab_) {
|
1439
|
+
api_(this, &grpc_xds_client_trace, bootstrap_->node(), &symtab_) {
|
1865
1440
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1866
1441
|
gpr_log(GPR_INFO, "[xds_client %p] creating xds client", this);
|
1867
1442
|
}
|
1868
|
-
|
1869
|
-
// destroyed.
|
1870
|
-
grpc_init();
|
1443
|
+
GPR_ASSERT(bootstrap_ != nullptr);
|
1871
1444
|
}
|
1872
1445
|
|
1873
1446
|
XdsClient::~XdsClient() {
|
1874
1447
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1875
1448
|
gpr_log(GPR_INFO, "[xds_client %p] destroying xds client", this);
|
1876
1449
|
}
|
1877
|
-
grpc_channel_args_destroy(args_);
|
1878
|
-
grpc_pollset_set_destroy(interested_parties_);
|
1879
|
-
// Calling grpc_shutdown to ensure gRPC does not shut down until the XdsClient
|
1880
|
-
// is destroyed.
|
1881
|
-
grpc_shutdown();
|
1882
1450
|
}
|
1883
1451
|
|
1884
1452
|
void XdsClient::Orphan() {
|
1885
1453
|
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1886
1454
|
gpr_log(GPR_INFO, "[xds_client %p] shutting down xds client", this);
|
1887
1455
|
}
|
1888
|
-
|
1889
|
-
|
1890
|
-
|
1891
|
-
|
1892
|
-
|
1893
|
-
|
1894
|
-
|
1895
|
-
|
1896
|
-
|
1897
|
-
|
1456
|
+
MutexLock lock(&mu_);
|
1457
|
+
shutting_down_ = true;
|
1458
|
+
// Clear cache and any remaining watchers that may not have been cancelled.
|
1459
|
+
authority_state_map_.clear();
|
1460
|
+
invalid_watchers_.clear();
|
1461
|
+
// We may still be sending lingering queued load report data, so don't
|
1462
|
+
// just clear the load reporting map, but we do want to clear the refs
|
1463
|
+
// we're holding to the ChannelState objects, to make sure that
|
1464
|
+
// everything shuts down properly.
|
1465
|
+
for (auto& p : xds_load_report_server_map_) {
|
1466
|
+
p.second.channel_state.reset(DEBUG_LOCATION, "XdsClient::Orphan()");
|
1898
1467
|
}
|
1899
1468
|
}
|
1900
1469
|
|
1901
1470
|
RefCountedPtr<XdsClient::ChannelState> XdsClient::GetOrCreateChannelStateLocked(
|
1902
|
-
const XdsBootstrap::XdsServer& server) {
|
1903
|
-
auto it = xds_server_channel_map_.find(server);
|
1471
|
+
const XdsBootstrap::XdsServer& server, const char* reason) {
|
1472
|
+
auto it = xds_server_channel_map_.find(&server);
|
1904
1473
|
if (it != xds_server_channel_map_.end()) {
|
1905
|
-
return it->second->Ref(DEBUG_LOCATION,
|
1474
|
+
return it->second->Ref(DEBUG_LOCATION, reason);
|
1906
1475
|
}
|
1907
1476
|
// Channel not found, so create a new one.
|
1908
1477
|
auto channel_state = MakeRefCounted<ChannelState>(
|
1909
1478
|
WeakRef(DEBUG_LOCATION, "ChannelState"), server);
|
1910
|
-
xds_server_channel_map_[server] = channel_state.get();
|
1479
|
+
xds_server_channel_map_[&server] = channel_state.get();
|
1911
1480
|
return channel_state;
|
1912
1481
|
}
|
1913
1482
|
|
@@ -1923,17 +1492,16 @@ void XdsClient::WatchResource(const XdsResourceType* type,
|
|
1923
1492
|
invalid_watchers_[w] = watcher;
|
1924
1493
|
}
|
1925
1494
|
work_serializer_.Run(
|
1926
|
-
|
1927
|
-
|
1928
|
-
|
1929
|
-
|
1930
|
-
},
|
1495
|
+
[watcher = std::move(watcher), status = std::move(status)]()
|
1496
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
|
1497
|
+
watcher->OnError(status);
|
1498
|
+
},
|
1931
1499
|
DEBUG_LOCATION);
|
1932
1500
|
};
|
1933
1501
|
auto resource_name = ParseXdsResourceName(name, type);
|
1934
1502
|
if (!resource_name.ok()) {
|
1935
|
-
fail(absl::UnavailableError(
|
1936
|
-
"Unable to parse resource name
|
1503
|
+
fail(absl::UnavailableError(
|
1504
|
+
absl::StrCat("Unable to parse resource name ", name)));
|
1937
1505
|
return;
|
1938
1506
|
}
|
1939
1507
|
// Find server to use.
|
@@ -1947,9 +1515,7 @@ void XdsClient::WatchResource(const XdsResourceType* type,
|
|
1947
1515
|
"\" not present in bootstrap config")));
|
1948
1516
|
return;
|
1949
1517
|
}
|
1950
|
-
|
1951
|
-
xds_server = &authority->xds_servers[0];
|
1952
|
-
}
|
1518
|
+
xds_server = authority->server();
|
1953
1519
|
}
|
1954
1520
|
if (xds_server == nullptr) xds_server = &bootstrap_->server();
|
1955
1521
|
{
|
@@ -1975,12 +1541,60 @@ void XdsClient::WatchResource(const XdsResourceType* type,
|
|
1975
1541
|
delete value;
|
1976
1542
|
},
|
1977
1543
|
DEBUG_LOCATION);
|
1544
|
+
} else if (resource_state.meta.client_status ==
|
1545
|
+
XdsApi::ResourceMetadata::DOES_NOT_EXIST) {
|
1546
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1547
|
+
gpr_log(GPR_INFO,
|
1548
|
+
"[xds_client %p] reporting cached does-not-exist for %s", this,
|
1549
|
+
std::string(name).c_str());
|
1550
|
+
}
|
1551
|
+
work_serializer_.Schedule(
|
1552
|
+
[watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
|
1553
|
+
watcher->OnResourceDoesNotExist();
|
1554
|
+
},
|
1555
|
+
DEBUG_LOCATION);
|
1556
|
+
} else if (resource_state.meta.client_status ==
|
1557
|
+
XdsApi::ResourceMetadata::NACKED) {
|
1558
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1559
|
+
gpr_log(
|
1560
|
+
GPR_INFO,
|
1561
|
+
"[xds_client %p] reporting cached validation failure for %s: %s",
|
1562
|
+
this, std::string(name).c_str(),
|
1563
|
+
resource_state.meta.failed_details.c_str());
|
1564
|
+
}
|
1565
|
+
std::string details = resource_state.meta.failed_details;
|
1566
|
+
const auto* node = bootstrap_->node();
|
1567
|
+
if (node != nullptr) {
|
1568
|
+
absl::StrAppend(&details, " (node ID:", bootstrap_->node()->id(), ")");
|
1569
|
+
}
|
1570
|
+
work_serializer_.Schedule(
|
1571
|
+
[watcher, details = std::move(details)]()
|
1572
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
|
1573
|
+
watcher->OnError(absl::UnavailableError(
|
1574
|
+
absl::StrCat("invalid resource: ", details)));
|
1575
|
+
},
|
1576
|
+
DEBUG_LOCATION);
|
1978
1577
|
}
|
1979
1578
|
// If the authority doesn't yet have a channel, set it, creating it if
|
1980
1579
|
// needed.
|
1981
1580
|
if (authority_state.channel_state == nullptr) {
|
1982
1581
|
authority_state.channel_state =
|
1983
|
-
GetOrCreateChannelStateLocked(*xds_server);
|
1582
|
+
GetOrCreateChannelStateLocked(*xds_server, "start watch");
|
1583
|
+
}
|
1584
|
+
absl::Status channel_status = authority_state.channel_state->status();
|
1585
|
+
if (!channel_status.ok()) {
|
1586
|
+
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
1587
|
+
gpr_log(GPR_INFO,
|
1588
|
+
"[xds_client %p] returning cached channel error for %s: %s",
|
1589
|
+
this, std::string(name).c_str(),
|
1590
|
+
channel_status.ToString().c_str());
|
1591
|
+
}
|
1592
|
+
work_serializer_.Schedule(
|
1593
|
+
[watcher = std::move(watcher), status = std::move(channel_status)]()
|
1594
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) mutable {
|
1595
|
+
watcher->OnError(std::move(status));
|
1596
|
+
},
|
1597
|
+
DEBUG_LOCATION);
|
1984
1598
|
}
|
1985
1599
|
authority_state.channel_state->SubscribeLocked(type, *resource_name);
|
1986
1600
|
}
|
@@ -2056,7 +1670,8 @@ const XdsResourceType* XdsClient::GetResourceTypeLocked(
|
|
2056
1670
|
absl::StatusOr<XdsClient::XdsResourceName> XdsClient::ParseXdsResourceName(
|
2057
1671
|
absl::string_view name, const XdsResourceType* type) {
|
2058
1672
|
// Old-style names use the empty string for authority.
|
2059
|
-
// authority is prefixed with "old:" to indicate that it's an old-style
|
1673
|
+
// authority is prefixed with "old:" to indicate that it's an old-style
|
1674
|
+
// name.
|
2060
1675
|
if (!xds_federation_enabled_ || !absl::StartsWith(name, "xdstp:")) {
|
2061
1676
|
return XdsResourceName{"old:", {std::string(name), {}}};
|
2062
1677
|
}
|
@@ -2098,40 +1713,45 @@ std::string XdsClient::ConstructFullXdsResourceName(
|
|
2098
1713
|
RefCountedPtr<XdsClusterDropStats> XdsClient::AddClusterDropStats(
|
2099
1714
|
const XdsBootstrap::XdsServer& xds_server, absl::string_view cluster_name,
|
2100
1715
|
absl::string_view eds_service_name) {
|
2101
|
-
|
1716
|
+
const auto* server = bootstrap_->FindXdsServer(xds_server);
|
1717
|
+
if (server == nullptr) return nullptr;
|
2102
1718
|
auto key =
|
2103
1719
|
std::make_pair(std::string(cluster_name), std::string(eds_service_name));
|
2104
|
-
MutexLock lock(&mu_);
|
2105
|
-
// We jump through some hoops here to make sure that the const
|
2106
|
-
// XdsBootstrap::XdsServer& and absl::string_views
|
2107
|
-
// stored in the XdsClusterDropStats object point to the
|
2108
|
-
// XdsBootstrap::XdsServer and strings
|
2109
|
-
// in the load_report_map_ key, so that they have the same lifetime.
|
2110
|
-
auto server_it =
|
2111
|
-
xds_load_report_server_map_.emplace(xds_server, LoadReportServer()).first;
|
2112
|
-
if (server_it->second.channel_state == nullptr) {
|
2113
|
-
server_it->second.channel_state = GetOrCreateChannelStateLocked(xds_server);
|
2114
|
-
}
|
2115
|
-
auto load_report_it = server_it->second.load_report_map
|
2116
|
-
.emplace(std::move(key), LoadReportState())
|
2117
|
-
.first;
|
2118
|
-
LoadReportState& load_report_state = load_report_it->second;
|
2119
1720
|
RefCountedPtr<XdsClusterDropStats> cluster_drop_stats;
|
2120
|
-
|
2121
|
-
|
2122
|
-
|
2123
|
-
|
1721
|
+
{
|
1722
|
+
MutexLock lock(&mu_);
|
1723
|
+
// We jump through some hoops here to make sure that the const
|
1724
|
+
// XdsBootstrap::XdsServer& and absl::string_views
|
1725
|
+
// stored in the XdsClusterDropStats object point to the
|
1726
|
+
// XdsBootstrap::XdsServer and strings
|
1727
|
+
// in the load_report_map_ key, so that they have the same lifetime.
|
1728
|
+
auto server_it =
|
1729
|
+
xds_load_report_server_map_.emplace(server, LoadReportServer()).first;
|
1730
|
+
if (server_it->second.channel_state == nullptr) {
|
1731
|
+
server_it->second.channel_state = GetOrCreateChannelStateLocked(
|
1732
|
+
*server, "load report map (drop stats)");
|
1733
|
+
}
|
1734
|
+
auto load_report_it = server_it->second.load_report_map
|
1735
|
+
.emplace(std::move(key), LoadReportState())
|
1736
|
+
.first;
|
1737
|
+
LoadReportState& load_report_state = load_report_it->second;
|
2124
1738
|
if (load_report_state.drop_stats != nullptr) {
|
2125
|
-
load_report_state.
|
2126
|
-
load_report_state.drop_stats->GetSnapshotAndReset();
|
1739
|
+
cluster_drop_stats = load_report_state.drop_stats->RefIfNonZero();
|
2127
1740
|
}
|
2128
|
-
cluster_drop_stats
|
2129
|
-
|
2130
|
-
|
2131
|
-
|
2132
|
-
|
1741
|
+
if (cluster_drop_stats == nullptr) {
|
1742
|
+
if (load_report_state.drop_stats != nullptr) {
|
1743
|
+
load_report_state.deleted_drop_stats +=
|
1744
|
+
load_report_state.drop_stats->GetSnapshotAndReset();
|
1745
|
+
}
|
1746
|
+
cluster_drop_stats = MakeRefCounted<XdsClusterDropStats>(
|
1747
|
+
Ref(DEBUG_LOCATION, "DropStats"), *server,
|
1748
|
+
load_report_it->first.first /*cluster_name*/,
|
1749
|
+
load_report_it->first.second /*eds_service_name*/);
|
1750
|
+
load_report_state.drop_stats = cluster_drop_stats.get();
|
1751
|
+
}
|
1752
|
+
server_it->second.channel_state->MaybeStartLrsCall();
|
2133
1753
|
}
|
2134
|
-
|
1754
|
+
work_serializer_.DrainQueue();
|
2135
1755
|
return cluster_drop_stats;
|
2136
1756
|
}
|
2137
1757
|
|
@@ -2139,8 +1759,10 @@ void XdsClient::RemoveClusterDropStats(
|
|
2139
1759
|
const XdsBootstrap::XdsServer& xds_server, absl::string_view cluster_name,
|
2140
1760
|
absl::string_view eds_service_name,
|
2141
1761
|
XdsClusterDropStats* cluster_drop_stats) {
|
1762
|
+
const auto* server = bootstrap_->FindXdsServer(xds_server);
|
1763
|
+
if (server == nullptr) return;
|
2142
1764
|
MutexLock lock(&mu_);
|
2143
|
-
auto server_it = xds_load_report_server_map_.find(
|
1765
|
+
auto server_it = xds_load_report_server_map_.find(server);
|
2144
1766
|
if (server_it == xds_load_report_server_map_.end()) return;
|
2145
1767
|
auto load_report_it = server_it->second.load_report_map.find(
|
2146
1768
|
std::make_pair(std::string(cluster_name), std::string(eds_service_name)));
|
@@ -2159,42 +1781,48 @@ RefCountedPtr<XdsClusterLocalityStats> XdsClient::AddClusterLocalityStats(
|
|
2159
1781
|
const XdsBootstrap::XdsServer& xds_server, absl::string_view cluster_name,
|
2160
1782
|
absl::string_view eds_service_name,
|
2161
1783
|
RefCountedPtr<XdsLocalityName> locality) {
|
2162
|
-
|
1784
|
+
const auto* server = bootstrap_->FindXdsServer(xds_server);
|
1785
|
+
if (server == nullptr) return nullptr;
|
2163
1786
|
auto key =
|
2164
1787
|
std::make_pair(std::string(cluster_name), std::string(eds_service_name));
|
2165
|
-
MutexLock lock(&mu_);
|
2166
|
-
// We jump through some hoops here to make sure that the const
|
2167
|
-
// XdsBootstrap::XdsServer& and absl::string_views
|
2168
|
-
// stored in the XdsClusterDropStats object point to the
|
2169
|
-
// XdsBootstrap::XdsServer and strings
|
2170
|
-
// in the load_report_map_ key, so that they have the same lifetime.
|
2171
|
-
auto server_it =
|
2172
|
-
xds_load_report_server_map_.emplace(xds_server, LoadReportServer()).first;
|
2173
|
-
if (server_it->second.channel_state == nullptr) {
|
2174
|
-
server_it->second.channel_state = GetOrCreateChannelStateLocked(xds_server);
|
2175
|
-
}
|
2176
|
-
auto load_report_it = server_it->second.load_report_map
|
2177
|
-
.emplace(std::move(key), LoadReportState())
|
2178
|
-
.first;
|
2179
|
-
LoadReportState& load_report_state = load_report_it->second;
|
2180
|
-
LoadReportState::LocalityState& locality_state =
|
2181
|
-
load_report_state.locality_stats[locality];
|
2182
1788
|
RefCountedPtr<XdsClusterLocalityStats> cluster_locality_stats;
|
2183
|
-
|
2184
|
-
|
2185
|
-
|
2186
|
-
|
1789
|
+
{
|
1790
|
+
MutexLock lock(&mu_);
|
1791
|
+
// We jump through some hoops here to make sure that the const
|
1792
|
+
// XdsBootstrap::XdsServer& and absl::string_views
|
1793
|
+
// stored in the XdsClusterDropStats object point to the
|
1794
|
+
// XdsBootstrap::XdsServer and strings
|
1795
|
+
// in the load_report_map_ key, so that they have the same lifetime.
|
1796
|
+
auto server_it =
|
1797
|
+
xds_load_report_server_map_.emplace(server, LoadReportServer()).first;
|
1798
|
+
if (server_it->second.channel_state == nullptr) {
|
1799
|
+
server_it->second.channel_state = GetOrCreateChannelStateLocked(
|
1800
|
+
*server, "load report map (locality stats)");
|
1801
|
+
}
|
1802
|
+
auto load_report_it = server_it->second.load_report_map
|
1803
|
+
.emplace(std::move(key), LoadReportState())
|
1804
|
+
.first;
|
1805
|
+
LoadReportState& load_report_state = load_report_it->second;
|
1806
|
+
LoadReportState::LocalityState& locality_state =
|
1807
|
+
load_report_state.locality_stats[locality];
|
2187
1808
|
if (locality_state.locality_stats != nullptr) {
|
2188
|
-
locality_state.
|
2189
|
-
locality_state.locality_stats->GetSnapshotAndReset();
|
1809
|
+
cluster_locality_stats = locality_state.locality_stats->RefIfNonZero();
|
2190
1810
|
}
|
2191
|
-
cluster_locality_stats
|
2192
|
-
|
2193
|
-
|
2194
|
-
|
2195
|
-
|
1811
|
+
if (cluster_locality_stats == nullptr) {
|
1812
|
+
if (locality_state.locality_stats != nullptr) {
|
1813
|
+
locality_state.deleted_locality_stats +=
|
1814
|
+
locality_state.locality_stats->GetSnapshotAndReset();
|
1815
|
+
}
|
1816
|
+
cluster_locality_stats = MakeRefCounted<XdsClusterLocalityStats>(
|
1817
|
+
Ref(DEBUG_LOCATION, "LocalityStats"), *server,
|
1818
|
+
load_report_it->first.first /*cluster_name*/,
|
1819
|
+
load_report_it->first.second /*eds_service_name*/,
|
1820
|
+
std::move(locality));
|
1821
|
+
locality_state.locality_stats = cluster_locality_stats.get();
|
1822
|
+
}
|
1823
|
+
server_it->second.channel_state->MaybeStartLrsCall();
|
2196
1824
|
}
|
2197
|
-
|
1825
|
+
work_serializer_.DrainQueue();
|
2198
1826
|
return cluster_locality_stats;
|
2199
1827
|
}
|
2200
1828
|
|
@@ -2203,8 +1831,10 @@ void XdsClient::RemoveClusterLocalityStats(
|
|
2203
1831
|
absl::string_view eds_service_name,
|
2204
1832
|
const RefCountedPtr<XdsLocalityName>& locality,
|
2205
1833
|
XdsClusterLocalityStats* cluster_locality_stats) {
|
1834
|
+
const auto* server = bootstrap_->FindXdsServer(xds_server);
|
1835
|
+
if (server == nullptr) return;
|
2206
1836
|
MutexLock lock(&mu_);
|
2207
|
-
auto server_it = xds_load_report_server_map_.find(
|
1837
|
+
auto server_it = xds_load_report_server_map_.find(server);
|
2208
1838
|
if (server_it == xds_load_report_server_map_.end()) return;
|
2209
1839
|
auto load_report_it = server_it->second.load_report_map.find(
|
2210
1840
|
std::make_pair(std::string(cluster_name), std::string(eds_service_name)));
|
@@ -2225,36 +1855,8 @@ void XdsClient::RemoveClusterLocalityStats(
|
|
2225
1855
|
void XdsClient::ResetBackoff() {
|
2226
1856
|
MutexLock lock(&mu_);
|
2227
1857
|
for (auto& p : xds_server_channel_map_) {
|
2228
|
-
|
2229
|
-
}
|
2230
|
-
}
|
2231
|
-
|
2232
|
-
void XdsClient::NotifyOnErrorLocked(absl::Status status) {
|
2233
|
-
const auto* node = bootstrap_->node();
|
2234
|
-
if (node != nullptr) {
|
2235
|
-
status = absl::Status(
|
2236
|
-
status.code(), absl::StrCat(status.message(),
|
2237
|
-
" (node ID:", bootstrap_->node()->id, ")"));
|
2238
|
-
}
|
2239
|
-
std::set<RefCountedPtr<ResourceWatcherInterface>> watchers;
|
2240
|
-
for (const auto& a : authority_state_map_) { // authority
|
2241
|
-
for (const auto& t : a.second.resource_map) { // type
|
2242
|
-
for (const auto& r : t.second) { // resource id
|
2243
|
-
for (const auto& w : r.second.watchers) { // watchers
|
2244
|
-
watchers.insert(w.second);
|
2245
|
-
}
|
2246
|
-
}
|
2247
|
-
}
|
1858
|
+
p.second->ResetBackoff();
|
2248
1859
|
}
|
2249
|
-
work_serializer_.Schedule(
|
2250
|
-
// TODO(yashykt): When we move to C++14, capture watchers using
|
2251
|
-
// std::move()
|
2252
|
-
[watchers, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(work_serializer_) {
|
2253
|
-
for (const auto& watcher : watchers) {
|
2254
|
-
watcher->OnError(status);
|
2255
|
-
}
|
2256
|
-
},
|
2257
|
-
DEBUG_LOCATION);
|
2258
1860
|
}
|
2259
1861
|
|
2260
1862
|
void XdsClient::NotifyWatchersOnErrorLocked(
|
@@ -2264,15 +1866,16 @@ void XdsClient::NotifyWatchersOnErrorLocked(
|
|
2264
1866
|
const auto* node = bootstrap_->node();
|
2265
1867
|
if (node != nullptr) {
|
2266
1868
|
status = absl::Status(
|
2267
|
-
status.code(),
|
2268
|
-
|
1869
|
+
status.code(),
|
1870
|
+
absl::StrCat(status.message(), " (node ID:", node->id(), ")"));
|
2269
1871
|
}
|
2270
1872
|
work_serializer_.Schedule(
|
2271
|
-
[watchers, status
|
2272
|
-
|
2273
|
-
|
2274
|
-
|
2275
|
-
|
1873
|
+
[watchers, status = std::move(status)]()
|
1874
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
|
1875
|
+
for (const auto& p : watchers) {
|
1876
|
+
p.first->OnError(status);
|
1877
|
+
}
|
1878
|
+
},
|
2276
1879
|
DEBUG_LOCATION);
|
2277
1880
|
}
|
2278
1881
|
|
@@ -2295,7 +1898,7 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
|
|
2295
1898
|
gpr_log(GPR_INFO, "[xds_client %p] start building load report", this);
|
2296
1899
|
}
|
2297
1900
|
XdsApi::ClusterLoadReportMap snapshot_map;
|
2298
|
-
auto server_it = xds_load_report_server_map_.find(xds_server);
|
1901
|
+
auto server_it = xds_load_report_server_map_.find(&xds_server);
|
2299
1902
|
if (server_it == xds_load_report_server_map_.end()) return snapshot_map;
|
2300
1903
|
auto& load_report_map = server_it->second.load_report_map;
|
2301
1904
|
for (auto load_report_it = load_report_map.begin();
|
@@ -2354,7 +1957,7 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
|
|
2354
1957
|
}
|
2355
1958
|
}
|
2356
1959
|
// Compute load report interval.
|
2357
|
-
const Timestamp now =
|
1960
|
+
const Timestamp now = Timestamp::Now();
|
2358
1961
|
snapshot.load_report_interval = now - load_report.last_report_time;
|
2359
1962
|
load_report.last_report_time = now;
|
2360
1963
|
// Record snapshot.
|
@@ -2394,192 +1997,4 @@ std::string XdsClient::DumpClientConfigBinary() {
|
|
2394
1997
|
return api_.AssembleClientConfig(resource_type_metadata_map);
|
2395
1998
|
}
|
2396
1999
|
|
2397
|
-
//
|
2398
|
-
// accessors for global state
|
2399
|
-
//
|
2400
|
-
|
2401
|
-
void XdsClientGlobalInit() {
|
2402
|
-
g_mu = new Mutex;
|
2403
|
-
XdsHttpFilterRegistry::Init();
|
2404
|
-
XdsClusterSpecifierPluginRegistry::Init();
|
2405
|
-
}
|
2406
|
-
|
2407
|
-
// TODO(roth): Find a better way to clear the fallback config that does
|
2408
|
-
// not require using ABSL_NO_THREAD_SAFETY_ANALYSIS.
|
2409
|
-
void XdsClientGlobalShutdown() ABSL_NO_THREAD_SAFETY_ANALYSIS {
|
2410
|
-
gpr_free(g_fallback_bootstrap_config);
|
2411
|
-
g_fallback_bootstrap_config = nullptr;
|
2412
|
-
delete g_mu;
|
2413
|
-
g_mu = nullptr;
|
2414
|
-
XdsHttpFilterRegistry::Shutdown();
|
2415
|
-
XdsClusterSpecifierPluginRegistry::Shutdown();
|
2416
|
-
}
|
2417
|
-
|
2418
|
-
namespace {
|
2419
|
-
|
2420
|
-
std::string GetBootstrapContents(const char* fallback_config,
|
2421
|
-
grpc_error_handle* error) {
|
2422
|
-
// First, try GRPC_XDS_BOOTSTRAP env var.
|
2423
|
-
UniquePtr<char> path(gpr_getenv("GRPC_XDS_BOOTSTRAP"));
|
2424
|
-
if (path != nullptr) {
|
2425
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
2426
|
-
gpr_log(GPR_INFO,
|
2427
|
-
"Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
|
2428
|
-
"environment variable: %s",
|
2429
|
-
path.get());
|
2430
|
-
}
|
2431
|
-
grpc_slice contents;
|
2432
|
-
*error =
|
2433
|
-
grpc_load_file(path.get(), /*add_null_terminator=*/true, &contents);
|
2434
|
-
if (!GRPC_ERROR_IS_NONE(*error)) return "";
|
2435
|
-
std::string contents_str(StringViewFromSlice(contents));
|
2436
|
-
grpc_slice_unref_internal(contents);
|
2437
|
-
return contents_str;
|
2438
|
-
}
|
2439
|
-
// Next, try GRPC_XDS_BOOTSTRAP_CONFIG env var.
|
2440
|
-
UniquePtr<char> env_config(gpr_getenv("GRPC_XDS_BOOTSTRAP_CONFIG"));
|
2441
|
-
if (env_config != nullptr) {
|
2442
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
2443
|
-
gpr_log(GPR_INFO,
|
2444
|
-
"Got bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG "
|
2445
|
-
"environment variable");
|
2446
|
-
}
|
2447
|
-
return env_config.get();
|
2448
|
-
}
|
2449
|
-
// Finally, try fallback config.
|
2450
|
-
if (fallback_config != nullptr) {
|
2451
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
2452
|
-
gpr_log(GPR_INFO, "Got bootstrap contents from fallback config");
|
2453
|
-
}
|
2454
|
-
return fallback_config;
|
2455
|
-
}
|
2456
|
-
// No bootstrap config found.
|
2457
|
-
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
|
2458
|
-
"Environment variables GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG "
|
2459
|
-
"not defined");
|
2460
|
-
return "";
|
2461
|
-
}
|
2462
|
-
|
2463
|
-
} // namespace
|
2464
|
-
|
2465
|
-
RefCountedPtr<XdsClient> XdsClient::GetOrCreate(const grpc_channel_args* args,
|
2466
|
-
grpc_error_handle* error) {
|
2467
|
-
RefCountedPtr<XdsClient> xds_client;
|
2468
|
-
// If getting bootstrap from channel args, create a local XdsClient
|
2469
|
-
// instance for the channel or server instead of using the global instance.
|
2470
|
-
const char* bootstrap_config = grpc_channel_args_find_string(
|
2471
|
-
args, GRPC_ARG_TEST_ONLY_DO_NOT_USE_IN_PROD_XDS_BOOTSTRAP_CONFIG);
|
2472
|
-
if (bootstrap_config != nullptr) {
|
2473
|
-
std::unique_ptr<XdsBootstrap> bootstrap =
|
2474
|
-
XdsBootstrap::Create(bootstrap_config, error);
|
2475
|
-
if (GRPC_ERROR_IS_NONE(*error)) {
|
2476
|
-
grpc_channel_args* xds_channel_args =
|
2477
|
-
grpc_channel_args_find_pointer<grpc_channel_args>(
|
2478
|
-
args,
|
2479
|
-
GRPC_ARG_TEST_ONLY_DO_NOT_USE_IN_PROD_XDS_CLIENT_CHANNEL_ARGS);
|
2480
|
-
return MakeRefCounted<XdsClient>(std::move(bootstrap), xds_channel_args);
|
2481
|
-
}
|
2482
|
-
return nullptr;
|
2483
|
-
}
|
2484
|
-
// Otherwise, use the global instance.
|
2485
|
-
{
|
2486
|
-
MutexLock lock(g_mu);
|
2487
|
-
if (g_xds_client != nullptr) {
|
2488
|
-
auto xds_client = g_xds_client->RefIfNonZero();
|
2489
|
-
if (xds_client != nullptr) return xds_client;
|
2490
|
-
}
|
2491
|
-
// Find bootstrap contents.
|
2492
|
-
std::string bootstrap_contents =
|
2493
|
-
GetBootstrapContents(g_fallback_bootstrap_config, error);
|
2494
|
-
if (!GRPC_ERROR_IS_NONE(*error)) return nullptr;
|
2495
|
-
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
|
2496
|
-
gpr_log(GPR_INFO, "xDS bootstrap contents: %s",
|
2497
|
-
bootstrap_contents.c_str());
|
2498
|
-
}
|
2499
|
-
// Parse bootstrap.
|
2500
|
-
std::unique_ptr<XdsBootstrap> bootstrap =
|
2501
|
-
XdsBootstrap::Create(bootstrap_contents, error);
|
2502
|
-
if (!GRPC_ERROR_IS_NONE(*error)) return nullptr;
|
2503
|
-
// Instantiate XdsClient.
|
2504
|
-
xds_client =
|
2505
|
-
MakeRefCounted<XdsClient>(std::move(bootstrap), g_channel_args);
|
2506
|
-
g_xds_client = xds_client.get();
|
2507
|
-
}
|
2508
|
-
return xds_client;
|
2509
|
-
}
|
2510
|
-
|
2511
|
-
namespace internal {
|
2512
|
-
|
2513
|
-
void SetXdsChannelArgsForTest(grpc_channel_args* args) {
|
2514
|
-
MutexLock lock(g_mu);
|
2515
|
-
g_channel_args = args;
|
2516
|
-
}
|
2517
|
-
|
2518
|
-
void UnsetGlobalXdsClientForTest() {
|
2519
|
-
MutexLock lock(g_mu);
|
2520
|
-
g_xds_client = nullptr;
|
2521
|
-
}
|
2522
|
-
|
2523
|
-
void SetXdsFallbackBootstrapConfig(const char* config) {
|
2524
|
-
MutexLock lock(g_mu);
|
2525
|
-
gpr_free(g_fallback_bootstrap_config);
|
2526
|
-
g_fallback_bootstrap_config = gpr_strdup(config);
|
2527
|
-
}
|
2528
|
-
|
2529
|
-
} // namespace internal
|
2530
|
-
|
2531
|
-
//
|
2532
|
-
// embedding XdsClient in channel args
|
2533
|
-
//
|
2534
|
-
|
2535
|
-
#define GRPC_ARG_XDS_CLIENT "grpc.internal.xds_client"
|
2536
|
-
|
2537
|
-
namespace {
|
2538
|
-
|
2539
|
-
void* XdsClientArgCopy(void* p) {
|
2540
|
-
XdsClient* xds_client = static_cast<XdsClient*>(p);
|
2541
|
-
xds_client->Ref(DEBUG_LOCATION, "channel arg").release();
|
2542
|
-
return p;
|
2543
|
-
}
|
2544
|
-
|
2545
|
-
void XdsClientArgDestroy(void* p) {
|
2546
|
-
XdsClient* xds_client = static_cast<XdsClient*>(p);
|
2547
|
-
xds_client->Unref(DEBUG_LOCATION, "channel arg");
|
2548
|
-
}
|
2549
|
-
|
2550
|
-
int XdsClientArgCmp(void* p, void* q) { return QsortCompare(p, q); }
|
2551
|
-
|
2552
|
-
const grpc_arg_pointer_vtable kXdsClientArgVtable = {
|
2553
|
-
XdsClientArgCopy, XdsClientArgDestroy, XdsClientArgCmp};
|
2554
|
-
|
2555
|
-
} // namespace
|
2556
|
-
|
2557
|
-
grpc_arg XdsClient::MakeChannelArg() const {
|
2558
|
-
return grpc_channel_arg_pointer_create(const_cast<char*>(GRPC_ARG_XDS_CLIENT),
|
2559
|
-
const_cast<XdsClient*>(this),
|
2560
|
-
&kXdsClientArgVtable);
|
2561
|
-
}
|
2562
|
-
|
2563
|
-
RefCountedPtr<XdsClient> XdsClient::GetFromChannelArgs(
|
2564
|
-
const grpc_channel_args& args) {
|
2565
|
-
XdsClient* xds_client =
|
2566
|
-
grpc_channel_args_find_pointer<XdsClient>(&args, GRPC_ARG_XDS_CLIENT);
|
2567
|
-
if (xds_client == nullptr) return nullptr;
|
2568
|
-
return xds_client->Ref(DEBUG_LOCATION, "GetFromChannelArgs");
|
2569
|
-
}
|
2570
|
-
|
2571
2000
|
} // namespace grpc_core
|
2572
|
-
|
2573
|
-
// The returned bytes may contain NULL(0), so we can't use c-string.
|
2574
|
-
grpc_slice grpc_dump_xds_configs(void) {
|
2575
|
-
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
|
2576
|
-
grpc_core::ExecCtx exec_ctx;
|
2577
|
-
grpc_error_handle error = GRPC_ERROR_NONE;
|
2578
|
-
auto xds_client = grpc_core::XdsClient::GetOrCreate(nullptr, &error);
|
2579
|
-
if (!GRPC_ERROR_IS_NONE(error)) {
|
2580
|
-
// If we isn't using xDS, just return an empty string.
|
2581
|
-
GRPC_ERROR_UNREF(error);
|
2582
|
-
return grpc_empty_slice();
|
2583
|
-
}
|
2584
|
-
return grpc_slice_from_cpp_string(xds_client->DumpClientConfigBinary());
|
2585
|
-
}
|