grpc 1.78.1 → 1.80.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Makefile +22 -8
- data/include/grpc/credentials.h +47 -37
- data/include/grpc/credentials_cpp.h +39 -0
- data/include/grpc/event_engine/event_engine.h +8 -3
- data/include/grpc/grpc.h +4 -0
- data/include/grpc/impl/call.h +9 -0
- data/include/grpc/impl/channel_arg_names.h +7 -0
- data/include/grpc/module.modulemap +2 -0
- data/include/grpc/private_key_signer.h +104 -0
- data/include/grpc/support/port_platform.h +6 -0
- data/src/core/call/call_filters.h +101 -78
- data/src/core/call/call_spine.h +91 -68
- data/src/core/call/call_state.h +60 -4
- data/src/core/call/client_call.cc +9 -9
- data/src/core/call/client_call.h +1 -1
- data/src/core/call/metadata_batch.cc +2 -0
- data/src/core/call/metadata_batch.h +48 -1
- data/src/core/call/metadata_info.cc +35 -0
- data/src/core/call/metadata_info.h +2 -0
- data/src/core/call/simple_slice_based_metadata.h +2 -1
- data/src/core/channelz/channelz.cc +9 -6
- data/src/core/channelz/channelz.h +7 -4
- data/src/core/channelz/property_list.h +5 -0
- data/src/core/channelz/v2tov1/convert.cc +1 -1
- data/src/core/channelz/v2tov1/legacy_api.cc +164 -307
- data/src/core/client_channel/buffered_call.cc +7 -3
- data/src/core/client_channel/buffered_call.h +11 -5
- data/src/core/client_channel/client_channel.cc +106 -44
- data/src/core/client_channel/client_channel.h +3 -6
- data/src/core/client_channel/client_channel_filter.cc +90 -64
- data/src/core/client_channel/client_channel_filter.h +3 -6
- data/src/core/client_channel/client_channel_internal.h +5 -0
- data/src/core/client_channel/config_selector.h +17 -12
- data/src/core/client_channel/dynamic_filters.cc +8 -7
- data/src/core/client_channel/dynamic_filters.h +7 -5
- data/src/core/client_channel/retry_filter.cc +1 -1
- data/src/core/client_channel/retry_filter.h +2 -2
- data/src/core/client_channel/subchannel.cc +1682 -266
- data/src/core/client_channel/subchannel.h +411 -134
- data/src/core/client_channel/subchannel_stream_client.cc +22 -18
- data/src/core/client_channel/subchannel_stream_client.h +8 -9
- data/src/core/client_channel/subchannel_stream_limiter.cc +76 -0
- data/src/core/client_channel/subchannel_stream_limiter.h +51 -0
- data/src/core/config/config_vars.cc +9 -1
- data/src/core/config/config_vars.h +6 -0
- data/src/core/credentials/call/call_creds_registry.h +51 -22
- data/src/core/credentials/call/call_creds_registry_init.cc +86 -2
- data/src/core/credentials/call/external/aws_external_account_credentials.cc +2 -2
- data/src/core/credentials/call/external/external_account_credentials.cc +11 -4
- data/src/core/credentials/call/external/file_external_account_credentials.cc +2 -2
- data/src/core/credentials/transport/channel_creds_registry.h +71 -20
- data/src/core/credentials/transport/channel_creds_registry_init.cc +338 -29
- data/src/core/credentials/transport/ssl/ssl_credentials.cc +43 -24
- data/src/core/credentials/transport/ssl/ssl_credentials.h +7 -1
- data/src/core/credentials/transport/ssl/ssl_security_connector.cc +2 -8
- data/src/core/credentials/transport/ssl/ssl_security_connector.h +4 -3
- data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.cc +25 -5
- data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.h +7 -5
- data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.cc +181 -109
- data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.h +55 -42
- data/src/core/credentials/transport/tls/grpc_tls_credentials_options.cc +28 -23
- data/src/core/credentials/transport/tls/grpc_tls_credentials_options.h +26 -23
- data/src/core/credentials/transport/tls/spiffe_utils.cc +2 -2
- data/src/core/credentials/transport/tls/ssl_utils.cc +18 -18
- data/src/core/credentials/transport/tls/ssl_utils.h +12 -10
- data/src/core/credentials/transport/tls/tls_security_connector.cc +106 -74
- data/src/core/credentials/transport/tls/tls_security_connector.h +12 -8
- data/src/core/credentials/transport/xds/xds_credentials.cc +76 -32
- data/src/core/credentials/transport/xds/xds_credentials.h +4 -2
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +117 -35
- data/src/core/ext/filters/fault_injection/fault_injection_filter.h +42 -4
- data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.cc +58 -29
- data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.h +19 -11
- data/src/core/ext/filters/stateful_session/stateful_session_filter.cc +82 -25
- data/src/core/ext/filters/stateful_session/stateful_session_filter.h +28 -3
- data/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.cc +9 -7
- data/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.h +1 -1
- data/src/core/ext/transport/chttp2/transport/call_tracer_wrapper.h +7 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +117 -67
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.h +11 -1
- data/src/core/ext/transport/chttp2/transport/frame.cc +2 -15
- data/src/core/ext/transport/chttp2/transport/frame.h +0 -4
- data/src/core/ext/transport/chttp2/transport/goaway.cc +17 -2
- data/src/core/ext/transport/chttp2/transport/goaway.h +27 -6
- data/src/core/ext/transport/chttp2/transport/header_assembler.h +8 -21
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +101 -40
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +95 -0
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.cc +923 -772
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.h +406 -423
- data/src/core/ext/transport/chttp2/transport/http2_settings.cc +1 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +8 -1
- data/src/core/ext/transport/chttp2/transport/http2_settings_promises.h +25 -13
- data/src/core/ext/transport/chttp2/transport/http2_transport.cc +71 -24
- data/src/core/ext/transport/chttp2/transport/http2_transport.h +25 -49
- data/src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h +2 -2
- data/src/core/ext/transport/chttp2/transport/incoming_metadata_tracker.h +29 -9
- data/src/core/ext/transport/chttp2/transport/internal.h +6 -2
- data/src/core/ext/transport/chttp2/transport/keepalive.cc +14 -20
- data/src/core/ext/transport/chttp2/transport/keepalive.h +9 -6
- data/src/core/ext/transport/chttp2/transport/parsing.cc +11 -0
- data/src/core/ext/transport/chttp2/transport/ping_promise.cc +34 -74
- data/src/core/ext/transport/chttp2/transport/ping_promise.h +123 -79
- data/src/core/ext/transport/chttp2/transport/security_frame.h +233 -3
- data/src/core/ext/transport/chttp2/transport/stream.h +152 -73
- data/src/core/ext/transport/chttp2/transport/stream_data_queue.h +155 -85
- data/src/core/ext/transport/chttp2/transport/transport_common.h +0 -5
- data/src/core/ext/transport/chttp2/transport/writable_streams.h +8 -7
- data/src/core/ext/transport/chttp2/transport/write_cycle.cc +86 -0
- data/src/core/ext/transport/chttp2/transport/write_cycle.h +355 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +31 -29
- data/src/core/ext/upb-gen/cel/expr/checked.upb.h +1875 -0
- data/src/core/ext/upb-gen/cel/expr/checked.upb_minitable.c +409 -0
- data/src/core/ext/upb-gen/cel/expr/checked.upb_minitable.h +56 -0
- data/src/core/ext/upb-gen/cel/expr/syntax.upb.h +2223 -0
- data/src/core/ext/upb-gen/cel/expr/syntax.upb_minitable.c +489 -0
- data/src/core/ext/upb-gen/cel/expr/syntax.upb_minitable.h +60 -0
- data/src/core/ext/upb-gen/envoy/config/accesslog/v3/accesslog.upb.h +2 -1
- data/src/core/ext/upb-gen/envoy/config/bootstrap/v3/bootstrap.upb.h +130 -18
- data/src/core/ext/upb-gen/envoy/config/bootstrap/v3/bootstrap.upb_minitable.c +18 -13
- data/src/core/ext/upb-gen/envoy/config/cluster/v3/cluster.upb.h +70 -38
- data/src/core/ext/upb-gen/envoy/config/cluster/v3/cluster.upb_minitable.c +20 -17
- data/src/core/ext/upb-gen/envoy/config/common/matcher/v3/matcher.upb.h +26 -10
- data/src/core/ext/upb-gen/envoy/config/common/matcher/v3/matcher.upb_minitable.c +8 -7
- data/src/core/ext/upb-gen/envoy/config/common/mutation_rules/v3/mutation_rules.upb.h +495 -0
- data/src/core/ext/upb-gen/envoy/config/common/mutation_rules/v3/mutation_rules.upb_minitable.c +114 -0
- data/src/core/ext/upb-gen/envoy/config/common/mutation_rules/v3/mutation_rules.upb_minitable.h +36 -0
- data/src/core/ext/upb-gen/envoy/config/core/v3/address.upb.h +26 -10
- data/src/core/ext/upb-gen/envoy/config/core/v3/address.upb_minitable.c +8 -7
- data/src/core/ext/upb-gen/envoy/config/core/v3/cel.upb.h +121 -0
- data/src/core/ext/upb-gen/envoy/config/core/v3/cel.upb_minitable.c +54 -0
- data/src/core/ext/upb-gen/envoy/config/core/v3/cel.upb_minitable.h +32 -0
- data/src/core/ext/upb-gen/envoy/config/core/v3/grpc_service.upb.h +143 -9
- data/src/core/ext/upb-gen/envoy/config/core/v3/grpc_service.upb_minitable.c +18 -6
- data/src/core/ext/upb-gen/envoy/config/core/v3/protocol.upb.h +112 -11
- data/src/core/ext/upb-gen/envoy/config/core/v3/protocol.upb_minitable.c +22 -9
- data/src/core/ext/upb-gen/envoy/config/core/v3/proxy_protocol.upb.h +276 -0
- data/src/core/ext/upb-gen/envoy/config/core/v3/proxy_protocol.upb_minitable.c +60 -5
- data/src/core/ext/upb-gen/envoy/config/core/v3/proxy_protocol.upb_minitable.h +4 -0
- data/src/core/ext/upb-gen/envoy/config/endpoint/v3/endpoint_components.upb.h +72 -0
- data/src/core/ext/upb-gen/envoy/config/endpoint/v3/endpoint_components.upb_minitable.c +23 -2
- data/src/core/ext/upb-gen/envoy/config/endpoint/v3/endpoint_components.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/config/listener/v3/listener.upb.h +129 -13
- data/src/core/ext/upb-gen/envoy/config/listener/v3/listener.upb_minitable.c +36 -10
- data/src/core/ext/upb-gen/envoy/config/listener/v3/listener.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/config/listener/v3/quic_config.upb.h +30 -0
- data/src/core/ext/upb-gen/envoy/config/listener/v3/quic_config.upb_minitable.c +5 -3
- data/src/core/ext/upb-gen/envoy/config/metrics/v3/metrics_service.upb.h +16 -0
- data/src/core/ext/upb-gen/envoy/config/metrics/v3/metrics_service.upb_minitable.c +4 -3
- data/src/core/ext/upb-gen/envoy/config/metrics/v3/stats.upb.h +31 -0
- data/src/core/ext/upb-gen/envoy/config/metrics/v3/stats.upb_minitable.c +5 -3
- data/src/core/ext/upb-gen/envoy/config/overload/v3/overload.upb.h +2 -1
- data/src/core/ext/upb-gen/envoy/config/rbac/v3/rbac.upb.h +63 -0
- data/src/core/ext/upb-gen/envoy/config/rbac/v3/rbac.upb_minitable.c +12 -7
- data/src/core/ext/upb-gen/envoy/config/route/v3/route.upb.h +97 -81
- data/src/core/ext/upb-gen/envoy/config/route/v3/route.upb_minitable.c +40 -23
- data/src/core/ext/upb-gen/envoy/config/route/v3/route_components.upb.h +604 -228
- data/src/core/ext/upb-gen/envoy/config/route/v3/route_components.upb_minitable.c +146 -100
- data/src/core/ext/upb-gen/envoy/config/tap/v3/common.upb.h +30 -0
- data/src/core/ext/upb-gen/envoy/config/tap/v3/common.upb_minitable.c +5 -3
- data/src/core/ext/upb-gen/envoy/config/trace/v3/opentelemetry.upb.h +35 -3
- data/src/core/ext/upb-gen/envoy/config/trace/v3/opentelemetry.upb_minitable.c +7 -4
- data/src/core/ext/upb-gen/envoy/config/trace/v3/zipkin.upb.h +66 -14
- data/src/core/ext/upb-gen/envoy/config/trace/v3/zipkin.upb_minitable.c +22 -11
- data/src/core/ext/upb-gen/envoy/extensions/clusters/aggregate/v3/cluster.upb.h +87 -0
- data/src/core/ext/upb-gen/envoy/extensions/clusters/aggregate/v3/cluster.upb_minitable.c +29 -2
- data/src/core/ext/upb-gen/envoy/extensions/clusters/aggregate/v3/cluster.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/extensions/filters/http/rbac/v3/rbac.upb.h +0 -1
- data/src/core/ext/upb-gen/envoy/extensions/filters/http/rbac/v3/rbac.upb_minitable.c +0 -1
- data/src/core/ext/upb-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb.h +20 -4
- data/src/core/ext/upb-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb_minitable.c +5 -4
- data/src/core/ext/upb-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +239 -60
- data/src/core/ext/upb-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb_minitable.c +59 -28
- data/src/core/ext/upb-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/call_credentials/access_token/v3/access_token_credentials.upb.h +89 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/call_credentials/access_token/v3/access_token_credentials.upb_minitable.c +50 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/call_credentials/access_token/v3/access_token_credentials.upb_minitable.h +32 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/tls/v3/tls_credentials.upb.h +135 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/tls/v3/tls_credentials.upb_minitable.c +53 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/tls/v3/tls_credentials.upb_minitable.h +32 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/xds/v3/xds_credentials.upb.h +105 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/xds/v3/xds_credentials.upb_minitable.c +51 -0
- data/src/core/ext/upb-gen/envoy/extensions/grpc_service/channel_credentials/xds/v3/xds_credentials.upb_minitable.h +32 -0
- data/src/core/ext/upb-gen/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.upb.h +32 -0
- data/src/core/ext/upb-gen/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.upb_minitable.c +6 -3
- data/src/core/ext/upb-gen/envoy/extensions/load_balancing_policies/common/v3/common.upb.h +206 -0
- data/src/core/ext/upb-gen/envoy/extensions/load_balancing_policies/common/v3/common.upb_minitable.c +41 -8
- data/src/core/ext/upb-gen/envoy/extensions/load_balancing_policies/common/v3/common.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/common.upb.h +64 -0
- data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/common.upb_minitable.c +4 -3
- data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/secret.upb.h +64 -0
- data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/secret.upb_minitable.c +31 -5
- data/src/core/ext/upb-gen/envoy/extensions/transport_sockets/tls/v3/secret.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb.h +283 -14
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb_minitable.c +48 -11
- data/src/core/ext/upb-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/type/http/v3/cookie.upb.h +144 -6
- data/src/core/ext/upb-gen/envoy/type/http/v3/cookie.upb_minitable.c +35 -7
- data/src/core/ext/upb-gen/envoy/type/http/v3/cookie.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/envoy/type/tracing/v3/custom_tag.upb.h +42 -21
- data/src/core/ext/upb-gen/envoy/type/tracing/v3/custom_tag.upb_minitable.c +9 -8
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb.h +164 -1
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb_minitable.c +37 -6
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/promise.upb_minitable.h +2 -0
- data/src/core/ext/upb-gen/xds/type/matcher/v3/cel.upb.h +0 -1
- data/src/core/ext/upb-gen/xds/type/matcher/v3/cel.upb_minitable.c +0 -1
- data/src/core/ext/upb-gen/xds/type/matcher/v3/http_inputs.upb.h +0 -1
- data/src/core/ext/upb-gen/xds/type/matcher/v3/http_inputs.upb_minitable.c +0 -1
- data/src/core/ext/upb-gen/xds/type/matcher/v3/matcher.upb.h +26 -11
- data/src/core/ext/upb-gen/xds/type/matcher/v3/matcher.upb_minitable.c +8 -8
- data/src/core/ext/upb-gen/xds/type/matcher/v3/string.upb.h +33 -0
- data/src/core/ext/upb-gen/xds/type/matcher/v3/string.upb_minitable.c +14 -3
- data/src/core/ext/upb-gen/xds/type/v3/cel.upb.h +90 -10
- data/src/core/ext/upb-gen/xds/type/v3/cel.upb_minitable.c +18 -7
- data/src/core/ext/upbdefs-gen/cel/expr/checked.upbdefs.c +248 -0
- data/src/core/ext/upbdefs-gen/cel/expr/checked.upbdefs.h +97 -0
- data/src/core/ext/upbdefs-gen/cel/expr/syntax.upbdefs.c +283 -0
- data/src/core/ext/upbdefs-gen/cel/expr/syntax.upbdefs.h +107 -0
- data/src/core/ext/upbdefs-gen/envoy/config/accesslog/v3/accesslog.upbdefs.c +213 -211
- data/src/core/ext/upbdefs-gen/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +635 -614
- data/src/core/ext/upbdefs-gen/envoy/config/cluster/v3/cluster.upbdefs.c +1012 -1000
- data/src/core/ext/upbdefs-gen/envoy/config/common/matcher/v3/matcher.upbdefs.c +276 -273
- data/src/core/ext/upbdefs-gen/envoy/config/common/mutation_rules/v3/mutation_rules.upbdefs.c +152 -0
- data/src/core/ext/upbdefs-gen/envoy/config/common/mutation_rules/v3/mutation_rules.upbdefs.h +47 -0
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/address.upbdefs.c +149 -144
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/base.upbdefs.c +367 -370
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/cel.upbdefs.c +63 -0
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/cel.upbdefs.h +37 -0
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/grpc_service.upbdefs.c +297 -284
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/protocol.upbdefs.c +492 -469
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/proxy_protocol.upbdefs.c +74 -43
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/proxy_protocol.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-gen/envoy/config/core/v3/substitution_format_string.upbdefs.c +60 -59
- data/src/core/ext/upbdefs-gen/envoy/config/endpoint/v3/endpoint_components.upbdefs.c +202 -184
- data/src/core/ext/upbdefs-gen/envoy/config/endpoint/v3/endpoint_components.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/config/listener/v3/listener.upbdefs.c +354 -339
- data/src/core/ext/upbdefs-gen/envoy/config/listener/v3/listener.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/config/listener/v3/quic_config.upbdefs.c +28 -19
- data/src/core/ext/upbdefs-gen/envoy/config/metrics/v3/metrics_service.upbdefs.c +30 -27
- data/src/core/ext/upbdefs-gen/envoy/config/metrics/v3/stats.upbdefs.c +71 -66
- data/src/core/ext/upbdefs-gen/envoy/config/overload/v3/overload.upbdefs.c +94 -91
- data/src/core/ext/upbdefs-gen/envoy/config/rbac/v3/rbac.upbdefs.c +386 -369
- data/src/core/ext/upbdefs-gen/envoy/config/route/v3/route.upbdefs.c +60 -57
- data/src/core/ext/upbdefs-gen/envoy/config/route/v3/route_components.upbdefs.c +1974 -1884
- data/src/core/ext/upbdefs-gen/envoy/config/tap/v3/common.upbdefs.c +119 -112
- data/src/core/ext/upbdefs-gen/envoy/config/trace/v3/opentelemetry.upbdefs.c +62 -51
- data/src/core/ext/upbdefs-gen/envoy/config/trace/v3/zipkin.upbdefs.c +109 -88
- data/src/core/ext/upbdefs-gen/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c +54 -36
- data/src/core/ext/upbdefs-gen/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c +78 -84
- data/src/core/ext/upbdefs-gen/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upbdefs.c +48 -46
- data/src/core/ext/upbdefs-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +1041 -984
- data/src/core/ext/upbdefs-gen/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +304 -290
- data/src/core/ext/upbdefs-gen/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c +94 -77
- data/src/core/ext/upbdefs-gen/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upbdefs.c +246 -193
- data/src/core/ext/upbdefs-gen/envoy/extensions/upstreams/http/v3/http_protocol_options.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/type/http/v3/cookie.upbdefs.c +37 -23
- data/src/core/ext/upbdefs-gen/envoy/type/http/v3/cookie.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/envoy/type/tracing/v3/custom_tag.upbdefs.c +5 -3
- data/src/core/ext/upbdefs-gen/google/api/http.upbdefs.c +4 -4
- data/src/core/ext/upbdefs-gen/google/api/httpbody.upbdefs.c +4 -5
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/promise.upbdefs.c +113 -87
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/promise.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-gen/udpa/annotations/migrate.upbdefs.c +5 -5
- data/src/core/ext/upbdefs-gen/udpa/annotations/security.upbdefs.c +6 -5
- data/src/core/ext/upbdefs-gen/udpa/annotations/sensitive.upbdefs.c +5 -5
- data/src/core/ext/upbdefs-gen/udpa/annotations/status.upbdefs.c +5 -5
- data/src/core/ext/upbdefs-gen/udpa/annotations/versioning.upbdefs.c +5 -5
- data/src/core/ext/upbdefs-gen/xds/type/matcher/v3/cel.upbdefs.c +25 -30
- data/src/core/ext/upbdefs-gen/xds/type/matcher/v3/http_inputs.upbdefs.c +14 -20
- data/src/core/ext/upbdefs-gen/xds/type/matcher/v3/matcher.upbdefs.c +180 -183
- data/src/core/ext/upbdefs-gen/xds/type/matcher/v3/string.upbdefs.c +56 -47
- data/src/core/ext/upbdefs-gen/xds/type/v3/cel.upbdefs.c +69 -47
- data/src/core/filter/filter_chain.h +95 -0
- data/src/core/handshaker/http_connect/{http_connect_handshaker.cc → http_connect_client_handshaker.cc} +32 -31
- data/src/core/handshaker/http_connect/{http_connect_handshaker.h → http_connect_client_handshaker.h} +4 -4
- data/src/core/handshaker/http_connect/http_proxy_mapper.cc +1 -1
- data/src/core/handshaker/http_connect/xds_http_proxy_mapper.cc +1 -1
- data/src/core/handshaker/security/pipelined_secure_endpoint.cc +14 -13
- data/src/core/handshaker/security/secure_endpoint.cc +282 -68
- data/src/core/handshaker/security/secure_endpoint.h +0 -7
- data/src/core/lib/channel/channel_args.h +1 -1
- data/src/core/lib/channel/promise_based_filter.cc +17 -4
- data/src/core/lib/channel/promise_based_filter.h +3 -2
- data/src/core/lib/debug/trace_flags.cc +2 -0
- data/src/core/lib/debug/trace_flags.h +1 -0
- data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc +35 -8
- data/src/core/lib/event_engine/cf_engine/dns_service_resolver.h +1 -2
- data/src/core/lib/event_engine/event_engine.cc +9 -0
- data/src/core/lib/event_engine/extensions/tcp_trace.h +0 -3
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +2 -2
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +1 -1
- data/src/core/lib/event_engine/posix_engine/posix_engine.cc +34 -9
- data/src/core/lib/event_engine/posix_engine/posix_engine.h +24 -2
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc +1 -3
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc +141 -14
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.h +19 -2
- data/src/core/lib/event_engine/posix_engine/posix_interface.h +7 -0
- data/src/core/lib/event_engine/posix_engine/posix_interface_posix.cc +21 -3
- data/src/core/lib/event_engine/posix_engine/posix_interface_windows.cc +16 -0
- data/src/core/lib/experiments/experiments.cc +309 -201
- data/src/core/lib/experiments/experiments.h +141 -80
- data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +2 -2
- data/src/core/lib/iomgr/resolve_address.h +0 -2
- data/src/core/lib/iomgr/resolved_address.h +0 -2
- data/src/core/lib/iomgr/tcp_posix.cc +13 -5
- data/src/core/lib/iomgr/tcp_server.cc +0 -5
- data/src/core/lib/iomgr/tcp_server.h +0 -7
- data/src/core/lib/iomgr/tcp_server_posix.cc +0 -17
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +0 -3
- data/src/core/lib/iomgr/tcp_server_windows.cc +12 -51
- data/src/core/lib/promise/all_ok.h +17 -12
- data/src/core/lib/promise/cancel_callback.h +12 -13
- data/src/core/lib/promise/detail/join_state.h +626 -0
- data/src/core/lib/promise/detail/promise_factory.h +14 -14
- data/src/core/lib/promise/for_each.h +32 -8
- data/src/core/lib/promise/if.h +9 -7
- data/src/core/lib/promise/loop.h +18 -16
- data/src/core/lib/promise/map.h +54 -47
- data/src/core/lib/promise/mpsc.h +11 -10
- data/src/core/lib/promise/observable.h +6 -6
- data/src/core/lib/promise/party.h +25 -19
- data/src/core/lib/promise/poll.h +5 -5
- data/src/core/lib/promise/prioritized_race.h +10 -7
- data/src/core/lib/promise/promise.h +16 -11
- data/src/core/lib/promise/race.h +6 -5
- data/src/core/lib/promise/seq.h +109 -74
- data/src/core/lib/promise/try_join.h +14 -6
- data/src/core/lib/promise/try_seq.h +76 -60
- data/src/core/lib/resource_quota/api.cc +7 -0
- data/src/core/lib/resource_quota/arena.h +1 -1
- data/src/core/lib/resource_quota/memory_quota.cc +4 -1
- data/src/core/lib/resource_quota/resource_quota.cc +2 -1
- data/src/core/lib/resource_quota/resource_quota.h +3 -0
- data/src/core/lib/resource_quota/stream_quota.cc +77 -1
- data/src/core/lib/resource_quota/stream_quota.h +64 -1
- data/src/core/lib/resource_quota/telemetry.h +1 -1
- data/src/core/lib/surface/call.cc +13 -0
- data/src/core/lib/surface/call_utils.h +58 -43
- data/src/core/lib/surface/channel.h +1 -4
- data/src/core/lib/surface/completion_queue.cc +13 -6
- data/src/core/lib/surface/validate_metadata.cc +20 -15
- data/src/core/lib/surface/validate_metadata.h +3 -1
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/promise_endpoint.cc +1 -1
- data/src/core/lib/transport/promise_endpoint.h +1 -1
- data/src/core/lib/transport/transport.h +5 -0
- data/src/core/load_balancing/health_check_client.cc +1 -15
- data/src/core/load_balancing/health_check_client_internal.h +0 -2
- data/src/core/load_balancing/oob_backend_metric.cc +1 -5
- data/src/core/load_balancing/oob_backend_metric_internal.h +0 -1
- data/src/core/load_balancing/xds/xds_cluster_impl.cc +12 -9
- data/src/core/plugin_registry/grpc_plugin_registry.cc +3 -2
- data/src/core/resolver/xds/xds_resolver.cc +162 -116
- data/src/core/server/server.cc +18 -1
- data/src/core/server/server.h +2 -0
- data/src/core/server/xds_server_config_fetcher.cc +4 -4
- data/src/core/telemetry/call_tracer.cc +87 -2
- data/src/core/telemetry/call_tracer.h +46 -8
- data/src/core/telemetry/instrument.cc +102 -40
- data/src/core/telemetry/instrument.h +246 -65
- data/src/core/tsi/fake_transport_security.cc +3 -1
- data/src/core/tsi/ssl_transport_security.cc +516 -137
- data/src/core/tsi/ssl_transport_security.h +28 -22
- data/src/core/tsi/ssl_transport_security_utils.cc +2 -2
- data/src/core/tsi/ssl_transport_security_utils.h +2 -2
- data/src/core/util/bitset.h +6 -0
- data/src/core/util/function_signature.h +3 -1
- data/src/core/util/http_client/httpcli_security_connector.cc +2 -1
- data/src/core/util/json/json_reader.cc +0 -4
- data/src/core/xds/grpc/certificate_provider_store.cc +2 -1
- data/src/core/xds/grpc/certificate_provider_store.h +3 -17
- data/src/core/xds/grpc/certificate_provider_store_interface.h +61 -0
- data/src/core/xds/grpc/xds_bootstrap_grpc.cc +48 -0
- data/src/core/xds/grpc/xds_bootstrap_grpc.h +18 -0
- data/src/core/xds/grpc/xds_certificate_provider.cc +7 -2
- data/src/core/xds/grpc/xds_certificate_provider.h +13 -2
- data/src/core/xds/grpc/xds_client_grpc.cc +13 -6
- data/src/core/xds/grpc/xds_client_grpc.h +10 -7
- data/src/core/xds/grpc/xds_cluster.cc +18 -4
- data/src/core/xds/grpc/xds_cluster.h +17 -2
- data/src/core/xds/grpc/xds_cluster_parser.cc +36 -11
- data/src/core/xds/grpc/xds_common_types.cc +45 -0
- data/src/core/xds/grpc/xds_common_types.h +31 -0
- data/src/core/xds/grpc/xds_common_types_parser.cc +274 -16
- data/src/core/xds/grpc/xds_common_types_parser.h +12 -0
- data/src/core/xds/grpc/xds_http_fault_filter.cc +128 -24
- data/src/core/xds/grpc/xds_http_fault_filter.h +19 -10
- data/src/core/xds/grpc/xds_http_filter.cc +38 -0
- data/src/core/xds/grpc/xds_http_filter.h +70 -47
- data/src/core/xds/grpc/xds_http_filter_registry.cc +48 -14
- data/src/core/xds/grpc/xds_http_filter_registry.h +29 -15
- data/src/core/xds/grpc/xds_http_gcp_authn_filter.cc +88 -22
- data/src/core/xds/grpc/xds_http_gcp_authn_filter.h +22 -11
- data/src/core/xds/grpc/xds_http_rbac_filter.cc +36 -20
- data/src/core/xds/grpc/xds_http_rbac_filter.h +19 -10
- data/src/core/xds/grpc/xds_http_stateful_session_filter.cc +143 -26
- data/src/core/xds/grpc/xds_http_stateful_session_filter.h +19 -10
- data/src/core/xds/grpc/xds_listener.cc +4 -1
- data/src/core/xds/grpc/xds_listener.h +10 -2
- data/src/core/xds/grpc/xds_listener_parser.cc +23 -18
- data/src/core/xds/grpc/xds_matcher.cc +40 -5
- data/src/core/xds/grpc/xds_matcher.h +13 -0
- data/src/core/xds/grpc/xds_matcher_action.h +1 -1
- data/src/core/xds/grpc/xds_matcher_parse.cc +60 -40
- data/src/core/xds/grpc/xds_matcher_parse.h +2 -1
- data/src/core/xds/grpc/xds_route_config.cc +12 -1
- data/src/core/xds/grpc/xds_route_config.h +15 -2
- data/src/core/xds/grpc/xds_route_config_parser.cc +11 -5
- data/src/core/xds/grpc/xds_routing.cc +181 -6
- data/src/core/xds/grpc/xds_routing.h +57 -0
- data/src/core/xds/grpc/xds_server_grpc.cc +55 -43
- data/src/core/xds/grpc/xds_server_grpc.h +13 -6
- data/src/core/xds/grpc/xds_server_grpc_interface.h +3 -2
- data/src/core/xds/grpc/xds_transport_grpc.cc +12 -6
- data/src/core/xds/grpc/xds_transport_grpc.h +5 -1
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +12 -8
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +18 -12
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/generate_proto_ruby.sh +1 -1
- metadata +42 -6
- data/src/core/ext/transport/chttp2/transport/security_frame.cc +0 -31
- data/src/core/handshaker/security/legacy_secure_endpoint.cc +0 -597
|
@@ -32,8 +32,10 @@
|
|
|
32
32
|
#include "src/core/call/interception_chain.h"
|
|
33
33
|
#include "src/core/channelz/channel_trace.h"
|
|
34
34
|
#include "src/core/channelz/channelz.h"
|
|
35
|
+
#include "src/core/client_channel/buffered_call.h"
|
|
35
36
|
#include "src/core/client_channel/client_channel_internal.h"
|
|
36
37
|
#include "src/core/client_channel/subchannel_pool_interface.h"
|
|
38
|
+
#include "src/core/client_channel/subchannel_stream_limiter.h"
|
|
37
39
|
#include "src/core/config/core_configuration.h"
|
|
38
40
|
#include "src/core/handshaker/proxy_mapper_registry.h"
|
|
39
41
|
#include "src/core/lib/address_utils/sockaddr_utils.h"
|
|
@@ -90,21 +92,111 @@ namespace grpc_core {
|
|
|
90
92
|
|
|
91
93
|
using ::grpc_event_engine::experimental::EventEngine;
|
|
92
94
|
|
|
95
|
+
// To avoid a naming conflict between
|
|
96
|
+
// ConnectivityStateWatcherInterface and
|
|
97
|
+
// Subchannel::ConnectivityStateWatcherInterface.
|
|
98
|
+
using TransportConnectivityStateWatcher = ConnectivityStateWatcherInterface;
|
|
99
|
+
|
|
100
|
+
//
|
|
101
|
+
// Subchannel::Call
|
|
102
|
+
//
|
|
103
|
+
|
|
104
|
+
RefCountedPtr<Subchannel::Call> Subchannel::Call::Ref() {
|
|
105
|
+
IncrementRefCount();
|
|
106
|
+
return RefCountedPtr<Subchannel::Call>(this);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
RefCountedPtr<Subchannel::Call> Subchannel::Call::Ref(
|
|
110
|
+
const DebugLocation& location, const char* reason) {
|
|
111
|
+
IncrementRefCount(location, reason);
|
|
112
|
+
return RefCountedPtr<Subchannel::Call>(this);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
//
|
|
116
|
+
// Subchannel
|
|
117
|
+
//
|
|
118
|
+
|
|
119
|
+
RefCountedPtr<Subchannel> Subchannel::Create(
|
|
120
|
+
OrphanablePtr<SubchannelConnector> connector,
|
|
121
|
+
const grpc_resolved_address& address, const ChannelArgs& args) {
|
|
122
|
+
if (!IsSubchannelConnectionScalingEnabled()) {
|
|
123
|
+
return OldSubchannel::Create(std::move(connector), address, args);
|
|
124
|
+
}
|
|
125
|
+
return NewSubchannel::Create(std::move(connector), address, args);
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
Subchannel::Subchannel()
|
|
129
|
+
: DualRefCounted<Subchannel>(GRPC_TRACE_FLAG_ENABLED(subchannel_refcount)
|
|
130
|
+
? "Subchannel"
|
|
131
|
+
: nullptr) {}
|
|
132
|
+
|
|
133
|
+
ChannelArgs Subchannel::MakeSubchannelArgs(
|
|
134
|
+
const ChannelArgs& channel_args, const ChannelArgs& address_args,
|
|
135
|
+
const RefCountedPtr<SubchannelPoolInterface>& subchannel_pool,
|
|
136
|
+
const std::string& channel_default_authority) {
|
|
137
|
+
// Note that we start with the channel-level args and then apply the
|
|
138
|
+
// per-address args, so that if a value is present in both, the one
|
|
139
|
+
// in the channel-level args is used. This is particularly important
|
|
140
|
+
// for the GRPC_ARG_DEFAULT_AUTHORITY arg, which we want to allow
|
|
141
|
+
// resolvers to set on a per-address basis only if the application
|
|
142
|
+
// did not explicitly set it at the channel level.
|
|
143
|
+
return channel_args.UnionWith(address_args)
|
|
144
|
+
.SetObject(subchannel_pool)
|
|
145
|
+
// If we haven't already set the default authority arg (i.e., it
|
|
146
|
+
// was not explicitly set by the application nor overridden by
|
|
147
|
+
// the resolver), add it from the channel's default.
|
|
148
|
+
.SetIfUnset(GRPC_ARG_DEFAULT_AUTHORITY, channel_default_authority)
|
|
149
|
+
// Remove channel args that should not affect subchannel
|
|
150
|
+
// uniqueness.
|
|
151
|
+
.Remove(GRPC_ARG_HEALTH_CHECK_SERVICE_NAME)
|
|
152
|
+
.Remove(GRPC_ARG_INHIBIT_HEALTH_CHECKING)
|
|
153
|
+
.Remove(GRPC_ARG_MAX_CONNECTIONS_PER_SUBCHANNEL)
|
|
154
|
+
.Remove(GRPC_ARG_MAX_CONNECTIONS_PER_SUBCHANNEL_CAP)
|
|
155
|
+
.Remove(GRPC_ARG_CHANNELZ_CHANNEL_NODE)
|
|
156
|
+
// Remove all keys with the no-subchannel prefix.
|
|
157
|
+
.RemoveAllKeysWithPrefix(GRPC_ARG_NO_SUBCHANNEL_PREFIX);
|
|
158
|
+
}
|
|
159
|
+
|
|
93
160
|
//
|
|
94
|
-
// ConnectedSubchannel
|
|
161
|
+
// OldSubchannel::ConnectedSubchannel
|
|
95
162
|
//
|
|
96
163
|
|
|
97
|
-
|
|
98
|
-
: RefCounted<ConnectedSubchannel>
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
164
|
+
class OldSubchannel::ConnectedSubchannel
|
|
165
|
+
: public RefCounted<ConnectedSubchannel> {
|
|
166
|
+
public:
|
|
167
|
+
const ChannelArgs& args() const { return args_; }
|
|
168
|
+
|
|
169
|
+
virtual void StartWatch(
|
|
170
|
+
grpc_pollset_set* interested_parties,
|
|
171
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) = 0;
|
|
172
|
+
|
|
173
|
+
// Methods for v3 stack.
|
|
174
|
+
virtual void Ping(absl::AnyInvocable<void(absl::Status)> on_ack) = 0;
|
|
175
|
+
virtual RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
176
|
+
const = 0;
|
|
177
|
+
|
|
178
|
+
// Methods for legacy stack.
|
|
179
|
+
virtual RefCountedPtr<Call> CreateCall(CreateCallArgs args,
|
|
180
|
+
grpc_error_handle* error) = 0;
|
|
181
|
+
virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0;
|
|
182
|
+
|
|
183
|
+
protected:
|
|
184
|
+
explicit ConnectedSubchannel(const ChannelArgs& args)
|
|
185
|
+
: RefCounted<ConnectedSubchannel>(
|
|
186
|
+
GRPC_TRACE_FLAG_ENABLED(subchannel_refcount) ? "ConnectedSubchannel"
|
|
187
|
+
: nullptr),
|
|
188
|
+
args_(args) {}
|
|
189
|
+
|
|
190
|
+
private:
|
|
191
|
+
ChannelArgs args_;
|
|
192
|
+
};
|
|
102
193
|
|
|
103
194
|
//
|
|
104
|
-
// LegacyConnectedSubchannel
|
|
195
|
+
// OldSubchannel::LegacyConnectedSubchannel
|
|
105
196
|
//
|
|
106
197
|
|
|
107
|
-
class LegacyConnectedSubchannel
|
|
198
|
+
class OldSubchannel::LegacyConnectedSubchannel final
|
|
199
|
+
: public ConnectedSubchannel {
|
|
108
200
|
public:
|
|
109
201
|
LegacyConnectedSubchannel(
|
|
110
202
|
RefCountedPtr<grpc_channel_stack> channel_stack, const ChannelArgs& args,
|
|
@@ -117,13 +209,9 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
|
|
|
117
209
|
channel_stack_.reset(DEBUG_LOCATION, "ConnectedSubchannel");
|
|
118
210
|
}
|
|
119
211
|
|
|
120
|
-
channelz::SubchannelNode* channelz_node() const override {
|
|
121
|
-
return channelz_node_.get();
|
|
122
|
-
}
|
|
123
|
-
|
|
124
212
|
void StartWatch(
|
|
125
213
|
grpc_pollset_set* interested_parties,
|
|
126
|
-
OrphanablePtr<
|
|
214
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) override {
|
|
127
215
|
grpc_transport_op* op = grpc_make_transport_op(nullptr);
|
|
128
216
|
op->start_connectivity_watch = std::move(watcher);
|
|
129
217
|
op->start_connectivity_watch_state = GRPC_CHANNEL_READY;
|
|
@@ -142,13 +230,15 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
|
|
|
142
230
|
Crash("call v3 unstarted_call_destination method called in legacy impl");
|
|
143
231
|
}
|
|
144
232
|
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
233
|
+
RefCountedPtr<Call> CreateCall(CreateCallArgs args,
|
|
234
|
+
grpc_error_handle* error) override {
|
|
235
|
+
const size_t allocation_size =
|
|
236
|
+
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
|
|
237
|
+
channel_stack_->call_stack_size;
|
|
238
|
+
Arena* arena = args.arena;
|
|
239
|
+
return RefCountedPtr<SubchannelCall>(
|
|
240
|
+
new (arena->Alloc(allocation_size)) SubchannelCall(
|
|
241
|
+
RefAsSubclass<LegacyConnectedSubchannel>(), args, error));
|
|
152
242
|
}
|
|
153
243
|
|
|
154
244
|
void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) override {
|
|
@@ -161,88 +251,58 @@ class LegacyConnectedSubchannel : public ConnectedSubchannel {
|
|
|
161
251
|
}
|
|
162
252
|
|
|
163
253
|
private:
|
|
164
|
-
|
|
165
|
-
RefCountedPtr<grpc_channel_stack> channel_stack_;
|
|
166
|
-
};
|
|
167
|
-
|
|
168
|
-
//
|
|
169
|
-
// NewConnectedSubchannel
|
|
170
|
-
//
|
|
171
|
-
|
|
172
|
-
class NewConnectedSubchannel : public ConnectedSubchannel {
|
|
173
|
-
public:
|
|
174
|
-
class TransportCallDestination final : public CallDestination {
|
|
254
|
+
class SubchannelCall final : public Call {
|
|
175
255
|
public:
|
|
176
|
-
|
|
177
|
-
|
|
256
|
+
SubchannelCall(
|
|
257
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel,
|
|
258
|
+
CreateCallArgs args, grpc_error_handle* error);
|
|
178
259
|
|
|
179
|
-
|
|
260
|
+
void StartTransportStreamOpBatch(
|
|
261
|
+
grpc_transport_stream_op_batch* batch) override;
|
|
180
262
|
|
|
181
|
-
void
|
|
182
|
-
transport_->StartCall(std::move(handler));
|
|
183
|
-
}
|
|
263
|
+
void SetAfterCallStackDestroy(grpc_closure* closure) override;
|
|
184
264
|
|
|
185
|
-
|
|
265
|
+
// When refcount drops to 0, destroys itself and the associated call stack,
|
|
266
|
+
// but does NOT free the memory because it's in the call arena.
|
|
267
|
+
void Unref() override;
|
|
268
|
+
void Unref(const DebugLocation& location, const char* reason) override;
|
|
186
269
|
|
|
187
270
|
private:
|
|
188
|
-
|
|
271
|
+
// If channelz is enabled, intercepts recv_trailing so that we may check the
|
|
272
|
+
// status and associate it to a subchannel.
|
|
273
|
+
void MaybeInterceptRecvTrailingMetadata(
|
|
274
|
+
grpc_transport_stream_op_batch* batch);
|
|
275
|
+
|
|
276
|
+
static void RecvTrailingMetadataReady(void* arg, grpc_error_handle error);
|
|
277
|
+
|
|
278
|
+
// Interface of RefCounted<>.
|
|
279
|
+
void IncrementRefCount() override;
|
|
280
|
+
void IncrementRefCount(const DebugLocation& location,
|
|
281
|
+
const char* reason) override;
|
|
282
|
+
|
|
283
|
+
static void Destroy(void* arg, grpc_error_handle error);
|
|
284
|
+
|
|
285
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel_;
|
|
286
|
+
grpc_closure* after_call_stack_destroy_ = nullptr;
|
|
287
|
+
// State needed to support channelz interception of recv trailing metadata.
|
|
288
|
+
grpc_closure recv_trailing_metadata_ready_;
|
|
289
|
+
grpc_closure* original_recv_trailing_metadata_ = nullptr;
|
|
290
|
+
grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
|
|
291
|
+
Timestamp deadline_;
|
|
189
292
|
};
|
|
190
293
|
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
RefCountedPtr<TransportCallDestination> transport,
|
|
194
|
-
const ChannelArgs& args)
|
|
195
|
-
: ConnectedSubchannel(args),
|
|
196
|
-
call_destination_(std::move(call_destination)),
|
|
197
|
-
transport_(std::move(transport)) {}
|
|
198
|
-
|
|
199
|
-
void StartWatch(
|
|
200
|
-
grpc_pollset_set*,
|
|
201
|
-
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) override {
|
|
202
|
-
transport_->transport()->StartConnectivityWatch(std::move(watcher));
|
|
203
|
-
}
|
|
204
|
-
|
|
205
|
-
void Ping(absl::AnyInvocable<void(absl::Status)>) override {
|
|
206
|
-
// TODO(ctiller): add new transport API for this in v3 stack
|
|
207
|
-
Crash("not implemented");
|
|
208
|
-
}
|
|
209
|
-
|
|
210
|
-
RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
211
|
-
const override {
|
|
212
|
-
return call_destination_;
|
|
213
|
-
}
|
|
214
|
-
|
|
215
|
-
grpc_channel_stack* channel_stack() const override { return nullptr; }
|
|
216
|
-
|
|
217
|
-
size_t GetInitialCallSizeEstimate() const override { return 0; }
|
|
218
|
-
|
|
219
|
-
void Ping(grpc_closure*, grpc_closure*) override {
|
|
220
|
-
Crash("legacy ping method called in call v3 impl");
|
|
221
|
-
}
|
|
222
|
-
|
|
223
|
-
channelz::SubchannelNode* channelz_node() const override { return nullptr; }
|
|
224
|
-
|
|
225
|
-
private:
|
|
226
|
-
RefCountedPtr<UnstartedCallDestination> call_destination_;
|
|
227
|
-
RefCountedPtr<TransportCallDestination> transport_;
|
|
294
|
+
RefCountedPtr<channelz::SubchannelNode> channelz_node_;
|
|
295
|
+
RefCountedPtr<grpc_channel_stack> channel_stack_;
|
|
228
296
|
};
|
|
229
297
|
|
|
230
298
|
//
|
|
231
|
-
// SubchannelCall
|
|
299
|
+
// OldSubchannel::LegacyConnectedSubchannel::SubchannelCall
|
|
232
300
|
//
|
|
233
301
|
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
Arena* arena = args.arena;
|
|
239
|
-
return RefCountedPtr<SubchannelCall>(new (
|
|
240
|
-
arena->Alloc(allocation_size)) SubchannelCall(std::move(args), error));
|
|
241
|
-
}
|
|
242
|
-
|
|
243
|
-
SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
|
|
244
|
-
: connected_subchannel_(args.connected_subchannel
|
|
245
|
-
.TakeAsSubclass<LegacyConnectedSubchannel>()),
|
|
302
|
+
OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::SubchannelCall(
|
|
303
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel,
|
|
304
|
+
CreateCallArgs args, grpc_error_handle* error)
|
|
305
|
+
: connected_subchannel_(std::move(connected_subchannel)),
|
|
246
306
|
deadline_(args.deadline) {
|
|
247
307
|
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
|
248
308
|
const grpc_call_element_args call_args = {
|
|
@@ -253,21 +313,20 @@ SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
|
|
|
253
313
|
args.arena, // arena
|
|
254
314
|
args.call_combiner // call_combiner
|
|
255
315
|
};
|
|
256
|
-
*error = grpc_call_stack_init(connected_subchannel_->
|
|
316
|
+
*error = grpc_call_stack_init(connected_subchannel_->channel_stack_.get(), 1,
|
|
257
317
|
SubchannelCall::Destroy, this, &call_args);
|
|
258
318
|
if (GPR_UNLIKELY(!error->ok())) {
|
|
259
319
|
LOG(ERROR) << "error: " << StatusToString(*error);
|
|
260
320
|
return;
|
|
261
321
|
}
|
|
262
322
|
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
channelz_node->RecordCallStarted();
|
|
323
|
+
if (connected_subchannel_->channelz_node_ != nullptr) {
|
|
324
|
+
connected_subchannel_->channelz_node_->RecordCallStarted();
|
|
266
325
|
}
|
|
267
326
|
}
|
|
268
327
|
|
|
269
|
-
void SubchannelCall::
|
|
270
|
-
grpc_transport_stream_op_batch* batch) {
|
|
328
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
329
|
+
StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
|
|
271
330
|
MaybeInterceptRecvTrailingMetadata(batch);
|
|
272
331
|
grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
|
273
332
|
grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
|
|
@@ -277,37 +336,24 @@ void SubchannelCall::StartTransportStreamOpBatch(
|
|
|
277
336
|
top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
|
|
278
337
|
}
|
|
279
338
|
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
}
|
|
283
|
-
|
|
284
|
-
void SubchannelCall::SetAfterCallStackDestroy(grpc_closure* closure) {
|
|
339
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
340
|
+
SetAfterCallStackDestroy(grpc_closure* closure) {
|
|
285
341
|
GRPC_CHECK_EQ(after_call_stack_destroy_, nullptr);
|
|
286
342
|
GRPC_CHECK_NE(closure, nullptr);
|
|
287
343
|
after_call_stack_destroy_ = closure;
|
|
288
344
|
}
|
|
289
345
|
|
|
290
|
-
|
|
291
|
-
IncrementRefCount();
|
|
292
|
-
return RefCountedPtr<SubchannelCall>(this);
|
|
293
|
-
}
|
|
294
|
-
|
|
295
|
-
RefCountedPtr<SubchannelCall> SubchannelCall::Ref(const DebugLocation& location,
|
|
296
|
-
const char* reason) {
|
|
297
|
-
IncrementRefCount(location, reason);
|
|
298
|
-
return RefCountedPtr<SubchannelCall>(this);
|
|
299
|
-
}
|
|
300
|
-
|
|
301
|
-
void SubchannelCall::Unref() {
|
|
346
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::Unref() {
|
|
302
347
|
GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
|
|
303
348
|
}
|
|
304
349
|
|
|
305
|
-
void SubchannelCall::Unref(
|
|
306
|
-
|
|
350
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::Unref(
|
|
351
|
+
const DebugLocation& /*location*/, const char* reason) {
|
|
307
352
|
GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
|
|
308
353
|
}
|
|
309
354
|
|
|
310
|
-
void SubchannelCall::Destroy(
|
|
355
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::Destroy(
|
|
356
|
+
void* arg, grpc_error_handle /*error*/) {
|
|
311
357
|
SubchannelCall* self = static_cast<SubchannelCall*>(arg);
|
|
312
358
|
// Keep some members before destroying the subchannel call.
|
|
313
359
|
grpc_closure* after_call_stack_destroy = self->after_call_stack_destroy_;
|
|
@@ -325,12 +371,12 @@ void SubchannelCall::Destroy(void* arg, grpc_error_handle /*error*/) {
|
|
|
325
371
|
// stack.
|
|
326
372
|
}
|
|
327
373
|
|
|
328
|
-
void SubchannelCall::
|
|
329
|
-
grpc_transport_stream_op_batch* batch) {
|
|
374
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
375
|
+
MaybeInterceptRecvTrailingMetadata(grpc_transport_stream_op_batch* batch) {
|
|
330
376
|
// only intercept payloads with recv trailing.
|
|
331
377
|
if (!batch->recv_trailing_metadata) return;
|
|
332
378
|
// only add interceptor is channelz is enabled.
|
|
333
|
-
if (connected_subchannel_->
|
|
379
|
+
if (connected_subchannel_->channelz_node_ == nullptr) return;
|
|
334
380
|
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
|
|
335
381
|
this, grpc_schedule_on_exec_ctx);
|
|
336
382
|
// save some state needed for the interception callback.
|
|
@@ -357,14 +403,14 @@ void GetCallStatus(grpc_status_code* status, Timestamp deadline,
|
|
|
357
403
|
|
|
358
404
|
} // namespace
|
|
359
405
|
|
|
360
|
-
void SubchannelCall::
|
|
361
|
-
|
|
406
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
407
|
+
RecvTrailingMetadataReady(void* arg, grpc_error_handle error) {
|
|
362
408
|
SubchannelCall* call = static_cast<SubchannelCall*>(arg);
|
|
363
409
|
GRPC_CHECK_NE(call->recv_trailing_metadata_, nullptr);
|
|
364
410
|
grpc_status_code status = GRPC_STATUS_OK;
|
|
365
411
|
GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_, error);
|
|
366
412
|
channelz::SubchannelNode* channelz_node =
|
|
367
|
-
call->connected_subchannel_->
|
|
413
|
+
call->connected_subchannel_->channelz_node_.get();
|
|
368
414
|
GRPC_CHECK_NE(channelz_node, nullptr);
|
|
369
415
|
if (status == GRPC_STATUS_OK) {
|
|
370
416
|
channelz_node->RecordCallSucceeded();
|
|
@@ -374,24 +420,85 @@ void SubchannelCall::RecvTrailingMetadataReady(void* arg,
|
|
|
374
420
|
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_, error);
|
|
375
421
|
}
|
|
376
422
|
|
|
377
|
-
void SubchannelCall::
|
|
423
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
424
|
+
IncrementRefCount() {
|
|
378
425
|
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
|
|
379
426
|
}
|
|
380
427
|
|
|
381
|
-
void SubchannelCall::
|
|
382
|
-
|
|
428
|
+
void OldSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
429
|
+
IncrementRefCount(const DebugLocation& /*location*/, const char* reason) {
|
|
383
430
|
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
|
|
384
431
|
}
|
|
385
432
|
|
|
386
433
|
//
|
|
387
|
-
//
|
|
434
|
+
// OldSubchannel::NewConnectedSubchannel
|
|
435
|
+
//
|
|
436
|
+
|
|
437
|
+
class OldSubchannel::NewConnectedSubchannel final : public ConnectedSubchannel {
|
|
438
|
+
public:
|
|
439
|
+
class TransportCallDestination final : public CallDestination {
|
|
440
|
+
public:
|
|
441
|
+
explicit TransportCallDestination(OrphanablePtr<ClientTransport> transport)
|
|
442
|
+
: transport_(std::move(transport)) {}
|
|
443
|
+
|
|
444
|
+
ClientTransport* transport() { return transport_.get(); }
|
|
445
|
+
|
|
446
|
+
void HandleCall(CallHandler handler) override {
|
|
447
|
+
transport_->StartCall(std::move(handler));
|
|
448
|
+
}
|
|
449
|
+
|
|
450
|
+
void Orphaned() override { transport_.reset(); }
|
|
451
|
+
|
|
452
|
+
private:
|
|
453
|
+
OrphanablePtr<ClientTransport> transport_;
|
|
454
|
+
};
|
|
455
|
+
|
|
456
|
+
NewConnectedSubchannel(
|
|
457
|
+
RefCountedPtr<UnstartedCallDestination> call_destination,
|
|
458
|
+
RefCountedPtr<TransportCallDestination> transport,
|
|
459
|
+
const ChannelArgs& args)
|
|
460
|
+
: ConnectedSubchannel(args),
|
|
461
|
+
call_destination_(std::move(call_destination)),
|
|
462
|
+
transport_(std::move(transport)) {}
|
|
463
|
+
|
|
464
|
+
void StartWatch(
|
|
465
|
+
grpc_pollset_set*,
|
|
466
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) override {
|
|
467
|
+
transport_->transport()->StartConnectivityWatch(std::move(watcher));
|
|
468
|
+
}
|
|
469
|
+
|
|
470
|
+
void Ping(absl::AnyInvocable<void(absl::Status)>) override {
|
|
471
|
+
// TODO(ctiller): add new transport API for this in v3 stack
|
|
472
|
+
Crash("not implemented");
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
476
|
+
const override {
|
|
477
|
+
return call_destination_;
|
|
478
|
+
}
|
|
479
|
+
|
|
480
|
+
RefCountedPtr<Call> CreateCall(CreateCallArgs, grpc_error_handle*) override {
|
|
481
|
+
Crash("legacy CreateCall() called on v3 impl");
|
|
482
|
+
}
|
|
483
|
+
|
|
484
|
+
void Ping(grpc_closure*, grpc_closure*) override {
|
|
485
|
+
Crash("legacy ping method called in call v3 impl");
|
|
486
|
+
}
|
|
487
|
+
|
|
488
|
+
private:
|
|
489
|
+
RefCountedPtr<UnstartedCallDestination> call_destination_;
|
|
490
|
+
RefCountedPtr<TransportCallDestination> transport_;
|
|
491
|
+
};
|
|
492
|
+
|
|
493
|
+
//
|
|
494
|
+
// OldSubchannel::ConnectedSubchannelStateWatcher
|
|
388
495
|
//
|
|
389
496
|
|
|
390
|
-
class
|
|
497
|
+
class OldSubchannel::ConnectedSubchannelStateWatcher final
|
|
391
498
|
: public AsyncConnectivityStateWatcherInterface {
|
|
392
499
|
public:
|
|
393
500
|
// Must be instantiated while holding c->mu.
|
|
394
|
-
explicit ConnectedSubchannelStateWatcher(WeakRefCountedPtr<
|
|
501
|
+
explicit ConnectedSubchannelStateWatcher(WeakRefCountedPtr<OldSubchannel> c)
|
|
395
502
|
: subchannel_(std::move(c)) {}
|
|
396
503
|
|
|
397
504
|
~ConnectedSubchannelStateWatcher() override {
|
|
@@ -401,7 +508,7 @@ class Subchannel::ConnectedSubchannelStateWatcher final
|
|
|
401
508
|
private:
|
|
402
509
|
void OnConnectivityStateChange(grpc_connectivity_state new_state,
|
|
403
510
|
const absl::Status& status) override {
|
|
404
|
-
|
|
511
|
+
OldSubchannel* c = subchannel_.get();
|
|
405
512
|
{
|
|
406
513
|
MutexLock lock(&c->mu_);
|
|
407
514
|
// If we're either shutting down or have already seen this connection
|
|
@@ -442,86 +549,24 @@ class Subchannel::ConnectedSubchannelStateWatcher final
|
|
|
442
549
|
}
|
|
443
550
|
}
|
|
444
551
|
|
|
445
|
-
WeakRefCountedPtr<
|
|
446
|
-
};
|
|
447
|
-
|
|
448
|
-
//
|
|
449
|
-
// Subchannel::ConnectionStateWatcher
|
|
450
|
-
//
|
|
451
|
-
|
|
452
|
-
class Subchannel::ConnectionStateWatcher final
|
|
453
|
-
: public Transport::StateWatcher {
|
|
454
|
-
public:
|
|
455
|
-
explicit ConnectionStateWatcher(WeakRefCountedPtr<Subchannel> subchannel)
|
|
456
|
-
: subchannel_(std::move(subchannel)) {}
|
|
457
|
-
|
|
458
|
-
~ConnectionStateWatcher() override {
|
|
459
|
-
subchannel_.reset(DEBUG_LOCATION, "state_watcher");
|
|
460
|
-
}
|
|
461
|
-
|
|
462
|
-
void OnDisconnect(absl::Status status,
|
|
463
|
-
DisconnectInfo disconnect_info) override {
|
|
464
|
-
MutexLock lock(&subchannel_->mu_);
|
|
465
|
-
// Handle keepalive update.
|
|
466
|
-
if (disconnect_info.keepalive_time.has_value()) {
|
|
467
|
-
subchannel_->ThrottleKeepaliveTimeLocked(*disconnect_info.keepalive_time);
|
|
468
|
-
subchannel_->watcher_list_.NotifyOnKeepaliveUpdateLocked(
|
|
469
|
-
*disconnect_info.keepalive_time);
|
|
470
|
-
}
|
|
471
|
-
// We shouldn't ever see OnDisconnect() more than once for a given
|
|
472
|
-
// connection, but we'll be defensive just in case: if the connected
|
|
473
|
-
// subchannel has already been cleared, then this becomes a no-op.
|
|
474
|
-
RefCountedPtr<ConnectedSubchannel> connected_subchannel =
|
|
475
|
-
std::move(subchannel_->connected_subchannel_);
|
|
476
|
-
if (connected_subchannel == nullptr) return;
|
|
477
|
-
GRPC_TRACE_LOG(subchannel, INFO)
|
|
478
|
-
<< "subchannel " << subchannel_.get() << " "
|
|
479
|
-
<< subchannel_->key_.ToString() << ": connected subchannel "
|
|
480
|
-
<< connected_subchannel.get() << " reports disconnection: " << status;
|
|
481
|
-
// If the subchannel was created from an endpoint, then we report
|
|
482
|
-
// TRANSIENT_FAILURE here instead of IDLE. The subchannel will never
|
|
483
|
-
// leave TRANSIENT_FAILURE state, because there is no way for us to
|
|
484
|
-
// establish a new connection. Otherwise, we report IDLE here.
|
|
485
|
-
if (subchannel_->created_from_endpoint_) {
|
|
486
|
-
subchannel_->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
|
|
487
|
-
status);
|
|
488
|
-
} else {
|
|
489
|
-
subchannel_->SetConnectivityStateLocked(GRPC_CHANNEL_IDLE,
|
|
490
|
-
absl::OkStatus());
|
|
491
|
-
}
|
|
492
|
-
subchannel_->backoff_.Reset();
|
|
493
|
-
}
|
|
494
|
-
|
|
495
|
-
void OnPeerMaxConcurrentStreamsUpdate(
|
|
496
|
-
uint32_t /*max_concurrent_streams*/,
|
|
497
|
-
std::unique_ptr<MaxConcurrentStreamsUpdateDoneHandle> /*on_done*/)
|
|
498
|
-
override {
|
|
499
|
-
// TODO(roth): Implement this as part of adding connection scaling.
|
|
500
|
-
}
|
|
501
|
-
|
|
502
|
-
grpc_pollset_set* interested_parties() const override {
|
|
503
|
-
return subchannel_->pollset_set_;
|
|
504
|
-
}
|
|
505
|
-
|
|
506
|
-
private:
|
|
507
|
-
WeakRefCountedPtr<Subchannel> subchannel_;
|
|
552
|
+
WeakRefCountedPtr<OldSubchannel> subchannel_;
|
|
508
553
|
};
|
|
509
554
|
|
|
510
555
|
//
|
|
511
|
-
//
|
|
556
|
+
// OldSubchannel::ConnectivityStateWatcherList
|
|
512
557
|
//
|
|
513
558
|
|
|
514
|
-
void
|
|
559
|
+
void OldSubchannel::ConnectivityStateWatcherList::AddWatcherLocked(
|
|
515
560
|
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
|
|
516
561
|
watchers_.insert(std::move(watcher));
|
|
517
562
|
}
|
|
518
563
|
|
|
519
|
-
void
|
|
564
|
+
void OldSubchannel::ConnectivityStateWatcherList::RemoveWatcherLocked(
|
|
520
565
|
ConnectivityStateWatcherInterface* watcher) {
|
|
521
566
|
watchers_.erase(watcher);
|
|
522
567
|
}
|
|
523
568
|
|
|
524
|
-
void
|
|
569
|
+
void OldSubchannel::ConnectivityStateWatcherList::NotifyLocked(
|
|
525
570
|
grpc_connectivity_state state, const absl::Status& status) {
|
|
526
571
|
for (const auto& watcher : watchers_) {
|
|
527
572
|
subchannel_->work_serializer_.Run([watcher, state, status]() {
|
|
@@ -530,7 +575,7 @@ void Subchannel::ConnectivityStateWatcherList::NotifyLocked(
|
|
|
530
575
|
}
|
|
531
576
|
}
|
|
532
577
|
|
|
533
|
-
void
|
|
578
|
+
void OldSubchannel::ConnectivityStateWatcherList::NotifyOnKeepaliveUpdateLocked(
|
|
534
579
|
Duration new_keepalive_time) {
|
|
535
580
|
for (const auto& watcher : watchers_) {
|
|
536
581
|
subchannel_->work_serializer_.Run([watcher, new_keepalive_time]() {
|
|
@@ -540,7 +585,7 @@ void Subchannel::ConnectivityStateWatcherList::NotifyOnKeepaliveUpdateLocked(
|
|
|
540
585
|
}
|
|
541
586
|
|
|
542
587
|
uint32_t
|
|
543
|
-
|
|
588
|
+
OldSubchannel::ConnectivityStateWatcherList::GetMaxConnectionsPerSubchannel()
|
|
544
589
|
const {
|
|
545
590
|
uint32_t max_connections_per_subchannel = 1;
|
|
546
591
|
for (const auto& watcher : watchers_) {
|
|
@@ -552,7 +597,7 @@ Subchannel::ConnectivityStateWatcherList::GetMaxConnectionsPerSubchannel()
|
|
|
552
597
|
}
|
|
553
598
|
|
|
554
599
|
//
|
|
555
|
-
//
|
|
600
|
+
// OldSubchannel
|
|
556
601
|
//
|
|
557
602
|
|
|
558
603
|
namespace {
|
|
@@ -595,13 +640,10 @@ BackOff::Options ParseArgsForBackoffValues(const ChannelArgs& args,
|
|
|
595
640
|
|
|
596
641
|
} // namespace
|
|
597
642
|
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
:
|
|
602
|
-
? "Subchannel"
|
|
603
|
-
: nullptr),
|
|
604
|
-
key_(std::move(key)),
|
|
643
|
+
OldSubchannel::OldSubchannel(SubchannelKey key,
|
|
644
|
+
OrphanablePtr<SubchannelConnector> connector,
|
|
645
|
+
const ChannelArgs& args)
|
|
646
|
+
: key_(std::move(key)),
|
|
605
647
|
created_from_endpoint_(args.Contains(GRPC_ARG_SUBCHANNEL_ENDPOINT)),
|
|
606
648
|
args_(args),
|
|
607
649
|
pollset_set_(grpc_pollset_set_create()),
|
|
@@ -646,7 +688,7 @@ Subchannel::Subchannel(SubchannelKey key,
|
|
|
646
688
|
}
|
|
647
689
|
}
|
|
648
690
|
|
|
649
|
-
|
|
691
|
+
OldSubchannel::~OldSubchannel() {
|
|
650
692
|
if (channelz_node_ != nullptr) {
|
|
651
693
|
GRPC_CHANNELZ_LOG(channelz_node_) << "Subchannel destroyed";
|
|
652
694
|
channelz_node_->UpdateConnectivityState(GRPC_CHANNEL_SHUTDOWN);
|
|
@@ -657,17 +699,18 @@ Subchannel::~Subchannel() {
|
|
|
657
699
|
ShutdownInternally();
|
|
658
700
|
}
|
|
659
701
|
|
|
660
|
-
RefCountedPtr<Subchannel>
|
|
702
|
+
RefCountedPtr<Subchannel> OldSubchannel::Create(
|
|
661
703
|
OrphanablePtr<SubchannelConnector> connector,
|
|
662
704
|
const grpc_resolved_address& address, const ChannelArgs& args) {
|
|
663
705
|
SubchannelKey key(address, args);
|
|
664
706
|
auto* subchannel_pool = args.GetObject<SubchannelPoolInterface>();
|
|
665
707
|
GRPC_CHECK_NE(subchannel_pool, nullptr);
|
|
666
|
-
RefCountedPtr<
|
|
708
|
+
RefCountedPtr<OldSubchannel> c =
|
|
709
|
+
subchannel_pool->FindSubchannel(key).TakeAsSubclass<OldSubchannel>();
|
|
667
710
|
if (c != nullptr) {
|
|
668
711
|
return c;
|
|
669
712
|
}
|
|
670
|
-
c = MakeRefCounted<
|
|
713
|
+
c = MakeRefCounted<OldSubchannel>(std::move(key), std::move(connector), args);
|
|
671
714
|
if (c->created_from_endpoint_) {
|
|
672
715
|
// We don't interact with the subchannel pool in this case.
|
|
673
716
|
// Instead, we unconditionally return the newly created subchannel.
|
|
@@ -681,18 +724,19 @@ RefCountedPtr<Subchannel> Subchannel::Create(
|
|
|
681
724
|
// Otherwise, in case of a registration race, unreffing c in
|
|
682
725
|
// RegisterSubchannel() will cause c to be tried to be unregistered, while
|
|
683
726
|
// its key maps to a different subchannel.
|
|
684
|
-
RefCountedPtr<
|
|
685
|
-
subchannel_pool->RegisterSubchannel(c->key_, c)
|
|
727
|
+
RefCountedPtr<OldSubchannel> registered =
|
|
728
|
+
subchannel_pool->RegisterSubchannel(c->key_, c)
|
|
729
|
+
.TakeAsSubclass<OldSubchannel>();
|
|
686
730
|
if (registered == c) c->subchannel_pool_ = subchannel_pool->Ref();
|
|
687
731
|
return registered;
|
|
688
732
|
}
|
|
689
733
|
|
|
690
|
-
void
|
|
734
|
+
void OldSubchannel::ThrottleKeepaliveTime(Duration new_keepalive_time) {
|
|
691
735
|
MutexLock lock(&mu_);
|
|
692
736
|
ThrottleKeepaliveTimeLocked(new_keepalive_time);
|
|
693
737
|
}
|
|
694
738
|
|
|
695
|
-
void
|
|
739
|
+
void OldSubchannel::ThrottleKeepaliveTimeLocked(Duration new_keepalive_time) {
|
|
696
740
|
// Only update the value if the new keepalive time is larger.
|
|
697
741
|
if (new_keepalive_time > keepalive_time_) {
|
|
698
742
|
keepalive_time_ = new_keepalive_time;
|
|
@@ -703,11 +747,11 @@ void Subchannel::ThrottleKeepaliveTimeLocked(Duration new_keepalive_time) {
|
|
|
703
747
|
}
|
|
704
748
|
}
|
|
705
749
|
|
|
706
|
-
channelz::SubchannelNode*
|
|
750
|
+
channelz::SubchannelNode* OldSubchannel::channelz_node() {
|
|
707
751
|
return channelz_node_.get();
|
|
708
752
|
}
|
|
709
753
|
|
|
710
|
-
void
|
|
754
|
+
void OldSubchannel::WatchConnectivityState(
|
|
711
755
|
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
|
|
712
756
|
MutexLock lock(&mu_);
|
|
713
757
|
grpc_pollset_set* interested_parties = watcher->interested_parties();
|
|
@@ -722,7 +766,7 @@ void Subchannel::WatchConnectivityState(
|
|
|
722
766
|
watcher_list_.AddWatcherLocked(std::move(watcher));
|
|
723
767
|
}
|
|
724
768
|
|
|
725
|
-
void
|
|
769
|
+
void OldSubchannel::CancelConnectivityStateWatch(
|
|
726
770
|
ConnectivityStateWatcherInterface* watcher) {
|
|
727
771
|
MutexLock lock(&mu_);
|
|
728
772
|
grpc_pollset_set* interested_parties = watcher->interested_parties();
|
|
@@ -732,14 +776,14 @@ void Subchannel::CancelConnectivityStateWatch(
|
|
|
732
776
|
watcher_list_.RemoveWatcherLocked(watcher);
|
|
733
777
|
}
|
|
734
778
|
|
|
735
|
-
void
|
|
779
|
+
void OldSubchannel::RequestConnection() {
|
|
736
780
|
MutexLock lock(&mu_);
|
|
737
781
|
if (state_ == GRPC_CHANNEL_IDLE) {
|
|
738
782
|
StartConnectingLocked();
|
|
739
783
|
}
|
|
740
784
|
}
|
|
741
785
|
|
|
742
|
-
void
|
|
786
|
+
void OldSubchannel::ResetBackoff() {
|
|
743
787
|
// Hold a ref to ensure cancellation and subsequent deletion of the closure
|
|
744
788
|
// does not eliminate the last ref and destroy the Subchannel before the
|
|
745
789
|
// method returns.
|
|
@@ -754,7 +798,7 @@ void Subchannel::ResetBackoff() {
|
|
|
754
798
|
}
|
|
755
799
|
}
|
|
756
800
|
|
|
757
|
-
void
|
|
801
|
+
void OldSubchannel::Orphaned() {
|
|
758
802
|
// The subchannel_pool is only used once here in this subchannel, so the
|
|
759
803
|
// access can be outside of the lock.
|
|
760
804
|
if (subchannel_pool_ != nullptr) {
|
|
@@ -768,7 +812,7 @@ void Subchannel::Orphaned() {
|
|
|
768
812
|
connected_subchannel_.reset();
|
|
769
813
|
}
|
|
770
814
|
|
|
771
|
-
void
|
|
815
|
+
void OldSubchannel::GetOrAddDataProducer(
|
|
772
816
|
UniqueTypeName type,
|
|
773
817
|
std::function<void(DataProducerInterface**)> get_or_add) {
|
|
774
818
|
MutexLock lock(&mu_);
|
|
@@ -776,7 +820,7 @@ void Subchannel::GetOrAddDataProducer(
|
|
|
776
820
|
get_or_add(&it->second);
|
|
777
821
|
}
|
|
778
822
|
|
|
779
|
-
void
|
|
823
|
+
void OldSubchannel::RemoveDataProducer(DataProducerInterface* data_producer) {
|
|
780
824
|
MutexLock lock(&mu_);
|
|
781
825
|
auto it = data_producer_map_.find(data_producer->type());
|
|
782
826
|
if (it != data_producer_map_.end() && it->second == data_producer) {
|
|
@@ -785,8 +829,8 @@ void Subchannel::RemoveDataProducer(DataProducerInterface* data_producer) {
|
|
|
785
829
|
}
|
|
786
830
|
|
|
787
831
|
// Note: Must be called with a state that is different from the current state.
|
|
788
|
-
void
|
|
789
|
-
|
|
832
|
+
void OldSubchannel::SetConnectivityStateLocked(grpc_connectivity_state state,
|
|
833
|
+
const absl::Status& status) {
|
|
790
834
|
state_ = state;
|
|
791
835
|
if (status.ok()) {
|
|
792
836
|
status_ = status;
|
|
@@ -819,12 +863,12 @@ void Subchannel::SetConnectivityStateLocked(grpc_connectivity_state state,
|
|
|
819
863
|
watcher_list_.NotifyLocked(state, status_);
|
|
820
864
|
}
|
|
821
865
|
|
|
822
|
-
void
|
|
866
|
+
void OldSubchannel::OnRetryTimer() {
|
|
823
867
|
MutexLock lock(&mu_);
|
|
824
868
|
OnRetryTimerLocked();
|
|
825
869
|
}
|
|
826
870
|
|
|
827
|
-
void
|
|
871
|
+
void OldSubchannel::OnRetryTimerLocked() {
|
|
828
872
|
if (shutdown_) return;
|
|
829
873
|
GRPC_TRACE_LOG(subchannel, INFO)
|
|
830
874
|
<< "subchannel " << this << " " << key_.ToString()
|
|
@@ -832,7 +876,7 @@ void Subchannel::OnRetryTimerLocked() {
|
|
|
832
876
|
SetConnectivityStateLocked(GRPC_CHANNEL_IDLE, absl::OkStatus());
|
|
833
877
|
}
|
|
834
878
|
|
|
835
|
-
void
|
|
879
|
+
void OldSubchannel::StartConnectingLocked() {
|
|
836
880
|
// Set next attempt time.
|
|
837
881
|
const Timestamp now = Timestamp::Now();
|
|
838
882
|
const Timestamp min_deadline = now + min_connect_timeout_;
|
|
@@ -849,8 +893,8 @@ void Subchannel::StartConnectingLocked() {
|
|
|
849
893
|
connector_->Connect(args, &connecting_result_, &on_connecting_finished_);
|
|
850
894
|
}
|
|
851
895
|
|
|
852
|
-
void
|
|
853
|
-
WeakRefCountedPtr<
|
|
896
|
+
void OldSubchannel::OnConnectingFinished(void* arg, grpc_error_handle error) {
|
|
897
|
+
WeakRefCountedPtr<OldSubchannel> c(static_cast<OldSubchannel*>(arg));
|
|
854
898
|
{
|
|
855
899
|
MutexLock lock(&c->mu_);
|
|
856
900
|
c->OnConnectingFinishedLocked(error);
|
|
@@ -858,7 +902,7 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error_handle error) {
|
|
|
858
902
|
c.reset(DEBUG_LOCATION, "Connect");
|
|
859
903
|
}
|
|
860
904
|
|
|
861
|
-
void
|
|
905
|
+
void OldSubchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
|
|
862
906
|
if (shutdown_) {
|
|
863
907
|
connecting_result_.Reset();
|
|
864
908
|
return;
|
|
@@ -884,7 +928,8 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
|
|
|
884
928
|
if (created_from_endpoint_) return;
|
|
885
929
|
retry_timer_handle_ = event_engine_->RunAfter(
|
|
886
930
|
time_until_next_attempt,
|
|
887
|
-
[self = WeakRef(DEBUG_LOCATION, "RetryTimer")
|
|
931
|
+
[self = WeakRef(DEBUG_LOCATION, "RetryTimer")
|
|
932
|
+
.TakeAsSubclass<OldSubchannel>()]() mutable {
|
|
888
933
|
{
|
|
889
934
|
ExecCtx exec_ctx;
|
|
890
935
|
self->OnRetryTimer();
|
|
@@ -900,13 +945,8 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
|
|
|
900
945
|
}
|
|
901
946
|
}
|
|
902
947
|
|
|
903
|
-
bool
|
|
948
|
+
bool OldSubchannel::PublishTransportLocked() {
|
|
904
949
|
auto socket_node = connecting_result_.transport->GetSocketNode();
|
|
905
|
-
if (IsTransportStateWatcherEnabled()) {
|
|
906
|
-
connecting_result_.transport->StartWatch(
|
|
907
|
-
MakeRefCounted<ConnectionStateWatcher>(
|
|
908
|
-
WeakRef(DEBUG_LOCATION, "state_watcher")));
|
|
909
|
-
}
|
|
910
950
|
if (connecting_result_.transport->filter_stack_transport() != nullptr) {
|
|
911
951
|
// Construct channel stack.
|
|
912
952
|
// Builder takes ownership of transport.
|
|
@@ -976,41 +1016,1417 @@ bool Subchannel::PublishTransportLocked() {
|
|
|
976
1016
|
socket_node->AddParent(channelz_node_.get());
|
|
977
1017
|
}
|
|
978
1018
|
}
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
}
|
|
1019
|
+
connected_subchannel_->StartWatch(
|
|
1020
|
+
pollset_set_, MakeOrphanable<ConnectedSubchannelStateWatcher>(
|
|
1021
|
+
WeakRef(DEBUG_LOCATION, "state_watcher")
|
|
1022
|
+
.TakeAsSubclass<OldSubchannel>()));
|
|
984
1023
|
// Report initial state.
|
|
985
1024
|
SetConnectivityStateLocked(GRPC_CHANNEL_READY, absl::Status());
|
|
986
1025
|
return true;
|
|
987
1026
|
}
|
|
988
1027
|
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
return
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1028
|
+
RefCountedPtr<Subchannel::Call> OldSubchannel::CreateCall(
|
|
1029
|
+
CreateCallArgs args, grpc_error_handle* error) {
|
|
1030
|
+
auto connected_subchannel = GetConnectedSubchannel();
|
|
1031
|
+
if (connected_subchannel == nullptr) return nullptr;
|
|
1032
|
+
return connected_subchannel->CreateCall(args, error);
|
|
1033
|
+
}
|
|
1034
|
+
|
|
1035
|
+
RefCountedPtr<UnstartedCallDestination> OldSubchannel::call_destination() {
|
|
1036
|
+
auto connected_subchannel = GetConnectedSubchannel();
|
|
1037
|
+
if (connected_subchannel == nullptr) return nullptr;
|
|
1038
|
+
return connected_subchannel->unstarted_call_destination();
|
|
1039
|
+
}
|
|
1040
|
+
|
|
1041
|
+
void OldSubchannel::Ping(absl::AnyInvocable<void(absl::Status)>) {
|
|
1042
|
+
// TODO(ctiller): Implement
|
|
1043
|
+
}
|
|
1044
|
+
|
|
1045
|
+
absl::Status OldSubchannel::Ping(grpc_closure* on_initiate,
|
|
1046
|
+
grpc_closure* on_ack) {
|
|
1047
|
+
auto connected_subchannel = GetConnectedSubchannel();
|
|
1048
|
+
if (connected_subchannel == nullptr) {
|
|
1049
|
+
return absl::UnavailableError("no connection");
|
|
1050
|
+
}
|
|
1051
|
+
connected_subchannel->Ping(on_initiate, on_ack);
|
|
1052
|
+
return absl::OkStatus();
|
|
1053
|
+
}
|
|
1054
|
+
|
|
1055
|
+
RefCountedPtr<OldSubchannel::ConnectedSubchannel>
|
|
1056
|
+
OldSubchannel::GetConnectedSubchannel() {
|
|
1057
|
+
MutexLock lock(&mu_);
|
|
1058
|
+
return connected_subchannel_;
|
|
1059
|
+
}
|
|
1060
|
+
|
|
1061
|
+
//
|
|
1062
|
+
// NewSubchannel::ConnectedSubchannel
|
|
1063
|
+
//
|
|
1064
|
+
|
|
1065
|
+
class NewSubchannel::ConnectedSubchannel
|
|
1066
|
+
: public DualRefCounted<ConnectedSubchannel> {
|
|
1067
|
+
public:
|
|
1068
|
+
~ConnectedSubchannel() override {
|
|
1069
|
+
subchannel_.reset(DEBUG_LOCATION, "ConnectedSubchannel");
|
|
1070
|
+
}
|
|
1071
|
+
|
|
1072
|
+
const ChannelArgs& args() const { return args_; }
|
|
1073
|
+
NewSubchannel* subchannel() const { return subchannel_.get(); }
|
|
1074
|
+
|
|
1075
|
+
virtual void StartWatch(
|
|
1076
|
+
grpc_pollset_set* interested_parties,
|
|
1077
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) = 0;
|
|
1078
|
+
|
|
1079
|
+
// Methods for v3 stack.
|
|
1080
|
+
virtual void Ping(absl::AnyInvocable<void(absl::Status)> on_ack) = 0;
|
|
1081
|
+
virtual RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
1082
|
+
const = 0;
|
|
1083
|
+
|
|
1084
|
+
// Methods for legacy stack.
|
|
1085
|
+
virtual RefCountedPtr<Call> CreateCall(CreateCallArgs args,
|
|
1086
|
+
grpc_error_handle* error) = 0;
|
|
1087
|
+
virtual void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) = 0;
|
|
1088
|
+
|
|
1089
|
+
// Returns true if there is quota for another RPC to start on this
|
|
1090
|
+
// connection.
|
|
1091
|
+
GRPC_MUST_USE_RESULT bool SetMaxConcurrentStreams(
|
|
1092
|
+
uint32_t max_concurrent_streams) {
|
|
1093
|
+
return stream_limiter_.SetMaxConcurrentStreams(max_concurrent_streams);
|
|
1094
|
+
}
|
|
1095
|
+
|
|
1096
|
+
// Returns true if the RPC can start.
|
|
1097
|
+
bool GetQuotaForRpc() {
|
|
1098
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1099
|
+
<< "subchannel " << subchannel_.get() << " connection " << this
|
|
1100
|
+
<< ": attempting to get quota for an RPC...";
|
|
1101
|
+
bool result = stream_limiter_.GetQuotaForRpc();
|
|
1102
|
+
GRPC_TRACE_LOG(subchannel_call, INFO) << " quota acquired: " << result;
|
|
1103
|
+
return result;
|
|
1104
|
+
}
|
|
1105
|
+
|
|
1106
|
+
// Returns true if this RPC finishing brought the connection below quota.
|
|
1107
|
+
bool ReturnQuotaForRpc() { return stream_limiter_.ReturnQuotaForRpc(); }
|
|
1108
|
+
|
|
1109
|
+
protected:
|
|
1110
|
+
explicit ConnectedSubchannel(WeakRefCountedPtr<NewSubchannel> subchannel,
|
|
1111
|
+
const ChannelArgs& args,
|
|
1112
|
+
uint32_t max_concurrent_streams)
|
|
1113
|
+
: DualRefCounted<ConnectedSubchannel>(
|
|
1114
|
+
GRPC_TRACE_FLAG_ENABLED(subchannel_refcount) ? "ConnectedSubchannel"
|
|
1115
|
+
: nullptr),
|
|
1116
|
+
subchannel_(std::move(subchannel)),
|
|
1117
|
+
args_(args),
|
|
1118
|
+
stream_limiter_(max_concurrent_streams) {}
|
|
1119
|
+
|
|
1120
|
+
private:
|
|
1121
|
+
WeakRefCountedPtr<NewSubchannel> subchannel_;
|
|
1122
|
+
ChannelArgs args_;
|
|
1123
|
+
SubchannelStreamLimiter stream_limiter_;
|
|
1124
|
+
};
|
|
1125
|
+
|
|
1126
|
+
//
|
|
1127
|
+
// NewSubchannel::LegacyConnectedSubchannel
|
|
1128
|
+
//
|
|
1129
|
+
|
|
1130
|
+
class NewSubchannel::LegacyConnectedSubchannel final
|
|
1131
|
+
: public ConnectedSubchannel {
|
|
1132
|
+
public:
|
|
1133
|
+
LegacyConnectedSubchannel(
|
|
1134
|
+
WeakRefCountedPtr<NewSubchannel> subchannel,
|
|
1135
|
+
RefCountedPtr<grpc_channel_stack> channel_stack, const ChannelArgs& args,
|
|
1136
|
+
RefCountedPtr<channelz::SubchannelNode> channelz_node,
|
|
1137
|
+
uint32_t max_concurrent_streams)
|
|
1138
|
+
: ConnectedSubchannel(std::move(subchannel), args,
|
|
1139
|
+
max_concurrent_streams),
|
|
1140
|
+
channelz_node_(std::move(channelz_node)),
|
|
1141
|
+
channel_stack_(std::move(channel_stack)) {}
|
|
1142
|
+
|
|
1143
|
+
void Orphaned() override {
|
|
1144
|
+
channel_stack_.reset(DEBUG_LOCATION, "ConnectedSubchannel");
|
|
1145
|
+
}
|
|
1146
|
+
|
|
1147
|
+
void StartWatch(
|
|
1148
|
+
grpc_pollset_set* interested_parties,
|
|
1149
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) override {
|
|
1150
|
+
grpc_transport_op* op = grpc_make_transport_op(nullptr);
|
|
1151
|
+
op->start_connectivity_watch = std::move(watcher);
|
|
1152
|
+
op->start_connectivity_watch_state = GRPC_CHANNEL_READY;
|
|
1153
|
+
op->bind_pollset_set = interested_parties;
|
|
1154
|
+
grpc_channel_element* elem =
|
|
1155
|
+
grpc_channel_stack_element(channel_stack_.get(), 0);
|
|
1156
|
+
elem->filter->start_transport_op(elem, op);
|
|
1157
|
+
}
|
|
1158
|
+
|
|
1159
|
+
void Ping(absl::AnyInvocable<void(absl::Status)>) override {
|
|
1160
|
+
Crash("call v3 ping method called in legacy impl");
|
|
1161
|
+
}
|
|
1162
|
+
|
|
1163
|
+
RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
1164
|
+
const override {
|
|
1165
|
+
Crash("call v3 unstarted_call_destination method called in legacy impl");
|
|
1166
|
+
}
|
|
1167
|
+
|
|
1168
|
+
RefCountedPtr<Call> CreateCall(CreateCallArgs args,
|
|
1169
|
+
grpc_error_handle* error) override {
|
|
1170
|
+
const size_t allocation_size =
|
|
1171
|
+
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
|
|
1172
|
+
channel_stack_->call_stack_size;
|
|
1173
|
+
Arena* arena = args.arena;
|
|
1174
|
+
return RefCountedPtr<SubchannelCall>(
|
|
1175
|
+
new (arena->Alloc(allocation_size)) SubchannelCall(
|
|
1176
|
+
RefAsSubclass<LegacyConnectedSubchannel>(), args, error));
|
|
1177
|
+
}
|
|
1178
|
+
|
|
1179
|
+
void Ping(grpc_closure* on_initiate, grpc_closure* on_ack) override {
|
|
1180
|
+
grpc_transport_op* op = grpc_make_transport_op(nullptr);
|
|
1181
|
+
op->send_ping.on_initiate = on_initiate;
|
|
1182
|
+
op->send_ping.on_ack = on_ack;
|
|
1183
|
+
grpc_channel_element* elem =
|
|
1184
|
+
grpc_channel_stack_element(channel_stack_.get(), 0);
|
|
1185
|
+
elem->filter->start_transport_op(elem, op);
|
|
1186
|
+
}
|
|
1187
|
+
|
|
1188
|
+
private:
|
|
1189
|
+
class SubchannelCall final : public Call {
|
|
1190
|
+
public:
|
|
1191
|
+
SubchannelCall(
|
|
1192
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel,
|
|
1193
|
+
CreateCallArgs args, grpc_error_handle* error);
|
|
1194
|
+
|
|
1195
|
+
void StartTransportStreamOpBatch(
|
|
1196
|
+
grpc_transport_stream_op_batch* batch) override;
|
|
1197
|
+
|
|
1198
|
+
void SetAfterCallStackDestroy(grpc_closure* closure) override;
|
|
1199
|
+
|
|
1200
|
+
// When refcount drops to 0, destroys itself and the associated call stack,
|
|
1201
|
+
// but does NOT free the memory because it's in the call arena.
|
|
1202
|
+
void Unref() override;
|
|
1203
|
+
void Unref(const DebugLocation& location, const char* reason) override;
|
|
1204
|
+
|
|
1205
|
+
private:
|
|
1206
|
+
// If channelz is enabled, intercepts recv_trailing so that we may check the
|
|
1207
|
+
// status and associate it to a subchannel.
|
|
1208
|
+
void MaybeInterceptRecvTrailingMetadata(
|
|
1209
|
+
grpc_transport_stream_op_batch* batch);
|
|
1210
|
+
|
|
1211
|
+
static void RecvTrailingMetadataReady(void* arg, grpc_error_handle error);
|
|
1212
|
+
|
|
1213
|
+
// Interface of RefCounted<>.
|
|
1214
|
+
void IncrementRefCount() override;
|
|
1215
|
+
void IncrementRefCount(const DebugLocation& location,
|
|
1216
|
+
const char* reason) override;
|
|
1217
|
+
|
|
1218
|
+
static void Destroy(void* arg, grpc_error_handle error);
|
|
1219
|
+
|
|
1220
|
+
// Returns the quota for this RPC. If that brings the connection
|
|
1221
|
+
// below quota, then try to drain the queue.
|
|
1222
|
+
void MaybeReturnQuota();
|
|
1223
|
+
|
|
1224
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel_;
|
|
1225
|
+
grpc_closure* after_call_stack_destroy_ = nullptr;
|
|
1226
|
+
// State needed to support channelz interception of recv trailing metadata.
|
|
1227
|
+
grpc_closure recv_trailing_metadata_ready_;
|
|
1228
|
+
grpc_closure* original_recv_trailing_metadata_ = nullptr;
|
|
1229
|
+
grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
|
|
1230
|
+
Timestamp deadline_;
|
|
1231
|
+
bool returned_quota_ = false;
|
|
1232
|
+
};
|
|
1233
|
+
|
|
1234
|
+
RefCountedPtr<channelz::SubchannelNode> channelz_node_;
|
|
1235
|
+
RefCountedPtr<grpc_channel_stack> channel_stack_;
|
|
1236
|
+
};
|
|
1237
|
+
|
|
1238
|
+
//
|
|
1239
|
+
// NewSubchannel::LegacyConnectedSubchannel::SubchannelCall
|
|
1240
|
+
//
|
|
1241
|
+
|
|
1242
|
+
NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::SubchannelCall(
|
|
1243
|
+
RefCountedPtr<LegacyConnectedSubchannel> connected_subchannel,
|
|
1244
|
+
CreateCallArgs args, grpc_error_handle* error)
|
|
1245
|
+
: connected_subchannel_(std::move(connected_subchannel)),
|
|
1246
|
+
deadline_(args.deadline) {
|
|
1247
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1248
|
+
<< "subchannel " << connected_subchannel_->subchannel() << " connection "
|
|
1249
|
+
<< connected_subchannel_.get() << ": created call " << this;
|
|
1250
|
+
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
|
1251
|
+
const grpc_call_element_args call_args = {
|
|
1252
|
+
callstk, // call_stack
|
|
1253
|
+
nullptr, // server_transport_data
|
|
1254
|
+
args.start_time, // start_time
|
|
1255
|
+
args.deadline, // deadline
|
|
1256
|
+
args.arena, // arena
|
|
1257
|
+
args.call_combiner // call_combiner
|
|
1258
|
+
};
|
|
1259
|
+
*error = grpc_call_stack_init(connected_subchannel_->channel_stack_.get(), 1,
|
|
1260
|
+
SubchannelCall::Destroy, this, &call_args);
|
|
1261
|
+
if (GPR_UNLIKELY(!error->ok())) {
|
|
1262
|
+
LOG(ERROR) << "error: " << StatusToString(*error);
|
|
1263
|
+
return;
|
|
1264
|
+
}
|
|
1265
|
+
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
|
|
1266
|
+
if (connected_subchannel_->channelz_node_ != nullptr) {
|
|
1267
|
+
connected_subchannel_->channelz_node_->RecordCallStarted();
|
|
1268
|
+
}
|
|
1269
|
+
}
|
|
1270
|
+
|
|
1271
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1272
|
+
StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
|
|
1273
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1274
|
+
<< "subchannel " << connected_subchannel_->subchannel() << " connection "
|
|
1275
|
+
<< connected_subchannel_.get() << " call " << this << ": starting batch: "
|
|
1276
|
+
<< grpc_transport_stream_op_batch_string(batch, false);
|
|
1277
|
+
MaybeInterceptRecvTrailingMetadata(batch);
|
|
1278
|
+
grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(this);
|
|
1279
|
+
grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
|
|
1280
|
+
GRPC_TRACE_LOG(channel, INFO)
|
|
1281
|
+
<< "OP[" << top_elem->filter->name << ":" << top_elem
|
|
1282
|
+
<< "]: " << grpc_transport_stream_op_batch_string(batch, false);
|
|
1283
|
+
top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
|
|
1284
|
+
}
|
|
1285
|
+
|
|
1286
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1287
|
+
SetAfterCallStackDestroy(grpc_closure* closure) {
|
|
1288
|
+
GRPC_CHECK_EQ(after_call_stack_destroy_, nullptr);
|
|
1289
|
+
GRPC_CHECK_NE(closure, nullptr);
|
|
1290
|
+
after_call_stack_destroy_ = closure;
|
|
1291
|
+
}
|
|
1292
|
+
|
|
1293
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::Unref() {
|
|
1294
|
+
GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
|
|
1295
|
+
}
|
|
1296
|
+
|
|
1297
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::Unref(
|
|
1298
|
+
const DebugLocation& /*location*/, const char* reason) {
|
|
1299
|
+
GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
|
|
1300
|
+
}
|
|
1301
|
+
|
|
1302
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::Destroy(
|
|
1303
|
+
void* arg, grpc_error_handle /*error*/) {
|
|
1304
|
+
SubchannelCall* self = static_cast<SubchannelCall*>(arg);
|
|
1305
|
+
// Just in case we didn't already take care of this in the
|
|
1306
|
+
// recv_trailing_metadata callback, return the quota now.
|
|
1307
|
+
self->MaybeReturnQuota();
|
|
1308
|
+
// Keep some members before destroying the subchannel call.
|
|
1309
|
+
grpc_closure* after_call_stack_destroy = self->after_call_stack_destroy_;
|
|
1310
|
+
RefCountedPtr<ConnectedSubchannel> connected_subchannel =
|
|
1311
|
+
std::move(self->connected_subchannel_);
|
|
1312
|
+
// Destroy the subchannel call.
|
|
1313
|
+
self->~SubchannelCall();
|
|
1314
|
+
// Destroy the call stack. This should be after destroying the subchannel
|
|
1315
|
+
// call, because call->after_call_stack_destroy(), if not null, will free
|
|
1316
|
+
// the call arena.
|
|
1317
|
+
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(self), nullptr,
|
|
1318
|
+
after_call_stack_destroy);
|
|
1319
|
+
// Automatically reset connected_subchannel. This should be after destroying
|
|
1320
|
+
// the call stack, because destroying call stack needs access to the channel
|
|
1321
|
+
// stack.
|
|
1322
|
+
}
|
|
1323
|
+
|
|
1324
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1325
|
+
MaybeInterceptRecvTrailingMetadata(grpc_transport_stream_op_batch* batch) {
|
|
1326
|
+
// only intercept payloads with recv trailing.
|
|
1327
|
+
if (!batch->recv_trailing_metadata) return;
|
|
1328
|
+
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
|
|
1329
|
+
this, grpc_schedule_on_exec_ctx);
|
|
1330
|
+
// save some state needed for the interception callback.
|
|
1331
|
+
GRPC_CHECK_EQ(recv_trailing_metadata_, nullptr);
|
|
1332
|
+
recv_trailing_metadata_ =
|
|
1333
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata;
|
|
1334
|
+
original_recv_trailing_metadata_ =
|
|
1335
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
|
|
1336
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
|
|
1337
|
+
&recv_trailing_metadata_ready_;
|
|
1338
|
+
}
|
|
1339
|
+
|
|
1340
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1341
|
+
RecvTrailingMetadataReady(void* arg, grpc_error_handle error) {
|
|
1342
|
+
SubchannelCall* call = static_cast<SubchannelCall*>(arg);
|
|
1343
|
+
GRPC_CHECK_NE(call->recv_trailing_metadata_, nullptr);
|
|
1344
|
+
// Return MAX_CONCURRENT_STREAMS quota.
|
|
1345
|
+
call->MaybeReturnQuota();
|
|
1346
|
+
// If channelz is enabled, record the success or failure of the call.
|
|
1347
|
+
if (auto* channelz_node = call->connected_subchannel_->channelz_node_.get();
|
|
1348
|
+
channelz_node != nullptr) {
|
|
1349
|
+
grpc_status_code status = GRPC_STATUS_OK;
|
|
1350
|
+
GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_,
|
|
1351
|
+
error);
|
|
1352
|
+
GRPC_CHECK_NE(channelz_node, nullptr);
|
|
1353
|
+
if (status == GRPC_STATUS_OK) {
|
|
1354
|
+
channelz_node->RecordCallSucceeded();
|
|
1355
|
+
} else {
|
|
1356
|
+
channelz_node->RecordCallFailed();
|
|
1357
|
+
}
|
|
1358
|
+
}
|
|
1359
|
+
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_, error);
|
|
1360
|
+
}
|
|
1361
|
+
|
|
1362
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1363
|
+
MaybeReturnQuota() {
|
|
1364
|
+
if (returned_quota_) return; // Already returned.
|
|
1365
|
+
returned_quota_ = true;
|
|
1366
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1367
|
+
<< "subchannel " << connected_subchannel_->subchannel() << " connection "
|
|
1368
|
+
<< connected_subchannel_.get() << ": call " << this
|
|
1369
|
+
<< " complete, returning quota";
|
|
1370
|
+
if (connected_subchannel_->ReturnQuotaForRpc()) {
|
|
1371
|
+
connected_subchannel_->subchannel()->RetryQueuedRpcs();
|
|
1372
|
+
}
|
|
1373
|
+
}
|
|
1374
|
+
|
|
1375
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1376
|
+
IncrementRefCount() {
|
|
1377
|
+
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
|
|
1378
|
+
}
|
|
1379
|
+
|
|
1380
|
+
void NewSubchannel::LegacyConnectedSubchannel::SubchannelCall::
|
|
1381
|
+
IncrementRefCount(const DebugLocation& /*location*/, const char* reason) {
|
|
1382
|
+
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
|
|
1383
|
+
}
|
|
1384
|
+
|
|
1385
|
+
//
|
|
1386
|
+
// NewSubchannel::QueuedCall
|
|
1387
|
+
//
|
|
1388
|
+
|
|
1389
|
+
class NewSubchannel::QueuedCall final : public Subchannel::Call {
|
|
1390
|
+
public:
|
|
1391
|
+
QueuedCall(WeakRefCountedPtr<NewSubchannel> subchannel, CreateCallArgs args);
|
|
1392
|
+
~QueuedCall() override;
|
|
1393
|
+
|
|
1394
|
+
void StartTransportStreamOpBatch(
|
|
1395
|
+
grpc_transport_stream_op_batch* batch) override;
|
|
1396
|
+
|
|
1397
|
+
void SetAfterCallStackDestroy(grpc_closure* closure) override;
|
|
1398
|
+
|
|
1399
|
+
// Interface of RefCounted<>.
|
|
1400
|
+
// When refcount drops to 0, the dtor is called, but we do not
|
|
1401
|
+
// free memory, because it's allocated on the arena.
|
|
1402
|
+
void Unref() override {
|
|
1403
|
+
if (ref_count_.Unref()) this->~QueuedCall();
|
|
1404
|
+
}
|
|
1405
|
+
void Unref(const DebugLocation& location, const char* reason) override {
|
|
1406
|
+
if (ref_count_.Unref(location, reason)) this->~QueuedCall();
|
|
1407
|
+
}
|
|
1408
|
+
|
|
1409
|
+
void ResumeOnConnectionLocked(ConnectedSubchannel* connected_subchannel)
|
|
1410
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&NewSubchannel::mu_);
|
|
1411
|
+
|
|
1412
|
+
void FailLocked(absl::Status status)
|
|
1413
|
+
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&NewSubchannel::mu_);
|
|
1414
|
+
|
|
1415
|
+
private:
|
|
1416
|
+
// Allow RefCountedPtr<> to access IncrementRefCount().
|
|
1417
|
+
template <typename T>
|
|
1418
|
+
friend class RefCountedPtr;
|
|
1419
|
+
|
|
1420
|
+
class Canceller;
|
|
1421
|
+
|
|
1422
|
+
// Interface of RefCounted<>.
|
|
1423
|
+
void IncrementRefCount() override { ref_count_.Ref(); }
|
|
1424
|
+
void IncrementRefCount(const DebugLocation& location,
|
|
1425
|
+
const char* reason) override {
|
|
1426
|
+
ref_count_.Ref(location, reason);
|
|
1427
|
+
}
|
|
1428
|
+
|
|
1429
|
+
static void RecvTrailingMetadataReady(void* arg, grpc_error_handle error);
|
|
1430
|
+
|
|
1431
|
+
RefCount ref_count_;
|
|
1432
|
+
WeakRefCountedPtr<NewSubchannel> subchannel_;
|
|
1433
|
+
CreateCallArgs args_;
|
|
1434
|
+
|
|
1435
|
+
// Note that unlike in the resolver and LB code, the subchannel code
|
|
1436
|
+
// adds the call to the queue before adding batches to buffered_call_,
|
|
1437
|
+
// so it's possible that the subchannel will get quota for the call
|
|
1438
|
+
// and try to resume it before buffered_call_ contains any batches.
|
|
1439
|
+
// In that case, we will not be holding the call combiner here, so we
|
|
1440
|
+
// need a mutex for synchronization.
|
|
1441
|
+
Mutex mu_ ABSL_ACQUIRED_AFTER(NewSubchannel::mu_);
|
|
1442
|
+
grpc_closure* after_call_stack_destroy_ ABSL_GUARDED_BY(&mu_) = nullptr;
|
|
1443
|
+
grpc_error_handle cancel_error_ ABSL_GUARDED_BY(&mu_);
|
|
1444
|
+
BufferedCall buffered_call_ ABSL_GUARDED_BY(&mu_);
|
|
1445
|
+
RefCountedPtr<Call> subchannel_call_ ABSL_GUARDED_BY(&mu_);
|
|
1446
|
+
|
|
1447
|
+
// The queue holds a raw pointer to this QueuedCall object, and this
|
|
1448
|
+
// is a reference to that pointer. If the call gets cancelled while
|
|
1449
|
+
// in the queue, we set this pointer to null. The queuing code knows to
|
|
1450
|
+
// ignore null pointers when draining the queue, which ensures that we
|
|
1451
|
+
// don't try to dequeue this call after it's been cancelled.
|
|
1452
|
+
QueuedCall*& queue_entry_;
|
|
1453
|
+
|
|
1454
|
+
Canceller* canceller_ ABSL_GUARDED_BY(&NewSubchannel::mu_);
|
|
1455
|
+
|
|
1456
|
+
std::atomic<bool> is_retriable_{false};
|
|
1457
|
+
grpc_closure recv_trailing_metadata_ready_;
|
|
1458
|
+
grpc_closure* original_recv_trailing_metadata_ = nullptr;
|
|
1459
|
+
grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
|
|
1460
|
+
};
|
|
1461
|
+
|
|
1462
|
+
// Handles call combiner cancellation. We don't yield the call combiner
|
|
1463
|
+
// when queuing the call, which means that if the call gets cancelled
|
|
1464
|
+
// while we're queued, the surface will be unable to immediately start the
|
|
1465
|
+
// cancel_stream batch to let us know about the cancellation. Instead,
|
|
1466
|
+
// this object registers itself with the call combiner to be called if
|
|
1467
|
+
// the call is cancelled. In that case, it removes the call from the
|
|
1468
|
+
// queue and fails any pending batches, thus immediately releasing the
|
|
1469
|
+
// call combiner and allowing the cancellation to proceed.
|
|
1470
|
+
class NewSubchannel::QueuedCall::Canceller final {
|
|
1471
|
+
public:
|
|
1472
|
+
explicit Canceller(RefCountedPtr<QueuedCall> call) : call_(std::move(call)) {
|
|
1473
|
+
GRPC_CLOSURE_INIT(&cancel_, CancelLocked, this, nullptr);
|
|
1474
|
+
call_->args_.call_combiner->SetNotifyOnCancel(&cancel_);
|
|
1475
|
+
}
|
|
1476
|
+
|
|
1477
|
+
private:
|
|
1478
|
+
static void CancelLocked(void* arg, grpc_error_handle error) {
|
|
1479
|
+
auto* self = static_cast<Canceller*>(arg);
|
|
1480
|
+
bool cancelled = false;
|
|
1481
|
+
{
|
|
1482
|
+
MutexLock lock(&self->call_->subchannel_->mu_);
|
|
1483
|
+
if (self->call_->canceller_ == self && !error.ok()) {
|
|
1484
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1485
|
+
<< "subchannel " << self->call_->subchannel_.get()
|
|
1486
|
+
<< " queued call " << self->call_.get()
|
|
1487
|
+
<< ": call combiner canceller called";
|
|
1488
|
+
// Remove from queue.
|
|
1489
|
+
self->call_->queue_entry_ = nullptr;
|
|
1490
|
+
cancelled = true;
|
|
1491
|
+
}
|
|
1492
|
+
}
|
|
1493
|
+
if (cancelled) {
|
|
1494
|
+
MutexLock lock(&self->call_->mu_);
|
|
1495
|
+
// Fail pending batches on the call.
|
|
1496
|
+
self->call_->buffered_call_.Fail(
|
|
1497
|
+
error, BufferedCall::YieldCallCombinerIfPendingBatchesFound);
|
|
1498
|
+
}
|
|
1499
|
+
delete self;
|
|
1500
|
+
}
|
|
1501
|
+
|
|
1502
|
+
RefCountedPtr<QueuedCall> call_;
|
|
1503
|
+
grpc_closure cancel_;
|
|
1504
|
+
};
|
|
1505
|
+
|
|
1506
|
+
NewSubchannel::QueuedCall::QueuedCall(
|
|
1507
|
+
WeakRefCountedPtr<NewSubchannel> subchannel, CreateCallArgs args)
|
|
1508
|
+
: subchannel_(std::move(subchannel)),
|
|
1509
|
+
args_(args),
|
|
1510
|
+
buffered_call_(args_.call_combiner, &subchannel_call_trace),
|
|
1511
|
+
queue_entry_(subchannel_->queued_calls_.emplace_back(this)) {
|
|
1512
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1513
|
+
<< "subchannel " << subchannel_.get() << ": created queued call " << this
|
|
1514
|
+
<< ", queue size=" << subchannel_->queued_calls_.size();
|
|
1515
|
+
canceller_ = new Canceller(Ref().TakeAsSubclass<QueuedCall>());
|
|
1516
|
+
}
|
|
1517
|
+
|
|
1518
|
+
NewSubchannel::QueuedCall::~QueuedCall() {
|
|
1519
|
+
GRPC_TRACE_LOG(subchannel_call, INFO) << "subchannel " << subchannel_.get()
|
|
1520
|
+
<< ": destroying queued call " << this;
|
|
1521
|
+
if (after_call_stack_destroy_ != nullptr) {
|
|
1522
|
+
ExecCtx::Run(DEBUG_LOCATION, after_call_stack_destroy_, absl::OkStatus());
|
|
1523
|
+
}
|
|
1524
|
+
}
|
|
1525
|
+
|
|
1526
|
+
void NewSubchannel::QueuedCall::SetAfterCallStackDestroy(
|
|
1527
|
+
grpc_closure* closure) {
|
|
1528
|
+
GRPC_CHECK_NE(closure, nullptr);
|
|
1529
|
+
MutexLock lock(&mu_);
|
|
1530
|
+
if (subchannel_call_ != nullptr) {
|
|
1531
|
+
subchannel_call_->SetAfterCallStackDestroy(closure);
|
|
1532
|
+
} else {
|
|
1533
|
+
GRPC_CHECK_EQ(after_call_stack_destroy_, nullptr);
|
|
1534
|
+
after_call_stack_destroy_ = closure;
|
|
1535
|
+
}
|
|
1536
|
+
}
|
|
1537
|
+
|
|
1538
|
+
void NewSubchannel::QueuedCall::StartTransportStreamOpBatch(
|
|
1539
|
+
grpc_transport_stream_op_batch* batch) {
|
|
1540
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1541
|
+
<< "subchannel " << subchannel_.get() << " queued call " << this
|
|
1542
|
+
<< ": starting batch: "
|
|
1543
|
+
<< grpc_transport_stream_op_batch_string(batch, false);
|
|
1544
|
+
MutexLock lock(&mu_);
|
|
1545
|
+
// If we already have a real subchannel call, pass the batch down to it.
|
|
1546
|
+
if (subchannel_call_ != nullptr) {
|
|
1547
|
+
subchannel_call_->StartTransportStreamOpBatch(batch);
|
|
1548
|
+
return;
|
|
1549
|
+
}
|
|
1550
|
+
// Intercept recv_trailing_metadata, so that we can mark the call as
|
|
1551
|
+
// eligible for transparent retries if we fail it due to all
|
|
1552
|
+
// connections failing.
|
|
1553
|
+
if (batch->recv_trailing_metadata) {
|
|
1554
|
+
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
|
|
1555
|
+
this, grpc_schedule_on_exec_ctx);
|
|
1556
|
+
GRPC_CHECK_EQ(recv_trailing_metadata_, nullptr);
|
|
1557
|
+
recv_trailing_metadata_ =
|
|
1558
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata;
|
|
1559
|
+
original_recv_trailing_metadata_ =
|
|
1560
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
|
|
1561
|
+
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
|
|
1562
|
+
&recv_trailing_metadata_ready_;
|
|
1563
|
+
}
|
|
1564
|
+
// If we've previously been cancelled, immediately fail the new batch.
|
|
1565
|
+
if (!cancel_error_.ok()) {
|
|
1566
|
+
// Note: This will release the call combiner.
|
|
1567
|
+
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
|
|
1568
|
+
args_.call_combiner);
|
|
1569
|
+
return;
|
|
1570
|
+
}
|
|
1571
|
+
// Handle cancellation batches.
|
|
1572
|
+
if (batch->cancel_stream) {
|
|
1573
|
+
cancel_error_ = batch->payload->cancel_stream.cancel_error;
|
|
1574
|
+
buffered_call_.Fail(cancel_error_, BufferedCall::NoYieldCallCombiner);
|
|
1575
|
+
// Note: This will release the call combiner.
|
|
1576
|
+
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
|
|
1577
|
+
args_.call_combiner);
|
|
1578
|
+
return;
|
|
1579
|
+
}
|
|
1580
|
+
// Enqueue the batch.
|
|
1581
|
+
buffered_call_.EnqueueBatch(batch);
|
|
1582
|
+
// We hold on to the call combiner for the send_initial_metadata batch,
|
|
1583
|
+
// but yield it for other batches. This ensures that we are holding on
|
|
1584
|
+
// to the call combiner exactly once when we are ready to resume.
|
|
1585
|
+
if (!batch->send_initial_metadata) {
|
|
1586
|
+
GRPC_CALL_COMBINER_STOP(args_.call_combiner,
|
|
1587
|
+
"batch does not include send_initial_metadata");
|
|
1588
|
+
}
|
|
1589
|
+
}
|
|
1590
|
+
|
|
1591
|
+
void NewSubchannel::QueuedCall::RecvTrailingMetadataReady(
|
|
1592
|
+
void* arg, grpc_error_handle error) {
|
|
1593
|
+
QueuedCall* call = static_cast<QueuedCall*>(arg);
|
|
1594
|
+
GRPC_CHECK_NE(call->recv_trailing_metadata_, nullptr);
|
|
1595
|
+
if (call->is_retriable_.load()) {
|
|
1596
|
+
call->recv_trailing_metadata_->Set(GrpcStreamNetworkState(),
|
|
1597
|
+
GrpcStreamNetworkState::kNotSentOnWire);
|
|
1598
|
+
}
|
|
1599
|
+
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_, error);
|
|
1600
|
+
}
|
|
1601
|
+
|
|
1602
|
+
void NewSubchannel::QueuedCall::ResumeOnConnectionLocked(
|
|
1603
|
+
ConnectedSubchannel* connected_subchannel) {
|
|
1604
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1605
|
+
<< "subchannel " << subchannel_.get() << " queued call " << this
|
|
1606
|
+
<< ": resuming on connected_subchannel " << connected_subchannel;
|
|
1607
|
+
canceller_ = nullptr;
|
|
1608
|
+
queue_entry_ = nullptr;
|
|
1609
|
+
MutexLock lock(&mu_);
|
|
1610
|
+
grpc_error_handle error;
|
|
1611
|
+
subchannel_call_ = connected_subchannel->CreateCall(args_, &error);
|
|
1612
|
+
if (after_call_stack_destroy_ != nullptr) {
|
|
1613
|
+
subchannel_call_->SetAfterCallStackDestroy(after_call_stack_destroy_);
|
|
1614
|
+
after_call_stack_destroy_ = nullptr;
|
|
1615
|
+
}
|
|
1616
|
+
// It's possible that the subchannel will get quota for the call
|
|
1617
|
+
// and try to resume it before buffered_call_ contains any batches.
|
|
1618
|
+
// In that case, we will not be holding the call combiner here, so we
|
|
1619
|
+
// must not yeild it. That's why we use
|
|
1620
|
+
// YieldCallCombinerIfPendingBatchesFound here.
|
|
1621
|
+
if (!error.ok()) {
|
|
1622
|
+
buffered_call_.Fail(error,
|
|
1623
|
+
BufferedCall::YieldCallCombinerIfPendingBatchesFound);
|
|
1624
|
+
} else {
|
|
1625
|
+
buffered_call_.Resume(
|
|
1626
|
+
[subchannel_call =
|
|
1627
|
+
subchannel_call_](grpc_transport_stream_op_batch* batch) {
|
|
1628
|
+
// This will release the call combiner.
|
|
1629
|
+
subchannel_call->StartTransportStreamOpBatch(batch);
|
|
1630
|
+
},
|
|
1631
|
+
BufferedCall::YieldCallCombinerIfPendingBatchesFound);
|
|
1632
|
+
}
|
|
1633
|
+
}
|
|
1634
|
+
|
|
1635
|
+
void NewSubchannel::QueuedCall::FailLocked(absl::Status status) {
|
|
1636
|
+
GRPC_TRACE_LOG(subchannel_call, INFO)
|
|
1637
|
+
<< "subchannel " << subchannel_.get() << " queued call " << this
|
|
1638
|
+
<< ": failing: " << status;
|
|
1639
|
+
canceller_ = nullptr;
|
|
1640
|
+
queue_entry_ = nullptr;
|
|
1641
|
+
is_retriable_.store(true);
|
|
1642
|
+
MutexLock lock(&mu_);
|
|
1643
|
+
cancel_error_ = status;
|
|
1644
|
+
buffered_call_.Fail(status,
|
|
1645
|
+
BufferedCall::YieldCallCombinerIfPendingBatchesFound);
|
|
1646
|
+
}
|
|
1647
|
+
|
|
1648
|
+
//
|
|
1649
|
+
// NewSubchannel::NewConnectedSubchannel
|
|
1650
|
+
//
|
|
1651
|
+
|
|
1652
|
+
class NewSubchannel::NewConnectedSubchannel final : public ConnectedSubchannel {
|
|
1653
|
+
public:
|
|
1654
|
+
class TransportCallDestination final : public CallDestination {
|
|
1655
|
+
public:
|
|
1656
|
+
explicit TransportCallDestination(OrphanablePtr<ClientTransport> transport)
|
|
1657
|
+
: transport_(std::move(transport)) {}
|
|
1658
|
+
|
|
1659
|
+
ClientTransport* transport() { return transport_.get(); }
|
|
1660
|
+
|
|
1661
|
+
void HandleCall(CallHandler handler) override {
|
|
1662
|
+
transport_->StartCall(std::move(handler));
|
|
1663
|
+
}
|
|
1664
|
+
|
|
1665
|
+
void Orphaned() override { transport_.reset(); }
|
|
1666
|
+
|
|
1667
|
+
private:
|
|
1668
|
+
OrphanablePtr<ClientTransport> transport_;
|
|
1669
|
+
};
|
|
1670
|
+
|
|
1671
|
+
NewConnectedSubchannel(
|
|
1672
|
+
WeakRefCountedPtr<NewSubchannel> subchannel,
|
|
1673
|
+
RefCountedPtr<UnstartedCallDestination> call_destination,
|
|
1674
|
+
RefCountedPtr<TransportCallDestination> transport,
|
|
1675
|
+
const ChannelArgs& args, uint32_t max_concurrent_streams)
|
|
1676
|
+
: ConnectedSubchannel(std::move(subchannel), args,
|
|
1677
|
+
max_concurrent_streams),
|
|
1678
|
+
call_destination_(std::move(call_destination)),
|
|
1679
|
+
transport_(std::move(transport)) {}
|
|
1680
|
+
|
|
1681
|
+
void Orphaned() override {
|
|
1682
|
+
call_destination_.reset();
|
|
1683
|
+
transport_.reset();
|
|
1684
|
+
}
|
|
1685
|
+
|
|
1686
|
+
void StartWatch(
|
|
1687
|
+
grpc_pollset_set*,
|
|
1688
|
+
OrphanablePtr<TransportConnectivityStateWatcher> watcher) override {
|
|
1689
|
+
transport_->transport()->StartConnectivityWatch(std::move(watcher));
|
|
1690
|
+
}
|
|
1691
|
+
|
|
1692
|
+
void Ping(absl::AnyInvocable<void(absl::Status)>) override {
|
|
1693
|
+
// TODO(ctiller): add new transport API for this in v3 stack
|
|
1694
|
+
Crash("not implemented");
|
|
1695
|
+
}
|
|
1696
|
+
|
|
1697
|
+
RefCountedPtr<UnstartedCallDestination> unstarted_call_destination()
|
|
1698
|
+
const override {
|
|
1699
|
+
return call_destination_;
|
|
1700
|
+
}
|
|
1701
|
+
|
|
1702
|
+
RefCountedPtr<Call> CreateCall(CreateCallArgs, grpc_error_handle*) override {
|
|
1703
|
+
Crash("legacy CreateCall() called on v3 impl");
|
|
1704
|
+
}
|
|
1705
|
+
|
|
1706
|
+
void Ping(grpc_closure*, grpc_closure*) override {
|
|
1707
|
+
Crash("legacy ping method called in call v3 impl");
|
|
1708
|
+
}
|
|
1709
|
+
|
|
1710
|
+
private:
|
|
1711
|
+
RefCountedPtr<UnstartedCallDestination> call_destination_;
|
|
1712
|
+
RefCountedPtr<TransportCallDestination> transport_;
|
|
1713
|
+
};
|
|
1714
|
+
|
|
1715
|
+
//
|
|
1716
|
+
// NewSubchannel::ConnectionStateWatcher
|
|
1717
|
+
//
|
|
1718
|
+
|
|
1719
|
+
class NewSubchannel::ConnectionStateWatcher final
|
|
1720
|
+
: public Transport::StateWatcher {
|
|
1721
|
+
public:
|
|
1722
|
+
explicit ConnectionStateWatcher(
|
|
1723
|
+
WeakRefCountedPtr<ConnectedSubchannel> connected_subchannel)
|
|
1724
|
+
: connected_subchannel_(std::move(connected_subchannel)) {}
|
|
1725
|
+
|
|
1726
|
+
void OnDisconnect(absl::Status status,
|
|
1727
|
+
DisconnectInfo disconnect_info) override {
|
|
1728
|
+
NewSubchannel* subchannel = connected_subchannel_->subchannel();
|
|
1729
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1730
|
+
<< "subchannel " << subchannel << " " << subchannel->key_.ToString()
|
|
1731
|
+
<< ": connected subchannel " << connected_subchannel_.get()
|
|
1732
|
+
<< " reports disconnection: " << status;
|
|
1733
|
+
MutexLock lock(&subchannel->mu_);
|
|
1734
|
+
// Handle keepalive update.
|
|
1735
|
+
if (disconnect_info.keepalive_time.has_value()) {
|
|
1736
|
+
subchannel->ThrottleKeepaliveTimeLocked(*disconnect_info.keepalive_time);
|
|
1737
|
+
subchannel->watcher_list_.NotifyOnKeepaliveUpdateLocked(
|
|
1738
|
+
*disconnect_info.keepalive_time);
|
|
1739
|
+
}
|
|
1740
|
+
// Remove the connection from the subchannel's list of connections.
|
|
1741
|
+
subchannel->RemoveConnectionLocked(connected_subchannel_.get());
|
|
1742
|
+
// If this was the last connection, then fail all queued RPCs and
|
|
1743
|
+
// update the connectivity state.
|
|
1744
|
+
if (subchannel->connections_.empty()) {
|
|
1745
|
+
subchannel->FailAllQueuedRpcsLocked(
|
|
1746
|
+
absl::UnavailableError("subchannel lost all connections"));
|
|
1747
|
+
subchannel->MaybeUpdateConnectivityStateLocked();
|
|
1748
|
+
} else {
|
|
1749
|
+
// Otherwise, retry queued RPCs, which may trigger a new
|
|
1750
|
+
// connection attempt.
|
|
1751
|
+
subchannel->RetryQueuedRpcsLocked();
|
|
1752
|
+
}
|
|
1753
|
+
}
|
|
1754
|
+
|
|
1755
|
+
void OnPeerMaxConcurrentStreamsUpdate(
|
|
1756
|
+
uint32_t max_concurrent_streams,
|
|
1757
|
+
std::unique_ptr<MaxConcurrentStreamsUpdateDoneHandle> /*on_done*/)
|
|
1758
|
+
override {
|
|
1759
|
+
NewSubchannel* subchannel = connected_subchannel_->subchannel();
|
|
1760
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1761
|
+
<< "subchannel " << subchannel << " " << subchannel->key_.ToString()
|
|
1762
|
+
<< ": connection " << connected_subchannel_.get()
|
|
1763
|
+
<< ": setting MAX_CONCURRENT_STREAMS=" << max_concurrent_streams;
|
|
1764
|
+
if (connected_subchannel_->SetMaxConcurrentStreams(
|
|
1765
|
+
max_concurrent_streams)) {
|
|
1766
|
+
subchannel->RetryQueuedRpcs();
|
|
1767
|
+
}
|
|
1768
|
+
}
|
|
1769
|
+
|
|
1770
|
+
grpc_pollset_set* interested_parties() const override {
|
|
1771
|
+
return connected_subchannel_->subchannel()->pollset_set_;
|
|
1772
|
+
}
|
|
1773
|
+
|
|
1774
|
+
private:
|
|
1775
|
+
WeakRefCountedPtr<ConnectedSubchannel> connected_subchannel_;
|
|
1776
|
+
};
|
|
1777
|
+
|
|
1778
|
+
//
|
|
1779
|
+
// NewSubchannel::ConnectivityStateWatcherList
|
|
1780
|
+
//
|
|
1781
|
+
|
|
1782
|
+
void NewSubchannel::ConnectivityStateWatcherList::AddWatcherLocked(
|
|
1783
|
+
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
|
|
1784
|
+
watchers_.insert(std::move(watcher));
|
|
1785
|
+
}
|
|
1786
|
+
|
|
1787
|
+
void NewSubchannel::ConnectivityStateWatcherList::RemoveWatcherLocked(
|
|
1788
|
+
ConnectivityStateWatcherInterface* watcher) {
|
|
1789
|
+
watchers_.erase(watcher);
|
|
1790
|
+
}
|
|
1791
|
+
|
|
1792
|
+
void NewSubchannel::ConnectivityStateWatcherList::NotifyLocked(
|
|
1793
|
+
grpc_connectivity_state state, const absl::Status& status) {
|
|
1794
|
+
for (const auto& watcher : watchers_) {
|
|
1795
|
+
subchannel_->work_serializer_.Run([watcher, state, status]() {
|
|
1796
|
+
watcher->OnConnectivityStateChange(state, status);
|
|
1797
|
+
});
|
|
1798
|
+
}
|
|
1799
|
+
}
|
|
1800
|
+
|
|
1801
|
+
void NewSubchannel::ConnectivityStateWatcherList::NotifyOnKeepaliveUpdateLocked(
|
|
1802
|
+
Duration new_keepalive_time) {
|
|
1803
|
+
for (const auto& watcher : watchers_) {
|
|
1804
|
+
subchannel_->work_serializer_.Run([watcher, new_keepalive_time]() {
|
|
1805
|
+
watcher->OnKeepaliveUpdate(new_keepalive_time);
|
|
1806
|
+
});
|
|
1807
|
+
}
|
|
1808
|
+
}
|
|
1809
|
+
|
|
1810
|
+
uint32_t
|
|
1811
|
+
NewSubchannel::ConnectivityStateWatcherList::GetMaxConnectionsPerSubchannel()
|
|
1812
|
+
const {
|
|
1813
|
+
uint32_t max_connections_per_subchannel = 1;
|
|
1814
|
+
for (const auto& watcher : watchers_) {
|
|
1815
|
+
max_connections_per_subchannel =
|
|
1816
|
+
std::max(max_connections_per_subchannel,
|
|
1817
|
+
watcher->max_connections_per_subchannel());
|
|
1818
|
+
}
|
|
1819
|
+
return max_connections_per_subchannel;
|
|
1820
|
+
}
|
|
1821
|
+
|
|
1822
|
+
//
|
|
1823
|
+
// NewSubchannel
|
|
1824
|
+
//
|
|
1825
|
+
|
|
1826
|
+
NewSubchannel::NewSubchannel(SubchannelKey key,
|
|
1827
|
+
OrphanablePtr<SubchannelConnector> connector,
|
|
1828
|
+
const ChannelArgs& args)
|
|
1829
|
+
: key_(std::move(key)),
|
|
1830
|
+
created_from_endpoint_(args.Contains(GRPC_ARG_SUBCHANNEL_ENDPOINT)),
|
|
1831
|
+
args_(args),
|
|
1832
|
+
pollset_set_(grpc_pollset_set_create()),
|
|
1833
|
+
connector_(std::move(connector)),
|
|
1834
|
+
watcher_list_(this),
|
|
1835
|
+
work_serializer_(args_.GetObjectRef<EventEngine>()),
|
|
1836
|
+
backoff_(ParseArgsForBackoffValues(args_, &min_connect_timeout_)),
|
|
1837
|
+
event_engine_(args_.GetObjectRef<EventEngine>()) {
|
|
1838
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1839
|
+
<< "subchannel " << this << " " << key_.ToString() << ": created";
|
|
1840
|
+
// A grpc_init is added here to ensure that grpc_shutdown does not happen
|
|
1841
|
+
// until the subchannel is destroyed. Subchannels can persist longer than
|
|
1842
|
+
// channels because they maybe reused/shared among multiple channels. As a
|
|
1843
|
+
// result the subchannel destruction happens asynchronously to channel
|
|
1844
|
+
// destruction. If the last channel destruction triggers a grpc_shutdown
|
|
1845
|
+
// before the last subchannel destruction, then there maybe race conditions
|
|
1846
|
+
// triggering segmentation faults. To prevent this issue, we call a
|
|
1847
|
+
// grpc_init here and a grpc_shutdown in the subchannel destructor.
|
|
1848
|
+
InitInternally();
|
|
1849
|
+
global_stats().IncrementClientSubchannelsCreated();
|
|
1850
|
+
GRPC_CLOSURE_INIT(&on_connecting_finished_, OnConnectingFinished, this,
|
|
1851
|
+
grpc_schedule_on_exec_ctx);
|
|
1852
|
+
// Check proxy mapper to determine address to connect to and channel
|
|
1853
|
+
// args to use.
|
|
1854
|
+
address_for_connect_ = CoreConfiguration::Get()
|
|
1855
|
+
.proxy_mapper_registry()
|
|
1856
|
+
.MapAddress(key_.address(), &args_)
|
|
1857
|
+
.value_or(key_.address());
|
|
1858
|
+
// Initialize channelz.
|
|
1859
|
+
const bool channelz_enabled = args_.GetBool(GRPC_ARG_ENABLE_CHANNELZ)
|
|
1860
|
+
.value_or(GRPC_ENABLE_CHANNELZ_DEFAULT);
|
|
1861
|
+
if (channelz_enabled) {
|
|
1862
|
+
const size_t channel_tracer_max_memory = Clamp(
|
|
1863
|
+
args_.GetInt(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE)
|
|
1864
|
+
.value_or(GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT),
|
|
1865
|
+
0, INT_MAX);
|
|
1866
|
+
channelz_node_ = MakeRefCounted<channelz::SubchannelNode>(
|
|
1867
|
+
grpc_sockaddr_to_uri(&key_.address())
|
|
1868
|
+
.value_or("<unknown address type>"),
|
|
1869
|
+
channel_tracer_max_memory);
|
|
1870
|
+
GRPC_CHANNELZ_LOG(channelz_node_) << "subchannel created";
|
|
1871
|
+
channelz_node_->SetChannelArgs(args_);
|
|
1872
|
+
args_ = args_.SetObject<channelz::BaseNode>(channelz_node_);
|
|
1873
|
+
}
|
|
1874
|
+
}
|
|
1875
|
+
|
|
1876
|
+
NewSubchannel::~NewSubchannel() {
|
|
1877
|
+
if (channelz_node_ != nullptr) {
|
|
1878
|
+
GRPC_CHANNELZ_LOG(channelz_node_) << "Subchannel destroyed";
|
|
1879
|
+
channelz_node_->UpdateConnectivityState(GRPC_CHANNEL_SHUTDOWN);
|
|
1880
|
+
}
|
|
1881
|
+
connector_.reset();
|
|
1882
|
+
grpc_pollset_set_destroy(pollset_set_);
|
|
1883
|
+
// grpc_shutdown is called here because grpc_init is called in the ctor.
|
|
1884
|
+
ShutdownInternally();
|
|
1885
|
+
}
|
|
1886
|
+
|
|
1887
|
+
RefCountedPtr<Subchannel> NewSubchannel::Create(
|
|
1888
|
+
OrphanablePtr<SubchannelConnector> connector,
|
|
1889
|
+
const grpc_resolved_address& address, const ChannelArgs& args) {
|
|
1890
|
+
SubchannelKey key(address, args);
|
|
1891
|
+
auto* subchannel_pool = args.GetObject<SubchannelPoolInterface>();
|
|
1892
|
+
GRPC_CHECK_NE(subchannel_pool, nullptr);
|
|
1893
|
+
RefCountedPtr<NewSubchannel> c =
|
|
1894
|
+
subchannel_pool->FindSubchannel(key).TakeAsSubclass<NewSubchannel>();
|
|
1895
|
+
if (c != nullptr) {
|
|
1896
|
+
return c;
|
|
1897
|
+
}
|
|
1898
|
+
c = MakeRefCounted<NewSubchannel>(std::move(key), std::move(connector), args);
|
|
1899
|
+
if (c->created_from_endpoint_) {
|
|
1900
|
+
// We don't interact with the subchannel pool in this case.
|
|
1901
|
+
// Instead, we unconditionally return the newly created subchannel.
|
|
1902
|
+
// Before returning, we explicitly trigger a connection attempt
|
|
1903
|
+
// by calling RequestConnection(), which sets the subchannel's
|
|
1904
|
+
// connectivity state to CONNECTING.
|
|
1905
|
+
c->RequestConnection();
|
|
1906
|
+
return c;
|
|
1907
|
+
}
|
|
1908
|
+
// Try to register the subchannel before setting the subchannel pool.
|
|
1909
|
+
// Otherwise, in case of a registration race, unreffing c in
|
|
1910
|
+
// RegisterSubchannel() will cause c to be tried to be unregistered, while
|
|
1911
|
+
// its key maps to a different subchannel.
|
|
1912
|
+
RefCountedPtr<NewSubchannel> registered =
|
|
1913
|
+
subchannel_pool->RegisterSubchannel(c->key_, c)
|
|
1914
|
+
.TakeAsSubclass<NewSubchannel>();
|
|
1915
|
+
if (registered == c) c->subchannel_pool_ = subchannel_pool->Ref();
|
|
1916
|
+
return registered;
|
|
1917
|
+
}
|
|
1918
|
+
|
|
1919
|
+
void NewSubchannel::ThrottleKeepaliveTime(Duration new_keepalive_time) {
|
|
1920
|
+
MutexLock lock(&mu_);
|
|
1921
|
+
ThrottleKeepaliveTimeLocked(new_keepalive_time);
|
|
1922
|
+
}
|
|
1923
|
+
|
|
1924
|
+
void NewSubchannel::ThrottleKeepaliveTimeLocked(Duration new_keepalive_time) {
|
|
1925
|
+
// Only update the value if the new keepalive time is larger.
|
|
1926
|
+
if (new_keepalive_time > keepalive_time_) {
|
|
1927
|
+
keepalive_time_ = new_keepalive_time;
|
|
1928
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1929
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
1930
|
+
<< ": throttling keepalive time to " << new_keepalive_time;
|
|
1931
|
+
args_ = args_.Set(GRPC_ARG_KEEPALIVE_TIME_MS, new_keepalive_time.millis());
|
|
1932
|
+
}
|
|
1933
|
+
}
|
|
1934
|
+
|
|
1935
|
+
channelz::SubchannelNode* NewSubchannel::channelz_node() {
|
|
1936
|
+
return channelz_node_.get();
|
|
1937
|
+
}
|
|
1938
|
+
|
|
1939
|
+
void NewSubchannel::WatchConnectivityState(
|
|
1940
|
+
RefCountedPtr<ConnectivityStateWatcherInterface> watcher) {
|
|
1941
|
+
MutexLock lock(&mu_);
|
|
1942
|
+
grpc_pollset_set* interested_parties = watcher->interested_parties();
|
|
1943
|
+
if (interested_parties != nullptr) {
|
|
1944
|
+
grpc_pollset_set_add_pollset_set(pollset_set_, interested_parties);
|
|
1945
|
+
}
|
|
1946
|
+
work_serializer_.Run(
|
|
1947
|
+
[watcher, state = state_, status = ConnectivityStatusToReportLocked()]() {
|
|
1948
|
+
watcher->OnConnectivityStateChange(state, status);
|
|
1949
|
+
},
|
|
1950
|
+
DEBUG_LOCATION);
|
|
1951
|
+
watcher_list_.AddWatcherLocked(std::move(watcher));
|
|
1952
|
+
// The max_connections_per_subchannel setting may have changed, so
|
|
1953
|
+
// this may trigger another connection attempt.
|
|
1954
|
+
RetryQueuedRpcsLocked();
|
|
1955
|
+
}
|
|
1956
|
+
|
|
1957
|
+
void NewSubchannel::CancelConnectivityStateWatch(
|
|
1958
|
+
ConnectivityStateWatcherInterface* watcher) {
|
|
1959
|
+
MutexLock lock(&mu_);
|
|
1960
|
+
grpc_pollset_set* interested_parties = watcher->interested_parties();
|
|
1961
|
+
if (interested_parties != nullptr) {
|
|
1962
|
+
grpc_pollset_set_del_pollset_set(pollset_set_, interested_parties);
|
|
1963
|
+
}
|
|
1964
|
+
watcher_list_.RemoveWatcherLocked(watcher);
|
|
1965
|
+
}
|
|
1966
|
+
|
|
1967
|
+
void NewSubchannel::RequestConnection() {
|
|
1968
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1969
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
1970
|
+
<< ": RequestConnection()";
|
|
1971
|
+
MutexLock lock(&mu_);
|
|
1972
|
+
if (state_ == GRPC_CHANNEL_IDLE) {
|
|
1973
|
+
StartConnectingLocked();
|
|
1974
|
+
}
|
|
1975
|
+
}
|
|
1976
|
+
|
|
1977
|
+
void NewSubchannel::ResetBackoff() {
|
|
1978
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1979
|
+
<< "subchannel " << this << " " << key_.ToString() << ": ResetBackoff()";
|
|
1980
|
+
// Hold a ref to ensure cancellation and subsequent deletion of the closure
|
|
1981
|
+
// does not eliminate the last ref and destroy the Subchannel before the
|
|
1982
|
+
// method returns.
|
|
1983
|
+
auto self = WeakRef(DEBUG_LOCATION, "ResetBackoff");
|
|
1984
|
+
MutexLock lock(&mu_);
|
|
1985
|
+
backoff_.Reset();
|
|
1986
|
+
if (retry_timer_handle_.has_value() &&
|
|
1987
|
+
event_engine_->Cancel(*retry_timer_handle_)) {
|
|
1988
|
+
OnRetryTimerLocked();
|
|
1989
|
+
} else if (connection_attempt_in_flight_) {
|
|
1990
|
+
next_attempt_time_ = Timestamp::Now();
|
|
1991
|
+
}
|
|
1992
|
+
}
|
|
1993
|
+
|
|
1994
|
+
void NewSubchannel::Orphaned() {
|
|
1995
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
1996
|
+
<< "subchannel " << this << " " << key_.ToString() << ": shutting down";
|
|
1997
|
+
// The subchannel_pool is only used once here in this subchannel, so the
|
|
1998
|
+
// access can be outside of the lock.
|
|
1999
|
+
if (subchannel_pool_ != nullptr) {
|
|
2000
|
+
subchannel_pool_->UnregisterSubchannel(key_, this);
|
|
2001
|
+
subchannel_pool_.reset();
|
|
2002
|
+
}
|
|
2003
|
+
MutexLock lock(&mu_);
|
|
2004
|
+
GRPC_CHECK(!shutdown_);
|
|
2005
|
+
shutdown_ = true;
|
|
2006
|
+
connector_.reset();
|
|
2007
|
+
connections_.clear();
|
|
2008
|
+
if (retry_timer_handle_.has_value()) {
|
|
2009
|
+
event_engine_->Cancel(*retry_timer_handle_);
|
|
2010
|
+
}
|
|
2011
|
+
}
|
|
2012
|
+
|
|
2013
|
+
void NewSubchannel::GetOrAddDataProducer(
|
|
2014
|
+
UniqueTypeName type,
|
|
2015
|
+
std::function<void(DataProducerInterface**)> get_or_add) {
|
|
2016
|
+
MutexLock lock(&mu_);
|
|
2017
|
+
auto it = data_producer_map_.emplace(type, nullptr).first;
|
|
2018
|
+
get_or_add(&it->second);
|
|
2019
|
+
}
|
|
2020
|
+
|
|
2021
|
+
void NewSubchannel::RemoveDataProducer(DataProducerInterface* data_producer) {
|
|
2022
|
+
MutexLock lock(&mu_);
|
|
2023
|
+
auto it = data_producer_map_.find(data_producer->type());
|
|
2024
|
+
if (it != data_producer_map_.end() && it->second == data_producer) {
|
|
2025
|
+
data_producer_map_.erase(it);
|
|
2026
|
+
}
|
|
2027
|
+
}
|
|
2028
|
+
|
|
2029
|
+
namespace {
|
|
2030
|
+
|
|
2031
|
+
absl::Status PrependAddressToStatusMessage(const SubchannelKey& key,
|
|
2032
|
+
const absl::Status& status) {
|
|
2033
|
+
return AddMessagePrefix(
|
|
2034
|
+
grpc_sockaddr_to_uri(&key.address()).value_or("<unknown address type>"),
|
|
2035
|
+
status);
|
|
2036
|
+
}
|
|
2037
|
+
|
|
2038
|
+
} // namespace
|
|
2039
|
+
|
|
2040
|
+
void NewSubchannel::SetLastFailureLocked(const absl::Status& status) {
|
|
2041
|
+
// Augment status message to include IP address.
|
|
2042
|
+
last_failure_status_ = PrependAddressToStatusMessage(key_, status);
|
|
2043
|
+
}
|
|
2044
|
+
|
|
2045
|
+
grpc_connectivity_state NewSubchannel::ComputeConnectivityStateLocked() const {
|
|
2046
|
+
// If we have at least one connection, report READY.
|
|
2047
|
+
if (!connections_.empty()) return GRPC_CHANNEL_READY;
|
|
2048
|
+
// If we were created from an endpoint and the connection is closed,
|
|
2049
|
+
// we have no way to create a new connection, so we report
|
|
2050
|
+
// TRANSIENT_FAILURE, and we'll never leave that state.
|
|
2051
|
+
if (created_from_endpoint_) return GRPC_CHANNEL_TRANSIENT_FAILURE;
|
|
2052
|
+
// If there's a connection attempt in flight, report CONNECTING.
|
|
2053
|
+
if (connection_attempt_in_flight_) return GRPC_CHANNEL_CONNECTING;
|
|
2054
|
+
// If we're in backoff delay, report TRANSIENT_FAILURE.
|
|
2055
|
+
if (retry_timer_handle_.has_value()) {
|
|
2056
|
+
return GRPC_CHANNEL_TRANSIENT_FAILURE;
|
|
2057
|
+
}
|
|
2058
|
+
// Otherwise, report IDLE.
|
|
2059
|
+
return GRPC_CHANNEL_IDLE;
|
|
2060
|
+
}
|
|
2061
|
+
|
|
2062
|
+
absl::Status NewSubchannel::ConnectivityStatusToReportLocked() const {
|
|
2063
|
+
// Report status in TRANSIENT_FAILURE state.
|
|
2064
|
+
if (state_ == GRPC_CHANNEL_TRANSIENT_FAILURE) return last_failure_status_;
|
|
2065
|
+
return absl::OkStatus();
|
|
2066
|
+
}
|
|
2067
|
+
|
|
2068
|
+
void NewSubchannel::MaybeUpdateConnectivityStateLocked() {
|
|
2069
|
+
// Determine what state we are in.
|
|
2070
|
+
grpc_connectivity_state new_state = ComputeConnectivityStateLocked();
|
|
2071
|
+
// If we're already in that state, no need to report a change.
|
|
2072
|
+
if (new_state == state_) return;
|
|
2073
|
+
state_ = new_state;
|
|
2074
|
+
absl::Status status = ConnectivityStatusToReportLocked();
|
|
2075
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
2076
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
2077
|
+
<< ": reporting connectivity state " << ConnectivityStateName(new_state)
|
|
2078
|
+
<< ", status: " << status;
|
|
2079
|
+
// Update channelz.
|
|
2080
|
+
if (channelz_node_ != nullptr) {
|
|
2081
|
+
channelz_node_->UpdateConnectivityState(new_state);
|
|
2082
|
+
if (status.ok()) {
|
|
2083
|
+
GRPC_CHANNELZ_LOG(channelz_node_)
|
|
2084
|
+
<< "Subchannel connectivity state changed to "
|
|
2085
|
+
<< ConnectivityStateName(new_state);
|
|
2086
|
+
} else {
|
|
2087
|
+
GRPC_CHANNELZ_LOG(channelz_node_)
|
|
2088
|
+
<< "Subchannel connectivity state changed to "
|
|
2089
|
+
<< ConnectivityStateName(new_state) << ": " << status;
|
|
2090
|
+
}
|
|
2091
|
+
}
|
|
2092
|
+
// Notify watchers.
|
|
2093
|
+
watcher_list_.NotifyLocked(new_state, status);
|
|
2094
|
+
}
|
|
2095
|
+
|
|
2096
|
+
bool NewSubchannel::RemoveConnectionLocked(
|
|
2097
|
+
ConnectedSubchannel* connected_subchannel) {
|
|
2098
|
+
for (auto it = connections_.begin(); it != connections_.end(); ++it) {
|
|
2099
|
+
if (*it == connected_subchannel) {
|
|
2100
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
2101
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
2102
|
+
<< ": removing connection " << connected_subchannel;
|
|
2103
|
+
connections_.erase(it);
|
|
2104
|
+
return true;
|
|
2105
|
+
}
|
|
2106
|
+
}
|
|
2107
|
+
return false;
|
|
2108
|
+
}
|
|
2109
|
+
|
|
2110
|
+
void NewSubchannel::OnRetryTimer() {
|
|
2111
|
+
MutexLock lock(&mu_);
|
|
2112
|
+
OnRetryTimerLocked();
|
|
2113
|
+
}
|
|
2114
|
+
|
|
2115
|
+
void NewSubchannel::OnRetryTimerLocked() {
|
|
2116
|
+
retry_timer_handle_.reset();
|
|
2117
|
+
if (shutdown_) return;
|
|
2118
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
2119
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
2120
|
+
<< ": backoff delay elapsed";
|
|
2121
|
+
RetryQueuedRpcsLocked(); // May trigger another connection attempt.
|
|
2122
|
+
MaybeUpdateConnectivityStateLocked();
|
|
2123
|
+
}
|
|
2124
|
+
|
|
2125
|
+
void NewSubchannel::StartConnectingLocked() {
|
|
2126
|
+
// Set next attempt time.
|
|
2127
|
+
const Timestamp now = Timestamp::Now();
|
|
2128
|
+
const Timestamp min_deadline = now + min_connect_timeout_;
|
|
2129
|
+
next_attempt_time_ = now + backoff_.NextAttemptDelay();
|
|
2130
|
+
// Change connectivity state if needed.
|
|
2131
|
+
connection_attempt_in_flight_ = true;
|
|
2132
|
+
MaybeUpdateConnectivityStateLocked();
|
|
2133
|
+
// Start connection attempt.
|
|
2134
|
+
SubchannelConnector::Args args;
|
|
2135
|
+
args.address = &address_for_connect_;
|
|
2136
|
+
args.interested_parties = pollset_set_;
|
|
2137
|
+
args.deadline = std::max(next_attempt_time_, min_deadline);
|
|
2138
|
+
args.channel_args =
|
|
2139
|
+
args_.Set(GRPC_ARG_MAX_CONCURRENT_STREAMS_REJECT_ON_CLIENT, true);
|
|
2140
|
+
WeakRef(DEBUG_LOCATION, "Connect").release(); // Ref held by callback.
|
|
2141
|
+
connector_->Connect(args, &connecting_result_, &on_connecting_finished_);
|
|
2142
|
+
}
|
|
2143
|
+
|
|
2144
|
+
void NewSubchannel::OnConnectingFinished(void* arg, grpc_error_handle error) {
|
|
2145
|
+
WeakRefCountedPtr<NewSubchannel> c(static_cast<NewSubchannel*>(arg));
|
|
2146
|
+
{
|
|
2147
|
+
MutexLock lock(&c->mu_);
|
|
2148
|
+
c->OnConnectingFinishedLocked(error);
|
|
2149
|
+
}
|
|
2150
|
+
c.reset(DEBUG_LOCATION, "Connect");
|
|
2151
|
+
}
|
|
2152
|
+
|
|
2153
|
+
void NewSubchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
|
|
2154
|
+
connection_attempt_in_flight_ = false;
|
|
2155
|
+
if (shutdown_) {
|
|
2156
|
+
connecting_result_.Reset();
|
|
2157
|
+
return;
|
|
2158
|
+
}
|
|
2159
|
+
// If we didn't get a transport or we fail to publish it, report
|
|
2160
|
+
// TRANSIENT_FAILURE and start the retry timer.
|
|
2161
|
+
// Note that if the connection attempt took longer than the backoff
|
|
2162
|
+
// time, then the timer will fire immediately, and we will quickly
|
|
2163
|
+
// transition back to IDLE.
|
|
2164
|
+
if (connecting_result_.transport == nullptr || !PublishTransportLocked()) {
|
|
2165
|
+
const Duration time_until_next_attempt =
|
|
2166
|
+
next_attempt_time_ - Timestamp::Now();
|
|
2167
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
2168
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
2169
|
+
<< ": connect failed (" << StatusToString(error) << ")"
|
|
2170
|
+
<< (created_from_endpoint_
|
|
2171
|
+
? ", no retry will be attempted (created from endpoint); "
|
|
2172
|
+
"remaining in TRANSIENT_FAILURE"
|
|
2173
|
+
: ", backing off for " +
|
|
2174
|
+
std::to_string(time_until_next_attempt.millis()) + " ms");
|
|
2175
|
+
if (!created_from_endpoint_) {
|
|
2176
|
+
retry_timer_handle_ = event_engine_->RunAfter(
|
|
2177
|
+
time_until_next_attempt,
|
|
2178
|
+
[self = WeakRef(DEBUG_LOCATION, "RetryTimer")
|
|
2179
|
+
.TakeAsSubclass<NewSubchannel>()]() mutable {
|
|
2180
|
+
{
|
|
2181
|
+
ExecCtx exec_ctx;
|
|
2182
|
+
self->OnRetryTimer();
|
|
2183
|
+
// Subchannel deletion might require an active ExecCtx. So if
|
|
2184
|
+
// self.reset() is not called here, the WeakRefCountedPtr
|
|
2185
|
+
// destructor may run after the ExecCtx declared in the callback
|
|
2186
|
+
// is destroyed. Since subchannel may get destroyed when the
|
|
2187
|
+
// WeakRefCountedPtr destructor runs, it may not have an active
|
|
2188
|
+
// ExecCtx - thus leading to crashes.
|
|
2189
|
+
self.reset();
|
|
2190
|
+
}
|
|
2191
|
+
});
|
|
2192
|
+
}
|
|
2193
|
+
SetLastFailureLocked(grpc_error_to_absl_status(error));
|
|
2194
|
+
MaybeUpdateConnectivityStateLocked();
|
|
2195
|
+
}
|
|
2196
|
+
}
|
|
2197
|
+
|
|
2198
|
+
bool NewSubchannel::PublishTransportLocked() {
|
|
2199
|
+
auto socket_node = connecting_result_.transport->GetSocketNode();
|
|
2200
|
+
Transport* transport = connecting_result_.transport;
|
|
2201
|
+
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
|
|
2202
|
+
if (connecting_result_.transport->filter_stack_transport() != nullptr) {
|
|
2203
|
+
// Construct channel stack.
|
|
2204
|
+
// Builder takes ownership of transport.
|
|
2205
|
+
ChannelStackBuilderImpl builder(
|
|
2206
|
+
"subchannel", GRPC_CLIENT_SUBCHANNEL,
|
|
2207
|
+
connecting_result_.channel_args.SetObject(
|
|
2208
|
+
std::exchange(connecting_result_.transport, nullptr)));
|
|
2209
|
+
if (!CoreConfiguration::Get().channel_init().CreateStack(&builder)) {
|
|
2210
|
+
return false;
|
|
2211
|
+
}
|
|
2212
|
+
absl::StatusOr<RefCountedPtr<grpc_channel_stack>> stack = builder.Build();
|
|
2213
|
+
if (!stack.ok()) {
|
|
2214
|
+
connecting_result_.Reset();
|
|
2215
|
+
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
|
|
2216
|
+
<< ": error initializing subchannel stack: " << stack.status();
|
|
2217
|
+
return false;
|
|
2218
|
+
}
|
|
2219
|
+
connected_subchannel = MakeRefCounted<LegacyConnectedSubchannel>(
|
|
2220
|
+
WeakRef().TakeAsSubclass<NewSubchannel>(), std::move(*stack), args_,
|
|
2221
|
+
channelz_node_, connecting_result_.max_concurrent_streams);
|
|
2222
|
+
} else {
|
|
2223
|
+
OrphanablePtr<ClientTransport> transport(
|
|
2224
|
+
std::exchange(connecting_result_.transport, nullptr)
|
|
2225
|
+
->client_transport());
|
|
2226
|
+
InterceptionChainBuilder builder(
|
|
2227
|
+
connecting_result_.channel_args.SetObject(transport.get()));
|
|
2228
|
+
if (channelz_node_ != nullptr) {
|
|
2229
|
+
// TODO(ctiller): If/when we have a good way to access the subchannel
|
|
2230
|
+
// from a filter (maybe GetContext<Subchannel>?), consider replacing
|
|
2231
|
+
// these two hooks with a filter so that we can avoid storing two
|
|
2232
|
+
// separate refs to the channelz node in each connection.
|
|
2233
|
+
builder.AddOnClientInitialMetadata(
|
|
2234
|
+
[channelz_node = channelz_node_](ClientMetadata&) {
|
|
2235
|
+
channelz_node->RecordCallStarted();
|
|
2236
|
+
});
|
|
2237
|
+
builder.AddOnServerTrailingMetadata(
|
|
2238
|
+
[channelz_node = channelz_node_](ServerMetadata& metadata) {
|
|
2239
|
+
if (IsStatusOk(metadata)) {
|
|
2240
|
+
channelz_node->RecordCallSucceeded();
|
|
2241
|
+
} else {
|
|
2242
|
+
channelz_node->RecordCallFailed();
|
|
2243
|
+
}
|
|
2244
|
+
});
|
|
2245
|
+
}
|
|
2246
|
+
CoreConfiguration::Get().channel_init().AddToInterceptionChainBuilder(
|
|
2247
|
+
GRPC_CLIENT_SUBCHANNEL, builder);
|
|
2248
|
+
auto transport_destination =
|
|
2249
|
+
MakeRefCounted<NewConnectedSubchannel::TransportCallDestination>(
|
|
2250
|
+
std::move(transport));
|
|
2251
|
+
auto call_destination = builder.Build(transport_destination);
|
|
2252
|
+
if (!call_destination.ok()) {
|
|
2253
|
+
connecting_result_.Reset();
|
|
2254
|
+
LOG(ERROR) << "subchannel " << this << " " << key_.ToString()
|
|
2255
|
+
<< ": error initializing subchannel stack: "
|
|
2256
|
+
<< call_destination.status();
|
|
2257
|
+
return false;
|
|
2258
|
+
}
|
|
2259
|
+
connected_subchannel = MakeRefCounted<NewConnectedSubchannel>(
|
|
2260
|
+
WeakRef().TakeAsSubclass<NewSubchannel>(), std::move(*call_destination),
|
|
2261
|
+
std::move(transport_destination), args_,
|
|
2262
|
+
connecting_result_.max_concurrent_streams);
|
|
2263
|
+
}
|
|
2264
|
+
connecting_result_.Reset();
|
|
2265
|
+
// Reset backoff.
|
|
2266
|
+
backoff_.Reset();
|
|
2267
|
+
// Publish.
|
|
2268
|
+
GRPC_TRACE_LOG(subchannel, INFO)
|
|
2269
|
+
<< "subchannel " << this << " " << key_.ToString()
|
|
2270
|
+
<< ": new connected subchannel at " << connected_subchannel.get()
|
|
2271
|
+
<< ", max_concurrent_streams="
|
|
2272
|
+
<< connecting_result_.max_concurrent_streams;
|
|
2273
|
+
if (channelz_node_ != nullptr) {
|
|
2274
|
+
if (socket_node != nullptr) {
|
|
2275
|
+
socket_node->AddParent(channelz_node_.get());
|
|
2276
|
+
}
|
|
2277
|
+
}
|
|
2278
|
+
transport->StartWatch(
|
|
2279
|
+
MakeRefCounted<ConnectionStateWatcher>(connected_subchannel->WeakRef()));
|
|
2280
|
+
connections_.push_back(std::move(connected_subchannel));
|
|
2281
|
+
RetryQueuedRpcsLocked();
|
|
2282
|
+
MaybeUpdateConnectivityStateLocked();
|
|
2283
|
+
return true;
|
|
2284
|
+
}
|
|
2285
|
+
|
|
2286
|
+
// Creates a call on one of this subchannel's connections, or queues the
// call until a connection with quota becomes available.
// Returns nullptr only when there are no connections at all, which tells
// the channel to re-queue the LB pick.
RefCountedPtr<Subchannel::Call> NewSubchannel::CreateCall(
    CreateCallArgs args, grpc_error_handle* error) {
  RefCountedPtr<ConnectedSubchannel> connected_subchannel;
  {
    MutexLock lock(&mu_);
    // If we hit a race condition where the LB picker chose the subchannel
    // at the same time as the last connection was closed, then tell the
    // channel to re-queue the pick.
    if (connections_.empty()) return nullptr;
    // Otherwise, choose a connection.
    // Optimization: If the queue is non-empty, then we know there won't be
    // a connection that we can send this RPC on, so we don't bother looking.
    if (queued_calls_.empty()) connected_subchannel = ChooseConnectionLocked();
    // If we don't have a connection to send the RPC on, queue it.
    if (connected_subchannel == nullptr) {
      // The QueuedCall object adds itself to queued_calls_.
      auto queued_call = RefCountedPtr<QueuedCall>(args.arena->New<QueuedCall>(
          WeakRef().TakeAsSubclass<NewSubchannel>(), args));
      // May immediately fail the queued RPCs instead of leaving them
      // queued, depending on channel-arg configuration.
      MaybeFailAllQueuedRpcsLocked();
      return queued_call;
    }
  }
  // Found a connection, so create a call on it.
  // Note: the call is created outside the lock scope above.
  GRPC_TRACE_LOG(subchannel_call, INFO)
      << "subchannel " << this << " " << key_.ToString()
      << ": creating call on connection " << connected_subchannel.get();
  return connected_subchannel->CreateCall(args, error);
}
|
|
2314
|
+
|
|
2315
|
+
// Returns the unstarted call destination of the first connection, or
// nullptr when this subchannel currently has no connections.
RefCountedPtr<UnstartedCallDestination> NewSubchannel::call_destination() {
  // TODO(roth): Implement connection scaling for v3.
  RefCountedPtr<ConnectedSubchannel> connection;
  {
    MutexLock lock(&mu_);
    if (connections_.empty()) return nullptr;
    connection = connections_[0];
  }
  return connection->unstarted_call_destination();
}
|
|
2325
|
+
|
|
2326
|
+
namespace {
// Test-only escape hatch: when true, ChooseConnectionLocked() returns the
// first connection (if any) even when no connection reports quota for the
// RPC.  Set via TestOnlySetSubchannelAlwaysSendCallsToTransport().
bool g_test_only_always_send_calls_to_transport = false;
}  // namespace
|
|
2329
|
+
|
|
2330
|
+
// Test-only hook: forces the subchannel to send calls directly to the
// transport, bypassing the per-connection quota check in
// ChooseConnectionLocked().
void TestOnlySetSubchannelAlwaysSendCallsToTransport(bool enabled) {
  g_test_only_always_send_calls_to_transport = enabled;
}
|
|
2333
|
+
|
|
2334
|
+
// Picks a connection with quota available for a new RPC.  Returns nullptr
// when no connection can take the RPC; in that case, kicks off a new
// connection attempt if scaling up is allowed and no attempt is pending.
RefCountedPtr<NewSubchannel::ConnectedSubchannel>
NewSubchannel::ChooseConnectionLocked() {
  // Try to find a connection with quota available for the RPC.
  for (RefCountedPtr<ConnectedSubchannel>& conn : connections_) {
    if (conn->GetQuotaForRpc()) return conn;
  }
  // TODO(roth): This is an ugly hack for the chttp2 streams_not_seen test.
  // Find a better way to do this.
  if (g_test_only_always_send_calls_to_transport && !connections_.empty()) {
    return connections_.front();
  }
  // If we didn't find a connection for the RPC, we'll queue it.
  // Trigger a new connection attempt if we need to scale up the number
  // of connections.
  const bool below_connection_limit =
      connections_.size() < watcher_list_.GetMaxConnectionsPerSubchannel();
  const bool attempt_pending =
      connection_attempt_in_flight_ || retry_timer_handle_.has_value();
  if (below_connection_limit && !attempt_pending) {
    GRPC_TRACE_LOG(subchannel, INFO)
        << "subchannel " << this << " " << key_.ToString()
        << ": adding a new connection";
    StartConnectingLocked();
  }
  return nullptr;
}
|
|
2357
|
+
|
|
2358
|
+
void NewSubchannel::RetryQueuedRpcs() {
|
|
2359
|
+
MutexLock lock(&mu_);
|
|
2360
|
+
if (shutdown_) return;
|
|
2361
|
+
RetryQueuedRpcsLocked();
|
|
2362
|
+
}
|
|
2363
|
+
|
|
2364
|
+
// Drains the queue of RPCs that were waiting for a connection, starting
// each on a connection with available quota.  Stops as soon as no usable
// connection is found; remaining entries stay queued (or may be failed by
// MaybeFailAllQueuedRpcsLocked(), depending on configuration).
void NewSubchannel::RetryQueuedRpcsLocked() {
  GRPC_TRACE_LOG(subchannel_call, INFO)
      << "subchannel " << this << " " << key_.ToString()
      << ": retrying RPCs from queue, queue size=" << queued_calls_.size();
  while (!queued_calls_.empty()) {
    GRPC_TRACE_LOG(subchannel_call, INFO)
        << " retrying first queued RPC, queue size=" << queued_calls_.size();
    QueuedCall* queued_call = queued_calls_.front();
    // A null entry is a call that was cancelled while queued; it is
    // simply dropped from the queue below.
    if (queued_call == nullptr) {
      GRPC_TRACE_LOG(subchannel_call, INFO) << " RPC already cancelled";
    } else {
      auto connected_subchannel = ChooseConnectionLocked();
      // If we don't have a connection to dispatch this RPC on, then
      // we've drained as much from the queue as we can, so stop here.
      // Note: we return without popping, so this call remains queued.
      if (connected_subchannel == nullptr) {
        GRPC_TRACE_LOG(subchannel_call, INFO)
            << " no usable connection found; will stop retrying from queue";
        MaybeFailAllQueuedRpcsLocked();
        return;
      }
      GRPC_TRACE_LOG(subchannel_call, INFO)
          << " starting RPC on connection " << connected_subchannel.get();
      queued_call->ResumeOnConnectionLocked(connected_subchannel.get());
    }
    // Pop only after the call has been resumed (or found cancelled).
    queued_calls_.pop_front();
  }
}
|
|
2391
|
+
|
|
2392
|
+
void NewSubchannel::MaybeFailAllQueuedRpcsLocked() {
|
|
2393
|
+
bool fail_instead_of_queuing =
|
|
2394
|
+
args_.GetInt(GRPC_ARG_MAX_CONCURRENT_STREAMS_REJECT_ON_CLIENT)
|
|
2395
|
+
.value_or(false);
|
|
2396
|
+
if (fail_instead_of_queuing &&
|
|
2397
|
+
connections_.size() == watcher_list_.GetMaxConnectionsPerSubchannel()) {
|
|
2398
|
+
FailAllQueuedRpcsLocked(
|
|
2399
|
+
absl::ResourceExhaustedError("subchannel at max number of connections, "
|
|
2400
|
+
"but no quota to send RPC"));
|
|
2401
|
+
}
|
|
2402
|
+
}
|
|
2403
|
+
|
|
2404
|
+
// Fails every queued RPC with the given status (with the subchannel's
// address prepended to the status message) and empties the queue.  Null
// queue entries are calls already cancelled and are skipped.
void NewSubchannel::FailAllQueuedRpcsLocked(absl::Status status) {
  GRPC_TRACE_LOG(subchannel_call, INFO)
      << "subchannel " << this << ": failing all queued RPCs: " << status;
  status = PrependAddressToStatusMessage(key_, status);
  for (QueuedCall* call : queued_calls_) {
    if (call == nullptr) continue;
    call->FailLocked(status);
  }
  queued_calls_.clear();
}
|
|
2413
|
+
|
|
2414
|
+
// Ping variant taking a status callback (v3 API).
// Not yet implemented: the callback is currently never invoked.
void NewSubchannel::Ping(absl::AnyInvocable<void(absl::Status)>) {
  // TODO(ctiller): Implement
}
|
|
2417
|
+
|
|
2418
|
+
// Delegates a ping to the first connection, if any.  Returns
// UnavailableError when there is no connection, in which case neither
// closure is passed to a connection.
absl::Status NewSubchannel::Ping(grpc_closure* on_initiate,
                                 grpc_closure* on_ack) {
  RefCountedPtr<ConnectedSubchannel> connection;
  {
    MutexLock lock(&mu_);
    if (connections_.empty()) {
      return absl::UnavailableError("no connection");
    }
    connection = connections_[0];
  }
  connection->Ping(on_initiate, on_ack);
  return absl::OkStatus();
}
|
|
1015
2431
|
|
|
1016
2432
|
} // namespace grpc_core
|