grpc 1.39.0.pre1 → 1.40.0.pre1
- checksums.yaml +4 -4
- data/Makefile +34 -18
- data/include/grpc/event_engine/event_engine.h +10 -14
- data/include/grpc/event_engine/slice_allocator.h +8 -33
- data/include/grpc/impl/codegen/grpc_types.h +18 -8
- data/include/grpc/impl/codegen/port_platform.h +24 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +413 -247
- data/src/core/ext/filters/client_channel/client_channel.h +42 -18
- data/src/core/ext/filters/client_channel/config_selector.h +19 -6
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +7 -8
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +12 -21
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +3 -5
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +17 -38
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +8 -15
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +3 -6
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +8 -12
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +14 -22
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +2 -9
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +5 -8
- data/src/core/ext/filters/client_channel/lb_policy.cc +1 -15
- data/src/core/ext/filters/client_channel/lb_policy.h +70 -46
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +101 -73
- data/src/core/ext/filters/client_channel/retry_filter.cc +392 -243
- data/src/core/ext/filters/client_channel/retry_service_config.cc +36 -26
- data/src/core/ext/filters/client_channel/retry_service_config.h +1 -1
- data/src/core/ext/filters/client_channel/service_config_call_data.h +45 -5
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +0 -6
- data/src/core/ext/filters/http/client/http_client_filter.cc +5 -2
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +5 -1
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +1 -1
- data/src/core/{lib/event_engine/slice_allocator.cc → ext/transport/chttp2/transport/chttp2_slice_allocator.cc} +15 -38
- data/src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.h +74 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +2 -6
- data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -4
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +8 -8
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +5 -5
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +639 -752
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +190 -69
- data/src/core/ext/transport/chttp2/transport/internal.h +1 -1
- data/src/core/ext/transport/chttp2/transport/parsing.cc +70 -54
- data/src/core/ext/transport/chttp2/transport/varint.cc +6 -4
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +56 -35
- data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +180 -76
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +35 -27
- data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +97 -48
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +45 -9
- data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +67 -7
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +66 -9
- data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +227 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c +46 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.h +121 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +1 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c +35 -0
- data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.h +90 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +32 -24
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +120 -73
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +4 -2
- data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +15 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c +48 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.h +171 -0
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +8 -6
- data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +27 -19
- data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +1 -0
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +24 -7
- data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +57 -0
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +29 -17
- data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +72 -0
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +3 -2
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +4 -0
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +6 -5
- data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +15 -11
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +85 -43
- data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +274 -91
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +11 -8
- data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +30 -13
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +33 -5
- data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +115 -0
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c +60 -0
- data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.h +181 -0
- data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +1 -0
- data/src/core/ext/upb-generated/validate/validate.upb.c +82 -66
- data/src/core/ext/upb-generated/validate/validate.upb.h +220 -124
- data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c +15 -7
- data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +53 -52
- data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +318 -277
- data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +437 -410
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +198 -170
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c +9 -8
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +219 -163
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +15 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c +59 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.h +40 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +29 -25
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c +52 -0
- data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +135 -125
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c +131 -123
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c +90 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.h +35 -0
- data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +32 -24
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +69 -55
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +684 -664
- data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c +13 -10
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c +13 -10
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +441 -375
- data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +10 -0
- data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +122 -114
- data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +1 -1
- data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +112 -79
- data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.h +5 -0
- data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c +64 -0
- data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.h +50 -0
- data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +35 -32
- data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c +4 -4
- data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c +182 -160
- data/src/core/ext/xds/certificate_provider_store.h +1 -1
- data/src/core/ext/xds/xds_api.cc +320 -121
- data/src/core/ext/xds/xds_api.h +31 -2
- data/src/core/ext/xds/xds_bootstrap.cc +4 -1
- data/src/core/ext/xds/xds_client.cc +66 -43
- data/src/core/ext/xds/xds_client.h +0 -4
- data/src/core/ext/xds/xds_http_filters.cc +3 -2
- data/src/core/ext/xds/xds_http_filters.h +3 -0
- data/src/core/lib/channel/call_tracer.h +85 -0
- data/src/core/lib/channel/channel_stack.h +1 -1
- data/src/core/lib/channel/context.h +3 -0
- data/src/core/lib/channel/status_util.h +4 -0
- data/src/core/lib/compression/stream_compression.h +1 -1
- data/src/core/lib/compression/stream_compression_gzip.h +1 -1
- data/src/core/lib/compression/stream_compression_identity.h +1 -1
- data/src/core/lib/debug/stats.h +1 -1
- data/src/core/lib/gpr/murmur_hash.cc +4 -2
- data/src/core/lib/gprpp/manual_constructor.h +1 -1
- data/src/core/lib/gprpp/orphanable.h +3 -3
- data/src/core/lib/gprpp/sync.h +2 -30
- data/src/core/lib/iomgr/buffer_list.cc +1 -1
- data/src/core/lib/iomgr/ev_apple.h +1 -1
- data/src/core/lib/iomgr/event_engine/endpoint.cc +6 -8
- data/src/core/lib/iomgr/event_engine/tcp.cc +30 -10
- data/src/core/lib/iomgr/python_util.h +1 -1
- data/src/core/lib/iomgr/resource_quota.cc +2 -0
- data/src/core/lib/iomgr/tcp_client_windows.cc +2 -0
- data/src/core/lib/iomgr/tcp_server_posix.cc +1 -0
- data/src/core/lib/iomgr/timer_manager.cc +1 -1
- data/src/core/lib/json/json_reader.cc +1 -2
- data/src/core/lib/matchers/matchers.cc +8 -20
- data/src/core/lib/matchers/matchers.h +2 -1
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +49 -0
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +7 -0
- data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +6 -18
- data/src/core/lib/security/transport/security_handshaker.cc +12 -4
- data/src/core/lib/security/transport/server_auth_filter.cc +0 -7
- data/src/core/lib/slice/slice_internal.h +1 -0
- data/src/core/lib/surface/call.cc +5 -6
- data/src/core/lib/surface/server.cc +3 -1
- data/src/core/lib/surface/server.h +3 -3
- data/src/core/lib/surface/version.cc +2 -4
- data/src/ruby/ext/grpc/extconf.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/xxhash/xxhash.h +77 -195
- metadata +57 -40
- data/src/core/lib/gpr/arena.h +0 -47
data/src/core/ext/filters/client_channel/client_channel.cc:

@@ -174,9 +174,9 @@ class ClientChannel::CallData {
   void MaybeAddCallToResolverQueuedCallsLocked(grpc_call_element* elem)
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
 
-  static void RecvInitialMetadataReadyForConfigSelectorCommitCallback(
+  static void RecvTrailingMetadataReadyForConfigSelectorCommitCallback(
       void* arg, grpc_error_handle error);
-  void InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
+  void InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
       grpc_transport_stream_op_batch* batch);
 
   void CreateDynamicCall(grpc_call_element* elem);
@@ -199,7 +199,7 @@ class ClientChannel::CallData {
 
   grpc_polling_entity* pollent_ = nullptr;
 
-  grpc_closure pick_closure_;
+  grpc_closure resolution_done_closure_;
 
   // Accessed while holding ClientChannel::resolution_mu_.
   bool service_config_applied_ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) =
@@ -211,10 +211,8 @@ class ClientChannel::CallData {
   ResolverQueuedCallCanceller* resolver_call_canceller_
       ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) = nullptr;
 
-  std::function<void()> on_call_committed_;
-
-  grpc_closure* original_recv_initial_metadata_ready_ = nullptr;
-  grpc_closure recv_initial_metadata_ready_;
+  grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
+  grpc_closure recv_trailing_metadata_ready_;
 
   RefCountedPtr<DynamicFilters> dynamic_filters_;
   RefCountedPtr<DynamicFilters::Call> dynamic_call_;
@@ -345,13 +343,16 @@ class DynamicTerminationFilter::CallData {
     auto* calld = static_cast<CallData*>(elem->call_data);
     auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
     ClientChannel* client_channel = chand->chand_;
-    grpc_call_element_args args = {
-        calld->owning_call_,     nullptr,
-        calld->call_context_,    calld->path_,
-        calld->call_start_time_, calld->deadline_,
-        calld->arena_,           calld->call_combiner_};
-    calld->lb_call_ = client_channel->CreateLoadBalancedCall(args, pollent,
-                                                             nullptr);
+    grpc_call_element_args args = {calld->owning_call_, nullptr,
+                                   calld->call_context_, calld->path_,
+                                   /*start_time=*/0, calld->deadline_,
+                                   calld->arena_, calld->call_combiner_};
+    auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
+        calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
+    calld->lb_call_ = client_channel->CreateLoadBalancedCall(
+        args, pollent, nullptr,
+        service_config_call_data->call_dispatch_controller(),
+        /*is_transparent_retry=*/false);
     if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
       gpr_log(GPR_INFO,
               "chand=%p dynamic_termination_calld=%p: create lb_call=%p", chand,
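The hunk above pulls the ServiceConfigCallData that an upper layer stashed in the call context and hands its CallDispatchController to CreateLoadBalancedCall. A minimal standalone sketch of the enum-indexed context-array idiom this relies on; the ContextIndex, ServiceData, and DispatchController names below are illustrative stand-ins, not grpc's types:

#include <cassert>
#include <cstdio>

// Illustrative stand-ins for grpc's call-context machinery.
enum ContextIndex { kServiceData, kTracer, kNumContextElements };

struct DispatchController {
  void Commit() { std::puts("committed"); }
};

struct ServiceData {
  DispatchController controller;
  DispatchController* dispatch_controller() { return &controller; }
};

int main() {
  // Each call owns a fixed-size array of untyped slots; a layer that knows
  // the index (and the real type) can stash and later recover a pointer.
  void* call_context[kNumContextElements] = {};
  ServiceData service_data;
  call_context[kServiceData] = &service_data;  // upper layer stashes it

  // A lower layer recovers the typed pointer from the agreed-upon slot.
  auto* sd = static_cast<ServiceData*>(call_context[kServiceData]);
  assert(sd != nullptr);
  sd->dispatch_controller()->Commit();
}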
@@ -362,7 +363,6 @@ class DynamicTerminationFilter::CallData {
  private:
   explicit CallData(const grpc_call_element_args& args)
       : path_(grpc_slice_ref_internal(args.path)),
-        call_start_time_(args.start_time),
         deadline_(args.deadline),
         arena_(args.arena),
         owning_call_(args.call_stack),
@@ -372,14 +372,13 @@ class DynamicTerminationFilter::CallData {
   ~CallData() { grpc_slice_unref_internal(path_); }
 
   grpc_slice path_;  // Request path.
-  gpr_cycle_counter call_start_time_;
   grpc_millis deadline_;
   Arena* arena_;
   grpc_call_stack* owning_call_;
   CallCombiner* call_combiner_;
   grpc_call_context_element* call_context_;
 
-  RefCountedPtr<ClientChannel::LoadBalancedCall> lb_call_;
+  OrphanablePtr<ClientChannel::LoadBalancedCall> lb_call_;
 };
 
 const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
@@ -1060,10 +1059,6 @@ void ClientChannel::Destroy(grpc_channel_element* elem) {
 
 namespace {
 
-bool GetEnableRetries(const grpc_channel_args* args) {
-  return grpc_channel_args_find_bool(args, GRPC_ARG_ENABLE_RETRIES, false);
-}
-
 RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
     const grpc_channel_args* args) {
   const bool use_local_subchannel_pool = grpc_channel_args_find_bool(
@@ -1085,7 +1080,6 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
                              grpc_error_handle* error)
     : deadline_checking_enabled_(
           grpc_deadline_checking_enabled(args->channel_args)),
-      enable_retries_(GetEnableRetries(args->channel_args)),
       owning_stack_(args->channel_stack),
       client_channel_factory_(
           ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
@@ -1169,12 +1163,15 @@ ClientChannel::~ClientChannel() {
   GRPC_ERROR_UNREF(disconnect_error_.Load(MemoryOrder::RELAXED));
 }
 
-RefCountedPtr<ClientChannel::LoadBalancedCall>
+OrphanablePtr<ClientChannel::LoadBalancedCall>
 ClientChannel::CreateLoadBalancedCall(
     const grpc_call_element_args& args, grpc_polling_entity* pollent,
-    grpc_closure* on_call_destruction_complete) {
-  return args.arena->New<LoadBalancedCall>(this, args, pollent,
-                                           on_call_destruction_complete);
+    grpc_closure* on_call_destruction_complete,
+    ConfigSelector::CallDispatchController* call_dispatch_controller,
+    bool is_transparent_retry) {
+  return OrphanablePtr<LoadBalancedCall>(args.arena->New<LoadBalancedCall>(
+      this, args, pollent, on_call_destruction_complete,
+      call_dispatch_controller, is_transparent_retry));
 }
 
 namespace {
@@ -1359,11 +1356,12 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
   grpc_error_handle state_error =
       GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
           "Resolver transient failure", &error, 1);
+  absl::Status status = grpc_error_to_absl_status(state_error);
   {
     MutexLock lock(&resolution_mu_);
     // Update resolver transient failure.
     GRPC_ERROR_UNREF(resolver_transient_failure_error_);
-    resolver_transient_failure_error_ = GRPC_ERROR_REF(state_error);
+    resolver_transient_failure_error_ = state_error;
     // Process calls that were queued waiting for the resolver result.
     for (ResolverQueuedCall* call = resolver_queued_calls_; call != nullptr;
          call = call->next) {
@@ -1377,10 +1375,8 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
     }
     // Update connectivity state.
     UpdateStateAndPickerLocked(
-        GRPC_CHANNEL_TRANSIENT_FAILURE, grpc_error_to_absl_status(state_error),
-        "resolver failure",
-        absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
-            state_error));
+        GRPC_CHANNEL_TRANSIENT_FAILURE, status, "resolver failure",
+        absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(status));
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1507,14 +1503,6 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
     config_selector =
         MakeRefCounted<DefaultConfigSelector>(saved_service_config_);
   }
-  // Construct dynamic filter stack.
-  std::vector<const grpc_channel_filter*> filters =
-      config_selector->GetFilters();
-  if (enable_retries_) {
-    filters.push_back(&kRetryFilterVtable);
-  } else {
-    filters.push_back(&DynamicTerminationFilter::kFilterVtable);
-  }
   absl::InlinedVector<grpc_arg, 2> args_to_add = {
       grpc_channel_arg_pointer_create(
           const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL), this,
@@ -1526,6 +1514,16 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
   grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
       channel_args_, args_to_add.data(), args_to_add.size());
   new_args = config_selector->ModifyChannelArgs(new_args);
+  bool enable_retries =
+      grpc_channel_args_find_bool(new_args, GRPC_ARG_ENABLE_RETRIES, true);
+  // Construct dynamic filter stack.
+  std::vector<const grpc_channel_filter*> filters =
+      config_selector->GetFilters();
+  if (enable_retries) {
+    filters.push_back(&kRetryFilterVtable);
+  } else {
+    filters.push_back(&DynamicTerminationFilter::kFilterVtable);
+  }
   RefCountedPtr<DynamicFilters> dynamic_filters =
       DynamicFilters::Create(new_args, std::move(filters));
   GPR_ASSERT(dynamic_filters != nullptr);
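Note the ordering in the new code: GRPC_ARG_ENABLE_RETRIES is now read from new_args only after ModifyChannelArgs(), so a ConfigSelector can override the per-channel retry setting before the filter stack is chosen (and the default read here is true, where the removed GetEnableRetries helper used false). A standalone sketch of why the ordering matters; a plain std::map stands in for grpc_channel_args, and ModifyArgs is a hypothetical stand-in for ConfigSelector::ModifyChannelArgs:

#include <iostream>
#include <map>
#include <string>

using Args = std::map<std::string, bool>;

// Look up a bool arg with a default, mirroring grpc_channel_args_find_bool.
bool FindBool(const Args& args, const std::string& key, bool def) {
  auto it = args.find(key);
  return it == args.end() ? def : it->second;
}

// Hypothetical stand-in for the config selector hook: a selector may add or
// override args before the filter stack is constructed.
Args ModifyArgs(Args args) {
  args["grpc.enable_retries"] = false;  // e.g. a selector disables retries
  return args;
}

int main() {
  Args args;  // channel created with no explicit retry setting
  args = ModifyArgs(args);
  // Reading the flag *after* modification lets the selector's value win.
  bool enable_retries = FindBool(args, "grpc.enable_retries", /*def=*/true);
  std::cout << (enable_retries ? "retry filter" : "dynamic termination filter")
            << '\n';  // prints "dynamic termination filter"
}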
@@ -1548,6 +1546,15 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
   // Process calls that were queued waiting for the resolver result.
   for (ResolverQueuedCall* call = resolver_queued_calls_; call != nullptr;
        call = call->next) {
+    // If there are a lot of queued calls here, resuming them all may cause us
+    // to stay inside C-core for a long period of time. All of that work would
+    // be done using the same ExecCtx instance and therefore the same cached
+    // value of "now". The longer it takes to finish all of this work and exit
+    // from C-core, the more stale the cached value of "now" may become. This
+    // can cause problems whereby (e.g.) we calculate a timer deadline based
+    // on the stale value, which results in the timer firing too early. To
+    // avoid this, we invalidate the cached value for each call we process.
+    ExecCtx::Get()->InvalidateNow();
     grpc_call_element* elem = call->elem;
     CallData* calld = static_cast<CallData*>(elem->call_data);
     grpc_error_handle error = GRPC_ERROR_NONE;
@@ -1660,6 +1667,15 @@ void ClientChannel::UpdateStateAndPickerLocked(
   // Re-process queued picks.
   for (LbQueuedCall* call = lb_queued_calls_; call != nullptr;
        call = call->next) {
+    // If there are a lot of queued calls here, resuming them all may cause us
+    // to stay inside C-core for a long period of time. All of that work would
+    // be done using the same ExecCtx instance and therefore the same cached
+    // value of "now". The longer it takes to finish all of this work and exit
+    // from C-core, the more stale the cached value of "now" may become. This
+    // can cause problems whereby (e.g.) we calculate a timer deadline based
+    // on the stale value, which results in the timer firing too early. To
+    // avoid this, we invalidate the cached value for each call we process.
+    ExecCtx::Get()->InvalidateNow();
     grpc_error_handle error = GRPC_ERROR_NONE;
     if (call->lb_call->PickSubchannelLocked(&error)) {
       call->lb_call->AsyncPickDone(error);
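The comment added to both queued-call loops describes a stale-clock hazard: every iteration shares one ExecCtx and hence one cached "now", so a deadline computed late in the loop can be based on an old timestamp. A self-contained illustration of the hazard and the per-iteration invalidation fix, using std::chrono in place of gpr's time source (CachedClock is an illustrative toy, not grpc's ExecCtx):

#include <chrono>
#include <cstdio>
#include <optional>
#include <thread>

// A toy version of an exec-ctx cached clock: Now() is computed once and
// reused until Invalidate() clears the cache.
class CachedClock {
 public:
  std::chrono::steady_clock::time_point Now() {
    if (!now_) now_ = std::chrono::steady_clock::now();
    return *now_;
  }
  void Invalidate() { now_.reset(); }

 private:
  std::optional<std::chrono::steady_clock::time_point> now_;
};

int main() {
  CachedClock clock;
  for (int i = 0; i < 3; ++i) {
    // Without this Invalidate(), every iteration would compute its deadline
    // from the time at which the first iteration ran, so deadlines would
    // drift earlier and earlier relative to real time (timers firing early).
    clock.Invalidate();
    auto deadline = clock.Now() + std::chrono::milliseconds(100);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));  // "work"
    bool expired = std::chrono::steady_clock::now() > deadline;
    std::printf("iteration %d: deadline %s\n", i,
                expired ? "already expired" : "still in the future");
  }
}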
@@ -1671,6 +1687,40 @@ void ClientChannel::UpdateStateAndPickerLocked(
   pending_subchannel_updates_.clear();
 }
 
+namespace {
+
+// TODO(roth): Remove this in favor of the gprpp Match() function once
+// we can do that without breaking lock annotations.
+template <typename T>
+T HandlePickResult(
+    LoadBalancingPolicy::PickResult* result,
+    std::function<T(LoadBalancingPolicy::PickResult::Complete*)> complete_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Queue*)> queue_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Fail*)> fail_func,
+    std::function<T(LoadBalancingPolicy::PickResult::Drop*)> drop_func) {
+  auto* complete_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Complete>(&result->result);
+  if (complete_pick != nullptr) {
+    return complete_func(complete_pick);
+  }
+  auto* queue_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Queue>(&result->result);
+  if (queue_pick != nullptr) {
+    return queue_func(queue_pick);
+  }
+  auto* fail_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Fail>(&result->result);
+  if (fail_pick != nullptr) {
+    return fail_func(fail_pick);
+  }
+  auto* drop_pick =
+      absl::get_if<LoadBalancingPolicy::PickResult::Drop>(&result->result);
+  GPR_ASSERT(drop_pick != nullptr);
+  return drop_func(drop_pick);
+}
+
+}  // namespace
+
 grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
   if (state_tracker_.state() != GRPC_CHANNEL_READY) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
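The new HandlePickResult helper probes an absl::variant with absl::get_if and dispatches to the matching handler, which is what the TODO says gprpp's Match() would do once lock annotations permit. The same idiom in a self-contained form, using std::variant and illustrative alternative types (Complete/Queue/Fail here are stand-ins, not grpc's):

#include <cstdio>
#include <functional>
#include <string>
#include <variant>

struct Complete { std::string subchannel; };
struct Queue {};
struct Fail { std::string status; };
using PickResult = std::variant<Complete, Queue, Fail>;

// Same shape as HandlePickResult: probe each alternative with get_if and
// invoke the matching handler. The explicit template argument avoids having
// to deduce T through the std::function parameters.
template <typename T>
T Handle(PickResult* result,
         std::function<T(Complete*)> complete_func,
         std::function<T(Queue*)> queue_func,
         std::function<T(Fail*)> fail_func) {
  if (auto* c = std::get_if<Complete>(result)) return complete_func(c);
  if (auto* q = std::get_if<Queue>(result)) return queue_func(q);
  auto* f = std::get_if<Fail>(result);  // last alternative must hold
  return fail_func(f);
}

int main() {
  PickResult result = Fail{"resolver transient failure"};
  bool done = Handle<bool>(
      &result,
      [](Complete* c) {
        std::printf("picked %s\n", c->subchannel.c_str());
        return true;
      },
      [](Queue*) {
        std::puts("queued");
        return false;
      },
      [](Fail* f) {
        std::printf("failed: %s\n", f->status.c_str());
        return true;
      });
  std::printf("done=%d\n", done);
}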
@@ -1680,21 +1730,31 @@ grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
     MutexLock lock(&data_plane_mu_);
     result = picker_->Pick(LoadBalancingPolicy::PickArgs());
   }
-  ConnectedSubchannel* connected_subchannel = nullptr;
-  if (result.subchannel != nullptr) {
-    SubchannelWrapper* subchannel =
-        static_cast<SubchannelWrapper*>(result.subchannel.get());
-    connected_subchannel = subchannel->connected_subchannel();
-  }
-  if (connected_subchannel != nullptr) {
-    connected_subchannel->Ping(op->send_ping.on_initiate, op->send_ping.on_ack);
-  } else {
-    if (result.error == GRPC_ERROR_NONE) {
-      result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-          "LB policy dropped call on ping");
-    }
-  }
-  return result.error;
+  return HandlePickResult<grpc_error_handle>(
+      &result,
+      // Complete pick.
+      [op](LoadBalancingPolicy::PickResult::Complete* complete_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::work_serializer_) {
+            SubchannelWrapper* subchannel = static_cast<SubchannelWrapper*>(
+                complete_pick->subchannel.get());
+            ConnectedSubchannel* connected_subchannel =
+                subchannel->connected_subchannel();
+            connected_subchannel->Ping(op->send_ping.on_initiate,
+                                       op->send_ping.on_ack);
+            return GRPC_ERROR_NONE;
+          },
+      // Queue pick.
+      [](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
+        return GRPC_ERROR_CREATE_FROM_STATIC_STRING("LB picker queued call");
+      },
+      // Fail pick.
+      [](LoadBalancingPolicy::PickResult::Fail* fail_pick) {
+        return absl_status_to_grpc_error(fail_pick->status);
+      },
+      // Drop pick.
+      [](LoadBalancingPolicy::PickResult::Drop* drop_pick) {
+        return absl_status_to_grpc_error(drop_pick->status);
+      });
 }
 
 void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
@@ -1749,7 +1809,7 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
       UpdateStateAndPickerLocked(
           GRPC_CHANNEL_SHUTDOWN, absl::Status(), "shutdown from API",
           absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
-              GRPC_ERROR_REF(op->disconnect_with_error)));
+              grpc_error_to_absl_status(op->disconnect_with_error)));
     }
   }
   GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op");
@@ -1919,10 +1979,26 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
   if (GPR_LIKELY(chand->deadline_checking_enabled_)) {
     grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
   }
-  // Intercept recv_initial_metadata for config selector commit callback.
-  if (batch->recv_initial_metadata) {
-    calld->InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(batch);
+  // Intercept recv_trailing_metadata to call CallDispatchController::Commit(),
+  // in case we wind up failing the call before we get down to the retry
+  // or LB call layer.
+  if (batch->recv_trailing_metadata) {
+    calld->InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
+        batch);
+  }
+  // If we already have a dynamic call, pass the batch down to it.
+  // Note that once we have done so, we do not need to acquire the channel's
+  // resolution mutex, which is more efficient (especially for streaming calls).
+  if (calld->dynamic_call_ != nullptr) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
+              chand, calld, calld->dynamic_call_.get());
+    }
+    calld->dynamic_call_->StartTransportStreamOpBatch(batch);
+    return;
   }
+  // We do not yet have a dynamic call.
+  //
   // If we've previously been cancelled, immediately fail any new batches.
   if (GPR_UNLIKELY(calld->cancel_error_ != GRPC_ERROR_NONE)) {
     if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -1949,35 +2025,16 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
       gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
               calld, grpc_error_std_string(calld->cancel_error_).c_str());
     }
-    // If we do not have a dynamic call (i.e., name resolution has not
-    // yet completed), fail all pending batches.  Otherwise, send the
-    // cancellation down to the dynamic call.
-    if (calld->dynamic_call_ == nullptr) {
-      calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
-                                NoYieldCallCombiner);
-      // Note: This will release the call combiner.
-      grpc_transport_stream_op_batch_finish_with_failure(
-          batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
-    } else {
-      // Note: This will release the call combiner.
-      calld->dynamic_call_->StartTransportStreamOpBatch(batch);
-    }
+    // Fail all pending batches.
+    calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
+                              NoYieldCallCombiner);
+    // Note: This will release the call combiner.
+    grpc_transport_stream_op_batch_finish_with_failure(
+        batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
     return;
   }
   // Add the batch to the pending list.
   calld->PendingBatchesAdd(elem, batch);
-  // Check if we've already created a dynamic call.
-  // Note that once we have done so, we do not need to acquire the channel's
-  // resolution mutex, which is more efficient (especially for streaming calls).
-  if (calld->dynamic_call_ != nullptr) {
-    if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
-      gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
-              chand, calld, calld->dynamic_call_.get());
-    }
-    calld->PendingBatchesResume(elem);
-    return;
-  }
-  // We do not yet have a dynamic call.
   // For batches containing a send_initial_metadata op, acquire the
   // channel's resolution mutex to apply the service config to the call,
   // after which we will create a dynamic call.
@@ -2218,7 +2275,6 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
   ConfigSelector::CallConfig call_config =
       config_selector->GetCallConfig({&path_, initial_metadata, arena_});
   if (call_config.error != GRPC_ERROR_NONE) return call_config.error;
-  on_call_committed_ = std::move(call_config.on_call_committed);
   // Create a ServiceConfigCallData for the call.  This stores a ref to the
   // ServiceConfig and caches the right set of parsed configs to use for
   // the call.  The MethodConfig will store itself in the call context,
@@ -2226,7 +2282,8 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
   // will be cleaned up when the call ends.
   auto* service_config_call_data = arena_->New<ServiceConfigCallData>(
       std::move(call_config.service_config), call_config.method_configs,
-      std::move(call_config.call_attributes), call_context_);
+      std::move(call_config.call_attributes),
+      call_config.call_dispatch_controller, call_context_);
   // Apply our own method params to the call.
   auto* method_params = static_cast<ClientChannelMethodParsedConfig*>(
       service_config_call_data->GetMethodParsedConfig(
@@ -2265,36 +2322,36 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
 }
 
 void ClientChannel::CallData::
-    RecvInitialMetadataReadyForConfigSelectorCommitCallback(
+    RecvTrailingMetadataReadyForConfigSelectorCommitCallback(
         void* arg, grpc_error_handle error) {
   auto* self = static_cast<CallData*>(arg);
-  if (self->on_call_committed_ != nullptr) {
-    self->on_call_committed_();
-    self->on_call_committed_ = nullptr;
+  auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
+      self->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
+  if (service_config_call_data != nullptr) {
+    service_config_call_data->call_dispatch_controller()->Commit();
   }
   // Chain to original callback.
-  Closure::Run(DEBUG_LOCATION, self->original_recv_initial_metadata_ready_,
+  Closure::Run(DEBUG_LOCATION, self->original_recv_trailing_metadata_ready_,
               GRPC_ERROR_REF(error));
 }
 
-// TODO(roth): Consider not intercepting this callback unless we
-// actually need to, if this causes a performance problem.
 void ClientChannel::CallData::
-    InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
+    InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
         grpc_transport_stream_op_batch* batch) {
-  original_recv_initial_metadata_ready_ =
-      batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
-  GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_,
-                    RecvInitialMetadataReadyForConfigSelectorCommitCallback,
+  original_recv_trailing_metadata_ready_ =
+      batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+  GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
+                    RecvTrailingMetadataReadyForConfigSelectorCommitCallback,
                     this, nullptr);
-  batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
-      &recv_initial_metadata_ready_;
+  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+      &recv_trailing_metadata_ready_;
 }
 
 void ClientChannel::CallData::AsyncResolutionDone(grpc_call_element* elem,
                                                   grpc_error_handle error) {
-  GRPC_CLOSURE_INIT(&pick_closure_, ResolutionDone, elem, nullptr);
-  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
+  // TODO(roth): Does this callback need to hold a ref to the call stack?
+  GRPC_CLOSURE_INIT(&resolution_done_closure_, ResolutionDone, elem, nullptr);
+  ExecCtx::Run(DEBUG_LOCATION, &resolution_done_closure_, error);
 }
 
 void ClientChannel::CallData::ResolutionDone(void* arg,
@@ -2530,22 +2587,39 @@ class ClientChannel::LoadBalancedCall::LbCallState
 // LoadBalancedCall
 //
 
+namespace {
+
+CallTracer::CallAttemptTracer* GetCallAttemptTracer(
+    grpc_call_context_element* context, bool is_transparent_retry) {
+  auto* call_tracer =
+      static_cast<CallTracer*>(context[GRPC_CONTEXT_CALL_TRACER].value);
+  if (call_tracer == nullptr) return nullptr;
+  return call_tracer->StartNewAttempt(is_transparent_retry);
+}
+
+}  // namespace
+
 ClientChannel::LoadBalancedCall::LoadBalancedCall(
     ClientChannel* chand, const grpc_call_element_args& args,
-    grpc_polling_entity* pollent, grpc_closure* on_call_destruction_complete)
-    : RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
-                     ? "LoadBalancedCall"
-                     : nullptr),
+    grpc_polling_entity* pollent, grpc_closure* on_call_destruction_complete,
+    ConfigSelector::CallDispatchController* call_dispatch_controller,
+    bool is_transparent_retry)
+    : InternallyRefCounted(
+          GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
+              ? "LoadBalancedCall"
+              : nullptr),
       chand_(chand),
       path_(grpc_slice_ref_internal(args.path)),
-      call_start_time_(args.start_time),
      deadline_(args.deadline),
      arena_(args.arena),
      owning_call_(args.call_stack),
      call_combiner_(args.call_combiner),
      call_context_(args.context),
      pollent_(pollent),
-      on_call_destruction_complete_(on_call_destruction_complete) {}
+      on_call_destruction_complete_(on_call_destruction_complete),
+      call_dispatch_controller_(call_dispatch_controller),
+      call_attempt_tracer_(
+          GetCallAttemptTracer(args.context, is_transparent_retry)) {}
 
 ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
   grpc_slice_unref_internal(path_);
@@ -2565,6 +2639,16 @@ ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
   }
 }
 
+void ClientChannel::LoadBalancedCall::Orphan() {
+  // Compute latency and report it to the tracer.
+  if (call_attempt_tracer_ != nullptr) {
+    gpr_timespec latency =
+        gpr_cycle_counter_sub(gpr_get_cycle_counter(), lb_call_start_time_);
+    call_attempt_tracer_->RecordEnd(latency);
+  }
+  Unref();
+}
+
 size_t ClientChannel::LoadBalancedCall::GetBatchIndex(
     grpc_transport_stream_op_batch* batch) {
   // Note: It is important the send_initial_metadata be the first entry
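The new Orphan() computes the attempt's latency from the lb-call start time and reports it to the tracer before dropping its ref. A standalone sketch of that record-latency-on-release pattern; Tracer and Attempt are illustrative stand-ins, and std::chrono replaces gpr cycle counters:

#include <chrono>
#include <cstdio>
#include <memory>

// Illustrative stand-in for CallAttemptTracer::RecordEnd(latency).
struct Tracer {
  void RecordEnd(std::chrono::microseconds latency) {
    std::printf("attempt took %lld us\n",
                static_cast<long long>(latency.count()));
  }
};

// Records the start time at construction and reports elapsed time when the
// owner lets go, mirroring how Orphan() measures the attempt before Unref().
class Attempt {
 public:
  explicit Attempt(Tracer* tracer)
      : tracer_(tracer), start_(std::chrono::steady_clock::now()) {}
  ~Attempt() {
    if (tracer_ != nullptr) {
      auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
          std::chrono::steady_clock::now() - start_);
      tracer_->RecordEnd(elapsed);
    }
  }

 private:
  Tracer* tracer_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  Tracer tracer;
  auto attempt = std::make_unique<Attempt>(&tracer);
  attempt.reset();  // analogous to Orphan(): report latency, then release
}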
@@ -2680,10 +2764,79 @@ void ClientChannel::LoadBalancedCall::PendingBatchesResume() {
 
 void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
     grpc_transport_stream_op_batch* batch) {
-  // Intercept recv_trailing_metadata_ready for LB callback.
+  // Handle call tracing.
+  if (call_attempt_tracer_ != nullptr) {
+    // Record send ops in tracer.
+    if (batch->cancel_stream) {
+      call_attempt_tracer_->RecordCancel(
+          GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
+    }
+    if (batch->send_initial_metadata) {
+      call_attempt_tracer_->RecordSendInitialMetadata(
+          batch->payload->send_initial_metadata.send_initial_metadata,
+          batch->payload->send_initial_metadata.send_initial_metadata_flags);
+      peer_string_ = batch->payload->send_initial_metadata.peer_string;
+      original_send_initial_metadata_on_complete_ = batch->on_complete;
+      GRPC_CLOSURE_INIT(&send_initial_metadata_on_complete_,
+                        SendInitialMetadataOnComplete, this, nullptr);
+      batch->on_complete = &send_initial_metadata_on_complete_;
+    }
+    if (batch->send_message) {
+      call_attempt_tracer_->RecordSendMessage(
+          *batch->payload->send_message.send_message);
+    }
+    if (batch->send_trailing_metadata) {
+      call_attempt_tracer_->RecordSendTrailingMetadata(
+          batch->payload->send_trailing_metadata.send_trailing_metadata);
+    }
+    // Intercept recv ops.
+    if (batch->recv_initial_metadata) {
+      recv_initial_metadata_ =
+          batch->payload->recv_initial_metadata.recv_initial_metadata;
+      original_recv_initial_metadata_ready_ =
+          batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
+      GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
+                        this, nullptr);
+      batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+          &recv_initial_metadata_ready_;
+    }
+    if (batch->recv_message) {
+      recv_message_ = batch->payload->recv_message.recv_message;
+      original_recv_message_ready_ =
+          batch->payload->recv_message.recv_message_ready;
+      GRPC_CLOSURE_INIT(&recv_message_ready_, RecvMessageReady, this, nullptr);
+      batch->payload->recv_message.recv_message_ready = &recv_message_ready_;
+    }
+  }
+  // Intercept recv_trailing_metadata even if there is no call tracer,
+  // since we may need to notify the LB policy about trailing metadata.
   if (batch->recv_trailing_metadata) {
-    InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
+    recv_trailing_metadata_ =
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata;
+    transport_stream_stats_ =
+        batch->payload->recv_trailing_metadata.collect_stats;
+    original_recv_trailing_metadata_ready_ =
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
+                      this, nullptr);
+    batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+        &recv_trailing_metadata_ready_;
+  }
+  // If we've already gotten a subchannel call, pass the batch down to it.
+  // Note that once we have picked a subchannel, we do not need to acquire
+  // the channel's data plane mutex, which is more efficient (especially for
+  // streaming calls).
+  if (subchannel_call_ != nullptr) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
+      gpr_log(GPR_INFO,
+              "chand=%p lb_call=%p: starting batch on subchannel_call=%p",
+              chand_, this, subchannel_call_.get());
+    }
+    subchannel_call_->StartTransportStreamOpBatch(batch);
+    return;
   }
+  // We do not yet have a subchannel call.
+  //
   // If we've previously been cancelled, immediately fail any new batches.
   if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
     if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
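Every interception in the hunk above follows one shape: stash the batch's original callback, substitute our own, do the tracing work when it fires, then chain to the original. A minimal standalone sketch of that idiom, with std::function standing in for grpc_closure and illustrative Batch/Call types:

#include <cstdio>
#include <functional>

using Closure = std::function<void(int /*error*/)>;

struct Batch {
  Closure recv_trailing_metadata_ready;
};

class Call {
 public:
  // Swap our callback into the batch, keeping the original to chain to.
  void InterceptRecvTrailingMetadata(Batch* batch) {
    original_ready_ = batch->recv_trailing_metadata_ready;
    batch->recv_trailing_metadata_ready = [this](int error) {
      std::printf("intercepted: recording trailing metadata (error=%d)\n",
                  error);
      original_ready_(error);  // chain to the original callback
    };
  }

 private:
  Closure original_ready_;
};

int main() {
  Batch batch;
  batch.recv_trailing_metadata_ready = [](int error) {
    std::printf("original callback ran (error=%d)\n", error);
  };
  Call call;
  call.InterceptRecvTrailingMetadata(&batch);
  batch.recv_trailing_metadata_ready(0);  // the transport completes the op
}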
@@ -2708,36 +2861,15 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
       gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
               chand_, this, grpc_error_std_string(cancel_error_).c_str());
     }
-    // If we do not have a subchannel call (i.e., a pick has not yet
-    // been started), fail all pending batches.  Otherwise, send the
-    // cancellation down to the subchannel call.
-    if (subchannel_call_ == nullptr) {
-      PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
-      // Note: This will release the call combiner.
-      grpc_transport_stream_op_batch_finish_with_failure(
-          batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
-    } else {
-      // Note: This will release the call combiner.
-      subchannel_call_->StartTransportStreamOpBatch(batch);
-    }
+    // Fail all pending batches.
+    PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
+    // Note: This will release the call combiner.
+    grpc_transport_stream_op_batch_finish_with_failure(
+        batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
     return;
   }
   // Add the batch to the pending list.
   PendingBatchesAdd(batch);
-  // Check if we've already gotten a subchannel call.
-  // Note that once we have picked a subchannel, we do not need to acquire
-  // the channel's data plane mutex, which is more efficient (especially for
-  // streaming calls).
-  if (subchannel_call_ != nullptr) {
-    if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
-      gpr_log(GPR_INFO,
-              "chand=%p lb_call=%p: starting batch on subchannel_call=%p",
-              chand_, this, subchannel_call_.get());
-    }
-    PendingBatchesResume();
-    return;
-  }
-  // We do not yet have a subchannel call.
   // For batches containing a send_initial_metadata op, acquire the
   // channel's data plane mutex to pick a subchannel.
   if (GPR_LIKELY(batch->send_initial_metadata)) {
@@ -2759,38 +2891,82 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
   }
 }
 
-void ClientChannel::LoadBalancedCall::
-    RecvTrailingMetadataReadyForLoadBalancingPolicy(void* arg,
-                                                    grpc_error_handle error) {
+void ClientChannel::LoadBalancedCall::SendInitialMetadataOnComplete(
+    void* arg, grpc_error_handle error) {
+  auto* self = static_cast<LoadBalancedCall*>(arg);
+  self->call_attempt_tracer_->RecordOnDoneSendInitialMetadata(
+      self->peer_string_);
+  Closure::Run(DEBUG_LOCATION,
+               self->original_send_initial_metadata_on_complete_,
+               GRPC_ERROR_REF(error));
+}
+
+void ClientChannel::LoadBalancedCall::RecvInitialMetadataReady(
+    void* arg, grpc_error_handle error) {
+  auto* self = static_cast<LoadBalancedCall*>(arg);
+  if (error == GRPC_ERROR_NONE) {
+    // recv_initial_metadata_flags is not populated for clients
+    self->call_attempt_tracer_->RecordReceivedInitialMetadata(
+        self->recv_initial_metadata_, 0 /* recv_initial_metadata_flags */);
+  }
+  Closure::Run(DEBUG_LOCATION, self->original_recv_initial_metadata_ready_,
+               GRPC_ERROR_REF(error));
+}
+
+void ClientChannel::LoadBalancedCall::RecvMessageReady(
+    void* arg, grpc_error_handle error) {
+  auto* self = static_cast<LoadBalancedCall*>(arg);
+  if (*self->recv_message_ != nullptr) {
+    self->call_attempt_tracer_->RecordReceivedMessage(**self->recv_message_);
+  }
+  Closure::Run(DEBUG_LOCATION, self->original_recv_message_ready_,
+               GRPC_ERROR_REF(error));
+}
+
+void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
+    void* arg, grpc_error_handle error) {
   auto* self = static_cast<LoadBalancedCall*>(arg);
-  if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
-    // Set error if call did not succeed.
-    grpc_error_handle error_for_lb = GRPC_ERROR_NONE;
+  // Check if we have a tracer or an LB callback to invoke.
+  if (self->call_attempt_tracer_ != nullptr ||
+      self->lb_recv_trailing_metadata_ready_ != nullptr) {
+    // Get the call's status.
+    absl::Status status;
     if (error != GRPC_ERROR_NONE) {
-      error_for_lb = GRPC_ERROR_REF(error);
+      // Get status from error.
+      grpc_status_code code;
+      grpc_slice message = grpc_empty_slice();
+      grpc_error_get_status(error, self->deadline_, &code, &message,
+                            /*http_error=*/nullptr, /*error_string=*/nullptr);
+      status = absl::Status(static_cast<absl::StatusCode>(code),
+                            StringViewFromSlice(message));
     } else {
+      // Get status from headers.
       const auto& fields = self->recv_trailing_metadata_->idx.named;
       GPR_ASSERT(fields.grpc_status != nullptr);
-      grpc_status_code status =
+      grpc_status_code code =
           grpc_get_status_code_from_metadata(fields.grpc_status->md);
-      if (status != GRPC_STATUS_OK) {
-        // Set error to reflect the call's status.
-        error_for_lb = grpc_error_set_int(
-            GRPC_ERROR_CREATE_FROM_STATIC_STRING("call failed"),
-            GRPC_ERROR_INT_GRPC_STATUS, status);
+      if (code != GRPC_STATUS_OK) {
+        absl::string_view message;
         if (fields.grpc_message != nullptr) {
-          error_for_lb = grpc_error_set_str(
-              error_for_lb, GRPC_ERROR_STR_GRPC_MESSAGE,
-              grpc_slice_ref_internal(GRPC_MDVALUE(fields.grpc_message->md)));
+          message = StringViewFromSlice(GRPC_MDVALUE(fields.grpc_message->md));
         }
+        status = absl::Status(static_cast<absl::StatusCode>(code), message);
       }
     }
-    // Invoke callback to LB policy.
-    Metadata trailing_metadata(self, self->recv_trailing_metadata_);
-    LbCallState lb_call_state(self);
-    self->lb_recv_trailing_metadata_ready_(error_for_lb, &trailing_metadata,
-                                           &lb_call_state);
-    GRPC_ERROR_UNREF(error_for_lb);
+    // If we have a tracer, notify it.
+    if (self->call_attempt_tracer_ != nullptr) {
+      self->call_attempt_tracer_->RecordReceivedTrailingMetadata(
+          status, self->recv_trailing_metadata_,
+          *self->transport_stream_stats_);
+    }
+    // If the LB policy requested a callback for trailing metadata, invoke
+    // the callback.
+    if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
+      Metadata trailing_metadata(self, self->recv_trailing_metadata_);
+      LbCallState lb_call_state(self);
+      self->lb_recv_trailing_metadata_ready_(status, &trailing_metadata,
+                                             &lb_call_state);
+    }
   }
   // Chain to original callback.
   if (self->failure_error_ != GRPC_ERROR_NONE) {
@@ -2803,23 +2979,9 @@ void ClientChannel::LoadBalancedCall::
                error);
 }
 
-void ClientChannel::LoadBalancedCall::
-    InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
-        grpc_transport_stream_op_batch* batch) {
-  recv_trailing_metadata_ =
-      batch->payload->recv_trailing_metadata.recv_trailing_metadata;
-  original_recv_trailing_metadata_ready_ =
-      batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
-  GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
-                    RecvTrailingMetadataReadyForLoadBalancingPolicy, this,
-                    grpc_schedule_on_exec_ctx);
-  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
-      &recv_trailing_metadata_ready_;
-}
-
 void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
   SubchannelCall::Args call_args = {
-      std::move(connected_subchannel_), pollent_, path_, call_start_time_,
+      std::move(connected_subchannel_), pollent_, path_, /*start_time=*/0,
       deadline_, arena_,
       // TODO(roth): When we implement hedging support, we will probably
       // need to use a separate call context for each subchannel call.
@@ -2873,6 +3035,7 @@ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
                 lb_call->lb_call_canceller_);
       }
       if (lb_call->lb_call_canceller_ == self && error != GRPC_ERROR_NONE) {
+        lb_call->call_dispatch_controller_->Commit();
         // Remove pick from list of queued picks.
         lb_call->MaybeRemoveCallFromLbQueuedCallsLocked();
         // Fail pending batches on the call.
@@ -2914,6 +3077,7 @@ void ClientChannel::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
 }
 
 void ClientChannel::LoadBalancedCall::AsyncPickDone(grpc_error_handle error) {
+  // TODO(roth): Does this callback need to hold a ref to LoadBalancedCall?
   GRPC_CLOSURE_INIT(&pick_closure_, PickDone, this, grpc_schedule_on_exec_ctx);
   ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
 }
@@ -2930,26 +3094,10 @@ void ClientChannel::LoadBalancedCall::PickDone(void* arg,
     self->PendingBatchesFail(GRPC_ERROR_REF(error), YieldCallCombiner);
     return;
   }
+  self->call_dispatch_controller_->Commit();
   self->CreateSubchannelCall();
 }
 
-namespace {
-
-const char* PickResultTypeName(
-    LoadBalancingPolicy::PickResult::ResultType type) {
-  switch (type) {
-    case LoadBalancingPolicy::PickResult::PICK_COMPLETE:
-      return "COMPLETE";
-    case LoadBalancingPolicy::PickResult::PICK_QUEUE:
-      return "QUEUE";
-    case LoadBalancingPolicy::PickResult::PICK_FAILED:
-      return "FAILED";
-  }
-  GPR_UNREACHABLE_CODE(return "UNKNOWN");
-}
-
-}  // namespace
-
 void ClientChannel::LoadBalancedCall::PickSubchannel(void* arg,
                                                      grpc_error_handle error) {
   auto* self = static_cast<LoadBalancedCall*>(arg);
@@ -2983,64 +3131,82 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
   Metadata initial_metadata(this, initial_metadata_batch);
   pick_args.initial_metadata = &initial_metadata;
   auto result = chand_->picker_->Pick(pick_args);
-  …(58 deleted lines — the previous switch-style handling of result.type — are not shown in this view)
+  return HandlePickResult<bool>(
+      &result,
+      // CompletePick
+      [this](LoadBalancingPolicy::PickResult::Complete* complete_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO,
+                      "chand=%p lb_call=%p: LB pick succeeded: subchannel=%p",
+                      chand_, this, complete_pick->subchannel.get());
+            }
+            GPR_ASSERT(complete_pick->subchannel != nullptr);
+            // Grab a ref to the connected subchannel while we're still
+            // holding the data plane mutex.
+            connected_subchannel_ = chand_->GetConnectedSubchannelInDataPlane(
+                complete_pick->subchannel.get());
+            GPR_ASSERT(connected_subchannel_ != nullptr);
+            lb_recv_trailing_metadata_ready_ =
+                std::move(complete_pick->recv_trailing_metadata_ready);
+            MaybeRemoveCallFromLbQueuedCallsLocked();
+            return true;
+          },
+      // QueuePick
+      [this](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_,
+                      this);
+            }
+            MaybeAddCallToLbQueuedCallsLocked();
+            return false;
+          },
+      // FailPick
+      [this, send_initial_metadata_flags,
+       &error](LoadBalancingPolicy::PickResult::Fail* fail_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s",
+                      chand_, this, fail_pick->status.ToString().c_str());
+            }
+            // If we're shutting down, fail all RPCs.
+            grpc_error_handle disconnect_error = chand_->disconnect_error();
+            if (disconnect_error != GRPC_ERROR_NONE) {
+              MaybeRemoveCallFromLbQueuedCallsLocked();
+              *error = GRPC_ERROR_REF(disconnect_error);
+              return true;
+            }
+            // If wait_for_ready is false, then the error indicates the RPC
+            // attempt's final status.
+            if ((send_initial_metadata_flags &
+                 GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
+              grpc_error_handle lb_error =
+                  absl_status_to_grpc_error(fail_pick->status);
+              *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                  "Failed to pick subchannel", &lb_error, 1);
+              GRPC_ERROR_UNREF(lb_error);
+              MaybeRemoveCallFromLbQueuedCallsLocked();
+              return true;
+            }
+            // If wait_for_ready is true, then queue to retry when we get a new
+            // picker.
+            MaybeAddCallToLbQueuedCallsLocked();
+            return false;
+          },
+      // DropPick
+      [this, &error](LoadBalancingPolicy::PickResult::Drop* drop_pick)
+          ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+            if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+              gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s",
+                      chand_, this, drop_pick->status.ToString().c_str());
+            }
+            *error =
+                grpc_error_set_int(absl_status_to_grpc_error(drop_pick->status),
+                                   GRPC_ERROR_INT_LB_POLICY_DROP, 1);
+            MaybeRemoveCallFromLbQueuedCallsLocked();
+            return true;
+          });
 }
 
 }  // namespace grpc_core