grpc 1.12.0 → 1.13.0.pre1
Potentially problematic release.
- checksums.yaml +4 -4
- data/Makefile +314 -23
- data/include/grpc/impl/codegen/fork.h +4 -4
- data/include/grpc/impl/codegen/grpc_types.h +1 -1
- data/include/grpc/impl/codegen/port_platform.h +3 -0
- data/src/boringssl/err_data.c +256 -246
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +1 -1
- data/src/core/ext/filters/client_channel/client_channel.cc +367 -272
- data/src/core/ext/filters/client_channel/lb_policy.h +1 -3
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +11 -9
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +42 -32
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +36 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +36 -102
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +37 -32
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +22 -19
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +1 -1
- data/src/core/ext/filters/client_channel/resolver.h +1 -3
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +3 -3
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +0 -1
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +4 -4
- data/src/core/ext/filters/client_channel/subchannel.cc +3 -3
- data/src/core/ext/filters/http/client_authority_filter.cc +5 -4
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +4 -4
- data/src/core/ext/filters/http/server/http_server_filter.cc +123 -131
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +9 -8
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +19 -19
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +10 -6
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +4 -3
- data/src/core/ext/transport/chttp2/transport/parsing.cc +14 -12
- data/src/core/ext/transport/chttp2/transport/writing.cc +6 -6
- data/src/core/lib/channel/channel_stack.cc +0 -5
- data/src/core/lib/channel/channel_stack.h +1 -1
- data/src/core/lib/channel/channel_stack_builder.cc +0 -3
- data/src/core/lib/channel/channel_stack_builder.h +0 -2
- data/src/core/lib/channel/channel_trace.cc +3 -3
- data/src/core/lib/channel/channelz_registry.cc +77 -0
- data/src/core/lib/channel/channelz_registry.h +99 -0
- data/src/core/lib/channel/handshaker.cc +20 -1
- data/src/core/lib/debug/stats.h +7 -0
- data/src/core/lib/debug/stats_data.cc +5 -0
- data/src/core/lib/debug/stats_data.h +120 -0
- data/src/core/lib/debug/trace.h +11 -9
- data/src/core/lib/gprpp/fork.cc +260 -0
- data/src/core/lib/gprpp/fork.h +79 -0
- data/src/core/lib/gprpp/memory.h +12 -0
- data/src/core/lib/gprpp/orphanable.h +2 -6
- data/src/core/lib/gprpp/ref_counted.h +2 -6
- data/src/core/lib/gprpp/thd.h +0 -3
- data/src/core/lib/gprpp/thd_posix.cc +4 -53
- data/src/core/lib/gprpp/thd_windows.cc +0 -7
- data/src/core/lib/http/httpcli_security_connector.cc +1 -3
- data/src/core/lib/iomgr/combiner.cc +19 -2
- data/src/core/lib/iomgr/combiner.h +1 -1
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +2 -2
- data/src/core/lib/iomgr/ev_epollex_linux.cc +59 -3
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +1 -1
- data/src/core/lib/iomgr/ev_poll_posix.cc +2 -2
- data/src/core/lib/iomgr/ev_posix.cc +11 -4
- data/src/core/lib/iomgr/ev_posix.h +6 -0
- data/src/core/lib/iomgr/exec_ctx.cc +9 -9
- data/src/core/lib/iomgr/exec_ctx.h +39 -20
- data/src/core/lib/iomgr/fork_posix.cc +30 -18
- data/src/core/lib/iomgr/iomgr_posix.cc +2 -2
- data/src/core/lib/iomgr/polling_entity.cc +11 -2
- data/src/core/lib/iomgr/pollset_custom.cc +2 -2
- data/src/core/lib/iomgr/port.h +38 -1
- data/src/core/lib/iomgr/resolve_address.h +1 -1
- data/src/core/lib/iomgr/resolve_address_posix.cc +1 -1
- data/src/core/lib/iomgr/resource_quota.cc +1 -1
- data/src/core/lib/iomgr/sockaddr_posix.h +1 -1
- data/src/core/lib/iomgr/socket_factory_posix.cc +1 -1
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +1 -1
- data/src/core/lib/iomgr/tcp_client_custom.cc +3 -3
- data/src/core/lib/iomgr/tcp_client_posix.cc +3 -2
- data/src/core/lib/iomgr/tcp_custom.cc +1 -1
- data/src/core/lib/iomgr/tcp_posix.cc +18 -10
- data/src/core/lib/iomgr/tcp_server_posix.cc +9 -8
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +1 -1
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +16 -4
- data/src/core/lib/iomgr/timer.h +1 -1
- data/src/core/lib/iomgr/timer_generic.cc +113 -41
- data/src/core/lib/iomgr/timer_manager.cc +1 -1
- data/src/core/lib/security/credentials/credentials.h +1 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +88 -115
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +16 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +10 -6
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +1 -1
- data/src/core/lib/security/security_connector/alts_security_connector.cc +2 -1
- data/src/core/lib/security/security_connector/security_connector.cc +7 -7
- data/src/core/lib/security/transport/security_handshaker.cc +1 -0
- data/src/core/lib/security/util/json_util.cc +4 -0
- data/src/core/lib/slice/slice_buffer.cc +15 -3
- data/src/core/lib/surface/call.cc +31 -17
- data/src/core/lib/surface/call.h +5 -0
- data/src/core/lib/surface/channel.cc +2 -5
- data/src/core/lib/surface/completion_queue.cc +1 -3
- data/src/core/lib/surface/completion_queue.h +0 -1
- data/src/core/lib/surface/init.cc +7 -8
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/byte_stream.cc +1 -1
- data/src/core/lib/transport/transport.cc +2 -1
- data/src/core/lib/transport/transport.h +4 -8
- data/src/core/lib/transport/transport_op_string.cc +1 -1
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +19 -7
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +10 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +28 -2
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +3 -0
- data/src/core/tsi/fake_transport_security.cc +1 -0
- data/src/core/tsi/ssl_transport_security.cc +238 -110
- data/src/core/tsi/transport_security.cc +14 -0
- data/src/core/tsi/transport_security.h +2 -0
- data/src/core/tsi/transport_security_interface.h +11 -1
- data/src/ruby/bin/math_client.rb +17 -9
- data/src/ruby/lib/grpc/generic/rpc_server.rb +2 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +4 -1
- data/third_party/boringssl/crypto/asn1/a_int.c +33 -28
- data/third_party/boringssl/crypto/asn1/a_mbstr.c +24 -22
- data/third_party/boringssl/crypto/asn1/a_utf8.c +13 -11
- data/third_party/boringssl/crypto/asn1/asn1_locl.h +3 -0
- data/third_party/boringssl/crypto/bio/fd.c +1 -0
- data/third_party/boringssl/crypto/bio/file.c +2 -0
- data/third_party/boringssl/crypto/bn_extra/convert.c +6 -5
- data/third_party/boringssl/crypto/bytestring/ber.c +1 -4
- data/third_party/boringssl/crypto/bytestring/cbb.c +116 -16
- data/third_party/boringssl/crypto/bytestring/cbs.c +150 -20
- data/third_party/boringssl/crypto/cipher_extra/e_aesccm.c +171 -0
- data/third_party/boringssl/crypto/cipher_extra/e_rc2.c +2 -0
- data/third_party/boringssl/crypto/cipher_extra/e_tls.c +1 -2
- data/third_party/boringssl/crypto/cpu-aarch64-fuchsia.c +55 -0
- data/third_party/boringssl/crypto/cpu-aarch64-linux.c +2 -1
- data/third_party/boringssl/crypto/dsa/dsa.c +16 -54
- data/third_party/boringssl/crypto/fipsmodule/bcm.c +11 -542
- data/third_party/boringssl/crypto/fipsmodule/bn/add.c +33 -64
- data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +4 -3
- data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +122 -70
- data/third_party/boringssl/crypto/fipsmodule/bn/bytes.c +32 -71
- data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +58 -112
- data/third_party/boringssl/crypto/fipsmodule/bn/div.c +198 -122
- data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +31 -65
- data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +2 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +98 -15
- data/third_party/boringssl/crypto/fipsmodule/bn/jacobi.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +124 -81
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +8 -30
- data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +303 -347
- data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +2 -3
- data/third_party/boringssl/crypto/fipsmodule/bn/random.c +3 -4
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.c +199 -222
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.h +27 -47
- data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +45 -28
- data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/cipher/e_aes.c +10 -10
- data/third_party/boringssl/crypto/fipsmodule/des/internal.h +2 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +78 -47
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +35 -54
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +3 -10
- data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +36 -22
- data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +59 -90
- data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +29 -48
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +17 -26
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.h +15 -11
- data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +45 -51
- data/third_party/boringssl/crypto/fipsmodule/ec/{util-64.c → util.c} +0 -5
- data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +144 -264
- data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +78 -56
- data/third_party/boringssl/crypto/fipsmodule/modes/ccm.c +256 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/internal.h +36 -32
- data/third_party/boringssl/crypto/fipsmodule/rand/ctrdrbg.c +9 -7
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +16 -10
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +255 -102
- data/third_party/boringssl/crypto/fipsmodule/self_check/self_check.c +581 -0
- data/third_party/boringssl/crypto/fipsmodule/tls/internal.h +39 -0
- data/third_party/boringssl/crypto/fipsmodule/tls/kdf.c +165 -0
- data/third_party/boringssl/crypto/internal.h +65 -2
- data/third_party/boringssl/crypto/mem.c +0 -2
- data/third_party/boringssl/crypto/obj/obj.c +6 -73
- data/third_party/boringssl/crypto/thread_pthread.c +35 -5
- data/third_party/boringssl/crypto/x509/a_strex.c +11 -11
- data/third_party/boringssl/crypto/x509/x_name.c +13 -0
- data/third_party/boringssl/include/openssl/aead.h +4 -0
- data/third_party/boringssl/include/openssl/asn1.h +1 -3
- data/third_party/boringssl/include/openssl/base.h +1 -14
- data/third_party/boringssl/include/openssl/bio.h +1 -1
- data/third_party/boringssl/include/openssl/bn.h +49 -15
- data/third_party/boringssl/include/openssl/bytestring.h +49 -24
- data/third_party/boringssl/include/openssl/crypto.h +4 -0
- data/third_party/boringssl/include/openssl/ec_key.h +7 -3
- data/third_party/boringssl/include/openssl/err.h +9 -9
- data/third_party/boringssl/include/openssl/evp.h +1 -1
- data/third_party/boringssl/include/openssl/rsa.h +34 -10
- data/third_party/boringssl/include/openssl/ssl.h +160 -17
- data/third_party/boringssl/include/openssl/stack.h +1 -1
- data/third_party/boringssl/include/openssl/tls1.h +10 -2
- data/third_party/boringssl/include/openssl/x509.h +3 -0
- data/third_party/boringssl/ssl/d1_both.cc +16 -2
- data/third_party/boringssl/ssl/dtls_method.cc +1 -1
- data/third_party/boringssl/ssl/handoff.cc +285 -0
- data/third_party/boringssl/ssl/handshake.cc +26 -12
- data/third_party/boringssl/ssl/handshake_client.cc +65 -31
- data/third_party/boringssl/ssl/handshake_server.cc +14 -2
- data/third_party/boringssl/ssl/internal.h +132 -79
- data/third_party/boringssl/ssl/s3_both.cc +2 -2
- data/third_party/boringssl/ssl/s3_lib.cc +3 -1
- data/third_party/boringssl/ssl/s3_pkt.cc +0 -18
- data/third_party/boringssl/ssl/ssl_aead_ctx.cc +1 -4
- data/third_party/boringssl/ssl/ssl_asn1.cc +47 -43
- data/third_party/boringssl/ssl/ssl_cipher.cc +8 -8
- data/third_party/boringssl/ssl/ssl_key_share.cc +3 -1
- data/third_party/boringssl/ssl/ssl_lib.cc +83 -14
- data/third_party/boringssl/ssl/ssl_privkey.cc +6 -0
- data/third_party/boringssl/ssl/ssl_stat.cc +6 -6
- data/third_party/boringssl/ssl/ssl_versions.cc +12 -85
- data/third_party/boringssl/ssl/ssl_x509.cc +59 -61
- data/third_party/boringssl/ssl/t1_enc.cc +73 -124
- data/third_party/boringssl/ssl/t1_lib.cc +367 -41
- data/third_party/boringssl/ssl/tls13_both.cc +8 -0
- data/third_party/boringssl/ssl/tls13_client.cc +98 -184
- data/third_party/boringssl/ssl/tls13_enc.cc +88 -158
- data/third_party/boringssl/ssl/tls13_server.cc +91 -137
- data/third_party/boringssl/ssl/tls_method.cc +0 -17
- data/third_party/boringssl/ssl/tls_record.cc +1 -10
- data/third_party/boringssl/third_party/fiat/curve25519.c +921 -2753
- data/third_party/boringssl/third_party/fiat/curve25519_tables.h +7880 -0
- data/third_party/boringssl/third_party/fiat/internal.h +32 -20
- data/third_party/boringssl/third_party/fiat/p256.c +1824 -0
- metadata +64 -64
- data/src/core/lib/channel/channel_trace_registry.cc +0 -80
- data/src/core/lib/channel/channel_trace_registry.h +0 -43
- data/src/core/lib/gpr/fork.cc +0 -78
- data/src/core/lib/gpr/fork.h +0 -35
- data/src/core/tsi/transport_security_adapter.cc +0 -235
- data/src/core/tsi/transport_security_adapter.h +0 -41
- data/src/ruby/bin/apis/google/protobuf/empty.rb +0 -29
- data/src/ruby/bin/apis/pubsub_demo.rb +0 -241
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb +0 -159
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb +0 -88
- data/src/ruby/pb/test/client.rb +0 -764
- data/src/ruby/pb/test/server.rb +0 -252
- data/third_party/boringssl/crypto/curve25519/x25519-x86_64.c +0 -247
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +0 -1674
--- a/data/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/data/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -40,7 +40,7 @@ grpc_connectivity_state grpc_channel_check_connectivity_state(
   GRPC_API_TRACE(
       "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
       (channel, try_to_connect));
-  if (client_channel_elem->filter == &grpc_client_channel_filter) {
+  if (GPR_LIKELY(client_channel_elem->filter == &grpc_client_channel_filter)) {
    state = grpc_client_channel_check_connectivity_state(client_channel_elem,
                                                         try_to_connect);
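
Most of the hunks below just wrap hot-path conditions in the new GPR_LIKELY/GPR_UNLIKELY branch-prediction hints. A minimal sketch of what they expand to, assuming the usual GCC/Clang definition (the +3 lines to include/grpc/impl/codegen/port_platform.h listed above):

    #include <cstdio>

    // Branch-prediction hints: semantics are unchanged, but the compiler
    // lays out the expected branch as the fall-through (hot) path.
    #if defined(__GNUC__)
    #define GPR_LIKELY(x) __builtin_expect((bool)(x), 1)
    #define GPR_UNLIKELY(x) __builtin_expect((bool)(x), 0)
    #else
    #define GPR_LIKELY(x) (x)
    #define GPR_UNLIKELY(x) (x)
    #endif

    int main() {
      int status = 0;
      if (GPR_LIKELY(status == 0)) {  // hint: this branch is usually taken
        std::puts("fast path");
      }
      return 0;
    }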
--- a/data/src/core/ext/filters/client_channel/client_channel.cc
+++ b/data/src/core/ext/filters/client_channel/client_channel.cc
@@ -379,7 +379,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
       new_lb_policy =
           grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
               lb_policy_name, lb_policy_args);
-      if (new_lb_policy == nullptr) {
+      if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
        gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
                lb_policy_name);
      } else {
@@ -891,6 +891,7 @@ typedef struct client_channel_call_data {
   grpc_closure pick_cancel_closure;

   grpc_polling_entity* pollent;
+  bool pollent_added_to_interested_parties;

   // Batches are added to this list when received from above.
   // They are removed when we are done handling the batch (i.e., when
@@ -911,6 +912,15 @@ typedef struct client_channel_call_data {
   grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
   grpc_timer retry_timer;

+  // The number of pending retriable subchannel batches containing send ops.
+  // We hold a ref to the call stack while this is non-zero, since replay
+  // batches may not complete until after all callbacks have been returned
+  // to the surface, and we need to make sure that the call is not destroyed
+  // until all of these batches have completed.
+  // Note that we actually only need to track replay batches, but it's
+  // easier to track all batches with send ops.
+  int num_pending_retriable_subchannel_send_batches;
+
   // Cached data for retrying send ops.
   // send_initial_metadata
   bool seen_send_initial_metadata;
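
The comment above is the heart of this change: the call stack must outlive every retriable subchannel batch that carries send ops. As a self-contained illustration of the pattern (simplified stand-in types, not the real grpc structs), the counter pairs one ref taken on the first pending send batch with one unref released on the last completion:

    #include <cassert>

    struct CallStack {
      int refs = 1;
      void Ref() { ++refs; }
      void Unref() { if (--refs == 0) { /* destroy the call */ } }
    };

    struct CallData {
      CallStack* owning_call = nullptr;
      int num_pending_retriable_subchannel_send_batches = 0;
    };

    void OnSendBatchStarted(CallData* calld) {
      // The first pending send batch takes the ref (see the hunks below).
      if (calld->num_pending_retriable_subchannel_send_batches == 0) {
        calld->owning_call->Ref();
      }
      ++calld->num_pending_retriable_subchannel_send_batches;
    }

    void OnSendBatchComplete(CallData* calld) {
      // The last completion releases it, after all other cleanup has run.
      assert(calld->num_pending_retriable_subchannel_send_batches > 0);
      if (--calld->num_pending_retriable_subchannel_send_batches == 0) {
        calld->owning_call->Unref();
      }
    }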
@@ -940,7 +950,6 @@ static void retry_commit(grpc_call_element* elem,
 static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
 static void on_complete(void* arg, grpc_error* error);
 static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
-static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
 static void start_pick_locked(void* arg, grpc_error* ignored);

 //
@@ -1114,7 +1123,8 @@ static void pending_batches_add(grpc_call_element* elem,
   if (batch->send_trailing_metadata) {
     calld->pending_send_trailing_metadata = true;
   }
-  if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
+  if (GPR_UNLIKELY(calld->bytes_buffered_for_retry >
+                   chand->per_rpc_retry_buffer_size)) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_INFO,
              "chand=%p calld=%p: exceeded retry buffer size, committing",
@@ -1421,7 +1431,7 @@ static void do_retry(grpc_call_element* elem,
   }
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_INFO,
-            "chand=%p calld=%p: retrying failed call in %" ...
+            "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
            calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
  }
  // Schedule retry after computed delay.
@@ -1461,7 +1471,7 @@ static bool maybe_retry(grpc_call_element* elem,
     }
   }
   // Check status.
-  if (status == GRPC_STATUS_OK) {
+  if (GPR_LIKELY(status == GRPC_STATUS_OK)) {
    if (calld->retry_throttle_data != nullptr) {
      calld->retry_throttle_data->RecordSuccess();
    }
@@ -1655,8 +1665,9 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   // the recv_trailing_metadata on_complete callback, then defer
   // propagating this callback back to the surface. We can evaluate whether
   // to retry when recv_trailing_metadata comes back.
-  if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
-      !retry_state->completed_recv_trailing_metadata) {
+  if (GPR_UNLIKELY((batch_data->trailing_metadata_available ||
+                    error != GRPC_ERROR_NONE) &&
+                   !retry_state->completed_recv_trailing_metadata)) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_INFO,
              "chand=%p calld=%p: deferring recv_initial_metadata_ready "
@@ -1744,8 +1755,9 @@ static void recv_message_ready(void* arg, grpc_error* error) {
   // the recv_trailing_metadata on_complete callback, then defer
   // propagating this callback back to the surface. We can evaluate whether
   // to retry when recv_trailing_metadata comes back.
-  if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
-      !retry_state->completed_recv_trailing_metadata) {
+  if (GPR_UNLIKELY(
+          (batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
+          !retry_state->completed_recv_trailing_metadata)) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_INFO,
              "chand=%p calld=%p: deferring recv_message_ready (nullptr "
@@ -1858,7 +1870,8 @@ static void add_closures_for_deferred_recv_callbacks(
     closure_to_execute* closures, size_t* num_closures) {
   if (batch_data->batch.recv_trailing_metadata) {
     // Add closure for deferred recv_initial_metadata_ready.
-    if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
+    if (GPR_UNLIKELY(retry_state->recv_initial_metadata_ready_deferred_batch !=
+                     nullptr)) {
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = GRPC_CLOSURE_INIT(
          &batch_data->recv_initial_metadata_ready,
@@ -1870,7 +1883,8 @@ static void add_closures_for_deferred_recv_callbacks(
       retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
     }
     // Add closure for deferred recv_message_ready.
-    if (retry_state->recv_message_ready_deferred_batch != nullptr) {
+    if (GPR_UNLIKELY(retry_state->recv_message_ready_deferred_batch !=
+                     nullptr)) {
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = GRPC_CLOSURE_INIT(
          &batch_data->recv_message_ready, invoke_recv_message_callback,
@@ -2039,7 +2053,7 @@ static void on_complete(void* arg, grpc_error* error) {
   // an error or (b) we receive status.
   grpc_status_code status = GRPC_STATUS_OK;
   grpc_mdelem* server_pushback_md = nullptr;
-  if (error != GRPC_ERROR_NONE) {  // Case (a).
+  if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {  // Case (a).
    call_finished = true;
    grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                          nullptr);
@@ -2075,7 +2089,22 @@ static void on_complete(void* arg, grpc_error* error) {
       batch_data_unref(batch_data);
       GRPC_ERROR_UNREF(retry_state->recv_message_error);
     }
+    // Track number of pending subchannel send batches and determine if
+    // this was the last one.
+    bool last_callback_complete = false;
+    if (batch_data->batch.send_initial_metadata ||
+        batch_data->batch.send_message ||
+        batch_data->batch.send_trailing_metadata) {
+      --calld->num_pending_retriable_subchannel_send_batches;
+      last_callback_complete =
+          calld->num_pending_retriable_subchannel_send_batches == 0;
+    }
    batch_data_unref(batch_data);
+    // If we just completed the last subchannel send batch, unref the
+    // call stack.
+    if (last_callback_complete) {
+      GRPC_CALL_STACK_UNREF(calld->owning_call, "subchannel_send_batches");
+    }
    return;
  }
  // Not retrying, so commit the call.
@@ -2118,11 +2147,26 @@ static void on_complete(void* arg, grpc_error* error) {
     add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
                                                 closures, &num_closures);
   }
+  // Track number of pending subchannel send batches and determine if this
+  // was the last one.
+  bool last_callback_complete = false;
+  if (batch_data->batch.send_initial_metadata ||
+      batch_data->batch.send_message ||
+      batch_data->batch.send_trailing_metadata) {
+    --calld->num_pending_retriable_subchannel_send_batches;
+    last_callback_complete =
+        calld->num_pending_retriable_subchannel_send_batches == 0;
+  }
  // Don't need batch_data anymore.
  batch_data_unref(batch_data);
  // Schedule all of the closures identified above.
+  // Note: This yields the call combiner.
  execute_closures_in_call_combiner(elem, "on_complete", closures,
                                    num_closures);
+  // If we just completed the last subchannel send batch, unref the call stack.
+  if (last_callback_complete) {
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "subchannel_send_batches");
+  }
 }

 //
@@ -2185,13 +2229,13 @@ static void add_retriable_send_initial_metadata_op(
   grpc_metadata_batch_copy(&calld->send_initial_metadata,
                            &batch_data->send_initial_metadata,
                            batch_data->send_initial_metadata_storage);
-  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
-      nullptr) {
+  if (GPR_UNLIKELY(batch_data->send_initial_metadata.idx.named
+                       .grpc_previous_rpc_attempts != nullptr)) {
    grpc_metadata_batch_remove(
        &batch_data->send_initial_metadata,
        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
  }
-  if (calld->num_attempts_completed > 0) {
+  if (GPR_UNLIKELY(calld->num_attempts_completed > 0)) {
    grpc_mdelem retry_md = grpc_mdelem_from_slices(
        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
        *retry_count_strings[calld->num_attempts_completed - 1]);
@@ -2200,7 +2244,7 @@ static void add_retriable_send_initial_metadata_op(
       &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
                                                      .list.count],
       retry_md);
-  if (error != GRPC_ERROR_NONE) {
+  if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
    gpr_log(GPR_ERROR, "error adding retry metadata: %s",
            grpc_error_string(error));
    GPR_ASSERT(false);
@@ -2441,7 +2485,8 @@ static void add_subchannel_batches_for_pending_batches(
       // If we previously completed a recv_trailing_metadata op
       // initiated by start_internal_recv_trailing_metadata(), use the
       // result of that instead of trying to re-start this op.
-      if (retry_state->recv_trailing_metadata_internal_batch != nullptr) {
+      if (GPR_UNLIKELY((retry_state->recv_trailing_metadata_internal_batch !=
+                        nullptr))) {
        // If the batch completed, then trigger the completion callback
        // directly, so that we return the previously returned results to
        // the application. Otherwise, just unref the internally
@@ -2507,6 +2552,15 @@ static void add_subchannel_batches_for_pending_batches(
     }
     add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
                                      num_closures);
+    // Track number of pending subchannel send batches.
+    // If this is the first one, take a ref to the call stack.
+    if (batch->send_initial_metadata || batch->send_message ||
+        batch->send_trailing_metadata) {
+      if (calld->num_pending_retriable_subchannel_send_batches == 0) {
+        GRPC_CALL_STACK_REF(calld->owning_call, "subchannel_send_batches");
+      }
+      ++calld->num_pending_retriable_subchannel_send_batches;
+    }
  }
 }
@@ -2534,6 +2588,12 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
   if (replay_batch_data != nullptr) {
     add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
                                      &num_closures);
+    // Track number of pending subchannel send batches.
+    // If this is the first one, take a ref to the call stack.
+    if (calld->num_pending_retriable_subchannel_send_batches == 0) {
+      GRPC_CALL_STACK_REF(calld->owning_call, "subchannel_send_batches");
+    }
+    ++calld->num_pending_retriable_subchannel_send_batches;
  }
  // Now add pending batches.
  add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
@@ -2574,7 +2634,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
     gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
             chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
-  if (new_error != GRPC_ERROR_NONE) {
+  if (GPR_UNLIKELY(new_error != GRPC_ERROR_NONE)) {
    new_error = grpc_error_add_child(new_error, error);
    pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
  } else {
@@ -2595,7 +2655,7 @@ static void pick_done(void* arg, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  if (calld->pick.connected_subchannel == nullptr) {
+  if (GPR_UNLIKELY(calld->pick.connected_subchannel == nullptr)) {
    // Failed to create subchannel.
    // If there was no error, this is an LB policy drop, in which case
    // we return an error; otherwise, we may retry.
@@ -2624,59 +2684,133 @@ static void pick_done(void* arg, grpc_error* error) {
   }
 }

+static void maybe_add_call_to_channel_interested_parties_locked(
+    grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (!calld->pollent_added_to_interested_parties) {
+    calld->pollent_added_to_interested_parties = true;
+    grpc_polling_entity_add_to_pollset_set(calld->pollent,
+                                           chand->interested_parties);
+  }
+}
+
+static void maybe_del_call_from_channel_interested_parties_locked(
+    grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (calld->pollent_added_to_interested_parties) {
+    calld->pollent_added_to_interested_parties = false;
+    grpc_polling_entity_del_from_pollset_set(calld->pollent,
+                                             chand->interested_parties);
+  }
+}
+
 // Invoked when a pick is completed to leave the client_channel combiner
 // and continue processing in the call combiner.
+// If needed, removes the call's polling entity from chand->interested_parties.
 static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
+  maybe_del_call_from_channel_interested_parties_locked(elem);
   GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
                     grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
 }

-// A wrapper around pick_done_locked() that is used in cases where
-// either (a) the pick was deferred pending a resolver result or (b) the
-// pick was done asynchronously. Removes the call's polling entity from
-// chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  grpc_polling_entity_del_from_pollset_set(calld->pollent,
-                                           chand->interested_parties);
-  pick_done_locked(elem, error);
-}
+namespace grpc_core {

-// ...
-...
-// in which case we will be cancelling the pick on a policy other than
-// the one we started it on. However, this will just be a no-op.
-  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
+// Performs subchannel pick via LB policy.
+class LbPicker {
+ public:
+  // Starts a pick on chand->lb_policy.
+  static void StartLocked(grpc_call_element* elem) {
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_INFO, "chand=%p calld=%p:
+      gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p",
              chand, calld, chand->lb_policy.get());
    }
-    ...
+    // If this is a retry, use the send_initial_metadata payload that
+    // we've cached; otherwise, use the pending batch. The
+    // send_initial_metadata batch will be the first pending batch in the
+    // list, as set by get_batch_index() above.
+    calld->pick.initial_metadata =
+        calld->seen_send_initial_metadata
+            ? &calld->send_initial_metadata
+            : calld->pending_batches[0]
+                  .batch->payload->send_initial_metadata.send_initial_metadata;
+    calld->pick.initial_metadata_flags =
+        calld->seen_send_initial_metadata
+            ? calld->send_initial_metadata_flags
+            : calld->pending_batches[0]
+                  .batch->payload->send_initial_metadata
+                  .send_initial_metadata_flags;
+    GRPC_CLOSURE_INIT(&calld->pick_closure, &LbPicker::DoneLocked, elem,
+                      grpc_combiner_scheduler(chand->combiner));
+    calld->pick.on_complete = &calld->pick_closure;
+    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
+    const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
+    if (GPR_LIKELY(pick_done)) {
+      // Pick completed synchronously.
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
+                chand, calld);
+      }
+      pick_done_locked(elem, GRPC_ERROR_NONE);
+      GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
+    } else {
+      // Pick will be returned asynchronously.
+      // Add the polling entity from call_data to the channel_data's
+      // interested_parties, so that the I/O of the LB policy can be done
+      // under it. It will be removed in pick_done_locked().
+      maybe_add_call_to_channel_interested_parties_locked(elem);
+      // Request notification on call cancellation.
+      GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+      grpc_call_combiner_set_notify_on_cancel(
+          calld->call_combiner,
+          GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
+                            &LbPicker::CancelLocked, elem,
+                            grpc_combiner_scheduler(chand->combiner)));
+    }
  }
-  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
-}

-// ...
-...
+ private:
+  // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
+  // Unrefs the LB policy and invokes pick_done_locked().
+  static void DoneLocked(void* arg, grpc_error* error) {
+    grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously",
+              chand, calld);
+    }
+    pick_done_locked(elem, GRPC_ERROR_REF(error));
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
  }
-...
+
+  // Note: This runs under the client_channel combiner, but will NOT be
+  // holding the call combiner.
+  static void CancelLocked(void* arg, grpc_error* error) {
+    grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
+    // Note: chand->lb_policy may have changed since we started our pick,
+    // in which case we will be cancelling the pick on a policy other than
+    // the one we started it on. However, this will just be a no-op.
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE && chand->lb_policy != nullptr)) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: cancelling pick from LB policy %p", chand,
+                calld, chand->lb_policy.get());
+      }
+      chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
+    }
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
+  }
+};
+
+}  // namespace grpc_core

 // Applies service config to the call. Must be invoked once we know
 // that the resolver has returned results to the channel.
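
The refactored LbPicker codifies a two-outcome contract: PickLocked() returning true means the pick completed synchronously and the on_complete closure will never run; returning false means it completes later under the combiner. A simplified, runnable model of that contract (FakeLbPolicy, Pick, and CompletePendingPick are illustrative names, not the real grpc API):

    #include <functional>
    #include <iostream>

    struct Pick {
      std::function<void()> on_complete;  // used only on the async path
    };

    class FakeLbPolicy {
     public:
      explicit FakeLbPolicy(bool sync) : sync_(sync) {}
      // Returns true if the pick completed synchronously; in that case
      // on_complete is NOT invoked, mirroring the behavior LbPicker assumes.
      bool PickLocked(Pick* pick) {
        if (sync_) return true;
        pending_ = pick;  // completed later by CompletePendingPick()
        return false;
      }
      void CompletePendingPick() {
        if (pending_ != nullptr) pending_->on_complete();
      }

     private:
      bool sync_;
      Pick* pending_ = nullptr;
    };

    int main() {
      Pick pick;
      pick.on_complete = [] { std::cout << "async pick done\n"; };
      FakeLbPolicy async_policy(/*sync=*/false);
      if (!async_policy.PickLocked(&pick)) {
        // This is where LbPicker registers cancellation and adds the
        // polling entity before the combiner returns.
        async_policy.CompletePendingPick();
      }
      return 0;
    }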
@@ -2706,6 +2840,24 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
       grpc_deadline_state_reset(elem, calld->deadline);
     }
   }
+    // If the service config set wait_for_ready and the application
+    // did not explicitly set it, use the value from the service config.
+    uint32_t* send_initial_metadata_flags =
+        &calld->pending_batches[0]
+             .batch->payload->send_initial_metadata
+             .send_initial_metadata_flags;
+    if (GPR_UNLIKELY(
+            calld->method_params->wait_for_ready() !=
+                ClientChannelMethodParams::WAIT_FOR_READY_UNSET &&
+            !(*send_initial_metadata_flags &
+              GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET))) {
+      if (calld->method_params->wait_for_ready() ==
+          ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
+        *send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      } else {
+        *send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      }
+    }
  }
 }
 // If no retry policy, disable retries.
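
The precedence rule implemented by the block above reduces to: the service config's wait_for_ready applies only when the application did not set the flag explicitly. A stand-alone sketch with illustrative flag constants (not the real grpc bit values):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kWaitForReady = 1u << 0;
    constexpr uint32_t kWaitForReadyExplicitlySet = 1u << 1;

    enum class WaitForReady { kUnset, kFalse, kTrue };

    // Apply the service-config value only if the app did not set the flag.
    void ApplyServiceConfig(uint32_t* flags, WaitForReady from_config) {
      if (from_config != WaitForReady::kUnset &&
          !(*flags & kWaitForReadyExplicitlySet)) {
        if (from_config == WaitForReady::kTrue) {
          *flags |= kWaitForReady;
        } else {
          *flags &= ~kWaitForReady;
        }
      }
    }

    int main() {
      uint32_t flags = 0;
      ApplyServiceConfig(&flags, WaitForReady::kTrue);
      assert(flags & kWaitForReady);  // config wins when the app said nothing
      flags = kWaitForReadyExplicitlySet;
      ApplyServiceConfig(&flags, WaitForReady::kTrue);
      assert(!(flags & kWaitForReady));  // explicit app setting wins
      return 0;
    }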
@@ -2716,214 +2868,164 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
   }
 }

-// Starts a pick on chand->lb_policy. Returns true if pick is completed
-// synchronously.
-static bool pick_callback_start_locked(grpc_call_element* elem) {
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+// Invoked once resolver results are available.
+static void process_service_config_and_start_lb_pick_locked(
+    grpc_call_element* elem) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
-            calld, chand->lb_policy.get());
-  }
   // Only get service config data on the first attempt.
-  if (calld->num_attempts_completed == 0) {
+  if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
    apply_service_config_to_call_locked(elem);
  }
-  // ...
-  ...
-  // method, use that.
-  //
-  // The send_initial_metadata batch will be the first one in the list,
-  // as set by get_batch_index() above.
-  calld->pick.initial_metadata =
-      calld->seen_send_initial_metadata
-          ? &calld->send_initial_metadata
-          : calld->pending_batches[0]
-                .batch->payload->send_initial_metadata.send_initial_metadata;
-  uint32_t send_initial_metadata_flags =
-      calld->seen_send_initial_metadata
-          ? calld->send_initial_metadata_flags
-          : calld->pending_batches[0]
-                .batch->payload->send_initial_metadata
-                .send_initial_metadata_flags;
-  const bool wait_for_ready_set_from_api =
-      send_initial_metadata_flags &
-      GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
-  const bool wait_for_ready_set_from_service_config =
-      calld->method_params != nullptr &&
-      calld->method_params->wait_for_ready() !=
-          ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
-  if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
-    if (calld->method_params->wait_for_ready() ==
-        ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
-      send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-    } else {
-      send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-    }
-  }
-  calld->pick.initial_metadata_flags = send_initial_metadata_flags;
-  GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
-                    grpc_combiner_scheduler(chand->combiner));
-  calld->pick.on_complete = &calld->pick_closure;
-  GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
-  const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
-  if (pick_done) {
-    // Pick completed synchronously.
-    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
-              chand, calld);
-    }
-    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
-  } else {
-    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
-    grpc_call_combiner_set_notify_on_cancel(
-        calld->call_combiner,
-        GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
-                          pick_callback_cancel_locked, elem,
-                          grpc_combiner_scheduler(chand->combiner)));
-  }
-  return pick_done;
+  // Start LB pick.
+  grpc_core::LbPicker::StartLocked(elem);
 }

-...
-static void pick_after_resolver_result_cancel_locked(void* arg,
-                                                     grpc_error* error) {
-  pick_after_resolver_result_args* args =
-      static_cast<pick_after_resolver_result_args*>(arg);
-  if (args->finished) {
-    gpr_free(args);
-    return;
-  }
-  // If we don't yet have a resolver result, then a closure for
-  // pick_after_resolver_result_done_locked() will have been added to
-  // chand->waiting_for_resolver_result_closures, and it may not be invoked
-  // until after this call has been destroyed. We mark the operation as
-  // finished, so that when pick_after_resolver_result_done_locked()
-  // is called, it will be a no-op. We also immediately invoke
-  // async_pick_done_locked() to propagate the error back to the caller.
-  args->finished = true;
-  grpc_call_element* elem = args->elem;
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_INFO,
-            "chand=%p calld=%p: cancelling pick waiting for resolver result",
-            chand, calld);
-  }
-  // Note: Although we are not in the call combiner here, we are
-  // basically stealing the call combiner from the pending pick, so
-  // it's safe to call async_pick_done_locked() here -- we are
-  // essentially calling it here instead of calling it in
-  // pick_after_resolver_result_done_locked().
-  async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                                   "Pick cancelled", &error, 1));
-}
-
-static void pick_after_resolver_result_done_locked(void* arg,
-                                                   grpc_error* error) {
-  pick_after_resolver_result_args* args =
-      static_cast<pick_after_resolver_result_args*>(arg);
-  if (args->finished) {
-    /* cancelled, do nothing */
-    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_INFO, "call cancelled before resolver result");
-    }
-    gpr_free(args);
-    return;
-  }
-  args->finished = true;
-  grpc_call_element* elem = args->elem;
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  if (error != GRPC_ERROR_NONE) {
+namespace grpc_core {
+
+// Handles waiting for a resolver result.
+// Used only for the first call on an idle channel.
+class ResolverResultWaiter {
+ public:
+  explicit ResolverResultWaiter(grpc_call_element* elem) : elem_(elem) {
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_INFO,
+      gpr_log(GPR_INFO,
+              "chand=%p calld=%p: deferring pick pending resolver result",
              chand, calld);
    }
-    ...
+    // Add closure to be run when a resolver result is available.
+    GRPC_CLOSURE_INIT(&done_closure_, &ResolverResultWaiter::DoneLocked, this,
+                      grpc_combiner_scheduler(chand->combiner));
+    AddToWaitingList();
+    // Set cancellation closure, so that we abort if the call is cancelled.
+    GRPC_CLOSURE_INIT(&cancel_closure_, &ResolverResultWaiter::CancelLocked,
+                      this, grpc_combiner_scheduler(chand->combiner));
+    grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
+                                            &cancel_closure_);
+  }
+
+ private:
+  // Adds closure_ to chand->waiting_for_resolver_result_closures.
+  void AddToWaitingList() {
+    channel_data* chand = static_cast<channel_data*>(elem_->channel_data);
+    grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+                             &done_closure_, GRPC_ERROR_NONE);
+  }
+
+  // Invoked when a resolver result is available.
+  static void DoneLocked(void* arg, grpc_error* error) {
+    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
+    // If CancelLocked() has already run, delete ourselves without doing
+    // anything. Note that the call stack may have already been destroyed,
+    // so it's not safe to access anything in elem_.
+    if (GPR_UNLIKELY(self->finished_)) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO, "call cancelled before resolver result");
+      }
+      Delete(self);
+      return;
    }
-    ...
-    uint32_t send_initial_metadata_flags =
-        calld->seen_send_initial_metadata
-            ? calld->send_initial_metadata_flags
-            : calld->pending_batches[0]
-                  .batch->payload->send_initial_metadata
-                  .send_initial_metadata_flags;
-    if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
+    // Otherwise, process the resolver result.
+    grpc_call_element* elem = self->elem_;
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
      if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_INFO,
-                "chand=%p calld=%p: resolver returned but no LB policy; "
-                "wait_for_ready=true; trying again",
+        gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
                chand, calld);
      }
-      ...
+      pick_done_locked(elem, GRPC_ERROR_REF(error));
+    } else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
+      // Shutting down.
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
+                calld);
+      }
+      pick_done_locked(elem,
+                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+    } else if (GPR_UNLIKELY(chand->lb_policy == nullptr)) {
+      // Transient resolver failure.
+      // If call has wait_for_ready=true, try again; otherwise, fail.
+      uint32_t send_initial_metadata_flags =
+          calld->seen_send_initial_metadata
+              ? calld->send_initial_metadata_flags
+              : calld->pending_batches[0]
+                    .batch->payload->send_initial_metadata
+                    .send_initial_metadata_flags;
+      if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
+        if (grpc_client_channel_trace.enabled()) {
+          gpr_log(GPR_INFO,
+                  "chand=%p calld=%p: resolver returned but no LB policy; "
+                  "wait_for_ready=true; trying again",
+                  chand, calld);
+        }
+        // Re-add ourselves to the waiting list.
+        self->AddToWaitingList();
+        // Return early so that we don't set finished_ to true below.
+        return;
+      } else {
+        if (grpc_client_channel_trace.enabled()) {
+          gpr_log(GPR_INFO,
+                  "chand=%p calld=%p: resolver returned but no LB policy; "
+                  "wait_for_ready=false; failing",
+                  chand, calld);
+        }
+        pick_done_locked(
+            elem,
+            grpc_error_set_int(
+                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
+                GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+      }
    } else {
      if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_INFO,
-                "chand=%p calld=%p: resolver returned but no LB policy; "
-                "wait_for_ready=false; failing",
+        gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing LB pick",
                chand, calld);
      }
-      ...
-          elem,
-          grpc_error_set_int(
-              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
-              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+      process_service_config_and_start_lb_pick_locked(elem);
    }
-    ...
+    self->finished_ = true;
+  }
+
+  // Invoked when the call is cancelled.
+  // Note: This runs under the client_channel combiner, but will NOT be
+  // holding the call combiner.
+  static void CancelLocked(void* arg, grpc_error* error) {
+    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
+    // If DoneLocked() has already run, delete ourselves without doing anything.
+    if (GPR_LIKELY(self->finished_)) {
+      Delete(self);
+      return;
    }
-    ...
+    // If we are being cancelled, immediately invoke pick_done_locked()
+    // to propagate the error back to the caller.
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
+      grpc_call_element* elem = self->elem_;
+      channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+      call_data* calld = static_cast<call_data*>(elem->call_data);
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: cancelling call waiting for name "
+                "resolution",
+                chand, calld);
+      }
+      // Note: Although we are not in the call combiner here, we are
+      // basically stealing the call combiner from the pending pick, so
+      // it's safe to call pick_done_locked() here -- we are essentially
+      // calling it here instead of calling it in DoneLocked().
+      pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                 "Pick cancelled", &error, 1));
    }
+    self->finished_ = true;
  }
-}

-...
-  pick_after_resolver_result_args* args =
-      static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
-  args->elem = elem;
-  GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
-                    args, grpc_combiner_scheduler(chand->combiner));
-  grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
-                           &args->closure, GRPC_ERROR_NONE);
-  grpc_call_combiner_set_notify_on_cancel(
-      calld->call_combiner,
-      GRPC_CLOSURE_INIT(&args->cancel_closure,
-                        pick_after_resolver_result_cancel_locked, args,
-                        grpc_combiner_scheduler(chand->combiner)));
-}
+  grpc_call_element* elem_;
+  grpc_closure done_closure_;
+  grpc_closure cancel_closure_;
+  bool finished_ = false;
+};
+
+}  // namespace grpc_core

 static void start_pick_locked(void* arg, grpc_error* ignored) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
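
ResolverResultWaiter's lifetime hinges on the finished_ flag: DoneLocked() and CancelLocked() both eventually run, serialized by the combiner, and whichever runs second deletes the waiter. A stripped-down model of that handshake (simplified stand-in type; the real class also re-adds itself to the waiting list on transient resolver failures instead of setting the flag):

    // Both callbacks run exactly once, serialized by the combiner; the
    // second one, observing finished == true, frees the object.
    struct Waiter {
      bool finished = false;
    };

    void DoneLocked(Waiter* w) {
      if (w->finished) {  // CancelLocked() already ran; just clean up.
        delete w;
        return;
      }
      // ... process the resolver result and start the LB pick ...
      w->finished = true;
    }

    void CancelLocked(Waiter* w, bool cancelled) {
      if (w->finished) {  // DoneLocked() already ran; just clean up.
        delete w;
        return;
      }
      if (cancelled) {
        // ... propagate "Pick cancelled" back to the caller ...
      }
      w->finished = true;
    }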
@@ -2931,32 +3033,25 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
   GPR_ASSERT(calld->subchannel_call == nullptr);
-  if (chand->lb_policy != nullptr) {
-    // We already have an LB policy, so ask it for a pick.
-    if (pick_callback_start_locked(elem)) {
-      // Pick completed synchronously, so process the result immediately.
-      pick_done_locked(elem, GRPC_ERROR_NONE);
-      return;
-    }
+  if (GPR_LIKELY(chand->lb_policy != nullptr)) {
+    // We already have resolver results, so process the service config
+    // and start an LB pick.
+    process_service_config_and_start_lb_pick_locked(elem);
+  } else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
+    pick_done_locked(elem,
+                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
  } else {
    // We do not yet have an LB policy, so wait for a resolver result.
-    if (chand->resolver == nullptr) {
-      pick_done_locked(elem,
-                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
-      return;
-    }
-    if (!chand->started_resolving) {
+    if (GPR_UNLIKELY(!chand->started_resolving)) {
      start_resolving_locked(chand);
    }
-    pick_after_resolver_result_start_locked(elem);
+    // Create a new waiter, which will delete itself when done.
+    grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
+    // Add the polling entity from call_data to the channel_data's
+    // interested_parties, so that the I/O of the resolver can be done
+    // under it. It will be removed in pick_done_locked().
+    maybe_add_call_to_channel_interested_parties_locked(elem);
  }
-  // We need to wait for either a resolver result or for an async result
-  // from the LB policy. Add the polling entity from call_data to the
-  // channel_data's interested_parties, so that the I/O of the LB policy
-  // and resolver can be done under it. The polling entity will be
-  // removed in async_pick_done_locked().
-  grpc_polling_entity_add_to_pollset_set(calld->pollent,
-                                         chand->interested_parties);
 }

 //
@@ -2968,11 +3063,11 @@ static void cc_start_transport_stream_op_batch(
   GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
    grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
  }
  // If we've previously been cancelled, immediately fail any new batches.
-  if (calld->cancel_error != GRPC_ERROR_NONE) {
+  if (GPR_UNLIKELY(calld->cancel_error != GRPC_ERROR_NONE)) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
              chand, calld, grpc_error_string(calld->cancel_error));
@@ -2983,7 +3078,7 @@ static void cc_start_transport_stream_op_batch(
     return;
   }
   // Handle cancellation.
-  if (batch->cancel_stream) {
+  if (GPR_UNLIKELY(batch->cancel_stream)) {
    // Stash a copy of cancel_error in our call data, so that we can use
    // it for subsequent operations. This ensures that if the call is
    // cancelled before any batches are passed down (e.g., if the deadline
@@ -3029,7 +3124,7 @@ static void cc_start_transport_stream_op_batch(
   // We do not yet have a subchannel call.
   // For batches containing a send_initial_metadata op, enter the channel
   // combiner to start a pick.
-  if (batch->send_initial_metadata) {
+  if (GPR_LIKELY(batch->send_initial_metadata)) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
              chand, calld);
@@ -3062,7 +3157,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
   calld->arena = args->arena;
   calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                             calld->deadline);
  }
@@ -3077,14 +3172,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
                                  grpc_closure* then_schedule_closure) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
    grpc_deadline_state_destroy(elem);
  }
  grpc_slice_unref_internal(calld->path);
  calld->retry_throttle_data.reset();
  calld->method_params.reset();
  GRPC_ERROR_UNREF(calld->cancel_error);
-  if (calld->subchannel_call != nullptr) {
+  if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                             then_schedule_closure);
    then_schedule_closure = nullptr;
@@ -3094,7 +3189,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
   }
-  if (calld->pick.connected_subchannel != nullptr) {
+  if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
    calld->pick.connected_subchannel.reset();
  }
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
@@ -3242,7 +3337,7 @@ static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
                             "external_connectivity_watcher");
   external_connectivity_watcher_list_remove(w->chand, w);
   gpr_free(w);
-  ...
+  GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
 }

 static void watch_connectivity_state_locked(void* arg,
|