grpc-flamingo 1.11.0 → 1.15.0
- checksums.yaml +4 -4
- data/Makefile +1150 -176
- data/etc/roots.pem +40 -196
- data/include/grpc/grpc.h +49 -8
- data/include/grpc/grpc_security.h +123 -2
- data/include/grpc/grpc_security_constants.h +6 -0
- data/include/grpc/impl/codegen/fork.h +4 -4
- data/include/grpc/impl/codegen/grpc_types.h +26 -5
- data/include/grpc/impl/codegen/log.h +112 -0
- data/include/grpc/impl/codegen/port_platform.h +55 -4
- data/include/grpc/module.modulemap +2 -0
- data/include/grpc/support/log.h +2 -80
- data/include/grpc/support/string_util.h +2 -0
- data/include/grpc/support/sync.h +0 -16
- data/src/boringssl/err_data.c +602 -588
- data/src/core/ext/{census → filters/census}/grpc_context.cc +0 -0
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +1 -1
- data/src/core/ext/filters/client_channel/client_channel.cc +1234 -1070
- data/src/core/ext/filters/client_channel/client_channel.h +5 -0
- data/src/core/ext/filters/client_channel/client_channel_channelz.cc +113 -0
- data/src/core/ext/filters/client_channel/client_channel_channelz.h +71 -0
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +9 -0
- data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -1
- data/src/core/ext/filters/client_channel/http_proxy.cc +22 -5
- data/src/core/ext/filters/client_channel/lb_policy.cc +2 -2
- data/src/core/ext/filters/client_channel/lb_policy.h +30 -10
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +11 -9
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +120 -127
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +36 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +36 -102
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +37 -32
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +25 -22
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +4 -2
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c +19 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h +54 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c +19 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h +54 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +4 -17
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +37 -63
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +306 -239
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +444 -392
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +538 -98
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +8 -0
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -0
- data/src/core/ext/filters/client_channel/method_params.h +4 -0
- data/src/core/ext/filters/client_channel/resolver.h +10 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +36 -19
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +320 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +62 -9
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +49 -294
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +537 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +112 -87
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +17 -2
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +6 -5
- data/src/core/ext/filters/{load_reporting/server_load_reporting_filter.h → client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc} +7 -8
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +29 -0
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +32 -15
- data/src/core/ext/filters/client_channel/retry_throttle.h +4 -0
- data/src/core/ext/filters/client_channel/subchannel.cc +58 -15
- data/src/core/ext/filters/client_channel/subchannel.h +11 -0
- data/src/core/ext/filters/deadline/deadline_filter.cc +18 -15
- data/src/core/ext/filters/deadline/deadline_filter.h +5 -5
- data/src/core/ext/filters/http/client/http_client_filter.cc +10 -9
- data/src/core/ext/filters/http/client_authority_filter.cc +6 -5
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +6 -6
- data/src/core/ext/filters/http/server/http_server_filter.cc +123 -131
- data/src/core/ext/filters/http/server/http_server_filter.h +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +1 -1
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +3 -2
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +9 -8
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +97 -48
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +10 -7
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +3 -3
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +12 -8
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +4 -3
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +2 -2
- data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -2
- data/src/core/ext/transport/chttp2/transport/parsing.cc +14 -12
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +3 -3
- data/src/core/ext/transport/chttp2/transport/writing.cc +32 -27
- data/src/core/ext/transport/inproc/inproc_transport.cc +87 -49
- data/src/core/lib/channel/channel_args.cc +28 -0
- data/src/core/lib/channel/channel_args.h +4 -0
- data/src/core/lib/channel/channel_stack.cc +22 -29
- data/src/core/lib/channel/channel_stack.h +2 -2
- data/src/core/lib/channel/channel_stack_builder.cc +0 -3
- data/src/core/lib/channel/channel_stack_builder.h +0 -2
- data/src/core/lib/channel/channel_trace.cc +28 -63
- data/src/core/lib/channel/channel_trace.h +13 -17
- data/src/core/lib/channel/channelz.cc +153 -0
- data/src/core/lib/channel/channelz.h +133 -0
- data/src/core/lib/channel/channelz_registry.cc +145 -0
- data/src/core/lib/channel/channelz_registry.h +120 -0
- data/src/core/lib/channel/connected_channel.cc +8 -1
- data/src/core/lib/channel/handshaker.cc +71 -0
- data/src/core/lib/channel/handshaker.h +4 -0
- data/src/core/lib/debug/stats.h +7 -0
- data/src/core/lib/debug/stats_data.cc +5 -0
- data/src/core/lib/debug/stats_data.h +120 -0
- data/src/core/lib/debug/trace.cc +2 -1
- data/src/core/lib/debug/trace.h +12 -1
- data/src/core/lib/gpr/alloc.h +28 -0
- data/src/core/lib/gpr/arena.cc +38 -45
- data/src/core/lib/gpr/log.cc +8 -2
- data/src/core/lib/gpr/log_android.cc +4 -0
- data/src/core/lib/gpr/log_linux.cc +4 -0
- data/src/core/lib/gpr/log_posix.cc +4 -0
- data/src/core/lib/gpr/log_windows.cc +5 -0
- data/src/core/lib/gpr/string.cc +28 -0
- data/src/core/lib/gpr/string.h +10 -0
- data/src/core/lib/gprpp/abstract.h +5 -2
- data/src/core/lib/gprpp/fork.cc +268 -0
- data/src/core/lib/gprpp/fork.h +88 -0
- data/src/core/lib/gprpp/inlined_vector.h +87 -37
- data/src/core/lib/gprpp/memory.h +12 -0
- data/src/core/lib/gprpp/mutex_lock.h +42 -0
- data/src/core/lib/gprpp/orphanable.h +10 -12
- data/src/core/lib/gprpp/ref_counted.h +10 -12
- data/src/core/lib/gprpp/ref_counted_ptr.h +65 -8
- data/src/core/lib/gprpp/thd.h +0 -3
- data/src/core/lib/gprpp/thd_posix.cc +5 -54
- data/src/core/lib/gprpp/thd_windows.cc +0 -7
- data/src/core/lib/http/httpcli_security_connector.cc +1 -3
- data/src/core/lib/iomgr/call_combiner.cc +13 -13
- data/src/core/lib/iomgr/call_combiner.h +84 -1
- data/src/core/lib/iomgr/closure.h +6 -5
- data/src/core/lib/iomgr/combiner.cc +30 -13
- data/src/core/lib/iomgr/combiner.h +1 -1
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
- data/src/core/lib/iomgr/error.cc +12 -0
- data/src/core/lib/iomgr/error.h +5 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +138 -51
- data/src/core/lib/iomgr/ev_epollex_linux.cc +276 -93
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +58 -50
- data/src/core/lib/iomgr/ev_poll_posix.cc +163 -42
- data/src/core/lib/iomgr/ev_posix.cc +88 -24
- data/src/core/lib/iomgr/ev_posix.h +48 -12
- data/src/core/lib/iomgr/exec_ctx.cc +15 -9
- data/src/core/lib/iomgr/exec_ctx.h +48 -20
- data/src/core/lib/iomgr/executor.cc +274 -142
- data/src/core/lib/iomgr/executor.h +82 -16
- data/src/core/lib/iomgr/fork_posix.cc +42 -19
- data/src/core/lib/iomgr/iocp_windows.cc +9 -4
- data/src/core/lib/iomgr/iomgr.cc +2 -0
- data/src/core/lib/iomgr/iomgr.h +5 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +2 -2
- data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -0
- data/src/core/lib/iomgr/lockfree_event.cc +5 -1
- data/src/core/lib/iomgr/polling_entity.cc +11 -2
- data/src/core/lib/iomgr/pollset_custom.cc +2 -2
- data/src/core/lib/iomgr/port.h +51 -1
- data/src/core/lib/iomgr/resolve_address.h +1 -1
- data/src/core/lib/iomgr/resolve_address_posix.cc +4 -3
- data/src/core/lib/iomgr/resolve_address_windows.cc +3 -2
- data/src/core/lib/iomgr/resource_quota.cc +89 -12
- data/src/core/lib/iomgr/resource_quota.h +16 -0
- data/src/core/lib/iomgr/sockaddr_posix.h +1 -1
- data/src/core/lib/iomgr/socket_factory_posix.cc +1 -1
- data/src/core/lib/iomgr/socket_mutator.cc +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +1 -1
- data/src/core/lib/iomgr/socket_utils.h +9 -0
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +29 -1
- data/src/core/lib/iomgr/socket_utils_linux.cc +0 -1
- data/src/core/lib/iomgr/socket_utils_posix.cc +2 -3
- data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
- data/src/core/lib/iomgr/socket_utils_uv.cc +4 -0
- data/src/core/lib/iomgr/socket_utils_windows.cc +4 -0
- data/src/core/lib/iomgr/socket_windows.cc +33 -0
- data/src/core/lib/iomgr/socket_windows.h +6 -0
- data/src/core/lib/iomgr/tcp_client_custom.cc +5 -5
- data/src/core/lib/iomgr/tcp_client_posix.cc +10 -11
- data/src/core/lib/iomgr/tcp_custom.cc +11 -11
- data/src/core/lib/iomgr/tcp_posix.cc +49 -36
- data/src/core/lib/iomgr/tcp_server_custom.cc +5 -5
- data/src/core/lib/iomgr/tcp_server_posix.cc +16 -36
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +1 -1
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +17 -5
- data/src/core/lib/iomgr/tcp_server_windows.cc +1 -0
- data/src/core/lib/iomgr/tcp_uv.cc +3 -0
- data/src/core/lib/iomgr/tcp_windows.cc +18 -2
- data/src/core/lib/iomgr/tcp_windows.h +2 -0
- data/src/core/lib/iomgr/timer.h +4 -3
- data/src/core/lib/iomgr/timer_generic.cc +133 -51
- data/src/core/lib/iomgr/timer_manager.cc +12 -14
- data/src/core/lib/iomgr/timer_uv.cc +3 -0
- data/src/core/lib/iomgr/udp_server.cc +106 -52
- data/src/core/lib/iomgr/udp_server.h +8 -4
- data/src/core/lib/json/json.cc +12 -1
- data/src/core/lib/json/json.h +5 -0
- data/src/core/lib/profiling/basic_timers.cc +1 -0
- data/src/core/lib/security/context/security_context.cc +8 -8
- data/src/core/lib/security/context/security_context.h +6 -2
- data/src/core/lib/security/credentials/alts/alts_credentials.h +0 -20
- data/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc +3 -2
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +7 -7
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +1 -38
- data/src/core/lib/security/credentials/credentials.h +1 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +89 -115
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +16 -0
- data/src/core/lib/security/credentials/jwt/json_token.h +2 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
- data/src/core/lib/security/credentials/local/local_credentials.cc +77 -0
- data/src/core/lib/security/credentials/local/local_credentials.h +40 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +11 -7
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +1 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +17 -3
- data/src/core/lib/security/security_connector/alts_security_connector.cc +2 -1
- data/src/core/lib/security/security_connector/load_system_roots.h +29 -0
- data/src/core/lib/{gpr/fork.h → security/security_connector/load_system_roots_fallback.cc} +10 -13
- data/src/core/lib/security/security_connector/load_system_roots_linux.cc +165 -0
- data/src/core/lib/security/security_connector/load_system_roots_linux.h +44 -0
- data/src/core/lib/security/security_connector/local_security_connector.cc +245 -0
- data/src/core/lib/security/security_connector/local_security_connector.h +58 -0
- data/src/core/lib/security/security_connector/security_connector.cc +79 -32
- data/src/core/lib/security/security_connector/security_connector.h +5 -3
- data/src/core/lib/security/transport/client_auth_filter.cc +5 -5
- data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
- data/src/core/lib/security/transport/security_handshaker.cc +7 -2
- data/src/core/lib/security/transport/server_auth_filter.cc +4 -7
- data/src/core/lib/security/util/json_util.cc +4 -0
- data/src/core/lib/slice/slice.cc +6 -2
- data/src/core/lib/slice/slice_buffer.cc +27 -7
- data/src/core/lib/slice/slice_hash_table.h +4 -0
- data/src/core/lib/slice/slice_weak_hash_table.h +4 -0
- data/src/core/lib/surface/call.cc +119 -58
- data/src/core/lib/surface/call.h +7 -0
- data/src/core/lib/surface/channel.cc +50 -18
- data/src/core/lib/surface/channel.h +4 -0
- data/src/core/lib/surface/completion_queue.cc +153 -18
- data/src/core/lib/surface/completion_queue.h +20 -2
- data/src/core/lib/surface/completion_queue_factory.cc +13 -4
- data/src/core/lib/surface/init.cc +7 -8
- data/src/core/lib/surface/init.h +0 -1
- data/src/core/lib/surface/server.cc +16 -0
- data/src/core/lib/surface/version.cc +1 -1
- data/src/core/lib/transport/bdp_estimator.cc +3 -3
- data/src/core/lib/transport/bdp_estimator.h +2 -2
- data/src/core/lib/transport/byte_stream.cc +1 -1
- data/src/core/lib/transport/connectivity_state.cc +6 -7
- data/src/core/lib/transport/service_config.cc +2 -2
- data/src/core/lib/transport/service_config.h +3 -3
- data/src/core/lib/transport/transport.cc +22 -10
- data/src/core/lib/transport/transport.h +18 -18
- data/src/core/lib/transport/transport_op_string.cc +1 -8
- data/src/core/plugin_registry/grpc_plugin_registry.cc +0 -4
- data/src/core/tsi/alts/crypt/aes_gcm.cc +2 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +19 -7
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +10 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +2 -2
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +38 -3
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +3 -0
- data/src/core/tsi/alts/handshaker/altscontext.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/altscontext.pb.h +1 -2
- data/src/core/tsi/alts/handshaker/handshaker.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/handshaker.pb.h +1 -2
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +1 -1
- data/src/core/tsi/alts/handshaker/transport_security_common_api.h +2 -2
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +47 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +3 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +12 -11
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +7 -2
- data/src/core/tsi/fake_transport_security.cc +1 -0
- data/src/core/tsi/grpc_shadow_boringssl.h +3006 -0
- data/src/core/tsi/local_transport_security.cc +209 -0
- data/src/core/tsi/local_transport_security.h +51 -0
- data/src/core/tsi/ssl/session_cache/ssl_session.h +2 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +5 -5
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +6 -0
- data/src/core/tsi/ssl_transport_security.cc +245 -116
- data/src/core/tsi/ssl_types.h +2 -0
- data/src/core/tsi/transport_security.cc +14 -0
- data/src/core/tsi/transport_security.h +2 -0
- data/src/core/tsi/transport_security_interface.h +11 -1
- data/src/ruby/bin/math_client.rb +17 -9
- data/src/ruby/ext/grpc/extconf.rb +1 -26
- data/src/ruby/ext/grpc/rb_channel_credentials.c +3 -3
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +42 -16
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +65 -26
- data/src/ruby/lib/grpc/generic/active_call.rb +19 -23
- data/src/ruby/lib/grpc/generic/rpc_server.rb +2 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/generate_proto_ruby.sh +7 -1
- data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +4 -1
- data/src/ruby/spec/call_credentials_spec.rb +1 -1
- data/src/ruby/spec/call_spec.rb +1 -1
- data/src/ruby/spec/channel_credentials_spec.rb +1 -1
- data/src/ruby/spec/channel_spec.rb +1 -1
- data/src/ruby/spec/client_auth_spec.rb +1 -12
- data/src/ruby/spec/client_server_spec.rb +1 -1
- data/src/ruby/spec/compression_options_spec.rb +1 -1
- data/src/ruby/spec/error_sanity_spec.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +16 -4
- data/src/ruby/spec/generic/rpc_desc_spec.rb +1 -1
- data/src/ruby/spec/generic/rpc_server_pool_spec.rb +1 -1
- data/src/ruby/spec/generic/service_spec.rb +1 -1
- data/src/ruby/spec/google_rpc_status_utils_spec.rb +1 -12
- data/src/ruby/spec/pb/duplicate/codegen_spec.rb +1 -0
- data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
- data/src/ruby/spec/server_credentials_spec.rb +1 -1
- data/src/ruby/spec/server_spec.rb +1 -1
- data/src/ruby/spec/spec_helper.rb +1 -0
- data/src/ruby/spec/support/services.rb +1 -1
- data/src/ruby/spec/time_consts_spec.rb +1 -1
- data/third_party/address_sorting/address_sorting.c +17 -11
- data/third_party/address_sorting/address_sorting_windows.c +43 -3
- data/third_party/address_sorting/include/address_sorting/address_sorting.h +3 -0
- data/third_party/boringssl/crypto/asn1/a_int.c +33 -28
- data/third_party/boringssl/crypto/asn1/a_mbstr.c +24 -22
- data/third_party/boringssl/crypto/asn1/a_utf8.c +13 -11
- data/third_party/boringssl/crypto/asn1/asn1_locl.h +3 -0
- data/third_party/boringssl/crypto/asn1/tasn_dec.c +40 -19
- data/third_party/boringssl/crypto/bio/fd.c +1 -0
- data/third_party/boringssl/crypto/bio/file.c +2 -0
- data/third_party/boringssl/crypto/bn_extra/convert.c +6 -5
- data/third_party/boringssl/crypto/bytestring/ber.c +1 -4
- data/third_party/boringssl/crypto/bytestring/cbb.c +116 -16
- data/third_party/boringssl/crypto/bytestring/cbs.c +151 -20
- data/third_party/boringssl/crypto/cipher_extra/e_aesccm.c +203 -0
- data/third_party/boringssl/crypto/cipher_extra/e_rc2.c +2 -0
- data/third_party/boringssl/crypto/cipher_extra/e_tls.c +1 -2
- data/third_party/boringssl/crypto/cpu-aarch64-fuchsia.c +55 -0
- data/third_party/boringssl/crypto/cpu-aarch64-linux.c +2 -1
- data/third_party/boringssl/crypto/dsa/dsa.c +16 -54
- data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +9 -10
- data/third_party/boringssl/crypto/ecdh/ecdh.c +4 -3
- data/third_party/boringssl/crypto/fipsmodule/bcm.c +11 -542
- data/third_party/boringssl/crypto/fipsmodule/bn/add.c +57 -112
- data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +4 -3
- data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +128 -70
- data/third_party/boringssl/crypto/fipsmodule/bn/bytes.c +32 -71
- data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +64 -118
- data/third_party/boringssl/crypto/fipsmodule/bn/div.c +284 -122
- data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +31 -65
- data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +274 -218
- data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +2 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +187 -27
- data/third_party/boringssl/crypto/fipsmodule/bn/jacobi.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +124 -81
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +8 -30
- data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +321 -347
- data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +326 -66
- data/third_party/boringssl/crypto/fipsmodule/bn/random.c +77 -25
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.c +199 -222
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.h +27 -47
- data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +155 -96
- data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/cipher/e_aes.c +10 -10
- data/third_party/boringssl/crypto/fipsmodule/des/internal.h +2 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +78 -47
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +99 -163
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +3 -10
- data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +44 -23
- data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +59 -90
- data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +38 -65
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64-table.h +5378 -5418
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +17 -26
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.h +15 -11
- data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +45 -51
- data/third_party/boringssl/crypto/fipsmodule/ec/{util-64.c → util.c} +0 -5
- data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +144 -264
- data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +78 -62
- data/third_party/boringssl/crypto/fipsmodule/modes/ccm.c +256 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/internal.h +36 -32
- data/third_party/boringssl/crypto/fipsmodule/rand/ctrdrbg.c +9 -7
- data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +16 -40
- data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +1 -6
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +57 -39
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +309 -142
- data/third_party/boringssl/crypto/fipsmodule/self_check/self_check.c +581 -0
- data/third_party/boringssl/crypto/fipsmodule/tls/internal.h +39 -0
- data/third_party/boringssl/crypto/fipsmodule/tls/kdf.c +165 -0
- data/third_party/boringssl/crypto/internal.h +65 -2
- data/third_party/boringssl/crypto/mem.c +0 -2
- data/third_party/boringssl/crypto/obj/obj.c +6 -73
- data/third_party/boringssl/crypto/thread_pthread.c +35 -5
- data/third_party/boringssl/crypto/x509/a_strex.c +11 -11
- data/third_party/boringssl/crypto/x509/vpm_int.h +1 -0
- data/third_party/boringssl/crypto/x509/x509_vfy.c +4 -0
- data/third_party/boringssl/crypto/x509/x509_vpm.c +44 -22
- data/third_party/boringssl/crypto/x509/x_name.c +13 -0
- data/third_party/boringssl/include/openssl/aead.h +10 -0
- data/third_party/boringssl/include/openssl/asn1.h +2 -3
- data/third_party/boringssl/include/openssl/base.h +5 -14
- data/third_party/boringssl/include/openssl/bio.h +1 -1
- data/third_party/boringssl/include/openssl/bn.h +62 -18
- data/third_party/boringssl/include/openssl/bytestring.h +53 -28
- data/third_party/boringssl/include/openssl/crypto.h +4 -0
- data/third_party/boringssl/include/openssl/ec.h +10 -4
- data/third_party/boringssl/include/openssl/ec_key.h +7 -6
- data/third_party/boringssl/include/openssl/err.h +9 -9
- data/third_party/boringssl/include/openssl/evp.h +1 -1
- data/third_party/boringssl/include/openssl/rsa.h +35 -10
- data/third_party/boringssl/include/openssl/ssl.h +167 -19
- data/third_party/boringssl/include/openssl/ssl3.h +0 -1
- data/third_party/boringssl/include/openssl/stack.h +1 -1
- data/third_party/boringssl/include/openssl/tls1.h +10 -2
- data/third_party/boringssl/include/openssl/x509.h +4 -0
- data/third_party/boringssl/include/openssl/x509v3.h +1 -0
- data/third_party/boringssl/ssl/d1_both.cc +16 -2
- data/third_party/boringssl/ssl/dtls_method.cc +1 -1
- data/third_party/boringssl/ssl/handoff.cc +285 -0
- data/third_party/boringssl/ssl/handshake.cc +26 -12
- data/third_party/boringssl/ssl/handshake_client.cc +101 -95
- data/third_party/boringssl/ssl/handshake_server.cc +14 -2
- data/third_party/boringssl/ssl/internal.h +132 -79
- data/third_party/boringssl/ssl/s3_both.cc +2 -2
- data/third_party/boringssl/ssl/s3_lib.cc +3 -1
- data/third_party/boringssl/ssl/s3_pkt.cc +0 -18
- data/third_party/boringssl/ssl/ssl_aead_ctx.cc +1 -4
- data/third_party/boringssl/ssl/ssl_asn1.cc +47 -43
- data/third_party/boringssl/ssl/ssl_cipher.cc +12 -8
- data/third_party/boringssl/ssl/ssl_key_share.cc +3 -1
- data/third_party/boringssl/ssl/ssl_lib.cc +83 -14
- data/third_party/boringssl/ssl/ssl_privkey.cc +6 -0
- data/third_party/boringssl/ssl/ssl_stat.cc +6 -6
- data/third_party/boringssl/ssl/ssl_versions.cc +12 -85
- data/third_party/boringssl/ssl/ssl_x509.cc +59 -61
- data/third_party/boringssl/ssl/t1_enc.cc +73 -124
- data/third_party/boringssl/ssl/t1_lib.cc +367 -41
- data/third_party/boringssl/ssl/tls13_both.cc +8 -0
- data/third_party/boringssl/ssl/tls13_client.cc +98 -184
- data/third_party/boringssl/ssl/tls13_enc.cc +88 -158
- data/third_party/boringssl/ssl/tls13_server.cc +91 -137
- data/third_party/boringssl/ssl/tls_method.cc +0 -17
- data/third_party/boringssl/ssl/tls_record.cc +1 -10
- data/third_party/boringssl/third_party/fiat/curve25519.c +921 -2753
- data/third_party/boringssl/third_party/fiat/curve25519_tables.h +7880 -0
- data/third_party/boringssl/third_party/fiat/internal.h +32 -20
- data/third_party/boringssl/third_party/fiat/p256.c +1824 -0
- metadata +86 -71
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +0 -253
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +0 -222
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +0 -71
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -61
- data/src/core/lib/channel/channel_trace_registry.cc +0 -80
- data/src/core/lib/channel/channel_trace_registry.h +0 -43
- data/src/core/lib/gpr/fork.cc +0 -78
- data/src/core/tsi/transport_security_adapter.cc +0 -235
- data/src/core/tsi/transport_security_adapter.h +0 -41
- data/src/ruby/bin/apis/google/protobuf/empty.rb +0 -29
- data/src/ruby/bin/apis/pubsub_demo.rb +0 -241
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb +0 -159
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb +0 -88
- data/src/ruby/pb/test/client.rb +0 -764
- data/src/ruby/pb/test/server.rb +0 -252
- data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +0 -54
- data/src/ruby/spec/pb/package_with_underscore/data.proto +0 -23
- data/src/ruby/spec/pb/package_with_underscore/service.proto +0 -23
- data/third_party/boringssl/crypto/curve25519/x25519-x86_64.c +0 -247
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +0 -1674
data/src/core/ext/filters/client_channel/channel_connectivity.cc

@@ -40,7 +40,7 @@ grpc_connectivity_state grpc_channel_check_connectivity_state(
   GRPC_API_TRACE(
       "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
       (channel, try_to_connect));
-  if (client_channel_elem->filter == &grpc_client_channel_filter) {
+  if (GPR_LIKELY(client_channel_elem->filter == &grpc_client_channel_filter)) {
     state = grpc_client_channel_check_connectivity_state(client_channel_elem,
                                                          try_to_connect);

data/src/core/ext/filters/client_channel/client_channel.cc

@@ -126,9 +126,9 @@ typedef struct client_channel_channel_data {
   /* the following properties are guarded by a mutex since APIs require them
      to be instantaneously available */
   gpr_mu info_mu;
-  char* info_lb_policy_name;
+  grpc_core::UniquePtr<char> info_lb_policy_name;
   /** service config in JSON form */
-  char* info_service_config_json;
+  grpc_core::UniquePtr<char> info_service_config_json;
 } channel_data;

 typedef struct {
@@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
     }
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
+    gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
             grpc_connectivity_state_name(state));
   }
   grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
@@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy.get()) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
+      gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
              w->lb_policy, grpc_connectivity_state_name(w->state));
     }
     set_channel_connectivity_state_locked(w->chand, w->state,
@@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,

 static void start_resolving_locked(channel_data* chand) {
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
+    gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
   }
   GPR_ASSERT(!chand->started_resolving);
   chand->started_resolving = true;
@@ -284,6 +284,78 @@ static void parse_retry_throttle_params(
   }
 }

+// Invoked from the resolver NextLocked() callback when the resolver
+// is shutting down.
+static void on_resolver_shutdown_locked(channel_data* chand,
+                                        grpc_error* error) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
+  }
+  if (chand->lb_policy != nullptr) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
+              chand->lb_policy.get());
+    }
+    grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
+                                     chand->interested_parties);
+    chand->lb_policy.reset();
+  }
+  if (chand->resolver != nullptr) {
+    // This should never happen; it can only be triggered by a resolver
+    // implementation spotaneously deciding to report shutdown without
+    // being orphaned.  This code is included just to be defensive.
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p: spontaneous shutdown from resolver %p",
+              chand, chand->resolver.get());
+    }
+    chand->resolver.reset();
+    set_channel_connectivity_state_locked(
+        chand, GRPC_CHANNEL_SHUTDOWN,
+        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+            "Resolver spontaneous shutdown", &error, 1),
+        "resolver_spontaneous_shutdown");
+  }
+  grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
+                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                 "Channel disconnected", &error, 1));
+  GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
+  grpc_channel_args_destroy(chand->resolver_result);
+  chand->resolver_result = nullptr;
+  GRPC_ERROR_UNREF(error);
+}
+
+// Returns the LB policy name from the resolver result.
+static grpc_core::UniquePtr<char>
+get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
+  // Find LB policy name in channel args.
+  const grpc_arg* channel_arg =
+      grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
+  const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
+  // Special case: If at least one balancer address is present, we use
+  // the grpclb policy, regardless of what the resolver actually specified.
+  channel_arg =
+      grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
+  if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+    grpc_lb_addresses* addresses =
+        static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
+    if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
+      if (lb_policy_name != nullptr &&
+          gpr_stricmp(lb_policy_name, "grpclb") != 0) {
+        gpr_log(GPR_INFO,
+                "resolver requested LB policy %s but provided at least one "
+                "balancer address -- forcing use of grpclb LB policy",
+                lb_policy_name);
+      }
+      lb_policy_name = "grpclb";
+    }
+  }
+  // Use pick_first if nothing was specified and we didn't select grpclb
+  // above.
+  if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
+  return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
+}
+
 static void request_reresolution_locked(void* arg, grpc_error* error) {
   reresolution_request_args* args =
       static_cast<reresolution_request_args*>(arg);
@@ -297,241 +369,190 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
     return;
   }
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+    gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
   }
   chand->resolver->RequestReresolutionLocked();
   // Give back the closure to the LB policy.
   chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
 }

-//
-//
-//
-//
-static void
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  if (chand->resolver != nullptr) {
-    // Find LB policy name.
-    const grpc_arg* channel_arg = grpc_channel_args_find(
-        chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
-    const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
-    // Special case: If at least one balancer address is present, we use
-    // the grpclb policy, regardless of what the resolver actually specified.
-    channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
-    if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
-      grpc_lb_addresses* addresses =
-          static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
-      bool found_balancer_address = false;
-      for (size_t i = 0; i < addresses->num_addresses; ++i) {
-        if (addresses->addresses[i].is_balancer) {
-          found_balancer_address = true;
-          break;
-        }
-      }
-      if (found_balancer_address) {
-        if (lb_policy_name != nullptr &&
-            strcmp(lb_policy_name, "grpclb") != 0) {
-          gpr_log(GPR_INFO,
-                  "resolver requested LB policy %s but provided at least one "
-                  "balancer address -- forcing use of grpclb LB policy",
-                  lb_policy_name);
-        }
-        lb_policy_name = "grpclb";
-      }
-    }
-    // Use pick_first if nothing was specified and we didn't select grpclb
-    // above.
-    if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
-    // Check to see if we're already using the right LB policy.
-    // Note: It's safe to use chand->info_lb_policy_name here without
-    // taking a lock on chand->info_mu, because this function is the
-    // only thing that modifies its value, and it can only be invoked
-    // once at any given time.
-    lb_policy_name_changed =
-        chand->info_lb_policy_name == nullptr ||
-        gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
-    if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
-      // Continue using the same LB policy.  Update with new addresses.
-      lb_policy_updated = true;
-      chand->lb_policy->UpdateLocked(*chand->resolver_result);
-    } else {
-      // Instantiate new LB policy.
-      grpc_core::LoadBalancingPolicy::Args lb_policy_args;
-      lb_policy_args.combiner = chand->combiner;
-      lb_policy_args.client_channel_factory = chand->client_channel_factory;
-      lb_policy_args.args = chand->resolver_result;
-      new_lb_policy =
-          grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
-              lb_policy_name, lb_policy_args);
-      if (new_lb_policy == nullptr) {
-        gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
-                lb_policy_name);
-      } else {
-        lb_policy_created = true;
-        reresolution_request_args* args =
-            static_cast<reresolution_request_args*>(
-                gpr_zalloc(sizeof(*args)));
-        args->chand = chand;
-        args->lb_policy = new_lb_policy.get();
-        GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
-                          grpc_combiner_scheduler(chand->combiner));
-        GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
-        new_lb_policy->SetReresolutionClosureLocked(&args->closure);
-      }
-    }
-    // Before we clean up, save a copy of lb_policy_name, since it might
-    // be pointing to data inside chand->resolver_result.
-    // The copy will be saved in chand->lb_policy_name below.
-    lb_policy_name_dup = gpr_strdup(lb_policy_name);
-    // Find service config.
-    channel_arg = grpc_channel_args_find(chand->resolver_result,
-                                         GRPC_ARG_SERVICE_CONFIG);
-    service_config_json =
-        gpr_strdup(grpc_channel_arg_get_string(channel_arg));
-    if (service_config_json != nullptr) {
-      grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
-          grpc_core::ServiceConfig::Create(service_config_json);
-      if (service_config != nullptr) {
-        if (chand->enable_retries) {
-          channel_arg = grpc_channel_args_find(chand->resolver_result,
-                                               GRPC_ARG_SERVER_URI);
-          const char* server_uri = grpc_channel_arg_get_string(channel_arg);
-          GPR_ASSERT(server_uri != nullptr);
-          grpc_uri* uri = grpc_uri_parse(server_uri, true);
-          GPR_ASSERT(uri->path[0] != '\0');
-          service_config_parsing_state parsing_state;
-          memset(&parsing_state, 0, sizeof(parsing_state));
-          parsing_state.server_name =
-              uri->path[0] == '/' ? uri->path + 1 : uri->path;
-          service_config->ParseGlobalParams(parse_retry_throttle_params,
-                                            &parsing_state);
-          grpc_uri_destroy(uri);
-          retry_throttle_data = std::move(parsing_state.retry_throttle_data);
-        }
-        method_params_table = service_config->CreateMethodConfigTable(
-            ClientChannelMethodParams::CreateFromJson);
-      }
-    }
+// Creates a new LB policy, replacing any previous one.
+// If the new policy is created successfully, sets *connectivity_state and
+// *connectivity_error to its initial connectivity state; otherwise,
+// leaves them unchanged.
+static void create_new_lb_policy_locked(
+    channel_data* chand, char* lb_policy_name,
+    grpc_connectivity_state* connectivity_state,
+    grpc_error** connectivity_error) {
+  grpc_core::LoadBalancingPolicy::Args lb_policy_args;
+  lb_policy_args.combiner = chand->combiner;
+  lb_policy_args.client_channel_factory = chand->client_channel_factory;
+  lb_policy_args.args = chand->resolver_result;
+  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
+      grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+          lb_policy_name, lb_policy_args);
+  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
+    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
+  } else {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p: created new LB policy \"%s\" (%p)", chand,
+              lb_policy_name, new_lb_policy.get());
     }
-
-
-    gpr_log(GPR_DEBUG,
-            "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
-            "service_config=\"%s\"",
-            chand, lb_policy_name_dup,
-            lb_policy_name_changed ? " (changed)" : "", service_config_json);
-  }
-  // Now swap out fields in chand. Note that the new values may still
-  // be nullptr if (e.g.) the resolver failed to return results or the
-  // results did not contain the necessary data.
-  //
-  // First, swap out the data used by cc_get_channel_info().
-  gpr_mu_lock(&chand->info_mu);
-  if (lb_policy_name_dup != nullptr) {
-    gpr_free(chand->info_lb_policy_name);
-    chand->info_lb_policy_name = lb_policy_name_dup;
-  }
-  if (service_config_json != nullptr) {
-    gpr_free(chand->info_service_config_json);
-    chand->info_service_config_json = service_config_json;
-  }
-  gpr_mu_unlock(&chand->info_mu);
-  // Swap out the retry throttle data.
-  chand->retry_throttle_data = std::move(retry_throttle_data);
-  // Swap out the method params table.
-  chand->method_params_table = std::move(method_params_table);
-  // If we have a new LB policy or are shutting down (in which case
-  // new_lb_policy will be nullptr), swap out the LB policy, unreffing the
-  // old one and removing its fds from chand->interested_parties.
-  // Note that we do NOT do this if either (a) we updated the existing
-  // LB policy above or (b) we failed to create the new LB policy (in
-  // which case we want to continue using the most recent one we had).
-  if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
-      chand->resolver == nullptr) {
+    // Swap out the LB policy and update the fds in
+    // chand->interested_parties.
     if (chand->lb_policy != nullptr) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(GPR_DEBUG, "chand=%p: shutting down lb_policy=%p", chand,
+        gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
                 chand->lb_policy.get());
       }
       grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                        chand->interested_parties);
       chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
-      chand->lb_policy.reset();
     }
     chand->lb_policy = std::move(new_lb_policy);
+    grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
+                                     chand->interested_parties);
+    // Set up re-resolution callback.
+    reresolution_request_args* args =
+        static_cast<reresolution_request_args*>(gpr_zalloc(sizeof(*args)));
+    args->chand = chand;
+    args->lb_policy = chand->lb_policy.get();
+    GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
+                      grpc_combiner_scheduler(chand->combiner));
+    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
+    chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
+    // Get the new LB policy's initial connectivity state and start a
+    // connectivity watch.
+    GRPC_ERROR_UNREF(*connectivity_error);
+    *connectivity_state =
+        chand->lb_policy->CheckConnectivityLocked(connectivity_error);
+    if (chand->exit_idle_when_lb_policy_arrives) {
+      chand->lb_policy->ExitIdleLocked();
+      chand->exit_idle_when_lb_policy_arrives = false;
+    }
+    watch_lb_policy_locked(chand, chand->lb_policy.get(), *connectivity_state);
+  }
+}
+
+// Returns the service config (as a JSON string) from the resolver result.
+// Also updates state in chand.
+static grpc_core::UniquePtr<char>
+get_service_config_from_resolver_result_locked(channel_data* chand) {
+  const grpc_arg* channel_arg =
+      grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
+  const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
+  if (service_config_json != nullptr) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
+              chand, service_config_json);
+    }
+    grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
+        grpc_core::ServiceConfig::Create(service_config_json);
+    if (service_config != nullptr) {
+      if (chand->enable_retries) {
+        channel_arg =
+            grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
+        const char* server_uri = grpc_channel_arg_get_string(channel_arg);
+        GPR_ASSERT(server_uri != nullptr);
+        grpc_uri* uri = grpc_uri_parse(server_uri, true);
+        GPR_ASSERT(uri->path[0] != '\0');
+        service_config_parsing_state parsing_state;
+        memset(&parsing_state, 0, sizeof(parsing_state));
+        parsing_state.server_name =
+            uri->path[0] == '/' ? uri->path + 1 : uri->path;
+        service_config->ParseGlobalParams(parse_retry_throttle_params,
+                                          &parsing_state);
+        grpc_uri_destroy(uri);
+        chand->retry_throttle_data =
+            std::move(parsing_state.retry_throttle_data);
+      }
+      chand->method_params_table = service_config->CreateMethodConfigTable(
+          ClientChannelMethodParams::CreateFromJson);
+    }
   }
-
-
+  return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
+}
+
+// Callback invoked when a resolver result is available.
+static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
+  channel_data* chand = static_cast<channel_data*>(arg);
+  if (grpc_client_channel_trace.enabled()) {
+    const char* disposition =
+        chand->resolver_result != nullptr
+            ? ""
+            : (error == GRPC_ERROR_NONE ? " (transient error)"
+                                        : " (resolver shutdown)");
+    gpr_log(GPR_INFO,
+            "chand=%p: got resolver result: resolver_result=%p error=%s%s",
+            chand, chand->resolver_result, grpc_error_string(error),
+            disposition);
+  }
+  // Handle shutdown.
   if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
+    on_resolver_shutdown_locked(chand, GRPC_ERROR_REF(error));
+    return;
+  }
+  // Data used to set the channel's connectivity state.
+  bool set_connectivity_state = true;
+  grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+  grpc_error* connectivity_error =
+      GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+  // chand->resolver_result will be null in the case of a transient
+  // resolution error.  In that case, we don't have any new result to
+  // process, which means that we keep using the previous result (if any).
+  if (chand->resolver_result == nullptr) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-                                 "Channel disconnected", &error, 1));
-    GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
-    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
-    grpc_channel_args_destroy(chand->resolver_result);
-    chand->resolver_result = nullptr;
-  } else {  // Not shutting down.
-    grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-    grpc_error* state_error =
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
-    if (lb_policy_created) {
+  } else {
+    grpc_core::UniquePtr<char> lb_policy_name =
+        get_lb_policy_name_from_resolver_result_locked(chand);
+    // Check to see if we're already using the right LB policy.
+    // Note: It's safe to use chand->info_lb_policy_name here without
+    // taking a lock on chand->info_mu, because this function is the
+    // only thing that modifies its value, and it can only be invoked
+    // once at any given time.
+    bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
+                                  gpr_stricmp(chand->info_lb_policy_name.get(),
+                                              lb_policy_name.get()) != 0;
+    if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
+      // Continue using the same LB policy. Update with new addresses.
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(
-
-      GRPC_ERROR_UNREF(state_error);
-      state = chand->lb_policy->CheckConnectivityLocked(&state_error);
-      grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
-                                       chand->interested_parties);
-      GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
-      if (chand->exit_idle_when_lb_policy_arrives) {
-        chand->lb_policy->ExitIdleLocked();
-        chand->exit_idle_when_lb_policy_arrives = false;
+        gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
+                chand, lb_policy_name.get(), chand->lb_policy.get());
       }
-
-
-      //
-
-      }
-
-
-
-    }
+      chand->lb_policy->UpdateLocked(*chand->resolver_result);
+      // No need to set the channel's connectivity state; the existing
+      // watch on the LB policy will take care of that.
+      set_connectivity_state = false;
+    } else {
+      // Instantiate new LB policy.
+      create_new_lb_policy_locked(chand, lb_policy_name.get(),
+                                  &connectivity_state, &connectivity_error);
+    }
+    // Find service config.
+    grpc_core::UniquePtr<char> service_config_json =
+        get_service_config_from_resolver_result_locked(chand);
+    // Swap out the data used by cc_get_channel_info().
+    gpr_mu_lock(&chand->info_mu);
+    chand->info_lb_policy_name = std::move(lb_policy_name);
+    chand->info_service_config_json = std::move(service_config_json);
+    gpr_mu_unlock(&chand->info_mu);
+    // Clean up.
     grpc_channel_args_destroy(chand->resolver_result);
     chand->resolver_result = nullptr;
-    chand->resolver->NextLocked(&chand->resolver_result,
-                                &chand->on_resolver_result_changed);
-    GRPC_ERROR_UNREF(state_error);
   }
+  // Set the channel's connectivity state if needed.
+  if (set_connectivity_state) {
+    set_channel_connectivity_state_locked(
+        chand, connectivity_state, connectivity_error, "resolver_result");
+  } else {
+    GRPC_ERROR_UNREF(connectivity_error);
+  }
+  // Invoke closures that were waiting for results and renew the watch.
+  GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
+  chand->resolver->NextLocked(&chand->resolver_result,
+                              &chand->on_resolver_result_changed);
 }

 static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
@@ -550,15 +571,32 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {

   if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
     if (chand->lb_policy == nullptr) {
-
-
-
-      GRPC_CLOSURE_SCHED(
-          op->send_ping.on_ack,
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
+      grpc_error* error =
+          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing");
+      GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
+      GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
     } else {
-
-
+      grpc_error* error = GRPC_ERROR_NONE;
+      grpc_core::LoadBalancingPolicy::PickState pick_state;
+      pick_state.initial_metadata = nullptr;
+      pick_state.initial_metadata_flags = 0;
+      pick_state.on_complete = nullptr;
+      memset(&pick_state.subchannel_call_context, 0,
+             sizeof(pick_state.subchannel_call_context));
+      pick_state.user_data = nullptr;
+      // Pick must return synchronously, because pick_state.on_complete is null.
+      GPR_ASSERT(chand->lb_policy->PickLocked(&pick_state, &error));
+      if (pick_state.connected_subchannel != nullptr) {
+        pick_state.connected_subchannel->Ping(op->send_ping.on_initiate,
+                                              op->send_ping.on_ack);
+      } else {
+        if (error == GRPC_ERROR_NONE) {
+          error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+              "LB policy dropped call on ping");
+        }
+        GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
+        GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
+      }
       op->bind_pollset = nullptr;
     }
     op->send_ping.on_initiate = nullptr;
@@ -584,6 +622,17 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
     }
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
+
+  if (op->reset_connect_backoff) {
+    if (chand->resolver != nullptr) {
+      chand->resolver->ResetBackoffLocked();
+      chand->resolver->RequestReresolutionLocked();
+    }
+    if (chand->lb_policy != nullptr) {
+      chand->lb_policy->ResetBackoffLocked();
+    }
+  }
+
   GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");

   GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
@@ -611,15 +660,11 @@ static void cc_get_channel_info(grpc_channel_element* elem,
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   gpr_mu_lock(&chand->info_mu);
   if (info->lb_policy_name != nullptr) {
-    *info->lb_policy_name = chand->info_lb_policy_name
-                                ? nullptr
-                                : gpr_strdup(chand->info_lb_policy_name);
+    *info->lb_policy_name = gpr_strdup(chand->info_lb_policy_name.get());
   }
   if (info->service_config_json != nullptr) {
     *info->service_config_json =
-        chand->info_service_config_json
-            ? nullptr
-            : gpr_strdup(chand->info_service_config_json);
+        gpr_strdup(chand->info_service_config_json.get());
   }
   gpr_mu_unlock(&chand->info_mu);
 }
@@ -699,19 +744,15 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
   return GRPC_ERROR_NONE;
 }

-static void shutdown_resolver_locked(void* arg, grpc_error* error) {
-  grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
-  resolver->Orphan();
-}
-
 /* Destructor for channel_data */
 static void cc_destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   if (chand->resolver != nullptr) {
-
-
-
-
+    // The only way we can get here is if we never started resolving,
+    // because we take a ref to the channel stack when we start
+    // resolving and do not release it until the resolver callback is
+    // invoked after the resolver shuts down.
+    chand->resolver.reset();
   }
   if (chand->client_channel_factory != nullptr) {
     grpc_client_channel_factory_unref(chand->client_channel_factory);
@@ -721,8 +762,10 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
                                      chand->interested_parties);
     chand->lb_policy.reset();
   }
-  gpr_free(chand->info_lb_policy_name);
-  gpr_free(chand->info_service_config_json);
+  // TODO(roth): Once we convert the filter API to C++, there will no
+  // longer be any need to explicitly reset these smart pointer data members.
+  chand->info_lb_policy_name.reset();
+  chand->info_service_config_json.reset();
   chand->retry_throttle_data.reset();
   chand->method_params_table.reset();
   grpc_client_channel_stop_backup_polling(chand->interested_parties);
@@ -794,6 +837,15 @@ typedef struct {
   // The batch to use in the subchannel call.
   // Its payload field points to subchannel_call_retry_state.batch_payload.
   grpc_transport_stream_op_batch batch;
+  // For intercepting on_complete.
+  grpc_closure on_complete;
+} subchannel_batch_data;
+
+// Retry state associated with a subchannel call.
+// Stored in the parent_data of the subchannel call object.
+typedef struct {
+  // subchannel_batch_data.batch.payload points to this.
+  grpc_transport_stream_op_batch_payload batch_payload;
   // For send_initial_metadata.
   // Note that we need to make a copy of the initial metadata for each
   // subchannel call instead of just referring to the copy in call_data,
@@ -817,15 +869,7 @@ typedef struct {
   // For intercepting recv_trailing_metadata.
   grpc_metadata_batch recv_trailing_metadata;
   grpc_transport_stream_stats collect_stats;
-  // For intercepting on_complete.
-  grpc_closure on_complete;
-} subchannel_batch_data;
-
-// Retry state associated with a subchannel call.
-// Stored in the parent_data of the subchannel call object.
-typedef struct {
-  // subchannel_batch_data.batch.payload points to this.
-  grpc_transport_stream_op_batch_payload batch_payload;
+  grpc_closure recv_trailing_metadata_ready;
   // These fields indicate which ops have been started and completed on
   // this subchannel call.
   size_t started_send_message_count;
@@ -842,10 +886,11 @@ typedef struct {
   bool completed_recv_trailing_metadata : 1;
   // State for callback processing.
   bool retry_dispatched : 1;
-
-  bool recv_message_ready_deferred : 1;
+  subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
   grpc_error* recv_initial_metadata_error;
+  subchannel_batch_data* recv_message_ready_deferred_batch;
   grpc_error* recv_message_error;
+  subchannel_batch_data* recv_trailing_metadata_internal_batch;
 } subchannel_call_retry_state;

 // Pending batches stored in call data.
@@ -890,6 +935,7 @@ typedef struct client_channel_call_data {
   grpc_closure pick_cancel_closure;

   grpc_polling_entity* pollent;
+  bool pollent_added_to_interested_parties;

   // Batches are added to this list when received from above.
   // They are removed when we are done handling the batch (i.e., when
@@ -910,6 +956,15 @@ typedef struct client_channel_call_data {
   grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
|
911
957
|
grpc_timer retry_timer;
|
912
958
|
|
959
|
+
// The number of pending retriable subchannel batches containing send ops.
|
960
|
+
// We hold a ref to the call stack while this is non-zero, since replay
|
961
|
+
// batches may not complete until after all callbacks have been returned
|
962
|
+
// to the surface, and we need to make sure that the call is not destroyed
|
963
|
+
// until all of these batches have completed.
|
964
|
+
// Note that we actually only need to track replay batches, but it's
|
965
|
+
// easier to track all batches with send ops.
|
966
|
+
int num_pending_retriable_subchannel_send_batches;
|
967
|
+
|
913
968
|
// Cached data for retrying send ops.
|
914
969
|
// send_initial_metadata
|
915
970
|
bool seen_send_initial_metadata;
|
@@ -924,7 +979,9 @@ typedef struct client_channel_call_data {
|
|
924
979
|
// Note: We inline the cache for the first 3 send_message ops and use
|
925
980
|
// dynamic allocation after that. This number was essentially picked
|
926
981
|
// at random; it could be changed in the future to tune performance.
|
927
|
-
grpc_core::
|
982
|
+
grpc_core::ManualConstructor<
|
983
|
+
grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
|
984
|
+
send_messages;
|
928
985
|
// send_trailing_metadata
|
929
986
|
bool seen_send_trailing_metadata;
|
930
987
|
grpc_linked_mdelem* send_trailing_metadata_storage;
|
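The hunk above wraps the cached send_messages vector in grpc_core::ManualConstructor, so that its constructor and destructor run only when the arena-allocated call data is explicitly initialized and torn down. The standalone sketch below illustrates that manual-construction idea under simplified assumptions: the ManuallyConstructed wrapper, FakeCallData struct, and std::vector element type are stand-ins invented for the example, not gRPC types.

// Standalone sketch of the manual-construction idea (assumed names; this is
// not grpc_core::ManualConstructor). The wrapper holds raw aligned storage and
// only runs T's constructor/destructor when Init()/Destroy() are called, so
// the enclosing struct can live in zero-initialized arena memory.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>
#include <utility>
#include <vector>

template <typename T>
class ManuallyConstructed {
 public:
  template <typename... Args>
  void Init(Args&&... args) {
    new (&storage_) T(std::forward<Args>(args)...);
  }
  void Destroy() { get()->~T(); }
  T* get() { return reinterpret_cast<T*>(&storage_); }
  T* operator->() { return get(); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};

// Stand-in for the arena-allocated call data that caches send_message ops.
struct FakeCallData {
  bool seen_send_initial_metadata;
  ManuallyConstructed<std::vector<const char*>> send_messages;
};

int main() {
  // Mimic arena allocation: raw zeroed memory, no constructors run implicitly.
  void* mem = std::malloc(sizeof(FakeCallData));
  std::memset(mem, 0, sizeof(FakeCallData));
  FakeCallData* calld = static_cast<FakeCallData*>(mem);
  calld->send_messages.Init();               // construct on first use
  calld->send_messages->push_back("msg 0");  // access through operator->
  std::printf("cached %zu message(s)\n", calld->send_messages->size());
  calld->send_messages.Destroy();            // destroy before freeing the memory
  std::free(mem);
}

This is also why the later hunks switch call sites from calld->send_messages.size() to calld->send_messages->size(): access now goes through the wrapper.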
@@ -937,7 +994,6 @@ static void retry_commit(grpc_call_element* elem,
 static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
 static void on_complete(void* arg, grpc_error* error);
 static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
-static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
 static void start_pick_locked(void* arg, grpc_error* ignored);
 
 //
@@ -974,7 +1030,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
         gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
     new (cache) grpc_core::ByteStreamCache(
         std::move(batch->payload->send_message.send_message));
-    calld->send_messages
+    calld->send_messages->push_back(cache);
   }
   // Save metadata batch for send_trailing_metadata ops.
   if (batch->send_trailing_metadata) {
@@ -992,6 +1048,39 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
   }
 }
 
+// Frees cached send_initial_metadata.
+static void free_cached_send_initial_metadata(channel_data* chand,
+                                              call_data* calld) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
+            calld);
+  }
+  grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+}
+
+// Frees cached send_message at index idx.
+static void free_cached_send_message(channel_data* chand, call_data* calld,
+                                     size_t idx) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
+            chand, calld, idx);
+  }
+  (*calld->send_messages)[idx]->Destroy();
+}
+
+// Frees cached send_trailing_metadata.
+static void free_cached_send_trailing_metadata(channel_data* chand,
+                                               call_data* calld) {
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: destroying calld->send_trailing_metadata",
+            chand, calld);
+  }
+  grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+}
+
 // Frees cached send ops that have already been completed after
 // committing the call.
 static void free_cached_send_op_data_after_commit(
@@ -999,19 +1088,13 @@ static void free_cached_send_op_data_after_commit(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (retry_state->completed_send_initial_metadata) {
-
+    free_cached_send_initial_metadata(chand, calld);
   }
   for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
-
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
-              "]",
-              chand, calld, i);
-    }
-    calld->send_messages[i]->Destroy();
+    free_cached_send_message(chand, calld, i);
   }
   if (retry_state->completed_send_trailing_metadata) {
-
+    free_cached_send_trailing_metadata(chand, calld);
   }
 }
 
@@ -1023,20 +1106,14 @@ static void free_cached_send_op_data_for_completed_batch(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (batch_data->batch.send_initial_metadata) {
-
+    free_cached_send_initial_metadata(chand, calld);
   }
   if (batch_data->batch.send_message) {
-
-
-              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
-              "]",
-              chand, calld, retry_state->completed_send_message_count - 1);
-    }
-    calld->send_messages[retry_state->completed_send_message_count - 1]
-        ->Destroy();
+    free_cached_send_message(chand, calld,
+                             retry_state->completed_send_message_count - 1);
   }
   if (batch_data->batch.send_trailing_metadata) {
-
+    free_cached_send_trailing_metadata(chand, calld);
   }
 }
 
@@ -1064,7 +1141,7 @@ static void pending_batches_add(grpc_call_element* elem,
   call_data* calld = static_cast<call_data*>(elem->call_data);
   const size_t idx = get_batch_index(batch);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO,
             "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
             calld, idx);
   }
@@ -1090,9 +1167,10 @@ static void pending_batches_add(grpc_call_element* elem,
     if (batch->send_trailing_metadata) {
       calld->pending_send_trailing_metadata = true;
     }
-    if (calld->bytes_buffered_for_retry >
+    if (GPR_UNLIKELY(calld->bytes_buffered_for_retry >
+                     chand->per_rpc_retry_buffer_size)) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(
+        gpr_log(GPR_INFO,
                 "chand=%p calld=%p: exceeded retry buffer size, committing",
                 chand, calld);
       }
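The buffer-size check in the hunk above is now wrapped in GPR_UNLIKELY, a branch-prediction hint. The sketch below shows how such a hint macro is commonly written on GCC/Clang with __builtin_expect; SKETCH_UNLIKELY and maybe_commit are illustrative stand-ins, not gRPC's GPR_UNLIKELY definition from port_platform.h.

// Standalone sketch of a GPR_UNLIKELY-style branch hint (assumes GCC/Clang
// __builtin_expect; names here are illustrative, not gRPC definitions).
#include <cstddef>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define SKETCH_UNLIKELY(x) (__builtin_expect(!!(x), 0))
#else
#define SKETCH_UNLIKELY(x) (x)
#endif

// Mirrors the shape of the retry-buffer check: the overflow path is expected
// to be rare, so the hint keeps the common "keep buffering" path hot.
static bool maybe_commit(size_t bytes_buffered, size_t buffer_limit) {
  if (SKETCH_UNLIKELY(bytes_buffered > buffer_limit)) {
    std::puts("exceeded retry buffer size, committing");
    return true;
  }
  return false;
}

int main() {
  maybe_commit(512, 1024);   // common case: stays under the limit
  maybe_commit(4096, 1024);  // rare case: over the limit, commit the call
}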
@@ -1107,7 +1185,7 @@ static void pending_batches_add(grpc_call_element* elem,
     // retries are disabled so that we don't bother with retry overhead.
     if (calld->num_attempts_completed == 0) {
       if (grpc_client_channel_trace.enabled()) {
-        gpr_log(
+        gpr_log(GPR_INFO,
                 "chand=%p calld=%p: disabling retries before first attempt",
                 chand, calld);
       }
@@ -1154,39 +1232,28 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
     for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
       if (calld->pending_batches[i].batch != nullptr) ++num_batches;
     }
-    gpr_log(
+    gpr_log(GPR_INFO,
             "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
             elem->channel_data, calld, num_batches, grpc_error_string(error));
   }
-
-      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
-  size_t num_batches = 0;
+  grpc_core::CallCombinerClosureList closures;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     pending_batch* pending = &calld->pending_batches[i];
     grpc_transport_stream_op_batch* batch = pending->batch;
     if (batch != nullptr) {
-
+      batch->handler_private.extra_arg = calld;
+      GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+                        fail_pending_batch_in_call_combiner, batch,
+                        grpc_schedule_on_exec_ctx);
+      closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
+                   "pending_batches_fail");
       pending_batch_clear(calld, pending);
     }
   }
-  for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
-    grpc_transport_stream_op_batch* batch = batches[i];
-    batch->handler_private.extra_arg = calld;
-    GRPC_CLOSURE_INIT(&batch->handler_private.closure,
-                      fail_pending_batch_in_call_combiner, batch,
-                      grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(calld->call_combiner,
-                             &batch->handler_private.closure,
-                             GRPC_ERROR_REF(error), "pending_batches_fail");
-  }
   if (yield_call_combiner) {
-
-
-
-        batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
-  } else {
-    GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
-  }
+    closures.RunClosures(calld->call_combiner);
+  } else {
+    closures.RunClosuresWithoutYielding(calld->call_combiner);
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1216,35 +1283,27 @@ static void pending_batches_resume(grpc_call_element* elem) {
     for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
       if (calld->pending_batches[i].batch != nullptr) ++num_batches;
     }
-    gpr_log(
+    gpr_log(GPR_INFO,
             "chand=%p calld=%p: starting %" PRIuPTR
             " pending batches on subchannel_call=%p",
             chand, calld, num_batches, calld->subchannel_call);
   }
-
-      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
-  size_t num_batches = 0;
+  grpc_core::CallCombinerClosureList closures;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     pending_batch* pending = &calld->pending_batches[i];
     grpc_transport_stream_op_batch* batch = pending->batch;
     if (batch != nullptr) {
-
+      batch->handler_private.extra_arg = calld->subchannel_call;
+      GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+                        resume_pending_batch_in_call_combiner, batch,
+                        grpc_schedule_on_exec_ctx);
+      closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
+                   "pending_batches_resume");
       pending_batch_clear(calld, pending);
     }
   }
-  for (size_t i = 1; i < num_batches; ++i) {
-    grpc_transport_stream_op_batch* batch = batches[i];
-    batch->handler_private.extra_arg = calld->subchannel_call;
-    GRPC_CLOSURE_INIT(&batch->handler_private.closure,
-                      resume_pending_batch_in_call_combiner, batch,
-                      grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(calld->call_combiner,
-                             &batch->handler_private.closure, GRPC_ERROR_NONE,
-                             "pending_batches_resume");
-  }
-  GPR_ASSERT(num_batches > 0);
   // Note: This will release the call combiner.
-
+  closures.RunClosures(calld->call_combiner);
 }
 
 static void maybe_clear_pending_batch(grpc_call_element* elem,
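Both pending_batches_fail and pending_batches_resume above now collect their callbacks into a grpc_core::CallCombinerClosureList and run the whole list at once, instead of hand-starting each closure on the call combiner and special-casing the first or last batch. The standalone sketch below shows just that collect-then-run shape with plain std::function callbacks; ClosureList here is an illustrative stand-in, not gRPC's class, and the call-combiner yielding is reduced to a comment.

// Standalone sketch of the collect-then-run idea behind
// grpc_core::CallCombinerClosureList (ClosureList is an illustrative stand-in
// using std::function; there is no real call combiner in this sketch).
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

class ClosureList {
 public:
  void Add(std::function<void()> closure, std::string reason) {
    closures_.emplace_back(std::move(closure), std::move(reason));
  }
  // Runs every queued closure in order. In the real class, scheduling the
  // last closure is also what yields the call combiner, exactly once.
  void RunClosures() {
    for (auto& item : closures_) {
      std::printf("running closure: %s\n", item.second.c_str());
      item.first();
    }
    closures_.clear();
  }

 private:
  std::vector<std::pair<std::function<void()>, std::string>> closures_;
};

int main() {
  ClosureList closures;
  for (int i = 0; i < 3; ++i) {
    closures.Add([i] { std::printf("  pending batch %d failed\n", i); },
                 "pending_batches_fail");
  }
  // One call drains everything that was queued, mirroring how the diff
  // replaces the hand-rolled GRPC_CALL_COMBINER_START loop.
  closures.RunClosures();
}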
@@ -1259,83 +1318,39 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
        batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
            nullptr) &&
       (!batch->recv_message ||
-       batch->payload->recv_message.recv_message_ready == nullptr)
+       batch->payload->recv_message.recv_message_ready == nullptr) &&
+      (!batch->recv_trailing_metadata ||
+       batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
+           nullptr)) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
              calld);
     }
     pending_batch_clear(calld, pending);
   }
 }
 
-// Returns
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  if (pending->batch->recv_initial_metadata &&
-      !retry_state->completed_recv_initial_metadata) {
-    return false;
-  }
-  if (pending->batch->recv_message &&
-      retry_state->completed_recv_message_count <
-          retry_state->started_recv_message_count) {
-    return false;
-  }
-  if (pending->batch->recv_trailing_metadata &&
-      !retry_state->completed_recv_trailing_metadata) {
-    return false;
-  }
-  return true;
-}
-
-// Returns true if any op in the batch was not yet started.
-static bool pending_batch_is_unstarted(
-    pending_batch* pending, call_data* calld,
-    subchannel_call_retry_state* retry_state) {
-  if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
-    return false;
-  }
-  if (pending->batch->send_initial_metadata &&
-      !retry_state->started_send_initial_metadata) {
-    return true;
-  }
-  if (pending->batch->send_message &&
-      retry_state->started_send_message_count < calld->send_messages.size()) {
-    return true;
-  }
-  if (pending->batch->send_trailing_metadata &&
-      !retry_state->started_send_trailing_metadata) {
-    return true;
-  }
-  if (pending->batch->recv_initial_metadata &&
-      !retry_state->started_recv_initial_metadata) {
-    return true;
-  }
-  if (pending->batch->recv_message &&
-      retry_state->completed_recv_message_count ==
-          retry_state->started_recv_message_count) {
-    return true;
-  }
-  if (pending->batch->recv_trailing_metadata &&
-      !retry_state->started_recv_trailing_metadata) {
-    return true;
+// Returns a pointer to the first pending batch for which predicate(batch)
+// returns true, or null if not found.
+template <typename Predicate>
+static pending_batch* pending_batch_find(grpc_call_element* elem,
+                                         const char* log_message,
+                                         Predicate predicate) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+    pending_batch* pending = &calld->pending_batches[i];
+    grpc_transport_stream_op_batch* batch = pending->batch;
+    if (batch != nullptr && predicate(batch)) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: %s pending batch at index %" PRIuPTR, chand,
+                calld, log_message, i);
+      }
+      return pending;
+    }
   }
-  return
+  return nullptr;
 }
 
 //
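The new pending_batch_find template above replaces several copies of the same search loop: scan the fixed-size pending_batches array and return the first entry whose batch satisfies a caller-supplied predicate. The sketch below reproduces only that pattern over stand-in types (FakeBatch, FakePending, pending_find are invented for the example); the shape of the template and its lambda call sites mirrors the diff.

// Standalone sketch of the predicate-based lookup that pending_batch_find
// introduces: one templated scan over a fixed-size array, specialized at each
// call site with a lambda. Types here are stand-ins, not gRPC's.
#include <cstddef>
#include <cstdio>

struct FakeBatch {
  bool recv_initial_metadata;
  bool recv_message;
};

struct FakePending {
  FakeBatch* batch;  // nullptr means the slot is empty
};

template <typename Predicate, size_t N>
static FakePending* pending_find(FakePending (&pendings)[N],
                                 const char* log_message,
                                 Predicate predicate) {
  for (size_t i = 0; i < N; ++i) {
    FakeBatch* batch = pendings[i].batch;
    if (batch != nullptr && predicate(batch)) {
      std::printf("%s pending batch at index %zu\n", log_message, i);
      return &pendings[i];
    }
  }
  return nullptr;
}

int main() {
  FakeBatch b0 = {false, true};
  FakeBatch b1 = {true, false};
  FakePending pendings[3] = {{&b0}, {&b1}, {nullptr}};
  // Each caller supplies its own match condition, just as the client-channel
  // code does for recv_initial_metadata, recv_message, and on_complete.
  FakePending* p = pending_find(pendings, "invoking recv_initial_metadata for",
                                [](FakeBatch* batch) {
                                  return batch->recv_initial_metadata;
                                });
  if (p != nullptr) std::puts("found a matching pending batch");
}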
@@ -1350,7 +1365,7 @@ static void retry_commit(grpc_call_element* elem,
|
|
1350
1365
|
if (calld->retry_committed) return;
|
1351
1366
|
calld->retry_committed = true;
|
1352
1367
|
if (grpc_client_channel_trace.enabled()) {
|
1353
|
-
gpr_log(
|
1368
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
|
1354
1369
|
}
|
1355
1370
|
if (retry_state != nullptr) {
|
1356
1371
|
free_cached_send_op_data_after_commit(elem, retry_state);
|
@@ -1395,8 +1410,8 @@ static void do_retry(grpc_call_element* elem,
|
|
1395
1410
|
next_attempt_time = calld->retry_backoff->NextAttemptTime();
|
1396
1411
|
}
|
1397
1412
|
if (grpc_client_channel_trace.enabled()) {
|
1398
|
-
gpr_log(
|
1399
|
-
"chand=%p calld=%p: retrying failed call in %"
|
1413
|
+
gpr_log(GPR_INFO,
|
1414
|
+
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
|
1400
1415
|
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
|
1401
1416
|
}
|
1402
1417
|
// Schedule retry after computed delay.
|
@@ -1429,26 +1444,26 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1429
1444
|
batch_data->subchannel_call));
|
1430
1445
|
if (retry_state->retry_dispatched) {
|
1431
1446
|
if (grpc_client_channel_trace.enabled()) {
|
1432
|
-
gpr_log(
|
1447
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
|
1433
1448
|
calld);
|
1434
1449
|
}
|
1435
1450
|
return true;
|
1436
1451
|
}
|
1437
1452
|
}
|
1438
1453
|
// Check status.
|
1439
|
-
if (status == GRPC_STATUS_OK) {
|
1454
|
+
if (GPR_LIKELY(status == GRPC_STATUS_OK)) {
|
1440
1455
|
if (calld->retry_throttle_data != nullptr) {
|
1441
1456
|
calld->retry_throttle_data->RecordSuccess();
|
1442
1457
|
}
|
1443
1458
|
if (grpc_client_channel_trace.enabled()) {
|
1444
|
-
gpr_log(
|
1459
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
|
1445
1460
|
}
|
1446
1461
|
return false;
|
1447
1462
|
}
|
1448
1463
|
// Status is not OK. Check whether the status is retryable.
|
1449
1464
|
if (!retry_policy->retryable_status_codes.Contains(status)) {
|
1450
1465
|
if (grpc_client_channel_trace.enabled()) {
|
1451
|
-
gpr_log(
|
1466
|
+
gpr_log(GPR_INFO,
|
1452
1467
|
"chand=%p calld=%p: status %s not configured as retryable", chand,
|
1453
1468
|
calld, grpc_status_code_to_string(status));
|
1454
1469
|
}
|
@@ -1464,14 +1479,14 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1464
1479
|
if (calld->retry_throttle_data != nullptr &&
|
1465
1480
|
!calld->retry_throttle_data->RecordFailure()) {
|
1466
1481
|
if (grpc_client_channel_trace.enabled()) {
|
1467
|
-
gpr_log(
|
1482
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
|
1468
1483
|
}
|
1469
1484
|
return false;
|
1470
1485
|
}
|
1471
1486
|
// Check whether the call is committed.
|
1472
1487
|
if (calld->retry_committed) {
|
1473
1488
|
if (grpc_client_channel_trace.enabled()) {
|
1474
|
-
gpr_log(
|
1489
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
|
1475
1490
|
calld);
|
1476
1491
|
}
|
1477
1492
|
return false;
|
@@ -1480,7 +1495,7 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1480
1495
|
++calld->num_attempts_completed;
|
1481
1496
|
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
|
1482
1497
|
if (grpc_client_channel_trace.enabled()) {
|
1483
|
-
gpr_log(
|
1498
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
|
1484
1499
|
calld, retry_policy->max_attempts);
|
1485
1500
|
}
|
1486
1501
|
return false;
|
@@ -1488,7 +1503,7 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1488
1503
|
// If the call was cancelled from the surface, don't retry.
|
1489
1504
|
if (calld->cancel_error != GRPC_ERROR_NONE) {
|
1490
1505
|
if (grpc_client_channel_trace.enabled()) {
|
1491
|
-
gpr_log(
|
1506
|
+
gpr_log(GPR_INFO,
|
1492
1507
|
"chand=%p calld=%p: call cancelled from surface, not retrying",
|
1493
1508
|
chand, calld);
|
1494
1509
|
}
|
@@ -1501,16 +1516,15 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1501
1516
|
uint32_t ms;
|
1502
1517
|
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
|
1503
1518
|
if (grpc_client_channel_trace.enabled()) {
|
1504
|
-
gpr_log(
|
1519
|
+
gpr_log(GPR_INFO,
|
1505
1520
|
"chand=%p calld=%p: not retrying due to server push-back",
|
1506
1521
|
chand, calld);
|
1507
1522
|
}
|
1508
1523
|
return false;
|
1509
1524
|
} else {
|
1510
1525
|
if (grpc_client_channel_trace.enabled()) {
|
1511
|
-
gpr_log(
|
1512
|
-
|
1513
|
-
calld, ms);
|
1526
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
|
1527
|
+
chand, calld, ms);
|
1514
1528
|
}
|
1515
1529
|
server_pushback_ms = (grpc_millis)ms;
|
1516
1530
|
}
|
@@ -1523,8 +1537,13 @@ static bool maybe_retry(grpc_call_element* elem,
|
|
1523
1537
|
// subchannel_batch_data
|
1524
1538
|
//
|
1525
1539
|
|
1540
|
+
// Creates a subchannel_batch_data object on the call's arena with the
|
1541
|
+
// specified refcount. If set_on_complete is true, the batch's
|
1542
|
+
// on_complete callback will be set to point to on_complete();
|
1543
|
+
// otherwise, the batch's on_complete callback will be null.
|
1526
1544
|
static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
|
1527
|
-
int refcount
|
1545
|
+
int refcount,
|
1546
|
+
bool set_on_complete) {
|
1528
1547
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1529
1548
|
subchannel_call_retry_state* retry_state =
|
1530
1549
|
static_cast<subchannel_call_retry_state*>(
|
@@ -1537,26 +1556,32 @@ static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
|
|
1537
1556
|
GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
|
1538
1557
|
batch_data->batch.payload = &retry_state->batch_payload;
|
1539
1558
|
gpr_ref_init(&batch_data->refs, refcount);
|
1540
|
-
|
1541
|
-
|
1542
|
-
|
1559
|
+
if (set_on_complete) {
|
1560
|
+
GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
|
1561
|
+
grpc_schedule_on_exec_ctx);
|
1562
|
+
batch_data->batch.on_complete = &batch_data->on_complete;
|
1563
|
+
}
|
1543
1564
|
GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
|
1544
1565
|
return batch_data;
|
1545
1566
|
}
|
1546
1567
|
|
1547
1568
|
static void batch_data_unref(subchannel_batch_data* batch_data) {
|
1548
1569
|
if (gpr_unref(&batch_data->refs)) {
|
1549
|
-
|
1550
|
-
|
1570
|
+
subchannel_call_retry_state* retry_state =
|
1571
|
+
static_cast<subchannel_call_retry_state*>(
|
1572
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1573
|
+
batch_data->subchannel_call));
|
1574
|
+
if (batch_data->batch.send_initial_metadata) {
|
1575
|
+
grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
|
1551
1576
|
}
|
1552
|
-
if (batch_data->
|
1553
|
-
grpc_metadata_batch_destroy(&
|
1577
|
+
if (batch_data->batch.send_trailing_metadata) {
|
1578
|
+
grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
|
1554
1579
|
}
|
1555
1580
|
if (batch_data->batch.recv_initial_metadata) {
|
1556
|
-
grpc_metadata_batch_destroy(&
|
1581
|
+
grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
|
1557
1582
|
}
|
1558
1583
|
if (batch_data->batch.recv_trailing_metadata) {
|
1559
|
-
grpc_metadata_batch_destroy(&
|
1584
|
+
grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
|
1560
1585
|
}
|
1561
1586
|
GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
|
1562
1587
|
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
@@ -1572,30 +1597,22 @@ static void batch_data_unref(subchannel_batch_data* batch_data) {
|
|
1572
1597
|
static void invoke_recv_initial_metadata_callback(void* arg,
|
1573
1598
|
grpc_error* error) {
|
1574
1599
|
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1575
|
-
channel_data* chand =
|
1576
|
-
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1577
|
-
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1578
1600
|
// Find pending batch.
|
1579
|
-
pending_batch* pending =
|
1580
|
-
|
1581
|
-
|
1582
|
-
|
1583
|
-
|
1584
|
-
|
1585
|
-
|
1586
|
-
gpr_log(GPR_DEBUG,
|
1587
|
-
"chand=%p calld=%p: invoking recv_initial_metadata_ready for "
|
1588
|
-
"pending batch at index %" PRIuPTR,
|
1589
|
-
chand, calld, i);
|
1590
|
-
}
|
1591
|
-
pending = &calld->pending_batches[i];
|
1592
|
-
break;
|
1593
|
-
}
|
1594
|
-
}
|
1601
|
+
pending_batch* pending = pending_batch_find(
|
1602
|
+
batch_data->elem, "invoking recv_initial_metadata_ready for",
|
1603
|
+
[](grpc_transport_stream_op_batch* batch) {
|
1604
|
+
return batch->recv_initial_metadata &&
|
1605
|
+
batch->payload->recv_initial_metadata
|
1606
|
+
.recv_initial_metadata_ready != nullptr;
|
1607
|
+
});
|
1595
1608
|
GPR_ASSERT(pending != nullptr);
|
1596
1609
|
// Return metadata.
|
1610
|
+
subchannel_call_retry_state* retry_state =
|
1611
|
+
static_cast<subchannel_call_retry_state*>(
|
1612
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1613
|
+
batch_data->subchannel_call));
|
1597
1614
|
grpc_metadata_batch_move(
|
1598
|
-
&
|
1615
|
+
&retry_state->recv_initial_metadata,
|
1599
1616
|
pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
|
1600
1617
|
// Update bookkeeping.
|
1601
1618
|
// Note: Need to do this before invoking the callback, since invoking
|
@@ -1619,7 +1636,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
|
1619
1636
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1620
1637
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1621
1638
|
if (grpc_client_channel_trace.enabled()) {
|
1622
|
-
gpr_log(
|
1639
|
+
gpr_log(GPR_INFO,
|
1623
1640
|
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
|
1624
1641
|
chand, calld, grpc_error_string(error));
|
1625
1642
|
}
|
@@ -1627,19 +1644,29 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
|
1627
1644
|
static_cast<subchannel_call_retry_state*>(
|
1628
1645
|
grpc_connected_subchannel_call_get_parent_data(
|
1629
1646
|
batch_data->subchannel_call));
|
1647
|
+
retry_state->completed_recv_initial_metadata = true;
|
1648
|
+
// If a retry was already dispatched, then we're not going to use the
|
1649
|
+
// result of this recv_initial_metadata op, so do nothing.
|
1650
|
+
if (retry_state->retry_dispatched) {
|
1651
|
+
GRPC_CALL_COMBINER_STOP(
|
1652
|
+
calld->call_combiner,
|
1653
|
+
"recv_initial_metadata_ready after retry dispatched");
|
1654
|
+
return;
|
1655
|
+
}
|
1630
1656
|
// If we got an error or a Trailers-Only response and have not yet gotten
|
1631
|
-
// the
|
1632
|
-
//
|
1633
|
-
//
|
1634
|
-
if ((
|
1635
|
-
|
1657
|
+
// the recv_trailing_metadata_ready callback, then defer propagating this
|
1658
|
+
// callback back to the surface. We can evaluate whether to retry when
|
1659
|
+
// recv_trailing_metadata comes back.
|
1660
|
+
if (GPR_UNLIKELY((retry_state->trailing_metadata_available ||
|
1661
|
+
error != GRPC_ERROR_NONE) &&
|
1662
|
+
!retry_state->completed_recv_trailing_metadata)) {
|
1636
1663
|
if (grpc_client_channel_trace.enabled()) {
|
1637
|
-
gpr_log(
|
1664
|
+
gpr_log(GPR_INFO,
|
1638
1665
|
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
|
1639
1666
|
"(Trailers-Only)",
|
1640
1667
|
chand, calld);
|
1641
1668
|
}
|
1642
|
-
retry_state->
|
1669
|
+
retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
|
1643
1670
|
retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
|
1644
1671
|
if (!retry_state->started_recv_trailing_metadata) {
|
1645
1672
|
// recv_trailing_metadata not yet started by application; start it
|
@@ -1654,9 +1681,9 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
|
1654
1681
|
}
|
1655
1682
|
// Received valid initial metadata, so commit the call.
|
1656
1683
|
retry_commit(elem, retry_state);
|
1684
|
+
// Invoke the callback to return the result to the surface.
|
1657
1685
|
// Manually invoking a callback function; it does not take ownership of error.
|
1658
1686
|
invoke_recv_initial_metadata_callback(batch_data, error);
|
1659
|
-
GRPC_ERROR_UNREF(error);
|
1660
1687
|
}
|
1661
1688
|
|
1662
1689
|
//
|
@@ -1666,29 +1693,21 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
|
1666
1693
|
// Invokes recv_message_ready for a subchannel batch.
|
1667
1694
|
static void invoke_recv_message_callback(void* arg, grpc_error* error) {
|
1668
1695
|
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1669
|
-
channel_data* chand =
|
1670
|
-
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1671
|
-
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1672
1696
|
// Find pending op.
|
1673
|
-
pending_batch* pending =
|
1674
|
-
|
1675
|
-
|
1676
|
-
|
1677
|
-
|
1678
|
-
|
1679
|
-
gpr_log(GPR_DEBUG,
|
1680
|
-
"chand=%p calld=%p: invoking recv_message_ready for "
|
1681
|
-
"pending batch at index %" PRIuPTR,
|
1682
|
-
chand, calld, i);
|
1683
|
-
}
|
1684
|
-
pending = &calld->pending_batches[i];
|
1685
|
-
break;
|
1686
|
-
}
|
1687
|
-
}
|
1697
|
+
pending_batch* pending = pending_batch_find(
|
1698
|
+
batch_data->elem, "invoking recv_message_ready for",
|
1699
|
+
[](grpc_transport_stream_op_batch* batch) {
|
1700
|
+
return batch->recv_message &&
|
1701
|
+
batch->payload->recv_message.recv_message_ready != nullptr;
|
1702
|
+
});
|
1688
1703
|
GPR_ASSERT(pending != nullptr);
|
1689
1704
|
// Return payload.
|
1705
|
+
subchannel_call_retry_state* retry_state =
|
1706
|
+
static_cast<subchannel_call_retry_state*>(
|
1707
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1708
|
+
batch_data->subchannel_call));
|
1690
1709
|
*pending->batch->payload->recv_message.recv_message =
|
1691
|
-
std::move(
|
1710
|
+
std::move(retry_state->recv_message);
|
1692
1711
|
// Update bookkeeping.
|
1693
1712
|
// Note: Need to do this before invoking the callback, since invoking
|
1694
1713
|
// the callback will result in yielding the call combiner.
|
@@ -1709,26 +1728,35 @@ static void recv_message_ready(void* arg, grpc_error* error) {
|
|
1709
1728
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1710
1729
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1711
1730
|
if (grpc_client_channel_trace.enabled()) {
|
1712
|
-
gpr_log(
|
1731
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
|
1713
1732
|
chand, calld, grpc_error_string(error));
|
1714
1733
|
}
|
1715
1734
|
subchannel_call_retry_state* retry_state =
|
1716
1735
|
static_cast<subchannel_call_retry_state*>(
|
1717
1736
|
grpc_connected_subchannel_call_get_parent_data(
|
1718
1737
|
batch_data->subchannel_call));
|
1738
|
+
++retry_state->completed_recv_message_count;
|
1739
|
+
// If a retry was already dispatched, then we're not going to use the
|
1740
|
+
// result of this recv_message op, so do nothing.
|
1741
|
+
if (retry_state->retry_dispatched) {
|
1742
|
+
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
|
1743
|
+
"recv_message_ready after retry dispatched");
|
1744
|
+
return;
|
1745
|
+
}
|
1719
1746
|
// If we got an error or the payload was nullptr and we have not yet gotten
|
1720
|
-
// the
|
1721
|
-
//
|
1722
|
-
//
|
1723
|
-
if ((
|
1724
|
-
|
1747
|
+
// the recv_trailing_metadata_ready callback, then defer propagating this
|
1748
|
+
// callback back to the surface. We can evaluate whether to retry when
|
1749
|
+
// recv_trailing_metadata comes back.
|
1750
|
+
if (GPR_UNLIKELY(
|
1751
|
+
(retry_state->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
|
1752
|
+
!retry_state->completed_recv_trailing_metadata)) {
|
1725
1753
|
if (grpc_client_channel_trace.enabled()) {
|
1726
|
-
gpr_log(
|
1754
|
+
gpr_log(GPR_INFO,
|
1727
1755
|
"chand=%p calld=%p: deferring recv_message_ready (nullptr "
|
1728
1756
|
"message and recv_trailing_metadata pending)",
|
1729
1757
|
chand, calld);
|
1730
1758
|
}
|
1731
|
-
retry_state->
|
1759
|
+
retry_state->recv_message_ready_deferred_batch = batch_data;
|
1732
1760
|
retry_state->recv_message_error = GRPC_ERROR_REF(error);
|
1733
1761
|
if (!retry_state->started_recv_trailing_metadata) {
|
1734
1762
|
// recv_trailing_metadata not yet started by application; start it
|
@@ -1741,83 +1769,276 @@ static void recv_message_ready(void* arg, grpc_error* error) {
|
|
1741
1769
|
}
|
1742
1770
|
// Received a valid message, so commit the call.
|
1743
1771
|
retry_commit(elem, retry_state);
|
1772
|
+
// Invoke the callback to return the result to the surface.
|
1744
1773
|
// Manually invoking a callback function; it does not take ownership of error.
|
1745
1774
|
invoke_recv_message_callback(batch_data, error);
|
1746
|
-
GRPC_ERROR_UNREF(error);
|
1747
1775
|
}
|
1748
1776
|
|
1749
1777
|
//
|
1750
|
-
//
|
1778
|
+
// recv_trailing_metadata handling
|
1751
1779
|
//
|
1752
1780
|
|
1753
|
-
//
|
1754
|
-
static void
|
1755
|
-
|
1756
|
-
|
1757
|
-
|
1758
|
-
|
1759
|
-
|
1760
|
-
|
1761
|
-
|
1762
|
-
}
|
1763
|
-
|
1764
|
-
|
1765
|
-
|
1766
|
-
|
1767
|
-
|
1768
|
-
|
1769
|
-
|
1770
|
-
|
1771
|
-
|
1772
|
-
if (batch_data->batch.recv_trailing_metadata) {
|
1773
|
-
retry_state->completed_recv_trailing_metadata = true;
|
1781
|
+
// Sets *status and *server_pushback_md based on batch_data and error.
|
1782
|
+
static void get_call_status(subchannel_batch_data* batch_data,
|
1783
|
+
grpc_error* error, grpc_status_code* status,
|
1784
|
+
grpc_mdelem** server_pushback_md) {
|
1785
|
+
grpc_call_element* elem = batch_data->elem;
|
1786
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1787
|
+
if (error != GRPC_ERROR_NONE) {
|
1788
|
+
grpc_error_get_status(error, calld->deadline, status, nullptr, nullptr,
|
1789
|
+
nullptr);
|
1790
|
+
} else {
|
1791
|
+
grpc_metadata_batch* md_batch =
|
1792
|
+
batch_data->batch.payload->recv_trailing_metadata
|
1793
|
+
.recv_trailing_metadata;
|
1794
|
+
GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
|
1795
|
+
*status =
|
1796
|
+
grpc_get_status_code_from_metadata(md_batch->idx.named.grpc_status->md);
|
1797
|
+
if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
|
1798
|
+
*server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
|
1799
|
+
}
|
1774
1800
|
}
|
1801
|
+
GRPC_ERROR_UNREF(error);
|
1775
1802
|
}
|
1776
1803
|
|
1777
|
-
//
|
1778
|
-
|
1779
|
-
|
1780
|
-
|
1781
|
-
|
1782
|
-
|
1804
|
+
// Adds recv_trailing_metadata_ready closure to closures.
|
1805
|
+
static void add_closure_for_recv_trailing_metadata_ready(
|
1806
|
+
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1807
|
+
grpc_error* error, grpc_core::CallCombinerClosureList* closures) {
|
1808
|
+
// Find pending batch.
|
1809
|
+
pending_batch* pending = pending_batch_find(
|
1810
|
+
elem, "invoking recv_trailing_metadata for",
|
1811
|
+
[](grpc_transport_stream_op_batch* batch) {
|
1812
|
+
return batch->recv_trailing_metadata &&
|
1813
|
+
batch->payload->recv_trailing_metadata
|
1814
|
+
.recv_trailing_metadata_ready != nullptr;
|
1815
|
+
});
|
1816
|
+
// If we generated the recv_trailing_metadata op internally via
|
1817
|
+
// start_internal_recv_trailing_metadata(), then there will be no
|
1818
|
+
// pending batch.
|
1819
|
+
if (pending == nullptr) {
|
1820
|
+
GRPC_ERROR_UNREF(error);
|
1821
|
+
return;
|
1822
|
+
}
|
1823
|
+
// Return metadata.
|
1824
|
+
subchannel_call_retry_state* retry_state =
|
1825
|
+
static_cast<subchannel_call_retry_state*>(
|
1826
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1827
|
+
batch_data->subchannel_call));
|
1828
|
+
grpc_metadata_batch_move(
|
1829
|
+
&retry_state->recv_trailing_metadata,
|
1830
|
+
pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata);
|
1831
|
+
// Add closure.
|
1832
|
+
closures->Add(pending->batch->payload->recv_trailing_metadata
|
1833
|
+
.recv_trailing_metadata_ready,
|
1834
|
+
error, "recv_trailing_metadata_ready for pending batch");
|
1835
|
+
// Update bookkeeping.
|
1836
|
+
pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
|
1837
|
+
nullptr;
|
1838
|
+
maybe_clear_pending_batch(elem, pending);
|
1839
|
+
}
|
1783
1840
|
|
1784
1841
|
// Adds any necessary closures for deferred recv_initial_metadata and
|
1785
|
-
// recv_message callbacks to closures
|
1842
|
+
// recv_message callbacks to closures.
|
1786
1843
|
static void add_closures_for_deferred_recv_callbacks(
|
1787
1844
|
subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
|
1788
|
-
|
1789
|
-
if (batch_data->batch.recv_trailing_metadata
|
1790
|
-
|
1791
|
-
|
1792
|
-
|
1793
|
-
|
1794
|
-
|
1795
|
-
|
1796
|
-
|
1797
|
-
|
1798
|
-
|
1799
|
-
|
1800
|
-
retry_state->
|
1801
|
-
|
1802
|
-
closure
|
1803
|
-
|
1804
|
-
|
1805
|
-
|
1806
|
-
|
1845
|
+
grpc_core::CallCombinerClosureList* closures) {
|
1846
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1847
|
+
// Add closure for deferred recv_initial_metadata_ready.
|
1848
|
+
if (GPR_UNLIKELY(retry_state->recv_initial_metadata_ready_deferred_batch !=
|
1849
|
+
nullptr)) {
|
1850
|
+
GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
|
1851
|
+
invoke_recv_initial_metadata_callback,
|
1852
|
+
retry_state->recv_initial_metadata_ready_deferred_batch,
|
1853
|
+
grpc_schedule_on_exec_ctx);
|
1854
|
+
closures->Add(&retry_state->recv_initial_metadata_ready,
|
1855
|
+
retry_state->recv_initial_metadata_error,
|
1856
|
+
"resuming recv_initial_metadata_ready");
|
1857
|
+
retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
|
1858
|
+
}
|
1859
|
+
// Add closure for deferred recv_message_ready.
|
1860
|
+
if (GPR_UNLIKELY(retry_state->recv_message_ready_deferred_batch !=
|
1861
|
+
nullptr)) {
|
1862
|
+
GRPC_CLOSURE_INIT(&retry_state->recv_message_ready,
|
1863
|
+
invoke_recv_message_callback,
|
1864
|
+
retry_state->recv_message_ready_deferred_batch,
|
1865
|
+
grpc_schedule_on_exec_ctx);
|
1866
|
+
closures->Add(&retry_state->recv_message_ready,
|
1867
|
+
retry_state->recv_message_error,
|
1868
|
+
"resuming recv_message_ready");
|
1869
|
+
retry_state->recv_message_ready_deferred_batch = nullptr;
|
1870
|
+
}
|
1871
|
+
}
|
1872
|
+
}
|
1873
|
+
|
1874
|
+
// Returns true if any op in the batch was not yet started.
|
1875
|
+
// Only looks at send ops, since recv ops are always started immediately.
|
1876
|
+
static bool pending_batch_is_unstarted(
|
1877
|
+
pending_batch* pending, call_data* calld,
|
1878
|
+
subchannel_call_retry_state* retry_state) {
|
1879
|
+
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
1880
|
+
return false;
|
1881
|
+
}
|
1882
|
+
if (pending->batch->send_initial_metadata &&
|
1883
|
+
!retry_state->started_send_initial_metadata) {
|
1884
|
+
return true;
|
1885
|
+
}
|
1886
|
+
if (pending->batch->send_message &&
|
1887
|
+
retry_state->started_send_message_count < calld->send_messages->size()) {
|
1888
|
+
return true;
|
1889
|
+
}
|
1890
|
+
if (pending->batch->send_trailing_metadata &&
|
1891
|
+
!retry_state->started_send_trailing_metadata) {
|
1892
|
+
return true;
|
1893
|
+
}
|
1894
|
+
return false;
|
1895
|
+
}
|
1896
|
+
|
1897
|
+
// For any pending batch containing an op that has not yet been started,
|
1898
|
+
// adds the pending batch's completion closures to closures.
|
1899
|
+
static void add_closures_to_fail_unstarted_pending_batches(
|
1900
|
+
grpc_call_element* elem, subchannel_call_retry_state* retry_state,
|
1901
|
+
grpc_error* error, grpc_core::CallCombinerClosureList* closures) {
|
1902
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1903
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1904
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1905
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1906
|
+
if (pending_batch_is_unstarted(pending, calld, retry_state)) {
|
1907
|
+
if (grpc_client_channel_trace.enabled()) {
|
1908
|
+
gpr_log(GPR_INFO,
|
1909
|
+
"chand=%p calld=%p: failing unstarted pending batch at index "
|
1910
|
+
"%" PRIuPTR,
|
1911
|
+
chand, calld, i);
|
1912
|
+
}
|
1913
|
+
closures->Add(pending->batch->on_complete, GRPC_ERROR_REF(error),
|
1914
|
+
"failing on_complete for pending batch");
|
1915
|
+
pending->batch->on_complete = nullptr;
|
1916
|
+
maybe_clear_pending_batch(elem, pending);
|
1917
|
+
}
|
1918
|
+
}
|
1919
|
+
GRPC_ERROR_UNREF(error);
|
1920
|
+
}
|
1921
|
+
|
1922
|
+
// Runs necessary closures upon completion of a call attempt.
|
1923
|
+
static void run_closures_for_completed_call(subchannel_batch_data* batch_data,
|
1924
|
+
grpc_error* error) {
|
1925
|
+
grpc_call_element* elem = batch_data->elem;
|
1926
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1927
|
+
subchannel_call_retry_state* retry_state =
|
1928
|
+
static_cast<subchannel_call_retry_state*>(
|
1929
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1930
|
+
batch_data->subchannel_call));
|
1931
|
+
// Construct list of closures to execute.
|
1932
|
+
grpc_core::CallCombinerClosureList closures;
|
1933
|
+
// First, add closure for recv_trailing_metadata_ready.
|
1934
|
+
add_closure_for_recv_trailing_metadata_ready(
|
1935
|
+
elem, batch_data, GRPC_ERROR_REF(error), &closures);
|
1936
|
+
// If there are deferred recv_initial_metadata_ready or recv_message_ready
|
1937
|
+
// callbacks, add them to closures.
|
1938
|
+
add_closures_for_deferred_recv_callbacks(batch_data, retry_state, &closures);
|
1939
|
+
// Add closures to fail any pending batches that have not yet been started.
|
1940
|
+
add_closures_to_fail_unstarted_pending_batches(
|
1941
|
+
elem, retry_state, GRPC_ERROR_REF(error), &closures);
|
1942
|
+
// Don't need batch_data anymore.
|
1943
|
+
batch_data_unref(batch_data);
|
1944
|
+
// Schedule all of the closures identified above.
|
1945
|
+
// Note: This will release the call combiner.
|
1946
|
+
closures.RunClosures(calld->call_combiner);
|
1947
|
+
GRPC_ERROR_UNREF(error);
|
1948
|
+
}
|
1949
|
+
|
1950
|
+
// Intercepts recv_trailing_metadata_ready callback for retries.
|
1951
|
+
// Commits the call and returns the trailing metadata up the stack.
|
1952
|
+
static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
|
1953
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1954
|
+
grpc_call_element* elem = batch_data->elem;
|
1955
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1956
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1957
|
+
if (grpc_client_channel_trace.enabled()) {
|
1958
|
+
gpr_log(GPR_INFO,
|
1959
|
+
"chand=%p calld=%p: got recv_trailing_metadata_ready, error=%s",
|
1960
|
+
chand, calld, grpc_error_string(error));
|
1961
|
+
}
|
1962
|
+
subchannel_call_retry_state* retry_state =
|
1963
|
+
static_cast<subchannel_call_retry_state*>(
|
1964
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1965
|
+
batch_data->subchannel_call));
|
1966
|
+
retry_state->completed_recv_trailing_metadata = true;
|
1967
|
+
// Get the call's status and check for server pushback metadata.
|
1968
|
+
grpc_status_code status = GRPC_STATUS_OK;
|
1969
|
+
grpc_mdelem* server_pushback_md = nullptr;
|
1970
|
+
get_call_status(batch_data, GRPC_ERROR_REF(error), &status,
|
1971
|
+
&server_pushback_md);
|
1972
|
+
if (grpc_client_channel_trace.enabled()) {
|
1973
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
|
1974
|
+
calld, grpc_status_code_to_string(status));
|
1975
|
+
}
|
1976
|
+
// Check if we should retry.
|
1977
|
+
if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
|
1978
|
+
// Unref batch_data for deferred recv_initial_metadata_ready or
|
1979
|
+
// recv_message_ready callbacks, if any.
|
1980
|
+
if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
|
1981
|
+
batch_data_unref(batch_data);
|
1982
|
+
GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
|
1983
|
+
}
|
1984
|
+
if (retry_state->recv_message_ready_deferred_batch != nullptr) {
|
1985
|
+
batch_data_unref(batch_data);
|
1986
|
+
GRPC_ERROR_UNREF(retry_state->recv_message_error);
|
1987
|
+
}
|
1988
|
+
batch_data_unref(batch_data);
|
1989
|
+
return;
|
1990
|
+
}
|
1991
|
+
// Not retrying, so commit the call.
|
1992
|
+
retry_commit(elem, retry_state);
|
1993
|
+
// Run any necessary closures.
|
1994
|
+
run_closures_for_completed_call(batch_data, GRPC_ERROR_REF(error));
|
1995
|
+
}
|
1996
|
+
|
1997
|
+
//
|
1998
|
+
// on_complete callback handling
|
1999
|
+
//
|
2000
|
+
|
2001
|
+
// Adds the on_complete closure for the pending batch completed in
|
2002
|
+
// batch_data to closures.
|
2003
|
+
static void add_closure_for_completed_pending_batch(
|
2004
|
+
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
2005
|
+
subchannel_call_retry_state* retry_state, grpc_error* error,
|
2006
|
+
grpc_core::CallCombinerClosureList* closures) {
|
2007
|
+
pending_batch* pending = pending_batch_find(
|
2008
|
+
elem, "completed", [batch_data](grpc_transport_stream_op_batch* batch) {
|
2009
|
+
// Match the pending batch with the same set of send ops as the
|
2010
|
+
// subchannel batch we've just completed.
|
2011
|
+
return batch->on_complete != nullptr &&
|
2012
|
+
batch_data->batch.send_initial_metadata ==
|
2013
|
+
batch->send_initial_metadata &&
|
2014
|
+
batch_data->batch.send_message == batch->send_message &&
|
2015
|
+
batch_data->batch.send_trailing_metadata ==
|
2016
|
+
batch->send_trailing_metadata;
|
2017
|
+
});
|
2018
|
+
// If batch_data is a replay batch, then there will be no pending
|
2019
|
+
// batch to complete.
|
2020
|
+
if (pending == nullptr) {
|
2021
|
+
GRPC_ERROR_UNREF(error);
|
2022
|
+
return;
|
1807
2023
|
}
|
2024
|
+
// Add closure.
|
2025
|
+
closures->Add(pending->batch->on_complete, error,
|
2026
|
+
"on_complete for pending batch");
|
2027
|
+
pending->batch->on_complete = nullptr;
|
2028
|
+
maybe_clear_pending_batch(elem, pending);
|
1808
2029
|
}
|
1809
2030
|
|
1810
2031
|
// If there are any cached ops to replay or pending ops to start on the
|
1811
2032
|
// subchannel call, adds a closure to closures to invoke
|
1812
|
-
// start_retriable_subchannel_batches()
|
2033
|
+
// start_retriable_subchannel_batches().
|
1813
2034
|
static void add_closures_for_replay_or_pending_send_ops(
|
1814
2035
|
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1815
|
-
subchannel_call_retry_state* retry_state,
|
1816
|
-
|
2036
|
+
subchannel_call_retry_state* retry_state,
|
2037
|
+
grpc_core::CallCombinerClosureList* closures) {
|
1817
2038
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1818
2039
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1819
2040
|
bool have_pending_send_message_ops =
|
1820
|
-
retry_state->started_send_message_count < calld->send_messages
|
2041
|
+
retry_state->started_send_message_count < calld->send_messages->size();
|
1821
2042
|
bool have_pending_send_trailing_metadata_op =
|
1822
2043
|
calld->seen_send_trailing_metadata &&
|
1823
2044
|
!retry_state->started_send_trailing_metadata;
|
@@ -1835,97 +2056,16 @@ static void add_closures_for_replay_or_pending_send_ops(
|
|
1835
2056
|
}
|
1836
2057
|
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
|
1837
2058
|
if (grpc_client_channel_trace.enabled()) {
|
1838
|
-
gpr_log(
|
2059
|
+
gpr_log(GPR_INFO,
|
1839
2060
|
"chand=%p calld=%p: starting next batch for pending send op(s)",
|
1840
2061
|
chand, calld);
|
1841
2062
|
}
|
1842
|
-
|
1843
|
-
|
1844
|
-
|
1845
|
-
|
1846
|
-
|
1847
|
-
closure->reason = "starting next batch for send_* op(s)";
|
1848
|
-
}
|
1849
|
-
}
|
1850
|
-
|
1851
|
-
// For any pending batch completed in batch_data, adds the necessary
|
1852
|
-
// completion closures to closures, updating *num_closures as needed.
|
1853
|
-
static void add_closures_for_completed_pending_batches(
|
1854
|
-
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1855
|
-
subchannel_call_retry_state* retry_state, grpc_error* error,
|
1856
|
-
closure_to_execute* closures, size_t* num_closures) {
|
1857
|
-
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1858
|
-
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1859
|
-
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1860
|
-
pending_batch* pending = &calld->pending_batches[i];
|
1861
|
-
if (pending_batch_is_completed(pending, calld, retry_state)) {
|
1862
|
-
if (grpc_client_channel_trace.enabled()) {
|
1863
|
-
gpr_log(GPR_DEBUG,
|
1864
|
-
"chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
|
1865
|
-
chand, calld, i);
|
1866
|
-
}
|
1867
|
-
// Copy the trailing metadata to return it to the surface.
|
1868
|
-
if (batch_data->batch.recv_trailing_metadata) {
|
1869
|
-
grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
|
1870
|
-
pending->batch->payload->recv_trailing_metadata
|
1871
|
-
.recv_trailing_metadata);
|
1872
|
-
}
|
1873
|
-
closure_to_execute* closure = &closures[(*num_closures)++];
|
1874
|
-
closure->closure = pending->batch->on_complete;
|
1875
|
-
closure->error = GRPC_ERROR_REF(error);
|
1876
|
-
closure->reason = "on_complete for pending batch";
|
1877
|
-
pending->batch->on_complete = nullptr;
|
1878
|
-
maybe_clear_pending_batch(elem, pending);
|
1879
|
-
}
|
1880
|
-
}
|
1881
|
-
GRPC_ERROR_UNREF(error);
|
1882
|
-
}
|
1883
|
-
|
1884
|
-
// For any pending batch containing an op that has not yet been started,
|
1885
|
-
// adds the pending batch's completion closures to closures, updating
|
1886
|
-
// *num_closures as needed.
|
1887
|
-
static void add_closures_to_fail_unstarted_pending_batches(
|
1888
|
-
grpc_call_element* elem, subchannel_call_retry_state* retry_state,
|
1889
|
-
grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
|
1890
|
-
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1891
|
-
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1892
|
-
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1893
|
-
pending_batch* pending = &calld->pending_batches[i];
|
1894
|
-
if (pending_batch_is_unstarted(pending, calld, retry_state)) {
|
1895
|
-
if (grpc_client_channel_trace.enabled()) {
|
1896
|
-
gpr_log(GPR_DEBUG,
|
1897
|
-
"chand=%p calld=%p: failing unstarted pending batch at index "
|
1898
|
-
"%" PRIuPTR,
|
1899
|
-
chand, calld, i);
|
1900
|
-
}
|
1901
|
-
if (pending->batch->recv_initial_metadata) {
|
1902
|
-
closure_to_execute* closure = &closures[(*num_closures)++];
|
1903
|
-
closure->closure = pending->batch->payload->recv_initial_metadata
|
1904
|
-
.recv_initial_metadata_ready;
|
1905
|
-
closure->error = GRPC_ERROR_REF(error);
|
1906
|
-
closure->reason =
|
1907
|
-
"failing recv_initial_metadata_ready for pending batch";
|
1908
|
-
pending->batch->payload->recv_initial_metadata
|
1909
|
-
.recv_initial_metadata_ready = nullptr;
|
1910
|
-
}
|
1911
|
-
if (pending->batch->recv_message) {
|
1912
|
-
*pending->batch->payload->recv_message.recv_message = nullptr;
|
1913
|
-
closure_to_execute* closure = &closures[(*num_closures)++];
|
1914
|
-
closure->closure =
|
1915
|
-
pending->batch->payload->recv_message.recv_message_ready;
|
1916
|
-
closure->error = GRPC_ERROR_REF(error);
|
1917
|
-
closure->reason = "failing recv_message_ready for pending batch";
|
1918
|
-
pending->batch->payload->recv_message.recv_message_ready = nullptr;
|
1919
|
-
}
|
1920
|
-
closure_to_execute* closure = &closures[(*num_closures)++];
|
1921
|
-
closure->closure = pending->batch->on_complete;
|
1922
|
-
closure->error = GRPC_ERROR_REF(error);
|
1923
|
-
closure->reason = "failing on_complete for pending batch";
|
1924
|
-
pending->batch->on_complete = nullptr;
|
1925
|
-
maybe_clear_pending_batch(elem, pending);
|
1926
|
-
}
|
2063
|
+
GRPC_CLOSURE_INIT(&batch_data->batch.handler_private.closure,
|
2064
|
+
start_retriable_subchannel_batches, elem,
|
2065
|
+
grpc_schedule_on_exec_ctx);
|
2066
|
+
closures->Add(&batch_data->batch.handler_private.closure, GRPC_ERROR_NONE,
|
2067
|
+
"starting next batch for send_* op(s)");
|
1927
2068
|
}
|
1928
|
-
GRPC_ERROR_UNREF(error);
|
1929
2069
|
}
|
1930
2070
|
|
1931
2071
|
// Callback used to intercept on_complete from subchannel calls.
|
@@ -1937,7 +2077,7 @@ static void on_complete(void* arg, grpc_error* error) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
     char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
-    gpr_log(
+    gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
             chand, calld, grpc_error_string(error), batch_str);
     gpr_free(batch_str);
   }
@@ -1945,113 +2085,50 @@ static void on_complete(void* arg, grpc_error* error) {
       static_cast<subchannel_call_retry_state*>(
           grpc_connected_subchannel_call_get_parent_data(
               batch_data->subchannel_call));
-  // If we have previously completed recv_trailing_metadata, then the
-  // call is finished.
-  bool call_finished = retry_state->completed_recv_trailing_metadata;
   // Update bookkeeping in retry_state.
-
-
-
-
-
-    }
-  } else {
-    // Check if this batch finished the call, and if so, get its status.
-    // The call is finished if either (a) this callback was invoked with
-    // an error or (b) we receive status.
-    grpc_status_code status = GRPC_STATUS_OK;
-    grpc_mdelem* server_pushback_md = nullptr;
-    if (error != GRPC_ERROR_NONE) {  // Case (a).
-      call_finished = true;
-      grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
-                            nullptr);
-    } else if (batch_data->batch.recv_trailing_metadata) {  // Case (b).
-      call_finished = true;
-      grpc_metadata_batch* md_batch =
-          batch_data->batch.payload->recv_trailing_metadata
-              .recv_trailing_metadata;
-      GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
-      status = grpc_get_status_code_from_metadata(
-          md_batch->idx.named.grpc_status->md);
-      if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
-        server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
-      }
-    } else if (retry_state->completed_recv_trailing_metadata) {
-      call_finished = true;
-    }
-    if (call_finished && grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
-              calld, grpc_status_code_to_string(status));
-    }
-    // If the call is finished, check if we should retry.
-    if (call_finished &&
-        maybe_retry(elem, batch_data, status, server_pushback_md)) {
-      // Unref batch_data for deferred recv_initial_metadata_ready or
-      // recv_message_ready callbacks, if any.
-      if (batch_data->batch.recv_trailing_metadata &&
-          retry_state->recv_initial_metadata_ready_deferred) {
-        batch_data_unref(batch_data);
-        GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
-      }
-      if (batch_data->batch.recv_trailing_metadata &&
-          retry_state->recv_message_ready_deferred) {
-        batch_data_unref(batch_data);
-        GRPC_ERROR_UNREF(retry_state->recv_message_error);
-      }
-      batch_data_unref(batch_data);
-      return;
-    }
+  if (batch_data->batch.send_initial_metadata) {
+    retry_state->completed_send_initial_metadata = true;
+  }
+  if (batch_data->batch.send_message) {
+    ++retry_state->completed_send_message_count;
   }
-
-
-
+  if (batch_data->batch.send_trailing_metadata) {
+    retry_state->completed_send_trailing_metadata = true;
+  }
+  // If the call is committed, free cached data for send ops that we've just
+  // completed.
+  if (calld->retry_committed) {
     free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
   }
-  // Call not being retried.
   // Construct list of closures to execute.
-
-  //
-  //
-  //
-
-
-
-
-
-
-
-
-
-
-  //
-
-
-
-  if (call_finished) {
-    add_closures_to_fail_unstarted_pending_batches(
-        elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
-  } else {
-    add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
-                                                closures, &num_closures);
-  }
+  grpc_core::CallCombinerClosureList closures;
+  // If a retry was already dispatched, that means we saw
+  // recv_trailing_metadata before this, so we do nothing here.
+  // Otherwise, invoke the callback to return the result to the surface.
+  if (!retry_state->retry_dispatched) {
+    // Add closure for the completed pending batch, if any.
+    add_closure_for_completed_pending_batch(elem, batch_data, retry_state,
+                                            GRPC_ERROR_REF(error), &closures);
+    // If needed, add a callback to start any replay or pending send ops on
+    // the subchannel call.
+    if (!retry_state->completed_recv_trailing_metadata) {
+      add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
                                                   &closures);
+    }
+  }
+  // Track number of pending subchannel send batches and determine if this
+  // was the last one.
+  --calld->num_pending_retriable_subchannel_send_batches;
+  const bool last_send_batch_complete =
+      calld->num_pending_retriable_subchannel_send_batches == 0;
   // Don't need batch_data anymore.
   batch_data_unref(batch_data);
   // Schedule all of the closures identified above.
-  // Note
-
-  // the
-
-
-    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
-    for (size_t i = 1; i < num_closures; ++i) {
-      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
-                               closures[i].error, closures[i].reason);
-    }
-  } else {
-    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
-                            "no closures to run for on_complete");
+  // Note: This yields the call combiner.
+  closures.RunClosures(calld->call_combiner);
+  // If this was the last subchannel send batch, unref the call stack.
+  if (last_send_batch_complete) {
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "subchannel_send_batches");
   }
 }

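
Note: the reworked on_complete() above replaces the hand-rolled closures[] array with grpc_core::CallCombinerClosureList, which batches up closures and then runs them so the call combiner is yielded exactly once. The diff only shows the call sites (Add(), size(), RunClosures()), so the sketch below is a stand-alone approximation of that pattern, not the actual gRPC class; the ClosureList type, its Entry struct, and the schedule_on_combiner callback are assumptions made for illustration.

```cpp
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Collects closures and later runs them: all but the first are handed to a
// combiner-style scheduler, and the first is invoked inline, which implicitly
// yields the combiner exactly once.
class ClosureList {
 public:
  using Closure = std::function<void()>;

  void Add(Closure c, const char* reason) {
    entries_.push_back({std::move(c), reason});
  }

  size_t size() const { return entries_.size(); }

  // schedule_on_combiner stands in for re-entering the call combiner
  // (GRPC_CALL_COMBINER_START in the real code).
  void RunClosures(const std::function<void(Closure)>& schedule_on_combiner) {
    for (size_t i = 1; i < entries_.size(); ++i) {
      std::printf("scheduling closure: %s\n", entries_[i].reason.c_str());
      schedule_on_combiner(entries_[i].closure);
    }
    if (!entries_.empty()) {
      std::printf("running closure inline: %s\n", entries_[0].reason.c_str());
      entries_[0].closure();
    }
    entries_.clear();
  }

 private:
  struct Entry {
    Closure closure;
    std::string reason;
  };
  std::vector<Entry> entries_;
};
```

This centralizes the "one closure runs in place, the rest re-enter the combiner" bookkeeping that the deleted code repeated at each call site.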
@@ -2069,6 +2146,26 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
   grpc_subchannel_call_process_op(subchannel_call, batch);
 }

+// Adds a closure to closures that will execute batch in the call combiner.
+static void add_closure_for_subchannel_batch(
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch,
+    grpc_core::CallCombinerClosureList* closures) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  batch->handler_private.extra_arg = calld->subchannel_call;
+  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+                    start_batch_in_call_combiner, batch,
+                    grpc_schedule_on_exec_ctx);
+  if (grpc_client_channel_trace.enabled()) {
+    char* batch_str = grpc_transport_stream_op_batch_string(batch);
+    gpr_log(GPR_INFO, "chand=%p calld=%p: starting subchannel batch: %s", chand,
+            calld, batch_str);
+    gpr_free(batch_str);
+  }
+  closures->Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
+                "start_subchannel_batch");
+}
+
 // Adds retriable send_initial_metadata op to batch_data.
 static void add_retriable_send_initial_metadata_op(
     call_data* calld, subchannel_call_retry_state* retry_state,
@@ -2082,30 +2179,30 @@ static void add_retriable_send_initial_metadata_op(
   //
   // If we've already completed one or more attempts, add the
   // grpc-retry-attempts header.
-
+  retry_state->send_initial_metadata_storage =
       static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
           calld->arena, sizeof(grpc_linked_mdelem) *
                             (calld->send_initial_metadata.list.count +
                              (calld->num_attempts_completed > 0))));
   grpc_metadata_batch_copy(&calld->send_initial_metadata,
-                           &
-
-  if (
-
-    grpc_metadata_batch_remove(
-
-
-  }
-  if (calld->num_attempts_completed > 0) {
+                           &retry_state->send_initial_metadata,
+                           retry_state->send_initial_metadata_storage);
+  if (GPR_UNLIKELY(retry_state->send_initial_metadata.idx.named
+                       .grpc_previous_rpc_attempts != nullptr)) {
+    grpc_metadata_batch_remove(&retry_state->send_initial_metadata,
+                               retry_state->send_initial_metadata.idx.named
+                                   .grpc_previous_rpc_attempts);
+  }
+  if (GPR_UNLIKELY(calld->num_attempts_completed > 0)) {
     grpc_mdelem retry_md = grpc_mdelem_from_slices(
         GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
         *retry_count_strings[calld->num_attempts_completed - 1]);
     grpc_error* error = grpc_metadata_batch_add_tail(
-        &
-        &
-
+        &retry_state->send_initial_metadata,
+        &retry_state->send_initial_metadata_storage[calld->send_initial_metadata
+                                                        .list.count],
         retry_md);
-    if (error != GRPC_ERROR_NONE) {
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
              grpc_error_string(error));
      GPR_ASSERT(false);
@@ -2114,7 +2211,7 @@ static void add_retriable_send_initial_metadata_op(
   retry_state->started_send_initial_metadata = true;
   batch_data->batch.send_initial_metadata = true;
   batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
-      &
+      &retry_state->send_initial_metadata;
   batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
       calld->send_initial_metadata_flags;
   batch_data->batch.payload->send_initial_metadata.peer_string =
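
Note: the hunk above copies the call's send_initial_metadata into per-attempt storage, strips any stale grpc-previous-rpc-attempts entry, and re-adds it with the current attempt count. The fragment below restates that idea over a plain std::map instead of grpc_metadata_batch; only the header name comes from the diff, everything else is an assumption for illustration.

```cpp
#include <map>
#include <string>

// Builds the metadata to send for one retry attempt.
std::map<std::string, std::string> BuildAttemptMetadata(
    const std::map<std::string, std::string>& original,
    int num_attempts_completed) {
  // Each attempt works on its own copy, since filters further down the stack
  // may modify the batch and those changes must not leak into later attempts.
  std::map<std::string, std::string> copy = original;
  // Drop any value left over from a previous attempt.
  copy.erase("grpc-previous-rpc-attempts");
  // Only attempts after the first advertise how many attempts came before.
  if (num_attempts_completed > 0) {
    copy["grpc-previous-rpc-attempts"] = std::to_string(num_attempts_completed);
  }
  return copy;
}
```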
@@ -2128,17 +2225,17 @@ static void add_retriable_send_message_op(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO,
             "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
             chand, calld, retry_state->started_send_message_count);
   }
   grpc_core::ByteStreamCache* cache =
-      calld->send_messages[retry_state->started_send_message_count];
+      (*calld->send_messages)[retry_state->started_send_message_count];
   ++retry_state->started_send_message_count;
-
+  retry_state->send_message.Init(cache);
   batch_data->batch.send_message = true;
   batch_data->batch.payload->send_message.send_message.reset(
-
+      retry_state->send_message.get());
 }

 // Adds retriable send_trailing_metadata op to batch_data.
@@ -2148,17 +2245,17 @@ static void add_retriable_send_trailing_metadata_op(
   // We need to make a copy of the metadata batch for each attempt, since
   // the filters in the subchannel stack may modify this batch, and we don't
   // want those modifications to be passed forward to subsequent attempts.
-
+  retry_state->send_trailing_metadata_storage =
       static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
           calld->arena, sizeof(grpc_linked_mdelem) *
                             calld->send_trailing_metadata.list.count));
   grpc_metadata_batch_copy(&calld->send_trailing_metadata,
-                           &
-
+                           &retry_state->send_trailing_metadata,
+                           retry_state->send_trailing_metadata_storage);
   retry_state->started_send_trailing_metadata = true;
   batch_data->batch.send_trailing_metadata = true;
   batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
-      &
+      &retry_state->send_trailing_metadata;
 }

 // Adds retriable recv_initial_metadata op to batch_data.
@@ -2167,16 +2264,16 @@ static void add_retriable_recv_initial_metadata_op(
     subchannel_batch_data* batch_data) {
   retry_state->started_recv_initial_metadata = true;
   batch_data->batch.recv_initial_metadata = true;
-  grpc_metadata_batch_init(&
+  grpc_metadata_batch_init(&retry_state->recv_initial_metadata);
   batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
-      &
+      &retry_state->recv_initial_metadata;
   batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
-      &
-  GRPC_CLOSURE_INIT(&
+      &retry_state->trailing_metadata_available;
+  GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
                     recv_initial_metadata_ready, batch_data,
                     grpc_schedule_on_exec_ctx);
   batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
-      &
+      &retry_state->recv_initial_metadata_ready;
 }

 // Adds retriable recv_message op to batch_data.
@@ -2186,11 +2283,11 @@ static void add_retriable_recv_message_op(
   ++retry_state->started_recv_message_count;
   batch_data->batch.recv_message = true;
   batch_data->batch.payload->recv_message.recv_message =
-      &
-  GRPC_CLOSURE_INIT(&
+      &retry_state->recv_message;
+  GRPC_CLOSURE_INIT(&retry_state->recv_message_ready, recv_message_ready,
                     batch_data, grpc_schedule_on_exec_ctx);
   batch_data->batch.payload->recv_message.recv_message_ready =
-      &
+      &retry_state->recv_message_ready;
 }

 // Adds retriable recv_trailing_metadata op to batch_data.
@@ -2199,12 +2296,17 @@ static void add_retriable_recv_trailing_metadata_op(
     subchannel_batch_data* batch_data) {
   retry_state->started_recv_trailing_metadata = true;
   batch_data->batch.recv_trailing_metadata = true;
-  grpc_metadata_batch_init(&
+  grpc_metadata_batch_init(&retry_state->recv_trailing_metadata);
   batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
-      &
-  batch_data->batch.collect_stats =
-
-
+      &retry_state->recv_trailing_metadata;
+  batch_data->batch.payload->recv_trailing_metadata.collect_stats =
+      &retry_state->collect_stats;
+  GRPC_CLOSURE_INIT(&retry_state->recv_trailing_metadata_ready,
+                    recv_trailing_metadata_ready, batch_data,
+                    grpc_schedule_on_exec_ctx);
+  batch_data->batch.payload->recv_trailing_metadata
+      .recv_trailing_metadata_ready =
+      &retry_state->recv_trailing_metadata_ready;
 }

 // Helper function used to start a recv_trailing_metadata batch.  This
@@ -2215,7 +2317,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO,
             "chand=%p calld=%p: call failed but recv_trailing_metadata not "
             "started; starting it internally",
             chand, calld);
@@ -2224,8 +2326,14 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
       static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
-
+  // Create batch_data with 2 refs, since this batch will be unreffed twice:
+  // once for the recv_trailing_metadata_ready callback when the subchannel
+  // batch returns, and again when we actually get a recv_trailing_metadata
+  // op from the surface.
+  subchannel_batch_data* batch_data =
+      batch_data_create(elem, 2, false /* set_on_complete */);
   add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+  retry_state->recv_trailing_metadata_internal_batch = batch_data;
   // Note: This will release the call combiner.
   grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
 }
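
Note: the new comment in start_internal_recv_trailing_metadata() explains why the batch is created with two refs: one is released by the transport's recv_trailing_metadata_ready callback, the other when the surface later sends its own recv_trailing_metadata op and claims the result. A minimal sketch of that double-ownership refcount pattern follows; the BatchData type and function names are assumptions, not the gRPC implementation.

```cpp
#include <cassert>
#include <cstdio>

// An object that is intentionally created with two references because two
// independent events will each drop one.
struct BatchData {
  int refcount;
  explicit BatchData(int initial_refs) : refcount(initial_refs) {}
};

void BatchDataUnref(BatchData* bd) {
  assert(bd->refcount > 0);
  if (--bd->refcount == 0) {
    std::printf("freeing batch data\n");
    delete bd;
  }
}

int main() {
  // Two owners: the internally started subchannel batch and the surface op.
  BatchData* bd = new BatchData(/*initial_refs=*/2);
  BatchDataUnref(bd);  // transport callback (recv_trailing_metadata_ready) fires
  BatchDataUnref(bd);  // surface recv_trailing_metadata op consumes the result
  return 0;
}
```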
@@ -2243,29 +2351,30 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
       !retry_state->started_send_initial_metadata &&
       !calld->pending_send_initial_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO,
               "chand=%p calld=%p: replaying previously completed "
               "send_initial_metadata op",
               chand, calld);
     }
-    replay_batch_data = batch_data_create(elem, 1);
+    replay_batch_data = batch_data_create(elem, 1, true /* set_on_complete */);
     add_retriable_send_initial_metadata_op(calld, retry_state,
                                            replay_batch_data);
   }
   // send_message.
   // Note that we can only have one send_message op in flight at a time.
-  if (retry_state->started_send_message_count < calld->send_messages
+  if (retry_state->started_send_message_count < calld->send_messages->size() &&
       retry_state->started_send_message_count ==
           retry_state->completed_send_message_count &&
       !calld->pending_send_message) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO,
               "chand=%p calld=%p: replaying previously completed "
               "send_message op",
               chand, calld);
     }
     if (replay_batch_data == nullptr) {
-      replay_batch_data =
+      replay_batch_data =
+          batch_data_create(elem, 1, true /* set_on_complete */);
     }
     add_retriable_send_message_op(elem, retry_state, replay_batch_data);
   }
@@ -2274,17 +2383,18 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
   // to start, since we can't send down any more send_message ops after
   // send_trailing_metadata.
   if (calld->seen_send_trailing_metadata &&
-      retry_state->started_send_message_count == calld->send_messages
+      retry_state->started_send_message_count == calld->send_messages->size() &&
       !retry_state->started_send_trailing_metadata &&
       !calld->pending_send_trailing_metadata) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO,
               "chand=%p calld=%p: replaying previously completed "
               "send_trailing_metadata op",
               chand, calld);
     }
     if (replay_batch_data == nullptr) {
-      replay_batch_data =
+      replay_batch_data =
+          batch_data_create(elem, 1, true /* set_on_complete */);
     }
     add_retriable_send_trailing_metadata_op(calld, retry_state,
                                             replay_batch_data);
@@ -2296,7 +2406,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
 // *num_batches as needed.
 static void add_subchannel_batches_for_pending_batches(
     grpc_call_element* elem, subchannel_call_retry_state* retry_state,
-
+    grpc_core::CallCombinerClosureList* closures) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     pending_batch* pending = &calld->pending_batches[i];
@@ -2325,7 +2435,7 @@ static void add_subchannel_batches_for_pending_batches(
     // send_message ops after send_trailing_metadata.
     if (batch->send_trailing_metadata &&
         (retry_state->started_send_message_count + batch->send_message <
-             calld->send_messages
+             calld->send_messages->size() ||
         retry_state->started_send_trailing_metadata)) {
       continue;
     }
@@ -2339,20 +2449,46 @@ static void add_subchannel_batches_for_pending_batches(
     }
     if (batch->recv_trailing_metadata &&
         retry_state->started_recv_trailing_metadata) {
+      // If we previously completed a recv_trailing_metadata op
+      // initiated by start_internal_recv_trailing_metadata(), use the
+      // result of that instead of trying to re-start this op.
+      if (GPR_UNLIKELY((retry_state->recv_trailing_metadata_internal_batch !=
+                        nullptr))) {
+        // If the batch completed, then trigger the completion callback
+        // directly, so that we return the previously returned results to
+        // the application.  Otherwise, just unref the internally
+        // started subchannel batch, since we'll propagate the
+        // completion when it completes.
+        if (retry_state->completed_recv_trailing_metadata) {
+          // Batches containing recv_trailing_metadata always succeed.
+          closures->Add(
+              &retry_state->recv_trailing_metadata_ready, GRPC_ERROR_NONE,
+              "re-executing recv_trailing_metadata_ready to propagate "
+              "internally triggered result");
+        } else {
+          batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
+        }
+        retry_state->recv_trailing_metadata_internal_batch = nullptr;
+      }
       continue;
     }
     // If we're not retrying, just send the batch as-is.
     if (calld->method_params == nullptr ||
         calld->method_params->retry_policy() == nullptr ||
         calld->retry_committed) {
-
+      add_closure_for_subchannel_batch(elem, batch, closures);
       pending_batch_clear(calld, pending);
       continue;
     }
     // Create batch with the right number of callbacks.
-    const
-
-
+    const bool has_send_ops = batch->send_initial_metadata ||
+                              batch->send_message ||
+                              batch->send_trailing_metadata;
+    const int num_callbacks = has_send_ops + batch->recv_initial_metadata +
+                              batch->recv_message +
+                              batch->recv_trailing_metadata;
+    subchannel_batch_data* batch_data = batch_data_create(
+        elem, num_callbacks, has_send_ops /* set_on_complete */);
     // Cache send ops if needed.
     maybe_cache_send_ops_for_batch(calld, pending);
     // send_initial_metadata.
@@ -2379,10 +2515,18 @@ static void add_subchannel_batches_for_pending_batches(
     }
     // recv_trailing_metadata.
     if (batch->recv_trailing_metadata) {
-      GPR_ASSERT(batch->collect_stats);
       add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
     }
-
+    add_closure_for_subchannel_batch(elem, &batch_data->batch, closures);
+    // Track number of pending subchannel send batches.
+    // If this is the first one, take a ref to the call stack.
+    if (batch->send_initial_metadata || batch->send_message ||
+        batch->send_trailing_metadata) {
+      if (calld->num_pending_retriable_subchannel_send_batches == 0) {
+        GRPC_CALL_STACK_REF(calld->owning_call, "subchannel_send_batches");
+      }
+      ++calld->num_pending_retriable_subchannel_send_batches;
+    }
   }
 }

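
Note: the added code above, together with the on_complete() changes earlier in this diff, keeps the call stack alive while any retriable send batch is in flight: a ref is taken when the pending count goes from zero to one, and released in on_complete() when the count returns to zero. The sketch below shows that first-in/last-out guard in isolation; the tracker class and the take_ref/drop_ref callbacks (standing in for GRPC_CALL_STACK_REF/UNREF) are assumptions.

```cpp
#include <functional>
#include <utility>

// Guards a resource with one reference held across any number of overlapping
// in-flight operations: ref on the first start, unref after the last finish.
class PendingSendBatchTracker {
 public:
  PendingSendBatchTracker(std::function<void()> take_ref,
                          std::function<void()> drop_ref)
      : take_ref_(std::move(take_ref)), drop_ref_(std::move(drop_ref)) {}

  // Called when a send batch is handed to the subchannel call.
  void OnBatchStarted() {
    if (pending_ == 0) take_ref_();  // first in-flight batch pins the call
    ++pending_;
  }

  // Called from the batch's on_complete callback.
  void OnBatchComplete() {
    if (--pending_ == 0) drop_ref_();  // last batch releases the call stack
  }

 private:
  int pending_ = 0;
  std::function<void()> take_ref_;
  std::function<void()> drop_ref_;
};
```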
@@ -2393,69 +2537,39 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
            chand, calld);
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
           grpc_connected_subchannel_call_get_parent_data(
               calld->subchannel_call));
-  //
-
-      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
-  size_t num_batches = 0;
+  // Construct list of closures to execute, one for each pending batch.
+  grpc_core::CallCombinerClosureList closures;
   // Replay previously-returned send_* ops if needed.
   subchannel_batch_data* replay_batch_data =
       maybe_create_subchannel_batch_for_replay(elem, retry_state);
   if (replay_batch_data != nullptr) {
-
+    add_closure_for_subchannel_batch(elem, &replay_batch_data->batch,
+                                     &closures);
+    // Track number of pending subchannel send batches.
+    // If this is the first one, take a ref to the call stack.
+    if (calld->num_pending_retriable_subchannel_send_batches == 0) {
+      GRPC_CALL_STACK_REF(calld->owning_call, "subchannel_send_batches");
+    }
+    ++calld->num_pending_retriable_subchannel_send_batches;
   }
   // Now add pending batches.
-  add_subchannel_batches_for_pending_batches(elem, retry_state,
-                                             &num_batches);
+  add_subchannel_batches_for_pending_batches(elem, retry_state, &closures);
   // Start batches on subchannel call.
-  // Note that the call combiner will be yielded for each batch that we
-  // send down.  We're already running in the call combiner, so one of
-  // the batches can be started directly, but the others will have to
-  // re-enter the call combiner.
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO,
            "chand=%p calld=%p: starting %" PRIuPTR
            " retriable batches on subchannel_call=%p",
-            chand, calld,
-  }
-  if (num_batches == 0) {
-    // This should be fairly rare, but it can happen when (e.g.) an
-    // attempt completes before it has finished replaying all
-    // previously sent messages.
-    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
-                            "no retriable subchannel batches to start");
-  } else {
-    for (size_t i = 1; i < num_batches; ++i) {
-      if (grpc_client_channel_trace.enabled()) {
-        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
-                calld, batch_str);
-        gpr_free(batch_str);
-      }
-      batches[i]->handler_private.extra_arg = calld->subchannel_call;
-      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
-                        start_batch_in_call_combiner, batches[i],
-                        grpc_schedule_on_exec_ctx);
-      GRPC_CALL_COMBINER_START(calld->call_combiner,
-                               &batches[i]->handler_private.closure,
-                               GRPC_ERROR_NONE, "start_subchannel_batch");
-    }
-    if (grpc_client_channel_trace.enabled()) {
-      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
-              batch_str);
-      gpr_free(batch_str);
-    }
-    // Note: This will release the call combiner.
-    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
+            chand, calld, closures.size(), calld->subchannel_call);
   }
+  // Note: This will yield the call combiner.
+  closures.RunClosures(calld->call_combiner);
 }

 //
@@ -2480,10 +2594,10 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
   grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
       call_args, &calld->subchannel_call);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
-  if (new_error != GRPC_ERROR_NONE) {
+  if (GPR_UNLIKELY(new_error != GRPC_ERROR_NONE)) {
    new_error = grpc_error_add_child(new_error, error);
    pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
   } else {
@@ -2504,7 +2618,7 @@ static void pick_done(void* arg, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  if (calld->pick.connected_subchannel == nullptr) {
+  if (GPR_UNLIKELY(calld->pick.connected_subchannel == nullptr)) {
    // Failed to create subchannel.
    // If there was no error, this is an LB policy drop, in which case
    // we return an error; otherwise, we may retry.
@@ -2521,7 +2635,7 @@ static void pick_done(void* arg, grpc_error* error) {
            : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                  "Failed to create subchannel", &error, 1);
    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO,
              "chand=%p calld=%p: failed to create subchannel: error=%s",
              chand, calld, grpc_error_string(new_error));
    }
@@ -2533,59 +2647,134 @@ static void pick_done(void* arg, grpc_error* error) {
   }
 }

+static void maybe_add_call_to_channel_interested_parties_locked(
+    grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (!calld->pollent_added_to_interested_parties) {
+    calld->pollent_added_to_interested_parties = true;
+    grpc_polling_entity_add_to_pollset_set(calld->pollent,
+                                           chand->interested_parties);
+  }
+}
+
+static void maybe_del_call_from_channel_interested_parties_locked(
+    grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (calld->pollent_added_to_interested_parties) {
+    calld->pollent_added_to_interested_parties = false;
+    grpc_polling_entity_del_from_pollset_set(calld->pollent,
+                                             chand->interested_parties);
+  }
+}
+
 // Invoked when a pick is completed to leave the client_channel combiner
 // and continue processing in the call combiner.
+// If needed, removes the call's polling entity from chand->interested_parties.
 static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
+  maybe_del_call_from_channel_interested_parties_locked(elem);
   GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
                     grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
 }

-
-// either (a) the pick was deferred pending a resolver result or (b) the
-// pick was done asynchronously.  Removes the call's polling entity from
-// chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  grpc_polling_entity_del_from_pollset_set(calld->pollent,
-                                           chand->interested_parties);
-  pick_done_locked(elem, error);
-}
+namespace grpc_core {

-//
-
-
-
-
-
-
-// in which case we will be cancelling the pick on a policy other than
-// the one we started it on. However, this will just be a no-op.
-  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
+// Performs subchannel pick via LB policy.
+class LbPicker {
+ public:
+  // Starts a pick on chand->lb_policy.
+  static void StartLocked(grpc_call_element* elem) {
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p",
              chand, calld, chand->lb_policy.get());
    }
-
+    // If this is a retry, use the send_initial_metadata payload that
+    // we've cached; otherwise, use the pending batch.  The
+    // send_initial_metadata batch will be the first pending batch in the
+    // list, as set by get_batch_index() above.
+    calld->pick.initial_metadata =
+        calld->seen_send_initial_metadata
+            ? &calld->send_initial_metadata
+            : calld->pending_batches[0]
+                  .batch->payload->send_initial_metadata.send_initial_metadata;
+    calld->pick.initial_metadata_flags =
+        calld->seen_send_initial_metadata
+            ? calld->send_initial_metadata_flags
+            : calld->pending_batches[0]
+                  .batch->payload->send_initial_metadata
+                  .send_initial_metadata_flags;
+    GRPC_CLOSURE_INIT(&calld->pick_closure, &LbPicker::DoneLocked, elem,
+                      grpc_combiner_scheduler(chand->combiner));
+    calld->pick.on_complete = &calld->pick_closure;
+    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
+    grpc_error* error = GRPC_ERROR_NONE;
+    const bool pick_done = chand->lb_policy->PickLocked(&calld->pick, &error);
+    if (GPR_LIKELY(pick_done)) {
+      // Pick completed synchronously.
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
+                chand, calld);
+      }
+      pick_done_locked(elem, error);
+      GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
+    } else {
+      // Pick will be returned asynchronously.
+      // Add the polling entity from call_data to the channel_data's
+      // interested_parties, so that the I/O of the LB policy can be done
+      // under it.  It will be removed in pick_done_locked().
+      maybe_add_call_to_channel_interested_parties_locked(elem);
+      // Request notification on call cancellation.
+      GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+      grpc_call_combiner_set_notify_on_cancel(
+          calld->call_combiner,
+          GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
+                            &LbPicker::CancelLocked, elem,
+                            grpc_combiner_scheduler(chand->combiner)));
+    }
   }
-  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
-}

-
-//
-
-
-
-
-
-
+ private:
+  // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
+  // Unrefs the LB policy and invokes pick_done_locked().
+  static void DoneLocked(void* arg, grpc_error* error) {
+    grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously",
+              chand, calld);
+    }
+    pick_done_locked(elem, GRPC_ERROR_REF(error));
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
   }
-
-
-
+
+  // Note: This runs under the client_channel combiner, but will NOT be
+  // holding the call combiner.
+  static void CancelLocked(void* arg, grpc_error* error) {
+    grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    call_data* calld = static_cast<call_data*>(elem->call_data);
+    // Note: chand->lb_policy may have changed since we started our pick,
+    // in which case we will be cancelling the pick on a policy other than
+    // the one we started it on.  However, this will just be a no-op.
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE && chand->lb_policy != nullptr)) {
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: cancelling pick from LB policy %p", chand,
+                calld, chand->lb_policy.get());
+      }
+      chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
+    }
+    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
+  }
+};
+
+}  // namespace grpc_core

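
Note: LbPicker::StartLocked() above handles both outcomes of PickLocked(): a synchronous result completes the pick immediately, while an asynchronous one registers a cancellation closure and waits for DoneLocked(). The following is a hedged, stand-alone sketch of that control flow against a toy policy interface; the Policy type, its members, and the callback parameters are assumptions, not the real LoadBalancingPolicy API.

```cpp
#include <cstdio>
#include <functional>
#include <utility>

// Toy policy: either completes a pick synchronously or stores the callback
// and completes it later.
struct Policy {
  bool complete_synchronously = true;
  std::function<void(bool ok)> pending;  // deferred completion, if any

  // Returns true if the pick finished synchronously; otherwise on_done is
  // stored and invoked later via FinishPendingPick().
  bool PickLocked(std::function<void(bool ok)> on_done) {
    if (complete_synchronously) return true;
    pending = std::move(on_done);
    return false;
  }

  void FinishPendingPick(bool ok) {
    if (pending) pending(ok);
  }
};

void StartPick(Policy* policy, const std::function<void(bool ok)>& finish_pick,
               const std::function<void()>& watch_for_cancellation) {
  const bool pick_done = policy->PickLocked([finish_pick](bool ok) {
    std::printf("pick completed asynchronously\n");
    finish_pick(ok);  // corresponds to DoneLocked() -> pick_done_locked()
  });
  if (pick_done) {
    std::printf("pick completed synchronously\n");
    finish_pick(true);
  } else {
    // Async path: arrange for cancellation, mirroring
    // grpc_call_combiner_set_notify_on_cancel() in the diff above.
    watch_for_cancellation();
  }
}

int main() {
  Policy p;
  p.complete_synchronously = false;
  StartPick(&p, [](bool ok) { std::printf("finish: ok=%d\n", ok); },
            [] { std::printf("registered cancellation watcher\n"); });
  p.FinishPendingPick(true);  // the policy later completes the pick
}
```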
 // Applies service config to the call.  Must be invoked once we know
 // that the resolver has returned results to the channel.
@@ -2593,7 +2782,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (grpc_client_channel_trace.enabled()) {
-    gpr_log(
+    gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
            chand, calld);
   }
   if (chand->retry_throttle_data != nullptr) {
@@ -2615,6 +2804,24 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
        grpc_deadline_state_reset(elem, calld->deadline);
      }
    }
+    // If the service config set wait_for_ready and the application
+    // did not explicitly set it, use the value from the service config.
+    uint32_t* send_initial_metadata_flags =
+        &calld->pending_batches[0]
+             .batch->payload->send_initial_metadata
+             .send_initial_metadata_flags;
+    if (GPR_UNLIKELY(
+            calld->method_params->wait_for_ready() !=
+                ClientChannelMethodParams::WAIT_FOR_READY_UNSET &&
+            !(*send_initial_metadata_flags &
+              GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET))) {
+      if (calld->method_params->wait_for_ready() ==
+          ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
+        *send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      } else {
+        *send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      }
+    }
   }
 }
   // If no retry policy, disable retries.
|
|
2625
2832
|
}
|
2626
2833
|
}
|
2627
2834
|
|
2628
|
-
//
|
2629
|
-
|
2630
|
-
|
2631
|
-
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2835
|
+
// Invoked once resolver results are available.
|
2836
|
+
static void process_service_config_and_start_lb_pick_locked(
|
2837
|
+
grpc_call_element* elem) {
|
2632
2838
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2633
|
-
if (grpc_client_channel_trace.enabled()) {
|
2634
|
-
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
|
2635
|
-
chand, calld, chand->lb_policy.get());
|
2636
|
-
}
|
2637
2839
|
// Only get service config data on the first attempt.
|
2638
|
-
if (calld->num_attempts_completed == 0) {
|
2840
|
+
if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
|
2639
2841
|
apply_service_config_to_call_locked(elem);
|
2640
2842
|
}
|
2641
|
-
//
|
2642
|
-
|
2643
|
-
// method, use that.
|
2644
|
-
//
|
2645
|
-
// The send_initial_metadata batch will be the first one in the list,
|
2646
|
-
// as set by get_batch_index() above.
|
2647
|
-
calld->pick.initial_metadata =
|
2648
|
-
calld->seen_send_initial_metadata
|
2649
|
-
? &calld->send_initial_metadata
|
2650
|
-
: calld->pending_batches[0]
|
2651
|
-
.batch->payload->send_initial_metadata.send_initial_metadata;
|
2652
|
-
uint32_t send_initial_metadata_flags =
|
2653
|
-
calld->seen_send_initial_metadata
|
2654
|
-
? calld->send_initial_metadata_flags
|
2655
|
-
: calld->pending_batches[0]
|
2656
|
-
.batch->payload->send_initial_metadata
|
2657
|
-
.send_initial_metadata_flags;
|
2658
|
-
const bool wait_for_ready_set_from_api =
|
2659
|
-
send_initial_metadata_flags &
|
2660
|
-
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
|
2661
|
-
const bool wait_for_ready_set_from_service_config =
|
2662
|
-
calld->method_params != nullptr &&
|
2663
|
-
calld->method_params->wait_for_ready() !=
|
2664
|
-
ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
|
2665
|
-
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
|
2666
|
-
if (calld->method_params->wait_for_ready() ==
|
2667
|
-
ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
|
2668
|
-
send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
|
2669
|
-
} else {
|
2670
|
-
send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
|
2671
|
-
}
|
2672
|
-
}
|
2673
|
-
calld->pick.initial_metadata_flags = send_initial_metadata_flags;
|
2674
|
-
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
|
2675
|
-
grpc_combiner_scheduler(chand->combiner));
|
2676
|
-
calld->pick.on_complete = &calld->pick_closure;
|
2677
|
-
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
|
2678
|
-
const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
|
2679
|
-
if (pick_done) {
|
2680
|
-
// Pick completed synchronously.
|
2681
|
-
if (grpc_client_channel_trace.enabled()) {
|
2682
|
-
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
|
2683
|
-
chand, calld);
|
2684
|
-
}
|
2685
|
-
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
|
2686
|
-
} else {
|
2687
|
-
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
|
2688
|
-
grpc_call_combiner_set_notify_on_cancel(
|
2689
|
-
calld->call_combiner,
|
2690
|
-
GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
|
2691
|
-
pick_callback_cancel_locked, elem,
|
2692
|
-
grpc_combiner_scheduler(chand->combiner)));
|
2693
|
-
}
|
2694
|
-
return pick_done;
|
2843
|
+
// Start LB pick.
|
2844
|
+
grpc_core::LbPicker::StartLocked(elem);
|
2695
2845
|
}
|
2696
2846
|
|
2697
|
-
|
2698
|
-
|
2699
|
-
|
2700
|
-
|
2701
|
-
|
2702
|
-
|
2703
|
-
|
2704
|
-
|
2705
|
-
|
2706
|
-
static void pick_after_resolver_result_cancel_locked(void* arg,
|
2707
|
-
grpc_error* error) {
|
2708
|
-
pick_after_resolver_result_args* args =
|
2709
|
-
static_cast<pick_after_resolver_result_args*>(arg);
|
2710
|
-
if (args->finished) {
|
2711
|
-
gpr_free(args);
|
2712
|
-
return;
|
2713
|
-
}
|
2714
|
-
// If we don't yet have a resolver result, then a closure for
|
2715
|
-
// pick_after_resolver_result_done_locked() will have been added to
|
2716
|
-
// chand->waiting_for_resolver_result_closures, and it may not be invoked
|
2717
|
-
// until after this call has been destroyed. We mark the operation as
|
2718
|
-
// finished, so that when pick_after_resolver_result_done_locked()
|
2719
|
-
// is called, it will be a no-op. We also immediately invoke
|
2720
|
-
// async_pick_done_locked() to propagate the error back to the caller.
|
2721
|
-
args->finished = true;
|
2722
|
-
grpc_call_element* elem = args->elem;
|
2723
|
-
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2724
|
-
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2725
|
-
if (grpc_client_channel_trace.enabled()) {
|
2726
|
-
gpr_log(GPR_DEBUG,
|
2727
|
-
"chand=%p calld=%p: cancelling pick waiting for resolver result",
|
2728
|
-
chand, calld);
|
2729
|
-
}
|
2730
|
-
// Note: Although we are not in the call combiner here, we are
|
2731
|
-
// basically stealing the call combiner from the pending pick, so
|
2732
|
-
// it's safe to call async_pick_done_locked() here -- we are
|
2733
|
-
// essentially calling it here instead of calling it in
|
2734
|
-
// pick_after_resolver_result_done_locked().
|
2735
|
-
async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
2736
|
-
"Pick cancelled", &error, 1));
|
2737
|
-
}
|
2738
|
-
|
2739
|
-
static void pick_after_resolver_result_done_locked(void* arg,
|
2740
|
-
grpc_error* error) {
|
2741
|
-
pick_after_resolver_result_args* args =
|
2742
|
-
static_cast<pick_after_resolver_result_args*>(arg);
|
2743
|
-
if (args->finished) {
|
2744
|
-
/* cancelled, do nothing */
|
2745
|
-
if (grpc_client_channel_trace.enabled()) {
|
2746
|
-
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
|
2747
|
-
}
|
2748
|
-
gpr_free(args);
|
2749
|
-
return;
|
2750
|
-
}
|
2751
|
-
args->finished = true;
|
2752
|
-
grpc_call_element* elem = args->elem;
|
2753
|
-
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2754
|
-
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2755
|
-
if (error != GRPC_ERROR_NONE) {
|
2847
|
+
namespace grpc_core {
|
2848
|
+
|
2849
|
+
// Handles waiting for a resolver result.
|
2850
|
+
// Used only for the first call on an idle channel.
|
2851
|
+
class ResolverResultWaiter {
|
2852
|
+
public:
|
2853
|
+
explicit ResolverResultWaiter(grpc_call_element* elem) : elem_(elem) {
|
2854
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2855
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2756
2856
|
if (grpc_client_channel_trace.enabled()) {
|
2757
|
-
gpr_log(
|
2857
|
+
gpr_log(GPR_INFO,
|
2858
|
+
"chand=%p calld=%p: deferring pick pending resolver result",
|
2758
2859
|
chand, calld);
|
2759
2860
|
}
|
2760
|
-
|
2761
|
-
|
2762
|
-
|
2763
|
-
|
2764
|
-
|
2765
|
-
|
2861
|
+
// Add closure to be run when a resolver result is available.
|
2862
|
+
GRPC_CLOSURE_INIT(&done_closure_, &ResolverResultWaiter::DoneLocked, this,
|
2863
|
+
grpc_combiner_scheduler(chand->combiner));
|
2864
|
+
AddToWaitingList();
|
2865
|
+
// Set cancellation closure, so that we abort if the call is cancelled.
|
2866
|
+
GRPC_CLOSURE_INIT(&cancel_closure_, &ResolverResultWaiter::CancelLocked,
|
2867
|
+
this, grpc_combiner_scheduler(chand->combiner));
|
2868
|
+
grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
|
2869
|
+
&cancel_closure_);
|
2870
|
+
}
|
2871
|
+
|
2872
|
+
private:
|
2873
|
+
// Adds closure_ to chand->waiting_for_resolver_result_closures.
|
2874
|
+
void AddToWaitingList() {
|
2875
|
+
channel_data* chand = static_cast<channel_data*>(elem_->channel_data);
|
2876
|
+
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
|
2877
|
+
&done_closure_, GRPC_ERROR_NONE);
|
2878
|
+
}
|
2879
|
+
|
2880
|
+
// Invoked when a resolver result is available.
|
2881
|
+
static void DoneLocked(void* arg, grpc_error* error) {
|
2882
|
+
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
|
2883
|
+
// If CancelLocked() has already run, delete ourselves without doing
|
2884
|
+
// anything. Note that the call stack may have already been destroyed,
|
2885
|
+
// so it's not safe to access anything in elem_.
|
2886
|
+
if (GPR_UNLIKELY(self->finished_)) {
|
2887
|
+
if (grpc_client_channel_trace.enabled()) {
|
2888
|
+
gpr_log(GPR_INFO, "call cancelled before resolver result");
|
2889
|
+
}
|
2890
|
+
Delete(self);
|
2891
|
+
return;
|
2766
2892
|
}
|
2767
|
-
|
2768
|
-
|
2769
|
-
|
2770
|
-
|
2771
|
-
|
2772
|
-
uint32_t send_initial_metadata_flags =
|
2773
|
-
calld->seen_send_initial_metadata
|
2774
|
-
? calld->send_initial_metadata_flags
|
2775
|
-
: calld->pending_batches[0]
|
2776
|
-
.batch->payload->send_initial_metadata
|
2777
|
-
.send_initial_metadata_flags;
|
2778
|
-
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
|
2893
|
+
// Otherwise, process the resolver result.
|
2894
|
+
grpc_call_element* elem = self->elem_;
|
2895
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2896
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2897
|
+
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
|
2779
2898
|
if (grpc_client_channel_trace.enabled()) {
|
2780
|
-
gpr_log(
|
2781
|
-
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2782
|
-
"wait_for_ready=true; trying again",
|
2899
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
|
2783
2900
|
chand, calld);
|
2784
2901
|
}
|
2785
|
-
|
2902
|
+
pick_done_locked(elem, GRPC_ERROR_REF(error));
|
2903
|
+
} else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
|
2904
|
+
// Shutting down.
|
2905
|
+
if (grpc_client_channel_trace.enabled()) {
|
2906
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
|
2907
|
+
calld);
|
2908
|
+
}
|
2909
|
+
pick_done_locked(elem,
|
2910
|
+
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
|
2911
|
+
} else if (GPR_UNLIKELY(chand->lb_policy == nullptr)) {
|
2912
|
+
// Transient resolver failure.
|
2913
|
+
// If call has wait_for_ready=true, try again; otherwise, fail.
|
2914
|
+
uint32_t send_initial_metadata_flags =
|
2915
|
+
calld->seen_send_initial_metadata
|
2916
|
+
? calld->send_initial_metadata_flags
|
2917
|
+
: calld->pending_batches[0]
|
2918
|
+
.batch->payload->send_initial_metadata
|
2919
|
+
.send_initial_metadata_flags;
|
2920
|
+
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
|
2921
|
+
if (grpc_client_channel_trace.enabled()) {
|
2922
|
+
gpr_log(GPR_INFO,
|
2923
|
+
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2924
|
+
"wait_for_ready=true; trying again",
|
2925
|
+
chand, calld);
|
2926
|
+
}
|
2927
|
+
// Re-add ourselves to the waiting list.
|
2928
|
+
self->AddToWaitingList();
|
2929
|
+
// Return early so that we don't set finished_ to true below.
|
2930
|
+
return;
|
2931
|
+
} else {
|
2932
|
+
if (grpc_client_channel_trace.enabled()) {
|
2933
|
+
gpr_log(GPR_INFO,
|
2934
|
+
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2935
|
+
"wait_for_ready=false; failing",
|
2936
|
+
chand, calld);
|
2937
|
+
}
|
2938
|
+
pick_done_locked(
|
2939
|
+
elem,
|
2940
|
+
grpc_error_set_int(
|
2941
|
+
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
|
2942
|
+
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
|
2943
|
+
}
|
2786
2944
|
} else {
|
2787
2945
|
if (grpc_client_channel_trace.enabled()) {
|
2788
|
-
gpr_log(
|
2789
|
-
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2790
|
-
"wait_for_ready=false; failing",
|
2946
|
+
gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing LB pick",
|
2791
2947
|
chand, calld);
|
2792
2948
|
}
|
2793
|
-
|
2794
|
-
elem,
|
2795
|
-
grpc_error_set_int(
|
2796
|
-
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
|
2797
|
-
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
|
2949
|
+
process_service_config_and_start_lb_pick_locked(elem);
|
2798
2950
|
}
|
2799
|
-
|
2800
|
-
|
2801
|
-
|
2802
|
-
|
2951
|
+
self->finished_ = true;
|
2952
|
+
}
|
2953
|
+
|
2954
|
+
// Invoked when the call is cancelled.
|
2955
|
+
// Note: This runs under the client_channel combiner, but will NOT be
|
2956
|
+
// holding the call combiner.
|
2957
|
+
static void CancelLocked(void* arg, grpc_error* error) {
|
2958
|
+
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
|
2959
|
+
// If DoneLocked() has already run, delete ourselves without doing anything.
|
2960
|
+
if (GPR_LIKELY(self->finished_)) {
|
2961
|
+
Delete(self);
|
2962
|
+
return;
|
2803
2963
|
}
|
2804
|
-
|
2805
|
-
|
2806
|
-
|
2807
|
-
|
2808
|
-
|
2809
|
-
|
2810
|
-
|
2964
|
+
// If we are being cancelled, immediately invoke pick_done_locked()
|
2965
|
+
// to propagate the error back to the caller.
|
2966
|
+
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
|
2967
|
+
grpc_call_element* elem = self->elem_;
|
2968
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2969
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2970
|
+
if (grpc_client_channel_trace.enabled()) {
|
2971
|
+
gpr_log(GPR_INFO,
|
2972
|
+
"chand=%p calld=%p: cancelling call waiting for name "
|
2973
|
+
"resolution",
|
2974
|
+
chand, calld);
|
2975
|
+
}
|
2976
|
+
// Note: Although we are not in the call combiner here, we are
|
2977
|
+
// basically stealing the call combiner from the pending pick, so
|
2978
|
+
// it's safe to call pick_done_locked() here -- we are essentially
|
2979
|
+
// calling it here instead of calling it in DoneLocked().
|
2980
|
+
pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
2981
|
+
"Pick cancelled", &error, 1));
|
2811
2982
|
}
|
2983
|
+
self->finished_ = true;
|
2812
2984
|
}
|
2813
|
-
}
|
2814
2985
|
|
2815
|
-
|
2816
|
-
|
2817
|
-
|
2818
|
-
|
2819
|
-
|
2820
|
-
|
2821
|
-
|
2822
|
-
}
|
2823
|
-
pick_after_resolver_result_args* args =
|
2824
|
-
static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
|
2825
|
-
args->elem = elem;
|
2826
|
-
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
|
2827
|
-
args, grpc_combiner_scheduler(chand->combiner));
|
2828
|
-
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
|
2829
|
-
&args->closure, GRPC_ERROR_NONE);
|
2830
|
-
grpc_call_combiner_set_notify_on_cancel(
|
2831
|
-
calld->call_combiner,
|
2832
|
-
GRPC_CLOSURE_INIT(&args->cancel_closure,
|
2833
|
-
pick_after_resolver_result_cancel_locked, args,
|
2834
|
-
grpc_combiner_scheduler(chand->combiner)));
|
2835
|
-
}
|
2986
|
+
grpc_call_element* elem_;
|
2987
|
+
grpc_closure done_closure_;
|
2988
|
+
grpc_closure cancel_closure_;
|
2989
|
+
bool finished_ = false;
|
2990
|
+
};
|
2991
|
+
|
2992
|
+
} // namespace grpc_core
|
2836
2993
|
|
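
Note: ResolverResultWaiter above registers two closures, DoneLocked() and CancelLocked(); whichever runs second observes finished_ == true and deletes the object, and a wait_for_ready retry re-adds itself to the waiting list instead of finishing. The fragment below sketches only that two-closure handshake; the Waiter class, its members, and the single-threaded assumption (the real code serializes through the client_channel combiner) are simplifications made for illustration.

```cpp
#include <cstdio>

// Self-deleting waiter: both callbacks always run eventually; the first one
// marks finished_, the second one deletes the waiter.
class Waiter {
 public:
  static void Done(Waiter* self, bool resolver_ok) {
    if (self->finished_) {  // cancellation already handled the call
      delete self;
      return;
    }
    std::printf("resolver result: %s\n", resolver_ok ? "ok" : "error");
    self->finished_ = true;
  }

  static void Cancel(Waiter* self, bool cancelled) {
    if (self->finished_) {  // Done() already ran; just clean up
      delete self;
      return;
    }
    if (cancelled) std::printf("call cancelled while waiting for resolver\n");
    self->finished_ = true;
  }

 private:
  bool finished_ = false;
};

int main() {
  // Order of the two callbacks does not matter for cleanup.
  Waiter* w = new Waiter();
  Waiter::Done(w, /*resolver_ok=*/true);   // first: handles the result
  Waiter::Cancel(w, /*cancelled=*/false);  // second: sees finished_, deletes
}
```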
 static void start_pick_locked(void* arg, grpc_error* ignored) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
@@ -2840,32 +2997,25 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
   GPR_ASSERT(calld->subchannel_call == nullptr);
-  if (chand->lb_policy != nullptr) {
-    // We already have
- [5 removed lines (old 2845-2849) whose text is not preserved in this extract]
+  if (GPR_LIKELY(chand->lb_policy != nullptr)) {
+    // We already have resolver results, so process the service config
+    // and start an LB pick.
+    process_service_config_and_start_lb_pick_locked(elem);
+  } else if (GPR_UNLIKELY(chand->resolver == nullptr)) {
+    pick_done_locked(elem,
+                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
   } else {
     // We do not yet have an LB policy, so wait for a resolver result.
-    if (chand->
-      pick_done_locked(elem,
-                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
-      return;
-    }
-    if (!chand->started_resolving) {
+    if (GPR_UNLIKELY(!chand->started_resolving)) {
       start_resolving_locked(chand);
     }
-
+    // Create a new waiter, which will delete itself when done.
+    grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
+    // Add the polling entity from call_data to the channel_data's
+    // interested_parties, so that the I/O of the resolver can be done
+    // under it. It will be removed in pick_done_locked().
+    maybe_add_call_to_channel_interested_parties_locked(elem);
   }
-  // We need to wait for either a resolver result or for an async result
-  // from the LB policy. Add the polling entity from call_data to the
-  // channel_data's interested_parties, so that the I/O of the LB policy
-  // and resolver can be done under it. The polling entity will be
-  // removed in async_pick_done_locked().
-  grpc_polling_entity_add_to_pollset_set(calld->pollent,
-                                         chand->interested_parties);
 }

 //
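Many hunks in this file only wrap existing conditions in GPR_LIKELY / GPR_UNLIKELY. Those macros come from include/grpc/impl/codegen/port_platform.h (also updated in this release); on GCC and Clang they are thin wrappers around __builtin_expect, and elsewhere they reduce to the bare expression. The sketch below uses stand-in macro names rather than the exact gRPC definitions, which may differ in spelling:

#include <cstdio>

// Assumed stand-ins for the branch-prediction hints used above.
#if defined(__GNUC__) || defined(__clang__)
#define LIKELY(x) (__builtin_expect(!!(x), 1))
#define UNLIKELY(x) (__builtin_expect(!!(x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif

static int handle(int err) {
  if (UNLIKELY(err != 0)) {  // hint: the error path is cold
    std::fprintf(stderr, "error %d\n", err);
    return err;
  }
  std::puts("fast path");    // hint: the success path is hot
  return 0;
}

int main() { return handle(0); }

The hints only influence code layout and static branch prediction; behavior is unchanged, which is why they can be applied throughout this file without touching the logic.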
@@ -2877,13 +3027,13 @@ static void cc_start_transport_stream_op_batch(
   GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
     grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
   }
   // If we've previously been cancelled, immediately fail any new batches.
-  if (calld->cancel_error != GRPC_ERROR_NONE) {
+  if (GPR_UNLIKELY(calld->cancel_error != GRPC_ERROR_NONE)) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
               chand, calld, grpc_error_string(calld->cancel_error));
     }
     // Note: This will release the call combiner.
@@ -2892,7 +3042,7 @@ static void cc_start_transport_stream_op_batch(
     return;
   }
   // Handle cancellation.
-  if (batch->cancel_stream) {
+  if (GPR_UNLIKELY(batch->cancel_stream)) {
     // Stash a copy of cancel_error in our call data, so that we can use
     // it for subsequent operations. This ensures that if the call is
     // cancelled before any batches are passed down (e.g., if the deadline
@@ -2902,7 +3052,7 @@ static void cc_start_transport_stream_op_batch(
     calld->cancel_error =
         GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
               calld, grpc_error_string(calld->cancel_error));
     }
     // If we do not have a subchannel call (i.e., a pick has not yet
@@ -2928,7 +3078,7 @@ static void cc_start_transport_stream_op_batch(
   // streaming calls).
   if (calld->subchannel_call != nullptr) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO,
               "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
               calld, calld->subchannel_call);
     }
@@ -2938,9 +3088,9 @@ static void cc_start_transport_stream_op_batch(
   // We do not yet have a subchannel call.
   // For batches containing a send_initial_metadata op, enter the channel
   // combiner to start a pick.
-  if (batch->send_initial_metadata) {
+  if (GPR_LIKELY(batch->send_initial_metadata)) {
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
+      gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
               chand, calld);
     }
     GRPC_CLOSURE_SCHED(
@@ -2950,8 +3100,8 @@ static void cc_start_transport_stream_op_batch(
   } else {
     // For all other batches, release the call combiner.
     if (grpc_client_channel_trace.enabled()) {
-      gpr_log(
-          "chand=%p calld=%p: saved batch,
+      gpr_log(GPR_INFO,
+              "chand=%p calld=%p: saved batch, yielding call combiner", chand,
               calld);
     }
     GRPC_CALL_COMBINER_STOP(calld->call_combiner,
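The cc_start_transport_stream_op_batch hunks above keep the existing control flow; they add branch hints and give every gpr_log call an explicit GPR_INFO severity. The recurring pattern is that the first cancel_stream batch stashes its error in call_data, and every batch that arrives afterwards is failed immediately with a reference to that stored error instead of being forwarded. A simplified sketch of that idea with invented types, not the real grpc_transport_stream_op_batch or grpc_error machinery:

#include <cstdio>
#include <memory>
#include <string>

struct Call {
  std::shared_ptr<const std::string> cancel_error;  // null until cancelled

  void Cancel(std::string reason) {
    if (!cancel_error) {
      cancel_error = std::make_shared<const std::string>(std::move(reason));
    }
  }

  // Returns false (and reports the stored error) once the call is cancelled.
  bool StartBatch(const char* what) {
    if (cancel_error) {  // previously cancelled: fail the new batch up front
      std::printf("failing %s: %s\n", what, cancel_error->c_str());
      return false;
    }
    std::printf("forwarding %s\n", what);
    return true;
  }
};

int main() {
  Call call;
  call.StartBatch("send_initial_metadata");
  call.Cancel("deadline exceeded");
  call.StartBatch("send_message");  // fails with the stashed error
}

Keeping the error itself around, rather than only a boolean, lets later failures report the original cancellation reason.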
@@ -2971,11 +3121,12 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
   calld->arena = args->arena;
   calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
     grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                              calld->deadline);
   }
   calld->enable_retries = chand->enable_retries;
+  calld->send_messages.Init();
   return GRPC_ERROR_NONE;
 }

@@ -2985,14 +3136,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
                                  grpc_closure* then_schedule_closure) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  if (chand->deadline_checking_enabled) {
+  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
     grpc_deadline_state_destroy(elem);
   }
   grpc_slice_unref_internal(calld->path);
   calld->retry_throttle_data.reset();
   calld->method_params.reset();
   GRPC_ERROR_UNREF(calld->cancel_error);
-  if (calld->subchannel_call != nullptr) {
+  if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
     grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = nullptr;
@@ -3002,7 +3153,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
     GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
   }
-  if (calld->pick.connected_subchannel != nullptr) {
+  if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
     calld->pick.connected_subchannel.reset();
   }
   for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
@@ -3011,6 +3162,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
                 calld->pick.subchannel_call_context[i].value);
     }
   }
+  calld->send_messages.Destroy();
   GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }

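cc_init_call_elem() and cc_destroy_call_elem() now bracket the call with calld->send_messages.Init() and .Destroy(). call_data lives in arena memory that is merely zero-initialized, so members with non-trivial constructors are wrapped in a manual-construction helper whose constructor and destructor are run explicitly at element init and destroy time. The helper and element type below are illustrative assumptions (gRPC has its own grpc_core::ManualConstructor template, and the element type of send_messages is not visible in this diff):

#include <new>
#include <vector>

// Generic sketch of the Init()/Destroy() idiom; names are mine.
template <typename T>
class Manual {
 public:
  template <typename... Args>
  void Init(Args&&... args) {
    new (&storage_) T(static_cast<Args&&>(args)...);  // placement new
  }
  void Destroy() { get()->~T(); }  // explicit destructor call
  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};

struct call_data {                  // imagine this comes from gpr_zalloc()
  Manual<std::vector<int>> send_messages;
};

int main() {
  call_data calld{};                // zero-initialized, ctor not yet run
  calld.send_messages.Init();       // cc_init_call_elem
  calld.send_messages.get()->push_back(1);
  calld.send_messages.Destroy();    // cc_destroy_call_elem
}

The explicit Destroy() in the element destructor mirrors the Init() call, so the member's lifetime exactly matches the call element's.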
@@ -3051,6 +3203,16 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
   GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
 }

+void grpc_client_channel_populate_child_refs(
+    grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
+    grpc_core::ChildRefsList* child_channels) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  if (chand->lb_policy != nullptr) {
+    chand->lb_policy->FillChildRefsForChannelz(child_subchannels,
+                                               child_channels);
+  }
+}
+
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
     grpc_channel_element* elem, int try_to_connect) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
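The new grpc_client_channel_populate_child_refs() above simply hands the two channelz output lists to the currently installed LB policy via FillChildRefsForChannelz(), which ties in with the client_channel_channelz.{h,cc} files added in this release. A rough sketch of that delegation with simplified types; ChildRefsList is modeled here as a plain vector of ids, which is an assumption, not the real grpc_core type:

#include <cstdint>
#include <vector>

using ChildRefsList = std::vector<intptr_t>;  // channelz uuids, roughly

class LoadBalancingPolicy {
 public:
  virtual ~LoadBalancingPolicy() = default;
  virtual void FillChildRefsForChannelz(ChildRefsList* subchannels,
                                        ChildRefsList* channels) = 0;
};

class RoundRobinish : public LoadBalancingPolicy {
 public:
  void FillChildRefsForChannelz(ChildRefsList* subchannels,
                                ChildRefsList* /*channels*/) override {
    subchannels->push_back(42);  // pretend uuid of one subchannel
  }
};

struct channel_data {
  LoadBalancingPolicy* lb_policy = nullptr;
};

void populate_child_refs(channel_data* chand, ChildRefsList* subchannels,
                         ChildRefsList* channels) {
  if (chand->lb_policy != nullptr) {
    chand->lb_policy->FillChildRefsForChannelz(subchannels, channels);
  }
}

int main() {
  RoundRobinish lb;
  channel_data chand{&lb};
  ChildRefsList subs, chans;
  populate_child_refs(&chand, &subs, &chans);
}

When no LB policy is installed yet, the lists are simply left empty, matching the nullptr check in the diff.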
@@ -3149,7 +3311,7 @@ static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
                            "external_connectivity_watcher");
   external_connectivity_watcher_list_remove(w->chand, w);
   gpr_free(w);
- [1 removed line (old 3152) whose text is not preserved in this extract]
+  GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
 }

 static void watch_connectivity_state_locked(void* arg,
@@ -3159,6 +3321,8 @@ static void watch_connectivity_state_locked(void* arg,
   external_connectivity_watcher* found = nullptr;
   if (w->state != nullptr) {
     external_connectivity_watcher_list_append(w->chand, w);
+    // An assumption is being made that the closure is scheduled on the exec ctx
+    // scheduler and that GRPC_CLOSURE_RUN would run the closure immediately.
     GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
     GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
                       grpc_combiner_scheduler(w->chand->combiner));
|