grpc 1.10.0 → 1.11.0.pre2
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of grpc might be problematic; click here for more details.
- checksums.yaml +4 -4
- data/Makefile +2098 -501
- data/include/grpc/byte_buffer.h +2 -0
- data/include/grpc/byte_buffer_reader.h +2 -0
- data/include/grpc/census.h +2 -0
- data/include/grpc/fork.h +2 -0
- data/include/grpc/grpc.h +10 -0
- data/include/grpc/grpc_cronet.h +2 -0
- data/include/grpc/grpc_posix.h +2 -1
- data/include/grpc/grpc_security.h +21 -0
- data/include/grpc/grpc_security_constants.h +1 -0
- data/include/grpc/impl/codegen/byte_buffer.h +2 -0
- data/include/grpc/impl/codegen/grpc_types.h +24 -0
- data/include/grpc/impl/codegen/slice.h +1 -1
- data/include/grpc/impl/codegen/sync.h +1 -0
- data/include/grpc/impl/codegen/sync_custom.h +2 -0
- data/include/grpc/impl/codegen/sync_generic.h +2 -0
- data/include/grpc/impl/codegen/sync_posix.h +2 -0
- data/include/grpc/impl/codegen/sync_windows.h +2 -0
- data/include/grpc/slice.h +2 -0
- data/include/grpc/slice_buffer.h +2 -0
- data/include/grpc/status.h +2 -0
- data/include/grpc/support/alloc.h +2 -2
- data/include/grpc/support/atm.h +2 -0
- data/include/grpc/support/atm_gcc_atomic.h +2 -0
- data/include/grpc/support/atm_gcc_sync.h +2 -0
- data/include/grpc/support/atm_windows.h +2 -0
- data/include/grpc/support/log.h +1 -1
- data/include/grpc/support/sync.h +2 -0
- data/include/grpc/support/sync_custom.h +2 -0
- data/include/grpc/support/sync_generic.h +2 -0
- data/include/grpc/support/sync_posix.h +2 -0
- data/include/grpc/support/sync_windows.h +2 -0
- data/include/grpc/support/time.h +2 -0
- data/src/boringssl/err_data.c +444 -438
- data/src/core/ext/census/grpc_context.cc +2 -0
- data/src/core/ext/filters/client_channel/backup_poller.cc +13 -8
- data/src/core/ext/filters/client_channel/backup_poller.h +3 -2
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +2 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +1988 -433
- data/src/core/ext/filters/client_channel/client_channel.h +2 -0
- data/src/core/ext/filters/client_channel/client_channel_factory.cc +2 -0
- data/src/core/ext/filters/client_channel/client_channel_factory.h +2 -0
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +2 -27
- data/src/core/ext/filters/client_channel/connector.cc +2 -0
- data/src/core/ext/filters/client_channel/connector.h +2 -0
- data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +2 -0
- data/src/core/ext/filters/client_channel/http_proxy.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy.h +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +96 -78
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +9 -17
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +70 -62
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +4 -2
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +4 -2
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +2 -0
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +3 -1
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +2 -1
- data/src/core/ext/filters/client_channel/lb_policy_registry.cc +2 -0
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +2 -1
- data/src/core/ext/filters/client_channel/method_params.cc +178 -0
- data/src/core/ext/filters/client_channel/method_params.h +74 -0
- data/src/core/ext/filters/client_channel/parse_address.cc +17 -13
- data/src/core/ext/filters/client_channel/parse_address.h +2 -0
- data/src/core/ext/filters/client_channel/proxy_mapper.cc +2 -0
- data/src/core/ext/filters/client_channel/proxy_mapper.h +2 -0
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +2 -0
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +2 -0
- data/src/core/ext/filters/client_channel/resolver.cc +2 -0
- data/src/core/ext/filters/client_channel/resolver.h +6 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +24 -5
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +2 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +1 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +55 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +8 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +1 -0
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +30 -3
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +7 -0
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +2 -1
- data/src/core/ext/filters/client_channel/resolver_factory.h +2 -0
- data/src/core/ext/filters/client_channel/resolver_registry.cc +2 -0
- data/src/core/ext/filters/client_channel/resolver_registry.h +2 -0
- data/src/core/ext/filters/client_channel/retry_throttle.cc +102 -120
- data/src/core/ext/filters/client_channel/retry_throttle.h +52 -25
- data/src/core/ext/filters/client_channel/subchannel.cc +14 -4
- data/src/core/ext/filters/client_channel/subchannel.h +10 -1
- data/src/core/ext/filters/client_channel/subchannel_index.cc +2 -0
- data/src/core/ext/filters/client_channel/subchannel_index.h +2 -0
- data/src/core/ext/filters/client_channel/uri_parser.cc +2 -1
- data/src/core/ext/filters/client_channel/uri_parser.h +2 -1
- data/src/core/ext/filters/deadline/deadline_filter.cc +2 -1
- data/src/core/ext/filters/deadline/deadline_filter.h +2 -0
- data/src/core/ext/filters/http/client/http_client_filter.cc +27 -25
- data/src/core/ext/filters/http/client/http_client_filter.h +2 -0
- data/src/core/ext/filters/http/client_authority_filter.cc +156 -0
- data/src/core/ext/filters/http/client_authority_filter.h +34 -0
- data/src/core/ext/filters/http/http_filters_plugin.cc +2 -0
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +21 -22
- data/src/core/ext/filters/http/message_compress/message_compress_filter.h +2 -0
- data/src/core/ext/filters/http/server/http_server_filter.cc +11 -8
- data/src/core/ext/filters/http/server/http_server_filter.h +2 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +2 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +2 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +2 -0
- data/src/core/ext/filters/max_age/max_age_filter.cc +2 -0
- data/src/core/ext/filters/max_age/max_age_filter.h +2 -0
- data/src/core/ext/filters/message_size/message_size_filter.cc +52 -49
- data/src/core/ext/filters/message_size/message_size_filter.h +2 -0
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +5 -1
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +2 -0
- data/src/core/ext/filters/workarounds/workaround_utils.cc +2 -0
- data/src/core/ext/filters/workarounds/workaround_utils.h +2 -0
- data/src/core/ext/transport/chttp2/alpn/alpn.cc +3 -1
- data/src/core/ext/transport/chttp2/alpn/alpn.h +2 -0
- data/src/core/ext/transport/chttp2/client/authority.cc +42 -0
- data/src/core/ext/transport/chttp2/client/authority.h +36 -0
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +2 -0
- data/src/core/ext/transport/chttp2/client/chttp2_connector.h +2 -0
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +10 -3
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +2 -2
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +37 -25
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +2 -0
- data/src/core/ext/transport/chttp2/server/chttp2_server.h +3 -1
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +2 -1
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +2 -1
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +3 -1
- data/src/core/ext/transport/chttp2/transport/bin_decoder.h +2 -0
- data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/bin_encoder.h +2 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +152 -182
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -0
- data/src/core/ext/transport/chttp2/transport/frame.h +2 -1
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +15 -19
- data/src/core/ext/transport/chttp2/transport/frame_data.h +7 -5
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/frame_goaway.h +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/frame_ping.h +2 -1
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +2 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/frame_window_update.h +2 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +2 -1
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +2 -1
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +2 -2
- data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/hpack_table.h +2 -1
- data/src/core/ext/transport/chttp2/transport/http2_settings.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +2 -0
- data/src/core/ext/transport/chttp2/transport/huffsyms.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +3 -2
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +2 -0
- data/src/core/ext/transport/chttp2/transport/internal.h +60 -24
- data/src/core/ext/transport/chttp2/transport/parsing.cc +2 -4
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/stream_map.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/varint.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +10 -6
- data/src/core/ext/transport/inproc/inproc_plugin.cc +2 -0
- data/src/core/ext/transport/inproc/inproc_transport.cc +20 -23
- data/src/core/ext/transport/inproc/inproc_transport.h +2 -0
- data/src/core/lib/avl/avl.cc +2 -0
- data/src/core/lib/avl/avl.h +2 -0
- data/src/core/lib/backoff/backoff.cc +2 -0
- data/src/core/lib/backoff/backoff.h +2 -0
- data/src/core/lib/channel/channel_args.h +2 -0
- data/src/core/lib/channel/channel_stack.cc +3 -1
- data/src/core/lib/channel/channel_stack.h +2 -0
- data/src/core/lib/channel/channel_stack_builder.cc +2 -0
- data/src/core/lib/channel/channel_stack_builder.h +2 -0
- data/src/core/lib/channel/channel_trace.cc +239 -0
- data/src/core/lib/channel/channel_trace.h +133 -0
- data/src/core/lib/channel/channel_trace_registry.cc +80 -0
- data/src/core/lib/channel/channel_trace_registry.h +43 -0
- data/src/core/lib/channel/connected_channel.cc +2 -0
- data/src/core/lib/channel/connected_channel.h +2 -0
- data/src/core/lib/channel/handshaker.cc +2 -0
- data/src/core/lib/channel/handshaker.h +2 -0
- data/src/core/lib/channel/handshaker_factory.cc +2 -0
- data/src/core/lib/channel/handshaker_factory.h +2 -1
- data/src/core/lib/channel/handshaker_registry.cc +2 -0
- data/src/core/lib/channel/handshaker_registry.h +2 -1
- data/src/core/lib/channel/status_util.cc +100 -0
- data/src/core/lib/channel/status_util.h +58 -0
- data/src/core/lib/compression/algorithm_metadata.h +2 -0
- data/src/core/lib/compression/compression.cc +2 -0
- data/src/core/lib/compression/compression_internal.cc +2 -0
- data/src/core/lib/compression/compression_internal.h +2 -0
- data/src/core/lib/compression/message_compress.cc +2 -0
- data/src/core/lib/compression/message_compress.h +2 -0
- data/src/core/lib/compression/stream_compression.cc +2 -0
- data/src/core/lib/compression/stream_compression.h +2 -0
- data/src/core/lib/compression/stream_compression_gzip.cc +2 -0
- data/src/core/lib/compression/stream_compression_gzip.h +2 -0
- data/src/core/lib/compression/stream_compression_identity.cc +2 -1
- data/src/core/lib/compression/stream_compression_identity.h +2 -0
- data/src/core/lib/debug/stats.cc +2 -0
- data/src/core/lib/debug/stats.h +2 -0
- data/src/core/lib/debug/stats_data.cc +3 -1
- data/src/core/lib/debug/stats_data.h +2 -0
- data/src/core/lib/debug/trace.cc +2 -0
- data/src/core/lib/debug/trace.h +2 -1
- data/src/core/lib/gpr/alloc.cc +2 -1
- data/src/core/lib/gpr/arena.cc +47 -0
- data/src/core/lib/gpr/arena.h +2 -0
- data/src/core/lib/gpr/atm.cc +2 -0
- data/src/core/lib/gpr/cpu_linux.cc +5 -1
- data/src/core/lib/gpr/cpu_posix.cc +1 -1
- data/src/core/lib/gpr/env.h +2 -0
- data/src/core/lib/gpr/fork.cc +2 -0
- data/src/core/lib/gpr/host_port.cc +2 -0
- data/src/core/lib/gpr/log.cc +2 -1
- data/src/core/lib/gpr/log_linux.cc +1 -0
- data/src/core/lib/gpr/mpscq.cc +2 -0
- data/src/core/lib/gpr/mpscq.h +2 -0
- data/src/core/lib/gpr/murmur_hash.cc +2 -0
- data/src/core/lib/gpr/spinlock.h +2 -0
- data/src/core/lib/gpr/string.cc +2 -1
- data/src/core/lib/gpr/string.h +2 -2
- data/src/core/lib/gpr/sync.cc +2 -0
- data/src/core/lib/gpr/time.cc +2 -0
- data/src/core/lib/gpr/time_posix.cc +1 -0
- data/src/core/lib/gpr/time_precise.cc +2 -0
- data/src/core/lib/gpr/time_precise.h +2 -0
- data/src/core/lib/gpr/tls_gcc.h +2 -0
- data/src/core/lib/gpr/tls_msvc.h +2 -0
- data/src/core/lib/gpr/tls_pthread.h +2 -0
- data/src/core/lib/gpr/tmpfile.h +2 -0
- data/src/core/lib/gprpp/atomic_with_atm.h +2 -0
- data/src/core/lib/gprpp/atomic_with_std.h +2 -0
- data/src/core/lib/gprpp/inlined_vector.h +2 -0
- data/src/core/lib/gprpp/manual_constructor.h +3 -1
- data/src/core/lib/gprpp/memory.h +5 -3
- data/src/core/lib/gprpp/orphanable.h +3 -0
- data/src/core/lib/gprpp/ref_counted.h +4 -0
- data/src/core/lib/gprpp/ref_counted_ptr.h +3 -0
- data/src/core/lib/gprpp/thd.h +135 -0
- data/src/core/lib/gprpp/thd_posix.cc +209 -0
- data/src/core/lib/gprpp/thd_windows.cc +162 -0
- data/src/core/lib/http/format_request.cc +2 -0
- data/src/core/lib/http/format_request.h +2 -0
- data/src/core/lib/http/httpcli.cc +2 -0
- data/src/core/lib/http/httpcli.h +2 -0
- data/src/core/lib/http/httpcli_security_connector.cc +16 -7
- data/src/core/lib/http/parser.cc +2 -0
- data/src/core/lib/http/parser.h +2 -1
- data/src/core/lib/iomgr/call_combiner.cc +2 -0
- data/src/core/lib/iomgr/call_combiner.h +2 -1
- data/src/core/lib/iomgr/combiner.cc +2 -0
- data/src/core/lib/iomgr/combiner.h +2 -0
- data/src/core/lib/iomgr/endpoint.cc +4 -0
- data/src/core/lib/iomgr/endpoint.h +2 -0
- data/src/core/lib/iomgr/endpoint_pair.h +2 -0
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -0
- data/src/core/lib/iomgr/endpoint_pair_uv.cc +2 -0
- data/src/core/lib/iomgr/endpoint_pair_windows.cc +7 -4
- data/src/core/lib/iomgr/error.h +2 -0
- data/src/core/lib/iomgr/error_internal.h +2 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +2 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +2 -0
- data/src/core/lib/iomgr/ev_epollex_linux.cc +4 -18
- data/src/core/lib/iomgr/ev_epollex_linux.h +2 -0
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +2 -0
- data/src/core/lib/iomgr/ev_epollsig_linux.h +2 -0
- data/src/core/lib/iomgr/ev_poll_posix.cc +61 -31
- data/src/core/lib/iomgr/ev_poll_posix.h +2 -0
- data/src/core/lib/iomgr/ev_posix.cc +35 -19
- data/src/core/lib/iomgr/ev_posix.h +2 -0
- data/src/core/lib/iomgr/ev_windows.cc +2 -0
- data/src/core/lib/iomgr/exec_ctx.cc +3 -1
- data/src/core/lib/iomgr/exec_ctx.h +21 -9
- data/src/core/lib/iomgr/executor.cc +13 -11
- data/src/core/lib/iomgr/executor.h +2 -0
- data/src/core/lib/iomgr/fork_posix.cc +4 -2
- data/src/core/lib/iomgr/fork_windows.cc +2 -0
- data/src/core/lib/iomgr/gethostname_fallback.cc +2 -0
- data/src/core/lib/iomgr/gethostname_host_name_max.cc +2 -0
- data/src/core/lib/iomgr/gethostname_sysconf.cc +2 -0
- data/src/core/lib/iomgr/iocp_windows.cc +3 -1
- data/src/core/lib/iomgr/iocp_windows.h +3 -0
- data/src/core/lib/iomgr/iomgr.cc +2 -1
- data/src/core/lib/iomgr/iomgr.h +2 -0
- data/src/core/lib/iomgr/iomgr_custom.cc +63 -0
- data/src/core/lib/iomgr/iomgr_custom.h +47 -0
- data/src/core/lib/iomgr/iomgr_internal.cc +43 -0
- data/src/core/lib/iomgr/iomgr_internal.h +14 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +30 -3
- data/src/core/lib/iomgr/iomgr_posix.h +2 -0
- data/src/core/lib/iomgr/iomgr_uv.cc +17 -20
- data/src/core/lib/iomgr/iomgr_windows.cc +29 -3
- data/src/core/lib/iomgr/is_epollexclusive_available.cc +2 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +2 -0
- data/src/core/lib/iomgr/load_file.cc +2 -0
- data/src/core/lib/iomgr/load_file.h +2 -0
- data/src/core/lib/iomgr/lockfree_event.cc +2 -0
- data/src/core/lib/iomgr/lockfree_event.h +14 -1
- data/src/core/lib/iomgr/nameser.h +2 -0
- data/src/core/lib/iomgr/network_status_tracker.cc +3 -1
- data/src/core/lib/iomgr/network_status_tracker.h +2 -0
- data/src/core/lib/iomgr/polling_entity.cc +2 -0
- data/src/core/lib/iomgr/polling_entity.h +2 -0
- data/src/core/lib/iomgr/pollset.cc +56 -0
- data/src/core/lib/iomgr/pollset.h +19 -0
- data/src/core/lib/iomgr/pollset_custom.cc +106 -0
- data/src/core/lib/iomgr/{timer_generic.h → pollset_custom.h} +15 -17
- data/src/core/lib/iomgr/pollset_set.cc +55 -0
- data/src/core/lib/iomgr/pollset_set.h +13 -0
- data/src/core/lib/iomgr/pollset_set_custom.cc +48 -0
- data/src/core/lib/iomgr/{pollset_uv.h → pollset_set_custom.h} +6 -7
- data/src/core/lib/iomgr/pollset_set_windows.cc +17 -10
- data/src/core/lib/iomgr/pollset_set_windows.h +2 -0
- data/src/core/lib/iomgr/pollset_uv.cc +42 -105
- data/src/core/lib/iomgr/pollset_windows.cc +20 -12
- data/src/core/lib/iomgr/pollset_windows.h +2 -0
- data/src/core/lib/iomgr/port.h +10 -19
- data/src/core/lib/iomgr/resolve_address.cc +50 -0
- data/src/core/lib/iomgr/resolve_address.h +39 -10
- data/src/core/lib/iomgr/resolve_address_custom.cc +187 -0
- data/src/core/lib/iomgr/resolve_address_custom.h +43 -0
- data/src/core/lib/iomgr/resolve_address_posix.cc +10 -22
- data/src/core/lib/iomgr/resolve_address_windows.cc +10 -22
- data/src/core/lib/iomgr/resource_quota.cc +2 -0
- data/src/core/lib/iomgr/resource_quota.h +3 -5
- data/src/core/lib/iomgr/sockaddr.h +3 -11
- data/src/core/lib/iomgr/sockaddr_custom.h +54 -0
- data/src/core/lib/iomgr/sockaddr_posix.h +26 -0
- data/src/core/lib/iomgr/sockaddr_utils.cc +91 -71
- data/src/core/lib/iomgr/sockaddr_utils.h +4 -0
- data/src/core/lib/iomgr/sockaddr_windows.h +21 -0
- data/src/core/lib/iomgr/socket_factory_posix.cc +2 -0
- data/src/core/lib/iomgr/socket_factory_posix.h +2 -0
- data/src/core/lib/iomgr/socket_mutator.cc +2 -0
- data/src/core/lib/iomgr/socket_mutator.h +2 -0
- data/src/core/lib/iomgr/socket_utils.h +11 -0
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +15 -6
- data/src/core/lib/iomgr/socket_utils_linux.cc +4 -4
- data/src/core/lib/iomgr/socket_utils_posix.cc +3 -2
- data/src/core/lib/iomgr/socket_utils_posix.h +2 -0
- data/src/core/lib/iomgr/socket_utils_uv.cc +13 -2
- data/src/core/lib/iomgr/socket_utils_windows.cc +10 -0
- data/src/core/lib/iomgr/socket_windows.cc +2 -0
- data/src/core/lib/iomgr/socket_windows.h +2 -1
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +2 -0
- data/src/core/lib/iomgr/tcp_client.cc +36 -0
- data/src/core/lib/iomgr/tcp_client.h +13 -0
- data/src/core/lib/iomgr/tcp_client_custom.cc +151 -0
- data/src/core/lib/iomgr/tcp_client_posix.cc +11 -24
- data/src/core/lib/iomgr/tcp_client_posix.h +2 -0
- data/src/core/lib/iomgr/tcp_client_windows.cc +10 -23
- data/src/core/lib/iomgr/tcp_custom.cc +365 -0
- data/src/core/lib/iomgr/tcp_custom.h +81 -0
- data/src/core/lib/iomgr/tcp_posix.cc +3 -1
- data/src/core/lib/iomgr/tcp_posix.h +2 -0
- data/src/core/lib/iomgr/tcp_server.cc +73 -0
- data/src/core/lib/iomgr/tcp_server.h +24 -0
- data/src/core/lib/iomgr/tcp_server_custom.cc +472 -0
- data/src/core/lib/iomgr/tcp_server_posix.cc +41 -23
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +2 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +7 -7
- data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +8 -6
- data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +2 -0
- data/src/core/lib/iomgr/tcp_server_windows.cc +43 -21
- data/src/core/lib/iomgr/tcp_uv.cc +308 -314
- data/src/core/lib/iomgr/tcp_windows.cc +3 -1
- data/src/core/lib/iomgr/tcp_windows.h +2 -0
- data/src/core/lib/iomgr/time_averaged_stats.cc +2 -0
- data/src/core/lib/iomgr/timer.cc +45 -0
- data/src/core/lib/iomgr/timer.h +36 -15
- data/src/core/lib/iomgr/timer_custom.cc +93 -0
- data/src/core/lib/iomgr/timer_custom.h +43 -0
- data/src/core/lib/iomgr/timer_generic.cc +12 -10
- data/src/core/lib/iomgr/timer_heap.cc +2 -4
- data/src/core/lib/iomgr/timer_heap.h +2 -0
- data/src/core/lib/iomgr/timer_manager.cc +12 -20
- data/src/core/lib/iomgr/timer_manager.h +2 -0
- data/src/core/lib/iomgr/timer_uv.cc +15 -49
- data/src/core/lib/iomgr/udp_server.cc +271 -230
- data/src/core/lib/iomgr/udp_server.h +44 -20
- data/src/core/lib/iomgr/unix_sockets_posix.cc +10 -7
- data/src/core/lib/iomgr/unix_sockets_posix.h +2 -0
- data/src/core/lib/iomgr/unix_sockets_posix_noop.cc +2 -0
- data/src/core/lib/iomgr/wakeup_fd_cv.cc +3 -1
- data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -0
- data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +2 -0
- data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +2 -0
- data/src/core/lib/iomgr/wakeup_fd_pipe.cc +2 -0
- data/src/core/lib/iomgr/wakeup_fd_pipe.h +2 -0
- data/src/core/lib/iomgr/wakeup_fd_posix.cc +2 -0
- data/src/core/lib/iomgr/wakeup_fd_posix.h +2 -0
- data/src/core/lib/json/json.cc +38 -0
- data/src/core/lib/json/json.h +22 -1
- data/src/core/lib/json/json_reader.cc +2 -2
- data/src/core/lib/json/json_reader.h +1 -0
- data/src/core/lib/json/json_string.cc +2 -0
- data/src/core/lib/json/json_writer.cc +2 -2
- data/src/core/lib/json/json_writer.h +2 -0
- data/src/core/lib/profiling/basic_timers.cc +11 -9
- data/src/core/lib/profiling/timers.h +6 -3
- data/src/core/lib/security/context/security_context.cc +2 -0
- data/src/core/lib/security/context/security_context.h +2 -0
- data/src/core/lib/security/credentials/alts/alts_credentials.cc +119 -0
- data/src/core/lib/security/credentials/alts/alts_credentials.h +102 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment.cc +72 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment.h +57 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc +67 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc +33 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc +114 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +126 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc +46 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +112 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc +58 -0
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +2 -0
- data/src/core/lib/security/credentials/composite/composite_credentials.h +2 -0
- data/src/core/lib/security/credentials/credentials.cc +2 -0
- data/src/core/lib/security/credentials/credentials.h +2 -0
- data/src/core/lib/security/credentials/credentials_metadata.cc +2 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +2 -3
- data/src/core/lib/security/credentials/fake/fake_credentials.h +5 -0
- data/src/core/lib/security/credentials/google_default/credentials_generic.cc +2 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +2 -0
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +2 -0
- data/src/core/lib/security/credentials/iam/iam_credentials.h +2 -0
- data/src/core/lib/security/credentials/jwt/json_token.cc +2 -0
- data/src/core/lib/security/credentials/jwt/json_token.h +2 -0
- data/src/core/lib/security/credentials/jwt/jwt_credentials.h +2 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +2 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +2 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +2 -0
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +2 -0
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +11 -2
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +2 -0
- data/src/core/lib/security/security_connector/alts_security_connector.cc +287 -0
- data/src/core/lib/security/security_connector/alts_security_connector.h +69 -0
- data/src/core/lib/security/security_connector/security_connector.cc +174 -74
- data/src/core/lib/security/security_connector/security_connector.h +41 -7
- data/src/core/lib/security/transport/auth_filters.h +2 -0
- data/src/core/lib/security/transport/client_auth_filter.cc +14 -28
- data/src/core/lib/security/transport/secure_endpoint.cc +2 -0
- data/src/core/lib/security/transport/secure_endpoint.h +2 -0
- data/src/core/lib/security/transport/security_handshaker.cc +2 -0
- data/src/core/lib/security/transport/security_handshaker.h +2 -1
- data/src/core/lib/security/transport/server_auth_filter.cc +2 -0
- data/src/core/lib/security/transport/target_authority_table.cc +75 -0
- data/src/core/lib/security/transport/{lb_targets_info.h → target_authority_table.h} +16 -8
- data/src/core/lib/security/transport/tsi_error.cc +2 -0
- data/src/core/lib/security/transport/tsi_error.h +2 -0
- data/src/core/lib/security/util/json_util.cc +2 -0
- data/src/core/lib/security/util/json_util.h +2 -0
- data/src/core/lib/slice/b64.cc +2 -0
- data/src/core/lib/slice/b64.h +2 -0
- data/src/core/lib/slice/percent_encoding.cc +2 -0
- data/src/core/lib/slice/percent_encoding.h +2 -0
- data/src/core/lib/slice/slice.cc +2 -0
- data/src/core/lib/slice/slice_buffer.cc +3 -1
- data/src/core/lib/slice/slice_hash_table.h +178 -45
- data/src/core/lib/slice/slice_intern.cc +2 -0
- data/src/core/lib/slice/slice_internal.h +2 -2
- data/src/core/lib/slice/slice_string_helpers.cc +2 -0
- data/src/core/lib/slice/slice_string_helpers.h +2 -1
- data/src/core/lib/slice/slice_weak_hash_table.h +105 -0
- data/src/core/lib/surface/api_trace.cc +3 -1
- data/src/core/lib/surface/api_trace.h +2 -0
- data/src/core/lib/surface/byte_buffer.cc +3 -0
- data/src/core/lib/surface/byte_buffer_reader.cc +3 -0
- data/src/core/lib/surface/call.cc +46 -80
- data/src/core/lib/surface/call.h +2 -0
- data/src/core/lib/surface/call_details.cc +2 -0
- data/src/core/lib/surface/call_log_batch.cc +2 -0
- data/src/core/lib/surface/call_test_only.h +2 -0
- data/src/core/lib/surface/channel.cc +72 -41
- data/src/core/lib/surface/channel.h +2 -0
- data/src/core/lib/surface/channel_init.cc +2 -0
- data/src/core/lib/surface/channel_init.h +2 -0
- data/src/core/lib/surface/channel_ping.cc +2 -0
- data/src/core/lib/surface/channel_stack_type.cc +3 -2
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.h +2 -0
- data/src/core/lib/surface/completion_queue_factory.cc +3 -1
- data/src/core/lib/surface/completion_queue_factory.h +2 -0
- data/src/core/lib/surface/event_string.cc +2 -0
- data/src/core/lib/surface/event_string.h +2 -0
- data/src/core/lib/surface/init.cc +5 -2
- data/src/core/lib/surface/init_secure.cc +5 -2
- data/src/core/lib/surface/lame_client.cc +7 -5
- data/src/core/lib/surface/lame_client.h +2 -0
- data/src/core/lib/surface/metadata_array.cc +2 -0
- data/src/core/lib/surface/server.cc +2 -0
- data/src/core/lib/surface/server.h +2 -0
- data/src/core/lib/surface/validate_metadata.cc +2 -1
- data/src/core/lib/surface/validate_metadata.h +2 -0
- data/src/core/lib/surface/version.cc +4 -2
- data/src/core/lib/transport/bdp_estimator.cc +2 -0
- data/src/core/lib/transport/byte_stream.cc +94 -116
- data/src/core/lib/transport/byte_stream.h +111 -78
- data/src/core/lib/transport/connectivity_state.cc +2 -0
- data/src/core/lib/transport/connectivity_state.h +3 -1
- data/src/core/lib/transport/error_utils.cc +2 -0
- data/src/core/lib/transport/error_utils.h +2 -0
- data/src/core/lib/transport/metadata.cc +2 -0
- data/src/core/lib/transport/metadata.h +3 -1
- data/src/core/lib/transport/metadata_batch.cc +26 -0
- data/src/core/lib/transport/metadata_batch.h +12 -1
- data/src/core/lib/transport/pid_controller.cc +2 -0
- data/src/core/lib/transport/pid_controller.h +2 -0
- data/src/core/lib/transport/service_config.cc +21 -175
- data/src/core/lib/transport/service_config.h +223 -35
- data/src/core/lib/transport/static_metadata.cc +310 -294
- data/src/core/lib/transport/static_metadata.h +96 -82
- data/src/core/lib/transport/status_conversion.cc +2 -0
- data/src/core/lib/transport/status_conversion.h +3 -0
- data/src/core/lib/transport/status_metadata.cc +54 -0
- data/src/core/lib/{iomgr/timer_uv.h → transport/status_metadata.h} +10 -12
- data/src/core/lib/transport/timeout_encoding.cc +2 -1
- data/src/core/lib/transport/timeout_encoding.h +2 -0
- data/src/core/lib/transport/transport.cc +3 -1
- data/src/core/lib/transport/transport.h +33 -7
- data/src/core/lib/transport/transport_impl.h +2 -0
- data/src/core/lib/transport/transport_op_string.cc +10 -3
- data/src/core/plugin_registry/grpc_plugin_registry.cc +10 -4
- data/src/core/tsi/alts/crypt/aes_gcm.cc +687 -0
- data/src/core/tsi/alts/crypt/gsec.cc +189 -0
- data/src/core/tsi/alts/crypt/gsec.h +454 -0
- data/src/core/tsi/alts/frame_protector/alts_counter.cc +118 -0
- data/src/core/tsi/alts/frame_protector/alts_counter.h +98 -0
- data/src/core/tsi/alts/frame_protector/alts_crypter.cc +66 -0
- data/src/core/tsi/alts/frame_protector/alts_crypter.h +255 -0
- data/src/core/tsi/alts/frame_protector/alts_frame_protector.cc +407 -0
- data/src/core/tsi/alts/frame_protector/alts_frame_protector.h +55 -0
- data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc +114 -0
- data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h +114 -0
- data/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc +105 -0
- data/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc +103 -0
- data/src/core/tsi/alts/frame_protector/frame_handler.cc +218 -0
- data/src/core/tsi/alts/frame_protector/frame_handler.h +236 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +316 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +137 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc +520 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h +323 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc +143 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +149 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +73 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_event.h +93 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +483 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +83 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +52 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +58 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_utils.h +52 -0
- data/src/core/tsi/alts/handshaker/altscontext.pb.c +48 -0
- data/src/core/tsi/alts/handshaker/altscontext.pb.h +64 -0
- data/src/core/tsi/alts/handshaker/handshaker.pb.c +123 -0
- data/src/core/tsi/alts/handshaker/handshaker.pb.h +255 -0
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +50 -0
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +78 -0
- data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +196 -0
- data/src/core/tsi/alts/handshaker/transport_security_common_api.h +163 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +180 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +52 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +144 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h +49 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h +91 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +174 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h +100 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc +476 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h +199 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +296 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +52 -0
- data/src/core/tsi/alts_transport_security.cc +3 -1
- data/src/core/tsi/alts_transport_security.h +4 -2
- data/src/core/tsi/fake_transport_security.cc +2 -1
- data/src/core/tsi/fake_transport_security.h +2 -0
- data/src/core/tsi/ssl/session_cache/ssl_session.h +73 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +58 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +211 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +93 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +76 -0
- data/src/core/tsi/ssl_transport_security.cc +266 -62
- data/src/core/tsi/ssl_transport_security.h +128 -6
- data/src/core/tsi/ssl_types.h +2 -0
- data/src/core/tsi/transport_security.cc +2 -0
- data/src/core/tsi/transport_security.h +2 -0
- data/src/core/tsi/transport_security_adapter.cc +2 -0
- data/src/core/tsi/transport_security_adapter.h +2 -0
- data/src/core/tsi/transport_security_grpc.cc +2 -0
- data/src/core/tsi/transport_security_grpc.h +2 -0
- data/src/core/tsi/transport_security_interface.h +2 -0
- data/src/ruby/ext/grpc/extconf.rb +1 -2
- data/src/ruby/ext/grpc/rb_call.c +1 -13
- data/src/ruby/ext/grpc/rb_channel.c +6 -6
- data/src/ruby/ext/grpc/rb_compression_options.c +1 -1
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +10 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +15 -0
- data/src/ruby/lib/grpc/core/time_consts.rb +1 -1
- data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -8
- data/src/ruby/lib/grpc/generic/client_stub.rb +6 -10
- data/src/ruby/lib/grpc/generic/interceptors.rb +1 -1
- data/src/ruby/lib/grpc/generic/rpc_server.rb +2 -2
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +133 -0
- data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +54 -0
- data/src/ruby/spec/pb/package_with_underscore/data.proto +23 -0
- data/src/ruby/spec/pb/package_with_underscore/service.proto +23 -0
- data/third_party/address_sorting/address_sorting.c +369 -0
- data/third_party/address_sorting/address_sorting_internal.h +70 -0
- data/third_party/address_sorting/address_sorting_posix.c +97 -0
- data/third_party/address_sorting/address_sorting_windows.c +55 -0
- data/third_party/address_sorting/include/address_sorting/address_sorting.h +110 -0
- data/third_party/boringssl/crypto/asn1/a_enum.c +20 -9
- data/third_party/boringssl/crypto/asn1/a_i2d_fp.c +3 -0
- data/third_party/boringssl/crypto/asn1/a_int.c +19 -8
- data/third_party/boringssl/crypto/asn1/a_object.c +0 -128
- data/third_party/boringssl/crypto/asn1/asn1_locl.h +3 -0
- data/third_party/boringssl/crypto/asn1/tasn_fre.c +2 -4
- data/third_party/boringssl/crypto/asn1/tasn_new.c +3 -2
- data/third_party/boringssl/crypto/bn_extra/bn_asn1.c +0 -16
- data/third_party/boringssl/crypto/buf/buf.c +14 -0
- data/third_party/boringssl/crypto/bytestring/cbb.c +93 -0
- data/third_party/boringssl/crypto/conf/conf.c +2 -2
- data/third_party/boringssl/crypto/cpu-intel.c +17 -17
- data/third_party/boringssl/crypto/crypto.c +16 -4
- data/third_party/boringssl/crypto/curve25519/spake25519.c +11 -11
- data/third_party/boringssl/crypto/curve25519/x25519-x86_64.c +1 -1
- data/third_party/boringssl/crypto/dsa/dsa.c +9 -21
- data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +2 -2
- data/third_party/boringssl/crypto/ecdsa_extra/ecdsa_asn1.c +1 -8
- data/third_party/boringssl/crypto/evp/p_rsa_asn1.c +2 -23
- data/third_party/boringssl/crypto/ex_data.c +0 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/add.c +7 -11
- data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +19 -16
- data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +15 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/div.c +53 -46
- data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +242 -85
- data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +42 -47
- data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +176 -34
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +118 -65
- data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +94 -61
- data/third_party/boringssl/crypto/fipsmodule/bn/random.c +79 -63
- data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +26 -28
- data/third_party/boringssl/crypto/fipsmodule/cipher/cipher.c +2 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +250 -149
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +0 -27
- data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +54 -20
- data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +3 -3
- data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +7 -41
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +6 -40
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +17 -122
- data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +3 -64
- data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +27 -9
- data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +203 -205
- data/third_party/boringssl/crypto/fipsmodule/modes/cbc.c +14 -15
- data/third_party/boringssl/crypto/fipsmodule/modes/cfb.c +12 -8
- data/third_party/boringssl/crypto/fipsmodule/modes/ctr.c +4 -3
- data/third_party/boringssl/crypto/fipsmodule/modes/gcm.c +25 -36
- data/third_party/boringssl/crypto/fipsmodule/modes/internal.h +10 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +0 -4
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +2 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +9 -19
- data/third_party/boringssl/crypto/lhash/lhash.c +19 -0
- data/third_party/boringssl/crypto/obj/obj.c +29 -69
- data/third_party/boringssl/crypto/pem/pem_lib.c +2 -2
- data/third_party/boringssl/crypto/poly1305/poly1305_vec.c +4 -55
- data/third_party/boringssl/crypto/rsa_extra/rsa_asn1.c +3 -22
- data/third_party/boringssl/crypto/x509/by_dir.c +1 -3
- data/third_party/boringssl/crypto/x509/by_file.c +0 -1
- data/third_party/boringssl/crypto/x509/x509_lu.c +0 -1
- data/third_party/boringssl/crypto/x509/x509_obj.c +1 -3
- data/third_party/boringssl/crypto/x509/x509_txt.c +0 -6
- data/third_party/boringssl/crypto/x509/x509_vfy.c +0 -1
- data/third_party/boringssl/crypto/x509/x509_vpm.c +0 -1
- data/third_party/boringssl/crypto/x509/x_algor.c +2 -2
- data/third_party/boringssl/crypto/x509v3/v3_alt.c +3 -4
- data/third_party/boringssl/crypto/x509v3/v3_genn.c +1 -0
- data/third_party/boringssl/crypto/x509v3/v3_info.c +1 -2
- data/third_party/boringssl/crypto/x509v3/v3_lib.c +15 -7
- data/third_party/boringssl/crypto/x509v3/v3_utl.c +41 -2
- data/third_party/boringssl/include/openssl/asn1.h +0 -1
- data/third_party/boringssl/include/openssl/base.h +1 -1
- data/third_party/boringssl/include/openssl/bio.h +5 -2
- data/third_party/boringssl/include/openssl/bn.h +2 -17
- data/third_party/boringssl/include/openssl/buf.h +4 -0
- data/third_party/boringssl/include/openssl/bytestring.h +11 -0
- data/third_party/boringssl/include/openssl/chacha.h +5 -1
- data/third_party/boringssl/include/openssl/cipher.h +10 -0
- data/third_party/boringssl/include/openssl/conf.h +4 -8
- data/third_party/boringssl/include/openssl/dsa.h +2 -18
- data/third_party/boringssl/include/openssl/ec.h +5 -5
- data/third_party/boringssl/include/openssl/ecdsa.h +10 -28
- data/third_party/boringssl/include/openssl/evp.h +0 -4
- data/third_party/boringssl/include/openssl/lhash.h +1 -18
- data/third_party/boringssl/include/openssl/obj.h +1 -0
- data/third_party/boringssl/include/openssl/rsa.h +3 -4
- data/third_party/boringssl/include/openssl/ssl.h +35 -54
- data/third_party/boringssl/include/openssl/ssl3.h +2 -0
- data/third_party/boringssl/include/openssl/stack.h +1 -1
- data/third_party/boringssl/include/openssl/tls1.h +1 -16
- data/third_party/boringssl/include/openssl/x509.h +3 -2
- data/third_party/boringssl/include/openssl/x509_vfy.h +0 -2
- data/third_party/boringssl/include/openssl/x509v3.h +1 -0
- data/third_party/boringssl/ssl/custom_extensions.cc +1 -1
- data/third_party/boringssl/ssl/d1_both.cc +120 -129
- data/third_party/boringssl/ssl/d1_lib.cc +23 -21
- data/third_party/boringssl/ssl/d1_pkt.cc +39 -143
- data/third_party/boringssl/ssl/dtls_method.cc +16 -23
- data/third_party/boringssl/ssl/dtls_record.cc +11 -4
- data/third_party/boringssl/ssl/handshake.cc +109 -40
- data/third_party/boringssl/ssl/handshake_client.cc +104 -96
- data/third_party/boringssl/ssl/handshake_server.cc +62 -72
- data/third_party/boringssl/ssl/internal.h +397 -318
- data/third_party/boringssl/ssl/s3_both.cc +173 -191
- data/third_party/boringssl/ssl/s3_lib.cc +26 -34
- data/third_party/boringssl/ssl/s3_pkt.cc +105 -247
- data/third_party/boringssl/ssl/ssl_asn1.cc +22 -22
- data/third_party/boringssl/ssl/ssl_buffer.cc +98 -108
- data/third_party/boringssl/ssl/ssl_cert.cc +12 -1
- data/third_party/boringssl/ssl/ssl_cipher.cc +23 -28
- data/third_party/boringssl/ssl/ssl_key_share.cc +11 -6
- data/third_party/boringssl/ssl/ssl_lib.cc +190 -113
- data/third_party/boringssl/ssl/ssl_privkey.cc +76 -106
- data/third_party/boringssl/ssl/ssl_session.cc +3 -3
- data/third_party/boringssl/ssl/ssl_stat.cc +3 -3
- data/third_party/boringssl/ssl/ssl_transcript.cc +38 -22
- data/third_party/boringssl/ssl/ssl_versions.cc +64 -31
- data/third_party/boringssl/ssl/t1_enc.cc +137 -154
- data/third_party/boringssl/ssl/t1_lib.cc +463 -478
- data/third_party/boringssl/ssl/tls13_both.cc +57 -58
- data/third_party/boringssl/ssl/tls13_client.cc +256 -121
- data/third_party/boringssl/ssl/tls13_enc.cc +187 -72
- data/third_party/boringssl/ssl/tls13_server.cc +187 -86
- data/third_party/boringssl/ssl/tls_method.cc +20 -30
- data/third_party/boringssl/ssl/tls_record.cc +77 -40
- data/third_party/boringssl/third_party/fiat/curve25519.c +5062 -0
- data/third_party/boringssl/{crypto/curve25519 → third_party/fiat}/internal.h +40 -27
- data/third_party/nanopb/pb.h +1 -1
- metadata +147 -45
- data/src/core/lib/gpr/thd.cc +0 -49
- data/src/core/lib/gpr/thd.h +0 -71
- data/src/core/lib/gpr/thd_posix.cc +0 -154
- data/src/core/lib/gpr/thd_windows.cc +0 -107
- data/src/core/lib/iomgr/iomgr_uv.h +0 -37
- data/src/core/lib/iomgr/pollset_set_uv.cc +0 -43
- data/src/core/lib/iomgr/resolve_address_uv.cc +0 -284
- data/src/core/lib/iomgr/tcp_client_uv.cc +0 -175
- data/src/core/lib/iomgr/tcp_server_uv.cc +0 -471
- data/src/core/lib/iomgr/tcp_uv.h +0 -51
- data/src/core/lib/security/transport/lb_targets_info.cc +0 -59
- data/src/core/lib/slice/slice_hash_table.cc +0 -145
- data/third_party/boringssl/crypto/curve25519/curve25519.c +0 -4938
@@ -1,6 +1,6 @@
|
|
1
1
|
/*
|
2
2
|
*
|
3
|
-
* Copyright
|
3
|
+
* Copyright 2017 gRPC authors.
|
4
4
|
*
|
5
5
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
6
|
* you may not use this file except in compliance with the License.
|
@@ -16,6 +16,8 @@
|
|
16
16
|
*
|
17
17
|
*/
|
18
18
|
|
19
|
+
#include <grpc/support/port_platform.h>
|
20
|
+
|
19
21
|
#include "src/core/ext/filters/client_channel/backup_poller.h"
|
20
22
|
|
21
23
|
#include <grpc/grpc.h>
|
@@ -125,13 +127,7 @@ static void run_poller(void* arg, grpc_error* error) {
|
|
125
127
|
&p->run_poller_closure);
|
126
128
|
}
|
127
129
|
|
128
|
-
void
|
129
|
-
grpc_pollset_set* interested_parties) {
|
130
|
-
gpr_once_init(&g_once, init_globals);
|
131
|
-
if (g_poll_interval_ms == 0) {
|
132
|
-
return;
|
133
|
-
}
|
134
|
-
gpr_mu_lock(&g_poller_mu);
|
130
|
+
static void g_poller_init_locked() {
|
135
131
|
if (g_poller == nullptr) {
|
136
132
|
g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
|
137
133
|
g_poller->pollset =
|
@@ -147,7 +143,16 @@ void grpc_client_channel_start_backup_polling(
|
|
147
143
|
grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
|
148
144
|
&g_poller->run_poller_closure);
|
149
145
|
}
|
146
|
+
}
|
150
147
|
|
148
|
+
void grpc_client_channel_start_backup_polling(
|
149
|
+
grpc_pollset_set* interested_parties) {
|
150
|
+
gpr_once_init(&g_once, init_globals);
|
151
|
+
if (g_poll_interval_ms == 0) {
|
152
|
+
return;
|
153
|
+
}
|
154
|
+
gpr_mu_lock(&g_poller_mu);
|
155
|
+
g_poller_init_locked();
|
151
156
|
gpr_ref(&g_poller->refs);
|
152
157
|
/* Get a reference to g_poller->pollset before releasing g_poller_mu to make
|
153
158
|
* TSAN happy. Otherwise, reading from g_poller (i.e g_poller->pollset) after
|
@@ -1,6 +1,6 @@
|
|
1
1
|
/*
|
2
2
|
*
|
3
|
-
* Copyright
|
3
|
+
* Copyright 2017 gRPC authors.
|
4
4
|
*
|
5
5
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
6
|
* you may not use this file except in compliance with the License.
|
@@ -19,9 +19,10 @@
|
|
19
19
|
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
|
20
20
|
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
|
21
21
|
|
22
|
+
#include <grpc/support/port_platform.h>
|
23
|
+
|
22
24
|
#include <grpc/grpc.h>
|
23
25
|
#include "src/core/lib/channel/channel_stack.h"
|
24
|
-
#include "src/core/lib/iomgr/exec_ctx.h"
|
25
26
|
|
26
27
|
/* Start polling \a interested_parties periodically in the timer thread */
|
27
28
|
void grpc_client_channel_start_backup_polling(
|
@@ -21,6 +21,7 @@
|
|
21
21
|
#include "src/core/ext/filters/client_channel/client_channel.h"
|
22
22
|
|
23
23
|
#include <inttypes.h>
|
24
|
+
#include <limits.h>
|
24
25
|
#include <stdbool.h>
|
25
26
|
#include <stdio.h>
|
26
27
|
#include <string.h>
|
@@ -33,153 +34,75 @@
|
|
33
34
|
#include "src/core/ext/filters/client_channel/backup_poller.h"
|
34
35
|
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
|
35
36
|
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
|
37
|
+
#include "src/core/ext/filters/client_channel/method_params.h"
|
36
38
|
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
|
37
39
|
#include "src/core/ext/filters/client_channel/resolver_registry.h"
|
38
40
|
#include "src/core/ext/filters/client_channel/retry_throttle.h"
|
39
41
|
#include "src/core/ext/filters/client_channel/subchannel.h"
|
40
42
|
#include "src/core/ext/filters/deadline/deadline_filter.h"
|
43
|
+
#include "src/core/lib/backoff/backoff.h"
|
41
44
|
#include "src/core/lib/channel/channel_args.h"
|
42
45
|
#include "src/core/lib/channel/connected_channel.h"
|
46
|
+
#include "src/core/lib/channel/status_util.h"
|
43
47
|
#include "src/core/lib/gpr/string.h"
|
48
|
+
#include "src/core/lib/gprpp/inlined_vector.h"
|
49
|
+
#include "src/core/lib/gprpp/manual_constructor.h"
|
44
50
|
#include "src/core/lib/iomgr/combiner.h"
|
45
51
|
#include "src/core/lib/iomgr/iomgr.h"
|
46
52
|
#include "src/core/lib/iomgr/polling_entity.h"
|
47
53
|
#include "src/core/lib/profiling/timers.h"
|
48
54
|
#include "src/core/lib/slice/slice_internal.h"
|
55
|
+
#include "src/core/lib/slice/slice_string_helpers.h"
|
49
56
|
#include "src/core/lib/surface/channel.h"
|
50
57
|
#include "src/core/lib/transport/connectivity_state.h"
|
58
|
+
#include "src/core/lib/transport/error_utils.h"
|
51
59
|
#include "src/core/lib/transport/metadata.h"
|
52
60
|
#include "src/core/lib/transport/metadata_batch.h"
|
53
61
|
#include "src/core/lib/transport/service_config.h"
|
54
62
|
#include "src/core/lib/transport/static_metadata.h"
|
63
|
+
#include "src/core/lib/transport/status_metadata.h"
|
64
|
+
|
65
|
+
using grpc_core::internal::ClientChannelMethodParams;
|
66
|
+
using grpc_core::internal::ServerRetryThrottleData;
|
55
67
|
|
56
68
|
/* Client channel implementation */
|
57
69
|
|
70
|
+
// By default, we buffer 256 KiB per RPC for retries.
|
71
|
+
// TODO(roth): Do we have any data to suggest a better value?
|
72
|
+
#define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
|
73
|
+
|
74
|
+
// This value was picked arbitrarily. It can be changed if there is
|
75
|
+
// any even moderately compelling reason to do so.
|
76
|
+
#define RETRY_BACKOFF_JITTER 0.2
|
77
|
+
|
58
78
|
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
|
59
79
|
|
60
80
|
/*************************************************************************
|
61
|
-
*
|
81
|
+
* CHANNEL-WIDE FUNCTIONS
|
62
82
|
*/
|
63
83
|
|
64
|
-
typedef enum {
|
65
|
-
/* zero so it can be default initialized */
|
66
|
-
WAIT_FOR_READY_UNSET = 0,
|
67
|
-
WAIT_FOR_READY_FALSE,
|
68
|
-
WAIT_FOR_READY_TRUE
|
69
|
-
} wait_for_ready_value;
|
70
|
-
|
71
|
-
typedef struct {
|
72
|
-
gpr_refcount refs;
|
73
|
-
grpc_millis timeout;
|
74
|
-
wait_for_ready_value wait_for_ready;
|
75
|
-
} method_parameters;
|
76
|
-
|
77
|
-
static method_parameters* method_parameters_ref(
|
78
|
-
method_parameters* method_params) {
|
79
|
-
gpr_ref(&method_params->refs);
|
80
|
-
return method_params;
|
81
|
-
}
|
82
|
-
|
83
|
-
static void method_parameters_unref(method_parameters* method_params) {
|
84
|
-
if (gpr_unref(&method_params->refs)) {
|
85
|
-
gpr_free(method_params);
|
86
|
-
}
|
87
|
-
}
|
88
|
-
|
89
|
-
// Wrappers to pass to grpc_service_config_create_method_config_table().
|
90
|
-
static void* method_parameters_ref_wrapper(void* value) {
|
91
|
-
return method_parameters_ref(static_cast<method_parameters*>(value));
|
92
|
-
}
|
93
|
-
static void method_parameters_unref_wrapper(void* value) {
|
94
|
-
method_parameters_unref(static_cast<method_parameters*>(value));
|
95
|
-
}
|
96
|
-
|
97
|
-
static bool parse_wait_for_ready(grpc_json* field,
|
98
|
-
wait_for_ready_value* wait_for_ready) {
|
99
|
-
if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
|
100
|
-
return false;
|
101
|
-
}
|
102
|
-
*wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
|
103
|
-
: WAIT_FOR_READY_FALSE;
|
104
|
-
return true;
|
105
|
-
}
|
106
|
-
|
107
|
-
static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
|
108
|
-
if (field->type != GRPC_JSON_STRING) return false;
|
109
|
-
size_t len = strlen(field->value);
|
110
|
-
if (field->value[len - 1] != 's') return false;
|
111
|
-
char* buf = gpr_strdup(field->value);
|
112
|
-
buf[len - 1] = '\0'; // Remove trailing 's'.
|
113
|
-
char* decimal_point = strchr(buf, '.');
|
114
|
-
int nanos = 0;
|
115
|
-
if (decimal_point != nullptr) {
|
116
|
-
*decimal_point = '\0';
|
117
|
-
nanos = gpr_parse_nonnegative_int(decimal_point + 1);
|
118
|
-
if (nanos == -1) {
|
119
|
-
gpr_free(buf);
|
120
|
-
return false;
|
121
|
-
}
|
122
|
-
int num_digits = static_cast<int>(strlen(decimal_point + 1));
|
123
|
-
if (num_digits > 9) { // We don't accept greater precision than nanos.
|
124
|
-
gpr_free(buf);
|
125
|
-
return false;
|
126
|
-
}
|
127
|
-
for (int i = 0; i < (9 - num_digits); ++i) {
|
128
|
-
nanos *= 10;
|
129
|
-
}
|
130
|
-
}
|
131
|
-
int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
|
132
|
-
gpr_free(buf);
|
133
|
-
if (seconds == -1) return false;
|
134
|
-
*timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
|
135
|
-
return true;
|
136
|
-
}
|
137
|
-
|
138
|
-
static void* method_parameters_create_from_json(const grpc_json* json) {
|
139
|
-
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
|
140
|
-
grpc_millis timeout = 0;
|
141
|
-
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
|
142
|
-
if (field->key == nullptr) continue;
|
143
|
-
if (strcmp(field->key, "waitForReady") == 0) {
|
144
|
-
if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate.
|
145
|
-
if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
|
146
|
-
} else if (strcmp(field->key, "timeout") == 0) {
|
147
|
-
if (timeout > 0) return nullptr; // Duplicate.
|
148
|
-
if (!parse_timeout(field, &timeout)) return nullptr;
|
149
|
-
}
|
150
|
-
}
|
151
|
-
method_parameters* value =
|
152
|
-
static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
|
153
|
-
gpr_ref_init(&value->refs, 1);
|
154
|
-
value->timeout = timeout;
|
155
|
-
value->wait_for_ready = wait_for_ready;
|
156
|
-
return value;
|
157
|
-
}
|
158
|
-
|
159
84
|
struct external_connectivity_watcher;
|
160
85
|
|
161
|
-
|
162
|
-
|
163
|
-
|
86
|
+
typedef grpc_core::SliceHashTable<
|
87
|
+
grpc_core::RefCountedPtr<ClientChannelMethodParams>>
|
88
|
+
MethodParamsTable;
|
164
89
|
|
165
90
|
typedef struct client_channel_channel_data {
|
166
|
-
/** resolver for this channel */
|
167
91
|
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
|
168
|
-
/** have we started resolving this channel */
|
169
92
|
bool started_resolving;
|
170
|
-
/** is deadline checking enabled? */
|
171
93
|
bool deadline_checking_enabled;
|
172
|
-
/** client channel factory */
|
173
94
|
grpc_client_channel_factory* client_channel_factory;
|
95
|
+
bool enable_retries;
|
96
|
+
size_t per_rpc_retry_buffer_size;
|
174
97
|
|
175
98
|
/** combiner protecting all variables below in this data structure */
|
176
99
|
grpc_combiner* combiner;
|
177
100
|
/** currently active load balancer */
|
178
101
|
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
|
179
102
|
/** retry throttle data */
|
180
|
-
|
103
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
181
104
|
/** maps method names to method_parameters structs */
|
182
|
-
|
105
|
+
grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
|
183
106
|
/** incoming resolver result - set by resolver.next() */
|
184
107
|
grpc_channel_args* resolver_result;
|
185
108
|
/** a list of closures that are all waiting for resolver result to come in */
|
@@ -200,7 +123,7 @@ typedef struct client_channel_channel_data {
|
|
200
123
|
gpr_mu external_connectivity_watcher_list_mu;
|
201
124
|
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
|
202
125
|
|
203
|
-
/* the following properties are guarded by a mutex since
|
126
|
+
/* the following properties are guarded by a mutex since APIs require them
|
204
127
|
to be instantaneously available */
|
205
128
|
gpr_mu info_mu;
|
206
129
|
char* info_lb_policy_name;
|
@@ -303,12 +226,11 @@ static void start_resolving_locked(channel_data* chand) {
|
|
303
226
|
|
304
227
|
typedef struct {
|
305
228
|
char* server_name;
|
306
|
-
|
229
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
307
230
|
} service_config_parsing_state;
|
308
231
|
|
309
|
-
static void parse_retry_throttle_params(
|
310
|
-
|
311
|
-
static_cast<service_config_parsing_state*>(arg);
|
232
|
+
static void parse_retry_throttle_params(
|
233
|
+
const grpc_json* field, service_config_parsing_state* parsing_state) {
|
312
234
|
if (strcmp(field->key, "retryThrottling") == 0) {
|
313
235
|
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
|
314
236
|
if (field->type != GRPC_JSON_OBJECT) return;
|
@@ -357,7 +279,7 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
|
|
357
279
|
}
|
358
280
|
}
|
359
281
|
parsing_state->retry_throttle_data =
|
360
|
-
|
282
|
+
grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
|
361
283
|
parsing_state->server_name, max_milli_tokens, milli_token_ratio);
|
362
284
|
}
|
363
285
|
}
|
@@ -382,21 +304,26 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
|
|
382
304
|
chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
|
383
305
|
}
|
384
306
|
|
307
|
+
// TODO(roth): The logic in this function is very hard to follow. We
|
308
|
+
// should refactor this so that it's easier to understand, perhaps as
|
309
|
+
// part of changing the resolver API to more clearly differentiate
|
310
|
+
// between transient failures and shutdown.
|
385
311
|
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
386
312
|
channel_data* chand = static_cast<channel_data*>(arg);
|
387
313
|
if (grpc_client_channel_trace.enabled()) {
|
388
|
-
gpr_log(GPR_DEBUG,
|
389
|
-
|
314
|
+
gpr_log(GPR_DEBUG,
|
315
|
+
"chand=%p: got resolver result: resolver_result=%p error=%s", chand,
|
316
|
+
chand->resolver_result, grpc_error_string(error));
|
390
317
|
}
|
391
|
-
// Extract the following fields from the resolver result, if non-
|
318
|
+
// Extract the following fields from the resolver result, if non-nullptr.
|
392
319
|
bool lb_policy_updated = false;
|
393
320
|
bool lb_policy_created = false;
|
394
321
|
char* lb_policy_name_dup = nullptr;
|
395
322
|
bool lb_policy_name_changed = false;
|
396
323
|
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
|
397
324
|
char* service_config_json = nullptr;
|
398
|
-
|
399
|
-
|
325
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
326
|
+
grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
|
400
327
|
if (chand->resolver_result != nullptr) {
|
401
328
|
if (chand->resolver != nullptr) {
|
402
329
|
// Find LB policy name.
|
@@ -431,7 +358,6 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
431
358
|
// Use pick_first if nothing was specified and we didn't select grpclb
|
432
359
|
// above.
|
433
360
|
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
|
434
|
-
|
435
361
|
// Check to see if we're already using the right LB policy.
|
436
362
|
// Note: It's safe to use chand->info_lb_policy_name here without
|
437
363
|
// taking a lock on chand->info_mu, because this function is the
|
@@ -469,42 +395,40 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
469
395
|
new_lb_policy->SetReresolutionClosureLocked(&args->closure);
|
470
396
|
}
|
471
397
|
}
|
398
|
+
// Before we clean up, save a copy of lb_policy_name, since it might
|
399
|
+
// be pointing to data inside chand->resolver_result.
|
400
|
+
// The copy will be saved in chand->lb_policy_name below.
|
401
|
+
lb_policy_name_dup = gpr_strdup(lb_policy_name);
|
472
402
|
// Find service config.
|
473
403
|
channel_arg = grpc_channel_args_find(chand->resolver_result,
|
474
404
|
GRPC_ARG_SERVICE_CONFIG);
|
475
405
|
service_config_json =
|
476
406
|
gpr_strdup(grpc_channel_arg_get_string(channel_arg));
|
477
407
|
if (service_config_json != nullptr) {
|
478
|
-
|
479
|
-
|
408
|
+
grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
|
409
|
+
grpc_core::ServiceConfig::Create(service_config_json);
|
480
410
|
if (service_config != nullptr) {
|
481
|
-
|
482
|
-
|
483
|
-
|
484
|
-
|
485
|
-
|
486
|
-
|
487
|
-
|
488
|
-
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
-
|
493
|
-
|
494
|
-
|
495
|
-
|
496
|
-
|
497
|
-
|
498
|
-
|
411
|
+
if (chand->enable_retries) {
|
412
|
+
channel_arg = grpc_channel_args_find(chand->resolver_result,
|
413
|
+
GRPC_ARG_SERVER_URI);
|
414
|
+
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
|
415
|
+
GPR_ASSERT(server_uri != nullptr);
|
416
|
+
grpc_uri* uri = grpc_uri_parse(server_uri, true);
|
417
|
+
GPR_ASSERT(uri->path[0] != '\0');
|
418
|
+
service_config_parsing_state parsing_state;
|
419
|
+
memset(&parsing_state, 0, sizeof(parsing_state));
|
420
|
+
parsing_state.server_name =
|
421
|
+
uri->path[0] == '/' ? uri->path + 1 : uri->path;
|
422
|
+
service_config->ParseGlobalParams(parse_retry_throttle_params,
|
423
|
+
&parsing_state);
|
424
|
+
grpc_uri_destroy(uri);
|
425
|
+
retry_throttle_data = std::move(parsing_state.retry_throttle_data);
|
426
|
+
}
|
427
|
+
method_params_table = service_config->CreateMethodConfigTable(
|
428
|
+
ClientChannelMethodParams::CreateFromJson);
|
499
429
|
}
|
500
430
|
}
|
501
|
-
// Before we clean up, save a copy of lb_policy_name, since it might
|
502
|
-
// be pointing to data inside chand->resolver_result.
|
503
|
-
// The copy will be saved in chand->lb_policy_name below.
|
504
|
-
lb_policy_name_dup = gpr_strdup(lb_policy_name);
|
505
431
|
}
|
506
|
-
grpc_channel_args_destroy(chand->resolver_result);
|
507
|
-
chand->resolver_result = nullptr;
|
508
432
|
}
|
509
433
|
if (grpc_client_channel_trace.enabled()) {
|
510
434
|
gpr_log(GPR_DEBUG,
|
@@ -514,7 +438,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
514
438
|
lb_policy_name_changed ? " (changed)" : "", service_config_json);
|
515
439
|
}
|
516
440
|
// Now swap out fields in chand. Note that the new values may still
|
517
|
-
// be
|
441
|
+
// be nullptr if (e.g.) the resolver failed to return results or the
|
518
442
|
// results did not contain the necessary data.
|
519
443
|
//
|
520
444
|
// First, swap out the data used by cc_get_channel_info().
|
@@ -529,21 +453,15 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
529
453
|
}
|
530
454
|
gpr_mu_unlock(&chand->info_mu);
|
531
455
|
// Swap out the retry throttle data.
|
532
|
-
|
533
|
-
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
|
534
|
-
}
|
535
|
-
chand->retry_throttle_data = retry_throttle_data;
|
456
|
+
chand->retry_throttle_data = std::move(retry_throttle_data);
|
536
457
|
// Swap out the method params table.
|
537
|
-
|
538
|
-
grpc_slice_hash_table_unref(chand->method_params_table);
|
539
|
-
}
|
540
|
-
chand->method_params_table = method_params_table;
|
458
|
+
chand->method_params_table = std::move(method_params_table);
|
541
459
|
// If we have a new LB policy or are shutting down (in which case
|
542
|
-
// new_lb_policy will be
|
543
|
-
// and removing its fds from chand->interested_parties.
|
544
|
-
// this if either (a) we updated the existing
|
545
|
-
// to create the new LB policy (in
|
546
|
-
// most recent one we had).
|
460
|
+
// new_lb_policy will be nullptr), swap out the LB policy, unreffing the
|
461
|
+
// old one and removing its fds from chand->interested_parties.
|
462
|
+
// Note that we do NOT do this if either (a) we updated the existing
|
463
|
+
// LB policy above or (b) we failed to create the new LB policy (in
|
464
|
+
// which case we want to continue using the most recent one we had).
|
547
465
|
if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
|
548
466
|
chand->resolver == nullptr) {
|
549
467
|
if (chand->lb_policy != nullptr) {
|
@@ -580,6 +498,8 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
580
498
|
"Channel disconnected", &error, 1));
|
581
499
|
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
|
582
500
|
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
|
501
|
+
grpc_channel_args_destroy(chand->resolver_result);
|
502
|
+
chand->resolver_result = nullptr;
|
583
503
|
} else { // Not shutting down.
|
584
504
|
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
|
585
505
|
grpc_error* state_error =
|
@@ -598,11 +518,16 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
|
598
518
|
chand->exit_idle_when_lb_policy_arrives = false;
|
599
519
|
}
|
600
520
|
watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
|
521
|
+
} else if (chand->resolver_result == nullptr) {
|
522
|
+
// Transient failure.
|
523
|
+
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
|
601
524
|
}
|
602
525
|
if (!lb_policy_updated) {
|
603
526
|
set_channel_connectivity_state_locked(
|
604
527
|
chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
|
605
528
|
}
|
529
|
+
grpc_channel_args_destroy(chand->resolver_result);
|
530
|
+
chand->resolver_result = nullptr;
|
606
531
|
chand->resolver->NextLocked(&chand->resolver_result,
|
607
532
|
&chand->on_resolver_result_changed);
|
608
533
|
GRPC_ERROR_UNREF(state_error);
|
@@ -722,9 +647,17 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
|
|
722
647
|
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
|
723
648
|
"client_channel");
|
724
649
|
grpc_client_channel_start_backup_polling(chand->interested_parties);
|
650
|
+
// Record max per-RPC retry buffer size.
|
651
|
+
const grpc_arg* arg = grpc_channel_args_find(
|
652
|
+
args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
|
653
|
+
chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer(
|
654
|
+
arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX});
|
655
|
+
// Record enable_retries.
|
656
|
+
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
|
657
|
+
chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
|
725
658
|
// Record client channel factory.
|
726
|
-
|
727
|
-
|
659
|
+
arg = grpc_channel_args_find(args->channel_args,
|
660
|
+
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
|
728
661
|
if (arg == nullptr) {
|
729
662
|
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
|
730
663
|
"Missing client channel factory in args for client channel filter");
|
@@ -790,12 +723,8 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
|
|
790
723
|
}
|
791
724
|
gpr_free(chand->info_lb_policy_name);
|
792
725
|
gpr_free(chand->info_service_config_json);
|
793
|
-
|
794
|
-
|
795
|
-
}
|
796
|
-
if (chand->method_params_table != nullptr) {
|
797
|
-
grpc_slice_hash_table_unref(chand->method_params_table);
|
798
|
-
}
|
726
|
+
chand->retry_throttle_data.reset();
|
727
|
+
chand->method_params_table.reset();
|
799
728
|
grpc_client_channel_stop_backup_polling(chand->interested_parties);
|
800
729
|
grpc_connectivity_state_destroy(&chand->state_tracker);
|
801
730
|
grpc_pollset_set_destroy(chand->interested_parties);
|
@@ -809,15 +738,123 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
|
|
809
738
|
*/
|
810
739
|
|
811
740
|
// Max number of batches that can be pending on a call at any given
|
812
|
-
// time. This includes:
|
741
|
+
// time. This includes one batch for each of the following ops:
|
813
742
|
// recv_initial_metadata
|
814
743
|
// send_initial_metadata
|
815
744
|
// recv_message
|
816
745
|
// send_message
|
817
746
|
// recv_trailing_metadata
|
818
747
|
// send_trailing_metadata
|
819
|
-
|
820
|
-
|
748
|
+
#define MAX_PENDING_BATCHES 6
|
749
|
+
|
750
|
+
// Retry support:
|
751
|
+
//
|
752
|
+
// In order to support retries, we act as a proxy for stream op batches.
|
753
|
+
// When we get a batch from the surface, we add it to our list of pending
|
754
|
+
// batches, and we then use those batches to construct separate "child"
|
755
|
+
// batches to be started on the subchannel call. When the child batches
|
756
|
+
// return, we then decide which pending batches have been completed and
|
757
|
+
// schedule their callbacks accordingly. If a subchannel call fails and
|
758
|
+
// we want to retry it, we do a new pick and start again, constructing
|
759
|
+
// new "child" batches for the new subchannel call.
|
760
|
+
//
|
761
|
+
// Note that retries are committed when receiving data from the server
|
762
|
+
// (except for Trailers-Only responses). However, there may be many
|
763
|
+
// send ops started before receiving any data, so we may have already
|
764
|
+
// completed some number of send ops (and returned the completions up to
|
765
|
+
// the surface) by the time we realize that we need to retry. To deal
|
766
|
+
// with this, we cache data for send ops, so that we can replay them on a
|
767
|
+
// different subchannel call even after we have completed the original
|
768
|
+
// batches.
|
769
|
+
//
|
770
|
+
// There are two sets of data to maintain:
|
771
|
+
// - In call_data (in the parent channel), we maintain a list of pending
|
772
|
+
// ops and cached data for send ops.
|
773
|
+
// - In the subchannel call, we maintain state to indicate what ops have
|
774
|
+
// already been sent down to that call.
|
775
|
+
//
|
776
|
+
// When constructing the "child" batches, we compare those two sets of
|
777
|
+
// data to see which batches need to be sent to the subchannel call.
|
778
|
+
|
779
|
+
// TODO(roth): In subsequent PRs:
|
780
|
+
// - add support for transparent retries (including initial metadata)
|
781
|
+
// - figure out how to record stats in census for retries
|
782
|
+
// (census filter is on top of this one)
|
783
|
+
// - add census stats for retries
|
784
|
+
|
785
|
+
// State used for starting a retryable batch on a subchannel call.
|
786
|
+
// This provides its own grpc_transport_stream_op_batch and other data
|
787
|
+
// structures needed to populate the ops in the batch.
|
788
|
+
// We allocate one struct on the arena for each attempt at starting a
|
789
|
+
// batch on a given subchannel call.
|
790
|
+
typedef struct {
|
791
|
+
gpr_refcount refs;
|
792
|
+
grpc_call_element* elem;
|
793
|
+
grpc_subchannel_call* subchannel_call; // Holds a ref.
|
794
|
+
// The batch to use in the subchannel call.
|
795
|
+
// Its payload field points to subchannel_call_retry_state.batch_payload.
|
796
|
+
grpc_transport_stream_op_batch batch;
|
797
|
+
// For send_initial_metadata.
|
798
|
+
// Note that we need to make a copy of the initial metadata for each
|
799
|
+
// subchannel call instead of just referring to the copy in call_data,
|
800
|
+
// because filters in the subchannel stack will probably add entries,
|
801
|
+
// so we need to start in a pristine state for each attempt of the call.
|
802
|
+
grpc_linked_mdelem* send_initial_metadata_storage;
|
803
|
+
grpc_metadata_batch send_initial_metadata;
|
804
|
+
// For send_message.
|
805
|
+
grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
|
806
|
+
send_message;
|
807
|
+
// For send_trailing_metadata.
|
808
|
+
grpc_linked_mdelem* send_trailing_metadata_storage;
|
809
|
+
grpc_metadata_batch send_trailing_metadata;
|
810
|
+
// For intercepting recv_initial_metadata.
|
811
|
+
grpc_metadata_batch recv_initial_metadata;
|
812
|
+
grpc_closure recv_initial_metadata_ready;
|
813
|
+
bool trailing_metadata_available;
|
814
|
+
// For intercepting recv_message.
|
815
|
+
grpc_closure recv_message_ready;
|
816
|
+
grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
|
817
|
+
// For intercepting recv_trailing_metadata.
|
818
|
+
grpc_metadata_batch recv_trailing_metadata;
|
819
|
+
grpc_transport_stream_stats collect_stats;
|
820
|
+
// For intercepting on_complete.
|
821
|
+
grpc_closure on_complete;
|
822
|
+
} subchannel_batch_data;
|
823
|
+
|
824
|
+
// Retry state associated with a subchannel call.
|
825
|
+
// Stored in the parent_data of the subchannel call object.
|
826
|
+
typedef struct {
|
827
|
+
// subchannel_batch_data.batch.payload points to this.
|
828
|
+
grpc_transport_stream_op_batch_payload batch_payload;
|
829
|
+
// These fields indicate which ops have been started and completed on
|
830
|
+
// this subchannel call.
|
831
|
+
size_t started_send_message_count;
|
832
|
+
size_t completed_send_message_count;
|
833
|
+
size_t started_recv_message_count;
|
834
|
+
size_t completed_recv_message_count;
|
835
|
+
bool started_send_initial_metadata : 1;
|
836
|
+
bool completed_send_initial_metadata : 1;
|
837
|
+
bool started_send_trailing_metadata : 1;
|
838
|
+
bool completed_send_trailing_metadata : 1;
|
839
|
+
bool started_recv_initial_metadata : 1;
|
840
|
+
bool completed_recv_initial_metadata : 1;
|
841
|
+
bool started_recv_trailing_metadata : 1;
|
842
|
+
bool completed_recv_trailing_metadata : 1;
|
843
|
+
// State for callback processing.
|
844
|
+
bool retry_dispatched : 1;
|
845
|
+
bool recv_initial_metadata_ready_deferred : 1;
|
846
|
+
bool recv_message_ready_deferred : 1;
|
847
|
+
grpc_error* recv_initial_metadata_error;
|
848
|
+
grpc_error* recv_message_error;
|
849
|
+
} subchannel_call_retry_state;
|
850
|
+
|
851
|
+
// Pending batches stored in call data.
|
852
|
+
typedef struct {
|
853
|
+
// The pending batch. If nullptr, this slot is empty.
|
854
|
+
grpc_transport_stream_op_batch* batch;
|
855
|
+
// Indicates whether payload for send ops has been cached in call data.
|
856
|
+
bool send_ops_cached;
|
857
|
+
} pending_batch;
|
821
858
|
|
822
859
|
/** Call data. Holds a pointer to grpc_subchannel_call and the
|
823
860
|
associated machinery to create such a pointer.
|
@@ -840,254 +877,1754 @@ typedef struct client_channel_call_data {
|
|
840
877
|
grpc_call_stack* owning_call;
|
841
878
|
grpc_call_combiner* call_combiner;
|
842
879
|
|
843
|
-
|
844
|
-
|
880
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
881
|
+
grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
|
845
882
|
|
846
883
|
grpc_subchannel_call* subchannel_call;
|
847
|
-
|
884
|
+
|
885
|
+
// Set when we get a cancel_stream op.
|
886
|
+
grpc_error* cancel_error;
|
848
887
|
|
849
888
|
grpc_core::LoadBalancingPolicy::PickState pick;
|
850
|
-
grpc_closure
|
851
|
-
grpc_closure
|
889
|
+
grpc_closure pick_closure;
|
890
|
+
grpc_closure pick_cancel_closure;
|
852
891
|
|
853
892
|
grpc_polling_entity* pollent;
|
854
893
|
|
855
|
-
|
856
|
-
|
857
|
-
|
894
|
+
// Batches are added to this list when received from above.
|
895
|
+
// They are removed when we are done handling the batch (i.e., when
|
896
|
+
// either we have invoked all of the batch's callbacks or we have
|
897
|
+
// passed the batch down to the subchannel call and are not
|
898
|
+
// intercepting any of its callbacks).
|
899
|
+
pending_batch pending_batches[MAX_PENDING_BATCHES];
|
900
|
+
bool pending_send_initial_metadata : 1;
|
901
|
+
bool pending_send_message : 1;
|
902
|
+
bool pending_send_trailing_metadata : 1;
|
903
|
+
|
904
|
+
// Retry state.
|
905
|
+
bool enable_retries : 1;
|
906
|
+
bool retry_committed : 1;
|
907
|
+
bool last_attempt_got_server_pushback : 1;
|
908
|
+
int num_attempts_completed;
|
909
|
+
size_t bytes_buffered_for_retry;
|
910
|
+
grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
|
911
|
+
grpc_timer retry_timer;
|
912
|
+
|
913
|
+
// Cached data for retrying send ops.
|
914
|
+
// send_initial_metadata
|
915
|
+
bool seen_send_initial_metadata;
|
916
|
+
grpc_linked_mdelem* send_initial_metadata_storage;
|
917
|
+
grpc_metadata_batch send_initial_metadata;
|
918
|
+
uint32_t send_initial_metadata_flags;
|
919
|
+
gpr_atm* peer_string;
|
920
|
+
// send_message
|
921
|
+
// When we get a send_message op, we replace the original byte stream
|
922
|
+
// with a CachingByteStream that caches the slices to a local buffer for
|
923
|
+
// use in retries.
|
924
|
+
// Note: We inline the cache for the first 3 send_message ops and use
|
925
|
+
// dynamic allocation after that. This number was essentially picked
|
926
|
+
// at random; it could be changed in the future to tune performance.
|
927
|
+
grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
|
928
|
+
// send_trailing_metadata
|
929
|
+
bool seen_send_trailing_metadata;
|
930
|
+
grpc_linked_mdelem* send_trailing_metadata_storage;
|
931
|
+
grpc_metadata_batch send_trailing_metadata;
|
932
|
+
} call_data;
|
858
933
|
|
859
|
-
|
934
|
+
// Forward declarations.
|
935
|
+
static void retry_commit(grpc_call_element* elem,
|
936
|
+
subchannel_call_retry_state* retry_state);
|
937
|
+
static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
|
938
|
+
static void on_complete(void* arg, grpc_error* error);
|
939
|
+
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
|
940
|
+
static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
|
941
|
+
static void start_pick_locked(void* arg, grpc_error* ignored);
|
942
|
+
|
943
|
+
//
|
944
|
+
// send op data caching
|
945
|
+
//
|
946
|
+
|
947
|
+
// Caches data for send ops so that it can be retried later, if not
|
948
|
+
// already cached.
|
949
|
+
static void maybe_cache_send_ops_for_batch(call_data* calld,
|
950
|
+
pending_batch* pending) {
|
951
|
+
if (pending->send_ops_cached) return;
|
952
|
+
pending->send_ops_cached = true;
|
953
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
954
|
+
// Save a copy of metadata for send_initial_metadata ops.
|
955
|
+
if (batch->send_initial_metadata) {
|
956
|
+
calld->seen_send_initial_metadata = true;
|
957
|
+
GPR_ASSERT(calld->send_initial_metadata_storage == nullptr);
|
958
|
+
grpc_metadata_batch* send_initial_metadata =
|
959
|
+
batch->payload->send_initial_metadata.send_initial_metadata;
|
960
|
+
calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc(
|
961
|
+
calld->arena,
|
962
|
+
sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
|
963
|
+
grpc_metadata_batch_copy(send_initial_metadata,
|
964
|
+
&calld->send_initial_metadata,
|
965
|
+
calld->send_initial_metadata_storage);
|
966
|
+
calld->send_initial_metadata_flags =
|
967
|
+
batch->payload->send_initial_metadata.send_initial_metadata_flags;
|
968
|
+
calld->peer_string = batch->payload->send_initial_metadata.peer_string;
|
969
|
+
}
|
970
|
+
// Set up cache for send_message ops.
|
971
|
+
if (batch->send_message) {
|
972
|
+
grpc_core::ByteStreamCache* cache =
|
973
|
+
static_cast<grpc_core::ByteStreamCache*>(
|
974
|
+
gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
|
975
|
+
new (cache) grpc_core::ByteStreamCache(
|
976
|
+
std::move(batch->payload->send_message.send_message));
|
977
|
+
calld->send_messages.push_back(cache);
|
978
|
+
}
|
979
|
+
// Save metadata batch for send_trailing_metadata ops.
|
980
|
+
if (batch->send_trailing_metadata) {
|
981
|
+
calld->seen_send_trailing_metadata = true;
|
982
|
+
GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr);
|
983
|
+
grpc_metadata_batch* send_trailing_metadata =
|
984
|
+
batch->payload->send_trailing_metadata.send_trailing_metadata;
|
985
|
+
calld->send_trailing_metadata_storage =
|
986
|
+
(grpc_linked_mdelem*)gpr_arena_alloc(
|
987
|
+
calld->arena,
|
988
|
+
sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
|
989
|
+
grpc_metadata_batch_copy(send_trailing_metadata,
|
990
|
+
&calld->send_trailing_metadata,
|
991
|
+
calld->send_trailing_metadata_storage);
|
992
|
+
}
|
993
|
+
}
|
860
994
|
|
861
|
-
|
862
|
-
|
863
|
-
|
995
|
+
// Frees cached send ops that have already been completed after
|
996
|
+
// committing the call.
|
997
|
+
static void free_cached_send_op_data_after_commit(
|
998
|
+
grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
|
999
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1000
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1001
|
+
if (retry_state->completed_send_initial_metadata) {
|
1002
|
+
grpc_metadata_batch_destroy(&calld->send_initial_metadata);
|
1003
|
+
}
|
1004
|
+
for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
|
1005
|
+
if (grpc_client_channel_trace.enabled()) {
|
1006
|
+
gpr_log(GPR_DEBUG,
|
1007
|
+
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
|
1008
|
+
"]",
|
1009
|
+
chand, calld, i);
|
1010
|
+
}
|
1011
|
+
calld->send_messages[i]->Destroy();
|
1012
|
+
}
|
1013
|
+
if (retry_state->completed_send_trailing_metadata) {
|
1014
|
+
grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
|
1015
|
+
}
|
1016
|
+
}
|
864
1017
|
|
865
|
-
|
866
|
-
|
1018
|
+
// Frees cached send ops that were completed by the completed batch in
|
1019
|
+
// batch_data. Used when batches are completed after the call is committed.
|
1020
|
+
static void free_cached_send_op_data_for_completed_batch(
|
1021
|
+
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1022
|
+
subchannel_call_retry_state* retry_state) {
|
1023
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
867
1024
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
868
|
-
|
1025
|
+
if (batch_data->batch.send_initial_metadata) {
|
1026
|
+
grpc_metadata_batch_destroy(&calld->send_initial_metadata);
|
1027
|
+
}
|
1028
|
+
if (batch_data->batch.send_message) {
|
1029
|
+
if (grpc_client_channel_trace.enabled()) {
|
1030
|
+
gpr_log(GPR_DEBUG,
|
1031
|
+
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
|
1032
|
+
"]",
|
1033
|
+
chand, calld, retry_state->completed_send_message_count - 1);
|
1034
|
+
}
|
1035
|
+
calld->send_messages[retry_state->completed_send_message_count - 1]
|
1036
|
+
->Destroy();
|
1037
|
+
}
|
1038
|
+
if (batch_data->batch.send_trailing_metadata) {
|
1039
|
+
grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
|
1040
|
+
}
|
1041
|
+
}
|
1042
|
+
|
1043
|
+
//
|
1044
|
+
// pending_batches management
|
1045
|
+
//
|
1046
|
+
|
1047
|
+
// Returns the index into calld->pending_batches to be used for batch.
|
1048
|
+
static size_t get_batch_index(grpc_transport_stream_op_batch* batch) {
|
1049
|
+
// Note: It is important the send_initial_metadata be the first entry
|
1050
|
+
// here, since the code in pick_subchannel_locked() assumes it will be.
|
1051
|
+
if (batch->send_initial_metadata) return 0;
|
1052
|
+
if (batch->send_message) return 1;
|
1053
|
+
if (batch->send_trailing_metadata) return 2;
|
1054
|
+
if (batch->recv_initial_metadata) return 3;
|
1055
|
+
if (batch->recv_message) return 4;
|
1056
|
+
if (batch->recv_trailing_metadata) return 5;
|
1057
|
+
GPR_UNREACHABLE_CODE(return (size_t)-1);
|
869
1058
|
}
|
870
1059
|
|
871
1060
|
// This is called via the call combiner, so access to calld is synchronized.
|
872
|
-
static void
|
873
|
-
|
874
|
-
|
875
|
-
|
876
|
-
|
877
|
-
|
878
|
-
|
879
|
-
|
880
|
-
|
1061
|
+
static void pending_batches_add(grpc_call_element* elem,
|
1062
|
+
grpc_transport_stream_op_batch* batch) {
|
1063
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1064
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1065
|
+
const size_t idx = get_batch_index(batch);
|
1066
|
+
if (grpc_client_channel_trace.enabled()) {
|
1067
|
+
gpr_log(GPR_DEBUG,
|
1068
|
+
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
|
1069
|
+
calld, idx);
|
1070
|
+
}
|
1071
|
+
pending_batch* pending = &calld->pending_batches[idx];
|
1072
|
+
GPR_ASSERT(pending->batch == nullptr);
|
1073
|
+
pending->batch = batch;
|
1074
|
+
pending->send_ops_cached = false;
|
1075
|
+
if (calld->enable_retries) {
|
1076
|
+
// Update state in calld about pending batches.
|
1077
|
+
// Also check if the batch takes us over the retry buffer limit.
|
1078
|
+
// Note: We don't check the size of trailing metadata here, because
|
1079
|
+
// gRPC clients do not send trailing metadata.
|
1080
|
+
if (batch->send_initial_metadata) {
|
1081
|
+
calld->pending_send_initial_metadata = true;
|
1082
|
+
calld->bytes_buffered_for_retry += grpc_metadata_batch_size(
|
1083
|
+
batch->payload->send_initial_metadata.send_initial_metadata);
|
1084
|
+
}
|
1085
|
+
if (batch->send_message) {
|
1086
|
+
calld->pending_send_message = true;
|
1087
|
+
calld->bytes_buffered_for_retry +=
|
1088
|
+
batch->payload->send_message.send_message->length();
|
1089
|
+
}
|
1090
|
+
if (batch->send_trailing_metadata) {
|
1091
|
+
calld->pending_send_trailing_metadata = true;
|
1092
|
+
}
|
1093
|
+
if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
|
1094
|
+
if (grpc_client_channel_trace.enabled()) {
|
1095
|
+
gpr_log(GPR_DEBUG,
|
1096
|
+
"chand=%p calld=%p: exceeded retry buffer size, committing",
|
1097
|
+
chand, calld);
|
1098
|
+
}
|
1099
|
+
subchannel_call_retry_state* retry_state =
|
1100
|
+
calld->subchannel_call == nullptr
|
1101
|
+
? nullptr
|
1102
|
+
: static_cast<subchannel_call_retry_state*>(
|
1103
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1104
|
+
calld->subchannel_call));
|
1105
|
+
retry_commit(elem, retry_state);
|
1106
|
+
// If we are not going to retry and have not yet started, pretend
|
1107
|
+
// retries are disabled so that we don't bother with retry overhead.
|
1108
|
+
if (calld->num_attempts_completed == 0) {
|
1109
|
+
if (grpc_client_channel_trace.enabled()) {
|
1110
|
+
gpr_log(GPR_DEBUG,
|
1111
|
+
"chand=%p calld=%p: disabling retries before first attempt",
|
1112
|
+
chand, calld);
|
1113
|
+
}
|
1114
|
+
calld->enable_retries = false;
|
1115
|
+
}
|
1116
|
+
}
|
1117
|
+
}
|
1118
|
+
}
|
1119
|
+
|
1120
|
+
static void pending_batch_clear(call_data* calld, pending_batch* pending) {
|
1121
|
+
if (calld->enable_retries) {
|
1122
|
+
if (pending->batch->send_initial_metadata) {
|
1123
|
+
calld->pending_send_initial_metadata = false;
|
1124
|
+
}
|
1125
|
+
if (pending->batch->send_message) {
|
1126
|
+
calld->pending_send_message = false;
|
1127
|
+
}
|
1128
|
+
if (pending->batch->send_trailing_metadata) {
|
1129
|
+
calld->pending_send_trailing_metadata = false;
|
1130
|
+
}
|
881
1131
|
}
|
1132
|
+
pending->batch = nullptr;
|
882
1133
|
}
|
883
1134
|
|
884
1135
|
// This is called via the call combiner, so access to calld is synchronized.
|
885
1136
|
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
|
886
|
-
|
887
|
-
|
888
|
-
|
889
|
-
|
890
|
-
|
891
|
-
|
892
|
-
}
|
1137
|
+
grpc_transport_stream_op_batch* batch =
|
1138
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
1139
|
+
call_data* calld = static_cast<call_data*>(batch->handler_private.extra_arg);
|
1140
|
+
// Note: This will release the call combiner.
|
1141
|
+
grpc_transport_stream_op_batch_finish_with_failure(
|
1142
|
+
batch, GRPC_ERROR_REF(error), calld->call_combiner);
|
893
1143
|
}
|
894
1144
|
|
895
1145
|
// This is called via the call combiner, so access to calld is synchronized.
|
896
|
-
|
897
|
-
|
1146
|
+
// If yield_call_combiner is true, assumes responsibility for yielding
|
1147
|
+
// the call combiner.
|
1148
|
+
static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
|
1149
|
+
bool yield_call_combiner) {
|
1150
|
+
GPR_ASSERT(error != GRPC_ERROR_NONE);
|
898
1151
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
899
1152
|
if (grpc_client_channel_trace.enabled()) {
|
1153
|
+
size_t num_batches = 0;
|
1154
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1155
|
+
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
|
1156
|
+
}
|
900
1157
|
gpr_log(GPR_DEBUG,
|
901
1158
|
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
|
902
|
-
elem->channel_data, calld,
|
903
|
-
|
1159
|
+
elem->channel_data, calld, num_batches, grpc_error_string(error));
|
1160
|
+
}
|
1161
|
+
grpc_transport_stream_op_batch*
|
1162
|
+
batches[GPR_ARRAY_SIZE(calld->pending_batches)];
|
1163
|
+
size_t num_batches = 0;
|
1164
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1165
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1166
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1167
|
+
if (batch != nullptr) {
|
1168
|
+
batches[num_batches++] = batch;
|
1169
|
+
pending_batch_clear(calld, pending);
|
1170
|
+
}
|
904
1171
|
}
|
905
|
-
for (size_t i = 0; i <
|
906
|
-
|
907
|
-
|
1172
|
+
for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
|
1173
|
+
grpc_transport_stream_op_batch* batch = batches[i];
|
1174
|
+
batch->handler_private.extra_arg = calld;
|
1175
|
+
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
|
1176
|
+
fail_pending_batch_in_call_combiner, batch,
|
908
1177
|
grpc_schedule_on_exec_ctx);
|
909
|
-
GRPC_CALL_COMBINER_START(
|
910
|
-
|
911
|
-
|
912
|
-
}
|
913
|
-
if (
|
914
|
-
|
915
|
-
|
916
|
-
|
917
|
-
|
918
|
-
|
919
|
-
|
1178
|
+
GRPC_CALL_COMBINER_START(calld->call_combiner,
|
1179
|
+
&batch->handler_private.closure,
|
1180
|
+
GRPC_ERROR_REF(error), "pending_batches_fail");
|
1181
|
+
}
|
1182
|
+
if (yield_call_combiner) {
|
1183
|
+
if (num_batches > 0) {
|
1184
|
+
// Note: This will release the call combiner.
|
1185
|
+
grpc_transport_stream_op_batch_finish_with_failure(
|
1186
|
+
batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
|
1187
|
+
} else {
|
1188
|
+
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
|
1189
|
+
}
|
920
1190
|
}
|
921
1191
|
GRPC_ERROR_UNREF(error);
|
922
1192
|
}
|
923
1193
|
|
924
1194
|
// This is called via the call combiner, so access to calld is synchronized.
|
925
|
-
static void
|
926
|
-
|
927
|
-
|
928
|
-
|
929
|
-
|
930
|
-
|
931
|
-
|
932
|
-
|
1195
|
+
static void resume_pending_batch_in_call_combiner(void* arg,
|
1196
|
+
grpc_error* ignored) {
|
1197
|
+
grpc_transport_stream_op_batch* batch =
|
1198
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
1199
|
+
grpc_subchannel_call* subchannel_call =
|
1200
|
+
static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
|
1201
|
+
// Note: This will release the call combiner.
|
1202
|
+
grpc_subchannel_call_process_op(subchannel_call, batch);
|
933
1203
|
}
|
934
1204
|
|
935
1205
|
// This is called via the call combiner, so access to calld is synchronized.
|
936
|
-
static void
|
1206
|
+
static void pending_batches_resume(grpc_call_element* elem) {
|
937
1207
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
938
1208
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1209
|
+
if (calld->enable_retries) {
|
1210
|
+
start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE);
|
1211
|
+
return;
|
1212
|
+
}
|
1213
|
+
// Retries not enabled; send down batches as-is.
|
939
1214
|
if (grpc_client_channel_trace.enabled()) {
|
1215
|
+
size_t num_batches = 0;
|
1216
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1217
|
+
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
|
1218
|
+
}
|
940
1219
|
gpr_log(GPR_DEBUG,
|
941
|
-
"chand=%p calld=%p:
|
942
|
-
" pending batches
|
943
|
-
chand, calld, calld->
|
944
|
-
|
945
|
-
|
946
|
-
|
947
|
-
|
948
|
-
|
1220
|
+
"chand=%p calld=%p: starting %" PRIuPTR
|
1221
|
+
" pending batches on subchannel_call=%p",
|
1222
|
+
chand, calld, num_batches, calld->subchannel_call);
|
1223
|
+
}
|
1224
|
+
grpc_transport_stream_op_batch*
|
1225
|
+
batches[GPR_ARRAY_SIZE(calld->pending_batches)];
|
1226
|
+
size_t num_batches = 0;
|
1227
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1228
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1229
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1230
|
+
if (batch != nullptr) {
|
1231
|
+
batches[num_batches++] = batch;
|
1232
|
+
pending_batch_clear(calld, pending);
|
1233
|
+
}
|
1234
|
+
}
|
1235
|
+
for (size_t i = 1; i < num_batches; ++i) {
|
1236
|
+
grpc_transport_stream_op_batch* batch = batches[i];
|
1237
|
+
batch->handler_private.extra_arg = calld->subchannel_call;
|
1238
|
+
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
|
1239
|
+
resume_pending_batch_in_call_combiner, batch,
|
949
1240
|
grpc_schedule_on_exec_ctx);
|
950
|
-
GRPC_CALL_COMBINER_START(
|
951
|
-
|
952
|
-
|
1241
|
+
GRPC_CALL_COMBINER_START(calld->call_combiner,
|
1242
|
+
&batch->handler_private.closure, GRPC_ERROR_NONE,
|
1243
|
+
"pending_batches_resume");
|
953
1244
|
}
|
954
|
-
GPR_ASSERT(
|
955
|
-
|
956
|
-
|
1245
|
+
GPR_ASSERT(num_batches > 0);
|
1246
|
+
// Note: This will release the call combiner.
|
1247
|
+
grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
|
957
1248
|
}
|
958
1249
|
|
959
|
-
|
960
|
-
|
961
|
-
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
|
1250
|
+
static void maybe_clear_pending_batch(grpc_call_element* elem,
|
1251
|
+
pending_batch* pending) {
|
962
1252
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
963
1253
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
964
|
-
|
965
|
-
|
966
|
-
|
967
|
-
|
968
|
-
|
969
|
-
|
970
|
-
|
971
|
-
|
972
|
-
|
973
|
-
|
974
|
-
|
975
|
-
|
976
|
-
method_parameters_ref(calld->method_params);
|
977
|
-
// If the deadline from the service config is shorter than the one
|
978
|
-
// from the client API, reset the deadline timer.
|
979
|
-
if (chand->deadline_checking_enabled &&
|
980
|
-
calld->method_params->timeout != 0) {
|
981
|
-
const grpc_millis per_method_deadline =
|
982
|
-
grpc_timespec_to_millis_round_up(calld->call_start_time) +
|
983
|
-
calld->method_params->timeout;
|
984
|
-
if (per_method_deadline < calld->deadline) {
|
985
|
-
calld->deadline = per_method_deadline;
|
986
|
-
grpc_deadline_state_reset(elem, calld->deadline);
|
987
|
-
}
|
988
|
-
}
|
1254
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1255
|
+
// We clear the pending batch if all of its callbacks have been
|
1256
|
+
// scheduled and reset to nullptr.
|
1257
|
+
if (batch->on_complete == nullptr &&
|
1258
|
+
(!batch->recv_initial_metadata ||
|
1259
|
+
batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
|
1260
|
+
nullptr) &&
|
1261
|
+
(!batch->recv_message ||
|
1262
|
+
batch->payload->recv_message.recv_message_ready == nullptr)) {
|
1263
|
+
if (grpc_client_channel_trace.enabled()) {
|
1264
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
|
1265
|
+
calld);
|
989
1266
|
}
|
1267
|
+
pending_batch_clear(calld, pending);
|
990
1268
|
}
|
991
1269
|
}
|
992
1270
|
|
993
|
-
|
994
|
-
|
995
|
-
|
996
|
-
|
997
|
-
|
998
|
-
|
999
|
-
calld->path, // path
|
1000
|
-
calld->call_start_time, // start_time
|
1001
|
-
calld->deadline, // deadline
|
1002
|
-
calld->arena, // arena
|
1003
|
-
calld->pick.subchannel_call_context, // context
|
1004
|
-
calld->call_combiner // call_combiner
|
1005
|
-
};
|
1006
|
-
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
|
1007
|
-
call_args, &calld->subchannel_call);
|
1008
|
-
if (grpc_client_channel_trace.enabled()) {
|
1009
|
-
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
|
1010
|
-
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
|
1271
|
+
// Returns true if all ops in the pending batch have been completed.
|
1272
|
+
static bool pending_batch_is_completed(
|
1273
|
+
pending_batch* pending, call_data* calld,
|
1274
|
+
subchannel_call_retry_state* retry_state) {
|
1275
|
+
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
1276
|
+
return false;
|
1011
1277
|
}
|
1012
|
-
if (
|
1013
|
-
|
1014
|
-
|
1015
|
-
} else {
|
1016
|
-
waiting_for_pick_batches_resume(elem);
|
1278
|
+
if (pending->batch->send_initial_metadata &&
|
1279
|
+
!retry_state->completed_send_initial_metadata) {
|
1280
|
+
return false;
|
1017
1281
|
}
|
1018
|
-
|
1282
|
+
if (pending->batch->send_message &&
|
1283
|
+
retry_state->completed_send_message_count < calld->send_messages.size()) {
|
1284
|
+
return false;
|
1285
|
+
}
|
1286
|
+
if (pending->batch->send_trailing_metadata &&
|
1287
|
+
!retry_state->completed_send_trailing_metadata) {
|
1288
|
+
return false;
|
1289
|
+
}
|
1290
|
+
if (pending->batch->recv_initial_metadata &&
|
1291
|
+
!retry_state->completed_recv_initial_metadata) {
|
1292
|
+
return false;
|
1293
|
+
}
|
1294
|
+
if (pending->batch->recv_message &&
|
1295
|
+
retry_state->completed_recv_message_count <
|
1296
|
+
retry_state->started_recv_message_count) {
|
1297
|
+
return false;
|
1298
|
+
}
|
1299
|
+
if (pending->batch->recv_trailing_metadata &&
|
1300
|
+
!retry_state->completed_recv_trailing_metadata) {
|
1301
|
+
return false;
|
1302
|
+
}
|
1303
|
+
return true;
|
1019
1304
|
}
|
1020
1305
|
|
1021
|
-
//
|
1022
|
-
static
|
1023
|
-
|
1024
|
-
|
1025
|
-
if (
|
1026
|
-
|
1027
|
-
GRPC_ERROR_UNREF(calld->error);
|
1028
|
-
calld->error = error == GRPC_ERROR_NONE
|
1029
|
-
? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
|
1030
|
-
"Call dropped by load balancing policy")
|
1031
|
-
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
1032
|
-
"Failed to create subchannel", &error, 1);
|
1033
|
-
if (grpc_client_channel_trace.enabled()) {
|
1034
|
-
gpr_log(GPR_DEBUG,
|
1035
|
-
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
|
1036
|
-
calld, grpc_error_string(calld->error));
|
1037
|
-
}
|
1038
|
-
waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
|
1039
|
-
} else {
|
1040
|
-
/* Create call on subchannel. */
|
1041
|
-
create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
|
1306
|
+
// Returns true if any op in the batch was not yet started.
|
1307
|
+
static bool pending_batch_is_unstarted(
|
1308
|
+
pending_batch* pending, call_data* calld,
|
1309
|
+
subchannel_call_retry_state* retry_state) {
|
1310
|
+
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
1311
|
+
return false;
|
1042
1312
|
}
|
1043
|
-
|
1313
|
+
if (pending->batch->send_initial_metadata &&
|
1314
|
+
!retry_state->started_send_initial_metadata) {
|
1315
|
+
return true;
|
1316
|
+
}
|
1317
|
+
if (pending->batch->send_message &&
|
1318
|
+
retry_state->started_send_message_count < calld->send_messages.size()) {
|
1319
|
+
return true;
|
1320
|
+
}
|
1321
|
+
if (pending->batch->send_trailing_metadata &&
|
1322
|
+
!retry_state->started_send_trailing_metadata) {
|
1323
|
+
return true;
|
1324
|
+
}
|
1325
|
+
if (pending->batch->recv_initial_metadata &&
|
1326
|
+
!retry_state->started_recv_initial_metadata) {
|
1327
|
+
return true;
|
1328
|
+
}
|
1329
|
+
if (pending->batch->recv_message &&
|
1330
|
+
retry_state->completed_recv_message_count ==
|
1331
|
+
retry_state->started_recv_message_count) {
|
1332
|
+
return true;
|
1333
|
+
}
|
1334
|
+
if (pending->batch->recv_trailing_metadata &&
|
1335
|
+
!retry_state->started_recv_trailing_metadata) {
|
1336
|
+
return true;
|
1337
|
+
}
|
1338
|
+
return false;
|
1044
1339
|
}
|
1045
1340
|
|
1046
|
-
//
|
1047
|
-
//
|
1048
|
-
//
|
1049
|
-
|
1050
|
-
|
1341
|
+
//
|
1342
|
+
// retry code
|
1343
|
+
//
|
1344
|
+
|
1345
|
+
// Commits the call so that no further retry attempts will be performed.
|
1346
|
+
static void retry_commit(grpc_call_element* elem,
|
1347
|
+
subchannel_call_retry_state* retry_state) {
|
1051
1348
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1052
1349
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1053
|
-
|
1054
|
-
|
1055
|
-
|
1350
|
+
if (calld->retry_committed) return;
|
1351
|
+
calld->retry_committed = true;
|
1352
|
+
if (grpc_client_channel_trace.enabled()) {
|
1353
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
|
1354
|
+
}
|
1355
|
+
if (retry_state != nullptr) {
|
1356
|
+
free_cached_send_op_data_after_commit(elem, retry_state);
|
1357
|
+
}
|
1056
1358
|
}
|
1057
1359
|
|
1058
|
-
//
|
1059
|
-
|
1060
|
-
|
1061
|
-
|
1360
|
+
// Starts a retry after appropriate back-off.
|
1361
|
+
static void do_retry(grpc_call_element* elem,
|
1362
|
+
subchannel_call_retry_state* retry_state,
|
1363
|
+
grpc_millis server_pushback_ms) {
|
1062
1364
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1063
1365
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1064
|
-
|
1065
|
-
|
1066
|
-
|
1067
|
-
|
1068
|
-
|
1069
|
-
|
1070
|
-
|
1366
|
+
GPR_ASSERT(calld->method_params != nullptr);
|
1367
|
+
const ClientChannelMethodParams::RetryPolicy* retry_policy =
|
1368
|
+
calld->method_params->retry_policy();
|
1369
|
+
GPR_ASSERT(retry_policy != nullptr);
|
1370
|
+
// Reset subchannel call and connected subchannel.
|
1371
|
+
if (calld->subchannel_call != nullptr) {
|
1372
|
+
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
|
1373
|
+
"client_channel_call_retry");
|
1374
|
+
calld->subchannel_call = nullptr;
|
1375
|
+
}
|
1376
|
+
if (calld->pick.connected_subchannel != nullptr) {
|
1377
|
+
calld->pick.connected_subchannel.reset();
|
1378
|
+
}
|
1379
|
+
// Compute backoff delay.
|
1380
|
+
grpc_millis next_attempt_time;
|
1381
|
+
if (server_pushback_ms >= 0) {
|
1382
|
+
next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
|
1383
|
+
calld->last_attempt_got_server_pushback = true;
|
1384
|
+
} else {
|
1385
|
+
if (calld->num_attempts_completed == 1 ||
|
1386
|
+
calld->last_attempt_got_server_pushback) {
|
1387
|
+
calld->retry_backoff.Init(
|
1388
|
+
grpc_core::BackOff::Options()
|
1389
|
+
.set_initial_backoff(retry_policy->initial_backoff)
|
1390
|
+
.set_multiplier(retry_policy->backoff_multiplier)
|
1391
|
+
.set_jitter(RETRY_BACKOFF_JITTER)
|
1392
|
+
.set_max_backoff(retry_policy->max_backoff));
|
1393
|
+
calld->last_attempt_got_server_pushback = false;
|
1071
1394
|
}
|
1072
|
-
|
1395
|
+
next_attempt_time = calld->retry_backoff->NextAttemptTime();
|
1073
1396
|
}
|
1074
|
-
|
1397
|
+
if (grpc_client_channel_trace.enabled()) {
|
1398
|
+
gpr_log(GPR_DEBUG,
|
1399
|
+
"chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
|
1400
|
+
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
|
1401
|
+
}
|
1402
|
+
// Schedule retry after computed delay.
|
1403
|
+
GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem,
|
1404
|
+
grpc_combiner_scheduler(chand->combiner));
|
1405
|
+
grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure);
|
1406
|
+
// Update bookkeeping.
|
1407
|
+
if (retry_state != nullptr) retry_state->retry_dispatched = true;
|
1075
1408
|
}
|
1076
1409
|
|
1077
|
-
//
|
1078
|
-
|
1079
|
-
|
1080
|
-
|
1410
|
+
// Returns true if the call is being retried.
|
1411
|
+
static bool maybe_retry(grpc_call_element* elem,
|
1412
|
+
subchannel_batch_data* batch_data,
|
1413
|
+
grpc_status_code status,
|
1414
|
+
grpc_mdelem* server_pushback_md) {
|
1081
1415
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1082
1416
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1083
|
-
|
1084
|
-
|
1085
|
-
|
1417
|
+
// Get retry policy.
|
1418
|
+
if (calld->method_params == nullptr) return false;
|
1419
|
+
const ClientChannelMethodParams::RetryPolicy* retry_policy =
|
1420
|
+
calld->method_params->retry_policy();
|
1421
|
+
if (retry_policy == nullptr) return false;
|
1422
|
+
// If we've already dispatched a retry from this call, return true.
|
1423
|
+
// This catches the case where the batch has multiple callbacks
|
1424
|
+
// (i.e., it includes either recv_message or recv_initial_metadata).
|
1425
|
+
subchannel_call_retry_state* retry_state = nullptr;
|
1426
|
+
if (batch_data != nullptr) {
|
1427
|
+
retry_state = static_cast<subchannel_call_retry_state*>(
|
1428
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1429
|
+
batch_data->subchannel_call));
|
1430
|
+
if (retry_state->retry_dispatched) {
|
1431
|
+
if (grpc_client_channel_trace.enabled()) {
|
1432
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
|
1433
|
+
calld);
|
1434
|
+
}
|
1435
|
+
return true;
|
1436
|
+
}
|
1437
|
+
}
|
1438
|
+
// Check status.
|
1439
|
+
if (status == GRPC_STATUS_OK) {
|
1440
|
+
if (calld->retry_throttle_data != nullptr) {
|
1441
|
+
calld->retry_throttle_data->RecordSuccess();
|
1442
|
+
}
|
1443
|
+
if (grpc_client_channel_trace.enabled()) {
|
1444
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
|
1445
|
+
}
|
1446
|
+
return false;
|
1447
|
+
}
|
1448
|
+
// Status is not OK. Check whether the status is retryable.
|
1449
|
+
if (!retry_policy->retryable_status_codes.Contains(status)) {
|
1450
|
+
if (grpc_client_channel_trace.enabled()) {
|
1451
|
+
gpr_log(GPR_DEBUG,
|
1452
|
+
"chand=%p calld=%p: status %s not configured as retryable", chand,
|
1453
|
+
calld, grpc_status_code_to_string(status));
|
1454
|
+
}
|
1455
|
+
return false;
|
1456
|
+
}
|
1457
|
+
// Record the failure and check whether retries are throttled.
|
1458
|
+
// Note that it's important for this check to come after the status
|
1459
|
+
// code check above, since we should only record failures whose statuses
|
1460
|
+
// match the configured retryable status codes, so that we don't count
|
1461
|
+
// things like failures due to malformed requests (INVALID_ARGUMENT).
|
1462
|
+
// Conversely, it's important for this to come before the remaining
|
1463
|
+
// checks, so that we don't fail to record failures due to other factors.
|
1464
|
+
if (calld->retry_throttle_data != nullptr &&
|
1465
|
+
!calld->retry_throttle_data->RecordFailure()) {
|
1466
|
+
if (grpc_client_channel_trace.enabled()) {
|
1467
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
|
1468
|
+
}
|
1469
|
+
return false;
|
1470
|
+
}
|
1471
|
+
// Check whether the call is committed.
|
1472
|
+
if (calld->retry_committed) {
|
1473
|
+
if (grpc_client_channel_trace.enabled()) {
|
1474
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
|
1475
|
+
calld);
|
1476
|
+
}
|
1477
|
+
return false;
|
1478
|
+
}
|
1479
|
+
// Check whether we have retries remaining.
|
1480
|
+
++calld->num_attempts_completed;
|
1481
|
+
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
|
1482
|
+
if (grpc_client_channel_trace.enabled()) {
|
1483
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
|
1484
|
+
calld, retry_policy->max_attempts);
|
1485
|
+
}
|
1486
|
+
return false;
|
1487
|
+
}
|
1488
|
+
// If the call was cancelled from the surface, don't retry.
|
1489
|
+
if (calld->cancel_error != GRPC_ERROR_NONE) {
|
1490
|
+
if (grpc_client_channel_trace.enabled()) {
|
1491
|
+
gpr_log(GPR_DEBUG,
|
1492
|
+
"chand=%p calld=%p: call cancelled from surface, not retrying",
|
1493
|
+
chand, calld);
|
1494
|
+
}
|
1495
|
+
return false;
|
1496
|
+
}
|
1497
|
+
// Check server push-back.
|
1498
|
+
grpc_millis server_pushback_ms = -1;
|
1499
|
+
if (server_pushback_md != nullptr) {
|
1500
|
+
// If the value is "-1" or any other unparseable string, we do not retry.
|
1501
|
+
uint32_t ms;
|
1502
|
+
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
|
1503
|
+
if (grpc_client_channel_trace.enabled()) {
|
1504
|
+
gpr_log(GPR_DEBUG,
|
1505
|
+
"chand=%p calld=%p: not retrying due to server push-back",
|
1506
|
+
chand, calld);
|
1507
|
+
}
|
1508
|
+
return false;
|
1509
|
+
} else {
|
1510
|
+
if (grpc_client_channel_trace.enabled()) {
|
1511
|
+
gpr_log(GPR_DEBUG,
|
1512
|
+
"chand=%p calld=%p: server push-back: retry in %u ms", chand,
|
1513
|
+
calld, ms);
|
1514
|
+
}
|
1515
|
+
server_pushback_ms = (grpc_millis)ms;
|
1516
|
+
}
|
1517
|
+
}
|
1518
|
+
do_retry(elem, retry_state, server_pushback_ms);
|
1519
|
+
return true;
|
1520
|
+
}
|
1521
|
+
|
1522
|
+
//
|
1523
|
+
// subchannel_batch_data
|
1524
|
+
//
|
1525
|
+
|
1526
|
+
static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
|
1527
|
+
int refcount) {
|
1528
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1529
|
+
subchannel_call_retry_state* retry_state =
|
1530
|
+
static_cast<subchannel_call_retry_state*>(
|
1531
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1532
|
+
calld->subchannel_call));
|
1533
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
|
1534
|
+
gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
|
1535
|
+
batch_data->elem = elem;
|
1536
|
+
batch_data->subchannel_call =
|
1537
|
+
GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
|
1538
|
+
batch_data->batch.payload = &retry_state->batch_payload;
|
1539
|
+
gpr_ref_init(&batch_data->refs, refcount);
|
1540
|
+
GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
|
1541
|
+
grpc_schedule_on_exec_ctx);
|
1542
|
+
batch_data->batch.on_complete = &batch_data->on_complete;
|
1543
|
+
GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
|
1544
|
+
return batch_data;
|
1545
|
+
}
|
1546
|
+
|
1547
|
+
static void batch_data_unref(subchannel_batch_data* batch_data) {
|
1548
|
+
if (gpr_unref(&batch_data->refs)) {
|
1549
|
+
if (batch_data->send_initial_metadata_storage != nullptr) {
|
1550
|
+
grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
|
1551
|
+
}
|
1552
|
+
if (batch_data->send_trailing_metadata_storage != nullptr) {
|
1553
|
+
grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
|
1554
|
+
}
|
1555
|
+
if (batch_data->batch.recv_initial_metadata) {
|
1556
|
+
grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
|
1557
|
+
}
|
1558
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1559
|
+
grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
|
1560
|
+
}
|
1561
|
+
GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
|
1562
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1563
|
+
GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
|
1564
|
+
}
|
1565
|
+
}
|
1566
|
+
|
1567
|
+
//
|
1568
|
+
// recv_initial_metadata callback handling
|
1569
|
+
//
|
1570
|
+
|
1571
|
+
// Invokes recv_initial_metadata_ready for a subchannel batch.
|
1572
|
+
static void invoke_recv_initial_metadata_callback(void* arg,
|
1573
|
+
grpc_error* error) {
|
1574
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1575
|
+
channel_data* chand =
|
1576
|
+
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1577
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1578
|
+
// Find pending batch.
|
1579
|
+
pending_batch* pending = nullptr;
|
1580
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1581
|
+
grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
|
1582
|
+
if (batch != nullptr && batch->recv_initial_metadata &&
|
1583
|
+
batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
|
1584
|
+
nullptr) {
|
1585
|
+
if (grpc_client_channel_trace.enabled()) {
|
1586
|
+
gpr_log(GPR_DEBUG,
|
1587
|
+
"chand=%p calld=%p: invoking recv_initial_metadata_ready for "
|
1588
|
+
"pending batch at index %" PRIuPTR,
|
1589
|
+
chand, calld, i);
|
1590
|
+
}
|
1591
|
+
pending = &calld->pending_batches[i];
|
1592
|
+
break;
|
1593
|
+
}
|
1594
|
+
}
|
1595
|
+
GPR_ASSERT(pending != nullptr);
|
1596
|
+
// Return metadata.
|
1597
|
+
grpc_metadata_batch_move(
|
1598
|
+
&batch_data->recv_initial_metadata,
|
1599
|
+
pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
|
1600
|
+
// Update bookkeeping.
|
1601
|
+
// Note: Need to do this before invoking the callback, since invoking
|
1602
|
+
// the callback will result in yielding the call combiner.
|
1603
|
+
grpc_closure* recv_initial_metadata_ready =
|
1604
|
+
pending->batch->payload->recv_initial_metadata
|
1605
|
+
.recv_initial_metadata_ready;
|
1606
|
+
pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
|
1607
|
+
nullptr;
|
1608
|
+
maybe_clear_pending_batch(batch_data->elem, pending);
|
1609
|
+
batch_data_unref(batch_data);
|
1610
|
+
// Invoke callback.
|
1611
|
+
GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error));
|
1612
|
+
}
|
1613
|
+
|
1614
|
+
// Intercepts recv_initial_metadata_ready callback for retries.
|
1615
|
+
// Commits the call and returns the initial metadata up the stack.
|
1616
|
+
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
1617
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1618
|
+
grpc_call_element* elem = batch_data->elem;
|
1619
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1620
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1621
|
+
if (grpc_client_channel_trace.enabled()) {
|
1622
|
+
gpr_log(GPR_DEBUG,
|
1623
|
+
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
|
1624
|
+
chand, calld, grpc_error_string(error));
|
1625
|
+
}
|
1626
|
+
subchannel_call_retry_state* retry_state =
|
1627
|
+
static_cast<subchannel_call_retry_state*>(
|
1628
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1629
|
+
batch_data->subchannel_call));
|
1630
|
+
// If we got an error or a Trailers-Only response and have not yet gotten
|
1631
|
+
// the recv_trailing_metadata on_complete callback, then defer
|
1632
|
+
// propagating this callback back to the surface. We can evaluate whether
|
1633
|
+
// to retry when recv_trailing_metadata comes back.
|
1634
|
+
if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
|
1635
|
+
!retry_state->completed_recv_trailing_metadata) {
|
1636
|
+
if (grpc_client_channel_trace.enabled()) {
|
1637
|
+
gpr_log(GPR_DEBUG,
|
1638
|
+
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
|
1639
|
+
"(Trailers-Only)",
|
1640
|
+
chand, calld);
|
1641
|
+
}
|
1642
|
+
retry_state->recv_initial_metadata_ready_deferred = true;
|
1643
|
+
retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
|
1644
|
+
if (!retry_state->started_recv_trailing_metadata) {
|
1645
|
+
// recv_trailing_metadata not yet started by application; start it
|
1646
|
+
// ourselves to get status.
|
1647
|
+
start_internal_recv_trailing_metadata(elem);
|
1648
|
+
} else {
|
1649
|
+
GRPC_CALL_COMBINER_STOP(
|
1650
|
+
calld->call_combiner,
|
1651
|
+
"recv_initial_metadata_ready trailers-only or error");
|
1652
|
+
}
|
1653
|
+
return;
|
1654
|
+
}
|
1655
|
+
// Received valid initial metadata, so commit the call.
|
1656
|
+
retry_commit(elem, retry_state);
|
1657
|
+
// Manually invoking a callback function; it does not take ownership of error.
|
1658
|
+
invoke_recv_initial_metadata_callback(batch_data, error);
|
1659
|
+
GRPC_ERROR_UNREF(error);
|
1660
|
+
}
|
1661
|
+
|
1662
|
+
//
|
1663
|
+
// recv_message callback handling
|
1664
|
+
//
|
1665
|
+
|
1666
|
+
// Invokes recv_message_ready for a subchannel batch.
|
1667
|
+
static void invoke_recv_message_callback(void* arg, grpc_error* error) {
|
1668
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1669
|
+
channel_data* chand =
|
1670
|
+
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1671
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1672
|
+
// Find pending op.
|
1673
|
+
pending_batch* pending = nullptr;
|
1674
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1675
|
+
grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
|
1676
|
+
if (batch != nullptr && batch->recv_message &&
|
1677
|
+
batch->payload->recv_message.recv_message_ready != nullptr) {
|
1678
|
+
if (grpc_client_channel_trace.enabled()) {
|
1679
|
+
gpr_log(GPR_DEBUG,
|
1680
|
+
"chand=%p calld=%p: invoking recv_message_ready for "
|
1681
|
+
"pending batch at index %" PRIuPTR,
|
1682
|
+
chand, calld, i);
|
1683
|
+
}
|
1684
|
+
pending = &calld->pending_batches[i];
|
1685
|
+
break;
|
1686
|
+
}
|
1687
|
+
}
|
1688
|
+
GPR_ASSERT(pending != nullptr);
|
1689
|
+
// Return payload.
|
1690
|
+
*pending->batch->payload->recv_message.recv_message =
|
1691
|
+
std::move(batch_data->recv_message);
|
1692
|
+
// Update bookkeeping.
|
1693
|
+
// Note: Need to do this before invoking the callback, since invoking
|
1694
|
+
// the callback will result in yielding the call combiner.
|
1695
|
+
grpc_closure* recv_message_ready =
|
1696
|
+
pending->batch->payload->recv_message.recv_message_ready;
|
1697
|
+
pending->batch->payload->recv_message.recv_message_ready = nullptr;
|
1698
|
+
maybe_clear_pending_batch(batch_data->elem, pending);
|
1699
|
+
batch_data_unref(batch_data);
|
1700
|
+
// Invoke callback.
|
1701
|
+
GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error));
|
1702
|
+
}
|
1703
|
+
|
1704
|
+
// Intercepts recv_message_ready callback for retries.
|
1705
|
+
// Commits the call and returns the message up the stack.
|
1706
|
+
static void recv_message_ready(void* arg, grpc_error* error) {
|
1707
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1708
|
+
grpc_call_element* elem = batch_data->elem;
|
1709
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1710
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1711
|
+
if (grpc_client_channel_trace.enabled()) {
|
1712
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
|
1713
|
+
chand, calld, grpc_error_string(error));
|
1714
|
+
}
|
1715
|
+
subchannel_call_retry_state* retry_state =
|
1716
|
+
static_cast<subchannel_call_retry_state*>(
|
1717
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1718
|
+
batch_data->subchannel_call));
|
1719
|
+
// If we got an error or the payload was nullptr and we have not yet gotten
|
1720
|
+
// the recv_trailing_metadata on_complete callback, then defer
|
1721
|
+
// propagating this callback back to the surface. We can evaluate whether
|
1722
|
+
// to retry when recv_trailing_metadata comes back.
|
1723
|
+
if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
|
1724
|
+
!retry_state->completed_recv_trailing_metadata) {
|
1725
|
+
if (grpc_client_channel_trace.enabled()) {
|
1726
|
+
gpr_log(GPR_DEBUG,
|
1727
|
+
"chand=%p calld=%p: deferring recv_message_ready (nullptr "
|
1728
|
+
"message and recv_trailing_metadata pending)",
|
1729
|
+
chand, calld);
|
1730
|
+
}
|
1731
|
+
retry_state->recv_message_ready_deferred = true;
|
1732
|
+
retry_state->recv_message_error = GRPC_ERROR_REF(error);
|
1733
|
+
if (!retry_state->started_recv_trailing_metadata) {
|
1734
|
+
// recv_trailing_metadata not yet started by application; start it
|
1735
|
+
// ourselves to get status.
|
1736
|
+
start_internal_recv_trailing_metadata(elem);
|
1737
|
+
} else {
|
1738
|
+
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null");
|
1739
|
+
}
|
1740
|
+
return;
|
1741
|
+
}
|
1742
|
+
// Received a valid message, so commit the call.
|
1743
|
+
retry_commit(elem, retry_state);
|
1744
|
+
// Manually invoking a callback function; it does not take ownership of error.
|
1745
|
+
invoke_recv_message_callback(batch_data, error);
|
1746
|
+
GRPC_ERROR_UNREF(error);
|
1747
|
+
}
|
1748
|
+
|
1749
|
+
//
|
1750
|
+
// on_complete callback handling
|
1751
|
+
//
|
1752
|
+
|
1753
|
+
// Updates retry_state to reflect the ops completed in batch_data.
|
1754
|
+
static void update_retry_state_for_completed_batch(
|
1755
|
+
subchannel_batch_data* batch_data,
|
1756
|
+
subchannel_call_retry_state* retry_state) {
|
1757
|
+
if (batch_data->batch.send_initial_metadata) {
|
1758
|
+
retry_state->completed_send_initial_metadata = true;
|
1759
|
+
}
|
1760
|
+
if (batch_data->batch.send_message) {
|
1761
|
+
++retry_state->completed_send_message_count;
|
1762
|
+
}
|
1763
|
+
if (batch_data->batch.send_trailing_metadata) {
|
1764
|
+
retry_state->completed_send_trailing_metadata = true;
|
1765
|
+
}
|
1766
|
+
if (batch_data->batch.recv_initial_metadata) {
|
1767
|
+
retry_state->completed_recv_initial_metadata = true;
|
1768
|
+
}
|
1769
|
+
if (batch_data->batch.recv_message) {
|
1770
|
+
++retry_state->completed_recv_message_count;
|
1771
|
+
}
|
1772
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1773
|
+
retry_state->completed_recv_trailing_metadata = true;
|
1774
|
+
}
|
1775
|
+
}
|
1776
|
+
|
1777
|
+
// Represents a closure that needs to run as a result of a completed batch.
|
1778
|
+
typedef struct {
|
1779
|
+
grpc_closure* closure;
|
1780
|
+
grpc_error* error;
|
1781
|
+
const char* reason;
|
1782
|
+
} closure_to_execute;
|
1783
|
+
|
1784
|
+
// Adds any necessary closures for deferred recv_initial_metadata and
|
1785
|
+
// recv_message callbacks to closures, updating *num_closures as needed.
|
1786
|
+
static void add_closures_for_deferred_recv_callbacks(
|
1787
|
+
subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
|
1788
|
+
closure_to_execute* closures, size_t* num_closures) {
|
1789
|
+
if (batch_data->batch.recv_trailing_metadata &&
|
1790
|
+
retry_state->recv_initial_metadata_ready_deferred) {
|
1791
|
+
closure_to_execute* closure = &closures[(*num_closures)++];
|
1792
|
+
closure->closure =
|
1793
|
+
GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
|
1794
|
+
invoke_recv_initial_metadata_callback, batch_data,
|
1795
|
+
grpc_schedule_on_exec_ctx);
|
1796
|
+
closure->error = retry_state->recv_initial_metadata_error;
|
1797
|
+
closure->reason = "resuming recv_initial_metadata_ready";
|
1798
|
+
}
|
1799
|
+
if (batch_data->batch.recv_trailing_metadata &&
|
1800
|
+
retry_state->recv_message_ready_deferred) {
|
1801
|
+
closure_to_execute* closure = &closures[(*num_closures)++];
|
1802
|
+
closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
|
1803
|
+
invoke_recv_message_callback,
|
1804
|
+
batch_data, grpc_schedule_on_exec_ctx);
|
1805
|
+
closure->error = retry_state->recv_message_error;
|
1806
|
+
closure->reason = "resuming recv_message_ready";
|
1807
|
+
}
|
1808
|
+
}
|
1809
|
+
|
1810
|
+
// If there are any cached ops to replay or pending ops to start on the
|
1811
|
+
// subchannel call, adds a closure to closures to invoke
|
1812
|
+
// start_retriable_subchannel_batches(), updating *num_closures as needed.
|
1813
|
+
static void add_closures_for_replay_or_pending_send_ops(
|
1814
|
+
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1815
|
+
subchannel_call_retry_state* retry_state, closure_to_execute* closures,
|
1816
|
+
size_t* num_closures) {
|
1817
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1818
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1819
|
+
bool have_pending_send_message_ops =
|
1820
|
+
retry_state->started_send_message_count < calld->send_messages.size();
|
1821
|
+
bool have_pending_send_trailing_metadata_op =
|
1822
|
+
calld->seen_send_trailing_metadata &&
|
1823
|
+
!retry_state->started_send_trailing_metadata;
|
1824
|
+
if (!have_pending_send_message_ops &&
|
1825
|
+
!have_pending_send_trailing_metadata_op) {
|
1826
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1827
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1828
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1829
|
+
if (batch == nullptr || pending->send_ops_cached) continue;
|
1830
|
+
if (batch->send_message) have_pending_send_message_ops = true;
|
1831
|
+
if (batch->send_trailing_metadata) {
|
1832
|
+
have_pending_send_trailing_metadata_op = true;
|
1833
|
+
}
|
1834
|
+
}
|
1835
|
+
}
|
1836
|
+
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
|
1837
|
+
if (grpc_client_channel_trace.enabled()) {
|
1838
|
+
gpr_log(GPR_DEBUG,
|
1839
|
+
"chand=%p calld=%p: starting next batch for pending send op(s)",
|
1840
|
+
chand, calld);
|
1841
|
+
}
|
1842
|
+
closure_to_execute* closure = &closures[(*num_closures)++];
|
1843
|
+
closure->closure = GRPC_CLOSURE_INIT(
|
1844
|
+
&batch_data->batch.handler_private.closure,
|
1845
|
+
start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
|
1846
|
+
closure->error = GRPC_ERROR_NONE;
|
1847
|
+
closure->reason = "starting next batch for send_* op(s)";
|
1848
|
+
}
|
1849
|
+
}
|
1850
|
+
|
1851
|
+
// For any pending batch completed in batch_data, adds the necessary
|
1852
|
+
// completion closures to closures, updating *num_closures as needed.
|
1853
|
+
static void add_closures_for_completed_pending_batches(
|
1854
|
+
grpc_call_element* elem, subchannel_batch_data* batch_data,
|
1855
|
+
subchannel_call_retry_state* retry_state, grpc_error* error,
|
1856
|
+
closure_to_execute* closures, size_t* num_closures) {
|
1857
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1858
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1859
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1860
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1861
|
+
if (pending_batch_is_completed(pending, calld, retry_state)) {
|
1862
|
+
if (grpc_client_channel_trace.enabled()) {
|
1863
|
+
gpr_log(GPR_DEBUG,
|
1864
|
+
"chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
|
1865
|
+
chand, calld, i);
|
1866
|
+
}
|
1867
|
+
// Copy the trailing metadata to return it to the surface.
|
1868
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1869
|
+
grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
|
1870
|
+
pending->batch->payload->recv_trailing_metadata
|
1871
|
+
.recv_trailing_metadata);
|
1872
|
+
}
|
1873
|
+
closure_to_execute* closure = &closures[(*num_closures)++];
|
1874
|
+
closure->closure = pending->batch->on_complete;
|
1875
|
+
closure->error = GRPC_ERROR_REF(error);
|
1876
|
+
closure->reason = "on_complete for pending batch";
|
1877
|
+
pending->batch->on_complete = nullptr;
|
1878
|
+
maybe_clear_pending_batch(elem, pending);
|
1879
|
+
}
|
1880
|
+
}
|
1881
|
+
GRPC_ERROR_UNREF(error);
|
1882
|
+
}
|
1883
|
+
|
1884
|
+
// For any pending batch containing an op that has not yet been started,
// adds the pending batch's completion closures to closures, updating
// *num_closures as needed.
// Takes ownership of one ref to error; each emitted closure gets its own
// ref, and the caller's ref is released before returning.
static void add_closures_to_fail_unstarted_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    if (pending_batch_is_unstarted(pending, calld, retry_state)) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: failing unstarted pending batch at index "
                "%" PRIuPTR,
                chand, calld, i);
      }
      // Fail the recv_initial_metadata_ready callback, if present.
      if (pending->batch->recv_initial_metadata) {
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure = pending->batch->payload->recv_initial_metadata
                               .recv_initial_metadata_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason =
            "failing recv_initial_metadata_ready for pending batch";
        pending->batch->payload->recv_initial_metadata
            .recv_initial_metadata_ready = nullptr;
      }
      // Fail the recv_message_ready callback, if present.  The surface's
      // message pointer is cleared so it does not see stale data.
      if (pending->batch->recv_message) {
        *pending->batch->payload->recv_message.recv_message = nullptr;
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure =
            pending->batch->payload->recv_message.recv_message_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason = "failing recv_message_ready for pending batch";
        pending->batch->payload->recv_message.recv_message_ready = nullptr;
      }
      // Fail the batch's on_complete callback.
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = pending->batch->on_complete;
      closure->error = GRPC_ERROR_REF(error);
      closure->reason = "failing on_complete for pending batch";
      pending->batch->on_complete = nullptr;
      maybe_clear_pending_batch(elem, pending);
    }
  }
  GRPC_ERROR_UNREF(error);
}
|
1930
|
+
|
1931
|
+
// Callback used to intercept on_complete from subchannel calls.
// Called only when retries are enabled.
// Updates the retry bookkeeping for the completed batch, decides whether
// the call is finished and whether it should be retried, and otherwise
// builds and schedules the list of surface callbacks to run.
static void on_complete(void* arg, grpc_error* error) {
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
  grpc_call_element* elem = batch_data->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
            chand, calld, grpc_error_string(error), batch_str);
    gpr_free(batch_str);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              batch_data->subchannel_call));
  // If we have previously completed recv_trailing_metadata, then the
  // call is finished.
  bool call_finished = retry_state->completed_recv_trailing_metadata;
  // Update bookkeeping in retry_state.
  update_retry_state_for_completed_batch(batch_data, retry_state);
  if (call_finished) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
              calld);
    }
  } else {
    // Check if this batch finished the call, and if so, get its status.
    // The call is finished if either (a) this callback was invoked with
    // an error or (b) we receive status.
    grpc_status_code status = GRPC_STATUS_OK;
    grpc_mdelem* server_pushback_md = nullptr;
    if (error != GRPC_ERROR_NONE) {  // Case (a).
      call_finished = true;
      grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                            nullptr);
    } else if (batch_data->batch.recv_trailing_metadata) {  // Case (b).
      call_finished = true;
      grpc_metadata_batch* md_batch =
          batch_data->batch.payload->recv_trailing_metadata
              .recv_trailing_metadata;
      GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
      status = grpc_get_status_code_from_metadata(
          md_batch->idx.named.grpc_status->md);
      // The server may push back a retry delay via grpc-retry-pushback-ms.
      if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
        server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
      }
    } else if (retry_state->completed_recv_trailing_metadata) {
      call_finished = true;
    }
    if (call_finished && grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
              calld, grpc_status_code_to_string(status));
    }
    // If the call is finished, check if we should retry.
    if (call_finished &&
        maybe_retry(elem, batch_data, status, server_pushback_md)) {
      // We are retrying: drop all state for this attempt and return
      // without surfacing anything to the application.
      // Unref batch_data for deferred recv_initial_metadata_ready or
      // recv_message_ready callbacks, if any.
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_initial_metadata_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
      }
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_message_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_message_error);
      }
      batch_data_unref(batch_data);
      return;
    }
  }
  // If the call is finished or retries are committed, free cached data for
  // send ops that we've just completed.
  if (call_finished || calld->retry_committed) {
    free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
  }
  // Call not being retried.
  // Construct list of closures to execute.
  // Max number of closures is number of pending batches plus one for
  // each of:
  // - recv_initial_metadata_ready (either deferred or unstarted)
  // - recv_message_ready (either deferred or unstarted)
  // - starting a new batch for pending send ops
  closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
  size_t num_closures = 0;
  // If there are deferred recv_initial_metadata_ready or recv_message_ready
  // callbacks, add them to closures.
  add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
                                           &num_closures);
  // Find pending batches whose ops are now complete and add their
  // on_complete callbacks to closures.
  add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
                                             GRPC_ERROR_REF(error), closures,
                                             &num_closures);
  // Add closures to handle any pending batches that have not yet been started.
  // If the call is finished, we fail these batches; otherwise, we add a
  // callback to start_retriable_subchannel_batches() to start them on
  // the subchannel call.
  if (call_finished) {
    add_closures_to_fail_unstarted_pending_batches(
        elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
  } else {
    add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
                                                closures, &num_closures);
  }
  // Don't need batch_data anymore.
  batch_data_unref(batch_data);
  // Schedule all of the closures identified above.
  // Note that the call combiner will be yielded for each closure that
  // we schedule.  We're already running in the call combiner, so one of
  // the closures can be scheduled directly, but the others will
  // have to re-enter the call combiner.
  if (num_closures > 0) {
    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
    for (size_t i = 1; i < num_closures; ++i) {
      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
                               closures[i].error, closures[i].reason);
    }
  } else {
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no closures to run for on_complete");
  }
}
|
2057
|
+
|
2058
|
+
//
|
2059
|
+
// subchannel batch construction
|
2060
|
+
//
|
2061
|
+
|
2062
|
+
// Helper function used to start a subchannel batch in the call combiner.
|
2063
|
+
static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
|
2064
|
+
grpc_transport_stream_op_batch* batch =
|
2065
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
2066
|
+
grpc_subchannel_call* subchannel_call =
|
2067
|
+
static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
|
2068
|
+
// Note: This will release the call combiner.
|
2069
|
+
grpc_subchannel_call_process_op(subchannel_call, batch);
|
2070
|
+
}
|
2071
|
+
|
2072
|
+
// Adds retriable send_initial_metadata op to batch_data.
// Copies the call's cached initial metadata into batch_data and, on
// attempts after the first, attaches the grpc-previous-rpc-attempts header.
static void add_retriable_send_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // Maps the number of retries to the corresponding metadata value slice.
  static const grpc_slice* retry_count_strings[] = {
      &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4};
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  //
  // If we've already completed one or more attempts, add the
  // grpc-retry-attempts header.
  // Storage is arena-allocated with one extra element reserved for the
  // retry-attempts header when this is not the first attempt.
  batch_data->send_initial_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            (calld->send_initial_metadata.list.count +
                             (calld->num_attempts_completed > 0))));
  grpc_metadata_batch_copy(&calld->send_initial_metadata,
                           &batch_data->send_initial_metadata,
                           batch_data->send_initial_metadata_storage);
  // Drop any stale grpc-previous-rpc-attempts entry carried over in the
  // cached metadata before adding the current count.
  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
      nullptr) {
    grpc_metadata_batch_remove(
        &batch_data->send_initial_metadata,
        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
  }
  if (calld->num_attempts_completed > 0) {
    grpc_mdelem retry_md = grpc_mdelem_from_slices(
        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
        *retry_count_strings[calld->num_attempts_completed - 1]);
    grpc_error* error = grpc_metadata_batch_add_tail(
        &batch_data->send_initial_metadata,
        &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
                                                       .list.count],
        retry_md);
    if (error != GRPC_ERROR_NONE) {
      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
              grpc_error_string(error));
      GPR_ASSERT(false);
    }
  }
  retry_state->started_send_initial_metadata = true;
  batch_data->batch.send_initial_metadata = true;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
      &batch_data->send_initial_metadata;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
      calld->send_initial_metadata_flags;
  batch_data->batch.payload->send_initial_metadata.peer_string =
      calld->peer_string;
}
|
2123
|
+
|
2124
|
+
// Adds retriable send_message op to batch_data.
// Sends the next cached message for this attempt through a caching byte
// stream, so the payload can be replayed on a later attempt if needed.
static void add_retriable_send_message_op(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
            chand, calld, retry_state->started_send_message_count);
  }
  grpc_core::ByteStreamCache* cache =
      calld->send_messages[retry_state->started_send_message_count];
  ++retry_state->started_send_message_count;
  batch_data->send_message.Init(cache);
  batch_data->batch.send_message = true;
  batch_data->batch.payload->send_message.send_message.reset(
      batch_data->send_message.get());
}
|
2143
|
+
|
2144
|
+
// Adds retriable send_trailing_metadata op to batch_data.
static void add_retriable_send_trailing_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  batch_data->send_trailing_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            calld->send_trailing_metadata.list.count));
  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
                           &batch_data->send_trailing_metadata,
                           batch_data->send_trailing_metadata_storage);
  retry_state->started_send_trailing_metadata = true;
  batch_data->batch.send_trailing_metadata = true;
  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
      &batch_data->send_trailing_metadata;
}
|
2163
|
+
|
2164
|
+
// Adds retriable recv_initial_metadata op to batch_data.
// Metadata is received into storage owned by batch_data, and the
// transport's recv_initial_metadata_ready callback is intercepted so that
// retry bookkeeping runs before the surface sees the metadata.
static void add_retriable_recv_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  retry_state->started_recv_initial_metadata = true;
  batch_data->batch.recv_initial_metadata = true;
  grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
      &batch_data->recv_initial_metadata;
  batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
      &batch_data->trailing_metadata_available;
  GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
                    recv_initial_metadata_ready, batch_data,
                    grpc_schedule_on_exec_ctx);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
      &batch_data->recv_initial_metadata_ready;
}
|
2181
|
+
|
2182
|
+
// Adds retriable recv_message op to batch_data.
|
2183
|
+
static void add_retriable_recv_message_op(
|
2184
|
+
call_data* calld, subchannel_call_retry_state* retry_state,
|
2185
|
+
subchannel_batch_data* batch_data) {
|
2186
|
+
++retry_state->started_recv_message_count;
|
2187
|
+
batch_data->batch.recv_message = true;
|
2188
|
+
batch_data->batch.payload->recv_message.recv_message =
|
2189
|
+
&batch_data->recv_message;
|
2190
|
+
GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
|
2191
|
+
batch_data, grpc_schedule_on_exec_ctx);
|
2192
|
+
batch_data->batch.payload->recv_message.recv_message_ready =
|
2193
|
+
&batch_data->recv_message_ready;
|
2194
|
+
}
|
2195
|
+
|
2196
|
+
// Adds retriable recv_trailing_metadata op to batch_data.
|
2197
|
+
static void add_retriable_recv_trailing_metadata_op(
|
2198
|
+
call_data* calld, subchannel_call_retry_state* retry_state,
|
2199
|
+
subchannel_batch_data* batch_data) {
|
2200
|
+
retry_state->started_recv_trailing_metadata = true;
|
2201
|
+
batch_data->batch.recv_trailing_metadata = true;
|
2202
|
+
grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
|
2203
|
+
batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
|
2204
|
+
&batch_data->recv_trailing_metadata;
|
2205
|
+
batch_data->batch.collect_stats = true;
|
2206
|
+
batch_data->batch.payload->collect_stats.collect_stats =
|
2207
|
+
&batch_data->collect_stats;
|
2208
|
+
}
|
2209
|
+
|
2210
|
+
// Helper function used to start a recv_trailing_metadata batch.  This
// is used in the case where a recv_initial_metadata or recv_message
// op fails in a way that we know the call is over but when the application
// has not yet started its own recv_trailing_metadata op.
// Releases the call combiner (via grpc_subchannel_call_process_op).
static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: call failed but recv_trailing_metadata not "
            "started; starting it internally",
            chand, calld);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // One callback (on_complete) is expected for this internal batch.
  subchannel_batch_data* batch_data = batch_data_create(elem, 1);
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
  // Note: This will release the call combiner.
  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
}
|
2232
|
+
|
2233
|
+
// If there are any cached send ops that need to be replayed on the
// current subchannel call, creates and returns a new subchannel batch
// to replay those ops.  Otherwise, returns nullptr.
// Ops already requested by a pending surface batch are not replayed here;
// they are started via add_subchannel_batches_for_pending_batches().
static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  subchannel_batch_data* replay_batch_data = nullptr;
  // send_initial_metadata.
  if (calld->seen_send_initial_metadata &&
      !retry_state->started_send_initial_metadata &&
      !calld->pending_send_initial_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_initial_metadata op",
              chand, calld);
    }
    replay_batch_data = batch_data_create(elem, 1);
    add_retriable_send_initial_metadata_op(calld, retry_state,
                                           replay_batch_data);
  }
  // send_message.
  // Note that we can only have one send_message op in flight at a time.
  if (retry_state->started_send_message_count < calld->send_messages.size() &&
      retry_state->started_send_message_count ==
          retry_state->completed_send_message_count &&
      !calld->pending_send_message) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_message op",
              chand, calld);
    }
    // Reuse the batch created above, if any.
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_message_op(elem, retry_state, replay_batch_data);
  }
  // send_trailing_metadata.
  // Note that we only add this op if we have no more send_message ops
  // to start, since we can't send down any more send_message ops after
  // send_trailing_metadata.
  if (calld->seen_send_trailing_metadata &&
      retry_state->started_send_message_count == calld->send_messages.size() &&
      !retry_state->started_send_trailing_metadata &&
      !calld->pending_send_trailing_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_trailing_metadata op",
              chand, calld);
    }
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_trailing_metadata_op(calld, retry_state,
                                            replay_batch_data);
  }
  return replay_batch_data;
}
|
2294
|
+
|
2295
|
+
// Adds subchannel batches for pending batches to batches, updating
// *num_batches as needed.
// Batches whose ops have already been started on this attempt, or that
// cannot be started yet because replayed send ops must finish first,
// are skipped and remain pending.
static void add_subchannel_batches_for_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_transport_stream_op_batch** batches, size_t* num_batches) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    grpc_transport_stream_op_batch* batch = pending->batch;
    if (batch == nullptr) continue;
    // Skip any batch that either (a) has already been started on this
    // subchannel call or (b) we can't start yet because we're still
    // replaying send ops that need to be completed first.
    // TODO(roth): Note that if any one op in the batch can't be sent
    // yet due to ops that we're replaying, we don't start any of the ops
    // in the batch.  This is probably okay, but it could conceivably
    // lead to increased latency in some cases -- e.g., we could delay
    // starting a recv op due to it being in the same batch with a send
    // op.  If/when we revamp the callback protocol in
    // transport_stream_op_batch, we may be able to fix this.
    if (batch->send_initial_metadata &&
        retry_state->started_send_initial_metadata) {
      continue;
    }
    if (batch->send_message && retry_state->completed_send_message_count <
                                   retry_state->started_send_message_count) {
      continue;
    }
    // Note that we only start send_trailing_metadata if we have no more
    // send_message ops to start, since we can't send down any more
    // send_message ops after send_trailing_metadata.
    if (batch->send_trailing_metadata &&
        (retry_state->started_send_message_count + batch->send_message <
             calld->send_messages.size() ||
         retry_state->started_send_trailing_metadata)) {
      continue;
    }
    if (batch->recv_initial_metadata &&
        retry_state->started_recv_initial_metadata) {
      continue;
    }
    if (batch->recv_message && retry_state->completed_recv_message_count <
                                   retry_state->started_recv_message_count) {
      continue;
    }
    if (batch->recv_trailing_metadata &&
        retry_state->started_recv_trailing_metadata) {
      continue;
    }
    // If we're not retrying, just send the batch as-is.
    if (calld->method_params == nullptr ||
        calld->method_params->retry_policy() == nullptr ||
        calld->retry_committed) {
      batches[(*num_batches)++] = batch;
      pending_batch_clear(calld, pending);
      continue;
    }
    // Create batch with the right number of callbacks: on_complete plus
    // one per intercepted recv callback.
    const int num_callbacks =
        1 + batch->recv_initial_metadata + batch->recv_message;
    subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
    // Cache send ops if needed.
    maybe_cache_send_ops_for_batch(calld, pending);
    // send_initial_metadata.
    if (batch->send_initial_metadata) {
      add_retriable_send_initial_metadata_op(calld, retry_state, batch_data);
    }
    // send_message.
    if (batch->send_message) {
      add_retriable_send_message_op(elem, retry_state, batch_data);
    }
    // send_trailing_metadata.
    if (batch->send_trailing_metadata) {
      add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data);
    }
    // recv_initial_metadata.
    if (batch->recv_initial_metadata) {
      // recv_flags is only used on the server side.
      GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
      add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data);
    }
    // recv_message.
    if (batch->recv_message) {
      add_retriable_recv_message_op(calld, retry_state, batch_data);
    }
    // recv_trailing_metadata.
    if (batch->recv_trailing_metadata) {
      GPR_ASSERT(batch->collect_stats);
      add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
    }
    batches[(*num_batches)++] = &batch_data->batch;
  }
}
|
2388
|
+
|
2389
|
+
// Constructs and starts whatever subchannel batches are needed on the
// subchannel call.
// Runs in the call combiner; the combiner is released exactly once,
// either by the first batch started or by GRPC_CALL_COMBINER_STOP when
// there is nothing to start.
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
            chand, calld);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // We can start up to 6 batches.
  grpc_transport_stream_op_batch*
      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
  size_t num_batches = 0;
  // Replay previously-returned send_* ops if needed.
  subchannel_batch_data* replay_batch_data =
      maybe_create_subchannel_batch_for_replay(elem, retry_state);
  if (replay_batch_data != nullptr) {
    batches[num_batches++] = &replay_batch_data->batch;
  }
  // Now add pending batches.
  add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
                                             &num_batches);
  // Start batches on subchannel call.
  // Note that the call combiner will be yielded for each batch that we
  // send down.  We're already running in the call combiner, so one of
  // the batches can be started directly, but the others will have to
  // re-enter the call combiner.
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting %" PRIuPTR
            " retriable batches on subchannel_call=%p",
            chand, calld, num_batches, calld->subchannel_call);
  }
  if (num_batches == 0) {
    // This should be fairly rare, but it can happen when (e.g.) an
    // attempt completes before it has finished replaying all
    // previously sent messages.
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no retriable subchannel batches to start");
  } else {
    // Schedule batches[1..] through the call combiner; batches[0] is
    // started directly below, which releases the combiner.
    for (size_t i = 1; i < num_batches; ++i) {
      if (grpc_client_channel_trace.enabled()) {
        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
                calld, batch_str);
        gpr_free(batch_str);
      }
      batches[i]->handler_private.extra_arg = calld->subchannel_call;
      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
                        start_batch_in_call_combiner, batches[i],
                        grpc_schedule_on_exec_ctx);
      GRPC_CALL_COMBINER_START(calld->call_combiner,
                               &batches[i]->handler_private.closure,
                               GRPC_ERROR_NONE, "start_subchannel_batch");
    }
    if (grpc_client_channel_trace.enabled()) {
      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
              batch_str);
      gpr_free(batch_str);
    }
    // Note: This will release the call combiner.
    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
  }
}
|
2460
|
+
|
2461
|
+
//
|
2462
|
+
// LB pick
|
2463
|
+
//
|
2464
|
+
|
2465
|
+
// Creates the subchannel call on the connected subchannel chosen by the
// LB pick, then either resumes the pending surface batches (on success)
// or fails them (on error).  Takes ownership of error.
static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Reserve per-call parent data for retry state only when retries are on.
  const size_t parent_data_size =
      calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
  const grpc_core::ConnectedSubchannel::CallArgs call_args = {
      calld->pollent,                       // pollent
      calld->path,                          // path
      calld->call_start_time,               // start_time
      calld->deadline,                      // deadline
      calld->arena,                         // arena
      calld->pick.subchannel_call_context,  // context
      calld->call_combiner,                 // call_combiner
      parent_data_size                      // parent_data_size
  };
  grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
      call_args, &calld->subchannel_call);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
  }
  if (new_error != GRPC_ERROR_NONE) {
    new_error = grpc_error_add_child(new_error, error);
    pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
  } else {
    if (parent_data_size > 0) {
      subchannel_call_retry_state* retry_state =
          static_cast<subchannel_call_retry_state*>(
              grpc_connected_subchannel_call_get_parent_data(
                  calld->subchannel_call));
      retry_state->batch_payload.context = calld->pick.subchannel_call_context;
    }
    pending_batches_resume(elem);
  }
  GRPC_ERROR_UNREF(error);
}
|
2501
|
+
|
2502
|
+
// Invoked when a pick is completed, on both success or failure.
// On success, creates the subchannel call; on failure, either retries
// the pick (when retries are enabled and permitted) or fails all pending
// batches with an appropriate error.
static void pick_done(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (calld->pick.connected_subchannel == nullptr) {
    // Failed to create subchannel.
    // If there was no error, this is an LB policy drop, in which case
    // we return an error; otherwise, we may retry.
    grpc_status_code status = GRPC_STATUS_OK;
    grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                          nullptr);
    if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
        !maybe_retry(elem, nullptr /* batch_data */, status,
                     nullptr /* server_pushback_md */)) {
      grpc_error* new_error =
          error == GRPC_ERROR_NONE
              ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                    "Call dropped by load balancing policy")
              : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                    "Failed to create subchannel", &error, 1);
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: failed to create subchannel: error=%s",
                chand, calld, grpc_error_string(new_error));
      }
      pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
    }
  } else {
    /* Create call on subchannel. */
    create_subchannel_call(elem, GRPC_ERROR_REF(error));
  }
}
|
2535
|
+
|
2536
|
+
// Invoked when a pick is completed to leave the client_channel combiner
|
2537
|
+
// and continue processing in the call combiner.
|
2538
|
+
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
|
2539
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2540
|
+
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
|
2541
|
+
grpc_schedule_on_exec_ctx);
|
2542
|
+
GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
|
2543
|
+
}
|
2544
|
+
|
2545
|
+
// A wrapper around pick_done_locked() that is used in cases where
|
2546
|
+
// either (a) the pick was deferred pending a resolver result or (b) the
|
2547
|
+
// pick was done asynchronously. Removes the call's polling entity from
|
2548
|
+
// chand->interested_parties before invoking pick_done_locked().
|
2549
|
+
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
|
2550
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2551
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2552
|
+
grpc_polling_entity_del_from_pollset_set(calld->pollent,
|
2553
|
+
chand->interested_parties);
|
2554
|
+
pick_done_locked(elem, error);
|
2555
|
+
}
|
2556
|
+
|
2557
|
+
// Note: This runs under the client_channel combiner, but will NOT be
|
2558
|
+
// holding the call combiner.
|
2559
|
+
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
|
2560
|
+
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
|
2561
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2562
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2563
|
+
// Note: chand->lb_policy may have changed since we started our pick,
|
2564
|
+
// in which case we will be cancelling the pick on a policy other than
|
2565
|
+
// the one we started it on. However, this will just be a no-op.
|
2566
|
+
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
|
2567
|
+
if (grpc_client_channel_trace.enabled()) {
|
2568
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
|
2569
|
+
chand, calld, chand->lb_policy.get());
|
2570
|
+
}
|
2571
|
+
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
|
2572
|
+
}
|
2573
|
+
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
|
2574
|
+
}
|
2575
|
+
|
2576
|
+
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
|
2577
|
+
// Unrefs the LB policy and invokes async_pick_done_locked().
|
2578
|
+
static void pick_callback_done_locked(void* arg, grpc_error* error) {
|
2579
|
+
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
|
2580
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2581
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2582
|
+
if (grpc_client_channel_trace.enabled()) {
|
2583
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
|
2584
|
+
chand, calld);
|
1086
2585
|
}
|
1087
2586
|
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
|
1088
2587
|
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
|
1089
2588
|
}
|
1090
2589
|
|
2590
|
+
// Applies service config to the call. Must be invoked once we know
|
2591
|
+
// that the resolver has returned results to the channel.
|
2592
|
+
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
|
2593
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2594
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2595
|
+
if (grpc_client_channel_trace.enabled()) {
|
2596
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
|
2597
|
+
chand, calld);
|
2598
|
+
}
|
2599
|
+
if (chand->retry_throttle_data != nullptr) {
|
2600
|
+
calld->retry_throttle_data = chand->retry_throttle_data->Ref();
|
2601
|
+
}
|
2602
|
+
if (chand->method_params_table != nullptr) {
|
2603
|
+
calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup(
|
2604
|
+
*chand->method_params_table, calld->path);
|
2605
|
+
if (calld->method_params != nullptr) {
|
2606
|
+
// If the deadline from the service config is shorter than the one
|
2607
|
+
// from the client API, reset the deadline timer.
|
2608
|
+
if (chand->deadline_checking_enabled &&
|
2609
|
+
calld->method_params->timeout() != 0) {
|
2610
|
+
const grpc_millis per_method_deadline =
|
2611
|
+
grpc_timespec_to_millis_round_up(calld->call_start_time) +
|
2612
|
+
calld->method_params->timeout();
|
2613
|
+
if (per_method_deadline < calld->deadline) {
|
2614
|
+
calld->deadline = per_method_deadline;
|
2615
|
+
grpc_deadline_state_reset(elem, calld->deadline);
|
2616
|
+
}
|
2617
|
+
}
|
2618
|
+
}
|
2619
|
+
}
|
2620
|
+
// If no retry policy, disable retries.
|
2621
|
+
// TODO(roth): Remove this when adding support for transparent retries.
|
2622
|
+
if (calld->method_params == nullptr ||
|
2623
|
+
calld->method_params->retry_policy() == nullptr) {
|
2624
|
+
calld->enable_retries = false;
|
2625
|
+
}
|
2626
|
+
}
|
2627
|
+
|
1091
2628
|
// Starts a pick on chand->lb_policy.
|
1092
2629
|
// Returns true if pick is completed synchronously.
|
1093
2630
|
static bool pick_callback_start_locked(grpc_call_element* elem) {
|
@@ -1097,33 +2634,46 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
|
|
1097
2634
|
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
|
1098
2635
|
chand, calld, chand->lb_policy.get());
|
1099
2636
|
}
|
1100
|
-
|
2637
|
+
// Only get service config data on the first attempt.
|
2638
|
+
if (calld->num_attempts_completed == 0) {
|
2639
|
+
apply_service_config_to_call_locked(elem);
|
2640
|
+
}
|
1101
2641
|
// If the application explicitly set wait_for_ready, use that.
|
1102
2642
|
// Otherwise, if the service config specified a value for this
|
1103
2643
|
// method, use that.
|
1104
|
-
|
1105
|
-
|
1106
|
-
|
2644
|
+
//
|
2645
|
+
// The send_initial_metadata batch will be the first one in the list,
|
2646
|
+
// as set by get_batch_index() above.
|
2647
|
+
calld->pick.initial_metadata =
|
2648
|
+
calld->seen_send_initial_metadata
|
2649
|
+
? &calld->send_initial_metadata
|
2650
|
+
: calld->pending_batches[0]
|
2651
|
+
.batch->payload->send_initial_metadata.send_initial_metadata;
|
2652
|
+
uint32_t send_initial_metadata_flags =
|
2653
|
+
calld->seen_send_initial_metadata
|
2654
|
+
? calld->send_initial_metadata_flags
|
2655
|
+
: calld->pending_batches[0]
|
2656
|
+
.batch->payload->send_initial_metadata
|
2657
|
+
.send_initial_metadata_flags;
|
1107
2658
|
const bool wait_for_ready_set_from_api =
|
1108
|
-
|
2659
|
+
send_initial_metadata_flags &
|
1109
2660
|
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
|
1110
2661
|
const bool wait_for_ready_set_from_service_config =
|
1111
2662
|
calld->method_params != nullptr &&
|
1112
|
-
calld->method_params->wait_for_ready !=
|
2663
|
+
calld->method_params->wait_for_ready() !=
|
2664
|
+
ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
|
1113
2665
|
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
|
1114
|
-
if (calld->method_params->wait_for_ready ==
|
1115
|
-
|
2666
|
+
if (calld->method_params->wait_for_ready() ==
|
2667
|
+
ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
|
2668
|
+
send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
|
1116
2669
|
} else {
|
1117
|
-
|
2670
|
+
send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
|
1118
2671
|
}
|
1119
2672
|
}
|
1120
|
-
calld->pick.
|
1121
|
-
|
1122
|
-
.send_initial_metadata;
|
1123
|
-
calld->pick.initial_metadata_flags = initial_metadata_flags;
|
1124
|
-
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
|
2673
|
+
calld->pick.initial_metadata_flags = send_initial_metadata_flags;
|
2674
|
+
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
|
1125
2675
|
grpc_combiner_scheduler(chand->combiner));
|
1126
|
-
calld->pick.on_complete = &calld->
|
2676
|
+
calld->pick.on_complete = &calld->pick_closure;
|
1127
2677
|
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
|
1128
2678
|
const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
|
1129
2679
|
if (pick_done) {
|
@@ -1137,7 +2687,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
|
|
1137
2687
|
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
|
1138
2688
|
grpc_call_combiner_set_notify_on_cancel(
|
1139
2689
|
calld->call_combiner,
|
1140
|
-
GRPC_CLOSURE_INIT(&calld->
|
2690
|
+
GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
|
1141
2691
|
pick_callback_cancel_locked, elem,
|
1142
2692
|
grpc_combiner_scheduler(chand->combiner)));
|
1143
2693
|
}
|
@@ -1186,8 +2736,6 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
|
|
1186
2736
|
"Pick cancelled", &error, 1));
|
1187
2737
|
}
|
1188
2738
|
|
1189
|
-
static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
|
1190
|
-
|
1191
2739
|
static void pick_after_resolver_result_done_locked(void* arg,
|
1192
2740
|
grpc_error* error) {
|
1193
2741
|
pick_after_resolver_result_args* args =
|
@@ -1210,7 +2758,45 @@ static void pick_after_resolver_result_done_locked(void* arg,
|
|
1210
2758
|
chand, calld);
|
1211
2759
|
}
|
1212
2760
|
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
|
1213
|
-
} else if (chand->
|
2761
|
+
} else if (chand->resolver == nullptr) {
|
2762
|
+
// Shutting down.
|
2763
|
+
if (grpc_client_channel_trace.enabled()) {
|
2764
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
|
2765
|
+
calld);
|
2766
|
+
}
|
2767
|
+
async_pick_done_locked(
|
2768
|
+
elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
|
2769
|
+
} else if (chand->lb_policy == nullptr) {
|
2770
|
+
// Transient resolver failure.
|
2771
|
+
// If call has wait_for_ready=true, try again; otherwise, fail.
|
2772
|
+
uint32_t send_initial_metadata_flags =
|
2773
|
+
calld->seen_send_initial_metadata
|
2774
|
+
? calld->send_initial_metadata_flags
|
2775
|
+
: calld->pending_batches[0]
|
2776
|
+
.batch->payload->send_initial_metadata
|
2777
|
+
.send_initial_metadata_flags;
|
2778
|
+
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
|
2779
|
+
if (grpc_client_channel_trace.enabled()) {
|
2780
|
+
gpr_log(GPR_DEBUG,
|
2781
|
+
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2782
|
+
"wait_for_ready=true; trying again",
|
2783
|
+
chand, calld);
|
2784
|
+
}
|
2785
|
+
pick_after_resolver_result_start_locked(elem);
|
2786
|
+
} else {
|
2787
|
+
if (grpc_client_channel_trace.enabled()) {
|
2788
|
+
gpr_log(GPR_DEBUG,
|
2789
|
+
"chand=%p calld=%p: resolver returned but no LB policy; "
|
2790
|
+
"wait_for_ready=false; failing",
|
2791
|
+
chand, calld);
|
2792
|
+
}
|
2793
|
+
async_pick_done_locked(
|
2794
|
+
elem,
|
2795
|
+
grpc_error_set_int(
|
2796
|
+
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
|
2797
|
+
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
|
2798
|
+
}
|
2799
|
+
} else {
|
1214
2800
|
if (grpc_client_channel_trace.enabled()) {
|
1215
2801
|
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
|
1216
2802
|
chand, calld);
|
@@ -1224,30 +2810,6 @@ static void pick_after_resolver_result_done_locked(void* arg,
|
|
1224
2810
|
async_pick_done_locked(elem, GRPC_ERROR_NONE);
|
1225
2811
|
}
|
1226
2812
|
}
|
1227
|
-
// TODO(roth): It should be impossible for chand->lb_policy to be NULL
|
1228
|
-
// here, so the rest of this code should never actually be executed.
|
1229
|
-
// However, we have reports of a crash on iOS that triggers this case,
|
1230
|
-
// so we are temporarily adding this to restore branches that were
|
1231
|
-
// removed in https://github.com/grpc/grpc/pull/12297. Need to figure
|
1232
|
-
// out what is actually causing this to occur and then figure out the
|
1233
|
-
// right way to deal with it.
|
1234
|
-
else if (chand->resolver != nullptr) {
|
1235
|
-
// No LB policy, so try again.
|
1236
|
-
if (grpc_client_channel_trace.enabled()) {
|
1237
|
-
gpr_log(GPR_DEBUG,
|
1238
|
-
"chand=%p calld=%p: resolver returned but no LB policy, "
|
1239
|
-
"trying again",
|
1240
|
-
chand, calld);
|
1241
|
-
}
|
1242
|
-
pick_after_resolver_result_start_locked(elem);
|
1243
|
-
} else {
|
1244
|
-
if (grpc_client_channel_trace.enabled()) {
|
1245
|
-
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
|
1246
|
-
calld);
|
1247
|
-
}
|
1248
|
-
async_pick_done_locked(
|
1249
|
-
elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
|
1250
|
-
}
|
1251
2813
|
}
|
1252
2814
|
|
1253
2815
|
static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
|
@@ -1277,6 +2839,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
|
|
1277
2839
|
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1278
2840
|
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1279
2841
|
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
|
2842
|
+
GPR_ASSERT(calld->subchannel_call == nullptr);
|
1280
2843
|
if (chand->lb_policy != nullptr) {
|
1281
2844
|
// We already have an LB policy, so ask it for a pick.
|
1282
2845
|
if (pick_callback_start_locked(elem)) {
|
@@ -1305,24 +2868,9 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
|
|
1305
2868
|
chand->interested_parties);
|
1306
2869
|
}
|
1307
2870
|
|
1308
|
-
|
1309
|
-
|
1310
|
-
|
1311
|
-
if (calld->retry_throttle_data != nullptr) {
|
1312
|
-
if (error == GRPC_ERROR_NONE) {
|
1313
|
-
grpc_server_retry_throttle_data_record_success(
|
1314
|
-
calld->retry_throttle_data);
|
1315
|
-
} else {
|
1316
|
-
// TODO(roth): In a subsequent PR, check the return value here and
|
1317
|
-
// decide whether or not to retry. Note that we should only
|
1318
|
-
// record failures whose statuses match the configured retryable
|
1319
|
-
// or non-fatal status codes.
|
1320
|
-
grpc_server_retry_throttle_data_record_failure(
|
1321
|
-
calld->retry_throttle_data);
|
1322
|
-
}
|
1323
|
-
}
|
1324
|
-
GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
|
1325
|
-
}
|
2871
|
+
//
|
2872
|
+
// filter call vtable functions
|
2873
|
+
//
|
1326
2874
|
|
1327
2875
|
static void cc_start_transport_stream_op_batch(
|
1328
2876
|
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
|
@@ -1333,46 +2881,47 @@ static void cc_start_transport_stream_op_batch(
|
|
1333
2881
|
grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
|
1334
2882
|
}
|
1335
2883
|
// If we've previously been cancelled, immediately fail any new batches.
|
1336
|
-
if (calld->
|
2884
|
+
if (calld->cancel_error != GRPC_ERROR_NONE) {
|
1337
2885
|
if (grpc_client_channel_trace.enabled()) {
|
1338
2886
|
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
|
1339
|
-
chand, calld, grpc_error_string(calld->
|
2887
|
+
chand, calld, grpc_error_string(calld->cancel_error));
|
1340
2888
|
}
|
2889
|
+
// Note: This will release the call combiner.
|
1341
2890
|
grpc_transport_stream_op_batch_finish_with_failure(
|
1342
|
-
batch, GRPC_ERROR_REF(calld->
|
2891
|
+
batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
|
1343
2892
|
return;
|
1344
2893
|
}
|
2894
|
+
// Handle cancellation.
|
1345
2895
|
if (batch->cancel_stream) {
|
1346
2896
|
// Stash a copy of cancel_error in our call data, so that we can use
|
1347
2897
|
// it for subsequent operations. This ensures that if the call is
|
1348
2898
|
// cancelled before any batches are passed down (e.g., if the deadline
|
1349
2899
|
// is in the past when the call starts), we can return the right
|
1350
2900
|
// error to the caller when the first batch does get passed down.
|
1351
|
-
GRPC_ERROR_UNREF(calld->
|
1352
|
-
calld->
|
2901
|
+
GRPC_ERROR_UNREF(calld->cancel_error);
|
2902
|
+
calld->cancel_error =
|
2903
|
+
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
|
1353
2904
|
if (grpc_client_channel_trace.enabled()) {
|
1354
2905
|
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
|
1355
|
-
calld, grpc_error_string(calld->
|
2906
|
+
calld, grpc_error_string(calld->cancel_error));
|
1356
2907
|
}
|
1357
|
-
// If we have a subchannel call
|
1358
|
-
//
|
1359
|
-
|
1360
|
-
|
2908
|
+
// If we do not have a subchannel call (i.e., a pick has not yet
|
2909
|
+
// been started), fail all pending batches. Otherwise, send the
|
2910
|
+
// cancellation down to the subchannel call.
|
2911
|
+
if (calld->subchannel_call == nullptr) {
|
2912
|
+
pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
|
2913
|
+
false /* yield_call_combiner */);
|
2914
|
+
// Note: This will release the call combiner.
|
2915
|
+
grpc_transport_stream_op_batch_finish_with_failure(
|
2916
|
+
batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
|
1361
2917
|
} else {
|
1362
|
-
|
1363
|
-
|
2918
|
+
// Note: This will release the call combiner.
|
2919
|
+
grpc_subchannel_call_process_op(calld->subchannel_call, batch);
|
1364
2920
|
}
|
1365
2921
|
return;
|
1366
2922
|
}
|
1367
|
-
//
|
1368
|
-
|
1369
|
-
if (batch->recv_trailing_metadata) {
|
1370
|
-
GPR_ASSERT(batch->on_complete != nullptr);
|
1371
|
-
calld->original_on_complete = batch->on_complete;
|
1372
|
-
GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
|
1373
|
-
grpc_schedule_on_exec_ctx);
|
1374
|
-
batch->on_complete = &calld->on_complete;
|
1375
|
-
}
|
2923
|
+
// Add the batch to the pending list.
|
2924
|
+
pending_batches_add(elem, batch);
|
1376
2925
|
// Check if we've already gotten a subchannel call.
|
1377
2926
|
// Note that once we have completed the pick, we do not need to enter
|
1378
2927
|
// the channel combiner, which is more efficient (especially for
|
@@ -1380,15 +2929,13 @@ static void cc_start_transport_stream_op_batch(
|
|
1380
2929
|
if (calld->subchannel_call != nullptr) {
|
1381
2930
|
if (grpc_client_channel_trace.enabled()) {
|
1382
2931
|
gpr_log(GPR_DEBUG,
|
1383
|
-
"chand=%p calld=%p:
|
2932
|
+
"chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
|
1384
2933
|
calld, calld->subchannel_call);
|
1385
2934
|
}
|
1386
|
-
|
2935
|
+
pending_batches_resume(elem);
|
1387
2936
|
return;
|
1388
2937
|
}
|
1389
2938
|
// We do not yet have a subchannel call.
|
1390
|
-
// Add the batch to the waiting-for-pick list.
|
1391
|
-
waiting_for_pick_batches_add(calld, batch);
|
1392
2939
|
// For batches containing a send_initial_metadata op, enter the channel
|
1393
2940
|
// combiner to start a pick.
|
1394
2941
|
if (batch->send_initial_metadata) {
|
@@ -1428,6 +2975,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
|
|
1428
2975
|
grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
|
1429
2976
|
calld->deadline);
|
1430
2977
|
}
|
2978
|
+
calld->enable_retries = chand->enable_retries;
|
1431
2979
|
return GRPC_ERROR_NONE;
|
1432
2980
|
}
|
1433
2981
|
|
@@ -1441,10 +2989,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
|
|
1441
2989
|
grpc_deadline_state_destroy(elem);
|
1442
2990
|
}
|
1443
2991
|
grpc_slice_unref_internal(calld->path);
|
1444
|
-
|
1445
|
-
|
1446
|
-
|
1447
|
-
GRPC_ERROR_UNREF(calld->error);
|
2992
|
+
calld->retry_throttle_data.reset();
|
2993
|
+
calld->method_params.reset();
|
2994
|
+
GRPC_ERROR_UNREF(calld->cancel_error);
|
1448
2995
|
if (calld->subchannel_call != nullptr) {
|
1449
2996
|
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
|
1450
2997
|
then_schedule_closure);
|
@@ -1452,7 +2999,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
|
|
1452
2999
|
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
|
1453
3000
|
"client_channel_destroy_call");
|
1454
3001
|
}
|
1455
|
-
|
3002
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
3003
|
+
GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
|
3004
|
+
}
|
1456
3005
|
if (calld->pick.connected_subchannel != nullptr) {
|
1457
3006
|
calld->pick.connected_subchannel.reset();
|
1458
3007
|
}
|
@@ -1652,3 +3201,9 @@ void grpc_client_channel_watch_connectivity_state(
|
|
1652
3201
|
grpc_combiner_scheduler(chand->combiner)),
|
1653
3202
|
GRPC_ERROR_NONE);
|
1654
3203
|
}
|
3204
|
+
|
3205
|
+
// Returns the call's subchannel call, if any. May be null if the pick
// has not yet completed for this call.
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
    grpc_call_element* elem) {
  return static_cast<call_data*>(elem->call_data)->subchannel_call;
}
|