grpc-flamingo 1.11.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.yardopts +1 -0
- data/Makefile +23896 -0
- data/etc/roots.pem +4475 -0
- data/include/grpc/byte_buffer.h +27 -0
- data/include/grpc/byte_buffer_reader.h +26 -0
- data/include/grpc/census.h +40 -0
- data/include/grpc/compression.h +75 -0
- data/include/grpc/fork.h +26 -0
- data/include/grpc/grpc.h +469 -0
- data/include/grpc/grpc_cronet.h +38 -0
- data/include/grpc/grpc_posix.h +67 -0
- data/include/grpc/grpc_security.h +495 -0
- data/include/grpc/grpc_security_constants.h +107 -0
- data/include/grpc/impl/codegen/atm.h +95 -0
- data/include/grpc/impl/codegen/atm_gcc_atomic.h +91 -0
- data/include/grpc/impl/codegen/atm_gcc_sync.h +83 -0
- data/include/grpc/impl/codegen/atm_windows.h +126 -0
- data/include/grpc/impl/codegen/byte_buffer.h +88 -0
- data/include/grpc/impl/codegen/byte_buffer_reader.h +42 -0
- data/include/grpc/impl/codegen/compression_types.h +107 -0
- data/include/grpc/impl/codegen/connectivity_state.h +44 -0
- data/include/grpc/impl/codegen/fork.h +48 -0
- data/include/grpc/impl/codegen/gpr_slice.h +69 -0
- data/include/grpc/impl/codegen/gpr_types.h +59 -0
- data/include/grpc/impl/codegen/grpc_types.h +669 -0
- data/include/grpc/impl/codegen/port_platform.h +507 -0
- data/include/grpc/impl/codegen/propagation_bits.h +52 -0
- data/include/grpc/impl/codegen/slice.h +147 -0
- data/include/grpc/impl/codegen/status.h +153 -0
- data/include/grpc/impl/codegen/sync.h +63 -0
- data/include/grpc/impl/codegen/sync_custom.h +38 -0
- data/include/grpc/impl/codegen/sync_generic.h +48 -0
- data/include/grpc/impl/codegen/sync_posix.h +34 -0
- data/include/grpc/impl/codegen/sync_windows.h +36 -0
- data/include/grpc/load_reporting.h +48 -0
- data/include/grpc/module.modulemap +74 -0
- data/include/grpc/slice.h +172 -0
- data/include/grpc/slice_buffer.h +84 -0
- data/include/grpc/status.h +26 -0
- data/include/grpc/support/alloc.h +68 -0
- data/include/grpc/support/atm.h +26 -0
- data/include/grpc/support/atm_gcc_atomic.h +26 -0
- data/include/grpc/support/atm_gcc_sync.h +26 -0
- data/include/grpc/support/atm_windows.h +26 -0
- data/include/grpc/support/cpu.h +44 -0
- data/include/grpc/support/log.h +104 -0
- data/include/grpc/support/log_windows.h +38 -0
- data/include/grpc/support/port_platform.h +24 -0
- data/include/grpc/support/string_util.h +49 -0
- data/include/grpc/support/sync.h +298 -0
- data/include/grpc/support/sync_custom.h +26 -0
- data/include/grpc/support/sync_generic.h +26 -0
- data/include/grpc/support/sync_posix.h +26 -0
- data/include/grpc/support/sync_windows.h +26 -0
- data/include/grpc/support/thd_id.h +44 -0
- data/include/grpc/support/time.h +92 -0
- data/include/grpc/support/workaround_list.h +31 -0
- data/src/boringssl/err_data.c +1348 -0
- data/src/core/ext/census/grpc_context.cc +38 -0
- data/src/core/ext/filters/client_channel/backup_poller.cc +174 -0
- data/src/core/ext/filters/client_channel/backup_poller.h +35 -0
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +248 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +3209 -0
- data/src/core/ext/filters/client_channel/client_channel.h +57 -0
- data/src/core/ext/filters/client_channel/client_channel_factory.cc +67 -0
- data/src/core/ext/filters/client_channel/client_channel_factory.h +74 -0
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +62 -0
- data/src/core/ext/filters/client_channel/connector.cc +41 -0
- data/src/core/ext/filters/client_channel/connector.h +73 -0
- data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +370 -0
- data/src/core/ext/filters/client_channel/http_connect_handshaker.h +34 -0
- data/src/core/ext/filters/client_channel/http_proxy.cc +195 -0
- data/src/core/ext/filters/client_channel/http_proxy.h +24 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +138 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +29 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +1906 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +36 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +108 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +152 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +67 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +304 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +88 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +102 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +190 -0
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +591 -0
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +687 -0
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +253 -0
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +136 -0
- data/src/core/ext/filters/client_channel/lb_policy.cc +59 -0
- data/src/core/ext/filters/client_channel/lb_policy.h +201 -0
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +155 -0
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +127 -0
- data/src/core/ext/filters/client_channel/lb_policy_registry.cc +97 -0
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +54 -0
- data/src/core/ext/filters/client_channel/method_params.cc +178 -0
- data/src/core/ext/filters/client_channel/method_params.h +74 -0
- data/src/core/ext/filters/client_channel/parse_address.cc +192 -0
- data/src/core/ext/filters/client_channel/parse_address.h +50 -0
- data/src/core/ext/filters/client_channel/proxy_mapper.cc +48 -0
- data/src/core/ext/filters/client_channel/proxy_mapper.h +74 -0
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +122 -0
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +44 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +493 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +53 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +351 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +593 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +74 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +59 -0
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +340 -0
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +297 -0
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +83 -0
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +214 -0
- data/src/core/ext/filters/client_channel/resolver.cc +35 -0
- data/src/core/ext/filters/client_channel/resolver.h +134 -0
- data/src/core/ext/filters/client_channel/resolver_factory.h +71 -0
- data/src/core/ext/filters/client_channel/resolver_registry.cc +178 -0
- data/src/core/ext/filters/client_channel/resolver_registry.h +83 -0
- data/src/core/ext/filters/client_channel/retry_throttle.cc +191 -0
- data/src/core/ext/filters/client_channel/retry_throttle.h +77 -0
- data/src/core/ext/filters/client_channel/subchannel.cc +815 -0
- data/src/core/ext/filters/client_channel/subchannel.h +183 -0
- data/src/core/ext/filters/client_channel/subchannel_index.cc +254 -0
- data/src/core/ext/filters/client_channel/subchannel_index.h +79 -0
- data/src/core/ext/filters/client_channel/uri_parser.cc +314 -0
- data/src/core/ext/filters/client_channel/uri_parser.h +50 -0
- data/src/core/ext/filters/deadline/deadline_filter.cc +386 -0
- data/src/core/ext/filters/deadline/deadline_filter.h +93 -0
- data/src/core/ext/filters/http/client/http_client_filter.cc +558 -0
- data/src/core/ext/filters/http/client/http_client_filter.h +31 -0
- data/src/core/ext/filters/http/client_authority_filter.cc +156 -0
- data/src/core/ext/filters/http/client_authority_filter.h +34 -0
- data/src/core/ext/filters/http/http_filters_plugin.cc +89 -0
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +499 -0
- data/src/core/ext/filters/http/message_compress/message_compress_filter.h +53 -0
- data/src/core/ext/filters/http/server/http_server_filter.cc +434 -0
- data/src/core/ext/filters/http/server/http_server_filter.h +29 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +222 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +30 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +71 -0
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +61 -0
- data/src/core/ext/filters/max_age/max_age_filter.cc +543 -0
- data/src/core/ext/filters/max_age/max_age_filter.h +26 -0
- data/src/core/ext/filters/message_size/message_size_filter.cc +324 -0
- data/src/core/ext/filters/message_size/message_size_filter.h +26 -0
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +208 -0
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +27 -0
- data/src/core/ext/filters/workarounds/workaround_utils.cc +53 -0
- data/src/core/ext/filters/workarounds/workaround_utils.h +39 -0
- data/src/core/ext/transport/chttp2/alpn/alpn.cc +44 -0
- data/src/core/ext/transport/chttp2/alpn/alpn.h +36 -0
- data/src/core/ext/transport/chttp2/client/authority.cc +42 -0
- data/src/core/ext/transport/chttp2/client/authority.h +36 -0
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +229 -0
- data/src/core/ext/transport/chttp2/client/chttp2_connector.h +28 -0
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +110 -0
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +79 -0
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +230 -0
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +353 -0
- data/src/core/ext/transport/chttp2/server/chttp2_server.h +33 -0
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +45 -0
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +74 -0
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +89 -0
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +249 -0
- data/src/core/ext/transport/chttp2/transport/bin_decoder.h +56 -0
- data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +231 -0
- data/src/core/ext/transport/chttp2/transport/bin_encoder.h +41 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +35 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +3102 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +45 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +405 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.h +482 -0
- data/src/core/ext/transport/chttp2/transport/frame.h +47 -0
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +314 -0
- data/src/core/ext/transport/chttp2/transport/frame_data.h +84 -0
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +186 -0
- data/src/core/ext/transport/chttp2/transport/frame_goaway.h +62 -0
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +131 -0
- data/src/core/ext/transport/chttp2/transport/frame_ping.h +45 -0
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +112 -0
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +43 -0
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +238 -0
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +60 -0
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +122 -0
- data/src/core/ext/transport/chttp2/transport/frame_window_update.h +45 -0
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +699 -0
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +95 -0
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1680 -0
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +109 -0
- data/src/core/ext/transport/chttp2/transport/hpack_table.cc +368 -0
- data/src/core/ext/transport/chttp2/transport/hpack_table.h +95 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.cc +62 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +62 -0
- data/src/core/ext/transport/chttp2/transport/huffsyms.cc +92 -0
- data/src/core/ext/transport/chttp2/transport/huffsyms.h +33 -0
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +73 -0
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +49 -0
- data/src/core/ext/transport/chttp2/transport/internal.h +799 -0
- data/src/core/ext/transport/chttp2/transport/parsing.cc +745 -0
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +216 -0
- data/src/core/ext/transport/chttp2/transport/stream_map.cc +167 -0
- data/src/core/ext/transport/chttp2/transport/stream_map.h +68 -0
- data/src/core/ext/transport/chttp2/transport/varint.cc +56 -0
- data/src/core/ext/transport/chttp2/transport/varint.h +60 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +641 -0
- data/src/core/ext/transport/inproc/inproc_plugin.cc +28 -0
- data/src/core/ext/transport/inproc/inproc_transport.cc +1240 -0
- data/src/core/ext/transport/inproc/inproc_transport.h +35 -0
- data/src/core/lib/avl/avl.cc +306 -0
- data/src/core/lib/avl/avl.h +94 -0
- data/src/core/lib/backoff/backoff.cc +78 -0
- data/src/core/lib/backoff/backoff.h +89 -0
- data/src/core/lib/channel/channel_args.cc +413 -0
- data/src/core/lib/channel/channel_args.h +127 -0
- data/src/core/lib/channel/channel_stack.cc +258 -0
- data/src/core/lib/channel/channel_stack.h +280 -0
- data/src/core/lib/channel/channel_stack_builder.cc +314 -0
- data/src/core/lib/channel/channel_stack_builder.h +160 -0
- data/src/core/lib/channel/channel_trace.cc +239 -0
- data/src/core/lib/channel/channel_trace.h +133 -0
- data/src/core/lib/channel/channel_trace_registry.cc +80 -0
- data/src/core/lib/channel/channel_trace_registry.h +43 -0
- data/src/core/lib/channel/connected_channel.cc +236 -0
- data/src/core/lib/channel/connected_channel.h +34 -0
- data/src/core/lib/channel/context.h +49 -0
- data/src/core/lib/channel/handshaker.cc +259 -0
- data/src/core/lib/channel/handshaker.h +166 -0
- data/src/core/lib/channel/handshaker_factory.cc +41 -0
- data/src/core/lib/channel/handshaker_factory.h +50 -0
- data/src/core/lib/channel/handshaker_registry.cc +97 -0
- data/src/core/lib/channel/handshaker_registry.h +48 -0
- data/src/core/lib/channel/status_util.cc +100 -0
- data/src/core/lib/channel/status_util.h +58 -0
- data/src/core/lib/compression/algorithm_metadata.h +61 -0
- data/src/core/lib/compression/compression.cc +174 -0
- data/src/core/lib/compression/compression_internal.cc +276 -0
- data/src/core/lib/compression/compression_internal.h +88 -0
- data/src/core/lib/compression/message_compress.cc +187 -0
- data/src/core/lib/compression/message_compress.h +40 -0
- data/src/core/lib/compression/stream_compression.cc +79 -0
- data/src/core/lib/compression/stream_compression.h +116 -0
- data/src/core/lib/compression/stream_compression_gzip.cc +230 -0
- data/src/core/lib/compression/stream_compression_gzip.h +28 -0
- data/src/core/lib/compression/stream_compression_identity.cc +94 -0
- data/src/core/lib/compression/stream_compression_identity.h +29 -0
- data/src/core/lib/debug/stats.cc +178 -0
- data/src/core/lib/debug/stats.h +61 -0
- data/src/core/lib/debug/stats_data.cc +682 -0
- data/src/core/lib/debug/stats_data.h +435 -0
- data/src/core/lib/debug/trace.cc +144 -0
- data/src/core/lib/debug/trace.h +104 -0
- data/src/core/lib/gpr/alloc.cc +99 -0
- data/src/core/lib/gpr/arena.cc +152 -0
- data/src/core/lib/gpr/arena.h +41 -0
- data/src/core/lib/gpr/atm.cc +35 -0
- data/src/core/lib/gpr/cpu_iphone.cc +36 -0
- data/src/core/lib/gpr/cpu_linux.cc +82 -0
- data/src/core/lib/gpr/cpu_posix.cc +81 -0
- data/src/core/lib/gpr/cpu_windows.cc +33 -0
- data/src/core/lib/gpr/env.h +43 -0
- data/src/core/lib/gpr/env_linux.cc +82 -0
- data/src/core/lib/gpr/env_posix.cc +47 -0
- data/src/core/lib/gpr/env_windows.cc +72 -0
- data/src/core/lib/gpr/fork.cc +78 -0
- data/src/core/lib/gpr/fork.h +35 -0
- data/src/core/lib/gpr/host_port.cc +98 -0
- data/src/core/lib/gpr/host_port.h +43 -0
- data/src/core/lib/gpr/log.cc +96 -0
- data/src/core/lib/gpr/log_android.cc +72 -0
- data/src/core/lib/gpr/log_linux.cc +93 -0
- data/src/core/lib/gpr/log_posix.cc +90 -0
- data/src/core/lib/gpr/log_windows.cc +97 -0
- data/src/core/lib/gpr/mpscq.cc +117 -0
- data/src/core/lib/gpr/mpscq.h +86 -0
- data/src/core/lib/gpr/murmur_hash.cc +80 -0
- data/src/core/lib/gpr/murmur_hash.h +29 -0
- data/src/core/lib/gpr/spinlock.h +46 -0
- data/src/core/lib/gpr/string.cc +319 -0
- data/src/core/lib/gpr/string.h +109 -0
- data/src/core/lib/gpr/string_posix.cc +72 -0
- data/src/core/lib/gpr/string_util_windows.cc +82 -0
- data/src/core/lib/gpr/string_windows.cc +69 -0
- data/src/core/lib/gpr/string_windows.h +32 -0
- data/src/core/lib/gpr/sync.cc +124 -0
- data/src/core/lib/gpr/sync_posix.cc +107 -0
- data/src/core/lib/gpr/sync_windows.cc +118 -0
- data/src/core/lib/gpr/time.cc +251 -0
- data/src/core/lib/gpr/time_posix.cc +167 -0
- data/src/core/lib/gpr/time_precise.cc +78 -0
- data/src/core/lib/gpr/time_precise.h +29 -0
- data/src/core/lib/gpr/time_windows.cc +98 -0
- data/src/core/lib/gpr/tls.h +68 -0
- data/src/core/lib/gpr/tls_gcc.h +52 -0
- data/src/core/lib/gpr/tls_msvc.h +52 -0
- data/src/core/lib/gpr/tls_pthread.cc +30 -0
- data/src/core/lib/gpr/tls_pthread.h +56 -0
- data/src/core/lib/gpr/tmpfile.h +32 -0
- data/src/core/lib/gpr/tmpfile_msys.cc +58 -0
- data/src/core/lib/gpr/tmpfile_posix.cc +70 -0
- data/src/core/lib/gpr/tmpfile_windows.cc +69 -0
- data/src/core/lib/gpr/useful.h +65 -0
- data/src/core/lib/gpr/wrap_memcpy.cc +42 -0
- data/src/core/lib/gprpp/abstract.h +34 -0
- data/src/core/lib/gprpp/atomic.h +30 -0
- data/src/core/lib/gprpp/atomic_with_atm.h +57 -0
- data/src/core/lib/gprpp/atomic_with_std.h +35 -0
- data/src/core/lib/gprpp/debug_location.h +52 -0
- data/src/core/lib/gprpp/inlined_vector.h +136 -0
- data/src/core/lib/gprpp/manual_constructor.h +213 -0
- data/src/core/lib/gprpp/memory.h +111 -0
- data/src/core/lib/gprpp/orphanable.h +199 -0
- data/src/core/lib/gprpp/ref_counted.h +169 -0
- data/src/core/lib/gprpp/ref_counted_ptr.h +112 -0
- data/src/core/lib/gprpp/thd.h +135 -0
- data/src/core/lib/gprpp/thd_posix.cc +209 -0
- data/src/core/lib/gprpp/thd_windows.cc +162 -0
- data/src/core/lib/http/format_request.cc +122 -0
- data/src/core/lib/http/format_request.h +34 -0
- data/src/core/lib/http/httpcli.cc +303 -0
- data/src/core/lib/http/httpcli.h +127 -0
- data/src/core/lib/http/httpcli_security_connector.cc +202 -0
- data/src/core/lib/http/parser.cc +371 -0
- data/src/core/lib/http/parser.h +113 -0
- data/src/core/lib/iomgr/block_annotate.h +57 -0
- data/src/core/lib/iomgr/call_combiner.cc +212 -0
- data/src/core/lib/iomgr/call_combiner.h +112 -0
- data/src/core/lib/iomgr/closure.h +351 -0
- data/src/core/lib/iomgr/combiner.cc +358 -0
- data/src/core/lib/iomgr/combiner.h +66 -0
- data/src/core/lib/iomgr/endpoint.cc +63 -0
- data/src/core/lib/iomgr/endpoint.h +98 -0
- data/src/core/lib/iomgr/endpoint_pair.h +34 -0
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +73 -0
- data/src/core/lib/iomgr/endpoint_pair_uv.cc +40 -0
- data/src/core/lib/iomgr/endpoint_pair_windows.cc +87 -0
- data/src/core/lib/iomgr/error.cc +793 -0
- data/src/core/lib/iomgr/error.h +207 -0
- data/src/core/lib/iomgr/error_internal.h +63 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +1248 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +31 -0
- data/src/core/lib/iomgr/ev_epollex_linux.cc +1494 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +30 -0
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +1735 -0
- data/src/core/lib/iomgr/ev_epollsig_linux.h +35 -0
- data/src/core/lib/iomgr/ev_poll_posix.cc +1758 -0
- data/src/core/lib/iomgr/ev_poll_posix.h +29 -0
- data/src/core/lib/iomgr/ev_posix.cc +330 -0
- data/src/core/lib/iomgr/ev_posix.h +145 -0
- data/src/core/lib/iomgr/ev_windows.cc +30 -0
- data/src/core/lib/iomgr/exec_ctx.cc +147 -0
- data/src/core/lib/iomgr/exec_ctx.h +210 -0
- data/src/core/lib/iomgr/executor.cc +301 -0
- data/src/core/lib/iomgr/executor.h +50 -0
- data/src/core/lib/iomgr/fork_posix.cc +89 -0
- data/src/core/lib/iomgr/fork_windows.cc +41 -0
- data/src/core/lib/iomgr/gethostname.h +26 -0
- data/src/core/lib/iomgr/gethostname_fallback.cc +30 -0
- data/src/core/lib/iomgr/gethostname_host_name_max.cc +40 -0
- data/src/core/lib/iomgr/gethostname_sysconf.cc +40 -0
- data/src/core/lib/iomgr/iocp_windows.cc +152 -0
- data/src/core/lib/iomgr/iocp_windows.h +48 -0
- data/src/core/lib/iomgr/iomgr.cc +178 -0
- data/src/core/lib/iomgr/iomgr.h +36 -0
- data/src/core/lib/iomgr/iomgr_custom.cc +63 -0
- data/src/core/lib/iomgr/iomgr_custom.h +47 -0
- data/src/core/lib/iomgr/iomgr_internal.cc +43 -0
- data/src/core/lib/iomgr/iomgr_internal.h +57 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +67 -0
- data/src/core/lib/iomgr/iomgr_posix.h +26 -0
- data/src/core/lib/iomgr/iomgr_uv.cc +40 -0
- data/src/core/lib/iomgr/iomgr_windows.cc +87 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.cc +104 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +36 -0
- data/src/core/lib/iomgr/load_file.cc +80 -0
- data/src/core/lib/iomgr/load_file.h +35 -0
- data/src/core/lib/iomgr/lockfree_event.cc +250 -0
- data/src/core/lib/iomgr/lockfree_event.h +72 -0
- data/src/core/lib/iomgr/nameser.h +106 -0
- data/src/core/lib/iomgr/network_status_tracker.cc +36 -0
- data/src/core/lib/iomgr/network_status_tracker.h +32 -0
- data/src/core/lib/iomgr/polling_entity.cc +87 -0
- data/src/core/lib/iomgr/polling_entity.h +68 -0
- data/src/core/lib/iomgr/pollset.cc +56 -0
- data/src/core/lib/iomgr/pollset.h +99 -0
- data/src/core/lib/iomgr/pollset_custom.cc +106 -0
- data/src/core/lib/iomgr/pollset_custom.h +35 -0
- data/src/core/lib/iomgr/pollset_set.cc +55 -0
- data/src/core/lib/iomgr/pollset_set.h +55 -0
- data/src/core/lib/iomgr/pollset_set_custom.cc +48 -0
- data/src/core/lib/iomgr/pollset_set_custom.h +26 -0
- data/src/core/lib/iomgr/pollset_set_windows.cc +51 -0
- data/src/core/lib/iomgr/pollset_set_windows.h +26 -0
- data/src/core/lib/iomgr/pollset_uv.cc +93 -0
- data/src/core/lib/iomgr/pollset_windows.cc +229 -0
- data/src/core/lib/iomgr/pollset_windows.h +70 -0
- data/src/core/lib/iomgr/port.h +147 -0
- data/src/core/lib/iomgr/resolve_address.cc +50 -0
- data/src/core/lib/iomgr/resolve_address.h +83 -0
- data/src/core/lib/iomgr/resolve_address_custom.cc +187 -0
- data/src/core/lib/iomgr/resolve_address_custom.h +43 -0
- data/src/core/lib/iomgr/resolve_address_posix.cc +180 -0
- data/src/core/lib/iomgr/resolve_address_windows.cc +165 -0
- data/src/core/lib/iomgr/resource_quota.cc +871 -0
- data/src/core/lib/iomgr/resource_quota.h +142 -0
- data/src/core/lib/iomgr/sockaddr.h +32 -0
- data/src/core/lib/iomgr/sockaddr_custom.h +54 -0
- data/src/core/lib/iomgr/sockaddr_posix.h +55 -0
- data/src/core/lib/iomgr/sockaddr_utils.cc +298 -0
- data/src/core/lib/iomgr/sockaddr_utils.h +84 -0
- data/src/core/lib/iomgr/sockaddr_windows.h +55 -0
- data/src/core/lib/iomgr/socket_factory_posix.cc +94 -0
- data/src/core/lib/iomgr/socket_factory_posix.h +69 -0
- data/src/core/lib/iomgr/socket_mutator.cc +83 -0
- data/src/core/lib/iomgr/socket_mutator.h +61 -0
- data/src/core/lib/iomgr/socket_utils.h +38 -0
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +327 -0
- data/src/core/lib/iomgr/socket_utils_linux.cc +43 -0
- data/src/core/lib/iomgr/socket_utils_posix.cc +59 -0
- data/src/core/lib/iomgr/socket_utils_posix.h +134 -0
- data/src/core/lib/iomgr/socket_utils_uv.cc +45 -0
- data/src/core/lib/iomgr/socket_utils_windows.cc +43 -0
- data/src/core/lib/iomgr/socket_windows.cc +151 -0
- data/src/core/lib/iomgr/socket_windows.h +113 -0
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +30 -0
- data/src/core/lib/iomgr/tcp_client.cc +36 -0
- data/src/core/lib/iomgr/tcp_client.h +52 -0
- data/src/core/lib/iomgr/tcp_client_custom.cc +151 -0
- data/src/core/lib/iomgr/tcp_client_posix.cc +359 -0
- data/src/core/lib/iomgr/tcp_client_posix.h +68 -0
- data/src/core/lib/iomgr/tcp_client_windows.cc +231 -0
- data/src/core/lib/iomgr/tcp_custom.cc +365 -0
- data/src/core/lib/iomgr/tcp_custom.h +81 -0
- data/src/core/lib/iomgr/tcp_posix.cc +814 -0
- data/src/core/lib/iomgr/tcp_posix.h +57 -0
- data/src/core/lib/iomgr/tcp_server.cc +73 -0
- data/src/core/lib/iomgr/tcp_server.h +122 -0
- data/src/core/lib/iomgr/tcp_server_custom.cc +472 -0
- data/src/core/lib/iomgr/tcp_server_posix.cc +582 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +122 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +208 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +184 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +36 -0
- data/src/core/lib/iomgr/tcp_server_windows.cc +559 -0
- data/src/core/lib/iomgr/tcp_uv.cc +417 -0
- data/src/core/lib/iomgr/tcp_windows.cc +455 -0
- data/src/core/lib/iomgr/tcp_windows.h +51 -0
- data/src/core/lib/iomgr/time_averaged_stats.cc +64 -0
- data/src/core/lib/iomgr/time_averaged_stats.h +73 -0
- data/src/core/lib/iomgr/timer.cc +45 -0
- data/src/core/lib/iomgr/timer.h +125 -0
- data/src/core/lib/iomgr/timer_custom.cc +93 -0
- data/src/core/lib/iomgr/timer_custom.h +43 -0
- data/src/core/lib/iomgr/timer_generic.cc +663 -0
- data/src/core/lib/iomgr/timer_heap.cc +135 -0
- data/src/core/lib/iomgr/timer_heap.h +44 -0
- data/src/core/lib/iomgr/timer_manager.cc +347 -0
- data/src/core/lib/iomgr/timer_manager.h +39 -0
- data/src/core/lib/iomgr/timer_uv.cc +63 -0
- data/src/core/lib/iomgr/udp_server.cc +692 -0
- data/src/core/lib/iomgr/udp_server.h +103 -0
- data/src/core/lib/iomgr/unix_sockets_posix.cc +104 -0
- data/src/core/lib/iomgr/unix_sockets_posix.h +43 -0
- data/src/core/lib/iomgr/unix_sockets_posix_noop.cc +49 -0
- data/src/core/lib/iomgr/wakeup_fd_cv.cc +107 -0
- data/src/core/lib/iomgr/wakeup_fd_cv.h +69 -0
- data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +83 -0
- data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +38 -0
- data/src/core/lib/iomgr/wakeup_fd_pipe.cc +100 -0
- data/src/core/lib/iomgr/wakeup_fd_pipe.h +28 -0
- data/src/core/lib/iomgr/wakeup_fd_posix.cc +87 -0
- data/src/core/lib/iomgr/wakeup_fd_posix.h +96 -0
- data/src/core/lib/json/json.cc +86 -0
- data/src/core/lib/json/json.h +94 -0
- data/src/core/lib/json/json_common.h +34 -0
- data/src/core/lib/json/json_reader.cc +663 -0
- data/src/core/lib/json/json_reader.h +146 -0
- data/src/core/lib/json/json_string.cc +367 -0
- data/src/core/lib/json/json_writer.cc +245 -0
- data/src/core/lib/json/json_writer.h +84 -0
- data/src/core/lib/profiling/basic_timers.cc +286 -0
- data/src/core/lib/profiling/stap_timers.cc +50 -0
- data/src/core/lib/profiling/timers.h +94 -0
- data/src/core/lib/security/context/security_context.cc +348 -0
- data/src/core/lib/security/context/security_context.h +115 -0
- data/src/core/lib/security/credentials/alts/alts_credentials.cc +119 -0
- data/src/core/lib/security/credentials/alts/alts_credentials.h +102 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment.cc +72 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment.h +57 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc +67 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc +33 -0
- data/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc +114 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +126 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc +46 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +112 -0
- data/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc +58 -0
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +269 -0
- data/src/core/lib/security/credentials/composite/composite_credentials.h +59 -0
- data/src/core/lib/security/credentials/credentials.cc +286 -0
- data/src/core/lib/security/credentials/credentials.h +246 -0
- data/src/core/lib/security/credentials/credentials_metadata.cc +62 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +136 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.h +64 -0
- data/src/core/lib/security/credentials/google_default/credentials_generic.cc +41 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +322 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +45 -0
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +86 -0
- data/src/core/lib/security/credentials/iam/iam_credentials.h +31 -0
- data/src/core/lib/security/credentials/jwt/json_token.cc +314 -0
- data/src/core/lib/security/credentials/jwt/json_token.h +75 -0
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +190 -0
- data/src/core/lib/security/credentials/jwt/jwt_credentials.h +49 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +934 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +123 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +532 -0
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +106 -0
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +271 -0
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +46 -0
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +349 -0
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +54 -0
- data/src/core/lib/security/security_connector/alts_security_connector.cc +287 -0
- data/src/core/lib/security/security_connector/alts_security_connector.h +69 -0
- data/src/core/lib/security/security_connector/security_connector.cc +1200 -0
- data/src/core/lib/security/security_connector/security_connector.h +283 -0
- data/src/core/lib/security/transport/auth_filters.h +37 -0
- data/src/core/lib/security/transport/client_auth_filter.cc +418 -0
- data/src/core/lib/security/transport/secure_endpoint.cc +429 -0
- data/src/core/lib/security/transport/secure_endpoint.h +41 -0
- data/src/core/lib/security/transport/security_handshaker.cc +526 -0
- data/src/core/lib/security/transport/security_handshaker.h +34 -0
- data/src/core/lib/security/transport/server_auth_filter.cc +269 -0
- data/src/core/lib/security/transport/target_authority_table.cc +75 -0
- data/src/core/lib/security/transport/target_authority_table.h +40 -0
- data/src/core/lib/security/transport/tsi_error.cc +29 -0
- data/src/core/lib/security/transport/tsi_error.h +29 -0
- data/src/core/lib/security/util/json_util.cc +48 -0
- data/src/core/lib/security/util/json_util.h +42 -0
- data/src/core/lib/slice/b64.cc +240 -0
- data/src/core/lib/slice/b64.h +51 -0
- data/src/core/lib/slice/percent_encoding.cc +169 -0
- data/src/core/lib/slice/percent_encoding.h +65 -0
- data/src/core/lib/slice/slice.cc +489 -0
- data/src/core/lib/slice/slice_buffer.cc +359 -0
- data/src/core/lib/slice/slice_hash_table.h +201 -0
- data/src/core/lib/slice/slice_intern.cc +332 -0
- data/src/core/lib/slice/slice_internal.h +49 -0
- data/src/core/lib/slice/slice_string_helpers.cc +118 -0
- data/src/core/lib/slice/slice_string_helpers.h +47 -0
- data/src/core/lib/slice/slice_weak_hash_table.h +105 -0
- data/src/core/lib/surface/api_trace.cc +24 -0
- data/src/core/lib/surface/api_trace.h +52 -0
- data/src/core/lib/surface/byte_buffer.cc +92 -0
- data/src/core/lib/surface/byte_buffer_reader.cc +129 -0
- data/src/core/lib/surface/call.cc +2002 -0
- data/src/core/lib/surface/call.h +109 -0
- data/src/core/lib/surface/call_details.cc +42 -0
- data/src/core/lib/surface/call_log_batch.cc +120 -0
- data/src/core/lib/surface/call_test_only.h +43 -0
- data/src/core/lib/surface/channel.cc +450 -0
- data/src/core/lib/surface/channel.h +83 -0
- data/src/core/lib/surface/channel_init.cc +109 -0
- data/src/core/lib/surface/channel_init.h +73 -0
- data/src/core/lib/surface/channel_ping.cc +65 -0
- data/src/core/lib/surface/channel_stack_type.cc +58 -0
- data/src/core/lib/surface/channel_stack_type.h +47 -0
- data/src/core/lib/surface/completion_queue.cc +1262 -0
- data/src/core/lib/surface/completion_queue.h +93 -0
- data/src/core/lib/surface/completion_queue_factory.cc +79 -0
- data/src/core/lib/surface/completion_queue_factory.h +38 -0
- data/src/core/lib/surface/event_string.cc +68 -0
- data/src/core/lib/surface/event_string.h +29 -0
- data/src/core/lib/surface/init.cc +196 -0
- data/src/core/lib/surface/init.h +27 -0
- data/src/core/lib/surface/init_secure.cc +81 -0
- data/src/core/lib/surface/lame_client.cc +180 -0
- data/src/core/lib/surface/lame_client.h +28 -0
- data/src/core/lib/surface/metadata_array.cc +36 -0
- data/src/core/lib/surface/server.cc +1445 -0
- data/src/core/lib/surface/server.h +58 -0
- data/src/core/lib/surface/validate_metadata.cc +95 -0
- data/src/core/lib/surface/validate_metadata.h +30 -0
- data/src/core/lib/surface/version.cc +28 -0
- data/src/core/lib/transport/bdp_estimator.cc +87 -0
- data/src/core/lib/transport/bdp_estimator.h +94 -0
- data/src/core/lib/transport/byte_stream.cc +160 -0
- data/src/core/lib/transport/byte_stream.h +164 -0
- data/src/core/lib/transport/connectivity_state.cc +196 -0
- data/src/core/lib/transport/connectivity_state.h +87 -0
- data/src/core/lib/transport/error_utils.cc +118 -0
- data/src/core/lib/transport/error_utils.h +46 -0
- data/src/core/lib/transport/http2_errors.h +41 -0
- data/src/core/lib/transport/metadata.cc +539 -0
- data/src/core/lib/transport/metadata.h +165 -0
- data/src/core/lib/transport/metadata_batch.cc +329 -0
- data/src/core/lib/transport/metadata_batch.h +150 -0
- data/src/core/lib/transport/pid_controller.cc +51 -0
- data/src/core/lib/transport/pid_controller.h +116 -0
- data/src/core/lib/transport/service_config.cc +106 -0
- data/src/core/lib/transport/service_config.h +249 -0
- data/src/core/lib/transport/static_metadata.cc +601 -0
- data/src/core/lib/transport/static_metadata.h +603 -0
- data/src/core/lib/transport/status_conversion.cc +100 -0
- data/src/core/lib/transport/status_conversion.h +38 -0
- data/src/core/lib/transport/status_metadata.cc +54 -0
- data/src/core/lib/transport/status_metadata.h +30 -0
- data/src/core/lib/transport/timeout_encoding.cc +144 -0
- data/src/core/lib/transport/timeout_encoding.h +37 -0
- data/src/core/lib/transport/transport.cc +278 -0
- data/src/core/lib/transport/transport.h +378 -0
- data/src/core/lib/transport/transport_impl.h +71 -0
- data/src/core/lib/transport/transport_op_string.cc +214 -0
- data/src/core/plugin_registry/grpc_plugin_registry.cc +97 -0
- data/src/core/tsi/alts/crypt/aes_gcm.cc +687 -0
- data/src/core/tsi/alts/crypt/gsec.cc +189 -0
- data/src/core/tsi/alts/crypt/gsec.h +454 -0
- data/src/core/tsi/alts/frame_protector/alts_counter.cc +118 -0
- data/src/core/tsi/alts/frame_protector/alts_counter.h +98 -0
- data/src/core/tsi/alts/frame_protector/alts_crypter.cc +66 -0
- data/src/core/tsi/alts/frame_protector/alts_crypter.h +255 -0
- data/src/core/tsi/alts/frame_protector/alts_frame_protector.cc +407 -0
- data/src/core/tsi/alts/frame_protector/alts_frame_protector.h +55 -0
- data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc +114 -0
- data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h +114 -0
- data/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc +105 -0
- data/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc +103 -0
- data/src/core/tsi/alts/frame_protector/frame_handler.cc +218 -0
- data/src/core/tsi/alts/frame_protector/frame_handler.h +236 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +316 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +137 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc +520 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h +323 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc +143 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +149 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +73 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_event.h +93 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +483 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +83 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +52 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +58 -0
- data/src/core/tsi/alts/handshaker/alts_tsi_utils.h +52 -0
- data/src/core/tsi/alts/handshaker/altscontext.pb.c +48 -0
- data/src/core/tsi/alts/handshaker/altscontext.pb.h +64 -0
- data/src/core/tsi/alts/handshaker/handshaker.pb.c +123 -0
- data/src/core/tsi/alts/handshaker/handshaker.pb.h +255 -0
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +50 -0
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +78 -0
- data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +196 -0
- data/src/core/tsi/alts/handshaker/transport_security_common_api.h +163 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +180 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +52 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +144 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h +49 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h +91 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +174 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h +100 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc +476 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h +199 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +296 -0
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +52 -0
- data/src/core/tsi/alts_transport_security.cc +63 -0
- data/src/core/tsi/alts_transport_security.h +47 -0
- data/src/core/tsi/fake_transport_security.cc +787 -0
- data/src/core/tsi/fake_transport_security.h +45 -0
- data/src/core/tsi/ssl/session_cache/ssl_session.h +73 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +58 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +211 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +93 -0
- data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +76 -0
- data/src/core/tsi/ssl_transport_security.cc +1831 -0
- data/src/core/tsi/ssl_transport_security.h +314 -0
- data/src/core/tsi/ssl_types.h +42 -0
- data/src/core/tsi/transport_security.cc +326 -0
- data/src/core/tsi/transport_security.h +127 -0
- data/src/core/tsi/transport_security_adapter.cc +235 -0
- data/src/core/tsi/transport_security_adapter.h +41 -0
- data/src/core/tsi/transport_security_grpc.cc +66 -0
- data/src/core/tsi/transport_security_grpc.h +74 -0
- data/src/core/tsi/transport_security_interface.h +454 -0
- data/src/ruby/bin/apis/google/protobuf/empty.rb +29 -0
- data/src/ruby/bin/apis/pubsub_demo.rb +241 -0
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb +159 -0
- data/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb +88 -0
- data/src/ruby/bin/math_client.rb +132 -0
- data/src/ruby/bin/math_pb.rb +32 -0
- data/src/ruby/bin/math_server.rb +191 -0
- data/src/ruby/bin/math_services_pb.rb +51 -0
- data/src/ruby/bin/noproto_client.rb +93 -0
- data/src/ruby/bin/noproto_server.rb +97 -0
- data/src/ruby/ext/grpc/extconf.rb +118 -0
- data/src/ruby/ext/grpc/rb_byte_buffer.c +64 -0
- data/src/ruby/ext/grpc/rb_byte_buffer.h +35 -0
- data/src/ruby/ext/grpc/rb_call.c +1041 -0
- data/src/ruby/ext/grpc/rb_call.h +53 -0
- data/src/ruby/ext/grpc/rb_call_credentials.c +290 -0
- data/src/ruby/ext/grpc/rb_call_credentials.h +31 -0
- data/src/ruby/ext/grpc/rb_channel.c +828 -0
- data/src/ruby/ext/grpc/rb_channel.h +34 -0
- data/src/ruby/ext/grpc/rb_channel_args.c +155 -0
- data/src/ruby/ext/grpc/rb_channel_args.h +38 -0
- data/src/ruby/ext/grpc/rb_channel_credentials.c +263 -0
- data/src/ruby/ext/grpc/rb_channel_credentials.h +32 -0
- data/src/ruby/ext/grpc/rb_completion_queue.c +100 -0
- data/src/ruby/ext/grpc/rb_completion_queue.h +36 -0
- data/src/ruby/ext/grpc/rb_compression_options.c +468 -0
- data/src/ruby/ext/grpc/rb_compression_options.h +29 -0
- data/src/ruby/ext/grpc/rb_event_thread.c +141 -0
- data/src/ruby/ext/grpc/rb_event_thread.h +21 -0
- data/src/ruby/ext/grpc/rb_grpc.c +340 -0
- data/src/ruby/ext/grpc/rb_grpc.h +72 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +507 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +766 -0
- data/src/ruby/ext/grpc/rb_loader.c +57 -0
- data/src/ruby/ext/grpc/rb_loader.h +25 -0
- data/src/ruby/ext/grpc/rb_server.c +366 -0
- data/src/ruby/ext/grpc/rb_server.h +32 -0
- data/src/ruby/ext/grpc/rb_server_credentials.c +243 -0
- data/src/ruby/ext/grpc/rb_server_credentials.h +32 -0
- data/src/ruby/lib/grpc/core/time_consts.rb +56 -0
- data/src/ruby/lib/grpc/errors.rb +201 -0
- data/src/ruby/lib/grpc/generic/active_call.rb +674 -0
- data/src/ruby/lib/grpc/generic/bidi_call.rb +233 -0
- data/src/ruby/lib/grpc/generic/client_stub.rb +501 -0
- data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
- data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
- data/src/ruby/lib/grpc/generic/rpc_desc.rb +204 -0
- data/src/ruby/lib/grpc/generic/rpc_server.rb +490 -0
- data/src/ruby/lib/grpc/generic/service.rb +210 -0
- data/src/ruby/lib/grpc/google_rpc_status_utils.rb +35 -0
- data/src/ruby/lib/grpc/grpc.rb +24 -0
- data/src/ruby/lib/grpc/logconfig.rb +44 -0
- data/src/ruby/lib/grpc/notifier.rb +45 -0
- data/src/ruby/lib/grpc/version.rb +18 -0
- data/src/ruby/lib/grpc.rb +35 -0
- data/src/ruby/pb/README.md +42 -0
- data/src/ruby/pb/generate_proto_ruby.sh +43 -0
- data/src/ruby/pb/grpc/health/checker.rb +76 -0
- data/src/ruby/pb/grpc/health/v1/health_pb.rb +28 -0
- data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +41 -0
- data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +44 -0
- data/src/ruby/pb/grpc/testing/metrics_pb.rb +28 -0
- data/src/ruby/pb/grpc/testing/metrics_services_pb.rb +49 -0
- data/src/ruby/pb/src/proto/grpc/testing/empty_pb.rb +15 -0
- data/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb +82 -0
- data/src/ruby/pb/src/proto/grpc/testing/test_pb.rb +14 -0
- data/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb +102 -0
- data/src/ruby/pb/test/client.rb +764 -0
- data/src/ruby/pb/test/server.rb +252 -0
- data/src/ruby/spec/call_credentials_spec.rb +42 -0
- data/src/ruby/spec/call_spec.rb +180 -0
- data/src/ruby/spec/channel_connection_spec.rb +126 -0
- data/src/ruby/spec/channel_credentials_spec.rb +82 -0
- data/src/ruby/spec/channel_spec.rb +190 -0
- data/src/ruby/spec/client_auth_spec.rb +137 -0
- data/src/ruby/spec/client_server_spec.rb +664 -0
- data/src/ruby/spec/compression_options_spec.rb +149 -0
- data/src/ruby/spec/error_sanity_spec.rb +49 -0
- data/src/ruby/spec/generic/active_call_spec.rb +672 -0
- data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
- data/src/ruby/spec/generic/client_stub_spec.rb +1067 -0
- data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
- data/src/ruby/spec/generic/rpc_desc_spec.rb +374 -0
- data/src/ruby/spec/generic/rpc_server_pool_spec.rb +127 -0
- data/src/ruby/spec/generic/rpc_server_spec.rb +726 -0
- data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
- data/src/ruby/spec/generic/service_spec.rb +261 -0
- data/src/ruby/spec/google_rpc_status_utils_spec.rb +293 -0
- data/src/ruby/spec/pb/duplicate/codegen_spec.rb +56 -0
- data/src/ruby/spec/pb/health/checker_spec.rb +236 -0
- data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +54 -0
- data/src/ruby/spec/pb/package_with_underscore/data.proto +23 -0
- data/src/ruby/spec/pb/package_with_underscore/service.proto +23 -0
- data/src/ruby/spec/server_credentials_spec.rb +79 -0
- data/src/ruby/spec/server_spec.rb +209 -0
- data/src/ruby/spec/spec_helper.rb +60 -0
- data/src/ruby/spec/support/helpers.rb +107 -0
- data/src/ruby/spec/support/services.rb +147 -0
- data/src/ruby/spec/testdata/README +1 -0
- data/src/ruby/spec/testdata/ca.pem +15 -0
- data/src/ruby/spec/testdata/client.key +16 -0
- data/src/ruby/spec/testdata/client.pem +14 -0
- data/src/ruby/spec/testdata/server1.key +16 -0
- data/src/ruby/spec/testdata/server1.pem +16 -0
- data/src/ruby/spec/time_consts_spec.rb +74 -0
- data/third_party/address_sorting/address_sorting.c +369 -0
- data/third_party/address_sorting/address_sorting_internal.h +70 -0
- data/third_party/address_sorting/address_sorting_posix.c +97 -0
- data/third_party/address_sorting/address_sorting_windows.c +55 -0
- data/third_party/address_sorting/include/address_sorting/address_sorting.h +110 -0
- data/third_party/boringssl/crypto/asn1/a_bitstr.c +271 -0
- data/third_party/boringssl/crypto/asn1/a_bool.c +110 -0
- data/third_party/boringssl/crypto/asn1/a_d2i_fp.c +297 -0
- data/third_party/boringssl/crypto/asn1/a_dup.c +111 -0
- data/third_party/boringssl/crypto/asn1/a_enum.c +195 -0
- data/third_party/boringssl/crypto/asn1/a_gentm.c +261 -0
- data/third_party/boringssl/crypto/asn1/a_i2d_fp.c +150 -0
- data/third_party/boringssl/crypto/asn1/a_int.c +474 -0
- data/third_party/boringssl/crypto/asn1/a_mbstr.c +409 -0
- data/third_party/boringssl/crypto/asn1/a_object.c +275 -0
- data/third_party/boringssl/crypto/asn1/a_octet.c +77 -0
- data/third_party/boringssl/crypto/asn1/a_print.c +93 -0
- data/third_party/boringssl/crypto/asn1/a_strnid.c +312 -0
- data/third_party/boringssl/crypto/asn1/a_time.c +213 -0
- data/third_party/boringssl/crypto/asn1/a_type.c +151 -0
- data/third_party/boringssl/crypto/asn1/a_utctm.c +303 -0
- data/third_party/boringssl/crypto/asn1/a_utf8.c +234 -0
- data/third_party/boringssl/crypto/asn1/asn1_lib.c +442 -0
- data/third_party/boringssl/crypto/asn1/asn1_locl.h +101 -0
- data/third_party/boringssl/crypto/asn1/asn1_par.c +80 -0
- data/third_party/boringssl/crypto/asn1/asn_pack.c +105 -0
- data/third_party/boringssl/crypto/asn1/f_enum.c +93 -0
- data/third_party/boringssl/crypto/asn1/f_int.c +97 -0
- data/third_party/boringssl/crypto/asn1/f_string.c +91 -0
- data/third_party/boringssl/crypto/asn1/tasn_dec.c +1223 -0
- data/third_party/boringssl/crypto/asn1/tasn_enc.c +662 -0
- data/third_party/boringssl/crypto/asn1/tasn_fre.c +244 -0
- data/third_party/boringssl/crypto/asn1/tasn_new.c +387 -0
- data/third_party/boringssl/crypto/asn1/tasn_typ.c +131 -0
- data/third_party/boringssl/crypto/asn1/tasn_utl.c +280 -0
- data/third_party/boringssl/crypto/asn1/time_support.c +206 -0
- data/third_party/boringssl/crypto/base64/base64.c +466 -0
- data/third_party/boringssl/crypto/bio/bio.c +636 -0
- data/third_party/boringssl/crypto/bio/bio_mem.c +330 -0
- data/third_party/boringssl/crypto/bio/connect.c +542 -0
- data/third_party/boringssl/crypto/bio/fd.c +275 -0
- data/third_party/boringssl/crypto/bio/file.c +313 -0
- data/third_party/boringssl/crypto/bio/hexdump.c +192 -0
- data/third_party/boringssl/crypto/bio/internal.h +111 -0
- data/third_party/boringssl/crypto/bio/pair.c +489 -0
- data/third_party/boringssl/crypto/bio/printf.c +115 -0
- data/third_party/boringssl/crypto/bio/socket.c +202 -0
- data/third_party/boringssl/crypto/bio/socket_helper.c +114 -0
- data/third_party/boringssl/crypto/bn_extra/bn_asn1.c +64 -0
- data/third_party/boringssl/crypto/bn_extra/convert.c +465 -0
- data/third_party/boringssl/crypto/buf/buf.c +231 -0
- data/third_party/boringssl/crypto/bytestring/asn1_compat.c +52 -0
- data/third_party/boringssl/crypto/bytestring/ber.c +264 -0
- data/third_party/boringssl/crypto/bytestring/cbb.c +568 -0
- data/third_party/boringssl/crypto/bytestring/cbs.c +487 -0
- data/third_party/boringssl/crypto/bytestring/internal.h +75 -0
- data/third_party/boringssl/crypto/chacha/chacha.c +167 -0
- data/third_party/boringssl/crypto/cipher_extra/cipher_extra.c +114 -0
- data/third_party/boringssl/crypto/cipher_extra/derive_key.c +152 -0
- data/third_party/boringssl/crypto/cipher_extra/e_aesctrhmac.c +281 -0
- data/third_party/boringssl/crypto/cipher_extra/e_aesgcmsiv.c +867 -0
- data/third_party/boringssl/crypto/cipher_extra/e_chacha20poly1305.c +326 -0
- data/third_party/boringssl/crypto/cipher_extra/e_null.c +85 -0
- data/third_party/boringssl/crypto/cipher_extra/e_rc2.c +460 -0
- data/third_party/boringssl/crypto/cipher_extra/e_rc4.c +87 -0
- data/third_party/boringssl/crypto/cipher_extra/e_ssl3.c +460 -0
- data/third_party/boringssl/crypto/cipher_extra/e_tls.c +681 -0
- data/third_party/boringssl/crypto/cipher_extra/internal.h +128 -0
- data/third_party/boringssl/crypto/cipher_extra/tls_cbc.c +482 -0
- data/third_party/boringssl/crypto/cmac/cmac.c +241 -0
- data/third_party/boringssl/crypto/conf/conf.c +803 -0
- data/third_party/boringssl/crypto/conf/conf_def.h +127 -0
- data/third_party/boringssl/crypto/conf/internal.h +31 -0
- data/third_party/boringssl/crypto/cpu-aarch64-linux.c +61 -0
- data/third_party/boringssl/crypto/cpu-arm-linux.c +363 -0
- data/third_party/boringssl/crypto/cpu-arm.c +38 -0
- data/third_party/boringssl/crypto/cpu-intel.c +288 -0
- data/third_party/boringssl/crypto/cpu-ppc64le.c +38 -0
- data/third_party/boringssl/crypto/crypto.c +198 -0
- data/third_party/boringssl/crypto/curve25519/spake25519.c +539 -0
- data/third_party/boringssl/crypto/curve25519/x25519-x86_64.c +247 -0
- data/third_party/boringssl/crypto/dh/check.c +217 -0
- data/third_party/boringssl/crypto/dh/dh.c +519 -0
- data/third_party/boringssl/crypto/dh/dh_asn1.c +160 -0
- data/third_party/boringssl/crypto/dh/params.c +93 -0
- data/third_party/boringssl/crypto/digest_extra/digest_extra.c +240 -0
- data/third_party/boringssl/crypto/dsa/dsa.c +984 -0
- data/third_party/boringssl/crypto/dsa/dsa_asn1.c +339 -0
- data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +563 -0
- data/third_party/boringssl/crypto/ecdh/ecdh.c +161 -0
- data/third_party/boringssl/crypto/ecdsa_extra/ecdsa_asn1.c +275 -0
- data/third_party/boringssl/crypto/engine/engine.c +98 -0
- data/third_party/boringssl/crypto/err/err.c +847 -0
- data/third_party/boringssl/crypto/err/internal.h +58 -0
- data/third_party/boringssl/crypto/evp/digestsign.c +231 -0
- data/third_party/boringssl/crypto/evp/evp.c +362 -0
- data/third_party/boringssl/crypto/evp/evp_asn1.c +337 -0
- data/third_party/boringssl/crypto/evp/evp_ctx.c +446 -0
- data/third_party/boringssl/crypto/evp/internal.h +252 -0
- data/third_party/boringssl/crypto/evp/p_dsa_asn1.c +268 -0
- data/third_party/boringssl/crypto/evp/p_ec.c +239 -0
- data/third_party/boringssl/crypto/evp/p_ec_asn1.c +256 -0
- data/third_party/boringssl/crypto/evp/p_ed25519.c +71 -0
- data/third_party/boringssl/crypto/evp/p_ed25519_asn1.c +190 -0
- data/third_party/boringssl/crypto/evp/p_rsa.c +634 -0
- data/third_party/boringssl/crypto/evp/p_rsa_asn1.c +189 -0
- data/third_party/boringssl/crypto/evp/pbkdf.c +146 -0
- data/third_party/boringssl/crypto/evp/print.c +489 -0
- data/third_party/boringssl/crypto/evp/scrypt.c +209 -0
- data/third_party/boringssl/crypto/evp/sign.c +151 -0
- data/third_party/boringssl/crypto/ex_data.c +261 -0
- data/third_party/boringssl/crypto/fipsmodule/aes/aes.c +1100 -0
- data/third_party/boringssl/crypto/fipsmodule/aes/internal.h +100 -0
- data/third_party/boringssl/crypto/fipsmodule/aes/key_wrap.c +138 -0
- data/third_party/boringssl/crypto/fipsmodule/aes/mode_wrappers.c +112 -0
- data/third_party/boringssl/crypto/fipsmodule/bcm.c +679 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/add.c +371 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +540 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +370 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/bytes.c +269 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +254 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/ctx.c +303 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/div.c +733 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +1390 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +627 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +710 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +413 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/jacobi.c +146 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +483 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +207 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +902 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +894 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/random.c +299 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.c +254 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.h +53 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +305 -0
- data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +502 -0
- data/third_party/boringssl/crypto/fipsmodule/cipher/aead.c +284 -0
- data/third_party/boringssl/crypto/fipsmodule/cipher/cipher.c +615 -0
- data/third_party/boringssl/crypto/fipsmodule/cipher/e_aes.c +1437 -0
- data/third_party/boringssl/crypto/fipsmodule/cipher/e_des.c +233 -0
- data/third_party/boringssl/crypto/fipsmodule/cipher/internal.h +129 -0
- data/third_party/boringssl/crypto/fipsmodule/delocate.h +88 -0
- data/third_party/boringssl/crypto/fipsmodule/des/des.c +785 -0
- data/third_party/boringssl/crypto/fipsmodule/des/internal.h +238 -0
- data/third_party/boringssl/crypto/fipsmodule/digest/digest.c +256 -0
- data/third_party/boringssl/crypto/fipsmodule/digest/digests.c +280 -0
- data/third_party/boringssl/crypto/fipsmodule/digest/internal.h +112 -0
- data/third_party/boringssl/crypto/fipsmodule/digest/md32_common.h +268 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +943 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +517 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +277 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +316 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +404 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +1131 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +1674 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64-table.h +9543 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +456 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.h +113 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +1052 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/util-64.c +109 -0
- data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +474 -0
- data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +442 -0
- data/third_party/boringssl/crypto/fipsmodule/hmac/hmac.c +228 -0
- data/third_party/boringssl/crypto/fipsmodule/is_fips.c +27 -0
- data/third_party/boringssl/crypto/fipsmodule/md4/md4.c +254 -0
- data/third_party/boringssl/crypto/fipsmodule/md5/md5.c +298 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/cbc.c +211 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/cfb.c +234 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/ctr.c +220 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/gcm.c +1063 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/internal.h +384 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/ofb.c +95 -0
- data/third_party/boringssl/crypto/fipsmodule/modes/polyval.c +91 -0
- data/third_party/boringssl/crypto/fipsmodule/rand/ctrdrbg.c +200 -0
- data/third_party/boringssl/crypto/fipsmodule/rand/internal.h +92 -0
- data/third_party/boringssl/crypto/fipsmodule/rand/rand.c +358 -0
- data/third_party/boringssl/crypto/fipsmodule/rand/urandom.c +302 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +263 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +131 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/padding.c +692 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +857 -0
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +1051 -0
- data/third_party/boringssl/crypto/fipsmodule/sha/sha1-altivec.c +361 -0
- data/third_party/boringssl/crypto/fipsmodule/sha/sha1.c +375 -0
- data/third_party/boringssl/crypto/fipsmodule/sha/sha256.c +337 -0
- data/third_party/boringssl/crypto/fipsmodule/sha/sha512.c +608 -0
- data/third_party/boringssl/crypto/hkdf/hkdf.c +112 -0
- data/third_party/boringssl/crypto/internal.h +676 -0
- data/third_party/boringssl/crypto/lhash/lhash.c +336 -0
- data/third_party/boringssl/crypto/mem.c +237 -0
- data/third_party/boringssl/crypto/obj/obj.c +621 -0
- data/third_party/boringssl/crypto/obj/obj_dat.h +6244 -0
- data/third_party/boringssl/crypto/obj/obj_xref.c +122 -0
- data/third_party/boringssl/crypto/pem/pem_all.c +262 -0
- data/third_party/boringssl/crypto/pem/pem_info.c +379 -0
- data/third_party/boringssl/crypto/pem/pem_lib.c +776 -0
- data/third_party/boringssl/crypto/pem/pem_oth.c +88 -0
- data/third_party/boringssl/crypto/pem/pem_pk8.c +258 -0
- data/third_party/boringssl/crypto/pem/pem_pkey.c +227 -0
- data/third_party/boringssl/crypto/pem/pem_x509.c +65 -0
- data/third_party/boringssl/crypto/pem/pem_xaux.c +65 -0
- data/third_party/boringssl/crypto/pkcs7/internal.h +49 -0
- data/third_party/boringssl/crypto/pkcs7/pkcs7.c +166 -0
- data/third_party/boringssl/crypto/pkcs7/pkcs7_x509.c +233 -0
- data/third_party/boringssl/crypto/pkcs8/internal.h +120 -0
- data/third_party/boringssl/crypto/pkcs8/p5_pbev2.c +307 -0
- data/third_party/boringssl/crypto/pkcs8/pkcs8.c +513 -0
- data/third_party/boringssl/crypto/pkcs8/pkcs8_x509.c +789 -0
- data/third_party/boringssl/crypto/poly1305/internal.h +41 -0
- data/third_party/boringssl/crypto/poly1305/poly1305.c +318 -0
- data/third_party/boringssl/crypto/poly1305/poly1305_arm.c +304 -0
- data/third_party/boringssl/crypto/poly1305/poly1305_vec.c +839 -0
- data/third_party/boringssl/crypto/pool/internal.h +45 -0
- data/third_party/boringssl/crypto/pool/pool.c +200 -0
- data/third_party/boringssl/crypto/rand_extra/deterministic.c +48 -0
- data/third_party/boringssl/crypto/rand_extra/forkunsafe.c +46 -0
- data/third_party/boringssl/crypto/rand_extra/fuchsia.c +43 -0
- data/third_party/boringssl/crypto/rand_extra/rand_extra.c +70 -0
- data/third_party/boringssl/crypto/rand_extra/windows.c +53 -0
- data/third_party/boringssl/crypto/rc4/rc4.c +98 -0
- data/third_party/boringssl/crypto/refcount_c11.c +67 -0
- data/third_party/boringssl/crypto/refcount_lock.c +53 -0
- data/third_party/boringssl/crypto/rsa_extra/rsa_asn1.c +325 -0
- data/third_party/boringssl/crypto/stack/stack.c +380 -0
- data/third_party/boringssl/crypto/thread.c +110 -0
- data/third_party/boringssl/crypto/thread_none.c +59 -0
- data/third_party/boringssl/crypto/thread_pthread.c +176 -0
- data/third_party/boringssl/crypto/thread_win.c +237 -0
- data/third_party/boringssl/crypto/x509/a_digest.c +96 -0
- data/third_party/boringssl/crypto/x509/a_sign.c +128 -0
- data/third_party/boringssl/crypto/x509/a_strex.c +633 -0
- data/third_party/boringssl/crypto/x509/a_verify.c +115 -0
- data/third_party/boringssl/crypto/x509/algorithm.c +153 -0
- data/third_party/boringssl/crypto/x509/asn1_gen.c +841 -0
- data/third_party/boringssl/crypto/x509/by_dir.c +451 -0
- data/third_party/boringssl/crypto/x509/by_file.c +274 -0
- data/third_party/boringssl/crypto/x509/charmap.h +15 -0
- data/third_party/boringssl/crypto/x509/i2d_pr.c +83 -0
- data/third_party/boringssl/crypto/x509/internal.h +66 -0
- data/third_party/boringssl/crypto/x509/rsa_pss.c +385 -0
- data/third_party/boringssl/crypto/x509/t_crl.c +128 -0
- data/third_party/boringssl/crypto/x509/t_req.c +246 -0
- data/third_party/boringssl/crypto/x509/t_x509.c +547 -0
- data/third_party/boringssl/crypto/x509/t_x509a.c +111 -0
- data/third_party/boringssl/crypto/x509/vpm_int.h +70 -0
- data/third_party/boringssl/crypto/x509/x509.c +157 -0
- data/third_party/boringssl/crypto/x509/x509_att.c +381 -0
- data/third_party/boringssl/crypto/x509/x509_cmp.c +477 -0
- data/third_party/boringssl/crypto/x509/x509_d2.c +106 -0
- data/third_party/boringssl/crypto/x509/x509_def.c +103 -0
- data/third_party/boringssl/crypto/x509/x509_ext.c +206 -0
- data/third_party/boringssl/crypto/x509/x509_lu.c +725 -0
- data/third_party/boringssl/crypto/x509/x509_obj.c +198 -0
- data/third_party/boringssl/crypto/x509/x509_r2x.c +117 -0
- data/third_party/boringssl/crypto/x509/x509_req.c +322 -0
- data/third_party/boringssl/crypto/x509/x509_set.c +164 -0
- data/third_party/boringssl/crypto/x509/x509_trs.c +326 -0
- data/third_party/boringssl/crypto/x509/x509_txt.c +205 -0
- data/third_party/boringssl/crypto/x509/x509_v3.c +278 -0
- data/third_party/boringssl/crypto/x509/x509_vfy.c +2472 -0
- data/third_party/boringssl/crypto/x509/x509_vpm.c +648 -0
- data/third_party/boringssl/crypto/x509/x509cset.c +170 -0
- data/third_party/boringssl/crypto/x509/x509name.c +389 -0
- data/third_party/boringssl/crypto/x509/x509rset.c +81 -0
- data/third_party/boringssl/crypto/x509/x509spki.c +137 -0
- data/third_party/boringssl/crypto/x509/x_algor.c +151 -0
- data/third_party/boringssl/crypto/x509/x_all.c +501 -0
- data/third_party/boringssl/crypto/x509/x_attrib.c +111 -0
- data/third_party/boringssl/crypto/x509/x_crl.c +541 -0
- data/third_party/boringssl/crypto/x509/x_exten.c +75 -0
- data/third_party/boringssl/crypto/x509/x_info.c +98 -0
- data/third_party/boringssl/crypto/x509/x_name.c +541 -0
- data/third_party/boringssl/crypto/x509/x_pkey.c +106 -0
- data/third_party/boringssl/crypto/x509/x_pubkey.c +368 -0
- data/third_party/boringssl/crypto/x509/x_req.c +109 -0
- data/third_party/boringssl/crypto/x509/x_sig.c +69 -0
- data/third_party/boringssl/crypto/x509/x_spki.c +80 -0
- data/third_party/boringssl/crypto/x509/x_val.c +69 -0
- data/third_party/boringssl/crypto/x509/x_x509.c +328 -0
- data/third_party/boringssl/crypto/x509/x_x509a.c +198 -0
- data/third_party/boringssl/crypto/x509v3/ext_dat.h +143 -0
- data/third_party/boringssl/crypto/x509v3/pcy_cache.c +284 -0
- data/third_party/boringssl/crypto/x509v3/pcy_data.c +130 -0
- data/third_party/boringssl/crypto/x509v3/pcy_int.h +217 -0
- data/third_party/boringssl/crypto/x509v3/pcy_lib.c +155 -0
- data/third_party/boringssl/crypto/x509v3/pcy_map.c +130 -0
- data/third_party/boringssl/crypto/x509v3/pcy_node.c +188 -0
- data/third_party/boringssl/crypto/x509v3/pcy_tree.c +840 -0
- data/third_party/boringssl/crypto/x509v3/v3_akey.c +204 -0
- data/third_party/boringssl/crypto/x509v3/v3_akeya.c +72 -0
- data/third_party/boringssl/crypto/x509v3/v3_alt.c +623 -0
- data/third_party/boringssl/crypto/x509v3/v3_bcons.c +133 -0
- data/third_party/boringssl/crypto/x509v3/v3_bitst.c +141 -0
- data/third_party/boringssl/crypto/x509v3/v3_conf.c +462 -0
- data/third_party/boringssl/crypto/x509v3/v3_cpols.c +502 -0
- data/third_party/boringssl/crypto/x509v3/v3_crld.c +561 -0
- data/third_party/boringssl/crypto/x509v3/v3_enum.c +100 -0
- data/third_party/boringssl/crypto/x509v3/v3_extku.c +148 -0
- data/third_party/boringssl/crypto/x509v3/v3_genn.c +251 -0
- data/third_party/boringssl/crypto/x509v3/v3_ia5.c +122 -0
- data/third_party/boringssl/crypto/x509v3/v3_info.c +219 -0
- data/third_party/boringssl/crypto/x509v3/v3_int.c +91 -0
- data/third_party/boringssl/crypto/x509v3/v3_lib.c +370 -0
- data/third_party/boringssl/crypto/x509v3/v3_ncons.c +501 -0
- data/third_party/boringssl/crypto/x509v3/v3_pci.c +287 -0
- data/third_party/boringssl/crypto/x509v3/v3_pcia.c +57 -0
- data/third_party/boringssl/crypto/x509v3/v3_pcons.c +139 -0
- data/third_party/boringssl/crypto/x509v3/v3_pku.c +110 -0
- data/third_party/boringssl/crypto/x509v3/v3_pmaps.c +154 -0
- data/third_party/boringssl/crypto/x509v3/v3_prn.c +229 -0
- data/third_party/boringssl/crypto/x509v3/v3_purp.c +866 -0
- data/third_party/boringssl/crypto/x509v3/v3_skey.c +152 -0
- data/third_party/boringssl/crypto/x509v3/v3_sxnet.c +274 -0
- data/third_party/boringssl/crypto/x509v3/v3_utl.c +1352 -0
- data/third_party/boringssl/include/openssl/aead.h +423 -0
- data/third_party/boringssl/include/openssl/aes.h +170 -0
- data/third_party/boringssl/include/openssl/arm_arch.h +121 -0
- data/third_party/boringssl/include/openssl/asn1.h +982 -0
- data/third_party/boringssl/include/openssl/asn1_mac.h +18 -0
- data/third_party/boringssl/include/openssl/asn1t.h +892 -0
- data/third_party/boringssl/include/openssl/base.h +468 -0
- data/third_party/boringssl/include/openssl/base64.h +187 -0
- data/third_party/boringssl/include/openssl/bio.h +902 -0
- data/third_party/boringssl/include/openssl/blowfish.h +93 -0
- data/third_party/boringssl/include/openssl/bn.h +975 -0
- data/third_party/boringssl/include/openssl/buf.h +137 -0
- data/third_party/boringssl/include/openssl/buffer.h +18 -0
- data/third_party/boringssl/include/openssl/bytestring.h +480 -0
- data/third_party/boringssl/include/openssl/cast.h +96 -0
- data/third_party/boringssl/include/openssl/chacha.h +41 -0
- data/third_party/boringssl/include/openssl/cipher.h +608 -0
- data/third_party/boringssl/include/openssl/cmac.h +87 -0
- data/third_party/boringssl/include/openssl/conf.h +183 -0
- data/third_party/boringssl/include/openssl/cpu.h +196 -0
- data/third_party/boringssl/include/openssl/crypto.h +118 -0
- data/third_party/boringssl/include/openssl/curve25519.h +201 -0
- data/third_party/boringssl/include/openssl/des.h +177 -0
- data/third_party/boringssl/include/openssl/dh.h +298 -0
- data/third_party/boringssl/include/openssl/digest.h +316 -0
- data/third_party/boringssl/include/openssl/dsa.h +435 -0
- data/third_party/boringssl/include/openssl/dtls1.h +16 -0
- data/third_party/boringssl/include/openssl/ec.h +407 -0
- data/third_party/boringssl/include/openssl/ec_key.h +341 -0
- data/third_party/boringssl/include/openssl/ecdh.h +101 -0
- data/third_party/boringssl/include/openssl/ecdsa.h +199 -0
- data/third_party/boringssl/include/openssl/engine.h +109 -0
- data/third_party/boringssl/include/openssl/err.h +458 -0
- data/third_party/boringssl/include/openssl/evp.h +873 -0
- data/third_party/boringssl/include/openssl/ex_data.h +203 -0
- data/third_party/boringssl/include/openssl/hkdf.h +64 -0
- data/third_party/boringssl/include/openssl/hmac.h +186 -0
- data/third_party/boringssl/include/openssl/is_boringssl.h +16 -0
- data/third_party/boringssl/include/openssl/lhash.h +174 -0
- data/third_party/boringssl/include/openssl/lhash_macros.h +174 -0
- data/third_party/boringssl/include/openssl/md4.h +106 -0
- data/third_party/boringssl/include/openssl/md5.h +107 -0
- data/third_party/boringssl/include/openssl/mem.h +156 -0
- data/third_party/boringssl/include/openssl/nid.h +4242 -0
- data/third_party/boringssl/include/openssl/obj.h +233 -0
- data/third_party/boringssl/include/openssl/obj_mac.h +18 -0
- data/third_party/boringssl/include/openssl/objects.h +18 -0
- data/third_party/boringssl/include/openssl/opensslconf.h +67 -0
- data/third_party/boringssl/include/openssl/opensslv.h +18 -0
- data/third_party/boringssl/include/openssl/ossl_typ.h +18 -0
- data/third_party/boringssl/include/openssl/pem.h +397 -0
- data/third_party/boringssl/include/openssl/pkcs12.h +18 -0
- data/third_party/boringssl/include/openssl/pkcs7.h +82 -0
- data/third_party/boringssl/include/openssl/pkcs8.h +230 -0
- data/third_party/boringssl/include/openssl/poly1305.h +51 -0
- data/third_party/boringssl/include/openssl/pool.h +91 -0
- data/third_party/boringssl/include/openssl/rand.h +125 -0
- data/third_party/boringssl/include/openssl/rc4.h +96 -0
- data/third_party/boringssl/include/openssl/ripemd.h +107 -0
- data/third_party/boringssl/include/openssl/rsa.h +731 -0
- data/third_party/boringssl/include/openssl/safestack.h +16 -0
- data/third_party/boringssl/include/openssl/sha.h +256 -0
- data/third_party/boringssl/include/openssl/span.h +191 -0
- data/third_party/boringssl/include/openssl/srtp.h +18 -0
- data/third_party/boringssl/include/openssl/ssl.h +4592 -0
- data/third_party/boringssl/include/openssl/ssl3.h +333 -0
- data/third_party/boringssl/include/openssl/stack.h +485 -0
- data/third_party/boringssl/include/openssl/thread.h +191 -0
- data/third_party/boringssl/include/openssl/tls1.h +610 -0
- data/third_party/boringssl/include/openssl/type_check.h +91 -0
- data/third_party/boringssl/include/openssl/x509.h +1176 -0
- data/third_party/boringssl/include/openssl/x509_vfy.h +614 -0
- data/third_party/boringssl/include/openssl/x509v3.h +826 -0
- data/third_party/boringssl/ssl/bio_ssl.cc +179 -0
- data/third_party/boringssl/ssl/custom_extensions.cc +265 -0
- data/third_party/boringssl/ssl/d1_both.cc +837 -0
- data/third_party/boringssl/ssl/d1_lib.cc +267 -0
- data/third_party/boringssl/ssl/d1_pkt.cc +274 -0
- data/third_party/boringssl/ssl/d1_srtp.cc +232 -0
- data/third_party/boringssl/ssl/dtls_method.cc +193 -0
- data/third_party/boringssl/ssl/dtls_record.cc +353 -0
- data/third_party/boringssl/ssl/handshake.cc +616 -0
- data/third_party/boringssl/ssl/handshake_client.cc +1836 -0
- data/third_party/boringssl/ssl/handshake_server.cc +1662 -0
- data/third_party/boringssl/ssl/internal.h +3011 -0
- data/third_party/boringssl/ssl/s3_both.cc +585 -0
- data/third_party/boringssl/ssl/s3_lib.cc +224 -0
- data/third_party/boringssl/ssl/s3_pkt.cc +443 -0
- data/third_party/boringssl/ssl/ssl_aead_ctx.cc +415 -0
- data/third_party/boringssl/ssl/ssl_asn1.cc +840 -0
- data/third_party/boringssl/ssl/ssl_buffer.cc +286 -0
- data/third_party/boringssl/ssl/ssl_cert.cc +913 -0
- data/third_party/boringssl/ssl/ssl_cipher.cc +1777 -0
- data/third_party/boringssl/ssl/ssl_file.cc +583 -0
- data/third_party/boringssl/ssl/ssl_key_share.cc +250 -0
- data/third_party/boringssl/ssl/ssl_lib.cc +2650 -0
- data/third_party/boringssl/ssl/ssl_privkey.cc +488 -0
- data/third_party/boringssl/ssl/ssl_session.cc +1221 -0
- data/third_party/boringssl/ssl/ssl_stat.cc +224 -0
- data/third_party/boringssl/ssl/ssl_transcript.cc +398 -0
- data/third_party/boringssl/ssl/ssl_versions.cc +472 -0
- data/third_party/boringssl/ssl/ssl_x509.cc +1299 -0
- data/third_party/boringssl/ssl/t1_enc.cc +503 -0
- data/third_party/boringssl/ssl/t1_lib.cc +3457 -0
- data/third_party/boringssl/ssl/tls13_both.cc +551 -0
- data/third_party/boringssl/ssl/tls13_client.cc +977 -0
- data/third_party/boringssl/ssl/tls13_enc.cc +563 -0
- data/third_party/boringssl/ssl/tls13_server.cc +1068 -0
- data/third_party/boringssl/ssl/tls_method.cc +291 -0
- data/third_party/boringssl/ssl/tls_record.cc +712 -0
- data/third_party/boringssl/third_party/fiat/curve25519.c +5062 -0
- data/third_party/boringssl/third_party/fiat/internal.h +142 -0
- data/third_party/cares/ares_build.h +223 -0
- data/third_party/cares/cares/ares.h +658 -0
- data/third_party/cares/cares/ares__close_sockets.c +61 -0
- data/third_party/cares/cares/ares__get_hostent.c +261 -0
- data/third_party/cares/cares/ares__read_line.c +73 -0
- data/third_party/cares/cares/ares__timeval.c +111 -0
- data/third_party/cares/cares/ares_cancel.c +63 -0
- data/third_party/cares/cares/ares_create_query.c +202 -0
- data/third_party/cares/cares/ares_data.c +221 -0
- data/third_party/cares/cares/ares_data.h +72 -0
- data/third_party/cares/cares/ares_destroy.c +108 -0
- data/third_party/cares/cares/ares_dns.h +103 -0
- data/third_party/cares/cares/ares_expand_name.c +209 -0
- data/third_party/cares/cares/ares_expand_string.c +70 -0
- data/third_party/cares/cares/ares_fds.c +59 -0
- data/third_party/cares/cares/ares_free_hostent.c +41 -0
- data/third_party/cares/cares/ares_free_string.c +25 -0
- data/third_party/cares/cares/ares_getenv.c +30 -0
- data/third_party/cares/cares/ares_getenv.h +26 -0
- data/third_party/cares/cares/ares_gethostbyaddr.c +294 -0
- data/third_party/cares/cares/ares_gethostbyname.c +518 -0
- data/third_party/cares/cares/ares_getnameinfo.c +442 -0
- data/third_party/cares/cares/ares_getopt.c +122 -0
- data/third_party/cares/cares/ares_getopt.h +53 -0
- data/third_party/cares/cares/ares_getsock.c +66 -0
- data/third_party/cares/cares/ares_inet_net_pton.h +25 -0
- data/third_party/cares/cares/ares_init.c +2514 -0
- data/third_party/cares/cares/ares_iphlpapi.h +221 -0
- data/third_party/cares/cares/ares_ipv6.h +78 -0
- data/third_party/cares/cares/ares_library_init.c +177 -0
- data/third_party/cares/cares/ares_library_init.h +43 -0
- data/third_party/cares/cares/ares_llist.c +63 -0
- data/third_party/cares/cares/ares_llist.h +39 -0
- data/third_party/cares/cares/ares_mkquery.c +24 -0
- data/third_party/cares/cares/ares_nowarn.c +260 -0
- data/third_party/cares/cares/ares_nowarn.h +61 -0
- data/third_party/cares/cares/ares_options.c +402 -0
- data/third_party/cares/cares/ares_parse_a_reply.c +264 -0
- data/third_party/cares/cares/ares_parse_aaaa_reply.c +264 -0
- data/third_party/cares/cares/ares_parse_mx_reply.c +170 -0
- data/third_party/cares/cares/ares_parse_naptr_reply.c +193 -0
- data/third_party/cares/cares/ares_parse_ns_reply.c +183 -0
- data/third_party/cares/cares/ares_parse_ptr_reply.c +219 -0
- data/third_party/cares/cares/ares_parse_soa_reply.c +133 -0
- data/third_party/cares/cares/ares_parse_srv_reply.c +179 -0
- data/third_party/cares/cares/ares_parse_txt_reply.c +220 -0
- data/third_party/cares/cares/ares_platform.c +11035 -0
- data/third_party/cares/cares/ares_platform.h +43 -0
- data/third_party/cares/cares/ares_private.h +374 -0
- data/third_party/cares/cares/ares_process.c +1448 -0
- data/third_party/cares/cares/ares_query.c +186 -0
- data/third_party/cares/cares/ares_rules.h +125 -0
- data/third_party/cares/cares/ares_search.c +316 -0
- data/third_party/cares/cares/ares_send.c +131 -0
- data/third_party/cares/cares/ares_setup.h +217 -0
- data/third_party/cares/cares/ares_strcasecmp.c +66 -0
- data/third_party/cares/cares/ares_strcasecmp.h +30 -0
- data/third_party/cares/cares/ares_strdup.c +49 -0
- data/third_party/cares/cares/ares_strdup.h +24 -0
- data/third_party/cares/cares/ares_strerror.c +56 -0
- data/third_party/cares/cares/ares_timeout.c +88 -0
- data/third_party/cares/cares/ares_version.c +11 -0
- data/third_party/cares/cares/ares_version.h +24 -0
- data/third_party/cares/cares/ares_writev.c +79 -0
- data/third_party/cares/cares/bitncmp.c +59 -0
- data/third_party/cares/cares/bitncmp.h +26 -0
- data/third_party/cares/cares/config-win32.h +351 -0
- data/third_party/cares/cares/inet_net_pton.c +450 -0
- data/third_party/cares/cares/inet_ntop.c +208 -0
- data/third_party/cares/cares/setup_once.h +554 -0
- data/third_party/cares/cares/windows_port.c +22 -0
- data/third_party/cares/config_darwin/ares_config.h +425 -0
- data/third_party/cares/config_freebsd/ares_config.h +502 -0
- data/third_party/cares/config_linux/ares_config.h +458 -0
- data/third_party/cares/config_openbsd/ares_config.h +502 -0
- data/third_party/nanopb/pb.h +579 -0
- data/third_party/nanopb/pb_common.c +97 -0
- data/third_party/nanopb/pb_common.h +42 -0
- data/third_party/nanopb/pb_decode.c +1347 -0
- data/third_party/nanopb/pb_decode.h +149 -0
- data/third_party/nanopb/pb_encode.c +696 -0
- data/third_party/nanopb/pb_encode.h +154 -0
- data/third_party/zlib/adler32.c +186 -0
- data/third_party/zlib/compress.c +86 -0
- data/third_party/zlib/crc32.c +442 -0
- data/third_party/zlib/crc32.h +441 -0
- data/third_party/zlib/deflate.c +2163 -0
- data/third_party/zlib/deflate.h +349 -0
- data/third_party/zlib/gzclose.c +25 -0
- data/third_party/zlib/gzguts.h +218 -0
- data/third_party/zlib/gzlib.c +637 -0
- data/third_party/zlib/gzread.c +654 -0
- data/third_party/zlib/gzwrite.c +665 -0
- data/third_party/zlib/infback.c +640 -0
- data/third_party/zlib/inffast.c +323 -0
- data/third_party/zlib/inffast.h +11 -0
- data/third_party/zlib/inffixed.h +94 -0
- data/third_party/zlib/inflate.c +1561 -0
- data/third_party/zlib/inflate.h +125 -0
- data/third_party/zlib/inftrees.c +304 -0
- data/third_party/zlib/inftrees.h +62 -0
- data/third_party/zlib/trees.c +1203 -0
- data/third_party/zlib/trees.h +128 -0
- data/third_party/zlib/uncompr.c +93 -0
- data/third_party/zlib/zconf.h +534 -0
- data/third_party/zlib/zlib.h +1912 -0
- data/third_party/zlib/zutil.c +325 -0
- data/third_party/zlib/zutil.h +271 -0
- metadata +1586 -0
@@ -0,0 +1,3209 @@
|
|
1
|
+
/*
|
2
|
+
*
|
3
|
+
* Copyright 2015 gRPC authors.
|
4
|
+
*
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
*
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
*
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*
|
17
|
+
*/
|
18
|
+
|
19
|
+
#include <grpc/support/port_platform.h>
|
20
|
+
|
21
|
+
#include "src/core/ext/filters/client_channel/client_channel.h"
|
22
|
+
|
23
|
+
#include <inttypes.h>
|
24
|
+
#include <limits.h>
|
25
|
+
#include <stdbool.h>
|
26
|
+
#include <stdio.h>
|
27
|
+
#include <string.h>
|
28
|
+
|
29
|
+
#include <grpc/support/alloc.h>
|
30
|
+
#include <grpc/support/log.h>
|
31
|
+
#include <grpc/support/string_util.h>
|
32
|
+
#include <grpc/support/sync.h>
|
33
|
+
|
34
|
+
#include "src/core/ext/filters/client_channel/backup_poller.h"
|
35
|
+
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
|
36
|
+
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
|
37
|
+
#include "src/core/ext/filters/client_channel/method_params.h"
|
38
|
+
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
|
39
|
+
#include "src/core/ext/filters/client_channel/resolver_registry.h"
|
40
|
+
#include "src/core/ext/filters/client_channel/retry_throttle.h"
|
41
|
+
#include "src/core/ext/filters/client_channel/subchannel.h"
|
42
|
+
#include "src/core/ext/filters/deadline/deadline_filter.h"
|
43
|
+
#include "src/core/lib/backoff/backoff.h"
|
44
|
+
#include "src/core/lib/channel/channel_args.h"
|
45
|
+
#include "src/core/lib/channel/connected_channel.h"
|
46
|
+
#include "src/core/lib/channel/status_util.h"
|
47
|
+
#include "src/core/lib/gpr/string.h"
|
48
|
+
#include "src/core/lib/gprpp/inlined_vector.h"
|
49
|
+
#include "src/core/lib/gprpp/manual_constructor.h"
|
50
|
+
#include "src/core/lib/iomgr/combiner.h"
|
51
|
+
#include "src/core/lib/iomgr/iomgr.h"
|
52
|
+
#include "src/core/lib/iomgr/polling_entity.h"
|
53
|
+
#include "src/core/lib/profiling/timers.h"
|
54
|
+
#include "src/core/lib/slice/slice_internal.h"
|
55
|
+
#include "src/core/lib/slice/slice_string_helpers.h"
|
56
|
+
#include "src/core/lib/surface/channel.h"
|
57
|
+
#include "src/core/lib/transport/connectivity_state.h"
|
58
|
+
#include "src/core/lib/transport/error_utils.h"
|
59
|
+
#include "src/core/lib/transport/metadata.h"
|
60
|
+
#include "src/core/lib/transport/metadata_batch.h"
|
61
|
+
#include "src/core/lib/transport/service_config.h"
|
62
|
+
#include "src/core/lib/transport/static_metadata.h"
|
63
|
+
#include "src/core/lib/transport/status_metadata.h"
|
64
|
+
|
65
|
+
using grpc_core::internal::ClientChannelMethodParams;
|
66
|
+
using grpc_core::internal::ServerRetryThrottleData;
|
67
|
+
|
68
|
+
/* Client channel implementation */
|
69
|
+
|
70
|
+
// By default, we buffer 256 KiB per RPC for retries.
|
71
|
+
// TODO(roth): Do we have any data to suggest a better value?
|
72
|
+
#define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
|
73
|
+
|
74
|
+
// This value was picked arbitrarily. It can be changed if there is
|
75
|
+
// any even moderately compelling reason to do so.
|
76
|
+
#define RETRY_BACKOFF_JITTER 0.2
|
77
|
+
|
78
|
+
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
|
79
|
+
|
80
|
+
/*************************************************************************
|
81
|
+
* CHANNEL-WIDE FUNCTIONS
|
82
|
+
*/
|
83
|
+
|
84
|
+
struct external_connectivity_watcher;
|
85
|
+
|
86
|
+
typedef grpc_core::SliceHashTable<
|
87
|
+
grpc_core::RefCountedPtr<ClientChannelMethodParams>>
|
88
|
+
MethodParamsTable;
|
89
|
+
|
90
|
+
typedef struct client_channel_channel_data {
|
91
|
+
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
|
92
|
+
bool started_resolving;
|
93
|
+
bool deadline_checking_enabled;
|
94
|
+
grpc_client_channel_factory* client_channel_factory;
|
95
|
+
bool enable_retries;
|
96
|
+
size_t per_rpc_retry_buffer_size;
|
97
|
+
|
98
|
+
/** combiner protecting all variables below in this data structure */
|
99
|
+
grpc_combiner* combiner;
|
100
|
+
/** currently active load balancer */
|
101
|
+
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
|
102
|
+
/** retry throttle data */
|
103
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
104
|
+
/** maps method names to method_parameters structs */
|
105
|
+
grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
|
106
|
+
/** incoming resolver result - set by resolver.next() */
|
107
|
+
grpc_channel_args* resolver_result;
|
108
|
+
/** a list of closures that are all waiting for resolver result to come in */
|
109
|
+
grpc_closure_list waiting_for_resolver_result_closures;
|
110
|
+
/** resolver callback */
|
111
|
+
grpc_closure on_resolver_result_changed;
|
112
|
+
/** connectivity state being tracked */
|
113
|
+
grpc_connectivity_state_tracker state_tracker;
|
114
|
+
/** when an lb_policy arrives, should we try to exit idle */
|
115
|
+
bool exit_idle_when_lb_policy_arrives;
|
116
|
+
/** owning stack */
|
117
|
+
grpc_channel_stack* owning_stack;
|
118
|
+
/** interested parties (owned) */
|
119
|
+
grpc_pollset_set* interested_parties;
|
120
|
+
|
121
|
+
/* external_connectivity_watcher_list head is guarded by its own mutex, since
|
122
|
+
* counts need to be grabbed immediately without polling on a cq */
|
123
|
+
gpr_mu external_connectivity_watcher_list_mu;
|
124
|
+
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
|
125
|
+
|
126
|
+
/* the following properties are guarded by a mutex since APIs require them
|
127
|
+
to be instantaneously available */
|
128
|
+
gpr_mu info_mu;
|
129
|
+
char* info_lb_policy_name;
|
130
|
+
/** service config in JSON form */
|
131
|
+
char* info_service_config_json;
|
132
|
+
} channel_data;
|
133
|
+
|
134
|
+
typedef struct {
|
135
|
+
channel_data* chand;
|
136
|
+
/** used as an identifier, don't dereference it because the LB policy may be
|
137
|
+
* non-existing when the callback is run */
|
138
|
+
grpc_core::LoadBalancingPolicy* lb_policy;
|
139
|
+
grpc_closure closure;
|
140
|
+
} reresolution_request_args;
|
141
|
+
|
142
|
+
/** We create one watcher for each new lb_policy that is returned from a
|
143
|
+
resolver, to watch for state changes from the lb_policy. When a state
|
144
|
+
change is seen, we update the channel, and create a new watcher. */
|
145
|
+
typedef struct {
|
146
|
+
channel_data* chand;
|
147
|
+
grpc_closure on_changed;
|
148
|
+
grpc_connectivity_state state;
|
149
|
+
grpc_core::LoadBalancingPolicy* lb_policy;
|
150
|
+
} lb_policy_connectivity_watcher;
|
151
|
+
|
152
|
+
static void watch_lb_policy_locked(channel_data* chand,
|
153
|
+
grpc_core::LoadBalancingPolicy* lb_policy,
|
154
|
+
grpc_connectivity_state current_state);
|
155
|
+
|
156
|
+
static void set_channel_connectivity_state_locked(channel_data* chand,
|
157
|
+
grpc_connectivity_state state,
|
158
|
+
grpc_error* error,
|
159
|
+
const char* reason) {
|
160
|
+
/* TODO: Improve failure handling:
|
161
|
+
* - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
|
162
|
+
* - Hand over pending picks from old policies during the switch that happens
|
163
|
+
* when resolver provides an update. */
|
164
|
+
if (chand->lb_policy != nullptr) {
|
165
|
+
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
|
166
|
+
/* cancel picks with wait_for_ready=false */
|
167
|
+
chand->lb_policy->CancelMatchingPicksLocked(
|
168
|
+
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
|
169
|
+
/* check= */ 0, GRPC_ERROR_REF(error));
|
170
|
+
} else if (state == GRPC_CHANNEL_SHUTDOWN) {
|
171
|
+
/* cancel all picks */
|
172
|
+
chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
|
173
|
+
GRPC_ERROR_REF(error));
|
174
|
+
}
|
175
|
+
}
|
176
|
+
if (grpc_client_channel_trace.enabled()) {
|
177
|
+
gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
|
178
|
+
grpc_connectivity_state_name(state));
|
179
|
+
}
|
180
|
+
grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
|
181
|
+
}
|
182
|
+
|
183
|
+
static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
|
184
|
+
lb_policy_connectivity_watcher* w =
|
185
|
+
static_cast<lb_policy_connectivity_watcher*>(arg);
|
186
|
+
/* check if the notification is for the latest policy */
|
187
|
+
if (w->lb_policy == w->chand->lb_policy.get()) {
|
188
|
+
if (grpc_client_channel_trace.enabled()) {
|
189
|
+
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
|
190
|
+
w->lb_policy, grpc_connectivity_state_name(w->state));
|
191
|
+
}
|
192
|
+
set_channel_connectivity_state_locked(w->chand, w->state,
|
193
|
+
GRPC_ERROR_REF(error), "lb_changed");
|
194
|
+
if (w->state != GRPC_CHANNEL_SHUTDOWN) {
|
195
|
+
watch_lb_policy_locked(w->chand, w->lb_policy, w->state);
|
196
|
+
}
|
197
|
+
}
|
198
|
+
GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy");
|
199
|
+
gpr_free(w);
|
200
|
+
}
|
201
|
+
|
202
|
+
static void watch_lb_policy_locked(channel_data* chand,
|
203
|
+
grpc_core::LoadBalancingPolicy* lb_policy,
|
204
|
+
grpc_connectivity_state current_state) {
|
205
|
+
lb_policy_connectivity_watcher* w =
|
206
|
+
static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
|
207
|
+
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
|
208
|
+
w->chand = chand;
|
209
|
+
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
|
210
|
+
grpc_combiner_scheduler(chand->combiner));
|
211
|
+
w->state = current_state;
|
212
|
+
w->lb_policy = lb_policy;
|
213
|
+
lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed);
|
214
|
+
}
|
215
|
+
|
216
|
+
static void start_resolving_locked(channel_data* chand) {
|
217
|
+
if (grpc_client_channel_trace.enabled()) {
|
218
|
+
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
|
219
|
+
}
|
220
|
+
GPR_ASSERT(!chand->started_resolving);
|
221
|
+
chand->started_resolving = true;
|
222
|
+
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
|
223
|
+
chand->resolver->NextLocked(&chand->resolver_result,
|
224
|
+
&chand->on_resolver_result_changed);
|
225
|
+
}
|
226
|
+
|
227
|
+
typedef struct {
|
228
|
+
char* server_name;
|
229
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
230
|
+
} service_config_parsing_state;
|
231
|
+
|
232
|
+
static void parse_retry_throttle_params(
|
233
|
+
const grpc_json* field, service_config_parsing_state* parsing_state) {
|
234
|
+
if (strcmp(field->key, "retryThrottling") == 0) {
|
235
|
+
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
|
236
|
+
if (field->type != GRPC_JSON_OBJECT) return;
|
237
|
+
int max_milli_tokens = 0;
|
238
|
+
int milli_token_ratio = 0;
|
239
|
+
for (grpc_json* sub_field = field->child; sub_field != nullptr;
|
240
|
+
sub_field = sub_field->next) {
|
241
|
+
if (sub_field->key == nullptr) return;
|
242
|
+
if (strcmp(sub_field->key, "maxTokens") == 0) {
|
243
|
+
if (max_milli_tokens != 0) return; // Duplicate.
|
244
|
+
if (sub_field->type != GRPC_JSON_NUMBER) return;
|
245
|
+
max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
|
246
|
+
if (max_milli_tokens == -1) return;
|
247
|
+
max_milli_tokens *= 1000;
|
248
|
+
} else if (strcmp(sub_field->key, "tokenRatio") == 0) {
|
249
|
+
if (milli_token_ratio != 0) return; // Duplicate.
|
250
|
+
if (sub_field->type != GRPC_JSON_NUMBER) return;
|
251
|
+
// We support up to 3 decimal digits.
|
252
|
+
size_t whole_len = strlen(sub_field->value);
|
253
|
+
uint32_t multiplier = 1;
|
254
|
+
uint32_t decimal_value = 0;
|
255
|
+
const char* decimal_point = strchr(sub_field->value, '.');
|
256
|
+
if (decimal_point != nullptr) {
|
257
|
+
whole_len = static_cast<size_t>(decimal_point - sub_field->value);
|
258
|
+
multiplier = 1000;
|
259
|
+
size_t decimal_len = strlen(decimal_point + 1);
|
260
|
+
if (decimal_len > 3) decimal_len = 3;
|
261
|
+
if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
|
262
|
+
&decimal_value)) {
|
263
|
+
return;
|
264
|
+
}
|
265
|
+
uint32_t decimal_multiplier = 1;
|
266
|
+
for (size_t i = 0; i < (3 - decimal_len); ++i) {
|
267
|
+
decimal_multiplier *= 10;
|
268
|
+
}
|
269
|
+
decimal_value *= decimal_multiplier;
|
270
|
+
}
|
271
|
+
uint32_t whole_value;
|
272
|
+
if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
|
273
|
+
&whole_value)) {
|
274
|
+
return;
|
275
|
+
}
|
276
|
+
milli_token_ratio =
|
277
|
+
static_cast<int>((whole_value * multiplier) + decimal_value);
|
278
|
+
if (milli_token_ratio <= 0) return;
|
279
|
+
}
|
280
|
+
}
|
281
|
+
parsing_state->retry_throttle_data =
|
282
|
+
grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
|
283
|
+
parsing_state->server_name, max_milli_tokens, milli_token_ratio);
|
284
|
+
}
|
285
|
+
}
|
286
|
+
|
287
|
+
static void request_reresolution_locked(void* arg, grpc_error* error) {
|
288
|
+
reresolution_request_args* args =
|
289
|
+
static_cast<reresolution_request_args*>(arg);
|
290
|
+
channel_data* chand = args->chand;
|
291
|
+
// If this invocation is for a stale LB policy, treat it as an LB shutdown
|
292
|
+
// signal.
|
293
|
+
if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE ||
|
294
|
+
chand->resolver == nullptr) {
|
295
|
+
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution");
|
296
|
+
gpr_free(args);
|
297
|
+
return;
|
298
|
+
}
|
299
|
+
if (grpc_client_channel_trace.enabled()) {
|
300
|
+
gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
|
301
|
+
}
|
302
|
+
chand->resolver->RequestReresolutionLocked();
|
303
|
+
// Give back the closure to the LB policy.
|
304
|
+
chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
|
305
|
+
}
|
306
|
+
|
307
|
+
// TODO(roth): The logic in this function is very hard to follow. We
|
308
|
+
// should refactor this so that it's easier to understand, perhaps as
|
309
|
+
// part of changing the resolver API to more clearly differentiate
|
310
|
+
// between transient failures and shutdown.
|
311
|
+
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
|
312
|
+
channel_data* chand = static_cast<channel_data*>(arg);
|
313
|
+
if (grpc_client_channel_trace.enabled()) {
|
314
|
+
gpr_log(GPR_DEBUG,
|
315
|
+
"chand=%p: got resolver result: resolver_result=%p error=%s", chand,
|
316
|
+
chand->resolver_result, grpc_error_string(error));
|
317
|
+
}
|
318
|
+
// Extract the following fields from the resolver result, if non-nullptr.
|
319
|
+
bool lb_policy_updated = false;
|
320
|
+
bool lb_policy_created = false;
|
321
|
+
char* lb_policy_name_dup = nullptr;
|
322
|
+
bool lb_policy_name_changed = false;
|
323
|
+
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
|
324
|
+
char* service_config_json = nullptr;
|
325
|
+
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
|
326
|
+
grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
|
327
|
+
if (chand->resolver_result != nullptr) {
|
328
|
+
if (chand->resolver != nullptr) {
|
329
|
+
// Find LB policy name.
|
330
|
+
const grpc_arg* channel_arg = grpc_channel_args_find(
|
331
|
+
chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
|
332
|
+
const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
|
333
|
+
// Special case: If at least one balancer address is present, we use
|
334
|
+
// the grpclb policy, regardless of what the resolver actually specified.
|
335
|
+
channel_arg =
|
336
|
+
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
|
337
|
+
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
|
338
|
+
grpc_lb_addresses* addresses =
|
339
|
+
static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
|
340
|
+
bool found_balancer_address = false;
|
341
|
+
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
342
|
+
if (addresses->addresses[i].is_balancer) {
|
343
|
+
found_balancer_address = true;
|
344
|
+
break;
|
345
|
+
}
|
346
|
+
}
|
347
|
+
if (found_balancer_address) {
|
348
|
+
if (lb_policy_name != nullptr &&
|
349
|
+
strcmp(lb_policy_name, "grpclb") != 0) {
|
350
|
+
gpr_log(GPR_INFO,
|
351
|
+
"resolver requested LB policy %s but provided at least one "
|
352
|
+
"balancer address -- forcing use of grpclb LB policy",
|
353
|
+
lb_policy_name);
|
354
|
+
}
|
355
|
+
lb_policy_name = "grpclb";
|
356
|
+
}
|
357
|
+
}
|
358
|
+
// Use pick_first if nothing was specified and we didn't select grpclb
|
359
|
+
// above.
|
360
|
+
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
|
361
|
+
// Check to see if we're already using the right LB policy.
|
362
|
+
// Note: It's safe to use chand->info_lb_policy_name here without
|
363
|
+
// taking a lock on chand->info_mu, because this function is the
|
364
|
+
// only thing that modifies its value, and it can only be invoked
|
365
|
+
// once at any given time.
|
366
|
+
lb_policy_name_changed =
|
367
|
+
chand->info_lb_policy_name == nullptr ||
|
368
|
+
gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
|
369
|
+
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
|
370
|
+
// Continue using the same LB policy. Update with new addresses.
|
371
|
+
lb_policy_updated = true;
|
372
|
+
chand->lb_policy->UpdateLocked(*chand->resolver_result);
|
373
|
+
} else {
|
374
|
+
// Instantiate new LB policy.
|
375
|
+
grpc_core::LoadBalancingPolicy::Args lb_policy_args;
|
376
|
+
lb_policy_args.combiner = chand->combiner;
|
377
|
+
lb_policy_args.client_channel_factory = chand->client_channel_factory;
|
378
|
+
lb_policy_args.args = chand->resolver_result;
|
379
|
+
new_lb_policy =
|
380
|
+
grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
|
381
|
+
lb_policy_name, lb_policy_args);
|
382
|
+
if (new_lb_policy == nullptr) {
|
383
|
+
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
|
384
|
+
lb_policy_name);
|
385
|
+
} else {
|
386
|
+
lb_policy_created = true;
|
387
|
+
reresolution_request_args* args =
|
388
|
+
static_cast<reresolution_request_args*>(
|
389
|
+
gpr_zalloc(sizeof(*args)));
|
390
|
+
args->chand = chand;
|
391
|
+
args->lb_policy = new_lb_policy.get();
|
392
|
+
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
|
393
|
+
grpc_combiner_scheduler(chand->combiner));
|
394
|
+
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
|
395
|
+
new_lb_policy->SetReresolutionClosureLocked(&args->closure);
|
396
|
+
}
|
397
|
+
}
|
398
|
+
// Before we clean up, save a copy of lb_policy_name, since it might
|
399
|
+
// be pointing to data inside chand->resolver_result.
|
400
|
+
// The copy will be saved in chand->lb_policy_name below.
|
401
|
+
lb_policy_name_dup = gpr_strdup(lb_policy_name);
|
402
|
+
// Find service config.
|
403
|
+
channel_arg = grpc_channel_args_find(chand->resolver_result,
|
404
|
+
GRPC_ARG_SERVICE_CONFIG);
|
405
|
+
service_config_json =
|
406
|
+
gpr_strdup(grpc_channel_arg_get_string(channel_arg));
|
407
|
+
if (service_config_json != nullptr) {
|
408
|
+
grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
|
409
|
+
grpc_core::ServiceConfig::Create(service_config_json);
|
410
|
+
if (service_config != nullptr) {
|
411
|
+
if (chand->enable_retries) {
|
412
|
+
channel_arg = grpc_channel_args_find(chand->resolver_result,
|
413
|
+
GRPC_ARG_SERVER_URI);
|
414
|
+
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
|
415
|
+
GPR_ASSERT(server_uri != nullptr);
|
416
|
+
grpc_uri* uri = grpc_uri_parse(server_uri, true);
|
417
|
+
GPR_ASSERT(uri->path[0] != '\0');
|
418
|
+
service_config_parsing_state parsing_state;
|
419
|
+
memset(&parsing_state, 0, sizeof(parsing_state));
|
420
|
+
parsing_state.server_name =
|
421
|
+
uri->path[0] == '/' ? uri->path + 1 : uri->path;
|
422
|
+
service_config->ParseGlobalParams(parse_retry_throttle_params,
|
423
|
+
&parsing_state);
|
424
|
+
grpc_uri_destroy(uri);
|
425
|
+
retry_throttle_data = std::move(parsing_state.retry_throttle_data);
|
426
|
+
}
|
427
|
+
method_params_table = service_config->CreateMethodConfigTable(
|
428
|
+
ClientChannelMethodParams::CreateFromJson);
|
429
|
+
}
|
430
|
+
}
|
431
|
+
}
|
432
|
+
}
|
433
|
+
if (grpc_client_channel_trace.enabled()) {
|
434
|
+
gpr_log(GPR_DEBUG,
|
435
|
+
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
|
436
|
+
"service_config=\"%s\"",
|
437
|
+
chand, lb_policy_name_dup,
|
438
|
+
lb_policy_name_changed ? " (changed)" : "", service_config_json);
|
439
|
+
}
|
440
|
+
// Now swap out fields in chand. Note that the new values may still
|
441
|
+
// be nullptr if (e.g.) the resolver failed to return results or the
|
442
|
+
// results did not contain the necessary data.
|
443
|
+
//
|
444
|
+
// First, swap out the data used by cc_get_channel_info().
|
445
|
+
gpr_mu_lock(&chand->info_mu);
|
446
|
+
if (lb_policy_name_dup != nullptr) {
|
447
|
+
gpr_free(chand->info_lb_policy_name);
|
448
|
+
chand->info_lb_policy_name = lb_policy_name_dup;
|
449
|
+
}
|
450
|
+
if (service_config_json != nullptr) {
|
451
|
+
gpr_free(chand->info_service_config_json);
|
452
|
+
chand->info_service_config_json = service_config_json;
|
453
|
+
}
|
454
|
+
gpr_mu_unlock(&chand->info_mu);
|
455
|
+
// Swap out the retry throttle data.
|
456
|
+
chand->retry_throttle_data = std::move(retry_throttle_data);
|
457
|
+
// Swap out the method params table.
|
458
|
+
chand->method_params_table = std::move(method_params_table);
|
459
|
+
// If we have a new LB policy or are shutting down (in which case
|
460
|
+
// new_lb_policy will be nullptr), swap out the LB policy, unreffing the
|
461
|
+
// old one and removing its fds from chand->interested_parties.
|
462
|
+
// Note that we do NOT do this if either (a) we updated the existing
|
463
|
+
// LB policy above or (b) we failed to create the new LB policy (in
|
464
|
+
// which case we want to continue using the most recent one we had).
|
465
|
+
if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
|
466
|
+
chand->resolver == nullptr) {
|
467
|
+
if (chand->lb_policy != nullptr) {
|
468
|
+
if (grpc_client_channel_trace.enabled()) {
|
469
|
+
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
|
470
|
+
chand->lb_policy.get());
|
471
|
+
}
|
472
|
+
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
|
473
|
+
chand->interested_parties);
|
474
|
+
chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
|
475
|
+
chand->lb_policy.reset();
|
476
|
+
}
|
477
|
+
chand->lb_policy = std::move(new_lb_policy);
|
478
|
+
}
|
479
|
+
// Now that we've swapped out the relevant fields of chand, check for
|
480
|
+
// error or shutdown.
|
481
|
+
if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
|
482
|
+
if (grpc_client_channel_trace.enabled()) {
|
483
|
+
gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
|
484
|
+
}
|
485
|
+
if (chand->resolver != nullptr) {
|
486
|
+
if (grpc_client_channel_trace.enabled()) {
|
487
|
+
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
|
488
|
+
}
|
489
|
+
chand->resolver.reset();
|
490
|
+
}
|
491
|
+
set_channel_connectivity_state_locked(
|
492
|
+
chand, GRPC_CHANNEL_SHUTDOWN,
|
493
|
+
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
494
|
+
"Got resolver result after disconnection", &error, 1),
|
495
|
+
"resolver_gone");
|
496
|
+
grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
|
497
|
+
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
498
|
+
"Channel disconnected", &error, 1));
|
499
|
+
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
|
500
|
+
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
|
501
|
+
grpc_channel_args_destroy(chand->resolver_result);
|
502
|
+
chand->resolver_result = nullptr;
|
503
|
+
} else { // Not shutting down.
|
504
|
+
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
|
505
|
+
grpc_error* state_error =
|
506
|
+
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
|
507
|
+
if (lb_policy_created) {
|
508
|
+
if (grpc_client_channel_trace.enabled()) {
|
509
|
+
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
|
510
|
+
}
|
511
|
+
GRPC_ERROR_UNREF(state_error);
|
512
|
+
state = chand->lb_policy->CheckConnectivityLocked(&state_error);
|
513
|
+
grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
|
514
|
+
chand->interested_parties);
|
515
|
+
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
|
516
|
+
if (chand->exit_idle_when_lb_policy_arrives) {
|
517
|
+
chand->lb_policy->ExitIdleLocked();
|
518
|
+
chand->exit_idle_when_lb_policy_arrives = false;
|
519
|
+
}
|
520
|
+
watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
|
521
|
+
} else if (chand->resolver_result == nullptr) {
|
522
|
+
// Transient failure.
|
523
|
+
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
|
524
|
+
}
|
525
|
+
if (!lb_policy_updated) {
|
526
|
+
set_channel_connectivity_state_locked(
|
527
|
+
chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
|
528
|
+
}
|
529
|
+
grpc_channel_args_destroy(chand->resolver_result);
|
530
|
+
chand->resolver_result = nullptr;
|
531
|
+
chand->resolver->NextLocked(&chand->resolver_result,
|
532
|
+
&chand->on_resolver_result_changed);
|
533
|
+
GRPC_ERROR_UNREF(state_error);
|
534
|
+
}
|
535
|
+
}
|
536
|
+
|
537
|
+
// Combiner-scheduled continuation for cc_start_transport_op().
// Runs under the channel combiner, so it may touch channel_data fields
// (state_tracker, lb_policy, resolver) without additional locking.
// Handles each sub-operation carried by *op, then releases the stack
// ref taken in cc_start_transport_op() and signals op->on_consumed.
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
  grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
  // The channel element was stashed in extra_arg by cc_start_transport_op().
  grpc_channel_element* elem =
      static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);

  // Connectivity watch: register the caller's closure with the state
  // tracker, then null out the op fields to mark them as consumed.
  if (op->on_connectivity_state_change != nullptr) {
    grpc_connectivity_state_notify_on_state_change(
        &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = nullptr;
    op->connectivity_state = nullptr;
  }

  // Ping: without an LB policy there is no transport to ping, so fail
  // both callbacks; otherwise delegate to the LB policy.
  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
    if (chand->lb_policy == nullptr) {
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_initiate,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
      GRPC_CLOSURE_SCHED(
          op->send_ping.on_ack,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
    } else {
      chand->lb_policy->PingOneLocked(op->send_ping.on_initiate,
                                      op->send_ping.on_ack);
      op->bind_pollset = nullptr;
    }
    op->send_ping.on_initiate = nullptr;
    op->send_ping.on_ack = nullptr;
  }

  // Disconnect: move the channel to SHUTDOWN, drop the resolver, fail
  // any closures still waiting on resolver results, and release the LB
  // policy (removing its fds from our pollset set first).
  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != nullptr) {
      set_channel_connectivity_state_locked(
          chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      chand->resolver.reset();
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
      }
      if (chand->lb_policy != nullptr) {
        grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                         chand->interested_parties);
        chand->lb_policy.reset();
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  // Balances the GRPC_CHANNEL_STACK_REF in cc_start_transport_op().
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");

  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
}
|
591
|
+
|
592
|
+
// Channel-filter entry point for channel-level transport ops.
// Performs the work that is safe outside the combiner (pollset binding),
// takes a ref on the channel stack to keep it alive across the hop, and
// bounces the rest of the op into the combiner via
// start_transport_op_locked().
static void cc_start_transport_op(grpc_channel_element* elem,
                                  grpc_transport_op* op) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);

  // This filter does not accept server streams.
  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != nullptr) {
    grpc_pollset_set_add_pollset(chand->interested_parties, op->bind_pollset);
  }

  // Stash elem so start_transport_op_locked() can recover it from the op.
  op->handler_private.extra_arg = elem;
  // Ref taken here; released in start_transport_op_locked().
  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
  GRPC_CLOSURE_SCHED(
      GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
                        op, grpc_combiner_scheduler(chand->combiner)),
      GRPC_ERROR_NONE);
}
|
608
|
+
|
609
|
+
// Returns channel info to the caller.  For each requested field, hands
// back a heap-allocated copy (owned by the caller) of the cached value,
// or nullptr if nothing is cached yet.  chand->info_mu guards the
// cached strings, which are written by the resolver-result path.
static void cc_get_channel_info(grpc_channel_element* elem,
                                const grpc_channel_info* info) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  gpr_mu_lock(&chand->info_mu);
  if (info->lb_policy_name != nullptr) {
    if (chand->info_lb_policy_name == nullptr) {
      *info->lb_policy_name = nullptr;
    } else {
      *info->lb_policy_name = gpr_strdup(chand->info_lb_policy_name);
    }
  }
  if (info->service_config_json != nullptr) {
    if (chand->info_service_config_json == nullptr) {
      *info->service_config_json = nullptr;
    } else {
      *info->service_config_json = gpr_strdup(chand->info_service_config_json);
    }
  }
  gpr_mu_unlock(&chand->info_mu);
}
|
626
|
+
|
627
|
+
/* Constructor for channel_data */
|
628
|
+
// Initializes the client-channel filter's channel_data.  Reads
// configuration out of args->channel_args (retry settings, client
// channel factory, server URI) and instantiates the resolver.
// Returns GRPC_ERROR_NONE on success, or a static-string error if a
// required channel arg is missing/malformed or the resolver cannot be
// created.  On error, fields initialized so far are cleaned up by
// cc_destroy_channel_elem().
static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
                                        grpc_channel_element_args* args) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  // This filter must be the last one in the channel stack.
  GPR_ASSERT(args->is_last);
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  // Initialize data members.
  chand->combiner = grpc_combiner_create();
  gpr_mu_init(&chand->info_mu);
  gpr_mu_init(&chand->external_connectivity_watcher_list_mu);

  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  chand->external_connectivity_watcher_list_head = nullptr;
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);

  chand->owning_stack = args->channel_stack;
  // Closure invoked (under the combiner) each time the resolver
  // produces a new result.
  GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
                    on_resolver_result_changed_locked, chand,
                    grpc_combiner_scheduler(chand->combiner));
  chand->interested_parties = grpc_pollset_set_create();
  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                               "client_channel");
  grpc_client_channel_start_backup_polling(chand->interested_parties);
  // Record max per-RPC retry buffer size.
  const grpc_arg* arg = grpc_channel_args_find(
      args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
  chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer(
      arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX});
  // Record enable_retries.
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
  chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
  // Record client channel factory.
  arg = grpc_channel_args_find(args->channel_args,
                               GRPC_ARG_CLIENT_CHANNEL_FACTORY);
  if (arg == nullptr) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "Missing client channel factory in args for client channel filter");
  }
  if (arg->type != GRPC_ARG_POINTER) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "client channel factory arg must be a pointer");
  }
  // Take our own ref on the factory; released in cc_destroy_channel_elem().
  grpc_client_channel_factory_ref(
      static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
  chand->client_channel_factory =
      static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
  // Get server name to resolve, using proxy mapper if needed.
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
  if (arg == nullptr) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "Missing server uri in args for client channel filter");
  }
  if (arg->type != GRPC_ARG_STRING) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
        "server uri arg must be a string");
  }
  // Proxy mapping may rewrite the target name and/or the channel args;
  // the rewritten values are owned by us and freed below.
  char* proxy_name = nullptr;
  grpc_channel_args* new_args = nullptr;
  grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
                              &proxy_name, &new_args);
  // Instantiate resolver.
  chand->resolver = grpc_core::ResolverRegistry::CreateResolver(
      proxy_name != nullptr ? proxy_name : arg->value.string,
      new_args != nullptr ? new_args : args->channel_args,
      chand->interested_parties, chand->combiner);
  if (proxy_name != nullptr) gpr_free(proxy_name);
  if (new_args != nullptr) grpc_channel_args_destroy(new_args);
  if (chand->resolver == nullptr) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
  chand->deadline_checking_enabled =
      grpc_deadline_checking_enabled(args->channel_args);
  return GRPC_ERROR_NONE;
}
|
701
|
+
|
702
|
+
static void shutdown_resolver_locked(void* arg, grpc_error* error) {
|
703
|
+
grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
|
704
|
+
resolver->Orphan();
|
705
|
+
}
|
706
|
+
|
707
|
+
/* Destructor for channel_data */
|
708
|
+
// Tears down channel_data, releasing everything acquired in
// cc_init_channel_elem().  The resolver is not destroyed inline:
// ownership is released and its Orphan() is scheduled on the combiner
// (shutdown_resolver_locked) so it is shut down under the proper lock.
static void cc_destroy_channel_elem(grpc_channel_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->resolver != nullptr) {
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
                            grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  }
  if (chand->client_channel_factory != nullptr) {
    // Balances grpc_client_channel_factory_ref() taken at init.
    grpc_client_channel_factory_unref(chand->client_channel_factory);
  }
  if (chand->lb_policy != nullptr) {
    // Remove the LB policy's fds from our pollset set before dropping it.
    grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
                                     chand->interested_parties);
    chand->lb_policy.reset();
  }
  // Cached strings handed out via cc_get_channel_info().
  gpr_free(chand->info_lb_policy_name);
  gpr_free(chand->info_service_config_json);
  chand->retry_throttle_data.reset();
  chand->method_params_table.reset();
  grpc_client_channel_stop_backup_polling(chand->interested_parties);
  grpc_connectivity_state_destroy(&chand->state_tracker);
  grpc_pollset_set_destroy(chand->interested_parties);
  GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
  gpr_mu_destroy(&chand->info_mu);
  gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
}
|
735
|
+
|
736
|
+
/*************************************************************************
|
737
|
+
* PER-CALL FUNCTIONS
|
738
|
+
*/
|
739
|
+
|
740
|
+
// Max number of batches that can be pending on a call at any given
|
741
|
+
// time. This includes one batch for each of the following ops:
|
742
|
+
// recv_initial_metadata
|
743
|
+
// send_initial_metadata
|
744
|
+
// recv_message
|
745
|
+
// send_message
|
746
|
+
// recv_trailing_metadata
|
747
|
+
// send_trailing_metadata
|
748
|
+
#define MAX_PENDING_BATCHES 6
|
749
|
+
|
750
|
+
// Retry support:
|
751
|
+
//
|
752
|
+
// In order to support retries, we act as a proxy for stream op batches.
|
753
|
+
// When we get a batch from the surface, we add it to our list of pending
|
754
|
+
// batches, and we then use those batches to construct separate "child"
|
755
|
+
// batches to be started on the subchannel call. When the child batches
|
756
|
+
// return, we then decide which pending batches have been completed and
|
757
|
+
// schedule their callbacks accordingly. If a subchannel call fails and
|
758
|
+
// we want to retry it, we do a new pick and start again, constructing
|
759
|
+
// new "child" batches for the new subchannel call.
|
760
|
+
//
|
761
|
+
// Note that retries are committed when receiving data from the server
|
762
|
+
// (except for Trailers-Only responses). However, there may be many
|
763
|
+
// send ops started before receiving any data, so we may have already
|
764
|
+
// completed some number of send ops (and returned the completions up to
|
765
|
+
// the surface) by the time we realize that we need to retry. To deal
|
766
|
+
// with this, we cache data for send ops, so that we can replay them on a
|
767
|
+
// different subchannel call even after we have completed the original
|
768
|
+
// batches.
|
769
|
+
//
|
770
|
+
// There are two sets of data to maintain:
|
771
|
+
// - In call_data (in the parent channel), we maintain a list of pending
|
772
|
+
// ops and cached data for send ops.
|
773
|
+
// - In the subchannel call, we maintain state to indicate what ops have
|
774
|
+
// already been sent down to that call.
|
775
|
+
//
|
776
|
+
// When constructing the "child" batches, we compare those two sets of
|
777
|
+
// data to see which batches need to be sent to the subchannel call.
|
778
|
+
|
779
|
+
// TODO(roth): In subsequent PRs:
|
780
|
+
// - add support for transparent retries (including initial metadata)
|
781
|
+
// - figure out how to record stats in census for retries
|
782
|
+
// (census filter is on top of this one)
|
783
|
+
// - add census stats for retries
|
784
|
+
|
785
|
+
// State used for starting a retryable batch on a subchannel call.
|
786
|
+
// This provides its own grpc_transport_stream_op_batch and other data
|
787
|
+
// structures needed to populate the ops in the batch.
|
788
|
+
// We allocate one struct on the arena for each attempt at starting a
|
789
|
+
// batch on a given subchannel call.
|
790
|
+
typedef struct {
  gpr_refcount refs;
  grpc_call_element* elem;
  grpc_subchannel_call* subchannel_call;  // Holds a ref.
  // The batch to use in the subchannel call.
  // Its payload field points to subchannel_call_retry_state.batch_payload.
  grpc_transport_stream_op_batch batch;
  // For send_initial_metadata.
  // Note that we need to make a copy of the initial metadata for each
  // subchannel call instead of just referring to the copy in call_data,
  // because filters in the subchannel stack will probably add entries,
  // so we need to start in a pristine state for each attempt of the call.
  grpc_linked_mdelem* send_initial_metadata_storage;
  grpc_metadata_batch send_initial_metadata;
  // For send_message.
  // Per-attempt view over the cached message slices in call_data.
  grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
      send_message;
  // For send_trailing_metadata.
  grpc_linked_mdelem* send_trailing_metadata_storage;
  grpc_metadata_batch send_trailing_metadata;
  // For intercepting recv_initial_metadata.
  grpc_metadata_batch recv_initial_metadata;
  grpc_closure recv_initial_metadata_ready;
  bool trailing_metadata_available;
  // For intercepting recv_message.
  grpc_closure recv_message_ready;
  grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
  // For intercepting recv_trailing_metadata.
  grpc_metadata_batch recv_trailing_metadata;
  grpc_transport_stream_stats collect_stats;
  // For intercepting on_complete.
  grpc_closure on_complete;
} subchannel_batch_data;
|
823
|
+
|
824
|
+
// Retry state associated with a subchannel call.
|
825
|
+
// Stored in the parent_data of the subchannel call object.
|
826
|
+
typedef struct {
  // subchannel_batch_data.batch.payload points to this.
  grpc_transport_stream_op_batch_payload batch_payload;
  // These fields indicate which ops have been started and completed on
  // this subchannel call.
  size_t started_send_message_count;
  size_t completed_send_message_count;
  size_t started_recv_message_count;
  size_t completed_recv_message_count;
  bool started_send_initial_metadata : 1;
  bool completed_send_initial_metadata : 1;
  bool started_send_trailing_metadata : 1;
  bool completed_send_trailing_metadata : 1;
  bool started_recv_initial_metadata : 1;
  bool completed_recv_initial_metadata : 1;
  bool started_recv_trailing_metadata : 1;
  bool completed_recv_trailing_metadata : 1;
  // State for callback processing.
  bool retry_dispatched : 1;
  // NOTE(review): the *_deferred flags appear to mark recv callbacks
  // whose invocation has been postponed, with the matching error stashed
  // in the fields below until release — confirm against the callback code.
  bool recv_initial_metadata_ready_deferred : 1;
  bool recv_message_ready_deferred : 1;
  grpc_error* recv_initial_metadata_error;
  grpc_error* recv_message_error;
} subchannel_call_retry_state;
|
850
|
+
|
851
|
+
// Pending batches stored in call data.
|
852
|
+
// One slot in call_data.pending_batches (indexed by get_batch_index()).
typedef struct {
  // The pending batch. If nullptr, this slot is empty.
  grpc_transport_stream_op_batch* batch;
  // Indicates whether payload for send ops has been cached in call data.
  // Set by maybe_cache_send_ops_for_batch().
  bool send_ops_cached;
} pending_batch;
|
858
|
+
|
859
|
+
/** Call data. Holds a pointer to grpc_subchannel_call and the
|
860
|
+
associated machinery to create such a pointer.
|
861
|
+
Handles queueing of stream ops until a call object is ready, waiting
|
862
|
+
for initial metadata before trying to create a call object,
|
863
|
+
and handling cancellation gracefully. */
|
864
|
+
typedef struct client_channel_call_data {
  // State for handling deadlines.
  // The code in deadline_filter.c requires this to be the first field.
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
  // and this struct both independently store pointers to the call stack
  // and call combiner. If/when we have time, find a way to avoid this
  // without breaking the grpc_deadline_state abstraction.
  grpc_deadline_state deadline_state;

  grpc_slice path;  // Request path.
  gpr_timespec call_start_time;
  grpc_millis deadline;
  gpr_arena* arena;  // Arena used for send-op caching allocations below.
  grpc_call_stack* owning_call;
  grpc_call_combiner* call_combiner;

  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
  grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;

  grpc_subchannel_call* subchannel_call;

  // Set when we get a cancel_stream op.
  grpc_error* cancel_error;

  // LB pick state and the closures used to start/cancel a pick.
  grpc_core::LoadBalancingPolicy::PickState pick;
  grpc_closure pick_closure;
  grpc_closure pick_cancel_closure;

  grpc_polling_entity* pollent;

  // Batches are added to this list when received from above.
  // They are removed when we are done handling the batch (i.e., when
  // either we have invoked all of the batch's callbacks or we have
  // passed the batch down to the subchannel call and are not
  // intercepting any of its callbacks).
  pending_batch pending_batches[MAX_PENDING_BATCHES];
  bool pending_send_initial_metadata : 1;
  bool pending_send_message : 1;
  bool pending_send_trailing_metadata : 1;

  // Retry state.
  bool enable_retries : 1;
  bool retry_committed : 1;
  bool last_attempt_got_server_pushback : 1;
  int num_attempts_completed;
  // Total bytes cached for retries; checked against the per-RPC limit
  // when adding pending batches.
  size_t bytes_buffered_for_retry;
  grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
  grpc_timer retry_timer;

  // Cached data for retrying send ops.
  // send_initial_metadata
  bool seen_send_initial_metadata;
  grpc_linked_mdelem* send_initial_metadata_storage;
  grpc_metadata_batch send_initial_metadata;
  uint32_t send_initial_metadata_flags;
  gpr_atm* peer_string;
  // send_message
  // When we get a send_message op, we replace the original byte stream
  // with a CachingByteStream that caches the slices to a local buffer for
  // use in retries.
  // Note: We inline the cache for the first 3 send_message ops and use
  // dynamic allocation after that. This number was essentially picked
  // at random; it could be changed in the future to tune performance.
  grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
  // send_trailing_metadata
  bool seen_send_trailing_metadata;
  grpc_linked_mdelem* send_trailing_metadata_storage;
  grpc_metadata_batch send_trailing_metadata;
} call_data;
|
933
|
+
|
934
|
+
// Forward declarations.
|
935
|
+
static void retry_commit(grpc_call_element* elem,
|
936
|
+
subchannel_call_retry_state* retry_state);
|
937
|
+
static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
|
938
|
+
static void on_complete(void* arg, grpc_error* error);
|
939
|
+
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
|
940
|
+
static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
|
941
|
+
static void start_pick_locked(void* arg, grpc_error* ignored);
|
942
|
+
|
943
|
+
//
|
944
|
+
// send op data caching
|
945
|
+
//
|
946
|
+
|
947
|
+
// Caches data for send ops so that it can be retried later, if not
|
948
|
+
// already cached.
|
949
|
+
static void maybe_cache_send_ops_for_batch(call_data* calld,
                                           pending_batch* pending) {
  // Idempotent: each pending batch is cached at most once.
  if (pending->send_ops_cached) return;
  pending->send_ops_cached = true;
  grpc_transport_stream_op_batch* batch = pending->batch;
  // Save a copy of metadata for send_initial_metadata ops.
  if (batch->send_initial_metadata) {
    calld->seen_send_initial_metadata = true;
    GPR_ASSERT(calld->send_initial_metadata_storage == nullptr);
    grpc_metadata_batch* send_initial_metadata =
        batch->payload->send_initial_metadata.send_initial_metadata;
    // Storage for the copied mdelems lives in the call arena, so it is
    // freed with the call rather than individually.
    calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc(
        calld->arena,
        sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
    grpc_metadata_batch_copy(send_initial_metadata,
                             &calld->send_initial_metadata,
                             calld->send_initial_metadata_storage);
    calld->send_initial_metadata_flags =
        batch->payload->send_initial_metadata.send_initial_metadata_flags;
    calld->peer_string = batch->payload->send_initial_metadata.peer_string;
  }
  // Set up cache for send_message ops.
  if (batch->send_message) {
    // Arena-allocated cache; constructed in place and taking over the
    // original byte stream so slices can be replayed on retry.
    grpc_core::ByteStreamCache* cache =
        static_cast<grpc_core::ByteStreamCache*>(
            gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
    new (cache) grpc_core::ByteStreamCache(
        std::move(batch->payload->send_message.send_message));
    calld->send_messages.push_back(cache);
  }
  // Save metadata batch for send_trailing_metadata ops.
  if (batch->send_trailing_metadata) {
    calld->seen_send_trailing_metadata = true;
    GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr);
    grpc_metadata_batch* send_trailing_metadata =
        batch->payload->send_trailing_metadata.send_trailing_metadata;
    calld->send_trailing_metadata_storage =
        (grpc_linked_mdelem*)gpr_arena_alloc(
            calld->arena,
            sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
    grpc_metadata_batch_copy(send_trailing_metadata,
                             &calld->send_trailing_metadata,
                             calld->send_trailing_metadata_storage);
  }
}
|
994
|
+
|
995
|
+
// Frees cached send ops that have already been completed after
|
996
|
+
// committing the call.
|
997
|
+
static void free_cached_send_op_data_after_commit(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Only destroy cached data for ops this subchannel call has already
  // completed; in-flight ops may still need their cached payloads.
  if (retry_state->completed_send_initial_metadata) {
    grpc_metadata_batch_destroy(&calld->send_initial_metadata);
  }
  for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
              "]",
              chand, calld, i);
    }
    // Destroy() rather than delete: caches are placement-new'd in the
    // call arena (see maybe_cache_send_ops_for_batch()).
    calld->send_messages[i]->Destroy();
  }
  if (retry_state->completed_send_trailing_metadata) {
    grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
  }
}
|
1017
|
+
|
1018
|
+
// Frees cached send ops that were completed by the completed batch in
|
1019
|
+
// batch_data. Used when batches are completed after the call is committed.
|
1020
|
+
static void free_cached_send_op_data_for_completed_batch(
    grpc_call_element* elem, subchannel_batch_data* batch_data,
    subchannel_call_retry_state* retry_state) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (batch_data->batch.send_initial_metadata) {
    grpc_metadata_batch_destroy(&calld->send_initial_metadata);
  }
  if (batch_data->batch.send_message) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
              "]",
              chand, calld, retry_state->completed_send_message_count - 1);
    }
    // completed_send_message_count has already been bumped for this
    // batch, so the message it sent is at index count - 1.
    calld->send_messages[retry_state->completed_send_message_count - 1]
        ->Destroy();
  }
  if (batch_data->batch.send_trailing_metadata) {
    grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
  }
}
|
1042
|
+
|
1043
|
+
//
|
1044
|
+
// pending_batches management
|
1045
|
+
//
|
1046
|
+
|
1047
|
+
// Returns the index into calld->pending_batches to be used for batch.
|
1048
|
+
static size_t get_batch_index(grpc_transport_stream_op_batch* batch) {
|
1049
|
+
// Note: It is important the send_initial_metadata be the first entry
|
1050
|
+
// here, since the code in pick_subchannel_locked() assumes it will be.
|
1051
|
+
if (batch->send_initial_metadata) return 0;
|
1052
|
+
if (batch->send_message) return 1;
|
1053
|
+
if (batch->send_trailing_metadata) return 2;
|
1054
|
+
if (batch->recv_initial_metadata) return 3;
|
1055
|
+
if (batch->recv_message) return 4;
|
1056
|
+
if (batch->recv_trailing_metadata) return 5;
|
1057
|
+
GPR_UNREACHABLE_CODE(return (size_t)-1);
|
1058
|
+
}
|
1059
|
+
|
1060
|
+
// This is called via the call combiner, so access to calld is synchronized.
|
1061
|
+
static void pending_batches_add(grpc_call_element* elem,
|
1062
|
+
grpc_transport_stream_op_batch* batch) {
|
1063
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1064
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1065
|
+
const size_t idx = get_batch_index(batch);
|
1066
|
+
if (grpc_client_channel_trace.enabled()) {
|
1067
|
+
gpr_log(GPR_DEBUG,
|
1068
|
+
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
|
1069
|
+
calld, idx);
|
1070
|
+
}
|
1071
|
+
pending_batch* pending = &calld->pending_batches[idx];
|
1072
|
+
GPR_ASSERT(pending->batch == nullptr);
|
1073
|
+
pending->batch = batch;
|
1074
|
+
pending->send_ops_cached = false;
|
1075
|
+
if (calld->enable_retries) {
|
1076
|
+
// Update state in calld about pending batches.
|
1077
|
+
// Also check if the batch takes us over the retry buffer limit.
|
1078
|
+
// Note: We don't check the size of trailing metadata here, because
|
1079
|
+
// gRPC clients do not send trailing metadata.
|
1080
|
+
if (batch->send_initial_metadata) {
|
1081
|
+
calld->pending_send_initial_metadata = true;
|
1082
|
+
calld->bytes_buffered_for_retry += grpc_metadata_batch_size(
|
1083
|
+
batch->payload->send_initial_metadata.send_initial_metadata);
|
1084
|
+
}
|
1085
|
+
if (batch->send_message) {
|
1086
|
+
calld->pending_send_message = true;
|
1087
|
+
calld->bytes_buffered_for_retry +=
|
1088
|
+
batch->payload->send_message.send_message->length();
|
1089
|
+
}
|
1090
|
+
if (batch->send_trailing_metadata) {
|
1091
|
+
calld->pending_send_trailing_metadata = true;
|
1092
|
+
}
|
1093
|
+
if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
|
1094
|
+
if (grpc_client_channel_trace.enabled()) {
|
1095
|
+
gpr_log(GPR_DEBUG,
|
1096
|
+
"chand=%p calld=%p: exceeded retry buffer size, committing",
|
1097
|
+
chand, calld);
|
1098
|
+
}
|
1099
|
+
subchannel_call_retry_state* retry_state =
|
1100
|
+
calld->subchannel_call == nullptr
|
1101
|
+
? nullptr
|
1102
|
+
: static_cast<subchannel_call_retry_state*>(
|
1103
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1104
|
+
calld->subchannel_call));
|
1105
|
+
retry_commit(elem, retry_state);
|
1106
|
+
// If we are not going to retry and have not yet started, pretend
|
1107
|
+
// retries are disabled so that we don't bother with retry overhead.
|
1108
|
+
if (calld->num_attempts_completed == 0) {
|
1109
|
+
if (grpc_client_channel_trace.enabled()) {
|
1110
|
+
gpr_log(GPR_DEBUG,
|
1111
|
+
"chand=%p calld=%p: disabling retries before first attempt",
|
1112
|
+
chand, calld);
|
1113
|
+
}
|
1114
|
+
calld->enable_retries = false;
|
1115
|
+
}
|
1116
|
+
}
|
1117
|
+
}
|
1118
|
+
}
|
1119
|
+
|
1120
|
+
static void pending_batch_clear(call_data* calld, pending_batch* pending) {
|
1121
|
+
if (calld->enable_retries) {
|
1122
|
+
if (pending->batch->send_initial_metadata) {
|
1123
|
+
calld->pending_send_initial_metadata = false;
|
1124
|
+
}
|
1125
|
+
if (pending->batch->send_message) {
|
1126
|
+
calld->pending_send_message = false;
|
1127
|
+
}
|
1128
|
+
if (pending->batch->send_trailing_metadata) {
|
1129
|
+
calld->pending_send_trailing_metadata = false;
|
1130
|
+
}
|
1131
|
+
}
|
1132
|
+
pending->batch = nullptr;
|
1133
|
+
}
|
1134
|
+
|
1135
|
+
// This is called via the call combiner, so access to calld is synchronized.
|
1136
|
+
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
|
1137
|
+
grpc_transport_stream_op_batch* batch =
|
1138
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
1139
|
+
call_data* calld = static_cast<call_data*>(batch->handler_private.extra_arg);
|
1140
|
+
// Note: This will release the call combiner.
|
1141
|
+
grpc_transport_stream_op_batch_finish_with_failure(
|
1142
|
+
batch, GRPC_ERROR_REF(error), calld->call_combiner);
|
1143
|
+
}
|
1144
|
+
|
1145
|
+
// This is called via the call combiner, so access to calld is synchronized.
|
1146
|
+
// If yield_call_combiner is true, assumes responsibility for yielding
|
1147
|
+
// the call combiner.
|
1148
|
+
static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
|
1149
|
+
bool yield_call_combiner) {
|
1150
|
+
GPR_ASSERT(error != GRPC_ERROR_NONE);
|
1151
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1152
|
+
if (grpc_client_channel_trace.enabled()) {
|
1153
|
+
size_t num_batches = 0;
|
1154
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1155
|
+
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
|
1156
|
+
}
|
1157
|
+
gpr_log(GPR_DEBUG,
|
1158
|
+
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
|
1159
|
+
elem->channel_data, calld, num_batches, grpc_error_string(error));
|
1160
|
+
}
|
1161
|
+
grpc_transport_stream_op_batch*
|
1162
|
+
batches[GPR_ARRAY_SIZE(calld->pending_batches)];
|
1163
|
+
size_t num_batches = 0;
|
1164
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1165
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1166
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1167
|
+
if (batch != nullptr) {
|
1168
|
+
batches[num_batches++] = batch;
|
1169
|
+
pending_batch_clear(calld, pending);
|
1170
|
+
}
|
1171
|
+
}
|
1172
|
+
for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
|
1173
|
+
grpc_transport_stream_op_batch* batch = batches[i];
|
1174
|
+
batch->handler_private.extra_arg = calld;
|
1175
|
+
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
|
1176
|
+
fail_pending_batch_in_call_combiner, batch,
|
1177
|
+
grpc_schedule_on_exec_ctx);
|
1178
|
+
GRPC_CALL_COMBINER_START(calld->call_combiner,
|
1179
|
+
&batch->handler_private.closure,
|
1180
|
+
GRPC_ERROR_REF(error), "pending_batches_fail");
|
1181
|
+
}
|
1182
|
+
if (yield_call_combiner) {
|
1183
|
+
if (num_batches > 0) {
|
1184
|
+
// Note: This will release the call combiner.
|
1185
|
+
grpc_transport_stream_op_batch_finish_with_failure(
|
1186
|
+
batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
|
1187
|
+
} else {
|
1188
|
+
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
|
1189
|
+
}
|
1190
|
+
}
|
1191
|
+
GRPC_ERROR_UNREF(error);
|
1192
|
+
}
|
1193
|
+
|
1194
|
+
// This is called via the call combiner, so access to calld is synchronized.
|
1195
|
+
static void resume_pending_batch_in_call_combiner(void* arg,
|
1196
|
+
grpc_error* ignored) {
|
1197
|
+
grpc_transport_stream_op_batch* batch =
|
1198
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
1199
|
+
grpc_subchannel_call* subchannel_call =
|
1200
|
+
static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
|
1201
|
+
// Note: This will release the call combiner.
|
1202
|
+
grpc_subchannel_call_process_op(subchannel_call, batch);
|
1203
|
+
}
|
1204
|
+
|
1205
|
+
// This is called via the call combiner, so access to calld is synchronized.
|
1206
|
+
static void pending_batches_resume(grpc_call_element* elem) {
|
1207
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1208
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1209
|
+
if (calld->enable_retries) {
|
1210
|
+
start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE);
|
1211
|
+
return;
|
1212
|
+
}
|
1213
|
+
// Retries not enabled; send down batches as-is.
|
1214
|
+
if (grpc_client_channel_trace.enabled()) {
|
1215
|
+
size_t num_batches = 0;
|
1216
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1217
|
+
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
|
1218
|
+
}
|
1219
|
+
gpr_log(GPR_DEBUG,
|
1220
|
+
"chand=%p calld=%p: starting %" PRIuPTR
|
1221
|
+
" pending batches on subchannel_call=%p",
|
1222
|
+
chand, calld, num_batches, calld->subchannel_call);
|
1223
|
+
}
|
1224
|
+
grpc_transport_stream_op_batch*
|
1225
|
+
batches[GPR_ARRAY_SIZE(calld->pending_batches)];
|
1226
|
+
size_t num_batches = 0;
|
1227
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1228
|
+
pending_batch* pending = &calld->pending_batches[i];
|
1229
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1230
|
+
if (batch != nullptr) {
|
1231
|
+
batches[num_batches++] = batch;
|
1232
|
+
pending_batch_clear(calld, pending);
|
1233
|
+
}
|
1234
|
+
}
|
1235
|
+
for (size_t i = 1; i < num_batches; ++i) {
|
1236
|
+
grpc_transport_stream_op_batch* batch = batches[i];
|
1237
|
+
batch->handler_private.extra_arg = calld->subchannel_call;
|
1238
|
+
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
|
1239
|
+
resume_pending_batch_in_call_combiner, batch,
|
1240
|
+
grpc_schedule_on_exec_ctx);
|
1241
|
+
GRPC_CALL_COMBINER_START(calld->call_combiner,
|
1242
|
+
&batch->handler_private.closure, GRPC_ERROR_NONE,
|
1243
|
+
"pending_batches_resume");
|
1244
|
+
}
|
1245
|
+
GPR_ASSERT(num_batches > 0);
|
1246
|
+
// Note: This will release the call combiner.
|
1247
|
+
grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
|
1248
|
+
}
|
1249
|
+
|
1250
|
+
static void maybe_clear_pending_batch(grpc_call_element* elem,
|
1251
|
+
pending_batch* pending) {
|
1252
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1253
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1254
|
+
grpc_transport_stream_op_batch* batch = pending->batch;
|
1255
|
+
// We clear the pending batch if all of its callbacks have been
|
1256
|
+
// scheduled and reset to nullptr.
|
1257
|
+
if (batch->on_complete == nullptr &&
|
1258
|
+
(!batch->recv_initial_metadata ||
|
1259
|
+
batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
|
1260
|
+
nullptr) &&
|
1261
|
+
(!batch->recv_message ||
|
1262
|
+
batch->payload->recv_message.recv_message_ready == nullptr)) {
|
1263
|
+
if (grpc_client_channel_trace.enabled()) {
|
1264
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
|
1265
|
+
calld);
|
1266
|
+
}
|
1267
|
+
pending_batch_clear(calld, pending);
|
1268
|
+
}
|
1269
|
+
}
|
1270
|
+
|
1271
|
+
// Returns true if all ops in the pending batch have been completed.
|
1272
|
+
static bool pending_batch_is_completed(
|
1273
|
+
pending_batch* pending, call_data* calld,
|
1274
|
+
subchannel_call_retry_state* retry_state) {
|
1275
|
+
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
1276
|
+
return false;
|
1277
|
+
}
|
1278
|
+
if (pending->batch->send_initial_metadata &&
|
1279
|
+
!retry_state->completed_send_initial_metadata) {
|
1280
|
+
return false;
|
1281
|
+
}
|
1282
|
+
if (pending->batch->send_message &&
|
1283
|
+
retry_state->completed_send_message_count < calld->send_messages.size()) {
|
1284
|
+
return false;
|
1285
|
+
}
|
1286
|
+
if (pending->batch->send_trailing_metadata &&
|
1287
|
+
!retry_state->completed_send_trailing_metadata) {
|
1288
|
+
return false;
|
1289
|
+
}
|
1290
|
+
if (pending->batch->recv_initial_metadata &&
|
1291
|
+
!retry_state->completed_recv_initial_metadata) {
|
1292
|
+
return false;
|
1293
|
+
}
|
1294
|
+
if (pending->batch->recv_message &&
|
1295
|
+
retry_state->completed_recv_message_count <
|
1296
|
+
retry_state->started_recv_message_count) {
|
1297
|
+
return false;
|
1298
|
+
}
|
1299
|
+
if (pending->batch->recv_trailing_metadata &&
|
1300
|
+
!retry_state->completed_recv_trailing_metadata) {
|
1301
|
+
return false;
|
1302
|
+
}
|
1303
|
+
return true;
|
1304
|
+
}
|
1305
|
+
|
1306
|
+
// Returns true if any op in the batch was not yet started.
|
1307
|
+
static bool pending_batch_is_unstarted(
|
1308
|
+
pending_batch* pending, call_data* calld,
|
1309
|
+
subchannel_call_retry_state* retry_state) {
|
1310
|
+
if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
|
1311
|
+
return false;
|
1312
|
+
}
|
1313
|
+
if (pending->batch->send_initial_metadata &&
|
1314
|
+
!retry_state->started_send_initial_metadata) {
|
1315
|
+
return true;
|
1316
|
+
}
|
1317
|
+
if (pending->batch->send_message &&
|
1318
|
+
retry_state->started_send_message_count < calld->send_messages.size()) {
|
1319
|
+
return true;
|
1320
|
+
}
|
1321
|
+
if (pending->batch->send_trailing_metadata &&
|
1322
|
+
!retry_state->started_send_trailing_metadata) {
|
1323
|
+
return true;
|
1324
|
+
}
|
1325
|
+
if (pending->batch->recv_initial_metadata &&
|
1326
|
+
!retry_state->started_recv_initial_metadata) {
|
1327
|
+
return true;
|
1328
|
+
}
|
1329
|
+
if (pending->batch->recv_message &&
|
1330
|
+
retry_state->completed_recv_message_count ==
|
1331
|
+
retry_state->started_recv_message_count) {
|
1332
|
+
return true;
|
1333
|
+
}
|
1334
|
+
if (pending->batch->recv_trailing_metadata &&
|
1335
|
+
!retry_state->started_recv_trailing_metadata) {
|
1336
|
+
return true;
|
1337
|
+
}
|
1338
|
+
return false;
|
1339
|
+
}
|
1340
|
+
|
1341
|
+
//
|
1342
|
+
// retry code
|
1343
|
+
//
|
1344
|
+
|
1345
|
+
// Commits the call so that no further retry attempts will be performed.
|
1346
|
+
static void retry_commit(grpc_call_element* elem,
|
1347
|
+
subchannel_call_retry_state* retry_state) {
|
1348
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1349
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1350
|
+
if (calld->retry_committed) return;
|
1351
|
+
calld->retry_committed = true;
|
1352
|
+
if (grpc_client_channel_trace.enabled()) {
|
1353
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
|
1354
|
+
}
|
1355
|
+
if (retry_state != nullptr) {
|
1356
|
+
free_cached_send_op_data_after_commit(elem, retry_state);
|
1357
|
+
}
|
1358
|
+
}
|
1359
|
+
|
1360
|
+
// Starts a retry after appropriate back-off.
|
1361
|
+
static void do_retry(grpc_call_element* elem,
|
1362
|
+
subchannel_call_retry_state* retry_state,
|
1363
|
+
grpc_millis server_pushback_ms) {
|
1364
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1365
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1366
|
+
GPR_ASSERT(calld->method_params != nullptr);
|
1367
|
+
const ClientChannelMethodParams::RetryPolicy* retry_policy =
|
1368
|
+
calld->method_params->retry_policy();
|
1369
|
+
GPR_ASSERT(retry_policy != nullptr);
|
1370
|
+
// Reset subchannel call and connected subchannel.
|
1371
|
+
if (calld->subchannel_call != nullptr) {
|
1372
|
+
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
|
1373
|
+
"client_channel_call_retry");
|
1374
|
+
calld->subchannel_call = nullptr;
|
1375
|
+
}
|
1376
|
+
if (calld->pick.connected_subchannel != nullptr) {
|
1377
|
+
calld->pick.connected_subchannel.reset();
|
1378
|
+
}
|
1379
|
+
// Compute backoff delay.
|
1380
|
+
grpc_millis next_attempt_time;
|
1381
|
+
if (server_pushback_ms >= 0) {
|
1382
|
+
next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
|
1383
|
+
calld->last_attempt_got_server_pushback = true;
|
1384
|
+
} else {
|
1385
|
+
if (calld->num_attempts_completed == 1 ||
|
1386
|
+
calld->last_attempt_got_server_pushback) {
|
1387
|
+
calld->retry_backoff.Init(
|
1388
|
+
grpc_core::BackOff::Options()
|
1389
|
+
.set_initial_backoff(retry_policy->initial_backoff)
|
1390
|
+
.set_multiplier(retry_policy->backoff_multiplier)
|
1391
|
+
.set_jitter(RETRY_BACKOFF_JITTER)
|
1392
|
+
.set_max_backoff(retry_policy->max_backoff));
|
1393
|
+
calld->last_attempt_got_server_pushback = false;
|
1394
|
+
}
|
1395
|
+
next_attempt_time = calld->retry_backoff->NextAttemptTime();
|
1396
|
+
}
|
1397
|
+
if (grpc_client_channel_trace.enabled()) {
|
1398
|
+
gpr_log(GPR_DEBUG,
|
1399
|
+
"chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
|
1400
|
+
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
|
1401
|
+
}
|
1402
|
+
// Schedule retry after computed delay.
|
1403
|
+
GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem,
|
1404
|
+
grpc_combiner_scheduler(chand->combiner));
|
1405
|
+
grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure);
|
1406
|
+
// Update bookkeeping.
|
1407
|
+
if (retry_state != nullptr) retry_state->retry_dispatched = true;
|
1408
|
+
}
|
1409
|
+
|
1410
|
+
// Returns true if the call is being retried.
|
1411
|
+
static bool maybe_retry(grpc_call_element* elem,
|
1412
|
+
subchannel_batch_data* batch_data,
|
1413
|
+
grpc_status_code status,
|
1414
|
+
grpc_mdelem* server_pushback_md) {
|
1415
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1416
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1417
|
+
// Get retry policy.
|
1418
|
+
if (calld->method_params == nullptr) return false;
|
1419
|
+
const ClientChannelMethodParams::RetryPolicy* retry_policy =
|
1420
|
+
calld->method_params->retry_policy();
|
1421
|
+
if (retry_policy == nullptr) return false;
|
1422
|
+
// If we've already dispatched a retry from this call, return true.
|
1423
|
+
// This catches the case where the batch has multiple callbacks
|
1424
|
+
// (i.e., it includes either recv_message or recv_initial_metadata).
|
1425
|
+
subchannel_call_retry_state* retry_state = nullptr;
|
1426
|
+
if (batch_data != nullptr) {
|
1427
|
+
retry_state = static_cast<subchannel_call_retry_state*>(
|
1428
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1429
|
+
batch_data->subchannel_call));
|
1430
|
+
if (retry_state->retry_dispatched) {
|
1431
|
+
if (grpc_client_channel_trace.enabled()) {
|
1432
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
|
1433
|
+
calld);
|
1434
|
+
}
|
1435
|
+
return true;
|
1436
|
+
}
|
1437
|
+
}
|
1438
|
+
// Check status.
|
1439
|
+
if (status == GRPC_STATUS_OK) {
|
1440
|
+
if (calld->retry_throttle_data != nullptr) {
|
1441
|
+
calld->retry_throttle_data->RecordSuccess();
|
1442
|
+
}
|
1443
|
+
if (grpc_client_channel_trace.enabled()) {
|
1444
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
|
1445
|
+
}
|
1446
|
+
return false;
|
1447
|
+
}
|
1448
|
+
// Status is not OK. Check whether the status is retryable.
|
1449
|
+
if (!retry_policy->retryable_status_codes.Contains(status)) {
|
1450
|
+
if (grpc_client_channel_trace.enabled()) {
|
1451
|
+
gpr_log(GPR_DEBUG,
|
1452
|
+
"chand=%p calld=%p: status %s not configured as retryable", chand,
|
1453
|
+
calld, grpc_status_code_to_string(status));
|
1454
|
+
}
|
1455
|
+
return false;
|
1456
|
+
}
|
1457
|
+
// Record the failure and check whether retries are throttled.
|
1458
|
+
// Note that it's important for this check to come after the status
|
1459
|
+
// code check above, since we should only record failures whose statuses
|
1460
|
+
// match the configured retryable status codes, so that we don't count
|
1461
|
+
// things like failures due to malformed requests (INVALID_ARGUMENT).
|
1462
|
+
// Conversely, it's important for this to come before the remaining
|
1463
|
+
// checks, so that we don't fail to record failures due to other factors.
|
1464
|
+
if (calld->retry_throttle_data != nullptr &&
|
1465
|
+
!calld->retry_throttle_data->RecordFailure()) {
|
1466
|
+
if (grpc_client_channel_trace.enabled()) {
|
1467
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
|
1468
|
+
}
|
1469
|
+
return false;
|
1470
|
+
}
|
1471
|
+
// Check whether the call is committed.
|
1472
|
+
if (calld->retry_committed) {
|
1473
|
+
if (grpc_client_channel_trace.enabled()) {
|
1474
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
|
1475
|
+
calld);
|
1476
|
+
}
|
1477
|
+
return false;
|
1478
|
+
}
|
1479
|
+
// Check whether we have retries remaining.
|
1480
|
+
++calld->num_attempts_completed;
|
1481
|
+
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
|
1482
|
+
if (grpc_client_channel_trace.enabled()) {
|
1483
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
|
1484
|
+
calld, retry_policy->max_attempts);
|
1485
|
+
}
|
1486
|
+
return false;
|
1487
|
+
}
|
1488
|
+
// If the call was cancelled from the surface, don't retry.
|
1489
|
+
if (calld->cancel_error != GRPC_ERROR_NONE) {
|
1490
|
+
if (grpc_client_channel_trace.enabled()) {
|
1491
|
+
gpr_log(GPR_DEBUG,
|
1492
|
+
"chand=%p calld=%p: call cancelled from surface, not retrying",
|
1493
|
+
chand, calld);
|
1494
|
+
}
|
1495
|
+
return false;
|
1496
|
+
}
|
1497
|
+
// Check server push-back.
|
1498
|
+
grpc_millis server_pushback_ms = -1;
|
1499
|
+
if (server_pushback_md != nullptr) {
|
1500
|
+
// If the value is "-1" or any other unparseable string, we do not retry.
|
1501
|
+
uint32_t ms;
|
1502
|
+
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
|
1503
|
+
if (grpc_client_channel_trace.enabled()) {
|
1504
|
+
gpr_log(GPR_DEBUG,
|
1505
|
+
"chand=%p calld=%p: not retrying due to server push-back",
|
1506
|
+
chand, calld);
|
1507
|
+
}
|
1508
|
+
return false;
|
1509
|
+
} else {
|
1510
|
+
if (grpc_client_channel_trace.enabled()) {
|
1511
|
+
gpr_log(GPR_DEBUG,
|
1512
|
+
"chand=%p calld=%p: server push-back: retry in %u ms", chand,
|
1513
|
+
calld, ms);
|
1514
|
+
}
|
1515
|
+
server_pushback_ms = (grpc_millis)ms;
|
1516
|
+
}
|
1517
|
+
}
|
1518
|
+
do_retry(elem, retry_state, server_pushback_ms);
|
1519
|
+
return true;
|
1520
|
+
}
|
1521
|
+
|
1522
|
+
//
|
1523
|
+
// subchannel_batch_data
|
1524
|
+
//
|
1525
|
+
|
1526
|
+
static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
|
1527
|
+
int refcount) {
|
1528
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1529
|
+
subchannel_call_retry_state* retry_state =
|
1530
|
+
static_cast<subchannel_call_retry_state*>(
|
1531
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1532
|
+
calld->subchannel_call));
|
1533
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
|
1534
|
+
gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
|
1535
|
+
batch_data->elem = elem;
|
1536
|
+
batch_data->subchannel_call =
|
1537
|
+
GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
|
1538
|
+
batch_data->batch.payload = &retry_state->batch_payload;
|
1539
|
+
gpr_ref_init(&batch_data->refs, refcount);
|
1540
|
+
GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
|
1541
|
+
grpc_schedule_on_exec_ctx);
|
1542
|
+
batch_data->batch.on_complete = &batch_data->on_complete;
|
1543
|
+
GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
|
1544
|
+
return batch_data;
|
1545
|
+
}
|
1546
|
+
|
1547
|
+
static void batch_data_unref(subchannel_batch_data* batch_data) {
|
1548
|
+
if (gpr_unref(&batch_data->refs)) {
|
1549
|
+
if (batch_data->send_initial_metadata_storage != nullptr) {
|
1550
|
+
grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
|
1551
|
+
}
|
1552
|
+
if (batch_data->send_trailing_metadata_storage != nullptr) {
|
1553
|
+
grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
|
1554
|
+
}
|
1555
|
+
if (batch_data->batch.recv_initial_metadata) {
|
1556
|
+
grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
|
1557
|
+
}
|
1558
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1559
|
+
grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
|
1560
|
+
}
|
1561
|
+
GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
|
1562
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1563
|
+
GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
|
1564
|
+
}
|
1565
|
+
}
|
1566
|
+
|
1567
|
+
//
|
1568
|
+
// recv_initial_metadata callback handling
|
1569
|
+
//
|
1570
|
+
|
1571
|
+
// Invokes recv_initial_metadata_ready for a subchannel batch.
|
1572
|
+
static void invoke_recv_initial_metadata_callback(void* arg,
|
1573
|
+
grpc_error* error) {
|
1574
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1575
|
+
channel_data* chand =
|
1576
|
+
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1577
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1578
|
+
// Find pending batch.
|
1579
|
+
pending_batch* pending = nullptr;
|
1580
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1581
|
+
grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
|
1582
|
+
if (batch != nullptr && batch->recv_initial_metadata &&
|
1583
|
+
batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
|
1584
|
+
nullptr) {
|
1585
|
+
if (grpc_client_channel_trace.enabled()) {
|
1586
|
+
gpr_log(GPR_DEBUG,
|
1587
|
+
"chand=%p calld=%p: invoking recv_initial_metadata_ready for "
|
1588
|
+
"pending batch at index %" PRIuPTR,
|
1589
|
+
chand, calld, i);
|
1590
|
+
}
|
1591
|
+
pending = &calld->pending_batches[i];
|
1592
|
+
break;
|
1593
|
+
}
|
1594
|
+
}
|
1595
|
+
GPR_ASSERT(pending != nullptr);
|
1596
|
+
// Return metadata.
|
1597
|
+
grpc_metadata_batch_move(
|
1598
|
+
&batch_data->recv_initial_metadata,
|
1599
|
+
pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
|
1600
|
+
// Update bookkeeping.
|
1601
|
+
// Note: Need to do this before invoking the callback, since invoking
|
1602
|
+
// the callback will result in yielding the call combiner.
|
1603
|
+
grpc_closure* recv_initial_metadata_ready =
|
1604
|
+
pending->batch->payload->recv_initial_metadata
|
1605
|
+
.recv_initial_metadata_ready;
|
1606
|
+
pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
|
1607
|
+
nullptr;
|
1608
|
+
maybe_clear_pending_batch(batch_data->elem, pending);
|
1609
|
+
batch_data_unref(batch_data);
|
1610
|
+
// Invoke callback.
|
1611
|
+
GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error));
|
1612
|
+
}
|
1613
|
+
|
1614
|
+
// Intercepts recv_initial_metadata_ready callback for retries.
|
1615
|
+
// Commits the call and returns the initial metadata up the stack.
|
1616
|
+
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
|
1617
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1618
|
+
grpc_call_element* elem = batch_data->elem;
|
1619
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
1620
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
1621
|
+
if (grpc_client_channel_trace.enabled()) {
|
1622
|
+
gpr_log(GPR_DEBUG,
|
1623
|
+
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
|
1624
|
+
chand, calld, grpc_error_string(error));
|
1625
|
+
}
|
1626
|
+
subchannel_call_retry_state* retry_state =
|
1627
|
+
static_cast<subchannel_call_retry_state*>(
|
1628
|
+
grpc_connected_subchannel_call_get_parent_data(
|
1629
|
+
batch_data->subchannel_call));
|
1630
|
+
// If we got an error or a Trailers-Only response and have not yet gotten
|
1631
|
+
// the recv_trailing_metadata on_complete callback, then defer
|
1632
|
+
// propagating this callback back to the surface. We can evaluate whether
|
1633
|
+
// to retry when recv_trailing_metadata comes back.
|
1634
|
+
if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
|
1635
|
+
!retry_state->completed_recv_trailing_metadata) {
|
1636
|
+
if (grpc_client_channel_trace.enabled()) {
|
1637
|
+
gpr_log(GPR_DEBUG,
|
1638
|
+
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
|
1639
|
+
"(Trailers-Only)",
|
1640
|
+
chand, calld);
|
1641
|
+
}
|
1642
|
+
retry_state->recv_initial_metadata_ready_deferred = true;
|
1643
|
+
retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
|
1644
|
+
if (!retry_state->started_recv_trailing_metadata) {
|
1645
|
+
// recv_trailing_metadata not yet started by application; start it
|
1646
|
+
// ourselves to get status.
|
1647
|
+
start_internal_recv_trailing_metadata(elem);
|
1648
|
+
} else {
|
1649
|
+
GRPC_CALL_COMBINER_STOP(
|
1650
|
+
calld->call_combiner,
|
1651
|
+
"recv_initial_metadata_ready trailers-only or error");
|
1652
|
+
}
|
1653
|
+
return;
|
1654
|
+
}
|
1655
|
+
// Received valid initial metadata, so commit the call.
|
1656
|
+
retry_commit(elem, retry_state);
|
1657
|
+
// Manually invoking a callback function; it does not take ownership of error.
|
1658
|
+
invoke_recv_initial_metadata_callback(batch_data, error);
|
1659
|
+
GRPC_ERROR_UNREF(error);
|
1660
|
+
}
|
1661
|
+
|
1662
|
+
//
|
1663
|
+
// recv_message callback handling
|
1664
|
+
//
|
1665
|
+
|
1666
|
+
// Invokes recv_message_ready for a subchannel batch.
|
1667
|
+
static void invoke_recv_message_callback(void* arg, grpc_error* error) {
|
1668
|
+
subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
|
1669
|
+
channel_data* chand =
|
1670
|
+
static_cast<channel_data*>(batch_data->elem->channel_data);
|
1671
|
+
call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
|
1672
|
+
// Find pending op.
|
1673
|
+
pending_batch* pending = nullptr;
|
1674
|
+
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
|
1675
|
+
grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
|
1676
|
+
if (batch != nullptr && batch->recv_message &&
|
1677
|
+
batch->payload->recv_message.recv_message_ready != nullptr) {
|
1678
|
+
if (grpc_client_channel_trace.enabled()) {
|
1679
|
+
gpr_log(GPR_DEBUG,
|
1680
|
+
"chand=%p calld=%p: invoking recv_message_ready for "
|
1681
|
+
"pending batch at index %" PRIuPTR,
|
1682
|
+
chand, calld, i);
|
1683
|
+
}
|
1684
|
+
pending = &calld->pending_batches[i];
|
1685
|
+
break;
|
1686
|
+
}
|
1687
|
+
}
|
1688
|
+
GPR_ASSERT(pending != nullptr);
|
1689
|
+
// Return payload.
|
1690
|
+
*pending->batch->payload->recv_message.recv_message =
|
1691
|
+
std::move(batch_data->recv_message);
|
1692
|
+
// Update bookkeeping.
|
1693
|
+
// Note: Need to do this before invoking the callback, since invoking
|
1694
|
+
// the callback will result in yielding the call combiner.
|
1695
|
+
grpc_closure* recv_message_ready =
|
1696
|
+
pending->batch->payload->recv_message.recv_message_ready;
|
1697
|
+
pending->batch->payload->recv_message.recv_message_ready = nullptr;
|
1698
|
+
maybe_clear_pending_batch(batch_data->elem, pending);
|
1699
|
+
batch_data_unref(batch_data);
|
1700
|
+
// Invoke callback.
|
1701
|
+
GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error));
|
1702
|
+
}
|
1703
|
+
|
1704
|
+
// Intercepts recv_message_ready callback for retries.
// Commits the call and returns the message up the stack.
//
// Invoked in the call combiner.  If the attempt produced a valid message,
// retries are committed and the message is surfaced; otherwise the callback
// is deferred until recv_trailing_metadata completes, so that the retry
// decision can be made with the final status in hand.
static void recv_message_ready(void* arg, grpc_error* error) {
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
  grpc_call_element* elem = batch_data->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
            chand, calld, grpc_error_string(error));
  }
  // Per-attempt retry state lives in the subchannel call's parent data.
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              batch_data->subchannel_call));
  // If we got an error or the payload was nullptr and we have not yet gotten
  // the recv_trailing_metadata on_complete callback, then defer
  // propagating this callback back to the surface.  We can evaluate whether
  // to retry when recv_trailing_metadata comes back.
  if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
      !retry_state->completed_recv_trailing_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: deferring recv_message_ready (nullptr "
              "message and recv_trailing_metadata pending)",
              chand, calld);
    }
    // Stash a ref to the error; it is released later when the deferred
    // callback is resumed or the attempt is abandoned (see on_complete).
    retry_state->recv_message_ready_deferred = true;
    retry_state->recv_message_error = GRPC_ERROR_REF(error);
    if (!retry_state->started_recv_trailing_metadata) {
      // recv_trailing_metadata not yet started by application; start it
      // ourselves to get status.
      start_internal_recv_trailing_metadata(elem);
    } else {
      // Nothing more to do on this attempt right now; yield the combiner.
      GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null");
    }
    return;
  }
  // Received a valid message, so commit the call.
  retry_commit(elem, retry_state);
  // Manually invoking a callback function; it does not take ownership of error.
  invoke_recv_message_callback(batch_data, error);
  GRPC_ERROR_UNREF(error);
}
|
1748
|
+
|
1749
|
+
//
|
1750
|
+
// on_complete callback handling
|
1751
|
+
//
|
1752
|
+
|
1753
|
+
// Updates retry_state to reflect the ops completed in batch_data.
|
1754
|
+
static void update_retry_state_for_completed_batch(
|
1755
|
+
subchannel_batch_data* batch_data,
|
1756
|
+
subchannel_call_retry_state* retry_state) {
|
1757
|
+
if (batch_data->batch.send_initial_metadata) {
|
1758
|
+
retry_state->completed_send_initial_metadata = true;
|
1759
|
+
}
|
1760
|
+
if (batch_data->batch.send_message) {
|
1761
|
+
++retry_state->completed_send_message_count;
|
1762
|
+
}
|
1763
|
+
if (batch_data->batch.send_trailing_metadata) {
|
1764
|
+
retry_state->completed_send_trailing_metadata = true;
|
1765
|
+
}
|
1766
|
+
if (batch_data->batch.recv_initial_metadata) {
|
1767
|
+
retry_state->completed_recv_initial_metadata = true;
|
1768
|
+
}
|
1769
|
+
if (batch_data->batch.recv_message) {
|
1770
|
+
++retry_state->completed_recv_message_count;
|
1771
|
+
}
|
1772
|
+
if (batch_data->batch.recv_trailing_metadata) {
|
1773
|
+
retry_state->completed_recv_trailing_metadata = true;
|
1774
|
+
}
|
1775
|
+
}
|
1776
|
+
|
1777
|
+
// Represents a closure that needs to run as a result of a completed batch.
typedef struct {
  // The closure to schedule or run in the call combiner.
  grpc_closure* closure;
  // The error to pass when the closure is scheduled.
  grpc_error* error;
  // Human-readable reason, passed to GRPC_CALL_COMBINER_START for tracing.
  const char* reason;
} closure_to_execute;
|
1783
|
+
|
1784
|
+
// Adds any necessary closures for deferred recv_initial_metadata and
// recv_message callbacks to closures, updating *num_closures as needed.
//
// Note: Deferred callbacks are resumed only when this batch carries
// recv_trailing_metadata, i.e. once the attempt's final status is known.
// The errors stashed in retry_state are handed off to the closures here.
static void add_closures_for_deferred_recv_callbacks(
    subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
    closure_to_execute* closures, size_t* num_closures) {
  if (batch_data->batch.recv_trailing_metadata &&
      retry_state->recv_initial_metadata_ready_deferred) {
    closure_to_execute* closure = &closures[(*num_closures)++];
    // Reuse batch_data's closure storage to invoke the deferred callback.
    closure->closure =
        GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
                          invoke_recv_initial_metadata_callback, batch_data,
                          grpc_schedule_on_exec_ctx);
    closure->error = retry_state->recv_initial_metadata_error;
    closure->reason = "resuming recv_initial_metadata_ready";
  }
  if (batch_data->batch.recv_trailing_metadata &&
      retry_state->recv_message_ready_deferred) {
    closure_to_execute* closure = &closures[(*num_closures)++];
    closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
                                         invoke_recv_message_callback,
                                         batch_data, grpc_schedule_on_exec_ctx);
    closure->error = retry_state->recv_message_error;
    closure->reason = "resuming recv_message_ready";
  }
}
|
1809
|
+
|
1810
|
+
// If there are any cached ops to replay or pending ops to start on the
// subchannel call, adds a closure to closures to invoke
// start_retriable_subchannel_batches(), updating *num_closures as needed.
static void add_closures_for_replay_or_pending_send_ops(
    grpc_call_element* elem, subchannel_batch_data* batch_data,
    subchannel_call_retry_state* retry_state, closure_to_execute* closures,
    size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // First check the already-cached send ops against what this attempt has
  // started so far.
  bool have_pending_send_message_ops =
      retry_state->started_send_message_count < calld->send_messages.size();
  bool have_pending_send_trailing_metadata_op =
      calld->seen_send_trailing_metadata &&
      !retry_state->started_send_trailing_metadata;
  // If nothing cached needs replaying, also scan pending batches whose send
  // ops have not been cached yet.
  if (!have_pending_send_message_ops &&
      !have_pending_send_trailing_metadata_op) {
    for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
      pending_batch* pending = &calld->pending_batches[i];
      grpc_transport_stream_op_batch* batch = pending->batch;
      if (batch == nullptr || pending->send_ops_cached) continue;
      if (batch->send_message) have_pending_send_message_ops = true;
      if (batch->send_trailing_metadata) {
        have_pending_send_trailing_metadata_op = true;
      }
    }
  }
  if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: starting next batch for pending send op(s)",
              chand, calld);
    }
    closure_to_execute* closure = &closures[(*num_closures)++];
    // Borrow this batch's handler_private closure to kick off the next
    // subchannel batch for the remaining send ops.
    closure->closure = GRPC_CLOSURE_INIT(
        &batch_data->batch.handler_private.closure,
        start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
    closure->error = GRPC_ERROR_NONE;
    closure->reason = "starting next batch for send_* op(s)";
  }
}
|
1850
|
+
|
1851
|
+
// For any pending batch completed in batch_data, adds the necessary
// completion closures to closures, updating *num_closures as needed.
//
// Takes ownership of one ref on error; each closure added here gets its
// own ref, and the caller-provided ref is released before returning.
static void add_closures_for_completed_pending_batches(
    grpc_call_element* elem, subchannel_batch_data* batch_data,
    subchannel_call_retry_state* retry_state, grpc_error* error,
    closure_to_execute* closures, size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    if (pending_batch_is_completed(pending, calld, retry_state)) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
                chand, calld, i);
      }
      // Copy the trailing metadata to return it to the surface.
      if (batch_data->batch.recv_trailing_metadata) {
        grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
                                 pending->batch->payload->recv_trailing_metadata
                                     .recv_trailing_metadata);
      }
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = pending->batch->on_complete;
      closure->error = GRPC_ERROR_REF(error);
      closure->reason = "on_complete for pending batch";
      // Clear on_complete so the batch is not completed twice.
      pending->batch->on_complete = nullptr;
      maybe_clear_pending_batch(elem, pending);
    }
  }
  GRPC_ERROR_UNREF(error);
}
|
1883
|
+
|
1884
|
+
// For any pending batch containing an op that has not yet been started,
// adds the pending batch's completion closures to closures, updating
// *num_closures as needed.
//
// Takes ownership of one ref on error; each failure closure added here
// gets its own ref, and the caller-provided ref is released on return.
static void add_closures_to_fail_unstarted_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    if (pending_batch_is_unstarted(pending, calld, retry_state)) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: failing unstarted pending batch at index "
                "%" PRIuPTR,
                chand, calld, i);
      }
      // Fail recv_initial_metadata_ready, clearing the callback pointer so
      // it cannot be invoked again.
      if (pending->batch->recv_initial_metadata) {
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure = pending->batch->payload->recv_initial_metadata
                               .recv_initial_metadata_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason =
            "failing recv_initial_metadata_ready for pending batch";
        pending->batch->payload->recv_initial_metadata
            .recv_initial_metadata_ready = nullptr;
      }
      // Fail recv_message_ready; surface sees a null message.
      if (pending->batch->recv_message) {
        *pending->batch->payload->recv_message.recv_message = nullptr;
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure =
            pending->batch->payload->recv_message.recv_message_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason = "failing recv_message_ready for pending batch";
        pending->batch->payload->recv_message.recv_message_ready = nullptr;
      }
      // Finally fail the batch's on_complete.
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = pending->batch->on_complete;
      closure->error = GRPC_ERROR_REF(error);
      closure->reason = "failing on_complete for pending batch";
      pending->batch->on_complete = nullptr;
      maybe_clear_pending_batch(elem, pending);
    }
  }
  GRPC_ERROR_UNREF(error);
}
|
1930
|
+
|
1931
|
+
// Callback used to intercept on_complete from subchannel calls.
// Called only when retries are enabled.
//
// Runs in the call combiner.  Determines whether this batch finished the
// call and, if so, whether to retry; otherwise surfaces completions for
// any pending batches that are now done and schedules any remaining work.
static void on_complete(void* arg, grpc_error* error) {
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
  grpc_call_element* elem = batch_data->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
            chand, calld, grpc_error_string(error), batch_str);
    gpr_free(batch_str);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              batch_data->subchannel_call));
  // If we have previously completed recv_trailing_metadata, then the
  // call is finished.
  // Note: Captured BEFORE updating bookkeeping below, so that a batch
  // carrying recv_trailing_metadata itself is evaluated in the else branch.
  bool call_finished = retry_state->completed_recv_trailing_metadata;
  // Update bookkeeping in retry_state.
  update_retry_state_for_completed_batch(batch_data, retry_state);
  if (call_finished) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
              calld);
    }
  } else {
    // Check if this batch finished the call, and if so, get its status.
    // The call is finished if either (a) this callback was invoked with
    // an error or (b) we receive status.
    grpc_status_code status = GRPC_STATUS_OK;
    grpc_mdelem* server_pushback_md = nullptr;
    if (error != GRPC_ERROR_NONE) {  // Case (a).
      call_finished = true;
      grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                            nullptr);
    } else if (batch_data->batch.recv_trailing_metadata) {  // Case (b).
      call_finished = true;
      grpc_metadata_batch* md_batch =
          batch_data->batch.payload->recv_trailing_metadata
              .recv_trailing_metadata;
      GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
      status = grpc_get_status_code_from_metadata(
          md_batch->idx.named.grpc_status->md);
      // Server may push back a delay before the next retry attempt.
      if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
        server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
      }
    } else if (retry_state->completed_recv_trailing_metadata) {
      call_finished = true;
    }
    if (call_finished && grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
              calld, grpc_status_code_to_string(status));
    }
    // If the call is finished, check if we should retry.
    if (call_finished &&
        maybe_retry(elem, batch_data, status, server_pushback_md)) {
      // Unref batch_data for deferred recv_initial_metadata_ready or
      // recv_message_ready callbacks, if any.
      // (Those deferred callbacks will never run now, so their refs on
      // batch_data and their stashed errors are released here.)
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_initial_metadata_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
      }
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_message_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_message_error);
      }
      batch_data_unref(batch_data);
      return;
    }
  }
  // If the call is finished or retries are committed, free cached data for
  // send ops that we've just completed.
  if (call_finished || calld->retry_committed) {
    free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
  }
  // Call not being retried.
  // Construct list of closures to execute.
  // Max number of closures is number of pending batches plus one for
  // each of:
  //   - recv_initial_metadata_ready (either deferred or unstarted)
  //   - recv_message_ready (either deferred or unstarted)
  //   - starting a new batch for pending send ops
  closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
  size_t num_closures = 0;
  // If there are deferred recv_initial_metadata_ready or recv_message_ready
  // callbacks, add them to closures.
  add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
                                           &num_closures);
  // Find pending batches whose ops are now complete and add their
  // on_complete callbacks to closures.
  add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
                                             GRPC_ERROR_REF(error), closures,
                                             &num_closures);
  // Add closures to handle any pending batches that have not yet been started.
  // If the call is finished, we fail these batches; otherwise, we add a
  // callback to start_retriable_subchannel_batches() to start them on
  // the subchannel call.
  if (call_finished) {
    add_closures_to_fail_unstarted_pending_batches(
        elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
  } else {
    add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
                                                closures, &num_closures);
  }
  // Don't need batch_data anymore.
  batch_data_unref(batch_data);
  // Schedule all of the closures identified above.
  // Note that the call combiner will be yielded for each closure that
  // we schedule.  We're already running in the call combiner, so one of
  // the closures can be scheduled directly, but the others will
  // have to re-enter the call combiner.
  if (num_closures > 0) {
    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
    for (size_t i = 1; i < num_closures; ++i) {
      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
                               closures[i].error, closures[i].reason);
    }
  } else {
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no closures to run for on_complete");
  }
}
|
2057
|
+
|
2058
|
+
//
|
2059
|
+
// subchannel batch construction
|
2060
|
+
//
|
2061
|
+
|
2062
|
+
// Helper function used to start a subchannel batch in the call combiner.
|
2063
|
+
static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
|
2064
|
+
grpc_transport_stream_op_batch* batch =
|
2065
|
+
static_cast<grpc_transport_stream_op_batch*>(arg);
|
2066
|
+
grpc_subchannel_call* subchannel_call =
|
2067
|
+
static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
|
2068
|
+
// Note: This will release the call combiner.
|
2069
|
+
grpc_subchannel_call_process_op(subchannel_call, batch);
|
2070
|
+
}
|
2071
|
+
|
2072
|
+
// Adds retriable send_initial_metadata op to batch_data.
static void add_retriable_send_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // Maps the number of retries to the corresponding metadata value slice.
  static const grpc_slice* retry_count_strings[] = {
      &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4};
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  //
  // If we've already completed one or more attempts, add the
  // grpc-retry-attempts header.
  // Note: The `(calld->num_attempts_completed > 0)` term converts to 0 or 1,
  // reserving one extra storage slot for the retry-attempts mdelem when
  // this is not the first attempt.
  batch_data->send_initial_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            (calld->send_initial_metadata.list.count +
                             (calld->num_attempts_completed > 0))));
  grpc_metadata_batch_copy(&calld->send_initial_metadata,
                           &batch_data->send_initial_metadata,
                           batch_data->send_initial_metadata_storage);
  // Strip any stale grpc-previous-rpc-attempts entry before (possibly)
  // adding the correct value for this attempt.
  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
      nullptr) {
    grpc_metadata_batch_remove(
        &batch_data->send_initial_metadata,
        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
  }
  if (calld->num_attempts_completed > 0) {
    grpc_mdelem retry_md = grpc_mdelem_from_slices(
        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
        *retry_count_strings[calld->num_attempts_completed - 1]);
    grpc_error* error = grpc_metadata_batch_add_tail(
        &batch_data->send_initial_metadata,
        &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
                                                       .list.count],
        retry_md);
    if (error != GRPC_ERROR_NONE) {
      // Adding the header should never fail here; treat it as fatal.
      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
              grpc_error_string(error));
      GPR_ASSERT(false);
    }
  }
  // Wire the copied metadata into the batch payload.
  retry_state->started_send_initial_metadata = true;
  batch_data->batch.send_initial_metadata = true;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
      &batch_data->send_initial_metadata;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
      calld->send_initial_metadata_flags;
  batch_data->batch.payload->send_initial_metadata.peer_string =
      calld->peer_string;
}
|
2123
|
+
|
2124
|
+
// Adds retriable send_message op to batch_data.
// Replays the next not-yet-started cached message for this attempt.
static void add_retriable_send_message_op(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
            chand, calld, retry_state->started_send_message_count);
  }
  // Pick the next cached message, then advance this attempt's counter.
  grpc_core::ByteStreamCache* cache =
      calld->send_messages[retry_state->started_send_message_count];
  ++retry_state->started_send_message_count;
  batch_data->send_message.Init(cache);
  batch_data->batch.send_message = true;
  batch_data->batch.payload->send_message.send_message.reset(
      batch_data->send_message.get());
}
|
2143
|
+
|
2144
|
+
// Adds retriable send_trailing_metadata op to batch_data.
static void add_retriable_send_trailing_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  // Storage for the copied mdelems comes from the call's arena, so it is
  // freed with the call rather than explicitly.
  batch_data->send_trailing_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            calld->send_trailing_metadata.list.count));
  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
                           &batch_data->send_trailing_metadata,
                           batch_data->send_trailing_metadata_storage);
  retry_state->started_send_trailing_metadata = true;
  batch_data->batch.send_trailing_metadata = true;
  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
      &batch_data->send_trailing_metadata;
}
|
2163
|
+
|
2164
|
+
// Adds retriable recv_initial_metadata op to batch_data.
// The received metadata lands in batch_data-owned storage so it can be
// inspected before being surfaced; the ready callback is intercepted by
// recv_initial_metadata_ready for retry handling.
static void add_retriable_recv_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  retry_state->started_recv_initial_metadata = true;
  batch_data->batch.recv_initial_metadata = true;
  grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
      &batch_data->recv_initial_metadata;
  batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
      &batch_data->trailing_metadata_available;
  GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
                    recv_initial_metadata_ready, batch_data,
                    grpc_schedule_on_exec_ctx);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
      &batch_data->recv_initial_metadata_ready;
}
|
2181
|
+
|
2182
|
+
// Adds retriable recv_message op to batch_data.
// The ready callback is intercepted by recv_message_ready for retry
// handling before the message is surfaced.
static void add_retriable_recv_message_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  ++retry_state->started_recv_message_count;
  batch_data->batch.recv_message = true;
  batch_data->batch.payload->recv_message.recv_message =
      &batch_data->recv_message;
  GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
                    batch_data, grpc_schedule_on_exec_ctx);
  batch_data->batch.payload->recv_message.recv_message_ready =
      &batch_data->recv_message_ready;
}
|
2195
|
+
|
2196
|
+
// Adds retriable recv_trailing_metadata op to batch_data.
// Also enables stats collection, which rides along with trailing metadata.
static void add_retriable_recv_trailing_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  retry_state->started_recv_trailing_metadata = true;
  batch_data->batch.recv_trailing_metadata = true;
  grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
  batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
      &batch_data->recv_trailing_metadata;
  batch_data->batch.collect_stats = true;
  batch_data->batch.payload->collect_stats.collect_stats =
      &batch_data->collect_stats;
}
|
2209
|
+
|
2210
|
+
// Helper function used to start a recv_trailing_metadata batch.  This
// is used in the case where a recv_initial_metadata or recv_message
// op fails in a way that we know the call is over but when the application
// has not yet started its own recv_trailing_metadata op.
static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: call failed but recv_trailing_metadata not "
            "started; starting it internally",
            chand, calld);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // NOTE(review): the second arg to batch_data_create() appears to be the
  // number of expected callbacks (here just on_complete) — confirm against
  // batch_data_create's definition.
  subchannel_batch_data* batch_data = batch_data_create(elem, 1);
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
  // Note: This will release the call combiner.
  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
}
|
2232
|
+
|
2233
|
+
// If there are any cached send ops that need to be replayed on the
// current subchannel call, creates and returns a new subchannel batch
// to replay those ops.  Otherwise, returns nullptr.
//
// A single replay batch is shared by all replayable send ops; ops that are
// still pending at the surface (pending_send_*) are excluded, since they
// will be started from their pending batches instead.
static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  subchannel_batch_data* replay_batch_data = nullptr;
  // send_initial_metadata.
  if (calld->seen_send_initial_metadata &&
      !retry_state->started_send_initial_metadata &&
      !calld->pending_send_initial_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_initial_metadata op",
              chand, calld);
    }
    replay_batch_data = batch_data_create(elem, 1);
    add_retriable_send_initial_metadata_op(calld, retry_state,
                                           replay_batch_data);
  }
  // send_message.
  // Note that we can only have one send_message op in flight at a time.
  if (retry_state->started_send_message_count < calld->send_messages.size() &&
      retry_state->started_send_message_count ==
          retry_state->completed_send_message_count &&
      !calld->pending_send_message) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_message op",
              chand, calld);
    }
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_message_op(elem, retry_state, replay_batch_data);
  }
  // send_trailing_metadata.
  // Note that we only add this op if we have no more send_message ops
  // to start, since we can't send down any more send_message ops after
  // send_trailing_metadata.
  if (calld->seen_send_trailing_metadata &&
      retry_state->started_send_message_count == calld->send_messages.size() &&
      !retry_state->started_send_trailing_metadata &&
      !calld->pending_send_trailing_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_trailing_metadata op",
              chand, calld);
    }
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_trailing_metadata_op(calld, retry_state,
                                            replay_batch_data);
  }
  return replay_batch_data;
}
|
2294
|
+
|
2295
|
+
// Adds subchannel batches for pending batches to batches, updating
|
2296
|
+
// *num_batches as needed.
|
2297
|
+
static void add_subchannel_batches_for_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_transport_stream_op_batch** batches, size_t* num_batches) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Walk every pending-batch slot; empty slots are skipped.
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    grpc_transport_stream_op_batch* batch = pending->batch;
    if (batch == nullptr) continue;
    // Skip any batch that either (a) has already been started on this
    // subchannel call or (b) we can't start yet because we're still
    // replaying send ops that need to be completed first.
    // TODO(roth): Note that if any one op in the batch can't be sent
    // yet due to ops that we're replaying, we don't start any of the ops
    // in the batch.  This is probably okay, but it could conceivably
    // lead to increased latency in some cases -- e.g., we could delay
    // starting a recv op due to it being in the same batch with a send
    // op.  If/when we revamp the callback protocol in
    // transport_stream_op_batch, we may be able to fix this.
    if (batch->send_initial_metadata &&
        retry_state->started_send_initial_metadata) {
      continue;
    }
    // completed < started means a previously-started send_message is
    // still in flight; wait for it before starting another.
    if (batch->send_message && retry_state->completed_send_message_count <
                                  retry_state->started_send_message_count) {
      continue;
    }
    // Note that we only start send_trailing_metadata if we have no more
    // send_message ops to start, since we can't send down any more
    // send_message ops after send_trailing_metadata.
    if (batch->send_trailing_metadata &&
        (retry_state->started_send_message_count + batch->send_message <
             calld->send_messages.size() ||
         retry_state->started_send_trailing_metadata)) {
      continue;
    }
    if (batch->recv_initial_metadata &&
        retry_state->started_recv_initial_metadata) {
      continue;
    }
    if (batch->recv_message && retry_state->completed_recv_message_count <
                                  retry_state->started_recv_message_count) {
      continue;
    }
    if (batch->recv_trailing_metadata &&
        retry_state->started_recv_trailing_metadata) {
      continue;
    }
    // If we're not retrying, just send the batch as-is.
    if (calld->method_params == nullptr ||
        calld->method_params->retry_policy() == nullptr ||
        calld->retry_committed) {
      batches[(*num_batches)++] = batch;
      pending_batch_clear(calld, pending);
      continue;
    }
    // Create batch with the right number of callbacks.
    // One callback for the completion of the sends (if any) plus one
    // per recv op that has its own notification.
    const int num_callbacks =
        1 + batch->recv_initial_metadata + batch->recv_message;
    subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
    // Cache send ops if needed (so they can be replayed on retry).
    maybe_cache_send_ops_for_batch(calld, pending);
    // send_initial_metadata.
    if (batch->send_initial_metadata) {
      add_retriable_send_initial_metadata_op(calld, retry_state, batch_data);
    }
    // send_message.
    if (batch->send_message) {
      add_retriable_send_message_op(elem, retry_state, batch_data);
    }
    // send_trailing_metadata.
    if (batch->send_trailing_metadata) {
      add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data);
    }
    // recv_initial_metadata.
    if (batch->recv_initial_metadata) {
      // recv_flags is only used on the server side.
      GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
      add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data);
    }
    // recv_message.
    if (batch->recv_message) {
      add_retriable_recv_message_op(calld, retry_state, batch_data);
    }
    // recv_trailing_metadata.
    if (batch->recv_trailing_metadata) {
      GPR_ASSERT(batch->collect_stats);
      add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
    }
    batches[(*num_batches)++] = &batch_data->batch;
  }
}
|
2388
|
+
|
2389
|
+
// Constructs and starts whatever subchannel batches are needed on the
|
2390
|
+
// subchannel call.
|
2391
|
+
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
            chand, calld);
  }
  // Retry state lives in the subchannel call's parent data area.
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // We can start up to 6 batches.
  grpc_transport_stream_op_batch*
      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
  size_t num_batches = 0;
  // Replay previously-returned send_* ops if needed.
  subchannel_batch_data* replay_batch_data =
      maybe_create_subchannel_batch_for_replay(elem, retry_state);
  if (replay_batch_data != nullptr) {
    batches[num_batches++] = &replay_batch_data->batch;
  }
  // Now add pending batches.
  add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
                                             &num_batches);
  // Start batches on subchannel call.
  // Note that the call combiner will be yielded for each batch that we
  // send down.  We're already running in the call combiner, so one of
  // the batches can be started directly, but the others will have to
  // re-enter the call combiner.
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting %" PRIuPTR
            " retriable batches on subchannel_call=%p",
            chand, calld, num_batches, calld->subchannel_call);
  }
  if (num_batches == 0) {
    // This should be fairly rare, but it can happen when (e.g.) an
    // attempt completes before it has finished replaying all
    // previously sent messages.
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no retriable subchannel batches to start");
  } else {
    // Batches 1..N-1 must re-enter the call combiner; batch 0 is
    // started directly below, since we already hold the combiner.
    for (size_t i = 1; i < num_batches; ++i) {
      if (grpc_client_channel_trace.enabled()) {
        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
                calld, batch_str);
        gpr_free(batch_str);
      }
      batches[i]->handler_private.extra_arg = calld->subchannel_call;
      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
                        start_batch_in_call_combiner, batches[i],
                        grpc_schedule_on_exec_ctx);
      GRPC_CALL_COMBINER_START(calld->call_combiner,
                               &batches[i]->handler_private.closure,
                               GRPC_ERROR_NONE, "start_subchannel_batch");
    }
    if (grpc_client_channel_trace.enabled()) {
      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
              batch_str);
      gpr_free(batch_str);
    }
    // Note: This will release the call combiner.
    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
  }
}
|
2460
|
+
|
2461
|
+
//
|
2462
|
+
// LB pick
|
2463
|
+
//
|
2464
|
+
|
2465
|
+
// Creates the subchannel call on the connected subchannel chosen by the
// LB pick, then resumes the pending batches (on success) or fails them
// (on error).  Takes ownership of "error".
static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Only reserve parent-data space for retry state when retries are on.
  const size_t parent_data_size =
      calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
  const grpc_core::ConnectedSubchannel::CallArgs call_args = {
      calld->pollent,                       // pollent
      calld->path,                          // path
      calld->call_start_time,               // start_time
      calld->deadline,                      // deadline
      calld->arena,                         // arena
      calld->pick.subchannel_call_context,  // context
      calld->call_combiner,                 // call_combiner
      parent_data_size                      // parent_data_size
  };
  grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
      call_args, &calld->subchannel_call);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
  }
  if (new_error != GRPC_ERROR_NONE) {
    new_error = grpc_error_add_child(new_error, error);
    pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
  } else {
    if (parent_data_size > 0) {
      // Stash the call context in the retry state kept in the
      // subchannel call's parent data.
      subchannel_call_retry_state* retry_state =
          static_cast<subchannel_call_retry_state*>(
              grpc_connected_subchannel_call_get_parent_data(
                  calld->subchannel_call));
      retry_state->batch_payload.context = calld->pick.subchannel_call_context;
    }
    pending_batches_resume(elem);
  }
  GRPC_ERROR_UNREF(error);
}
|
2501
|
+
|
2502
|
+
// Invoked when a pick is completed, on both success or failure.
|
2503
|
+
static void pick_done(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (calld->pick.connected_subchannel == nullptr) {
    // Failed to create subchannel.
    // If there was no error, this is an LB policy drop, in which case
    // we return an error; otherwise, we may retry.
    grpc_status_code status = GRPC_STATUS_OK;
    grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                          nullptr);
    // LB drops (error == GRPC_ERROR_NONE) are never retried; otherwise
    // give maybe_retry() a chance before failing the pending batches.
    if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
        !maybe_retry(elem, nullptr /* batch_data */, status,
                     nullptr /* server_pushback_md */)) {
      grpc_error* new_error =
          error == GRPC_ERROR_NONE
              ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                    "Call dropped by load balancing policy")
              : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                    "Failed to create subchannel", &error, 1);
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: failed to create subchannel: error=%s",
                chand, calld, grpc_error_string(new_error));
      }
      pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
    }
  } else {
    /* Create call on subchannel. */
    create_subchannel_call(elem, GRPC_ERROR_REF(error));
  }
}
|
2535
|
+
|
2536
|
+
// Invoked when a pick is completed to leave the client_channel combiner
|
2537
|
+
// and continue processing in the call combiner.
|
2538
|
+
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Schedule pick_done() on the exec_ctx so that processing continues
  // in the call combiner rather than in the client_channel combiner.
  grpc_closure* pick_closure = GRPC_CLOSURE_INIT(
      &calld->pick_closure, pick_done, elem, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(pick_closure, error);
}
|
2544
|
+
|
2545
|
+
// A wrapper around pick_done_locked() that is used in cases where
|
2546
|
+
// either (a) the pick was deferred pending a resolver result or (b) the
|
2547
|
+
// pick was done asynchronously. Removes the call's polling entity from
|
2548
|
+
// chand->interested_parties before invoking pick_done_locked().
|
2549
|
+
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  // The call's polling entity was added to chand->interested_parties
  // while the pick was pending; remove it before completing the pick.
  grpc_polling_entity_del_from_pollset_set(calld->pollent,
                                           chand->interested_parties);
  pick_done_locked(elem, error);
}
|
2556
|
+
|
2557
|
+
// Note: This runs under the client_channel combiner, but will NOT be
|
2558
|
+
// holding the call combiner.
|
2559
|
+
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Note: chand->lb_policy may have changed since we started our pick,
  // in which case we will be cancelling the pick on a policy other than
  // the one we started it on.  However, this will just be a no-op.
  // Only cancel on a real error; GRPC_ERROR_NONE here means the
  // cancellation notification is being torn down, not fired.
  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
              chand, calld, chand->lb_policy.get());
    }
    chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
  }
  // Drop the "pick_callback_cancel" ref taken when the async pick started.
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
|
2575
|
+
|
2576
|
+
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
|
2577
|
+
// Unrefs the LB policy and invokes async_pick_done_locked().
|
2578
|
+
static void pick_callback_done_locked(void* arg, grpc_error* error) {
|
2579
|
+
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
|
2580
|
+
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
|
2581
|
+
call_data* calld = static_cast<call_data*>(elem->call_data);
|
2582
|
+
if (grpc_client_channel_trace.enabled()) {
|
2583
|
+
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
|
2584
|
+
chand, calld);
|
2585
|
+
}
|
2586
|
+
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
|
2587
|
+
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
|
2588
|
+
}
|
2589
|
+
|
2590
|
+
// Applies service config to the call. Must be invoked once we know
|
2591
|
+
// that the resolver has returned results to the channel.
|
2592
|
+
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
            chand, calld);
  }
  // Pick up channel-level retry throttling state, if any.
  if (chand->retry_throttle_data != nullptr) {
    calld->retry_throttle_data = chand->retry_throttle_data->Ref();
  }
  // Look up the per-method config for this call's path.
  if (chand->method_params_table != nullptr) {
    calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup(
        *chand->method_params_table, calld->path);
    if (calld->method_params != nullptr) {
      // If the deadline from the service config is shorter than the one
      // from the client API, reset the deadline timer.
      if (chand->deadline_checking_enabled &&
          calld->method_params->timeout() != 0) {
        const grpc_millis per_method_deadline =
            grpc_timespec_to_millis_round_up(calld->call_start_time) +
            calld->method_params->timeout();
        if (per_method_deadline < calld->deadline) {
          calld->deadline = per_method_deadline;
          grpc_deadline_state_reset(elem, calld->deadline);
        }
      }
    }
  }
  // If no retry policy, disable retries.
  // TODO(roth): Remove this when adding support for transparent retries.
  if (calld->method_params == nullptr ||
      calld->method_params->retry_policy() == nullptr) {
    calld->enable_retries = false;
  }
}
|
2627
|
+
|
2628
|
+
// Starts a pick on chand->lb_policy.
|
2629
|
+
// Returns true if pick is completed synchronously.
|
2630
|
+
static bool pick_callback_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
            chand, calld, chand->lb_policy.get());
  }
  // Only get service config data on the first attempt.
  if (calld->num_attempts_completed == 0) {
    apply_service_config_to_call_locked(elem);
  }
  // If the application explicitly set wait_for_ready, use that.
  // Otherwise, if the service config specified a value for this
  // method, use that.
  //
  // The send_initial_metadata batch will be the first one in the list,
  // as set by get_batch_index() above.
  calld->pick.initial_metadata =
      calld->seen_send_initial_metadata
          ? &calld->send_initial_metadata
          : calld->pending_batches[0]
                .batch->payload->send_initial_metadata.send_initial_metadata;
  uint32_t send_initial_metadata_flags =
      calld->seen_send_initial_metadata
          ? calld->send_initial_metadata_flags
          : calld->pending_batches[0]
                .batch->payload->send_initial_metadata
                .send_initial_metadata_flags;
  const bool wait_for_ready_set_from_api =
      send_initial_metadata_flags &
      GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
  const bool wait_for_ready_set_from_service_config =
      calld->method_params != nullptr &&
      calld->method_params->wait_for_ready() !=
          ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
  if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
    if (calld->method_params->wait_for_ready() ==
        ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
      send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
    } else {
      send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
    }
  }
  calld->pick.initial_metadata_flags = send_initial_metadata_flags;
  GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
                    grpc_combiner_scheduler(chand->combiner));
  calld->pick.on_complete = &calld->pick_closure;
  // Ref held for the duration of the pick; released either below (sync
  // completion) or in pick_callback_done_locked() (async completion).
  GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
  const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
  if (pick_done) {
    // Pick completed synchronously.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
              chand, calld);
    }
    GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
  } else {
    // Async pick: take a second ref owned by the cancellation path and
    // arrange for pick_callback_cancel_locked() to run if the call is
    // cancelled before the pick completes.
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
    grpc_call_combiner_set_notify_on_cancel(
        calld->call_combiner,
        GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
                          pick_callback_cancel_locked, elem,
                          grpc_combiner_scheduler(chand->combiner)));
  }
  return pick_done;
}
|
2696
|
+
|
2697
|
+
// State for a pick that is deferred until the resolver returns a result.
// Heap-allocated in pick_after_resolver_result_start_locked(); freed by
// whichever of the done/cancel callbacks runs second.
typedef struct {
  grpc_call_element* elem;  // The call element whose pick is deferred.
  bool finished;  // Set by the first of the done/cancel callbacks to run.
  grpc_closure closure;         // Runs when the resolver returns a result.
  grpc_closure cancel_closure;  // Runs if the call is cancelled first.
} pick_after_resolver_result_args;
|
2703
|
+
|
2704
|
+
// Note: This runs under the client_channel combiner, but will NOT be
|
2705
|
+
// holding the call combiner.
|
2706
|
+
static void pick_after_resolver_result_cancel_locked(void* arg,
                                                     grpc_error* error) {
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(arg);
  if (args->finished) {
    // The done callback already ran; we are the second callback, so we
    // are responsible for freeing args.
    gpr_free(args);
    return;
  }
  // If we don't yet have a resolver result, then a closure for
  // pick_after_resolver_result_done_locked() will have been added to
  // chand->waiting_for_resolver_result_closures, and it may not be invoked
  // until after this call has been destroyed.  We mark the operation as
  // finished, so that when pick_after_resolver_result_done_locked()
  // is called, it will be a no-op.  We also immediately invoke
  // async_pick_done_locked() to propagate the error back to the caller.
  args->finished = true;
  grpc_call_element* elem = args->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: cancelling pick waiting for resolver result",
            chand, calld);
  }
  // Note: Although we are not in the call combiner here, we are
  // basically stealing the call combiner from the pending pick, so
  // it's safe to call async_pick_done_locked() here -- we are
  // essentially calling it here instead of calling it in
  // pick_after_resolver_result_done_locked().
  async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Pick cancelled", &error, 1));
}
|
2738
|
+
|
2739
|
+
// Invoked (in the client_channel combiner) once the resolver has
// returned a result or failed; decides whether to fail, retry waiting,
// or proceed with the LB pick.
static void pick_after_resolver_result_done_locked(void* arg,
                                                   grpc_error* error) {
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(arg);
  if (args->finished) {
    /* cancelled, do nothing */
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "call cancelled before resolver result");
    }
    // The cancel callback ran first; we free args as the second callback.
    gpr_free(args);
    return;
  }
  args->finished = true;
  grpc_call_element* elem = args->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error != GRPC_ERROR_NONE) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
              chand, calld);
    }
    async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  } else if (chand->resolver == nullptr) {
    // Shutting down.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
              calld);
    }
    async_pick_done_locked(
        elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
  } else if (chand->lb_policy == nullptr) {
    // Transient resolver failure.
    // If call has wait_for_ready=true, try again; otherwise, fail.
    uint32_t send_initial_metadata_flags =
        calld->seen_send_initial_metadata
            ? calld->send_initial_metadata_flags
            : calld->pending_batches[0]
                  .batch->payload->send_initial_metadata
                  .send_initial_metadata_flags;
    if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: resolver returned but no LB policy; "
                "wait_for_ready=true; trying again",
                chand, calld);
      }
      // Re-queue ourselves to wait for the next resolver result.
      pick_after_resolver_result_start_locked(elem);
    } else {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: resolver returned but no LB policy; "
                "wait_for_ready=false; failing",
                chand, calld);
      }
      async_pick_done_locked(
          elem,
          grpc_error_set_int(
              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
    }
  } else {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
              chand, calld);
    }
    if (pick_callback_start_locked(elem)) {
      // Even if the LB policy returns a result synchronously, we have
      // already added our polling entity to chand->interested_parties
      // in order to wait for the resolver result, so we need to
      // remove it here.  Therefore, we call async_pick_done_locked()
      // instead of pick_done_locked().
      async_pick_done_locked(elem, GRPC_ERROR_NONE);
    }
  }
}
|
2814
|
+
|
2815
|
+
// Defers the pick until the resolver returns a result.  Allocates a
// pick_after_resolver_result_args whose ownership passes to the
// done/cancel callback pair (the second one to run frees it).
static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: deferring pick pending resolver result", chand,
            calld);
  }
  pick_after_resolver_result_args* args =
      static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
  args->elem = elem;
  // Queue the done callback to run when the resolver produces a result.
  GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
                    args, grpc_combiner_scheduler(chand->combiner));
  grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
                           &args->closure, GRPC_ERROR_NONE);
  // Also arrange for cancellation to be handled while we wait.
  grpc_call_combiner_set_notify_on_cancel(
      calld->call_combiner,
      GRPC_CLOSURE_INIT(&args->cancel_closure,
                        pick_after_resolver_result_cancel_locked, args,
                        grpc_combiner_scheduler(chand->combiner)));
}
|
2836
|
+
|
2837
|
+
// Entry point for starting an LB pick for the call.  Runs in the
// client_channel combiner.
static void start_pick_locked(void* arg, grpc_error* ignored) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  // A pick must not already be in progress or completed for this call.
  GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
  GPR_ASSERT(calld->subchannel_call == nullptr);
  if (chand->lb_policy != nullptr) {
    // We already have an LB policy, so ask it for a pick.
    if (pick_callback_start_locked(elem)) {
      // Pick completed synchronously.
      pick_done_locked(elem, GRPC_ERROR_NONE);
      return;
    }
  } else {
    // We do not yet have an LB policy, so wait for a resolver result.
    if (chand->resolver == nullptr) {
      // Channel is shutting down.
      pick_done_locked(elem,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
      return;
    }
    if (!chand->started_resolving) {
      start_resolving_locked(chand);
    }
    pick_after_resolver_result_start_locked(elem);
  }
  // We need to wait for either a resolver result or for an async result
  // from the LB policy.  Add the polling entity from call_data to the
  // channel_data's interested_parties, so that the I/O of the LB policy
  // and resolver can be done under it.  The polling entity will be
  // removed in async_pick_done_locked().
  grpc_polling_entity_add_to_pollset_set(calld->pollent,
                                         chand->interested_parties);
}
|
2870
|
+
|
2871
|
+
//
|
2872
|
+
// filter call vtable functions
|
2873
|
+
//
|
2874
|
+
|
2875
|
+
// Main filter entry point for starting a transport stream op batch on
// the client channel.  Routes the batch to the subchannel call if a
// pick has completed, otherwise queues it and (for batches containing
// send_initial_metadata) kicks off a pick in the channel combiner.
// Fix: corrected typo "yeilding" -> "yielding" in the debug log string.
static void cc_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
  }
  // If we've previously been cancelled, immediately fail any new batches.
  if (calld->cancel_error != GRPC_ERROR_NONE) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
              chand, calld, grpc_error_string(calld->cancel_error));
    }
    // Note: This will release the call combiner.
    grpc_transport_stream_op_batch_finish_with_failure(
        batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
    return;
  }
  // Handle cancellation.
  if (batch->cancel_stream) {
    // Stash a copy of cancel_error in our call data, so that we can use
    // it for subsequent operations.  This ensures that if the call is
    // cancelled before any batches are passed down (e.g., if the deadline
    // is in the past when the call starts), we can return the right
    // error to the caller when the first batch does get passed down.
    GRPC_ERROR_UNREF(calld->cancel_error);
    calld->cancel_error =
        GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
              calld, grpc_error_string(calld->cancel_error));
    }
    // If we do not have a subchannel call (i.e., a pick has not yet
    // been started), fail all pending batches.  Otherwise, send the
    // cancellation down to the subchannel call.
    if (calld->subchannel_call == nullptr) {
      pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
                           false /* yield_call_combiner */);
      // Note: This will release the call combiner.
      grpc_transport_stream_op_batch_finish_with_failure(
          batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
    } else {
      // Note: This will release the call combiner.
      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
    }
    return;
  }
  // Add the batch to the pending list.
  pending_batches_add(elem, batch);
  // Check if we've already gotten a subchannel call.
  // Note that once we have completed the pick, we do not need to enter
  // the channel combiner, which is more efficient (especially for
  // streaming calls).
  if (calld->subchannel_call != nullptr) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
              calld, calld->subchannel_call);
    }
    pending_batches_resume(elem);
    return;
  }
  // We do not yet have a subchannel call.
  // For batches containing a send_initial_metadata op, enter the channel
  // combiner to start a pick.
  if (batch->send_initial_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
              chand, calld);
    }
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
                          elem, grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  } else {
    // For all other batches, release the call combiner.
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: saved batch, yielding call combiner", chand,
              calld);
    }
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "batch does not include send_initial_metadata");
  }
}
|
2961
|
+
|
2962
|
+
/* Constructor for call_data */
|
2963
|
+
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
                                     const grpc_call_element_args* args) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Copy the per-call parameters out of the element args.
  calld->path = grpc_slice_ref_internal(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = args->deadline;
  calld->arena = args->arena;
  calld->owning_call = args->call_stack;
  calld->call_combiner = args->call_combiner;
  // Inherit the channel-level retry setting for this call.
  calld->enable_retries = chand->enable_retries;
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                             calld->deadline);
  }
  return GRPC_ERROR_NONE;
}
|
2981
|
+
|
2982
|
+
/* Destructor for call_data */
static void cc_destroy_call_elem(grpc_call_element* elem,
                                 const grpc_call_final_info* final_info,
                                 grpc_closure* then_schedule_closure) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->deadline_checking_enabled) {
    grpc_deadline_state_destroy(elem);
  }
  // Release per-call resources acquired in cc_init_call_elem and during
  // the call's lifetime.
  grpc_slice_unref_internal(calld->path);
  calld->retry_throttle_data.reset();
  calld->method_params.reset();
  GRPC_ERROR_UNREF(calld->cancel_error);
  if (calld->subchannel_call != nullptr) {
    // Hand then_schedule_closure to the subchannel call's cleanup path
    // before unreffing.  NOTE(review): presumably the closure runs when
    // the subchannel call is finally destroyed, which may be after the
    // unref below — confirm against grpc_subchannel_call_set_cleanup_closure.
    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                             then_schedule_closure);
    then_schedule_closure = nullptr;  // ownership transferred above
    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
                               "client_channel_destroy_call");
  }
  // By destruction time, every pending batch must have been resumed or
  // failed.
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
  }
  // Drop the ref to the picked connected subchannel, if any.
  if (calld->pick.connected_subchannel != nullptr) {
    calld->pick.connected_subchannel.reset();
  }
  // Destroy any per-context values attached by the pick.
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
    if (calld->pick.subchannel_call_context[i].value != nullptr) {
      calld->pick.subchannel_call_context[i].destroy(
          calld->pick.subchannel_call_context[i].value);
    }
  }
  // If ownership was not transferred to the subchannel call above,
  // schedule the closure ourselves (GRPC_CLOSURE_SCHED of nullptr is the
  // transferred case).
  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}
|
3016
|
+
|
3017
|
+
// Records the polling entity the call should use; read later when the
// subchannel call is created.
static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
                                          grpc_polling_entity* pollent) {
  static_cast<call_data*>(elem->call_data)->pollent = pollent;
}
|
3022
|
+
|
3023
|
+
/*************************************************************************
 * EXPORTED SYMBOLS
 */

// Filter vtable for the client channel.  Entries are positional; each is
// labeled with the cc_* hook it installs.
const grpc_channel_filter grpc_client_channel_filter = {
    cc_start_transport_stream_op_batch,  // per-call batch entry point
    cc_start_transport_op,               // channel-level op entry point
    sizeof(call_data),                   // per-call state size
    cc_init_call_elem,                   // call constructor
    cc_set_pollset_or_pollset_set,       // polling entity setter
    cc_destroy_call_elem,                // call destructor
    sizeof(channel_data),                // per-channel state size
    cc_init_channel_elem,                // channel constructor
    cc_destroy_channel_elem,             // channel destructor
    cc_get_channel_info,                 // channel info query
    "client-channel",                    // filter name
};
|
3040
|
+
|
3041
|
+
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
|
3042
|
+
channel_data* chand = static_cast<channel_data*>(arg);
|
3043
|
+
if (chand->lb_policy != nullptr) {
|
3044
|
+
chand->lb_policy->ExitIdleLocked();
|
3045
|
+
} else {
|
3046
|
+
chand->exit_idle_when_lb_policy_arrives = true;
|
3047
|
+
if (!chand->started_resolving && chand->resolver != nullptr) {
|
3048
|
+
start_resolving_locked(chand);
|
3049
|
+
}
|
3050
|
+
}
|
3051
|
+
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
|
3052
|
+
}
|
3053
|
+
|
3054
|
+
// Returns the channel's current connectivity state.  When the channel is
// IDLE and try_to_connect is non-zero, schedules a combiner closure that
// triggers resolution / LB-policy exit-idle.
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
    grpc_channel_element* elem, int try_to_connect) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  const grpc_connectivity_state state =
      grpc_connectivity_state_check(&chand->state_tracker);
  if (try_to_connect && state == GRPC_CHANNEL_IDLE) {
    // Ref released by try_to_connect_locked().
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
                            grpc_combiner_scheduler(chand->combiner)),
        GRPC_ERROR_NONE);
  }
  return state;
}
|
3068
|
+
|
3069
|
+
// State for one external connectivity-state watch requested through
// grpc_client_channel_watch_connectivity_state().  Heap-allocated and
// linked into channel_data's intrusive watcher list.
typedef struct external_connectivity_watcher {
  channel_data* chand;               // owning channel element data
  grpc_polling_entity pollent;       // polling entity used while waiting
  grpc_closure* on_complete;         // caller's closure; also the lookup key
  grpc_closure* watcher_timer_init;  // run once the watch is registered
  grpc_connectivity_state* state;    // out-param; nullptr requests a cancel
  grpc_closure my_closure;           // internal closure for combiner hops
  struct external_connectivity_watcher* next;  // intrusive list link
} external_connectivity_watcher;
|
3078
|
+
|
3079
|
+
// Finds the registered watcher whose completion closure is on_complete,
// or returns nullptr if there is none.  Takes the list mutex.
static external_connectivity_watcher* lookup_external_connectivity_watcher(
    channel_data* chand, grpc_closure* on_complete) {
  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  external_connectivity_watcher* w =
      chand->external_connectivity_watcher_list_head;
  for (; w != nullptr && w->on_complete != on_complete; w = w->next) {
  }
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
  return w;
}
|
3090
|
+
|
3091
|
+
// Prepends |w| to |chand|'s list of external connectivity watchers.
// |w| must not already be in the list.
//
// Fix: the original locked &w->chand->external_connectivity_watcher_list_mu
// while asserting on and mutating chand's list.  Callers pass
// chand == w->chand, so behavior was the same, but the mixed spelling was
// confusing and would lock the wrong mutex if the two ever diverged; use
// |chand| consistently.
static void external_connectivity_watcher_list_append(
    channel_data* chand, external_connectivity_watcher* w) {
  GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));

  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  GPR_ASSERT(!w->next);
  w->next = chand->external_connectivity_watcher_list_head;
  chand->external_connectivity_watcher_list_head = w;
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
}
|
3101
|
+
|
3102
|
+
static void external_connectivity_watcher_list_remove(
|
3103
|
+
channel_data* chand, external_connectivity_watcher* too_remove) {
|
3104
|
+
GPR_ASSERT(
|
3105
|
+
lookup_external_connectivity_watcher(chand, too_remove->on_complete));
|
3106
|
+
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
|
3107
|
+
if (too_remove == chand->external_connectivity_watcher_list_head) {
|
3108
|
+
chand->external_connectivity_watcher_list_head = too_remove->next;
|
3109
|
+
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
|
3110
|
+
return;
|
3111
|
+
}
|
3112
|
+
external_connectivity_watcher* w =
|
3113
|
+
chand->external_connectivity_watcher_list_head;
|
3114
|
+
while (w != nullptr) {
|
3115
|
+
if (w->next == too_remove) {
|
3116
|
+
w->next = w->next->next;
|
3117
|
+
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
|
3118
|
+
return;
|
3119
|
+
}
|
3120
|
+
w = w->next;
|
3121
|
+
}
|
3122
|
+
GPR_UNREACHABLE_CODE(return );
|
3123
|
+
}
|
3124
|
+
|
3125
|
+
// Returns the number of external connectivity watchers currently
// registered on this channel.
int grpc_client_channel_num_external_connectivity_watchers(
    grpc_channel_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  int count = 0;

  gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
  for (external_connectivity_watcher* w =
           chand->external_connectivity_watcher_list_head;
       w != nullptr; w = w->next) {
    ++count;
  }
  gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);

  return count;
}
|
3141
|
+
|
3142
|
+
static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
|
3143
|
+
external_connectivity_watcher* w =
|
3144
|
+
static_cast<external_connectivity_watcher*>(arg);
|
3145
|
+
grpc_closure* follow_up = w->on_complete;
|
3146
|
+
grpc_polling_entity_del_from_pollset_set(&w->pollent,
|
3147
|
+
w->chand->interested_parties);
|
3148
|
+
GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
|
3149
|
+
"external_connectivity_watcher");
|
3150
|
+
external_connectivity_watcher_list_remove(w->chand, w);
|
3151
|
+
gpr_free(w);
|
3152
|
+
GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error));
|
3153
|
+
}
|
3154
|
+
|
3155
|
+
// Combiner callback that either registers a new external connectivity
// watch (w->state != nullptr) or cancels an existing one
// (w->state == nullptr, matched by on_complete).
static void watch_connectivity_state_locked(void* arg,
                                            grpc_error* error_ignored) {
  external_connectivity_watcher* w =
      static_cast<external_connectivity_watcher*>(arg);
  external_connectivity_watcher* found = nullptr;
  if (w->state != nullptr) {
    // Register: add to the channel's watcher list, tell the caller the
    // watch is live, then arrange for my_closure to run (in the
    // combiner) on the next state change.
    external_connectivity_watcher_list_append(w->chand, w);
    GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
    GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
                      grpc_combiner_scheduler(w->chand->combiner));
    grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
                                                   w->state, &w->my_closure);
  } else {
    // Cancel: |w| here is only the cancellation request; the live watch
    // (if any) is looked up by its on_complete closure.
    GPR_ASSERT(w->watcher_timer_init == nullptr);
    found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
    if (found) {
      GPR_ASSERT(found->on_complete == w->on_complete);
      // A nullptr state asks the tracker to fire the pending closure as
      // a cancellation rather than on a state change.
      grpc_connectivity_state_notify_on_state_change(
          &found->chand->state_tracker, nullptr, &found->my_closure);
    }
    // Release the request object's own resources.
    grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                             w->chand->interested_parties);
    GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                             "external_connectivity_watcher");
    gpr_free(w);
  }
}
|
3182
|
+
|
3183
|
+
// Starts (or, when |state| is nullptr, cancels) an external watch of the
// channel's connectivity state.  Allocates a watcher object and bounces
// into the channel combiner, where the real work happens in
// watch_connectivity_state_locked().
void grpc_client_channel_watch_connectivity_state(
    grpc_channel_element* elem, grpc_polling_entity pollent,
    grpc_connectivity_state* state, grpc_closure* closure,
    grpc_closure* watcher_timer_init) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  external_connectivity_watcher* watcher =
      static_cast<external_connectivity_watcher*>(
          gpr_zalloc(sizeof(*watcher)));
  watcher->chand = chand;
  watcher->pollent = pollent;
  watcher->on_complete = closure;
  watcher->state = state;
  watcher->watcher_timer_init = watcher_timer_init;
  grpc_polling_entity_add_to_pollset_set(&watcher->pollent,
                                         chand->interested_parties);
  // Ref released on the watch-complete or cancel path in the combiner.
  GRPC_CHANNEL_STACK_REF(chand->owning_stack,
                         "external_connectivity_watcher");
  GRPC_CLOSURE_SCHED(
      GRPC_CLOSURE_INIT(&watcher->my_closure, watch_connectivity_state_locked,
                        watcher, grpc_combiner_scheduler(chand->combiner)),
      GRPC_ERROR_NONE);
}
|
3204
|
+
|
3205
|
+
// Returns the subchannel call associated with this client-channel call
// element (nullptr until a pick has completed).
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
    grpc_call_element* elem) {
  return static_cast<call_data*>(elem->call_data)->subchannel_call;
}
|